repo_name (stringlengths 5–100) | path (stringlengths 4–375) | copies (stringclasses, 991 values) | size (stringlengths 4–7) | content (stringlengths 666–1M) | license (stringclasses, 15 values)
---|---|---|---|---|---|
tchernomax/ansible | lib/ansible/modules/cloud/amazon/aws_glue_job.py | 27 | 12871 | #!/usr/bin/python
# Copyright: (c) 2018, Rob White (@wimnat)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aws_glue_job
short_description: Manage an AWS Glue job
description:
- Manage an AWS Glue job. See U(https://aws.amazon.com/glue/) for details.
version_added: "2.6"
requirements: [ boto3 ]
author: "Rob White (@wimnat)"
options:
allocated_capacity:
description:
- The number of AWS Glue data processing units (DPUs) to allocate to this Job. From 2 to 100 DPUs
can be allocated; the default is 10. A DPU is a relative measure of processing power that consists
of 4 vCPUs of compute capacity and 16 GB of memory.
required: false
command_name:
description:
- The name of the job command. This must be 'glueetl'.
required: false
default: glueetl
command_script_location:
description:
- The S3 path to a script that executes a job.
required: true
connections:
description:
- A list of Glue connections used for this job.
required: false
default_arguments:
description:
- A dict of default arguments for this job. You can specify arguments here that your own job-execution
script consumes, as well as arguments that AWS Glue itself consumes.
required: false
description:
description:
- Description of the job being defined.
required: false
max_concurrent_runs:
description:
- The maximum number of concurrent runs allowed for the job. The default is 1. An error is returned when
this threshold is reached. The maximum value you can specify is controlled by a service limit.
required: false
max_retries:
description:
- The maximum number of times to retry this job if it fails.
required: false
name:
description:
- The name you assign to this job definition. It must be unique in your account.
required: true
role:
description:
- The name or ARN of the IAM role associated with this job.
required: true
state:
description:
- Create or delete the AWS Glue job.
required: true
choices: [ 'present', 'absent' ]
timeout:
description:
- The job timeout in minutes.
required: false
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Create an AWS Glue job
- aws_glue_job:
    command_script_location: s3bucket/script.py
    name: my-glue-job
    role: my-iam-role
    state: present
# Delete an AWS Glue job
- aws_glue_job:
    name: my-glue-job
    state: absent
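# A fuller creation example exercising the optional parameters documented
# above; the bucket, connection and argument values are illustrative
# placeholders, not defaults of this module:
- aws_glue_job:
    allocated_capacity: 10
    command_script_location: s3bucket/scripts/etl.py
    connections:
      - my-glue-connection
    default_arguments:
      '--TempDir': s3bucket/temp
    max_concurrent_runs: 2
    max_retries: 1
    name: my-etl-job
    role: my-iam-role
    state: present
    timeout: 120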
'''
RETURN = '''
allocated_capacity:
description: The number of AWS Glue data processing units (DPUs) allocated to runs of this job. From 2 to
100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power
that consists of 4 vCPUs of compute capacity and 16 GB of memory.
returned: when state is present
type: int
sample: 10
command:
description: The JobCommand that executes this job.
returned: when state is present
type: complex
contains:
name:
description: The name of the job command.
returned: when state is present
type: string
sample: glueetl
script_location:
description: Specifies the S3 path to a script that executes a job.
returned: when state is present
type: string
sample: mybucket/myscript.py
connections:
description: The connections used for this job.
returned: when state is present
type: dict
sample: "{ Connections: [ 'list', 'of', 'connections' ] }"
created_on:
description: The time and date that this job definition was created.
returned: when state is present
type: string
sample: "2018-04-21T05:19:58.326000+00:00"
default_arguments:
description: The default arguments for this job, specified as name-value pairs.
returned: when state is present
type: dict
sample: "{ 'mykey1': 'myvalue1' }"
description:
description: Description of the job being defined.
returned: when state is present
type: string
sample: My first Glue job
job_name:
description: The name of the AWS Glue job.
returned: always
type: string
sample: my-glue-job
execution_property:
description: An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job.
returned: always
type: complex
contains:
max_concurrent_runs:
description: The maximum number of concurrent runs allowed for the job. The default is 1. An error is
returned when this threshold is reached. The maximum value you can specify is controlled by
a service limit.
returned: when state is present
type: int
sample: 1
last_modified_on:
description: The last point in time when this job definition was modified.
returned: when state is present
type: string
sample: "2018-04-21T05:19:58.326000+00:00"
max_retries:
description: The maximum number of times to retry this job after a JobRun fails.
returned: when state is present
type: int
sample: 5
name:
description: The name assigned to this job definition.
returned: when state is present
type: string
sample: my-glue-job
role:
description: The name or ARN of the IAM role associated with this job.
returned: when state is present
type: string
sample: my-iam-role
timeout:
description: The job timeout in minutes.
returned: when state is present
type: int
sample: 300
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
# Non-ansible imports
import copy
try:
from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
pass  # a missing botocore is surfaced by AnsibleAWSModule at module init
def _get_glue_job(connection, module, glue_job_name):
"""
Get an AWS Glue job based on name. If not found, return None.
:param connection: AWS boto3 glue connection
:param module: Ansible module
:param glue_job_name: Name of Glue job to get
:return: boto3 Glue job dict or None if not found
"""
try:
return connection.get_job(JobName=glue_job_name)['Job']
except ClientError as e:
# only ClientError carries a structured response dict; BotoCoreError does not
if e.response['Error']['Code'] == 'EntityNotFoundException':
return None
module.fail_json_aws(e)
except BotoCoreError as e:
module.fail_json_aws(e)
def _compare_glue_job_params(user_params, current_params):
"""
Compare Glue job params. If there is a difference, return True immediately; otherwise return False.
:param user_params: the Glue job parameters passed by the user
:param current_params: the Glue job parameters currently configured
:return: True if any parameter is mismatched else False
"""
# Weirdly, boto3 omits some keys entirely when their value is empty, e.g. Description
# To counter this, add the key if it's missing with a blank value
if 'Description' not in current_params:
current_params['Description'] = ""
if 'DefaultArguments' not in current_params:
current_params['DefaultArguments'] = dict()
if 'AllocatedCapacity' in user_params and user_params['AllocatedCapacity'] != current_params['AllocatedCapacity']:
return True
if 'Command' in user_params and user_params['Command']['ScriptLocation'] != current_params['Command']['ScriptLocation']:
return True
if 'Connections' in user_params and set(user_params['Connections']) != set(current_params['Connections']):
return True
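# NOTE: set() over a dict yields only its keys, so the DefaultArguments check
# below detects added/removed argument names but not changed values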
if 'DefaultArguments' in user_params and set(user_params['DefaultArguments']) != set(current_params['DefaultArguments']):
return True
if 'Description' in user_params and user_params['Description'] != current_params['Description']:
return True
if 'ExecutionProperty' in user_params and user_params['ExecutionProperty']['MaxConcurrentRuns'] != current_params['ExecutionProperty']['MaxConcurrentRuns']:
return True
if 'MaxRetries' in user_params and user_params['MaxRetries'] != current_params['MaxRetries']:
return True
if 'Timeout' in user_params and user_params['Timeout'] != current_params['Timeout']:
return True
return False
def create_or_update_glue_job(connection, module, glue_job):
"""
Create or update an AWS Glue job
:param connection: AWS boto3 glue connection
:param module: Ansible module
:param glue_job: a dict of AWS Glue job parameters or None
:return:
"""
changed = False
params = dict()
params['Name'] = module.params.get("name")
params['Role'] = module.params.get("role")
if module.params.get("allocated_capacity") is not None:
params['AllocatedCapacity'] = module.params.get("allocated_capacity")
if module.params.get("command_script_location") is not None:
params['Command'] = {'Name': module.params.get("command_name"), 'ScriptLocation': module.params.get("command_script_location")}
if module.params.get("connections") is not None:
params['Connections'] = {'Connections': module.params.get("connections")}
if module.params.get("default_arguments") is not None:
params['DefaultArguments'] = module.params.get("default_arguments")
if module.params.get("description") is not None:
params['Description'] = module.params.get("description")
if module.params.get("max_concurrent_runs") is not None:
params['ExecutionProperty'] = {'MaxConcurrentRuns': module.params.get("max_concurrent_runs")}
if module.params.get("max_retries") is not None:
params['MaxRetries'] = module.params.get("max_retries")
if module.params.get("timeout") is not None:
params['Timeout'] = module.params.get("timeout")
# If glue_job is not None then check if it needs to be modified, else create it
if glue_job:
if _compare_glue_job_params(params, glue_job):
try:
# update_job() takes the job name as a separate JobName argument and its
# JobUpdate payload has no 'Name' field, so deep-copy the params and strip it
update_params = {'JobName': params['Name'], 'JobUpdate': copy.deepcopy(params)}
del update_params['JobUpdate']['Name']
connection.update_job(**update_params)
changed = True
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e)
else:
try:
connection.create_job(**params)
changed = True
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e)
# If changed, get the Glue job again
if changed:
glue_job = _get_glue_job(connection, module, params['Name'])
module.exit_json(changed=changed, **camel_dict_to_snake_dict(glue_job))
def delete_glue_job(connection, module, glue_job):
"""
Delete an AWS Glue job
:param connection: AWS boto3 glue connection
:param module: Ansible module
:param glue_job: a dict of AWS Glue job parameters or None
:return:
"""
changed = False
if glue_job:
try:
connection.delete_job(JobName=glue_job['Name'])
changed = True
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e)
module.exit_json(changed=changed)
def main():
argument_spec = (
dict(
allocated_capacity=dict(type='int'),
command_name=dict(type='str', default='glueetl'),
command_script_location=dict(type='str'),
connections=dict(type='list'),
default_arguments=dict(type='dict'),
description=dict(type='str'),
max_concurrent_runs=dict(type='int'),
max_retries=dict(type='int'),
name=dict(required=True, type='str'),
role=dict(type='str'),
state=dict(required=True, choices=['present', 'absent'], type='str'),
timeout=dict(type='int')
)
)
module = AnsibleAWSModule(argument_spec=argument_spec,
required_if=[
('state', 'present', ['role', 'command_script_location'])
]
)
connection = module.client('glue')
state = module.params.get("state")
glue_job = _get_glue_job(connection, module, module.params.get("name"))
if state == 'present':
create_or_update_glue_job(connection, module, glue_job)
else:
delete_glue_job(connection, module, glue_job)
if __name__ == '__main__':
main()
| gpl-3.0 |
dvliman/jaikuengine | .google_appengine/google/appengine/tools/devappserver2/application_configuration_test.py | 6 | 36273 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for google.apphosting.tools.devappserver2.application_configuration."""
import collections
import os.path
import unittest
import google
import mox
from google.appengine.api import appinfo
from google.appengine.api import backendinfo
from google.appengine.api import dispatchinfo
from google.appengine.tools.devappserver2 import application_configuration
from google.appengine.tools.devappserver2 import errors
class TestModuleConfiguration(unittest.TestCase):
"""Tests for application_configuration.ModuleConfiguration."""
def setUp(self):
self.mox = mox.Mox()
self.mox.StubOutWithMock(
application_configuration.ModuleConfiguration,
'_parse_configuration')
self.mox.StubOutWithMock(os.path, 'getmtime')
def tearDown(self):
self.mox.UnsetStubs()
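# The tests below follow mox's record/replay/verify cycle: expectations are
# recorded on the stubbed calls, ReplayAll() switches to replay mode, the code
# under test runs against the canned returns, and VerifyAll() asserts that
# every recorded expectation actually fired.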
def test_good_app_yaml_configuration(self):
automatic_scaling = appinfo.AutomaticScaling(min_pending_latency='1.0s',
max_pending_latency='2.0s',
min_idle_instances=1,
max_idle_instances=2)
error_handlers = [appinfo.ErrorHandlers(file='error.html')]
handlers = [appinfo.URLMap()]
env_variables = appinfo.EnvironmentVariables()
info = appinfo.AppInfoExternal(
application='app',
module='module1',
version='1',
runtime='python27',
threadsafe=False,
automatic_scaling=automatic_scaling,
skip_files=r'\*.gif',
error_handlers=error_handlers,
handlers=handlers,
inbound_services=['warmup'],
env_variables=env_variables,
)
application_configuration.ModuleConfiguration._parse_configuration(
'/appdir/app.yaml').AndReturn((info, []))
os.path.getmtime('/appdir/app.yaml').AndReturn(10)
self.mox.ReplayAll()
config = application_configuration.ModuleConfiguration(
'/appdir/app.yaml')
self.mox.VerifyAll()
self.assertEqual(os.path.realpath('/appdir'), config.application_root)
self.assertEqual('dev~app', config.application)
self.assertEqual('module1', config.module_name)
self.assertEqual('1', config.major_version)
self.assertRegexpMatches(config.version_id, r'module1:1\.\d+')
self.assertEqual('python27', config.runtime)
self.assertFalse(config.threadsafe)
self.assertEqual(automatic_scaling, config.automatic_scaling)
self.assertEqual(info.GetNormalizedLibraries(),
config.normalized_libraries)
self.assertEqual(r'\*.gif', config.skip_files)
self.assertEqual(error_handlers, config.error_handlers)
self.assertEqual(handlers, config.handlers)
self.assertEqual(['warmup'], config.inbound_services)
self.assertEqual(env_variables, config.env_variables)
self.assertEqual({'/appdir/app.yaml': 10}, config._mtimes)
def test_check_for_updates_unchanged_mtime(self):
info = appinfo.AppInfoExternal(
application='app',
module='default',
version='version',
runtime='python27',
threadsafe=False)
application_configuration.ModuleConfiguration._parse_configuration(
'/appdir/app.yaml').AndReturn((info, []))
os.path.getmtime('/appdir/app.yaml').AndReturn(10)
os.path.getmtime('/appdir/app.yaml').AndReturn(10)
self.mox.ReplayAll()
config = application_configuration.ModuleConfiguration('/appdir/app.yaml')
self.assertSequenceEqual(set(), config.check_for_updates())
self.mox.VerifyAll()
def test_check_for_updates_with_includes(self):
info = appinfo.AppInfoExternal(
application='app',
module='default',
version='version',
runtime='python27',
includes=['/appdir/include.yaml'],
threadsafe=False)
application_configuration.ModuleConfiguration._parse_configuration(
'/appdir/app.yaml').AndReturn((info, ['/appdir/include.yaml']))
os.path.getmtime('/appdir/app.yaml').InAnyOrder().AndReturn(10)
os.path.getmtime('/appdir/include.yaml').InAnyOrder().AndReturn(10)
os.path.getmtime('/appdir/app.yaml').AndReturn(10)
os.path.getmtime('/appdir/include.yaml').AndReturn(11)
application_configuration.ModuleConfiguration._parse_configuration(
'/appdir/app.yaml').AndReturn((info, ['/appdir/include.yaml']))
os.path.getmtime('/appdir/app.yaml').InAnyOrder().AndReturn(10)
os.path.getmtime('/appdir/include.yaml').InAnyOrder().AndReturn(11)
self.mox.ReplayAll()
config = application_configuration.ModuleConfiguration('/appdir/app.yaml')
self.assertEqual({'/appdir/app.yaml': 10, '/appdir/include.yaml': 10},
config._mtimes)
config._mtimes = collections.OrderedDict([('/appdir/app.yaml', 10),
('/appdir/include.yaml', 10)])
self.assertSequenceEqual(set(), config.check_for_updates())
self.mox.VerifyAll()
self.assertEqual({'/appdir/app.yaml': 10, '/appdir/include.yaml': 11},
config._mtimes)
def test_check_for_updates_no_changes(self):
info = appinfo.AppInfoExternal(
application='app',
module='default',
version='version',
runtime='python27',
threadsafe=False)
application_configuration.ModuleConfiguration._parse_configuration(
'/appdir/app.yaml').AndReturn((info, []))
os.path.getmtime('/appdir/app.yaml').AndReturn(10)
os.path.getmtime('/appdir/app.yaml').AndReturn(11)
application_configuration.ModuleConfiguration._parse_configuration(
'/appdir/app.yaml').AndReturn((info, []))
os.path.getmtime('/appdir/app.yaml').AndReturn(11)
self.mox.ReplayAll()
config = application_configuration.ModuleConfiguration('/appdir/app.yaml')
self.assertSequenceEqual(set(), config.check_for_updates())
self.mox.VerifyAll()
self.assertEqual({'/appdir/app.yaml': 11}, config._mtimes)
def test_check_for_updates_immutable_changes(self):
automatic_scaling1 = appinfo.AutomaticScaling(
min_pending_latency='0.1s',
max_pending_latency='1.0s',
min_idle_instances=1,
max_idle_instances=2)
info1 = appinfo.AppInfoExternal(
application='app',
module='default',
version='version',
runtime='python27',
threadsafe=False,
automatic_scaling=automatic_scaling1)
info2 = appinfo.AppInfoExternal(
application='app2',
module='default2',
version='version2',
runtime='python',
threadsafe=True,
automatic_scaling=appinfo.AutomaticScaling(
min_pending_latency='1.0s',
max_pending_latency='2.0s',
min_idle_instances=1,
max_idle_instances=2))
application_configuration.ModuleConfiguration._parse_configuration(
'/appdir/app.yaml').AndReturn((info1, []))
os.path.getmtime('/appdir/app.yaml').AndReturn(10)
os.path.getmtime('/appdir/app.yaml').AndReturn(11)
application_configuration.ModuleConfiguration._parse_configuration(
'/appdir/app.yaml').AndReturn((info2, []))
os.path.getmtime('/appdir/app.yaml').AndReturn(11)
self.mox.ReplayAll()
config = application_configuration.ModuleConfiguration('/appdir/app.yaml')
self.assertSequenceEqual(set(), config.check_for_updates())
self.mox.VerifyAll()
self.assertEqual('dev~app', config.application)
self.assertEqual('default', config.module_name)
self.assertEqual('version', config.major_version)
self.assertRegexpMatches(config.version_id, r'^version\.\d+$')
self.assertEqual('python27', config.runtime)
self.assertFalse(config.threadsafe)
self.assertEqual(automatic_scaling1, config.automatic_scaling)
def test_check_for_mutable_changes(self):
info1 = appinfo.AppInfoExternal(
application='app',
module='default',
version='version',
runtime='python27',
threadsafe=False,
libraries=[appinfo.Library(name='django', version='latest')],
skip_files='.*',
handlers=[],
inbound_services=['warmup'],
env_variables=appinfo.EnvironmentVariables(),
error_handlers=[appinfo.ErrorHandlers(file='error.html')],
)
info2 = appinfo.AppInfoExternal(
application='app',
module='default',
version='version',
runtime='python27',
threadsafe=False,
libraries=[appinfo.Library(name='jinja2', version='latest')],
skip_files=r'.*\.py',
handlers=[appinfo.URLMap()],
inbound_services=[],
)
application_configuration.ModuleConfiguration._parse_configuration(
'/appdir/app.yaml').AndReturn((info1, []))
os.path.getmtime('/appdir/app.yaml').AndReturn(10)
os.path.getmtime('/appdir/app.yaml').AndReturn(11)
application_configuration.ModuleConfiguration._parse_configuration(
'/appdir/app.yaml').AndReturn((info2, []))
os.path.getmtime('/appdir/app.yaml').AndReturn(11)
self.mox.ReplayAll()
config = application_configuration.ModuleConfiguration('/appdir/app.yaml')
self.assertSequenceEqual(
set([application_configuration.NORMALIZED_LIBRARIES_CHANGED,
application_configuration.SKIP_FILES_CHANGED,
application_configuration.HANDLERS_CHANGED,
application_configuration.INBOUND_SERVICES_CHANGED,
application_configuration.ENV_VARIABLES_CHANGED,
application_configuration.ERROR_HANDLERS_CHANGED]),
config.check_for_updates())
self.mox.VerifyAll()
self.assertEqual(info2.GetNormalizedLibraries(),
config.normalized_libraries)
self.assertEqual(info2.skip_files, config.skip_files)
self.assertEqual(info2.error_handlers, config.error_handlers)
self.assertEqual(info2.handlers, config.handlers)
self.assertEqual(info2.inbound_services, config.inbound_services)
self.assertEqual(info2.env_variables, config.env_variables)
class TestBackendsConfiguration(unittest.TestCase):
def setUp(self):
self.mox = mox.Mox()
self.mox.StubOutWithMock(
application_configuration.BackendsConfiguration,
'_parse_configuration')
self.mox.StubOutWithMock(application_configuration, 'BackendConfiguration')
def tearDown(self):
self.mox.UnsetStubs()
def test_good_configuration(self):
self.mox.StubOutWithMock(application_configuration, 'ModuleConfiguration')
static_backend_entry = backendinfo.BackendEntry(name='static')
dynamic_backend_entry = backendinfo.BackendEntry(name='dynamic')
backend_info = backendinfo.BackendInfoExternal(
backends=[static_backend_entry, dynamic_backend_entry])
module_config = object()
application_configuration.ModuleConfiguration(
'/appdir/app.yaml').AndReturn(module_config)
application_configuration.BackendsConfiguration._parse_configuration(
'/appdir/backends.yaml').AndReturn(backend_info)
static_configuration = object()
dynamic_configuration = object()
application_configuration.BackendConfiguration(
module_config,
mox.IgnoreArg(),
static_backend_entry).InAnyOrder().AndReturn(static_configuration)
application_configuration.BackendConfiguration(
module_config,
mox.IgnoreArg(),
dynamic_backend_entry).InAnyOrder().AndReturn(dynamic_configuration)
self.mox.ReplayAll()
config = application_configuration.BackendsConfiguration(
'/appdir/app.yaml',
'/appdir/backends.yaml')
self.assertItemsEqual([static_configuration, dynamic_configuration],
config.get_backend_configurations())
self.mox.VerifyAll()
def test_no_backends(self):
self.mox.StubOutWithMock(application_configuration, 'ModuleConfiguration')
backend_info = backendinfo.BackendInfoExternal()
module_config = object()
application_configuration.ModuleConfiguration(
'/appdir/app.yaml').AndReturn(module_config)
application_configuration.BackendsConfiguration._parse_configuration(
'/appdir/backends.yaml').AndReturn(backend_info)
self.mox.ReplayAll()
config = application_configuration.BackendsConfiguration(
'/appdir/app.yaml',
'/appdir/backends.yaml')
self.assertEqual([], config.get_backend_configurations())
self.mox.VerifyAll()
def test_check_for_changes(self):
static_backend_entry = backendinfo.BackendEntry(name='static')
dynamic_backend_entry = backendinfo.BackendEntry(name='dynamic')
backend_info = backendinfo.BackendInfoExternal(
backends=[static_backend_entry, dynamic_backend_entry])
module_config = self.mox.CreateMock(
application_configuration.ModuleConfiguration)
self.mox.StubOutWithMock(application_configuration, 'ModuleConfiguration')
application_configuration.ModuleConfiguration(
'/appdir/app.yaml').AndReturn(module_config)
application_configuration.BackendsConfiguration._parse_configuration(
'/appdir/backends.yaml').AndReturn(backend_info)
module_config.check_for_updates().AndReturn(set())
module_config.check_for_updates().AndReturn(set([1]))
module_config.check_for_updates().AndReturn(set([2]))
module_config.check_for_updates().AndReturn(set())
self.mox.ReplayAll()
config = application_configuration.BackendsConfiguration(
'/appdir/app.yaml',
'/appdir/backends.yaml')
self.assertEqual(set(), config.check_for_updates('dynamic'))
self.assertEqual(set([1]), config.check_for_updates('static'))
self.assertEqual(set([1, 2]), config.check_for_updates('dynamic'))
self.assertEqual(set([2]), config.check_for_updates('static'))
self.mox.VerifyAll()
class TestDispatchConfiguration(unittest.TestCase):
def setUp(self):
self.mox = mox.Mox()
self.mox.StubOutWithMock(os.path, 'getmtime')
self.mox.StubOutWithMock(
application_configuration.DispatchConfiguration,
'_parse_configuration')
def tearDown(self):
self.mox.UnsetStubs()
def test_good_configuration(self):
info = dispatchinfo.DispatchInfoExternal(
application='appid',
dispatch=[
dispatchinfo.DispatchEntry(url='*/path', module='foo'),
dispatchinfo.DispatchEntry(url='domain.com/path', module='bar'),
dispatchinfo.DispatchEntry(url='*/path/*', module='baz'),
dispatchinfo.DispatchEntry(url='*.domain.com/path/*', module='foo'),
])
os.path.getmtime('/appdir/dispatch.yaml').AndReturn(123.456)
application_configuration.DispatchConfiguration._parse_configuration(
'/appdir/dispatch.yaml').AndReturn(info)
self.mox.ReplayAll()
config = application_configuration.DispatchConfiguration(
'/appdir/dispatch.yaml')
self.mox.VerifyAll()
self.assertEqual(123.456, config._mtime)
self.assertEqual(2, len(config.dispatch))
self.assertEqual(vars(dispatchinfo.ParsedURL('*/path')),
vars(config.dispatch[0][0]))
self.assertEqual('foo', config.dispatch[0][1])
self.assertEqual(vars(dispatchinfo.ParsedURL('*/path/*')),
vars(config.dispatch[1][0]))
self.assertEqual('baz', config.dispatch[1][1])
def test_check_for_updates_no_modification(self):
info = dispatchinfo.DispatchInfoExternal(
application='appid',
dispatch=[])
os.path.getmtime('/appdir/dispatch.yaml').AndReturn(123.456)
application_configuration.DispatchConfiguration._parse_configuration(
'/appdir/dispatch.yaml').AndReturn(info)
os.path.getmtime('/appdir/dispatch.yaml').AndReturn(123.456)
self.mox.ReplayAll()
config = application_configuration.DispatchConfiguration(
'/appdir/dispatch.yaml')
config.check_for_updates()
self.mox.VerifyAll()
def test_check_for_updates_with_invalid_modification(self):
info = dispatchinfo.DispatchInfoExternal(
application='appid',
dispatch=[
dispatchinfo.DispatchEntry(url='*/path', module='bar'),
])
os.path.getmtime('/appdir/dispatch.yaml').AndReturn(123.456)
application_configuration.DispatchConfiguration._parse_configuration(
'/appdir/dispatch.yaml').AndReturn(info)
os.path.getmtime('/appdir/dispatch.yaml').AndReturn(124.456)
application_configuration.DispatchConfiguration._parse_configuration(
'/appdir/dispatch.yaml').AndRaise(Exception)
self.mox.ReplayAll()
config = application_configuration.DispatchConfiguration(
'/appdir/dispatch.yaml')
self.assertEqual('bar', config.dispatch[0][1])
config.check_for_updates()
self.mox.VerifyAll()
self.assertEqual('bar', config.dispatch[0][1])
def test_check_for_updates_with_modification(self):
info = dispatchinfo.DispatchInfoExternal(
application='appid',
dispatch=[
dispatchinfo.DispatchEntry(url='*/path', module='bar'),
])
new_info = dispatchinfo.DispatchInfoExternal(
application='appid',
dispatch=[
dispatchinfo.DispatchEntry(url='*/path', module='foo'),
])
os.path.getmtime('/appdir/dispatch.yaml').AndReturn(123.456)
application_configuration.DispatchConfiguration._parse_configuration(
'/appdir/dispatch.yaml').AndReturn(info)
os.path.getmtime('/appdir/dispatch.yaml').AndReturn(124.456)
application_configuration.DispatchConfiguration._parse_configuration(
'/appdir/dispatch.yaml').AndReturn(new_info)
self.mox.ReplayAll()
config = application_configuration.DispatchConfiguration(
'/appdir/dispatch.yaml')
self.assertEqual('bar', config.dispatch[0][1])
config.check_for_updates()
self.mox.VerifyAll()
self.assertEqual('foo', config.dispatch[0][1])
class TestBackendConfiguration(unittest.TestCase):
def setUp(self):
self.mox = mox.Mox()
self.mox.StubOutWithMock(
application_configuration.ModuleConfiguration,
'_parse_configuration')
self.mox.StubOutWithMock(os.path, 'getmtime')
def tearDown(self):
self.mox.UnsetStubs()
def test_good_configuration(self):
automatic_scaling = appinfo.AutomaticScaling(min_pending_latency='1.0s',
max_pending_latency='2.0s',
min_idle_instances=1,
max_idle_instances=2)
error_handlers = [appinfo.ErrorHandlers(file='error.html')]
handlers = [appinfo.URLMap()]
env_variables = appinfo.EnvironmentVariables()
info = appinfo.AppInfoExternal(
application='app',
module='module1',
version='1',
runtime='python27',
threadsafe=False,
automatic_scaling=automatic_scaling,
skip_files=r'\*.gif',
error_handlers=error_handlers,
handlers=handlers,
inbound_services=['warmup'],
env_variables=env_variables,
)
backend_entry = backendinfo.BackendEntry(
name='static',
instances='3',
options='public')
application_configuration.ModuleConfiguration._parse_configuration(
'/appdir/app.yaml').AndReturn((info, []))
os.path.getmtime('/appdir/app.yaml').AndReturn(10)
self.mox.ReplayAll()
module_config = application_configuration.ModuleConfiguration(
'/appdir/app.yaml')
config = application_configuration.BackendConfiguration(
module_config, None, backend_entry)
self.mox.VerifyAll()
self.assertEqual(os.path.realpath('/appdir'), config.application_root)
self.assertEqual('dev~app', config.application)
self.assertEqual('static', config.module_name)
self.assertEqual('1', config.major_version)
self.assertRegexpMatches(config.version_id, r'static:1\.\d+')
self.assertEqual('python27', config.runtime)
self.assertFalse(config.threadsafe)
self.assertEqual(None, config.automatic_scaling)
self.assertEqual(None, config.basic_scaling)
self.assertEqual(appinfo.ManualScaling(instances='3'),
config.manual_scaling)
self.assertEqual(info.GetNormalizedLibraries(),
config.normalized_libraries)
self.assertEqual(r'\*.gif', config.skip_files)
self.assertEqual(error_handlers, config.error_handlers)
self.assertEqual(handlers, config.handlers)
self.assertEqual(['warmup'], config.inbound_services)
self.assertEqual(env_variables, config.env_variables)
whitelist_fields = ['module_name', 'version_id', 'automatic_scaling',
'manual_scaling', 'basic_scaling', 'is_backend']
# Check that all public attributes and methods in a ModuleConfiguration
# exist in a BackendConfiguration.
for field in dir(module_config):
if not field.startswith('_'):
self.assertTrue(hasattr(config, field), 'Missing field: %s' % field)
value = getattr(module_config, field)
if field not in whitelist_fields and not callable(value):
# Check that the attributes other than those in the whitelist have
# equal values in the BackendConfiguration to the ModuleConfiguration
# from which it inherits.
self.assertEqual(value, getattr(config, field))
def test_good_configuration_dynamic_scaling(self):
automatic_scaling = appinfo.AutomaticScaling(min_pending_latency='1.0s',
max_pending_latency='2.0s',
min_idle_instances=1,
max_idle_instances=2)
error_handlers = [appinfo.ErrorHandlers(file='error.html')]
handlers = [appinfo.URLMap()]
env_variables = appinfo.EnvironmentVariables()
info = appinfo.AppInfoExternal(
application='app',
module='module1',
version='1',
runtime='python27',
threadsafe=False,
automatic_scaling=automatic_scaling,
skip_files=r'\*.gif',
error_handlers=error_handlers,
handlers=handlers,
inbound_services=['warmup'],
env_variables=env_variables,
)
backend_entry = backendinfo.BackendEntry(
name='dynamic',
instances='3',
options='public, dynamic',
start='handler')
application_configuration.ModuleConfiguration._parse_configuration(
'/appdir/app.yaml').AndReturn((info, []))
os.path.getmtime('/appdir/app.yaml').AndReturn(10)
self.mox.ReplayAll()
module_config = application_configuration.ModuleConfiguration(
'/appdir/app.yaml')
config = application_configuration.BackendConfiguration(
module_config, None, backend_entry)
self.mox.VerifyAll()
self.assertEqual(os.path.realpath('/appdir'), config.application_root)
self.assertEqual('dev~app', config.application)
self.assertEqual('dynamic', config.module_name)
self.assertEqual('1', config.major_version)
self.assertRegexpMatches(config.version_id, r'dynamic:1\.\d+')
self.assertEqual('python27', config.runtime)
self.assertFalse(config.threadsafe)
self.assertEqual(None, config.automatic_scaling)
self.assertEqual(None, config.manual_scaling)
self.assertEqual(appinfo.BasicScaling(max_instances='3'),
config.basic_scaling)
self.assertEqual(info.GetNormalizedLibraries(),
config.normalized_libraries)
self.assertEqual(r'\*.gif', config.skip_files)
self.assertEqual(error_handlers, config.error_handlers)
start_handler = appinfo.URLMap(url='/_ah/start',
script=backend_entry.start,
login='admin')
self.assertEqual([start_handler] + handlers, config.handlers)
self.assertEqual(['warmup'], config.inbound_services)
self.assertEqual(env_variables, config.env_variables)
def test_check_for_changes(self):
backends_config = self.mox.CreateMock(
application_configuration.BackendsConfiguration)
config = application_configuration.BackendConfiguration(
None, backends_config, backendinfo.BackendEntry(name='backend'))
changes = object()
backends_config.check_for_updates('backend').AndReturn([])
backends_config.check_for_updates('backend').AndReturn(changes)
minor_version = config._minor_version_id
self.mox.ReplayAll()
self.assertEqual([], config.check_for_updates())
self.assertEqual(minor_version, config._minor_version_id)
self.assertEqual(changes, config.check_for_updates())
self.assertNotEqual(minor_version, config._minor_version_id)
self.mox.VerifyAll()
class ModuleConfigurationStub(object):
def __init__(self, application='myapp', module_name='module'):
self.application = application
self.module_name = module_name
class DispatchConfigurationStub(object):
def __init__(self, dispatch):
self.dispatch = dispatch
class TestApplicationConfiguration(unittest.TestCase):
"""Tests for application_configuration.ApplicationConfiguration."""
def setUp(self):
self.mox = mox.Mox()
self.mox.StubOutWithMock(os.path, 'isdir')
self.mox.StubOutWithMock(os.path, 'getmtime')
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(application_configuration, 'ModuleConfiguration')
self.mox.StubOutWithMock(application_configuration, 'BackendsConfiguration')
self.mox.StubOutWithMock(application_configuration, 'DispatchConfiguration')
def tearDown(self):
self.mox.UnsetStubs()
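# The isdir/exists stubs in the tests below encode the discovery order the
# configuration loader is expected to use: a directory argument is probed for
# app.yaml and then app.yml, plus optional backends.yaml/backends.yml and
# dispatch.yaml companions.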
def test_yaml_files(self):
os.path.isdir('/appdir/app.yaml').AndReturn(False)
module_config1 = ModuleConfigurationStub()
application_configuration.ModuleConfiguration(
'/appdir/app.yaml').AndReturn(module_config1)
os.path.isdir('/appdir/other.yaml').AndReturn(False)
module_config2 = ModuleConfigurationStub(module_name='other')
application_configuration.ModuleConfiguration(
'/appdir/other.yaml').AndReturn(module_config2)
self.mox.ReplayAll()
config = application_configuration.ApplicationConfiguration(
['/appdir/app.yaml', '/appdir/other.yaml'])
self.mox.VerifyAll()
self.assertEqual('myapp', config.app_id)
self.assertSequenceEqual([module_config1, module_config2], config.modules)
def test_yaml_files_with_different_app_ids(self):
os.path.isdir('/appdir/app.yaml').AndReturn(False)
module_config1 = ModuleConfigurationStub()
application_configuration.ModuleConfiguration(
'/appdir/app.yaml').AndReturn(module_config1)
os.path.isdir('/appdir/other.yaml').AndReturn(False)
module_config2 = ModuleConfigurationStub(application='other_app',
module_name='other')
application_configuration.ModuleConfiguration(
'/appdir/other.yaml').AndReturn(module_config2)
self.mox.ReplayAll()
self.assertRaises(errors.InvalidAppConfigError,
application_configuration.ApplicationConfiguration,
['/appdir/app.yaml', '/appdir/other.yaml'])
self.mox.VerifyAll()
def test_yaml_files_with_duplicate_module_names(self):
os.path.isdir('/appdir/app.yaml').AndReturn(False)
application_configuration.ModuleConfiguration(
'/appdir/app.yaml').AndReturn(ModuleConfigurationStub())
os.path.isdir('/appdir/other.yaml').AndReturn(False)
application_configuration.ModuleConfiguration(
'/appdir/other.yaml').AndReturn(ModuleConfigurationStub())
self.mox.ReplayAll()
self.assertRaises(errors.InvalidAppConfigError,
application_configuration.ApplicationConfiguration,
['/appdir/app.yaml', '/appdir/other.yaml'])
self.mox.VerifyAll()
def test_directory(self):
os.path.isdir('/appdir').AndReturn(True)
os.path.exists(os.path.join('/appdir', 'app.yaml')).AndReturn(True)
os.path.exists(os.path.join('/appdir', 'backends.yaml')).AndReturn(False)
os.path.exists(os.path.join('/appdir', 'backends.yml')).AndReturn(False)
os.path.isdir(os.path.join('/appdir', 'app.yaml')).AndReturn(False)
module_config = ModuleConfigurationStub()
application_configuration.ModuleConfiguration(
os.path.join('/appdir', 'app.yaml')).AndReturn(module_config)
self.mox.ReplayAll()
config = application_configuration.ApplicationConfiguration(['/appdir'])
self.mox.VerifyAll()
self.assertEqual('myapp', config.app_id)
self.assertSequenceEqual([module_config], config.modules)
def test_directory_app_yml_only(self):
os.path.isdir('/appdir').AndReturn(True)
os.path.exists(os.path.join('/appdir', 'app.yaml')).AndReturn(False)
os.path.exists(os.path.join('/appdir', 'app.yml')).AndReturn(True)
os.path.exists(os.path.join('/appdir', 'backends.yaml')).AndReturn(False)
os.path.exists(os.path.join('/appdir', 'backends.yml')).AndReturn(False)
os.path.isdir(os.path.join('/appdir', 'app.yml')).AndReturn(False)
module_config = ModuleConfigurationStub()
application_configuration.ModuleConfiguration(
os.path.join('/appdir', 'app.yml')).AndReturn(module_config)
self.mox.ReplayAll()
config = application_configuration.ApplicationConfiguration(['/appdir'])
self.mox.VerifyAll()
self.assertEqual('myapp', config.app_id)
self.assertSequenceEqual([module_config], config.modules)
def test_directory_no_app_yamls(self):
os.path.isdir('/appdir').AndReturn(True)
os.path.exists(os.path.join('/appdir', 'app.yaml')).AndReturn(False)
os.path.exists(os.path.join('/appdir', 'app.yml')).AndReturn(False)
self.mox.ReplayAll()
self.assertRaises(errors.AppConfigNotFoundError,
application_configuration.ApplicationConfiguration,
['/appdir'])
self.mox.VerifyAll()
def test_app_yaml(self):
os.path.isdir('/appdir/app.yaml').AndReturn(False)
os.path.isdir('/appdir/app.yaml').AndReturn(False)
module_config = ModuleConfigurationStub()
application_configuration.ModuleConfiguration(
'/appdir/app.yaml').AndReturn(module_config)
self.mox.ReplayAll()
config = application_configuration.ApplicationConfiguration(
['/appdir/app.yaml'])
self.mox.VerifyAll()
self.assertEqual('myapp', config.app_id)
self.assertSequenceEqual([module_config], config.modules)
def test_directory_with_backends_yaml(self):
os.path.isdir('/appdir').AndReturn(True)
os.path.exists(os.path.join('/appdir', 'app.yaml')).AndReturn(True)
os.path.isdir(os.path.join('/appdir', 'app.yaml')).AndReturn(False)
os.path.exists(os.path.join('/appdir', 'backends.yaml')).AndReturn(True)
os.path.isdir(os.path.join('/appdir', 'backends.yaml')).AndReturn(False)
module_config = ModuleConfigurationStub()
application_configuration.ModuleConfiguration(
os.path.join('/appdir', 'app.yaml')).AndReturn(module_config)
backend_config = ModuleConfigurationStub(module_name='backend')
backends_config = self.mox.CreateMock(
application_configuration.BackendsConfiguration)
backends_config.get_backend_configurations().AndReturn([backend_config])
application_configuration.BackendsConfiguration(
os.path.join('/appdir', 'app.yaml'),
os.path.join('/appdir', 'backends.yaml')).AndReturn(backends_config)
self.mox.ReplayAll()
config = application_configuration.ApplicationConfiguration(['/appdir'])
self.mox.VerifyAll()
self.assertEqual('myapp', config.app_id)
self.assertSequenceEqual([module_config, backend_config], config.modules)
def test_yaml_files_with_backends_yaml(self):
os.path.isdir('/appdir/app.yaml').AndReturn(False)
module_config = ModuleConfigurationStub()
application_configuration.ModuleConfiguration(
'/appdir/app.yaml').AndReturn(module_config)
os.path.isdir('/appdir/backends.yaml').AndReturn(False)
backend_config = ModuleConfigurationStub(module_name='backend')
backends_config = self.mox.CreateMock(
application_configuration.BackendsConfiguration)
backends_config.get_backend_configurations().AndReturn([backend_config])
application_configuration.BackendsConfiguration(
'/appdir/app.yaml',
'/appdir/backends.yaml').AndReturn(backends_config)
self.mox.ReplayAll()
config = application_configuration.ApplicationConfiguration(
['/appdir/app.yaml', '/appdir/backends.yaml'])
self.mox.VerifyAll()
self.assertEqual('myapp', config.app_id)
self.assertSequenceEqual([module_config, backend_config], config.modules)
def test_yaml_files_with_backends_and_dispatch_yaml(self):
os.path.isdir('/appdir/app.yaml').AndReturn(False)
module_config = ModuleConfigurationStub(module_name='default')
application_configuration.ModuleConfiguration(
'/appdir/app.yaml').AndReturn(module_config)
os.path.isdir('/appdir/backends.yaml').AndReturn(False)
backend_config = ModuleConfigurationStub(module_name='backend')
backends_config = self.mox.CreateMock(
application_configuration.BackendsConfiguration)
backends_config.get_backend_configurations().AndReturn([backend_config])
application_configuration.BackendsConfiguration(
os.path.join('/appdir', 'app.yaml'),
os.path.join('/appdir', 'backends.yaml')).AndReturn(backends_config)
os.path.isdir('/appdir/dispatch.yaml').AndReturn(False)
dispatch_config = DispatchConfigurationStub(
[(None, 'default'), (None, 'backend')])
application_configuration.DispatchConfiguration(
'/appdir/dispatch.yaml').AndReturn(dispatch_config)
self.mox.ReplayAll()
config = application_configuration.ApplicationConfiguration(
['/appdir/app.yaml', '/appdir/backends.yaml', '/appdir/dispatch.yaml'])
self.mox.VerifyAll()
self.assertEqual('myapp', config.app_id)
self.assertSequenceEqual([module_config, backend_config], config.modules)
self.assertEqual(dispatch_config, config.dispatch)
def test_yaml_files_dispatch_yaml_and_no_default_module(self):
os.path.isdir('/appdir/app.yaml').AndReturn(False)
module_config = ModuleConfigurationStub(module_name='not-default')
application_configuration.ModuleConfiguration(
'/appdir/app.yaml').AndReturn(module_config)
os.path.isdir('/appdir/dispatch.yaml').AndReturn(False)
dispatch_config = DispatchConfigurationStub([(None, 'default')])
application_configuration.DispatchConfiguration(
'/appdir/dispatch.yaml').AndReturn(dispatch_config)
self.mox.ReplayAll()
self.assertRaises(errors.InvalidAppConfigError,
application_configuration.ApplicationConfiguration,
['/appdir/app.yaml', '/appdir/dispatch.yaml'])
self.mox.VerifyAll()
def test_yaml_files_dispatch_yaml_and_missing_dispatch_target(self):
os.path.isdir('/appdir/app.yaml').AndReturn(False)
module_config = ModuleConfigurationStub(module_name='default')
application_configuration.ModuleConfiguration(
'/appdir/app.yaml').AndReturn(module_config)
os.path.isdir('/appdir/dispatch.yaml').AndReturn(False)
dispatch_config = DispatchConfigurationStub(
[(None, 'default'), (None, 'fake-module')])
application_configuration.DispatchConfiguration(
'/appdir/dispatch.yaml').AndReturn(dispatch_config)
self.mox.ReplayAll()
self.assertRaises(errors.InvalidAppConfigError,
application_configuration.ApplicationConfiguration,
['/appdir/app.yaml', '/appdir/dispatch.yaml'])
self.mox.VerifyAll()
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
massot/odoo | addons/hr_payroll/res_config.py | 441 | 1294 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class human_resources_configuration(osv.osv_memory):
_inherit = 'hr.config.settings'
_columns = {
'module_hr_payroll_account': fields.boolean('Link your payroll to accounting system',
help ="""Create journal entries from payslips"""),
}
| agpl-3.0 |
sandeepdsouza93/TensorFlow-15712 | tensorflow/python/training/training_ops_test.py | 12 | 11303 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.learning.training_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework.test_util import TensorFlowTestCase
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import training_ops
class TrainingOpsTest(TensorFlowTestCase):
def _toType(self, dtype):
if dtype == np.float16:
return tf.float16
elif dtype == np.float32:
return tf.float32
elif dtype == np.float64:
return tf.float64
elif dtype == np.int32:
return tf.int32
elif dtype == np.int64:
return tf.int64
else:
assert False, (dtype)
def _testTypes(self, x, alpha, delta, use_gpu=None):
self.setUp()
with self.test_session(use_gpu=use_gpu):
var = variables.Variable(x)
variables.global_variables_initializer().run()
self.assertAllCloseAccordingToType(x, var.eval())
apply_sgd = training_ops.apply_gradient_descent(var, alpha, delta)
out = apply_sgd.eval()
self.assertShapeEqual(out, apply_sgd)
self.assertAllCloseAccordingToType(x - alpha * delta, out)
def testApplyGradientDescent(self):
for (dtype, use_gpu) in itertools.product(
[np.float16, np.float32, np.float64], [False, True]):
x = np.arange(100).astype(dtype)
alpha = np.array(2.0).astype(dtype)
delta = np.arange(100).astype(dtype)
self._testTypes(x, alpha, delta, use_gpu)
def _testTypesForAdagrad(self, x, y, lr, grad, use_gpu=None):
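# Adagrad reference update: accum += grad**2; var -= lr * grad / sqrt(accum).
# The assertion below spells this out inline as (y + grad*grad) ** (-0.5).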
self.setUp()
with self.test_session(use_gpu=use_gpu):
var = variables.Variable(x)
accum = variables.Variable(y)
variables.global_variables_initializer().run()
self.assertAllCloseAccordingToType(x, var.eval())
apply_adagrad = training_ops.apply_adagrad(var, accum, lr, grad)
out = apply_adagrad.eval()
self.assertShapeEqual(out, apply_adagrad)
self.assertAllCloseAccordingToType(
x - lr * grad * (y + grad * grad) ** (-0.5), out)
self.assertAllCloseAccordingToType(y + grad * grad, accum.eval())
def _testTypesForFtrl(self, x, y, z, lr, grad, use_gpu=None, l1=0.0,
l2=0.0, lr_power=-0.5):
self.setUp()
with self.test_session(use_gpu=use_gpu):
var = variables.Variable(x)
accum = variables.Variable(y)
linear = variables.Variable(z)
variables.global_variables_initializer().run()
self.assertAllCloseAccordingToType(x, var.eval())
apply_ftrl = training_ops.apply_ftrl(var, accum, linear, grad, lr, l1, l2,
lr_power)
out = apply_ftrl.eval()
self.assertShapeEqual(out, apply_ftrl)
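# Reference FTRL-Proximal update computed in numpy: accumulate squared
# gradients, fold the gradient into the linear term, then apply the
# per-coordinate proximal step with L1 shrinkage (zero when |linear| <= l1).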
accum_update = y + grad * grad
linear_update = z + grad - (accum_update ** (-lr_power) - y ** (
-lr_power)) / lr * x
quadratic = 1.0 / (accum_update ** (lr_power) * lr) + 2 * l2
expected_out = np.array([(np.sign(
linear_update[i]) * l1 - linear_update[i]) / (
quadratic[i]) if np.abs(
linear_update[i]) > l1 else 0.0 for i in range(
linear_update.size)])
self.assertAllCloseAccordingToType(accum_update, accum.eval())
if x.dtype == np.float16:
# The calculations here really are not very precise in float16.
self.assertAllClose(linear_update, linear.eval(), rtol=2e-2, atol=2e-2)
self.assertAllClose(expected_out, out, rtol=2e-2, atol=2e-2)
else:
self.assertAllClose(linear_update, linear.eval())
self.assertAllClose(expected_out, out)
def testApplyAdagrad(self):
for (dtype, use_gpu) in itertools.product(
[np.float16, np.float32, np.float64], [False, True]):
x = np.arange(100).astype(dtype)
y = np.arange(1, 101).astype(dtype)
lr = np.array(2.0).astype(dtype)
grad = np.arange(100).astype(dtype)
self._testTypesForAdagrad(x, y, lr, grad, use_gpu)
def testApplyFtrl(self):
for dtype in [np.float16, np.float32, np.float64]:
x = np.arange(100).astype(dtype)
y = np.arange(1, 101).astype(dtype)
z = np.arange(102, 202).astype(dtype)
lr = np.array(2.0).astype(dtype)
l1 = np.array(3.0).astype(dtype)
l2 = np.array(4.0).astype(dtype)
grad = np.arange(100).astype(dtype)
self._testTypesForFtrl(x, y, z, lr, grad, use_gpu=False, l1=l1, l2=l2)
def _testTypesForSparseAdagrad(self, x, y, lr, grad, indices):
self.setUp()
with self.test_session(use_gpu=False):
var = variables.Variable(x)
accum = variables.Variable(y)
variables.global_variables_initializer().run()
self.assertAllCloseAccordingToType(x, var.eval())
sparse_apply_adagrad = training_ops.sparse_apply_adagrad(
var, accum, lr, grad,
constant_op.constant(indices, self._toType(indices.dtype)))
out = sparse_apply_adagrad.eval()
self.assertShapeEqual(out, sparse_apply_adagrad)
for (i, index) in enumerate(indices):
self.assertAllCloseAccordingToType(
x[index] - lr * grad[i] * (y[index] + grad[i] * grad[i]) ** (-0.5),
var.eval()[index])
self.assertAllCloseAccordingToType(y[index] + grad[i] * grad[i],
accum.eval()[index])
def _testTypesForSparseFtrl(self, x, y, z, lr, grad, indices, l1=0.0, l2=0.0,
lr_power=-0.5):
self.setUp()
with self.test_session(use_gpu=False):
var = variables.Variable(x)
accum = variables.Variable(y)
linear = variables.Variable(z)
variables.global_variables_initializer().run()
self.assertAllCloseAccordingToType(x, var.eval())
sparse_apply_ftrl = training_ops.sparse_apply_ftrl(
var, accum, linear, grad,
constant_op.constant(indices, self._toType(indices.dtype)),
lr, l1, l2, lr_power=lr_power)
out = sparse_apply_ftrl.eval()
self.assertShapeEqual(out, sparse_apply_ftrl)
for (i, index) in enumerate(indices):
self.assertAllCloseAccordingToType(
x[index] - lr * grad[i] * (y[index] + grad[i] * grad[i]) ** (
lr_power),
var.eval()[index])
self.assertAllCloseAccordingToType(y[index] + grad[i] * grad[i],
accum.eval()[index])
def testSparseApplyAdagrad(self):
for (dtype, index_type) in itertools.product(
[np.float16, np.float32, np.float64], [np.int32, np.int64]):
x_val = [np.arange(10), np.arange(10, 20), np.arange(20, 30)]
y_val = [np.arange(1, 11), np.arange(11, 21), np.arange(21, 31)]
x = np.array(x_val).astype(dtype)
y = np.array(y_val).astype(dtype)
lr = np.array(2.0).astype(dtype)
grad_val = [np.arange(10), np.arange(10)]
grad = np.array(grad_val).astype(dtype)
indices = np.array([0, 2]).astype(index_type)
self._testTypesForSparseAdagrad(x, y, lr, grad, indices)
def testSparseApplyAdagradDim1(self):
for (dtype, index_type) in itertools.product(
[np.float16, np.float32, np.float64], [np.int32, np.int64]):
x_val = [[1.0], [2.0], [3.0]]
y_val = [[4.0], [5.0], [6.0]]
x = np.array(x_val).astype(dtype)
y = np.array(y_val).astype(dtype)
lr = np.array(2.0).astype(dtype)
grad_val = [[1.5], [2.5]]
grad = np.array(grad_val).astype(dtype)
indices = np.array([0, 2]).astype(index_type)
self._testTypesForSparseAdagrad(x, y, lr, grad, indices)
def testSparseApplyFtrlDim1(self):
for (dtype, index_type) in itertools.product(
[np.float16, np.float32, np.float64], [np.int32, np.int64]):
x_val = [[0.0], [0.0], [0.0]]
y_val = [[4.0], [5.0], [6.0]]
z_val = [[0.0], [0.0], [0.0]]
x = np.array(x_val).astype(dtype)
y = np.array(y_val).astype(dtype)
z = np.array(z_val).astype(dtype)
lr = np.array(2.0).astype(dtype)
grad_val = [[1.5], [2.5]]
grad = np.array(grad_val).astype(dtype)
indices = np.array([0, 2]).astype(index_type)
self._testTypesForSparseFtrl(x, y, z, lr, grad, indices)
def testApplyAdam(self):
for dtype, use_gpu in itertools.product(
[np.float16, np.float32, np.float64], [False, True]):
var = np.arange(100).astype(dtype)
m = np.arange(1, 101).astype(dtype)
v = np.arange(101, 201).astype(dtype)
grad = np.arange(100).astype(dtype)
self._testTypesForAdam(var, m, v, grad, use_gpu)
def _testTypesForAdam(self, var, m, v, grad, use_gpu):
self.setUp()
with self.test_session(use_gpu=use_gpu):
var_t = variables.Variable(var)
m_t = variables.Variable(m)
v_t = variables.Variable(v)
t = 1
beta1 = np.array(0.9, dtype=var.dtype)
beta2 = np.array(0.999, dtype=var.dtype)
beta1_power = beta1**t
beta2_power = beta2**t
lr = np.array(0.001, dtype=var.dtype)
epsilon = np.array(1e-8, dtype=var.dtype)
beta1_t = constant_op.constant(beta1, self._toType(var.dtype), [])
beta2_t = constant_op.constant(beta2, self._toType(var.dtype), [])
beta1_power_t = variables.Variable(beta1_power)
beta2_power_t = variables.Variable(beta2_power)
lr_t = constant_op.constant(lr, self._toType(var.dtype), [])
epsilon_t = constant_op.constant(epsilon, self._toType(var.dtype), [])
variables.global_variables_initializer().run()
self.assertAllCloseAccordingToType(var, var_t.eval())
new_var, _, _ = self._adamUpdateNumpy(var, grad, t, m, v,
lr, beta1, beta2, epsilon)
apply_adam = training_ops.apply_adam(var_t, m_t, v_t, beta1_power_t,
beta2_power_t, lr_t,
beta1_t, beta2_t, epsilon_t, grad)
out = apply_adam.eval()
self.assertShapeEqual(out, apply_adam)
self.assertAllCloseAccordingToType(new_var, out)
def _adamUpdateNumpy(self, param, g_t, t, m, v, alpha, beta1,
beta2, epsilon):
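# Standard Adam update: bias-corrected step size
#   alpha_t = alpha * sqrt(1 - beta2**t) / (1 - beta1**t)
# first/second moment estimates
#   m_t = beta1*m + (1-beta1)*g_t,   v_t = beta2*v + (1-beta2)*g_t**2
# parameter step
#   param_t = param - alpha_t * m_t / (sqrt(v_t) + epsilon)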
alpha_t = alpha * np.sqrt(1 - beta2 ** t) / (1 - beta1 ** t)
m_t = beta1 * m + (1 - beta1) * g_t
v_t = beta2 * v + (1 - beta2) * g_t * g_t
param_t = param - alpha_t * m_t / (np.sqrt(v_t) + epsilon)
return param_t, m_t, v_t
if __name__ == '__main__':
googletest.main()
| apache-2.0 |
juergenhamel/cuon | cuon_client/cuon/Databases/xbase.py | 7 | 5402 | import struct, datetime, decimal, itertools
def dbfreader(f):
"""Returns an iterator over records in a Xbase DBF file.
The first row returned contains the field names.
The second row contains field specs: (type, size, decimal places).
Subsequent rows contain the data records.
If a record is marked as deleted, it is skipped.
File should be opened for binary reads.
"""
# See DBF format spec at:
# http://www.pgts.com.au/download/public/xbase.htm#DBF_STRUCT
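# 32-byte header: skip 4 bytes (version byte + YMD of last update), then read
# the record count (uint32 LE) and total header length (uint16 LE); the
# remaining 22 bytes of the header are reserved/ignored here.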
numrec, lenheader = struct.unpack('<xxxxLH22x', f.read(32))
numfields = (lenheader - 33) // 32
fields = []
for fieldno in xrange(numfields):
name, typ, size, deci = struct.unpack('<11sc4xBB14x', f.read(32))
name = name.replace('\0', '') # eliminate NULs from string
fields.append((name, typ, size, deci))
yield [field[0] for field in fields]
yield [tuple(field[1:]) for field in fields]
terminator = f.read(1)
assert terminator == '\r'
fields.insert(0, ('DeletionFlag', 'C', 1, 0))
fmt = ''.join(['%ds' % fieldinfo[2] for fieldinfo in fields])
fmtsiz = struct.calcsize(fmt)
for i in xrange(numrec):
record = struct.unpack(fmt, f.read(fmtsiz))
if record[0] != ' ':
continue # deleted record
result = []
for (name, typ, size, deci), value in itertools.izip(fields, record):
if name == 'DeletionFlag':
continue
if typ == "N":
value = value.replace('\0', '').lstrip()
if value == '':
value = 0
elif deci:
value = decimal.Decimal(value)
else:
value = int(value)
elif typ == 'D':
try:
y, m, d = int(value[:4]), int(value[4:6]), int(value[6:8])
value = datetime.date(y, m, d)
except:
value = `value`
elif typ == 'L':
value = (value in 'YyTt' and 'T') or (value in 'NnFf' and 'F') or '?'
result.append(value)
yield result
def dbfwriter(f, fieldnames, fieldspecs, records):
""" Return a string suitable for writing directly to a binary dbf file.
File f should be open for writing in a binary mode.
Fieldnames should be no longer than ten characters and not include \x00.
Fieldspecs are in the form (type, size, deci) where
type is one of:
C for ascii character data
M for ascii character memo data (real memo fields not supported)
D for datetime objects
N for ints or decimal objects
L for logical values 'T', 'F', or '?'
size is the field width
deci is the number of decimal places in the provided decimal object
Records can be an iterable over the records (sequences of field values).
"""
# header info
ver = 3
now = datetime.datetime.now()
yr, mon, day = now.year-1900, now.month, now.day
numrec = len(records)
numfields = len(fieldspecs)
lenheader = numfields * 32 + 33
lenrecord = sum(field[1] for field in fieldspecs) + 1
hdr = struct.pack('<BBBBLHH20x', ver, yr, mon, day, numrec, lenheader, lenrecord)
f.write(hdr)
# field specs
for name, (typ, size, deci) in itertools.izip(fieldnames, fieldspecs):
name = name.ljust(11, '\x00')
fld = struct.pack('<11sc4xBB14x', name, typ, size, deci)
f.write(fld)
# terminator
f.write('\r')
# records
for record in records:
f.write(' ') # deletion flag
for (typ, size, deci), value in itertools.izip(fieldspecs, record):
if typ == "N":
value = str(value).rjust(size, ' ')
elif typ == 'D':
value = value.strftime('%Y%m%d')
elif typ == 'L':
value = str(value)[0].upper()
else:
value = str(value)[:size].ljust(size, ' ')
assert len(value) == size
f.write(value)
# End of file
f.write('\x1A')
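# Writer usage sketch, not part of the original module (the field layout
# and output path are illustrative):
def _example_write_dbf(path='example_out.dbf'):
    fieldnames = ['NAME', 'PRICE', 'DATE']
    fieldspecs = [('C', 10, 0), ('N', 8, 2), ('D', 8, 0)]
    records = [['widget', decimal.Decimal('9.99'),
                datetime.date(2010, 1, 1)]]
    f = open(path, 'wb')
    dbfwriter(f, fieldnames, fieldspecs, records)
    f.close()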
# -------------------------------------------------------
# Example calls
if __name__ == '__main__':
import sys, csv
from cStringIO import StringIO
from operator import itemgetter
# Read a database
filename = '/home/jhamel/test/taxonno.dbf'
#if len(sys.argv) == 2:
# filename = sys.argv[1]
f = open(filename, 'rb')
db = list(dbfreader(f))
f.close()
for record in db:
print record
fieldnames, fieldspecs, records = db[0], db[1], db[2:]
## # Alter the database
## del records[4]
## records.sort(key=itemgetter(4))
##
## # Remove a field
## del fieldnames[0]
## del fieldspecs[0]
## records = [rec[1:] for rec in records]
##
## # Create a new DBF
## f = StringIO()
## dbfwriter(f, fieldnames, fieldspecs, records)
##
## # Read the data back from the new DBF
## print '-' * 20
## f.seek(0)
## for line in dbfreader(f):
## print line
## f.close()
##
## # Convert to CSV
## print '.' * 20
## f = StringIO()
## csv.writer(f).writerow(fieldnames)
## csv.writer(f).writerows(records)
## print f.getvalue()
## f.close()
| gpl-3.0 |
shubhangiKishore/pattern | test/test_it.py | 3 | 12484 | # -*- coding: utf-8 -*-
from __future__ import print_function
from util import *
from pattern import it
#-------------------------------------------------------------------------
class TestInflection(unittest.TestCase):
def setUp(self):
pass
def test_article(self):
# Assert definite and indefinite article inflection.
for a, n, g in (
("il", "giorno", it.M),
("l'", "altro giorno", it.M),
("lo", "zio", it.M),
("l'", "amica", it.F),
("la", "nouva amica", it.F),
("i", "giapponesi", it.M + it.PL),
("gli", "italiani", it.M + it.PL),
("gli", "zii", it.M + it.PL),
("le", "zie", it.F + it.PL)):
v = it.article(n, "definite", gender=g)
self.assertEqual(a, v)
for a, n, g in (
("uno", "zio", it.M),
("una", "zia", it.F),
("un", "amico", it.M),
("un'", "amica", it.F)):
v = it.article(n, "indefinite", gender=g)
self.assertEqual(a, v)
v = it.referenced("amica", gender="f")
self.assertEqual(v, "un'amica")
print("pattern.it.article()")
print("pattern.it.referenced()")
def test_gender(self):
# Assert the accuracy of the gender disambiguation algorithm.
from pattern.db import Datasheet
i, n = 0, 0
for pos, sg, pl, mf in Datasheet.load(os.path.join(PATH, "corpora", "wordforms-it-wiktionary.csv")):
g = it.gender(sg)
if mf in g and it.PLURAL not in g:
i += 1
g = it.gender(pl)
if mf in g and it.PLURAL in g:
i += 1
n += 2
self.assertTrue(float(i) / n > 0.92)
print("pattern.it.gender()")
def test_pluralize(self):
# Assert the accuracy of the pluralization algorithm.
from pattern.db import Datasheet
i, n = 0, 0
for pos, sg, pl, mf in Datasheet.load(os.path.join(PATH, "corpora", "wordforms-it-wiktionary.csv")):
if it.pluralize(sg) == pl:
i += 1
n += 1
self.assertTrue(float(i) / n > 0.93)
print("pattern.it.pluralize()")
def test_singularize(self):
# Assert the accuracy of the singularization algorithm.
from pattern.db import Datasheet
i, n = 0, 0
for pos, sg, pl, mf in Datasheet.load(os.path.join(PATH, "corpora", "wordforms-it-wiktionary.csv")):
if it.singularize(pl) == sg:
i += 1
n += 1
self.assertTrue(float(i) / n > 0.84)
print("pattern.it.singularize()")
def test_predicative(self):
# Assert the accuracy of the predicative algorithm ("cruciali" =>
# "cruciale").
from pattern.db import Datasheet
i, n = 0, 0
for pos, sg, pl, mf in Datasheet.load(os.path.join(PATH, "corpora", "wordforms-it-wiktionary.csv")):
if pos != "j":
continue
if it.predicative(pl) == sg:
i += 1
n += 1
self.assertTrue(float(i) / n > 0.87)
print("pattern.it.predicative()")
def test_find_lemma(self):
# Assert the accuracy of the verb lemmatization algorithm.
i, n = 0, 0
for v1, v2 in it.inflect.verbs.inflections.items():
if it.inflect.verbs.find_lemma(v1) == v2:
i += 1
n += 1
self.assertTrue(float(i) / n > 0.81)
print("pattern.it.inflect.verbs.find_lemma()")
def test_find_lexeme(self):
# Assert the accuracy of the verb conjugation algorithm.
i, n = 0, 0
for v, lexeme1 in it.inflect.verbs.infinitives.items():
lexeme2 = it.inflect.verbs.find_lexeme(v)
for j in range(len(lexeme2)):
if lexeme1[j] == lexeme2[j]:
i += 1
n += 1
self.assertTrue(float(i) / n > 0.89)
print("pattern.it.inflect.verbs.find_lexeme()")
def test_conjugate(self):
# Assert different tenses with different conjugations.
for (v1, v2, tense) in (
("essere", u"essere", it.INFINITIVE),
("essere", u"sono", (it.PRESENT, 1, it.SINGULAR)),
("essere", u"sei", (it.PRESENT, 2, it.SINGULAR)),
("essere", u"è", (it.PRESENT, 3, it.SINGULAR)),
("essere", u"siamo", (it.PRESENT, 1, it.PLURAL)),
("essere", u"siete", (it.PRESENT, 2, it.PLURAL)),
("essere", u"sono", (it.PRESENT, 3, it.PLURAL)),
("essere", u"essendo", (it.PRESENT + it.PARTICIPLE)),
("essere", u"stato", (it.PAST + it.PARTICIPLE)),
("essere", u"ero", (it.IMPERFECT, 1, it.SINGULAR)),
("essere", u"eri", (it.IMPERFECT, 2, it.SINGULAR)),
("essere", u"era", (it.IMPERFECT, 3, it.SINGULAR)),
("essere", u"eravamo", (it.IMPERFECT, 1, it.PLURAL)),
("essere", u"eravate", (it.IMPERFECT, 2, it.PLURAL)),
("essere", u"erano", (it.IMPERFECT, 3, it.PLURAL)),
("essere", u"fui", (it.PRETERITE, 1, it.SINGULAR)),
("essere", u"fosti", (it.PRETERITE, 2, it.SINGULAR)),
("essere", u"fu", (it.PRETERITE, 3, it.SINGULAR)),
("essere", u"fummo", (it.PRETERITE, 1, it.PLURAL)),
("essere", u"foste", (it.PRETERITE, 2, it.PLURAL)),
("essere", u"furono", (it.PRETERITE, 3, it.PLURAL)),
("essere", u"sarei", (it.CONDITIONAL, 1, it.SINGULAR)),
("essere", u"saresti", (it.CONDITIONAL, 2, it.SINGULAR)),
("essere", u"sarebbe", (it.CONDITIONAL, 3, it.SINGULAR)),
("essere", u"saremmo", (it.CONDITIONAL, 1, it.PLURAL)),
("essere", u"sareste", (it.CONDITIONAL, 2, it.PLURAL)),
("essere", u"sarebbero", (it.CONDITIONAL, 3, it.PLURAL)),
("essere", u"sarò", (it.FUTURE, 1, it.SINGULAR)),
("essere", u"sarai", (it.FUTURE, 2, it.SINGULAR)),
("essere", u"sarà", (it.FUTURE, 3, it.SINGULAR)),
("essere", u"saremo", (it.FUTURE, 1, it.PLURAL)),
("essere", u"sarete", (it.FUTURE, 2, it.PLURAL)),
("essere", u"saranno", (it.FUTURE, 3, it.PLURAL)),
("essere", u"sii",
(it.PRESENT, 2, it.SINGULAR, it.IMPERATIVE)),
("essere", u"sia",
(it.PRESENT, 3, it.SINGULAR, it.IMPERATIVE)),
("essere", u"siamo",
(it.PRESENT, 1, it.PLURAL, it.IMPERATIVE)),
("essere", u"siate",
(it.PRESENT, 2, it.PLURAL, it.IMPERATIVE)),
("essere", u"siano",
(it.PRESENT, 3, it.PLURAL, it.IMPERATIVE)),
("essere", u"sia",
(it.PRESENT, 1, it.SINGULAR, it.SUBJUNCTIVE)),
("essere", u"sia",
(it.PRESENT, 2, it.SINGULAR, it.SUBJUNCTIVE)),
("essere", u"sia",
(it.PRESENT, 3, it.SINGULAR, it.SUBJUNCTIVE)),
("essere", u"siamo",
(it.PRESENT, 1, it.PLURAL, it.SUBJUNCTIVE)),
("essere", u"siate",
(it.PRESENT, 2, it.PLURAL, it.SUBJUNCTIVE)),
("essere", u"siano",
(it.PRESENT, 3, it.PLURAL, it.SUBJUNCTIVE)),
("essere", u"fossi",
(it.PAST, 1, it.SINGULAR, it.SUBJUNCTIVE)),
("essere", u"fossi",
(it.PAST, 2, it.SINGULAR, it.SUBJUNCTIVE)),
("essere", u"fosse",
(it.PAST, 3, it.SINGULAR, it.SUBJUNCTIVE)),
("essere", u"fossimo",
(it.PAST, 1, it.PLURAL, it.SUBJUNCTIVE)),
("essere", u"foste",
(it.PAST, 2, it.PLURAL, it.SUBJUNCTIVE)),
("essere", u"fossero", (it.PAST, 3, it.PLURAL, it.SUBJUNCTIVE))):
self.assertEqual(it.conjugate(v1, tense), v2)
print("pattern.it.conjugate()")
def test_lexeme(self):
# Assert all inflections of "essere".
v = it.lexeme("essere")
self.assertEqual(v, [
u'essere', u'sono', u'sei', u'è', u'siamo', u'siete', u'essendo',
u'fui', u'fosti', u'fu', u'fummo', u'foste', u'furono', u'stato',
u'ero', u'eri', u'era', u'eravamo', u'eravate', u'erano',
u'sarò', u'sarai', u'sarà', u'saremo', u'sarete', u'saranno',
u'sarei', u'saresti', u'sarebbe', u'saremmo', u'sareste', u'sarebbero',
u'sii', u'sia', u'siate', u'siano',
u'fossi', u'fosse', u'fossimo', u'fossero'
])
print("pattern.it.inflect.lexeme()")
def test_tenses(self):
# Assert tense recognition.
self.assertTrue((it.PRESENT, 3, it.SG) in it.tenses(u"è"))
self.assertTrue("2sg" in it.tenses("sei"))
print("pattern.it.tenses()")
#-------------------------------------------------------------------------
class TestParser(unittest.TestCase):
def setUp(self):
pass
def test_find_lemmata(self):
# Assert lemmata for nouns, adjectives, verbs and determiners.
v = it.parser.find_lemmata([
["I", "DT"], ["gatti", "NNS"], ["neri", "JJ"],
["seduti", "VB"], ["sul", "IN"], ["tatami", "NN"]])
self.assertEqual(v, [
["I", "DT", "il"],
["gatti", "NNS", "gatto"],
["neri", "JJ", "nero"],
["seduti", "VB", "sedutare"],
["sul", "IN", "sul"],
["tatami", "NN", "tatami"]])
print("pattern.it.parser.find_lemmata()")
def test_parse(self):
# Assert parsed output with Penn Treebank II tags (slash-formatted).
# "il gatto nero" is a noun phrase, "sulla stuoia" is a prepositional noun phrase.
v = it.parser.parse(u"Il gatto nero seduto sulla stuoia.")
self.assertEqual(v,
u"Il/DT/B-NP/O gatto/NN/I-NP/O nero/JJ/I-NP/O " +
u"seduto/VB/B-VP/O " +
u"sulla/IN/B-PP/B-PNP stuoia/NN/B-NP/I-PNP ././O/O"
)
# Assert the accuracy of the Italian tagger.
i, n = 0, 0
for sentence in open(os.path.join(PATH, "corpora", "tagged-it-wacky.txt")).readlines():
sentence = sentence.strip()
s1 = [w.split("/") for w in sentence.split(" ")]
s2 = [[w for w, pos in s1]]
s2 = it.parse(s2, tokenize=False)
s2 = [w.split("/") for w in s2.split(" ")]
for j in range(len(s1)):
t1 = s1[j][1]
t2 = s2[j][1]
# WaCKy test set tags plural nouns as "NN", pattern.it as "NNS".
# Some punctuation marks are also tagged differently,
# but these are not necessarily errors.
if t1 == t2 or (t1 == "NN" and t2.startswith("NN")) or s1[j][0] in "\":;)-":
i += 1
n += 1
#print(float(i) / n)
self.assertTrue(float(i) / n > 0.92)
print("pattern.it.parser.parse()")
def test_tag(self):
# Assert [("il", "DT"), ("gatto", "NN"), ("nero", "JJ")].
v = it.tag("il gatto nero")
self.assertEqual(v, [("il", "DT"), ("gatto", "NN"), ("nero", "JJ")])
print("pattern.it.tag()")
def test_command_line(self):
from sys import version_info
if version_info[:2] == (2, 6):
raise unittest.SkipTest("FIXME")
# Assert parsed output from the command-line (example from the
# documentation).
p = ["python", "-m", "pattern.it", "-s", "Il gatto nero.", "-OTCRL"]
p = subprocess.Popen(p, stdout=subprocess.PIPE)
p.wait()
v = p.stdout.read()
v = v.strip()
self.assertEqual(
v, b"Il/DT/B-NP/O/O/il gatto/NN/I-NP/O/O/gatto nero/JJ/I-NP/O/O/nero ././O/O/O/.")
print("python -m pattern.it")
#-------------------------------------------------------------------------
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
dgillis/scrapy | scrapy/crawler.py | 31 | 11176 | import six
import signal
import logging
import warnings
from twisted.internet import reactor, defer
from zope.interface.verify import verifyClass, DoesNotImplement
from scrapy.core.engine import ExecutionEngine
from scrapy.resolver import CachingThreadedResolver
from scrapy.interfaces import ISpiderLoader
from scrapy.extension import ExtensionManager
from scrapy.settings import Settings
from scrapy.signalmanager import SignalManager
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.utils.ossignal import install_shutdown_handlers, signal_names
from scrapy.utils.misc import load_object
from scrapy.utils.log import LogCounterHandler, configure_logging, log_scrapy_info
from scrapy import signals
logger = logging.getLogger(__name__)
class Crawler(object):
def __init__(self, spidercls, settings=None):
if isinstance(settings, dict) or settings is None:
settings = Settings(settings)
self.spidercls = spidercls
self.settings = settings.copy()
self.spidercls.update_settings(self.settings)
self.signals = SignalManager(self)
self.stats = load_object(self.settings['STATS_CLASS'])(self)
handler = LogCounterHandler(self, level=settings.get('LOG_LEVEL'))
logging.root.addHandler(handler)
# lambda is assigned to Crawler attribute because this way it is not
# garbage collected after leaving __init__ scope
self.__remove_handler = lambda: logging.root.removeHandler(handler)
self.signals.connect(self.__remove_handler, signals.engine_stopped)
lf_cls = load_object(self.settings['LOG_FORMATTER'])
self.logformatter = lf_cls.from_crawler(self)
self.extensions = ExtensionManager.from_crawler(self)
self.settings.freeze()
self.crawling = False
self.spider = None
self.engine = None
@property
def spiders(self):
if not hasattr(self, '_spiders'):
warnings.warn("Crawler.spiders is deprecated, use "
"CrawlerRunner.spider_loader or instantiate "
"scrapy.spiderloader.SpiderLoader with your "
"settings.",
category=ScrapyDeprecationWarning, stacklevel=2)
self._spiders = _get_spider_loader(self.settings.frozencopy())
return self._spiders
@defer.inlineCallbacks
def crawl(self, *args, **kwargs):
assert not self.crawling, "Crawling already taking place"
self.crawling = True
try:
self.spider = self._create_spider(*args, **kwargs)
self.engine = self._create_engine()
start_requests = iter(self.spider.start_requests())
yield self.engine.open_spider(self.spider, start_requests)
yield defer.maybeDeferred(self.engine.start)
except Exception:
self.crawling = False
raise
def _create_spider(self, *args, **kwargs):
return self.spidercls.from_crawler(self, *args, **kwargs)
def _create_engine(self):
return ExecutionEngine(self, lambda _: self.stop())
@defer.inlineCallbacks
def stop(self):
if self.crawling:
self.crawling = False
yield defer.maybeDeferred(self.engine.stop)
class CrawlerRunner(object):
"""
This is a convenient helper class that keeps track of, manages and runs
crawlers inside an already set up Twisted `reactor`_.
The CrawlerRunner object must be instantiated with a
:class:`~scrapy.settings.Settings` object.
This class shouldn't be needed (since Scrapy is responsible for using it
accordingly) unless writing scripts that manually handle the crawling
process. See :ref:`run-from-script` for an example.
"""
crawlers = property(
lambda self: self._crawlers,
doc="Set of :class:`crawlers <scrapy.crawler.Crawler>` started by "
":meth:`crawl` and managed by this class."
)
def __init__(self, settings=None):
if isinstance(settings, dict) or settings is None:
settings = Settings(settings)
self.settings = settings
self.spider_loader = _get_spider_loader(settings)
self._crawlers = set()
self._active = set()
@property
def spiders(self):
warnings.warn("CrawlerRunner.spiders attribute is renamed to "
"CrawlerRunner.spider_loader.",
category=ScrapyDeprecationWarning, stacklevel=2)
return self.spider_loader
def crawl(self, crawler_or_spidercls, *args, **kwargs):
"""
Run a crawler with the provided arguments.
It will call the given Crawler's :meth:`~Crawler.crawl` method, while
keeping track of it so it can be stopped later.
If `crawler_or_spidercls` isn't a :class:`~scrapy.crawler.Crawler`
instance, this method will try to create one using this parameter as
the spider class given to it.
Returns a deferred that is fired when the crawling is finished.
:param crawler_or_spidercls: already created crawler, or a spider class
or spider's name inside the project to create it
:type crawler_or_spidercls: :class:`~scrapy.crawler.Crawler` instance,
:class:`~scrapy.spiders.Spider` subclass or string
:param list args: arguments to initialize the spider
:param dict kwargs: keyword arguments to initialize the spider
"""
crawler = crawler_or_spidercls
if not isinstance(crawler_or_spidercls, Crawler):
crawler = self._create_crawler(crawler_or_spidercls)
return self._crawl(crawler, *args, **kwargs)
def _crawl(self, crawler, *args, **kwargs):
self.crawlers.add(crawler)
d = crawler.crawl(*args, **kwargs)
self._active.add(d)
def _done(result):
self.crawlers.discard(crawler)
self._active.discard(d)
return result
return d.addBoth(_done)
def _create_crawler(self, spidercls):
if isinstance(spidercls, six.string_types):
spidercls = self.spider_loader.load(spidercls)
return Crawler(spidercls, self.settings)
def stop(self):
"""
Stops simultaneously all the crawling jobs taking place.
Returns a deferred that is fired when they all have ended.
"""
return defer.DeferredList([c.stop() for c in self.crawlers])
@defer.inlineCallbacks
def join(self):
"""
join()
Returns a deferred that is fired when all managed :attr:`crawlers` have
completed their executions.
"""
while self._active:
yield defer.DeferredList(self._active)
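# Usage sketch, not part of the original module: how a standalone script
# might drive CrawlerRunner manually (``spidercls`` is any user-defined
# spider class; see the run-from-script docs for the canonical example).
def _example_crawler_runner(spidercls):
    configure_logging()
    runner = CrawlerRunner(Settings())
    d = runner.crawl(spidercls)
    d.addBoth(lambda _: reactor.stop())
    reactor.run()  # blocks until the crawl finishes and stops the reactor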
class CrawlerProcess(CrawlerRunner):
"""
A class to run multiple scrapy crawlers in a process simultaneously.
This class extends :class:`~scrapy.crawler.CrawlerRunner` by adding support
for starting a Twisted `reactor`_ and handling shutdown signals, like the
keyboard interrupt command Ctrl-C. It also configures top-level logging.
This utility should be a better fit than
:class:`~scrapy.crawler.CrawlerRunner` if you aren't running another
Twisted `reactor`_ within your application.
The CrawlerProcess object must be instantiated with a
:class:`~scrapy.settings.Settings` object.
This class shouldn't be needed (since Scrapy is responsible for using it
accordingly) unless writing scripts that manually handle the crawling
process. See :ref:`run-from-script` for an example.
"""
def __init__(self, settings=None):
super(CrawlerProcess, self).__init__(settings)
install_shutdown_handlers(self._signal_shutdown)
configure_logging(self.settings)
log_scrapy_info(self.settings)
def _signal_shutdown(self, signum, _):
install_shutdown_handlers(self._signal_kill)
signame = signal_names[signum]
logger.info("Received %(signame)s, shutting down gracefully. Send again to force ",
{'signame': signame})
reactor.callFromThread(self._graceful_stop_reactor)
def _signal_kill(self, signum, _):
install_shutdown_handlers(signal.SIG_IGN)
signame = signal_names[signum]
logger.info('Received %(signame)s twice, forcing unclean shutdown',
{'signame': signame})
reactor.callFromThread(self._stop_reactor)
def start(self, stop_after_crawl=True):
"""
This method starts a Twisted `reactor`_, adjusts its pool size to
:setting:`REACTOR_THREADPOOL_MAXSIZE`, and installs a DNS cache based
on :setting:`DNSCACHE_ENABLED` and :setting:`DNSCACHE_SIZE`.
If `stop_after_crawl` is True, the reactor will be stopped after all
crawlers have finished, using :meth:`join`.
:param boolean stop_after_crawl: stop or not the reactor when all
crawlers have finished
"""
if stop_after_crawl:
d = self.join()
# Don't start the reactor if the deferreds are already fired
if d.called:
return
d.addBoth(self._stop_reactor)
reactor.installResolver(self._get_dns_resolver())
tp = reactor.getThreadPool()
tp.adjustPoolsize(maxthreads=self.settings.getint('REACTOR_THREADPOOL_MAXSIZE'))
reactor.addSystemEventTrigger('before', 'shutdown', self.stop)
reactor.run(installSignalHandlers=False) # blocking call
def _get_dns_resolver(self):
if self.settings.getbool('DNSCACHE_ENABLED'):
cache_size = self.settings.getint('DNSCACHE_SIZE')
else:
cache_size = 0
return CachingThreadedResolver(
reactor=reactor,
cache_size=cache_size,
timeout=self.settings.getfloat('DNS_TIMEOUT')
)
def _graceful_stop_reactor(self):
d = self.stop()
d.addBoth(self._stop_reactor)
return d
def _stop_reactor(self, _=None):
try:
reactor.stop()
except RuntimeError: # raised if already stopped or in shutdown stage
pass
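# Usage sketch, not part of the original module: CrawlerProcess manages
# the reactor itself, so a script only schedules crawls and calls start().
def _example_crawler_process(spidercls):
    process = CrawlerProcess({'LOG_LEVEL': 'INFO'})
    process.crawl(spidercls)
    process.start()  # blocking call; returns once all crawlers finish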
def _get_spider_loader(settings):
""" Get SpiderLoader instance from settings """
if settings.get('SPIDER_MANAGER_CLASS'):
warnings.warn(
'SPIDER_MANAGER_CLASS option is deprecated. '
'Please use SPIDER_LOADER_CLASS.',
category=ScrapyDeprecationWarning, stacklevel=2
)
cls_path = settings.get('SPIDER_MANAGER_CLASS',
settings.get('SPIDER_LOADER_CLASS'))
loader_cls = load_object(cls_path)
try:
verifyClass(ISpiderLoader, loader_cls)
except DoesNotImplement:
warnings.warn(
'SPIDER_LOADER_CLASS (previously named SPIDER_MANAGER_CLASS) does '
'not fully implement scrapy.interfaces.ISpiderLoader interface. '
'Please add all missing methods to avoid unexpected runtime errors.',
category=ScrapyDeprecationWarning, stacklevel=2
)
return loader_cls.from_settings(settings.frozencopy())
| bsd-3-clause |
revmischa/boto | boto/glacier/layer1.py | 121 | 60796 | # -*- coding: utf-8 -*-
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import os
import boto.glacier
from boto.compat import json
from boto.connection import AWSAuthConnection
from boto.glacier.exceptions import UnexpectedHTTPResponseError
from boto.glacier.response import GlacierResponse
from boto.glacier.utils import ResettingFileSender
class Layer1(AWSAuthConnection):
"""
Amazon Glacier is a storage solution for "cold data."
Amazon Glacier is an extremely low-cost storage service that
provides secure, durable and easy-to-use storage for data backup
and archival. With Amazon Glacier, customers can store their data
cost effectively for months, years, or decades. Amazon Glacier
also enables customers to offload the administrative burdens of
operating and scaling storage to AWS, so they don't have to worry
about capacity planning, hardware provisioning, data replication,
hardware failure and recovery, or time-consuming hardware
migrations.
Amazon Glacier is a great storage choice when low storage cost is
paramount, your data is rarely retrieved, and retrieval latency of
several hours is acceptable. If your application requires fast or
frequent access to your data, consider using Amazon S3. For more
information, go to `Amazon Simple Storage Service (Amazon S3)`_.
You can store any kind of data in any format. There is no maximum
limit on the total amount of data you can store in Amazon Glacier.
If you are a first-time user of Amazon Glacier, we recommend that
you begin by reading the following sections in the Amazon Glacier
Developer Guide :
+ `What is Amazon Glacier`_ - This section of the Developer Guide
describes the underlying data model, the operations it supports,
and the AWS SDKs that you can use to interact with the service.
+ `Getting Started with Amazon Glacier`_ - The Getting Started
section walks you through the process of creating a vault,
uploading archives, creating jobs to download archives, retrieving
the job output, and deleting archives.
"""
Version = '2012-06-01'
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
account_id='-', is_secure=True, port=None,
proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, path='/',
provider='aws', security_token=None,
suppress_consec_slashes=True,
region=None, region_name='us-east-1',
profile_name=None):
if not region:
for reg in boto.glacier.regions():
if reg.name == region_name:
region = reg
break
self.region = region
self.account_id = account_id
super(Layer1, self).__init__(region.endpoint,
aws_access_key_id, aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass, debug,
https_connection_factory,
path, provider, security_token,
suppress_consec_slashes,
profile_name=profile_name)
def _required_auth_capability(self):
return ['hmac-v4']
def make_request(self, verb, resource, headers=None,
data='', ok_responses=(200,), params=None,
sender=None, response_headers=None):
if headers is None:
headers = {}
headers['x-amz-glacier-version'] = self.Version
uri = '/%s/%s' % (self.account_id, resource)
response = super(Layer1, self).make_request(verb, uri,
params=params,
headers=headers,
sender=sender,
data=data)
if response.status in ok_responses:
return GlacierResponse(response, response_headers)
else:
# create glacier-specific exceptions
raise UnexpectedHTTPResponseError(ok_responses, response)
# Vaults
def list_vaults(self, limit=None, marker=None):
"""
This operation lists all vaults owned by the calling user's
account. The list returned in the response is ASCII-sorted by
vault name.
By default, this operation returns up to 1,000 items. If there
are more vaults to list, the response `marker` field contains
the vault Amazon Resource Name (ARN) at which to continue the
list with a new List Vaults request; otherwise, the `marker`
field is `null`. To return a list of vaults that begins at a
specific vault, set the `marker` request parameter to the
vault ARN you obtained from a previous List Vaults request.
You can also limit the number of vaults returned in the
response by specifying the `limit` parameter in the request.
An AWS account has full permission to perform all operations
(actions). However, AWS Identity and Access Management (IAM)
users don't have any permissions by default. You must grant
them explicit permission to perform specific actions. For more
information, see `Access Control Using AWS Identity and Access
Management (IAM)`_.
For conceptual information and underlying REST API, go to
`Retrieving Vault Metadata in Amazon Glacier`_ and `List
Vaults `_ in the Amazon Glacier Developer Guide .
:type marker: string
:param marker: A string used for pagination. The marker specifies the
vault ARN after which the listing of vaults should begin.
:type limit: string
:param limit: The maximum number of items returned in the response. If
you don't specify a value, the List Vaults operation returns up to
1,000 items.
"""
params = {}
if limit:
params['limit'] = limit
if marker:
params['marker'] = marker
return self.make_request('GET', 'vaults', params=params)
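# Usage sketch (assumes AWS credentials are available to boto; the limit
# value and the 'VaultList' response key follow the List Vaults JSON
# format described above):
#
#     layer1 = Layer1(region_name='us-east-1')
#     response = layer1.list_vaults(limit='10')
#     for vault in response['VaultList']:
#         print(vault['VaultName'])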
def describe_vault(self, vault_name):
"""
This operation returns information about a vault, including
the vault's Amazon Resource Name (ARN), the date the vault was
created, the number of archives it contains, and the total
size of all the archives in the vault. The number of archives
and their total size are as of the last inventory generation.
This means that if you add or remove an archive from a vault,
and then immediately use Describe Vault, the change in
contents will not be immediately reflected. If you want to
retrieve the latest inventory of the vault, use InitiateJob.
Amazon Glacier generates vault inventories approximately
daily. For more information, see `Downloading a Vault
Inventory in Amazon Glacier`_.
An AWS account has full permission to perform all operations
(actions). However, AWS Identity and Access Management (IAM)
users don't have any permissions by default. You must grant
them explicit permission to perform specific actions. For more
information, see `Access Control Using AWS Identity and Access
Management (IAM)`_.
For conceptual information and underlying REST API, go to
`Retrieving Vault Metadata in Amazon Glacier`_ and `Describe
Vault `_ in the Amazon Glacier Developer Guide .
:type vault_name: string
:param vault_name: The name of the vault.
"""
uri = 'vaults/%s' % vault_name
return self.make_request('GET', uri)
def create_vault(self, vault_name):
"""
This operation creates a new vault with the specified name.
The name of the vault must be unique within a region for an
AWS account. You can create up to 1,000 vaults per account. If
you need to create more vaults, contact Amazon Glacier.
You must use the following guidelines when naming a vault.
+ Names can be between 1 and 255 characters long.
+ Allowed characters are a-z, A-Z, 0-9, '_' (underscore), '-'
(hyphen), and '.' (period).
This operation is idempotent.
An AWS account has full permission to perform all operations
(actions). However, AWS Identity and Access Management (IAM)
users don't have any permissions by default. You must grant
them explicit permission to perform specific actions. For more
information, see `Access Control Using AWS Identity and Access
Management (IAM)`_.
For conceptual information and underlying REST API, go to
`Creating a Vault in Amazon Glacier`_ and `Create Vault `_ in
the Amazon Glacier Developer Guide .
:type vault_name: string
:param vault_name: The name of the vault.
"""
uri = 'vaults/%s' % vault_name
return self.make_request('PUT', uri, ok_responses=(201,),
response_headers=[('Location', 'Location')])
def delete_vault(self, vault_name):
"""
This operation deletes a vault. Amazon Glacier will delete a
vault only if there are no archives in the vault as of the
last inventory and there have been no writes to the vault
since the last inventory. If either of these conditions is not
satisfied, the vault deletion fails (that is, the vault is not
removed) and Amazon Glacier returns an error. You can use
DescribeVault to return the number of archives in a vault, and
you can use `Initiate a Job (POST jobs)`_ to initiate a new
inventory retrieval for a vault. The inventory contains the
archive IDs you use to delete archives using `Delete Archive
(DELETE archive)`_.
This operation is idempotent.
An AWS account has full permission to perform all operations
(actions). However, AWS Identity and Access Management (IAM)
users don't have any permissions by default. You must grant
them explicit permission to perform specific actions. For more
information, see `Access Control Using AWS Identity and Access
Management (IAM)`_.
For conceptual information and underlying REST API, go to
`Deleting a Vault in Amazon Glacier`_ and `Delete Vault `_ in
the Amazon Glacier Developer Guide .
:type vault_name: string
:param vault_name: The name of the vault.
"""
uri = 'vaults/%s' % vault_name
return self.make_request('DELETE', uri, ok_responses=(204,))
def get_vault_notifications(self, vault_name):
"""
This operation retrieves the `notification-configuration`
subresource of the specified vault.
For information about setting a notification configuration on
a vault, see SetVaultNotifications. If a notification
configuration for a vault is not set, the operation returns a
`404 Not Found` error. For more information about vault
notifications, see `Configuring Vault Notifications in Amazon
Glacier`_.
An AWS account has full permission to perform all operations
(actions). However, AWS Identity and Access Management (IAM)
users don't have any permissions by default. You must grant
them explicit permission to perform specific actions. For more
information, see `Access Control Using AWS Identity and Access
Management (IAM)`_.
For conceptual information and underlying REST API, go to
`Configuring Vault Notifications in Amazon Glacier`_ and `Get
Vault Notification Configuration `_ in the Amazon Glacier
Developer Guide .
:type vault_name: string
:param vault_name: The name of the vault.
"""
uri = 'vaults/%s/notification-configuration' % vault_name
return self.make_request('GET', uri)
def set_vault_notifications(self, vault_name, notification_config):
"""
This operation configures notifications that will be sent when
specific events happen to a vault. By default, you don't get
any notifications.
To configure vault notifications, send a PUT request to the
`notification-configuration` subresource of the vault. The
request should include a JSON document that provides an Amazon
SNS topic and specific events for which you want Amazon
Glacier to send notifications to the topic.
Amazon SNS topics must grant permission to the vault to be
allowed to publish notifications to the topic. You can
configure a vault to publish a notification for the following
vault events:
+ **ArchiveRetrievalCompleted** This event occurs when a job
that was initiated for an archive retrieval is completed
(InitiateJob). The status of the completed job can be
"Succeeded" or "Failed". The notification sent to the SNS
topic is the same output as returned from DescribeJob.
+ **InventoryRetrievalCompleted** This event occurs when a job
that was initiated for an inventory retrieval is completed
(InitiateJob). The status of the completed job can be
"Succeeded" or "Failed". The notification sent to the SNS
topic is the same output as returned from DescribeJob.
An AWS account has full permission to perform all operations
(actions). However, AWS Identity and Access Management (IAM)
users don't have any permissions by default. You must grant
them explicit permission to perform specific actions. For more
information, see `Access Control Using AWS Identity and Access
Management (IAM)`_.
For conceptual information and underlying REST API, go to
`Configuring Vault Notifications in Amazon Glacier`_ and `Set
Vault Notification Configuration `_ in the Amazon Glacier
Developer Guide .
:type vault_name: string
:param vault_name: The name of the vault.
:type vault_notification_config: dict
:param vault_notification_config: Provides options for specifying
notification configuration.
The format of the dictionary is:
{'SNSTopic': 'mytopic',
'Events': [event1,...]}
"""
uri = 'vaults/%s/notification-configuration' % vault_name
json_config = json.dumps(notification_config)
return self.make_request('PUT', uri, data=json_config,
ok_responses=(204,))
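# Usage sketch (the topic ARN below is a placeholder):
#
#     layer1.set_vault_notifications('examplevault', {
#         'SNSTopic': 'arn:aws:sns:us-east-1:111122223333:examples-topic',
#         'Events': ['ArchiveRetrievalCompleted',
#                    'InventoryRetrievalCompleted']})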
def delete_vault_notifications(self, vault_name):
"""
This operation deletes the notification configuration set for
a vault. The operation is eventually consistent; that is, it
might take some time for Amazon Glacier to completely disable
the notifications and you might still receive some
notifications for a short time after you send the delete
request.
An AWS account has full permission to perform all operations
(actions). However, AWS Identity and Access Management (IAM)
users don't have any permissions by default. You must grant
them explicit permission to perform specific actions. For more
information, see `Access Control Using AWS Identity and Access
Management (IAM)`_.
For conceptual information and underlying REST API, go to
`Configuring Vault Notifications in Amazon Glacier`_ and
`Delete Vault Notification Configuration `_ in the Amazon
Glacier Developer Guide.
:type vault_name: string
:param vault_name: The name of the vault.
"""
uri = 'vaults/%s/notification-configuration' % vault_name
return self.make_request('DELETE', uri, ok_responses=(204,))
# Jobs
def list_jobs(self, vault_name, completed=None, status_code=None,
limit=None, marker=None):
"""
This operation lists jobs for a vault, including jobs that are
in-progress and jobs that have recently finished.
Amazon Glacier retains recently completed jobs for a period
before deleting them; however, it eventually removes completed
jobs. The output of completed jobs can be retrieved. Retaining
completed jobs for a period of time after they have completed
enables you to get a job output in the event you miss the job
completion notification or your first attempt to download it
fails. For example, suppose you start an archive retrieval job
to download an archive. After the job completes, you start to
download the archive but encounter a network error. In this
scenario, you can retry and download the archive while the job
exists.
To retrieve an archive or retrieve a vault inventory from
Amazon Glacier, you first initiate a job, and after the job
completes, you download the data. For an archive retrieval,
the output is the archive data, and for an inventory
retrieval, it is the inventory list. The List Job operation
returns a list of these jobs sorted by job initiation time.
This List Jobs operation supports pagination. By default, this
operation returns up to 1,000 jobs in the response. You should
always check the response for a `marker` at which to continue
the list; if there are no more items the `marker` is `null`.
To return a list of jobs that begins at a specific job, set
the `marker` request parameter to the value you obtained from
a previous List Jobs request. You can also limit the number of
jobs returned in the response by specifying the `limit`
parameter in the request.
Additionally, you can filter the jobs list returned by
specifying an optional `statuscode` (InProgress, Succeeded, or
Failed) and `completed` (true, false) parameter. The
`statuscode` allows you to specify that only jobs that match a
specified status are returned. The `completed` parameter
allows you to specify that only jobs in a specific completion
state are returned.
An AWS account has full permission to perform all operations
(actions). However, AWS Identity and Access Management (IAM)
users don't have any permissions by default. You must grant
them explicit permission to perform specific actions. For more
information, see `Access Control Using AWS Identity and Access
Management (IAM)`_.
For the underlying REST API, go to `List Jobs `_
:type vault_name: string
:param vault_name: The name of the vault.
:type limit: string
:param limit: Specifies that the response be limited to the specified
number of items or fewer. If not specified, the List Jobs operation
returns up to 1,000 jobs.
:type marker: string
:param marker: An opaque string used for pagination. This value
specifies the job at which the listing of jobs should begin. Get
the marker value from a previous List Jobs response. You need only
include the marker if you are continuing the pagination of results
started in a previous List Jobs request.
:type statuscode: string
:param statuscode: Specifies the type of job status to return. You can
specify the following values: "InProgress", "Succeeded", or
"Failed".
:type completed: string
:param completed: Specifies the state of the jobs to return. You can
specify `True` or `False`.
"""
params = {}
if limit:
params['limit'] = limit
if marker:
params['marker'] = marker
if status_code:
params['statuscode'] = status_code
if completed is not None:
params['completed'] = 'true' if completed else 'false'
uri = 'vaults/%s/jobs' % vault_name
return self.make_request('GET', uri, params=params)
def describe_job(self, vault_name, job_id):
"""
This operation returns information about a job you previously
initiated, including the job initiation date, the user who
initiated the job, the job status code/message and the Amazon
SNS topic to notify after Amazon Glacier completes the job.
For more information about initiating a job, see InitiateJob.
This operation enables you to check the status of your job.
However, it is strongly recommended that you set up an Amazon
SNS topic and specify it in your initiate job request so that
Amazon Glacier can notify the topic after it completes the
job.
A job ID will not expire for at least 24 hours after Amazon
Glacier completes the job.
An AWS account has full permission to perform all operations
(actions). However, AWS Identity and Access Management (IAM)
users don't have any permissions by default. You must grant
them explicit permission to perform specific actions. For more
information, see `Access Control Using AWS Identity and Access
Management (IAM)`_.
For information about the underlying REST API, go to `Working
with Archives in Amazon Glacier`_ in the Amazon Glacier
Developer Guide .
:type vault_name: string
:param vault_name: The name of the vault.
:type job_id: string
:param job_id: The ID of the job to describe.
"""
uri = 'vaults/%s/jobs/%s' % (vault_name, job_id)
return self.make_request('GET', uri, ok_responses=(200,))
def initiate_job(self, vault_name, job_data):
"""
This operation initiates a job of the specified type. In this
release, you can initiate a job to retrieve either an archive
or a vault inventory (a list of archives in a vault).
Retrieving data from Amazon Glacier is a two-step process:
#. Initiate a retrieval job.
#. After the job completes, download the bytes.
The retrieval request is executed asynchronously. When you
initiate a retrieval job, Amazon Glacier creates a job and
returns a job ID in the response. When Amazon Glacier
completes the job, you can get the job output (archive or
inventory data). For information about getting job output, see
GetJobOutput operation.
The job must complete before you can get its output. To
determine when a job is complete, you have the following
options:
+ **Use Amazon SNS Notification** You can specify an Amazon
Simple Notification Service (Amazon SNS) topic to which Amazon
Glacier can post a notification after the job is completed.
You can specify an SNS topic per job request. The notification
is sent only after Amazon Glacier completes the job. In
addition to specifying an SNS topic per job request, you can
configure vault notifications for a vault so that job
notifications are always sent. For more information, see
SetVaultNotifications.
+ **Get job details** You can make a DescribeJob request to
obtain job status information while a job is in progress.
However, it is more efficient to use an Amazon SNS
notification to determine when a job is complete.
The information you get via notification is same that you get
by calling DescribeJob.
If for a specific event, you add both the notification
configuration on the vault and also specify an SNS topic in
your initiate job request, Amazon Glacier sends both
notifications. For more information, see
SetVaultNotifications.
An AWS account has full permission to perform all operations
(actions). However, AWS Identity and Access Management (IAM)
users don't have any permissions by default. You must grant
them explicit permission to perform specific actions. For more
information, see `Access Control Using AWS Identity and Access
Management (IAM)`_.
**About the Vault Inventory**
Amazon Glacier prepares an inventory for each vault
periodically, every 24 hours. When you initiate a job for a
vault inventory, Amazon Glacier returns the last inventory for
the vault. The inventory data you get might be up to a day or
two old. Also, the initiate inventory job might take some
time to complete before you can download the vault inventory.
So you do not want to retrieve a vault inventory for each
vault operation. However, in some scenarios, you might find
the vault inventory useful. For example, when you upload an
archive, you can provide an archive description but not an
archive name. Amazon Glacier provides you a unique archive ID,
an opaque string of characters. So, you might maintain your
own database that maps archive names to their corresponding
Amazon Glacier assigned archive IDs. You might find the vault
inventory useful in the event you need to reconcile
information in your database with the actual vault inventory.
**About Ranged Archive Retrieval**
You can initiate an archive retrieval for the whole archive or
a range of the archive. In the case of ranged archive
retrieval, you specify a byte range to return or the whole
archive. The range specified must be megabyte (MB) aligned,
that is the range start value must be divisible by 1 MB and
range end value plus 1 must be divisible by 1 MB or equal the
end of the archive. If the ranged archive retrieval is not
megabyte aligned, this operation returns a 400 response.
Furthermore, to ensure you get checksum values for data you
download using Get Job Output API, the range must be tree hash
aligned.
An AWS account has full permission to perform all operations
(actions). However, AWS Identity and Access Management (IAM)
users don't have any permissions by default. You must grant
them explicit permission to perform specific actions. For more
information, see `Access Control Using AWS Identity and Access
Management (IAM)`_.
For conceptual information and the underlying REST API, go to
`Initiate a Job`_ and `Downloading a Vault Inventory`_
:type account_id: string
:param account_id: The `AccountId` is the AWS Account ID. You can
specify either the AWS Account ID or optionally a '-', in which
case Amazon Glacier uses the AWS Account ID associated with the
credentials used to sign the request. If you specify your Account
ID, do not include hyphens in it.
:type vault_name: string
:param vault_name: The name of the vault.
:type job_parameters: dict
:param job_parameters: Provides options for specifying job information.
The dictionary can contain the following attributes:
* ArchiveId - The ID of the archive you want to retrieve.
This field is required only if the Type is set to
archive-retrieval.
* Description - The optional description for the job.
* Format - When initiating a job to retrieve a vault
inventory, you can optionally add this parameter to
specify the output format. Valid values are: CSV|JSON.
* SNSTopic - The Amazon SNS topic ARN where Amazon Glacier
sends a notification when the job is completed and the
output is ready for you to download.
* Type - The job type. Valid values are:
archive-retrieval|inventory-retrieval
* RetrievalByteRange - Optionally specify the range of
bytes to retrieve.
* InventoryRetrievalParameters: Optional job parameters
* Format - The output format, like "JSON"
* StartDate - ISO8601 starting date string
* EndDate - ISO8601 ending date string
* Limit - Maximum number of entries
* Marker - A unique string used for pagination
"""
uri = 'vaults/%s/jobs' % vault_name
response_headers = [('x-amz-job-id', u'JobId'),
('Location', u'Location')]
json_job_data = json.dumps(job_data)
return self.make_request('POST', uri, data=json_job_data,
ok_responses=(202,),
response_headers=response_headers)
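# Usage sketch (the archive ID is a placeholder; job parameters follow
# the attributes documented above):
#
#     job = layer1.initiate_job('examplevault', {
#         'Type': 'archive-retrieval',
#         'ArchiveId': 'EXAMPLE_ARCHIVE_ID',
#         'Description': 'restore monthly backup'})
#     job_id = job['JobId']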
def get_job_output(self, vault_name, job_id, byte_range=None):
"""
This operation downloads the output of the job you initiated
using InitiateJob. Depending on the job type you specified
when you initiated the job, the output will be either the
content of an archive or a vault inventory.
A job ID will not expire for at least 24 hours after Amazon
Glacier completes the job. That is, you can download the job
output within the 24 hours period after Amazon Glacier
completes the job.
If the job output is large, then you can use the `Range`
request header to retrieve a portion of the output. This
allows you to download the entire output in smaller chunks of
bytes. For example, suppose you have 1 GB of job output you
want to download and you decide to download 128 MB chunks of
data at a time, which is a total of eight Get Job Output
requests. You use the following process to download the job
output:
#. Download a 128 MB chunk of output by specifying the
appropriate byte range using the `Range` header.
#. Along with the data, the response includes a checksum of
the payload. You compute the checksum of the payload on the
client and compare it with the checksum you received in the
response to ensure you received all the expected data.
#. Repeat steps 1 and 2 for all the eight 128 MB chunks of
output data, each time specifying the appropriate byte range.
#. After downloading all the parts of the job output, you have
a list of eight checksum values. Compute the tree hash of
these values to find the checksum of the entire output. Using
the Describe Job API, obtain job information of the job that
provided you the output. The response includes the checksum of
the entire archive stored in Amazon Glacier. You compare this
value with the checksum you computed to ensure you have
downloaded the entire archive content with no errors.
An AWS account has full permission to perform all operations
(actions). However, AWS Identity and Access Management (IAM)
users don't have any permissions by default. You must grant
them explicit permission to perform specific actions. For more
information, see `Access Control Using AWS Identity and Access
Management (IAM)`_.
For conceptual information and the underlying REST API, go to
`Downloading a Vault Inventory`_, `Downloading an Archive`_,
and `Get Job Output `_
:type account_id: string
:param account_id: The `AccountId` is the AWS Account ID. You can
specify either the AWS Account ID or optionally a '-', in which
case Amazon Glacier uses the AWS Account ID associated with the
credentials used to sign the request. If you specify your Account
ID, do not include hyphens in it.
:type vault_name: string
:param vault_name: The name of the vault.
:type job_id: string
:param job_id: The job ID whose data is downloaded.
:type byte_range: string
:param byte_range: The range of bytes to retrieve from the output. For
example, if you want to download the first 1,048,576 bytes, specify
"Range: bytes=0-1048575". By default, this operation downloads the
entire output.
"""
response_headers = [('x-amz-sha256-tree-hash', u'TreeHash'),
('Content-Range', u'ContentRange'),
('Content-Type', u'ContentType')]
headers = None
if byte_range:
headers = {'Range': 'bytes=%d-%d' % byte_range}
uri = 'vaults/%s/jobs/%s/output' % (vault_name, job_id)
response = self.make_request('GET', uri, headers=headers,
ok_responses=(200, 206),
response_headers=response_headers)
return response
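# Usage sketch: fetch the first mebibyte of a completed job's output
# (byte_range is an inclusive (start, end) tuple; reading the body via
# read() is assumed from the GlacierResponse wrapper):
#
#     output = layer1.get_job_output('examplevault', job_id,
#                                    byte_range=(0, 1048575))
#     data = output.read()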
# Archives
def upload_archive(self, vault_name, archive,
linear_hash, tree_hash, description=None):
"""
This operation adds an archive to a vault. This is a
synchronous operation, and for a successful upload, your data
is durably persisted. Amazon Glacier returns the archive ID in
the `x-amz-archive-id` header of the response.
You must use the archive ID to access your data in Amazon
Glacier. After you upload an archive, you should save the
archive ID returned so that you can retrieve or delete the
archive later. Besides saving the archive ID, you can also
index it and give it a friendly name to allow for better
searching. You can also use the optional archive description
field to specify how the archive is referred to in an external
index of archives, such as you might create in Amazon
DynamoDB. You can also get the vault inventory to obtain a
list of archive IDs in a vault. For more information, see
InitiateJob.
You must provide a SHA256 tree hash of the data you are
uploading. For information about computing a SHA256 tree hash,
see `Computing Checksums`_.
You can optionally specify an archive description of up to
1,024 printable ASCII characters. You can get the archive
description when you either retrieve the archive or get the
vault inventory. For more information, see InitiateJob. Amazon
Glacier does not interpret the description in any way. An
archive description does not need to be unique. You cannot use
the description to retrieve or sort the archive list.
Archives are immutable. After you upload an archive, you
cannot edit the archive or its description.
An AWS account has full permission to perform all operations
(actions). However, AWS Identity and Access Management (IAM)
users don't have any permissions by default. You must grant
them explicit permission to perform specific actions. For more
information, see `Access Control Using AWS Identity and Access
Management (IAM)`_.
For conceptual information and underlying REST API, go to
`Uploading an Archive in Amazon Glacier`_ and `Upload
Archive`_ in the Amazon Glacier Developer Guide .
:type vault_name: str
:param vault_name: The name of the vault
:type archive: bytes
:param archive: The data to upload.
:type linear_hash: str
:param linear_hash: The SHA256 checksum (a linear hash) of the
payload.
:type tree_hash: str
:param tree_hash: The user-computed SHA256 tree hash of the
payload. For more information on computing the
tree hash, see http://goo.gl/u7chF.
:type description: str
:param description: The optional description of the archive you
are uploading.
"""
response_headers = [('x-amz-archive-id', u'ArchiveId'),
('Location', u'Location'),
('x-amz-sha256-tree-hash', u'TreeHash')]
uri = 'vaults/%s/archives' % vault_name
try:
content_length = str(len(archive))
except (TypeError, AttributeError):
# If a file like object is provided, try to retrieve
# the file size via fstat.
content_length = str(os.fstat(archive.fileno()).st_size)
headers = {'x-amz-content-sha256': linear_hash,
'x-amz-sha256-tree-hash': tree_hash,
'Content-Length': content_length}
if description:
headers['x-amz-archive-description'] = description
if self._is_file_like(archive):
sender = ResettingFileSender(archive)
else:
sender = None
return self.make_request('POST', uri, headers=headers,
sender=sender,
data=archive, ok_responses=(201,),
response_headers=response_headers)
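# Usage sketch using boto's hash helpers ('backup.tar' is a placeholder
# path):
#
#     from boto.glacier.utils import compute_hashes_from_fileobj
#     archive = open('backup.tar', 'rb')
#     linear_hash, tree_hash = compute_hashes_from_fileobj(archive)
#     archive.seek(0)
#     response = layer1.upload_archive('examplevault', archive,
#                                      linear_hash, tree_hash)
#     archive_id = response['ArchiveId']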
def _is_file_like(self, archive):
return hasattr(archive, 'seek') and hasattr(archive, 'tell')
def delete_archive(self, vault_name, archive_id):
"""
This operation deletes an archive from a vault. Subsequent
requests to initiate a retrieval of this archive will fail.
Archive retrievals that are in progress for this archive ID
may or may not succeed according to the following scenarios:
+ If the archive retrieval job is actively preparing the data
for download when Amazon Glacier receives the delete archive
request, the archival retrieval operation might fail.
+ If the archive retrieval job has successfully prepared the
archive for download when Amazon Glacier receives the delete
archive request, you will be able to download the output.
This operation is idempotent. Attempting to delete an already-
deleted archive does not result in an error.
An AWS account has full permission to perform all operations
(actions). However, AWS Identity and Access Management (IAM)
users don't have any permissions by default. You must grant
them explicit permission to perform specific actions. For more
information, see `Access Control Using AWS Identity and Access
Management (IAM)`_.
For conceptual information and underlying REST API, go to
`Deleting an Archive in Amazon Glacier`_ and `Delete Archive`_
in the Amazon Glacier Developer Guide .
:type vault_name: string
:param vault_name: The name of the vault.
:type archive_id: string
:param archive_id: The ID of the archive to delete.
"""
uri = 'vaults/%s/archives/%s' % (vault_name, archive_id)
return self.make_request('DELETE', uri, ok_responses=(204,))
# Multipart
def initiate_multipart_upload(self, vault_name, part_size,
description=None):
"""
This operation initiates a multipart upload. Amazon Glacier
creates a multipart upload resource and returns its ID in the
response. The multipart upload ID is used in subsequent
requests to upload parts of an archive (see
UploadMultipartPart).
When you initiate a multipart upload, you specify the part
size in number of bytes. The part size must be a megabyte
(1024 KB) multiplied by a power of 2, for example, 1048576 (1
MB), 2097152 (2 MB), 4194304 (4 MB), 8388608 (8 MB), and so
on. The minimum allowable part size is 1 MB, and the maximum
is 4 GB.
Every part you upload to this resource (see
UploadMultipartPart), except the last one, must have the same
size. The last one can be the same size or smaller. For
example, suppose you want to upload a 16.2 MB file. If you
initiate the multipart upload with a part size of 4 MB, you
will upload four parts of 4 MB each and one part of 0.2 MB.
You don't need to know the size of the archive when you start
a multipart upload because Amazon Glacier does not require you
to specify the overall archive size.
After you complete the multipart upload, Amazon Glacier
removes the multipart upload resource referenced by the ID.
Amazon Glacier also removes the multipart upload resource if
you cancel the multipart upload or it may be removed if there
is no activity for a period of 24 hours.
An AWS account has full permission to perform all operations
(actions). However, AWS Identity and Access Management (IAM)
users don't have any permissions by default. You must grant
them explicit permission to perform specific actions. For more
information, see `Access Control Using AWS Identity and Access
Management (IAM)`_.
For conceptual information and underlying REST API, go to
`Uploading Large Archives in Parts (Multipart Upload)`_ and
`Initiate Multipart Upload`_ in the Amazon Glacier Developer
Guide .
:type vault_name: str
:param vault_name: The name of the vault.
:type description: str
:param description: The archive description that you are uploading in
parts.
:type part_size: int
:param part_size: The size of each part except the last, in bytes. The
last part can be smaller than this part size.
"""
response_headers = [('x-amz-multipart-upload-id', u'UploadId'),
('Location', u'Location')]
headers = {'x-amz-part-size': str(part_size)}
if description:
headers['x-amz-archive-description'] = description
uri = 'vaults/%s/multipart-uploads' % vault_name
response = self.make_request('POST', uri, headers=headers,
ok_responses=(201,),
response_headers=response_headers)
return response
def complete_multipart_upload(self, vault_name, upload_id,
sha256_treehash, archive_size):
"""
You call this operation to inform Amazon Glacier that all the
archive parts have been uploaded and that Amazon Glacier can
now assemble the archive from the uploaded parts. After
assembling and saving the archive to the vault, Amazon Glacier
returns the URI path of the newly created archive resource.
Using the URI path, you can then access the archive. After you
upload an archive, you should save the archive ID returned to
retrieve the archive at a later point. You can also get the
vault inventory to obtain a list of archive IDs in a vault.
For more information, see InitiateJob.
In the request, you must include the computed SHA256 tree hash
of the entire archive you have uploaded. For information about
computing a SHA256 tree hash, see `Computing Checksums`_. On
the server side, Amazon Glacier also constructs the SHA256
tree hash of the assembled archive. If the values match,
Amazon Glacier saves the archive to the vault; otherwise, it
returns an error, and the operation fails. The ListParts
operation returns a list of parts uploaded for a specific
multipart upload. It includes checksum information for each
uploaded part that can be used to debug a bad checksum issue.
Additionally, Amazon Glacier checks for any missing content
ranges when assembling the archive; if missing content
ranges are found, Amazon Glacier returns an error and the
operation fails.
Complete Multipart Upload is an idempotent operation. After
your first successful complete multipart upload, if you call
the operation again within a short period, the operation will
succeed and return the same archive ID. This is useful in the
event you experience a network issue that causes an aborted
connection or receive a 500 server error, in which case you
can repeat your Complete Multipart Upload request and get the
same archive ID without creating duplicate archives. Note,
however, that after the multipart upload completes, you cannot
call the List Parts operation and the multipart upload will
not appear in the List Multipart Uploads response, even if
idempotent complete is possible.
An AWS account has full permission to perform all operations
(actions). However, AWS Identity and Access Management (IAM)
users don't have any permissions by default. You must grant
them explicit permission to perform specific actions. For more
information, see `Access Control Using AWS Identity and Access
Management (IAM)`_.
For conceptual information and underlying REST API, go to
`Uploading Large Archives in Parts (Multipart Upload)`_ and
`Complete Multipart Upload`_ in the Amazon Glacier Developer
Guide.
:type vault_name: str
:param vault_name: The name of the vault.
:type upload_id: str
:param upload_id: The upload ID of the multipart upload.
:type sha256_treehash: str
:param sha256_treehash: The SHA256 tree hash of the entire archive.
It is the tree hash of the SHA256 tree hashes of the individual parts.
If the value you specify in the request does not match the SHA256
tree hash of the final assembled archive as computed by Amazon
Glacier, Amazon Glacier returns an error and the request fails.
:type archive_size: int
:param archive_size: The total size, in bytes, of the entire
archive. This value should be the sum of all the sizes of
the individual parts that you uploaded.
"""
response_headers = [('x-amz-archive-id', u'ArchiveId'),
('Location', u'Location')]
headers = {'x-amz-sha256-tree-hash': sha256_treehash,
'x-amz-archive-size': str(archive_size)}
uri = 'vaults/%s/multipart-uploads/%s' % (vault_name, upload_id)
response = self.make_request('POST', uri, headers=headers,
ok_responses=(201,),
response_headers=response_headers)
return response
def abort_multipart_upload(self, vault_name, upload_id):
"""
This operation aborts a multipart upload identified by the
upload ID.
After the Abort Multipart Upload request succeeds, you cannot
upload any more parts to the multipart upload or complete the
multipart upload. Aborting a completed upload fails. However,
aborting an already-aborted upload will succeed, for a short
time. For more information about uploading a part and
completing a multipart upload, see UploadMultipartPart and
CompleteMultipartUpload.
This operation is idempotent.
An AWS account has full permission to perform all operations
(actions). However, AWS Identity and Access Management (IAM)
users don't have any permissions by default. You must grant
them explicit permission to perform specific actions. For more
information, see `Access Control Using AWS Identity and Access
Management (IAM)`_.
For conceptual information and underlying REST API, go to
`Working with Archives in Amazon Glacier`_ and `Abort
Multipart Upload`_ in the Amazon Glacier Developer Guide.
:type vault_name: string
:param vault_name: The name of the vault.
:type upload_id: string
:param upload_id: The upload ID of the multipart upload to delete.
"""
uri = 'vaults/%s/multipart-uploads/%s' % (vault_name, upload_id)
return self.make_request('DELETE', uri, ok_responses=(204,))
def list_multipart_uploads(self, vault_name, limit=None, marker=None):
"""
This operation lists in-progress multipart uploads for the
specified vault. An in-progress multipart upload is a
multipart upload that has been initiated by an
InitiateMultipartUpload request, but has not yet been
completed or aborted. The list returned in the List Multipart
Upload response has no guaranteed order.
The List Multipart Uploads operation supports pagination. By
default, this operation returns up to 1,000 multipart uploads
in the response. You should always check the response for a
`marker` at which to continue the list; if there are no more
items the `marker` is `null`. To return a list of multipart
uploads that begins at a specific upload, set the `marker`
request parameter to the value you obtained from a previous
List Multipart Upload request. You can also limit the number
of uploads returned in the response by specifying the `limit`
parameter in the request.
Note the difference between this operation and listing parts
(ListParts). The List Multipart Uploads operation lists all
multipart uploads for a vault and does not require a multipart
upload ID. The List Parts operation requires a multipart
upload ID since parts are associated with a single upload.
An AWS account has full permission to perform all operations
(actions). However, AWS Identity and Access Management (IAM)
users don't have any permissions by default. You must grant
them explicit permission to perform specific actions. For more
information, see `Access Control Using AWS Identity and Access
Management (IAM)`_.
For conceptual information and the underlying REST API, go to
`Working with Archives in Amazon Glacier`_ and `List Multipart
Uploads`_ in the Amazon Glacier Developer Guide.
:type vault_name: string
:param vault_name: The name of the vault.
:type limit: string
:param limit: Specifies the maximum number of uploads returned in the
response body. If this value is not specified, the List Uploads
operation returns up to 1,000 uploads.
:type marker: string
:param marker: An opaque string used for pagination. This value
specifies the upload at which the listing of uploads should begin.
Get the marker value from a previous List Uploads response. You
need only include the marker if you are continuing the pagination
of results started in a previous List Uploads request.
"""
params = {}
if limit:
params['limit'] = limit
if marker:
params['marker'] = marker
uri = 'vaults/%s/multipart-uploads' % vault_name
return self.make_request('GET', uri, params=params)
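# Pagination sketch (comment-only, illustrative). Keep requesting with the
# returned marker until the service reports no more items; the 'Marker' and
# 'UploadsList' keys follow the Glacier REST response and are assumptions
# about the decoded payload, not guarantees of this client:
#
#     uploads, marker = [], None
#     while True:
#         resp = layer1.list_multipart_uploads('myvault', marker=marker)
#         uploads.extend(resp.get('UploadsList') or [])
#         marker = resp.get('Marker')
#         if not marker:
#             break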
def list_parts(self, vault_name, upload_id, limit=None, marker=None):
"""
This operation lists the parts of an archive that have been
uploaded in a specific multipart upload. You can make this
request at any time during an in-progress multipart upload
before you complete the upload (see CompleteMultipartUpload).
List Parts returns an error for completed uploads. The list
returned in the List Parts response is sorted by part range.
The List Parts operation supports pagination. By default, this
operation returns up to 1,000 uploaded parts in the response.
You should always check the response for a `marker` at which
to continue the list; if there are no more items the `marker`
is `null`. To return a list of parts that begins at a specific
part, set the `marker` request parameter to the value you
obtained from a previous List Parts request. You can also
limit the number of parts returned in the response by
specifying the `limit` parameter in the request.
An AWS account has full permission to perform all operations
(actions). However, AWS Identity and Access Management (IAM)
users don't have any permissions by default. You must grant
them explicit permission to perform specific actions. For more
information, see `Access Control Using AWS Identity and Access
Management (IAM)`_.
For conceptual information and the underlying REST API, go to
`Working with Archives in Amazon Glacier`_ and `List Parts`_
in the Amazon Glacier Developer Guide.
:type vault_name: string
:param vault_name: The name of the vault.
:type upload_id: string
:param upload_id: The upload ID of the multipart upload.
:type marker: string
:param marker: An opaque string used for pagination. This value
specifies the part at which the listing of parts should begin. Get
the marker value from the response of a previous List Parts
response. You need only include the marker if you are continuing
the pagination of results started in a previous List Parts request.
:type limit: string
:param limit: Specifies the maximum number of parts returned in the
response body. If this value is not specified, the List Parts
operation returns up to 1,000 parts.
"""
params = {}
if limit:
params['limit'] = limit
if marker:
params['marker'] = marker
uri = 'vaults/%s/multipart-uploads/%s' % (vault_name, upload_id)
return self.make_request('GET', uri, params=params)
def upload_part(self, vault_name, upload_id, linear_hash,
tree_hash, byte_range, part_data):
"""
This operation uploads a part of an archive. You can upload
archive parts in any order. You can also upload them in
parallel. You can upload up to 10,000 parts for a multipart
upload.
Amazon Glacier rejects your upload part request if any of the
following conditions is true:
+ **SHA256 tree hash does not match** To ensure that part data
is not corrupted in transmission, you compute a SHA256 tree
hash of the part and include it in your request. Upon
receiving the part data, Amazon Glacier also computes a SHA256
tree hash. If these hash values don't match, the operation
fails. For information about computing a SHA256 tree hash, see
`Computing Checksums`_.
+ **Part size does not match** The size of each part except the
last must match the size specified in the corresponding
InitiateMultipartUpload request. The size of the last part
must be the same size as, or smaller than, the specified size.
If you upload a part whose size is smaller than the part size
you specified in your initiate multipart upload request and
that part is not the last part, then the upload part request
will succeed. However, the subsequent Complete Multipart
Upload request will fail.
+ **Range does not align** The byte range value in the request
does not align with the part size specified in the
corresponding initiate request. For example, if you specify a
part size of 4194304 bytes (4 MB), then 0 to 4194303 bytes (4
MB - 1) and 4194304 (4 MB) to 8388607 (8 MB - 1) are valid
part ranges. However, if you set a range value of 2 MB to 6
MB, the range does not align with the part size and the upload
will fail.
This operation is idempotent. If you upload the same part
multiple times, the data included in the most recent request
overwrites the previously uploaded data.
An AWS account has full permission to perform all operations
(actions). However, AWS Identity and Access Management (IAM)
users don't have any permissions by default. You must grant
them explicit permission to perform specific actions. For more
information, see `Access Control Using AWS Identity and Access
Management (IAM)`_.
For conceptual information and underlying REST API, go to
`Uploading Large Archives in Parts (Multipart Upload)`_ and
`Upload Part`_ in the Amazon Glacier Developer Guide.
:type vault_name: str
:param vault_name: The name of the vault.
:type linear_hash: str
:param linear_hash: The SHA256 checksum (a linear hash) of the
payload.
:type tree_hash: str
:param tree_hash: The user-computed SHA256 tree hash of the
payload. For more information on computing the
tree hash, see http://goo.gl/u7chF.
:type upload_id: str
:param upload_id: The unique ID associated with this upload
operation.
:type byte_range: tuple of ints
:param byte_range: Identifies the range of bytes in the assembled
archive that will be uploaded in this part. Amazon Glacier uses
this information to assemble the archive in the proper sequence.
The format of this header follows RFC 2616. An example header is
Content-Range:bytes 0-4194303/*.
:type part_data: bytes
:param part_data: The data to be uploaded for the part
"""
headers = {'x-amz-content-sha256': linear_hash,
'x-amz-sha256-tree-hash': tree_hash,
'Content-Range': 'bytes %d-%d/*' % byte_range}
response_headers = [('x-amz-sha256-tree-hash', u'TreeHash')]
uri = 'vaults/%s/multipart-uploads/%s' % (vault_name, upload_id)
return self.make_request('PUT', uri, headers=headers,
data=part_data, ok_responses=(204,),
response_headers=response_headers)
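# Illustrative helper (not part of boto): a minimal sketch of driving a full
# multipart upload with the Layer1 calls above. The vault name, file name and
# 4 MB part size are assumptions chosen within the documented limits; tree
# hashes are computed locally with hashlib so the sketch stays self-contained
# (a non-empty file is assumed).
def _example_multipart_upload(layer1, vault_name, filename,
                              part_size=4 * 1024 * 1024):
    import binascii
    import hashlib
    import os

    def tree_hash(hashes):
        # Combine 1 MB chunk digests pairwise until one root digest remains.
        while len(hashes) > 1:
            paired = []
            for i in range(0, len(hashes), 2):
                if i + 1 < len(hashes):
                    paired.append(
                        hashlib.sha256(hashes[i] + hashes[i + 1]).digest())
                else:
                    paired.append(hashes[i])
            hashes = paired
        return hashes[0]

    mb = 1024 * 1024
    upload_id = layer1.initiate_multipart_upload(
        vault_name, part_size,
        description=os.path.basename(filename))['UploadId']
    total_size = os.path.getsize(filename)
    all_chunk_hashes = []
    offset = 0
    fp = open(filename, 'rb')
    try:
        while offset < total_size:
            part = fp.read(part_size)
            # Hash every 1 MB chunk; part sizes are MB multiples, so the
            # per-part chunk lists concatenate into the whole-archive list.
            chunk_hashes = [hashlib.sha256(part[i:i + mb]).digest()
                            for i in range(0, len(part), mb)]
            all_chunk_hashes.extend(chunk_hashes)
            layer1.upload_part(vault_name, upload_id,
                               hashlib.sha256(part).hexdigest(),
                               binascii.hexlify(tree_hash(chunk_hashes)),
                               (offset, offset + len(part) - 1), part)
            offset += len(part)
    finally:
        fp.close()
    return layer1.complete_multipart_upload(
        vault_name, upload_id,
        binascii.hexlify(tree_hash(all_chunk_hashes)), total_size)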
| mit |
adaussy/eclipse-monkey-revival | plugins/python/org.eclipse.eclipsemonkey.lang.python/Lib/test/test_fpformat.py | 129 | 2309 | '''
Tests for fpformat module
Nick Mathewson
'''
from test.test_support import run_unittest, import_module
import unittest
fpformat = import_module('fpformat', deprecated=True)
fix, sci, NotANumber = fpformat.fix, fpformat.sci, fpformat.NotANumber
StringType = type('')
# Test the old and obsolescent fpformat module.
#
# (It's obsolescent because fix(n,d) == "%.*f"%(d,n) and
# sci(n,d) == "%.*e"%(d,n)
# for all reasonable numeric n and d, except that sci gives 3 exponent
# digits instead of 2.
#
# Differences only occur for unreasonable n and d. <.2 wink>)
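# Concretely (matching test_basic_cases below): fix(100.0/3, 3) == '33.333',
# the same as "%.3f" % (100.0/3), while sci(100.0/3, 3) == '3.333e+001'
# versus "%.3e" % (100.0/3) == '3.333e+01' (two exponent digits on most
# platforms instead of sci's three).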
class FpformatTest(unittest.TestCase):
def checkFix(self, n, digits):
result = fix(n, digits)
if isinstance(n, StringType):
n = repr(n)
expected = "%.*f" % (digits, float(n))
self.assertEqual(result, expected)
def checkSci(self, n, digits):
result = sci(n, digits)
if isinstance(n, StringType):
n = repr(n)
expected = "%.*e" % (digits, float(n))
# add the extra 0 if needed
num, exp = expected.split("e")
if len(exp) < 4:
exp = exp[0] + "0" + exp[1:]
expected = "%se%s" % (num, exp)
self.assertEqual(result, expected)
def test_basic_cases(self):
self.assertEqual(fix(100.0/3, 3), '33.333')
self.assertEqual(sci(100.0/3, 3), '3.333e+001')
def test_reasonable_values(self):
for d in range(7):
for val in (1000.0/3, 1000, 1000.0, .002, 1.0/3, 1e10):
for realVal in (val, 1.0/val, -val, -1.0/val):
self.checkFix(realVal, d)
self.checkSci(realVal, d)
def test_failing_values(self):
# Now for 'unreasonable n and d'
self.assertEqual(fix(1.0, 1000), '1.'+('0'*1000))
self.assertEqual(sci("1"+('0'*1000), 0), '1e+1000')
# This behavior is inconsistent. sci raises an exception; fix doesn't.
yacht = "Throatwobbler Mangrove"
self.assertEqual(fix(yacht, 10), yacht)
try:
sci(yacht, 10)
except NotANumber:
pass
else:
self.fail("No exception on non-numeric sci")
def test_main():
run_unittest(FpformatTest)
if __name__ == "__main__":
test_main()
| epl-1.0 |
botswana-harvard/bais-subject | bais_subject/models/background_characteristics.py | 1 | 3719 | from django.db import models
from edc_base.model_fields import OtherCharField
from edc_base.model_mixins import BaseUuidModel
from ..choices import (
MALE_FEMALE,
EDUCATION,
EMPLOYMENT_STATUS,
YES_NO,
MINE_OCCUPATION,
COMMODITY,
RELIGION,
MARITAL_STATUS,
SPOUSE_VISIT)
class BackgroundCharacteristics(BaseUuidModel):
respondent_sex = models.CharField(
verbose_name='Choose sex of the respondent',
max_length=15,
choices=MALE_FEMALE)
respondent_age = models.IntegerField(
verbose_name='How old are you in complete years?',
)
respondent_education = models.CharField(
verbose_name='What is the highest level of education you have'
' completed? ',
max_length=45,
choices=EDUCATION
)
respondent_employment = models.CharField(
verbose_name='What is your employment status?',
max_length=45,
choices=EMPLOYMENT_STATUS
)
respondent_employment_other = OtherCharField(
verbose_name='If other, specify',
max_length=250,
blank=True,
null=True
)
current_occupation = OtherCharField(
verbose_name='What is your current occupation?',
max_length=250
)
mine = models.CharField(
verbose_name='Have you ever worked in the mine?',
max_length=45,
choices=YES_NO
)
mine_period = OtherCharField(
verbose_name='For how long have you worked in the mine?',
max_length=250,
blank=True,
null=True,
help_text='specify'
)
mine_occupation = models.CharField(
verbose_name='What was your occupation?',
max_length=45,
blank=True,
null=True,
choices=MINE_OCCUPATION
)
mine_occupation_other = OtherCharField(
verbose_name='If other, specify',
max_length=250,
blank=True,
null=True
)
commodity = models.CharField(
verbose_name='What is/was the type of the commodity mined?',
max_length=45,
blank=True,
null=True,
choices=COMMODITY
)
commodity_other = OtherCharField(
verbose_name='If other, specify',
max_length=250,
blank=True,
null=True
)
religion = models.CharField(
verbose_name='What is your main religious affiliation? ',
max_length=45,
choices=RELIGION
)
religion_other = OtherCharField(
verbose_name='If other, specify',
max_length=250,
blank=True,
null=True
)
marital_status = models.CharField(
verbose_name='What is your current marital status?',
max_length=45,
choices=MARITAL_STATUS
)
respondent_marriage_age = models.IntegerField(
verbose_name='How old were you when you first married/started'
' living together?',
help_text='AGE IN YEARS',
blank=True,
null=True,
)
living_with_spouse = models.CharField(
verbose_name='Does your husband/wife/partner live with you?',
max_length=45,
choices=YES_NO,
blank=True,
null=True,
)
spouse_visit = models.CharField(
verbose_name='If no, how often do you see/visit each other?',
max_length=45,
choices=SPOUSE_VISIT,
blank=True,
null=True,
)
respondent_married_years = models.IntegerField(
verbose_name='For how many years have you been married or '
'living together? ',
help_text='RECORD 00 IF LESS THAN ONE YEAR.',
blank=True,
null=True,
)
class Meta(BaseUuidModel.Meta):
app_label = 'bais_subject'
| gpl-3.0 |
risent/unidecode | unidecode/x017.py | 252 | 4190 | data = (
'[?]', # 0x00
'[?]', # 0x01
'[?]', # 0x02
'[?]', # 0x03
'[?]', # 0x04
'[?]', # 0x05
'[?]', # 0x06
'[?]', # 0x07
'[?]', # 0x08
'[?]', # 0x09
'[?]', # 0x0a
'[?]', # 0x0b
'[?]', # 0x0c
'[?]', # 0x0d
'[?]', # 0x0e
'[?]', # 0x0f
'[?]', # 0x10
'[?]', # 0x11
'[?]', # 0x12
'[?]', # 0x13
'[?]', # 0x14
'[?]', # 0x15
'[?]', # 0x16
'[?]', # 0x17
'[?]', # 0x18
'[?]', # 0x19
'[?]', # 0x1a
'[?]', # 0x1b
'[?]', # 0x1c
'[?]', # 0x1d
'[?]', # 0x1e
'[?]', # 0x1f
'[?]', # 0x20
'[?]', # 0x21
'[?]', # 0x22
'[?]', # 0x23
'[?]', # 0x24
'[?]', # 0x25
'[?]', # 0x26
'[?]', # 0x27
'[?]', # 0x28
'[?]', # 0x29
'[?]', # 0x2a
'[?]', # 0x2b
'[?]', # 0x2c
'[?]', # 0x2d
'[?]', # 0x2e
'[?]', # 0x2f
'[?]', # 0x30
'[?]', # 0x31
'[?]', # 0x32
'[?]', # 0x33
'[?]', # 0x34
'[?]', # 0x35
'[?]', # 0x36
'[?]', # 0x37
'[?]', # 0x38
'[?]', # 0x39
'[?]', # 0x3a
'[?]', # 0x3b
'[?]', # 0x3c
'[?]', # 0x3d
'[?]', # 0x3e
'[?]', # 0x3f
'[?]', # 0x40
'[?]', # 0x41
'[?]', # 0x42
'[?]', # 0x43
'[?]', # 0x44
'[?]', # 0x45
'[?]', # 0x46
'[?]', # 0x47
'[?]', # 0x48
'[?]', # 0x49
'[?]', # 0x4a
'[?]', # 0x4b
'[?]', # 0x4c
'[?]', # 0x4d
'[?]', # 0x4e
'[?]', # 0x4f
'[?]', # 0x50
'[?]', # 0x51
'[?]', # 0x52
'[?]', # 0x53
'[?]', # 0x54
'[?]', # 0x55
'[?]', # 0x56
'[?]', # 0x57
'[?]', # 0x58
'[?]', # 0x59
'[?]', # 0x5a
'[?]', # 0x5b
'[?]', # 0x5c
'[?]', # 0x5d
'[?]', # 0x5e
'[?]', # 0x5f
'[?]', # 0x60
'[?]', # 0x61
'[?]', # 0x62
'[?]', # 0x63
'[?]', # 0x64
'[?]', # 0x65
'[?]', # 0x66
'[?]', # 0x67
'[?]', # 0x68
'[?]', # 0x69
'[?]', # 0x6a
'[?]', # 0x6b
'[?]', # 0x6c
'[?]', # 0x6d
'[?]', # 0x6e
'[?]', # 0x6f
'[?]', # 0x70
'[?]', # 0x71
'[?]', # 0x72
'[?]', # 0x73
'[?]', # 0x74
'[?]', # 0x75
'[?]', # 0x76
'[?]', # 0x77
'[?]', # 0x78
'[?]', # 0x79
'[?]', # 0x7a
'[?]', # 0x7b
'[?]', # 0x7c
'[?]', # 0x7d
'[?]', # 0x7e
'[?]', # 0x7f
'k', # 0x80
'kh', # 0x81
'g', # 0x82
'gh', # 0x83
'ng', # 0x84
'c', # 0x85
'ch', # 0x86
'j', # 0x87
'jh', # 0x88
'ny', # 0x89
't', # 0x8a
'tth', # 0x8b
'd', # 0x8c
'ddh', # 0x8d
'nn', # 0x8e
't', # 0x8f
'th', # 0x90
'd', # 0x91
'dh', # 0x92
'n', # 0x93
'p', # 0x94
'ph', # 0x95
'b', # 0x96
'bh', # 0x97
'm', # 0x98
'y', # 0x99
'r', # 0x9a
'l', # 0x9b
'v', # 0x9c
'sh', # 0x9d
'ss', # 0x9e
's', # 0x9f
'h', # 0xa0
'l', # 0xa1
'q', # 0xa2
'a', # 0xa3
'aa', # 0xa4
'i', # 0xa5
'ii', # 0xa6
'u', # 0xa7
'uk', # 0xa8
'uu', # 0xa9
'uuv', # 0xaa
'ry', # 0xab
'ryy', # 0xac
'ly', # 0xad
'lyy', # 0xae
'e', # 0xaf
'ai', # 0xb0
'oo', # 0xb1
'oo', # 0xb2
'au', # 0xb3
'a', # 0xb4
'aa', # 0xb5
'aa', # 0xb6
'i', # 0xb7
'ii', # 0xb8
'y', # 0xb9
'yy', # 0xba
'u', # 0xbb
'uu', # 0xbc
'ua', # 0xbd
'oe', # 0xbe
'ya', # 0xbf
'ie', # 0xc0
'e', # 0xc1
'ae', # 0xc2
'ai', # 0xc3
'oo', # 0xc4
'au', # 0xc5
'M', # 0xc6
'H', # 0xc7
'a`', # 0xc8
'', # 0xc9
'', # 0xca
'', # 0xcb
'r', # 0xcc
'', # 0xcd
'!', # 0xce
'', # 0xcf
'', # 0xd0
'', # 0xd1
'', # 0xd2
'', # 0xd3
'.', # 0xd4
' // ', # 0xd5
':', # 0xd6
'+', # 0xd7
'++', # 0xd8
' * ', # 0xd9
' /// ', # 0xda
'KR', # 0xdb
'\'', # 0xdc
'[?]', # 0xdd
'[?]', # 0xde
'[?]', # 0xdf
'0', # 0xe0
'1', # 0xe1
'2', # 0xe2
'3', # 0xe3
'4', # 0xe4
'5', # 0xe5
'6', # 0xe6
'7', # 0xe7
'8', # 0xe8
'9', # 0xe9
'[?]', # 0xea
'[?]', # 0xeb
'[?]', # 0xec
'[?]', # 0xed
'[?]', # 0xee
'[?]', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
| gpl-2.0 |
PetePriority/home-assistant | homeassistant/components/switch/anel_pwrctrl.py | 8 | 3373 | """
Support for ANEL PwrCtrl switches.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.pwrctrl/
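Example configuration.yaml entry (an illustrative sketch; the values are
placeholders, only the keys come from the schema below):

switch:
  - platform: anel_pwrctrl
    username: YOUR_USERNAME
    password: YOUR_PASSWORD
    port_recv: 77
    port_send: 75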
"""
import logging
import socket
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.switch import (SwitchDevice, PLATFORM_SCHEMA)
from homeassistant.const import (CONF_HOST, CONF_PASSWORD, CONF_USERNAME)
from homeassistant.util import Throttle
REQUIREMENTS = ['anel_pwrctrl-homeassistant==0.0.1.dev2']
_LOGGER = logging.getLogger(__name__)
CONF_PORT_RECV = 'port_recv'
CONF_PORT_SEND = 'port_send'
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=5)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_PORT_RECV): cv.port,
vol.Required(CONF_PORT_SEND): cv.port,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_HOST): cv.string,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up PwrCtrl devices/switches."""
host = config.get(CONF_HOST, None)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
port_recv = config.get(CONF_PORT_RECV)
port_send = config.get(CONF_PORT_SEND)
from anel_pwrctrl import DeviceMaster
try:
master = DeviceMaster(
username=username, password=password, read_port=port_send,
write_port=port_recv)
master.query(ip_addr=host)
except socket.error as ex:
_LOGGER.error("Unable to discover PwrCtrl device: %s", str(ex))
return False
devices = []
for device in master.devices.values():
parent_device = PwrCtrlDevice(device)
devices.extend(
PwrCtrlSwitch(switch, parent_device)
for switch in device.switches.values()
)
add_entities(devices)
class PwrCtrlSwitch(SwitchDevice):
"""Representation of a PwrCtrl switch."""
def __init__(self, port, parent_device):
"""Initialize the PwrCtrl switch."""
self._port = port
self._parent_device = parent_device
@property
def should_poll(self):
"""Return the polling state."""
return True
@property
def unique_id(self):
"""Return the unique ID of the device."""
return '{device}-{switch_idx}'.format(
device=self._port.device.host,
switch_idx=self._port.get_index()
)
@property
def name(self):
"""Return the name of the device."""
return self._port.label
@property
def is_on(self):
"""Return true if the device is on."""
return self._port.get_state()
def update(self):
"""Trigger update for all switches on the parent device."""
self._parent_device.update()
def turn_on(self, **kwargs):
"""Turn the switch on."""
self._port.on()
def turn_off(self, **kwargs):
"""Turn the switch off."""
self._port.off()
class PwrCtrlDevice:
"""Device representation for per device throttling."""
def __init__(self, device):
"""Initialize the PwrCtrl device."""
self._device = device
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Update the device and all its switches."""
self._device.update()
| apache-2.0 |
amraboelela/swift | utils/swift_build_support/tests/test_shell.py | 39 | 5264 | # tests/test_shell.py -------------------------------------------*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
# ----------------------------------------------------------------------------
import os
import os.path
import shutil
import sys
import tempfile
import unittest
try:
# py2
from StringIO import StringIO
except ImportError:
# py3
from io import StringIO
from swift_build_support import shell
class ShellTestCase(unittest.TestCase):
def setUp(self):
self.tmpdir = os.path.realpath(tempfile.mkdtemp())
self._orig_stdout = sys.stdout
self._orig_stderr = sys.stderr
self.stdout = StringIO()
self.stderr = StringIO()
sys.stdout = self.stdout
sys.stderr = self.stderr
def tearDown(self):
sys.stdout = self._orig_stdout
sys.stderr = self._orig_stderr
if os.path.exists(self.tmpdir):
shutil.rmtree(self.tmpdir)
def test_quote_command(self):
self.assertEqual(shell.quote_command(["a b", "", "c"]), "'a b' '' c")
def test_call(self):
shell.dry_run = False
foo_file = os.path.join(self.tmpdir, 'foo.txt')
bar_file = os.path.join(self.tmpdir, 'bar.txt')
with open(foo_file, 'w') as f:
f.write("Hello Swift")
shell.call(['cp', foo_file, bar_file])
with open(bar_file, 'r') as f:
self.assertEqual(f.read(), "Hello Swift")
self.assertEqual(self.stdout.getvalue(), "")
self.assertEqual(self.stderr.getvalue(), '''\
+ cp {foo_file} {bar_file}
'''.format(foo_file=foo_file, bar_file=bar_file))
def test_capture(self):
self.assertEqual(shell.capture(["echo", "hi"]), "hi\n")
with self.assertRaises(SystemExit):
shell.capture(["false"])
self.assertIsNone(shell.capture(["false"], optional=True))
self.assertEqual(
shell.capture(["sh", "-c", "echo foo && false"],
allow_non_zero_exit=True), "foo\n")
with self.assertRaises(SystemExit):
shell.capture(["**not-a-command**"], optional=False)
self.assertIsNone(shell.capture(["**not-a-command**"], optional=True))
def test_rmtree(self):
shell.dry_run = False
path = os.path.join(self.tmpdir, 'foo', 'bar')
shell.makedirs(path)
self.assertTrue(os.path.isdir(path))
shell.rmtree(os.path.join(path))
self.assertFalse(
os.path.exists(os.path.join(path)))
self.assertTrue(
os.path.exists(os.path.join(self.tmpdir, 'foo')))
self.assertEqual(self.stdout.getvalue(), "")
self.assertEqual(self.stderr.getvalue(), '''\
+ mkdir -p {path}
+ rm -rf {path}
'''.format(path=path))
def test_pushd(self):
shell.dry_run = False
basedir = os.getcwd()
with shell.pushd(self.tmpdir):
self.assertEqual(os.getcwd(), self.tmpdir)
self.assertEqual(os.getcwd(), basedir)
# pushd inside pushd
with shell.pushd(self.tmpdir):
self.assertEqual(os.getcwd(), self.tmpdir)
shell.makedirs('foo')
with shell.pushd('foo'):
self.assertEqual(os.getcwd(),
os.path.join(self.tmpdir, 'foo'))
self.assertEqual(os.getcwd(), self.tmpdir)
self.assertEqual(os.getcwd(), basedir)
# cd inside pushd
with shell.pushd(self.tmpdir):
os.chdir('foo')
self.assertEqual(os.getcwd(), os.path.join(self.tmpdir, 'foo'))
os.chdir('..')
self.assertEqual(os.getcwd(), self.tmpdir)
shell.rmtree('foo')
self.assertEqual(os.getcwd(), basedir)
self.assertEqual(self.stdout.getvalue(), "")
self.assertEqual(self.stderr.getvalue(), '''\
+ pushd {tmpdir}
+ popd
+ pushd {tmpdir}
+ mkdir -p foo
+ pushd foo
+ popd
+ popd
+ pushd {tmpdir}
+ rm -rf foo
+ popd
'''.format(tmpdir=self.tmpdir))
def test_dry_run(self):
shell.dry_run = True
basedir = os.getcwd()
foobar_dir = os.path.join(self.tmpdir, 'foo', 'bar')
shell.makedirs(foobar_dir)
self.assertFalse(os.path.exists(os.path.join(self.tmpdir, 'foo')))
self.assertFalse(os.path.exists(foobar_dir))
with shell.pushd(foobar_dir):
self.assertEqual(os.getcwd(), basedir)
shell.call(['touch', 'testfile'])
self.assertFalse(os.path.exists(
os.path.join(foobar_dir, 'testfile')))
self.assertEqual(os.getcwd(), basedir)
shell.rmtree(self.tmpdir)
self.assertTrue(os.path.exists(self.tmpdir))
self.assertEqual(self.stdout.getvalue(), '''\
+ mkdir -p {foobar_dir}
+ pushd {foobar_dir}
+ touch testfile
+ popd
+ rm -rf {tmpdir}
'''.format(foobar_dir=foobar_dir, tmpdir=self.tmpdir))
self.assertEqual(self.stderr.getvalue(), "")
self.dry_run = False
| apache-2.0 |
Diapostrofo/instabot | instabot/bot/bot_archive.py | 3 | 1525 | from tqdm import tqdm
from . import delay
def archive(self, media_id, undo=False):
delay.small_delay(self)
media = self.get_media_info(media_id)
media = media[0] if isinstance(media, list) else media
if super(self.__class__, self).archiveMedia(media, undo):
self.total_archived += int(not undo)
self.total_unarchived += int(undo)
return True
self.logger.info("Media with %s is not %s ." % media_id, 'unarchived' if undo else 'archived')
return False
def archive_medias(self, medias):
broken_items = []
if len(medias) == 0:
self.logger.info("Nothing to archive.")
return broken_items
self.logger.info("Going to archive %d medias." % (len(medias)))
for media in tqdm(medias):
if not self.archive(media):
delay.error_delay(self)
broken_items = medias[medias.index(media):]
break
self.logger.info("DONE: Total archived %d medias." % self.total_archived)
return broken_items
def unarchive_medias(self, medias):
broken_items = []
if len(medias) == 0:
self.logger.info("Nothing to unarchive.")
return broken_items
self.logger.info("Going to unarchive %d medias." % (len(medias)))
for media in tqdm(medias):
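# ``unarchive`` is assumed to be provided by the surrounding Bot class
# (equivalent to calling archive() with undo=True); it is not defined here.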
if not self.unarchive(media):
delay.error_delay(self)
broken_items = medias[medias.index(media):]
break
self.logger.info("DONE: Total unarchived %d medias." % self.total_unarchived)
return broken_items
| apache-2.0 |
tornadozou/tensorflow | tensorflow/contrib/estimator/python/estimator/logit_fns_test.py | 9 | 2329 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""logit_fn tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.estimator.python.estimator import logit_fns
from tensorflow.python.client import session
from tensorflow.python.estimator import model_fn
from tensorflow.python.framework import constant_op
from tensorflow.python.platform import test
class LogitFnTest(test.TestCase):
def test_simple_call_logit_fn(self):
def dummy_logit_fn(features, mode):
if mode == model_fn.ModeKeys.TRAIN:
return features['f1']
else:
return features['f2']
features = {
'f1': constant_op.constant([[2., 3.]]),
'f2': constant_op.constant([[4., 5.]])
}
logit_fn_result = logit_fns.call_logit_fn(
dummy_logit_fn, features, model_fn.ModeKeys.EVAL, 'fake_params',
'fake_config')
with session.Session():
self.assertAllClose([[4., 5.]], logit_fn_result.eval())
def test_should_return_tensor(self):
def invalid_logit_fn(features, params):
return {
'tensor1': features['f1'] * params['input_multiplier'],
'tensor2': features['f2'] * params['input_multiplier']
}
features = {
'f1': constant_op.constant([[2., 3.]]),
'f2': constant_op.constant([[4., 5.]])
}
params = {'learning_rate': 0.001, 'input_multiplier': 2.0}
with self.assertRaisesRegexp(ValueError, 'model_fn should return a Tensor'):
logit_fns.call_logit_fn(invalid_logit_fn, features, 'fake_mode', params,
'fake_config')
if __name__ == '__main__':
test.main()
| apache-2.0 |
destijl/grr | grr/server/stats_server.py | 2 | 4317 | #!/usr/bin/env python
"""Stats server implementation."""
import BaseHTTPServer
import collections
import json
import socket
import threading
import logging
from grr.lib import config_lib
from grr.lib import registry
from grr.lib import stats
from grr.lib import utils
def _JSONMetricValue(metric_info, value):
if metric_info.metric_type == stats.MetricType.EVENT:
return dict(
sum=value.sum,
counter=value.count,
bins_heights=collections.OrderedDict(value.bins_heights))
else:
return value
def BuildVarzJsonString():
"""Builds Varz JSON string from all stats metrics."""
results = {}
for name, metric_info in stats.STATS.GetAllMetricsMetadata().iteritems():
info_dict = dict(metric_type=metric_info.metric_type.name)
if metric_info.value_type:
info_dict["value_type"] = metric_info.value_type.name
if metric_info.docstring:
info_dict["docstring"] = metric_info.docstring
if metric_info.units:
info_dict["units"] = metric_info.units.name
if metric_info.fields_defs:
info_dict["fields_defs"] = []
for field_def in metric_info.fields_defs:
info_dict["fields_defs"].append(
(field_def.field_name, utils.SmartStr(field_def.field_type)))
value = {}
all_fields = stats.STATS.GetMetricFields(name)
for f in all_fields:
joined_fields = ":".join(utils.SmartStr(fname) for fname in f)
value[joined_fields] = _JSONMetricValue(
metric_info, stats.STATS.GetMetricValue(
name, fields=f))
else:
value = _JSONMetricValue(metric_info, stats.STATS.GetMetricValue(name))
results[name] = dict(info=info_dict, value=value)
encoder = json.JSONEncoder()
return encoder.encode(results)
class StatsServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""Default stats server implementation."""
def do_GET(self): # pylint: disable=g-bad-name
if self.path == "/varz":
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(BuildVarzJsonString())
else:
self.send_error(403, "Access forbidden: %s" % self.path)
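# Illustrative query (the port is a placeholder; it comes from the
# "Monitoring.http_port" config option used below):
#
#     curl http://localhost:44451/varz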
class StatsServer(object):
def __init__(self, port):
self.port = port
def Start(self):
"""Start HTTPServer."""
# Use the same number of available ports as the adminui is using. If we
# have 10 available for adminui we will need 10 for the stats server.
adminui_max_port = config_lib.CONFIG.Get("AdminUI.port_max",
config_lib.CONFIG["AdminUI.port"])
additional_ports = adminui_max_port - config_lib.CONFIG["AdminUI.port"]
max_port = self.port + additional_ports
for port in range(self.port, max_port + 1):
# Make a simple reference implementation WSGI server
try:
server = BaseHTTPServer.HTTPServer(("", port), StatsServerHandler)
break
except socket.error as e:
if e.errno == socket.errno.EADDRINUSE and port < max_port:
logging.info("Port %s in use, trying %s", port, port + 1)
else:
raise
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
class StatsServerInit(registry.InitHook):
"""Starts up a varz server after everything is registered."""
def RunOnce(self):
"""Main method of this registry hook.
StatsServer implementation may be overriden. If there's a "stats_server"
module present in grr/local directory then
grr.local.stats_server.StatsServer implementation will be used instead of
a default one.
"""
# Figure out which port to use.
port = config_lib.CONFIG["Monitoring.http_port"]
if port != 0:
logging.info("Starting monitoring server on port %d.", port)
# pylint: disable=g-import-not-at-top
from grr.lib import local as local_overrides
# pylint: enable=g-import-not-at-top
if "stats_server" in dir(local_overrides):
stats_server = local_overrides.stats_server.StatsServer(port)
logging.debug("Using local StatsServer from %s", local_overrides)
else:
stats_server = StatsServer(port)
stats_server.Start()
else:
logging.info("Monitoring server disabled.")
| apache-2.0 |
pombredanne/MOG | nova/tests/api/openstack/compute/contrib/test_extended_ips_mac.py | 10 | 6087 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
import webob
from nova.api.openstack.compute.contrib import extended_ips_mac
from nova.api.openstack import xmlutil
from nova import compute
from nova.objects import instance as instance_obj
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_instance
UUID1 = '00000000-0000-0000-0000-000000000001'
UUID2 = '00000000-0000-0000-0000-000000000002'
UUID3 = '00000000-0000-0000-0000-000000000003'
NW_CACHE = [
{
'address': 'aa:aa:aa:aa:aa:aa',
'id': 1,
'network': {
'bridge': 'br0',
'id': 1,
'label': 'private',
'subnets': [
{
'cidr': '192.168.1.0/24',
'ips': [
{
'address': '192.168.1.100',
'type': 'fixed',
'floating_ips': [
{'address': '5.0.0.1', 'type': 'floating'},
],
},
],
},
]
}
},
{
'address': 'bb:bb:bb:bb:bb:bb',
'id': 2,
'network': {
'bridge': 'br1',
'id': 2,
'label': 'public',
'subnets': [
{
'cidr': '10.0.0.0/24',
'ips': [
{
'address': '10.0.0.100',
'type': 'fixed',
'floating_ips': [
{'address': '5.0.0.2', 'type': 'floating'},
],
}
],
},
]
}
}
]
ALL_IPS = []
for cache in NW_CACHE:
for subnet in cache['network']['subnets']:
for fixed in subnet['ips']:
sanitized = dict(fixed)
sanitized['mac_address'] = cache['address']
sanitized.pop('floating_ips')
sanitized.pop('type')
ALL_IPS.append(sanitized)
for floating in fixed['floating_ips']:
sanitized = dict(floating)
sanitized['mac_address'] = cache['address']
sanitized.pop('type')
ALL_IPS.append(sanitized)
ALL_IPS.sort()
def fake_compute_get(*args, **kwargs):
inst = fakes.stub_instance(1, uuid=UUID3, nw_cache=NW_CACHE)
return fake_instance.fake_instance_obj(args[1],
expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS, **inst)
def fake_compute_get_all(*args, **kwargs):
db_list = [
fakes.stub_instance(1, uuid=UUID1, nw_cache=NW_CACHE),
fakes.stub_instance(2, uuid=UUID2, nw_cache=NW_CACHE),
]
fields = instance_obj.INSTANCE_DEFAULT_FIELDS
return instance_obj._make_instance_list(args[1],
instance_obj.InstanceList(),
db_list, fields)
class ExtendedIpsMacTest(test.TestCase):
content_type = 'application/json'
prefix = '%s:' % extended_ips_mac.Extended_ips_mac.alias
def setUp(self):
super(ExtendedIpsMacTest, self).setUp()
fakes.stub_out_nw_api(self.stubs)
self.stubs.Set(compute.api.API, 'get', fake_compute_get)
self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Extended_ips_mac'])
def _make_request(self, url):
req = webob.Request.blank(url)
req.headers['Accept'] = self.content_type
res = req.get_response(fakes.wsgi_app(init_only=('servers',)))
return res
def _get_server(self, body):
return jsonutils.loads(body).get('server')
def _get_servers(self, body):
return jsonutils.loads(body).get('servers')
def _get_ips(self, server):
for network in server['addresses'].itervalues():
for ip in network:
yield ip
def assertServerStates(self, server):
results = []
for ip in self._get_ips(server):
results.append({'address': ip.get('addr'),
'mac_address': ip.get('%smac_addr' % self.prefix)})
self.assertEqual(ALL_IPS, sorted(results))
def test_show(self):
url = '/v2/fake/servers/%s' % UUID3
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
self.assertServerStates(self._get_server(res.body))
def test_detail(self):
url = '/v2/fake/servers/detail'
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
for _i, server in enumerate(self._get_servers(res.body)):
self.assertServerStates(server)
class ExtendedIpsMacXmlTest(ExtendedIpsMacTest):
content_type = 'application/xml'
prefix = '{%s}' % extended_ips_mac.Extended_ips_mac.namespace
def _get_server(self, body):
return etree.XML(body)
def _get_servers(self, body):
return etree.XML(body).getchildren()
def _get_ips(self, server):
for network in server.find('{%s}addresses' % xmlutil.XMLNS_V11):
for ip in network:
yield ip
| apache-2.0 |
rbu/ansible-modules-extras | source_control/bzr.py | 23 | 6484 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, André Paramés <git@andreparames.com>
# Based on the Git module by Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = u'''
---
module: bzr
author: André Paramés
version_added: "1.1"
short_description: Deploy software (or files) from bzr branches
description:
- Manage I(bzr) branches to deploy files or software.
options:
name:
required: true
aliases: [ 'parent' ]
description:
- SSH or HTTP protocol address of the parent branch.
dest:
required: true
description:
- Absolute path of where the branch should be cloned to.
version:
required: false
default: "head"
description:
- What version of the branch to clone. This can be the
bzr revno or revid.
force:
required: false
default: "no"
choices: [ 'yes', 'no' ]
description:
- If C(yes), any modified files in the working
tree will be discarded. Before 1.9 the default
value was "yes".
executable:
required: false
default: null
version_added: "1.4"
description:
- Path to bzr executable to use. If not supplied,
the normal mechanism for resolving binary paths will be used.
'''
EXAMPLES = '''
# Example bzr checkout from Ansible Playbooks
- bzr: name=bzr+ssh://foosball.example.org/path/to/branch dest=/srv/checkout version=22
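# Additional illustrative example (not from the original docs): pin a revision,
# discard local modifications, and use a specific bzr executable
- bzr: name=bzr+ssh://foosball.example.org/path/to/branch dest=/srv/checkout version=22 force=yes executable=/usr/local/bin/bzr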
'''
import re
class Bzr(object):
def __init__(self, module, parent, dest, version, bzr_path):
self.module = module
self.parent = parent
self.dest = dest
self.version = version
self.bzr_path = bzr_path
def _command(self, args_list, cwd=None, **kwargs):
(rc, out, err) = self.module.run_command([self.bzr_path] + args_list, cwd=cwd, **kwargs)
return (rc, out, err)
def get_version(self):
'''samples the version of the bzr branch'''
cmd = "%s revno" % self.bzr_path
rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest)
revno = stdout.strip()
return revno
def clone(self):
'''makes a new bzr branch if it does not already exist'''
dest_dirname = os.path.dirname(self.dest)
try:
os.makedirs(dest_dirname)
except:
pass
if self.version.lower() != 'head':
args_list = ["branch", "-r", self.version, self.parent, self.dest]
else:
args_list = ["branch", self.parent, self.dest]
return self._command(args_list, check_rc=True, cwd=dest_dirname)
def has_local_mods(self):
cmd = "%s status -S" % self.bzr_path
rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest)
lines = stdout.splitlines()
lines = filter(lambda c: not re.search('^\\?\\?.*$', c), lines)
return len(lines) > 0
def reset(self, force):
'''
Resets the index and working tree to head.
Discards any changes to tracked files in the working
tree since that commit.
'''
if not force and self.has_local_mods():
self.module.fail_json(msg="Local modifications exist in branch (force=no).")
return self._command(["revert"], check_rc=True, cwd=self.dest)
def fetch(self):
'''updates branch from remote sources'''
if self.version.lower() != 'head':
(rc, out, err) = self._command(["pull", "-r", self.version], cwd=self.dest)
else:
(rc, out, err) = self._command(["pull"], cwd=self.dest)
if rc != 0:
self.module.fail_json(msg="Failed to pull")
return (rc, out, err)
def switch_version(self):
'''once pulled, switch to a particular revno or revid'''
if self.version.lower() != 'head':
args_list = ["revert", "-r", self.version]
else:
args_list = ["revert"]
return self._command(args_list, check_rc=True, cwd=self.dest)
# ===========================================
def main():
module = AnsibleModule(
argument_spec = dict(
dest=dict(required=True),
name=dict(required=True, aliases=['parent']),
version=dict(default='head'),
force=dict(default='no', type='bool'),
executable=dict(default=None),
)
)
dest = os.path.abspath(os.path.expanduser(module.params['dest']))
parent = module.params['name']
version = module.params['version']
force = module.params['force']
bzr_path = module.params['executable'] or module.get_bin_path('bzr', True)
bzrconfig = os.path.join(dest, '.bzr', 'branch', 'branch.conf')
rc, out, err, status = (0, None, None, None)
bzr = Bzr(module, parent, dest, version, bzr_path)
# if there is no bzr configuration, do a branch operation
# else pull and switch the version
before = None
local_mods = False
if not os.path.exists(bzrconfig):
(rc, out, err) = bzr.clone()
else:
# else do a pull
local_mods = bzr.has_local_mods()
before = bzr.get_version()
(rc, out, err) = bzr.reset(force)
if rc != 0:
module.fail_json(msg=err)
(rc, out, err) = bzr.fetch()
if rc != 0:
module.fail_json(msg=err)
# switch to version specified regardless of whether
# we cloned or pulled
(rc, out, err) = bzr.switch_version()
# determine if we changed anything
after = bzr.get_version()
changed = False
if before != after or local_mods:
changed = True
module.exit_json(changed=changed, before=before, after=after)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
vlinhd11/vlinhd11-android-scripting | python/src/Lib/binhex.py | 59 | 14828 | """Macintosh binhex compression/decompression.
easy interface:
binhex(inputfilename, outputfilename)
hexbin(inputfilename, outputfilename)
"""
#
# Jack Jansen, CWI, August 1995.
#
# The module is supposed to be as compatible as possible. Especially the
# easy interface should work "as expected" on any platform.
# XXXX Note: currently, textfiles appear in mac-form on all platforms.
# We seem to lack a simple character-translate in python.
# (we should probably use ISO-Latin-1 on all but the mac platform).
# XXXX The simple routines are too simple: they expect to hold the complete
# files in-core. Should be fixed.
# XXXX It would be nice to handle AppleDouble format on unix
# (for servers serving macs).
# XXXX I don't understand what happens when you get 0x90 times the same byte on
# input. The resulting code (xx 90 90) would appear to be interpreted as an
# escaped *value* of 0x90. All coders I've seen appear to ignore this nicety...
#
import sys
import os
import struct
import binascii
__all__ = ["binhex","hexbin","Error"]
class Error(Exception):
pass
# States (what have we written)
[_DID_HEADER, _DID_DATA, _DID_RSRC] = range(3)
# Various constants
REASONABLY_LARGE=32768 # Minimal amount we pass the rle-coder
LINELEN=64
RUNCHAR=chr(0x90) # run-length introducer
#
# This code is no longer byte-order dependent
#
# Workarounds for non-mac machines.
try:
from Carbon.File import FSSpec, FInfo
from MacOS import openrf
def getfileinfo(name):
finfo = FSSpec(name).FSpGetFInfo()
dir, file = os.path.split(name)
# XXX Get resource/data sizes
fp = open(name, 'rb')
fp.seek(0, 2)
dlen = fp.tell()
fp = openrf(name, '*rb')
fp.seek(0, 2)
rlen = fp.tell()
return file, finfo, dlen, rlen
def openrsrc(name, *mode):
if not mode:
mode = '*rb'
else:
mode = '*' + mode[0]
return openrf(name, mode)
except ImportError:
#
# Glue code for non-macintosh usage
#
class FInfo:
def __init__(self):
self.Type = '????'
self.Creator = '????'
self.Flags = 0
def getfileinfo(name):
finfo = FInfo()
# Quick check for textfile
fp = open(name)
data = open(name).read(256)
for c in data:
if not c.isspace() and (c<' ' or ord(c) > 0x7f):
break
else:
finfo.Type = 'TEXT'
fp.seek(0, 2)
dsize = fp.tell()
fp.close()
dir, file = os.path.split(name)
file = file.replace(':', '-', 1)
return file, finfo, dsize, 0
class openrsrc:
def __init__(self, *args):
pass
def read(self, *args):
return ''
def write(self, *args):
pass
def close(self):
pass
class _Hqxcoderengine:
"""Write data to the coder in 3-byte chunks"""
def __init__(self, ofp):
self.ofp = ofp
self.data = ''
self.hqxdata = ''
self.linelen = LINELEN-1
def write(self, data):
self.data = self.data + data
datalen = len(self.data)
todo = (datalen//3)*3
data = self.data[:todo]
self.data = self.data[todo:]
if not data:
return
self.hqxdata = self.hqxdata + binascii.b2a_hqx(data)
self._flush(0)
def _flush(self, force):
first = 0
while first <= len(self.hqxdata)-self.linelen:
last = first + self.linelen
self.ofp.write(self.hqxdata[first:last]+'\n')
self.linelen = LINELEN
first = last
self.hqxdata = self.hqxdata[first:]
if force:
self.ofp.write(self.hqxdata + ':\n')
def close(self):
if self.data:
self.hqxdata = \
self.hqxdata + binascii.b2a_hqx(self.data)
self._flush(1)
self.ofp.close()
del self.ofp
class _Rlecoderengine:
"""Write data to the RLE-coder in suitably large chunks"""
def __init__(self, ofp):
self.ofp = ofp
self.data = ''
def write(self, data):
self.data = self.data + data
if len(self.data) < REASONABLY_LARGE:
return
rledata = binascii.rlecode_hqx(self.data)
self.ofp.write(rledata)
self.data = ''
def close(self):
if self.data:
rledata = binascii.rlecode_hqx(self.data)
self.ofp.write(rledata)
self.ofp.close()
del self.ofp
class BinHex:
def __init__(self, (name, finfo, dlen, rlen), ofp):
if type(ofp) == type(''):
ofname = ofp
ofp = open(ofname, 'w')
if os.name == 'mac':
fss = FSSpec(ofname)
fss.SetCreatorType('BnHq', 'TEXT')
ofp.write('(This file must be converted with BinHex 4.0)\n\n:')
hqxer = _Hqxcoderengine(ofp)
self.ofp = _Rlecoderengine(hqxer)
self.crc = 0
if finfo is None:
finfo = FInfo()
self.dlen = dlen
self.rlen = rlen
self._writeinfo(name, finfo)
self.state = _DID_HEADER
def _writeinfo(self, name, finfo):
nl = len(name)
if nl > 63:
raise Error, 'Filename too long'
d = chr(nl) + name + '\0'
d2 = finfo.Type + finfo.Creator
# Force all structs to be packed with big-endian
d3 = struct.pack('>h', finfo.Flags)
d4 = struct.pack('>ii', self.dlen, self.rlen)
info = d + d2 + d3 + d4
self._write(info)
self._writecrc()
def _write(self, data):
self.crc = binascii.crc_hqx(data, self.crc)
self.ofp.write(data)
def _writecrc(self):
# XXXX Should this be here??
# self.crc = binascii.crc_hqx('\0\0', self.crc)
if self.crc < 0:
fmt = '>h'
else:
fmt = '>H'
self.ofp.write(struct.pack(fmt, self.crc))
self.crc = 0
def write(self, data):
if self.state != _DID_HEADER:
raise Error, 'Writing data at the wrong time'
self.dlen = self.dlen - len(data)
self._write(data)
def close_data(self):
if self.dlen != 0:
raise Error, 'Incorrect data size, diff=%r' % (self.dlen,)
self._writecrc()
self.state = _DID_DATA
def write_rsrc(self, data):
if self.state < _DID_DATA:
self.close_data()
if self.state != _DID_DATA:
raise Error, 'Writing resource data at the wrong time'
self.rlen = self.rlen - len(data)
self._write(data)
def close(self):
if self.state < _DID_DATA:
self.close_data()
if self.state != _DID_DATA:
raise Error, 'Close at the wrong time'
if self.rlen != 0:
raise Error, \
"Incorrect resource-datasize, diff=%r" % (self.rlen,)
self._writecrc()
self.ofp.close()
self.state = None
del self.ofp
def binhex(inp, out):
"""(infilename, outfilename) - Create binhex-encoded copy of a file"""
finfo = getfileinfo(inp)
ofp = BinHex(finfo, out)
ifp = open(inp, 'rb')
# XXXX Do textfile translation on non-mac systems
while 1:
d = ifp.read(128000)
if not d: break
ofp.write(d)
ofp.close_data()
ifp.close()
ifp = openrsrc(inp, 'rb')
while 1:
d = ifp.read(128000)
if not d: break
ofp.write_rsrc(d)
ofp.close()
ifp.close()
class _Hqxdecoderengine:
"""Read data via the decoder in 4-byte chunks"""
def __init__(self, ifp):
self.ifp = ifp
self.eof = 0
def read(self, totalwtd):
"""Read at least wtd bytes (or until EOF)"""
decdata = ''
wtd = totalwtd
#
# The loop here is convoluted, since we don't really know how
# much to decode: there may be newlines in the incoming data.
while wtd > 0:
if self.eof: return decdata
wtd = ((wtd+2)//3)*4
data = self.ifp.read(wtd)
#
# Next problem: there may not be a complete number of
# bytes in what we pass to a2b. Solve by yet another
# loop.
#
while 1:
try:
decdatacur, self.eof = \
binascii.a2b_hqx(data)
break
except binascii.Incomplete:
pass
newdata = self.ifp.read(1)
if not newdata:
raise Error, \
'Premature EOF on binhex file'
data = data + newdata
decdata = decdata + decdatacur
wtd = totalwtd - len(decdata)
if not decdata and not self.eof:
raise Error, 'Premature EOF on binhex file'
return decdata
def close(self):
self.ifp.close()
class _Rledecoderengine:
"""Read data via the RLE-coder"""
def __init__(self, ifp):
self.ifp = ifp
self.pre_buffer = ''
self.post_buffer = ''
self.eof = 0
def read(self, wtd):
if wtd > len(self.post_buffer):
self._fill(wtd-len(self.post_buffer))
rv = self.post_buffer[:wtd]
self.post_buffer = self.post_buffer[wtd:]
return rv
def _fill(self, wtd):
self.pre_buffer = self.pre_buffer + self.ifp.read(wtd+4)
if self.ifp.eof:
self.post_buffer = self.post_buffer + \
binascii.rledecode_hqx(self.pre_buffer)
self.pre_buffer = ''
return
#
# Obfuscated code ahead. We have to take care that we don't
# end up with an orphaned RUNCHAR later on. So, we keep a couple
# of bytes in the buffer, depending on what the end of
# the buffer looks like:
# '\220\0\220' - Keep 3 bytes: repeated \220 (escaped as \220\0)
# '?\220' - Keep 2 bytes: repeated something-else
# '\220\0' - Escaped \220: Keep 2 bytes.
# '?\220?' - Complete repeat sequence: decode all
# otherwise: keep 1 byte.
#
mark = len(self.pre_buffer)
if self.pre_buffer[-3:] == RUNCHAR + '\0' + RUNCHAR:
mark = mark - 3
elif self.pre_buffer[-1] == RUNCHAR:
mark = mark - 2
elif self.pre_buffer[-2:] == RUNCHAR + '\0':
mark = mark - 2
elif self.pre_buffer[-2] == RUNCHAR:
pass # Decode all
else:
mark = mark - 1
self.post_buffer = self.post_buffer + \
binascii.rledecode_hqx(self.pre_buffer[:mark])
self.pre_buffer = self.pre_buffer[mark:]
def close(self):
self.ifp.close()
class HexBin:
def __init__(self, ifp):
if type(ifp) == type(''):
ifp = open(ifp)
#
# Find initial colon.
#
while 1:
ch = ifp.read(1)
if not ch:
raise Error, "No binhex data found"
# Cater for \r\n terminated lines (which show up as \n\r, hence
# all lines start with \r)
if ch == '\r':
continue
if ch == ':':
break
if ch != '\n':
dummy = ifp.readline()
hqxifp = _Hqxdecoderengine(ifp)
self.ifp = _Rledecoderengine(hqxifp)
self.crc = 0
self._readheader()
def _read(self, len):
data = self.ifp.read(len)
self.crc = binascii.crc_hqx(data, self.crc)
return data
def _checkcrc(self):
filecrc = struct.unpack('>h', self.ifp.read(2))[0] & 0xffff
#self.crc = binascii.crc_hqx('\0\0', self.crc)
# XXXX Is this needed??
self.crc = self.crc & 0xffff
if filecrc != self.crc:
raise Error, 'CRC error, computed %x, read %x' \
%(self.crc, filecrc)
self.crc = 0
def _readheader(self):
len = self._read(1)
fname = self._read(ord(len))
rest = self._read(1+4+4+2+4+4)
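# header layout after the name: version(1) + type(4) + creator(4)
# + flags(2) + data-fork length(4) + resource-fork length(4)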
self._checkcrc()
type = rest[1:5]
creator = rest[5:9]
flags = struct.unpack('>h', rest[9:11])[0]
self.dlen = struct.unpack('>l', rest[11:15])[0]
self.rlen = struct.unpack('>l', rest[15:19])[0]
self.FName = fname
self.FInfo = FInfo()
self.FInfo.Creator = creator
self.FInfo.Type = type
self.FInfo.Flags = flags
self.state = _DID_HEADER
def read(self, *n):
if self.state != _DID_HEADER:
raise Error, 'Read data at wrong time'
if n:
n = n[0]
n = min(n, self.dlen)
else:
n = self.dlen
rv = ''
while len(rv) < n:
rv = rv + self._read(n-len(rv))
self.dlen = self.dlen - n
return rv
def close_data(self):
if self.state != _DID_HEADER:
raise Error, 'close_data at wrong time'
if self.dlen:
dummy = self._read(self.dlen)
self._checkcrc()
self.state = _DID_DATA
def read_rsrc(self, *n):
if self.state == _DID_HEADER:
self.close_data()
if self.state != _DID_DATA:
raise Error, 'Read resource data at wrong time'
if n:
n = n[0]
n = min(n, self.rlen)
else:
n = self.rlen
self.rlen = self.rlen - n
return self._read(n)
def close(self):
if self.rlen:
dummy = self.read_rsrc(self.rlen)
self._checkcrc()
self.state = _DID_RSRC
self.ifp.close()
def hexbin(inp, out):
"""(infilename, outfilename) - Decode binhexed file"""
ifp = HexBin(inp)
finfo = ifp.FInfo
if not out:
out = ifp.FName
if os.name == 'mac':
ofss = FSSpec(out)
out = ofss.as_pathname()
ofp = open(out, 'wb')
# XXXX Do translation on non-mac systems
while 1:
d = ifp.read(128000)
if not d: break
ofp.write(d)
ofp.close()
ifp.close_data()
d = ifp.read_rsrc(128000)
if d:
ofp = openrsrc(out, 'wb')
ofp.write(d)
while 1:
d = ifp.read_rsrc(128000)
if not d: break
ofp.write(d)
ofp.close()
if os.name == 'mac':
nfinfo = ofss.GetFInfo()
nfinfo.Creator = finfo.Creator
nfinfo.Type = finfo.Type
nfinfo.Flags = finfo.Flags
ofss.SetFInfo(nfinfo)
ifp.close()
def _test():
fname = sys.argv[1]
binhex(fname, fname+'.hqx')
hexbin(fname+'.hqx', fname+'.viahqx')
#hexbin(fname, fname+'.unpacked')
sys.exit(1)
if __name__ == '__main__':
_test()
| apache-2.0 |
jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/numpy/lib/index_tricks.py | 66 | 26204 | from __future__ import division, absolute_import, print_function
import sys
import math
import numpy.core.numeric as _nx
from numpy.core.numeric import (
asarray, ScalarType, array, alltrue, cumprod, arange
)
from numpy.core.numerictypes import find_common_type, issubdtype
from . import function_base
import numpy.matrixlib as matrix
from .function_base import diff
from numpy.core.multiarray import ravel_multi_index, unravel_index
from numpy.lib.stride_tricks import as_strided
makemat = matrix.matrix
__all__ = [
'ravel_multi_index', 'unravel_index', 'mgrid', 'ogrid', 'r_', 'c_',
's_', 'index_exp', 'ix_', 'ndenumerate', 'ndindex', 'fill_diagonal',
'diag_indices', 'diag_indices_from'
]
def ix_(*args):
"""
Construct an open mesh from multiple sequences.
This function takes N 1-D sequences and returns N outputs with N
dimensions each, such that the shape is 1 in all but one dimension
and the dimension with the non-unit shape value cycles through all
N dimensions.
Using `ix_` one can quickly construct index arrays that will index
the cross product. ``a[np.ix_([1,3],[2,5])]`` returns the array
``[[a[1,2] a[1,5]], [a[3,2] a[3,5]]]``.
Parameters
----------
args : 1-D sequences
Returns
-------
out : tuple of ndarrays
N arrays with N dimensions each, with N the number of input
sequences. Together these arrays form an open mesh.
See Also
--------
ogrid, mgrid, meshgrid
Examples
--------
>>> a = np.arange(10).reshape(2, 5)
>>> a
array([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]])
>>> ixgrid = np.ix_([0,1], [2,4])
>>> ixgrid
(array([[0],
[1]]), array([[2, 4]]))
>>> ixgrid[0].shape, ixgrid[1].shape
((2, 1), (1, 2))
>>> a[ixgrid]
array([[2, 4],
[7, 9]])
"""
out = []
nd = len(args)
for k, new in enumerate(args):
new = asarray(new)
if new.ndim != 1:
raise ValueError("Cross index must be 1 dimensional")
if new.size == 0:
# Explicitly type empty arrays to avoid float default
new = new.astype(_nx.intp)
if issubdtype(new.dtype, _nx.bool_):
new, = new.nonzero()
new = new.reshape((1,)*k + (new.size,) + (1,)*(nd-k-1))
out.append(new)
return tuple(out)
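# A small sketch beyond the docstring examples (assuming numpy is imported
# as np): the code above reduces boolean sequences via nonzero(), so
#   np.ix_([True, False, True], [1, 2])
# selects rows 0 and 2 -- the same as np.ix_([0, 2], [1, 2]).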
class nd_grid(object):
"""
Construct a multi-dimensional "meshgrid".
``grid = nd_grid()`` creates an instance which will return a mesh-grid
when indexed. The dimension and number of the output arrays are equal
to the number of indexing dimensions. If the step length is not a
complex number, then the stop is not inclusive.
However, if the step length is a **complex number** (e.g. 5j), then the
integer part of its magnitude is interpreted as specifying the
number of points to create between the start and stop values, where
the stop value **is inclusive**.
If instantiated with an argument of ``sparse=True``, the mesh-grid is
open (or not fleshed out) so that only one dimension of each returned
argument is greater than 1.
Parameters
----------
sparse : bool, optional
Whether the grid is sparse or not. Default is False.
Notes
-----
Two instances of `nd_grid` are made available in the NumPy namespace,
`mgrid` and `ogrid`::
mgrid = nd_grid(sparse=False)
ogrid = nd_grid(sparse=True)
Users should use these pre-defined instances instead of using `nd_grid`
directly.
Examples
--------
>>> mgrid = np.lib.index_tricks.nd_grid()
>>> mgrid[0:5,0:5]
array([[[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[2, 2, 2, 2, 2],
[3, 3, 3, 3, 3],
[4, 4, 4, 4, 4]],
[[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4]]])
>>> mgrid[-1:1:5j]
array([-1. , -0.5, 0. , 0.5, 1. ])
>>> ogrid = np.lib.index_tricks.nd_grid(sparse=True)
>>> ogrid[0:5,0:5]
[array([[0],
[1],
[2],
[3],
[4]]), array([[0, 1, 2, 3, 4]])]
"""
def __init__(self, sparse=False):
self.sparse = sparse
def __getitem__(self, key):
try:
size = []
typ = int
for k in range(len(key)):
step = key[k].step
start = key[k].start
if start is None:
start = 0
if step is None:
step = 1
if isinstance(step, complex):
size.append(int(abs(step)))
typ = float
else:
size.append(
int(math.ceil((key[k].stop - start)/(step*1.0))))
if (isinstance(step, float) or
isinstance(start, float) or
isinstance(key[k].stop, float)):
typ = float
if self.sparse:
nn = [_nx.arange(_x, dtype=_t)
for _x, _t in zip(size, (typ,)*len(size))]
else:
nn = _nx.indices(size, typ)
for k in range(len(size)):
step = key[k].step
start = key[k].start
if start is None:
start = 0
if step is None:
step = 1
if isinstance(step, complex):
step = int(abs(step))
if step != 1:
step = (key[k].stop - start)/float(step-1)
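# a complex step of nj asked for n points with an inclusive stop,
# so spread (stop - start) over n-1 gaps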
nn[k] = (nn[k]*step+start)
if self.sparse:
slobj = [_nx.newaxis]*len(size)
for k in range(len(size)):
slobj[k] = slice(None, None)
nn[k] = nn[k][slobj]
slobj[k] = _nx.newaxis
return nn
except (IndexError, TypeError):
step = key.step
stop = key.stop
start = key.start
if start is None:
start = 0
if isinstance(step, complex):
step = abs(step)
length = int(step)
if step != 1:
step = (key.stop-start)/float(step-1)
stop = key.stop + step
return _nx.arange(0, length, 1, float)*step + start
else:
return _nx.arange(start, stop, step)
def __getslice__(self, i, j):
return _nx.arange(i, j)
def __len__(self):
return 0
mgrid = nd_grid(sparse=False)
ogrid = nd_grid(sparse=True)
mgrid.__doc__ = None # set in numpy.add_newdocs
ogrid.__doc__ = None # set in numpy.add_newdocs
class AxisConcatenator(object):
"""
Translates slice objects to concatenation along an axis.
For detailed documentation on usage, see `r_`.
"""
def _retval(self, res):
if self.matrix:
oldndim = res.ndim
res = makemat(res)
if oldndim == 1 and self.col:
res = res.T
self.axis = self._axis
self.matrix = self._matrix
self.col = 0
return res
def __init__(self, axis=0, matrix=False, ndmin=1, trans1d=-1):
self._axis = axis
self._matrix = matrix
self.axis = axis
self.matrix = matrix
self.col = 0
self.trans1d = trans1d
self.ndmin = ndmin
def __getitem__(self, key):
trans1d = self.trans1d
ndmin = self.ndmin
if isinstance(key, str):
frame = sys._getframe().f_back
mymat = matrix.bmat(key, frame.f_globals, frame.f_locals)
return mymat
if not isinstance(key, tuple):
key = (key,)
objs = []
scalars = []
arraytypes = []
scalartypes = []
for k in range(len(key)):
scalar = False
if isinstance(key[k], slice):
step = key[k].step
start = key[k].start
stop = key[k].stop
if start is None:
start = 0
if step is None:
step = 1
if isinstance(step, complex):
size = int(abs(step))
newobj = function_base.linspace(start, stop, num=size)
else:
newobj = _nx.arange(start, stop, step)
if ndmin > 1:
newobj = array(newobj, copy=False, ndmin=ndmin)
if trans1d != -1:
newobj = newobj.swapaxes(-1, trans1d)
elif isinstance(key[k], str):
if k != 0:
raise ValueError("special directives must be the "
"first entry.")
key0 = key[0]
if key0 in 'rc':
self.matrix = True
self.col = (key0 == 'c')
continue
if ',' in key0:
vec = key0.split(',')
try:
self.axis, ndmin = \
[int(x) for x in vec[:2]]
if len(vec) == 3:
trans1d = int(vec[2])
continue
except (ValueError, TypeError):
raise ValueError("unknown special directive")
try:
self.axis = int(key[k])
continue
except (ValueError, TypeError):
raise ValueError("unknown special directive")
elif type(key[k]) in ScalarType:
newobj = array(key[k], ndmin=ndmin)
scalars.append(k)
scalar = True
scalartypes.append(newobj.dtype)
else:
newobj = key[k]
if ndmin > 1:
tempobj = array(newobj, copy=False, subok=True)
newobj = array(newobj, copy=False, subok=True,
ndmin=ndmin)
if trans1d != -1 and tempobj.ndim < ndmin:
k2 = ndmin-tempobj.ndim
if (trans1d < 0):
trans1d += k2 + 1
defaxes = list(range(ndmin))
k1 = trans1d
axes = defaxes[:k1] + defaxes[k2:] + \
defaxes[k1:k2]
newobj = newobj.transpose(axes)
del tempobj
objs.append(newobj)
if not scalar and isinstance(newobj, _nx.ndarray):
arraytypes.append(newobj.dtype)
# Ensure that scalars won't up-cast unless warranted
final_dtype = find_common_type(arraytypes, scalartypes)
if final_dtype is not None:
for k in scalars:
objs[k] = objs[k].astype(final_dtype)
res = _nx.concatenate(tuple(objs), axis=self.axis)
return self._retval(res)
def __getslice__(self, i, j):
res = _nx.arange(i, j)
return self._retval(res)
def __len__(self):
return 0
# separate classes are used here instead of just making r_ = concatenator(0),
# etc. because otherwise we couldn't get the doc string to come out right
# in help(r_)
class RClass(AxisConcatenator):
"""
Translates slice objects to concatenation along the first axis.
This is a simple way to build up arrays quickly. There are two use cases.
1. If the index expression contains comma separated arrays, then stack
them along their first axis.
2. If the index expression contains slice notation or scalars then create
a 1-D array with a range indicated by the slice notation.
If slice notation is used, the syntax ``start:stop:step`` is equivalent
to ``np.arange(start, stop, step)`` inside of the brackets. However, if
``step`` is an imaginary number (e.g. 100j) then its integer portion is
interpreted as a number-of-points desired and the start and stop are
inclusive. In other words ``start:stop:stepj`` is interpreted as
``np.linspace(start, stop, step, endpoint=1)`` inside of the brackets.
After expansion of slice notation, all comma separated sequences are
concatenated together.
Optional character strings placed as the first element of the index
expression can be used to change the output. The strings 'r' or 'c' result
in matrix output. If the result is 1-D and 'r' is specified a 1 x N (row)
matrix is produced. If the result is 1-D and 'c' is specified, then an N x 1
(column) matrix is produced. If the result is 2-D then both provide the
same matrix result.
A string integer specifies which axis to stack multiple comma separated
arrays along. A string of two comma-separated integers allows indication
of the minimum number of dimensions to force each entry into as the
second integer (the axis to concatenate along is still the first integer).
A string with three comma-separated integers allows specification of the
axis to concatenate along, the minimum number of dimensions to force the
entries to, and which axis should contain the start of the arrays which
are less than the specified number of dimensions. In other words the third
integer allows you to specify where the 1's should be placed in the shape
of the arrays that have their shapes upgraded. By default, they are placed
in the front of the shape tuple. The third argument allows you to specify
where the start of the array should be instead. Thus, a third argument of
'0' would place the 1's at the end of the array shape. Negative integers
specify where in the new shape tuple the last dimension of upgraded arrays
should be placed, so the default is '-1'.
Parameters
----------
Not a function, so takes no parameters
Returns
-------
A concatenated ndarray or matrix.
See Also
--------
concatenate : Join a sequence of arrays along an existing axis.
c_ : Translates slice objects to concatenation along the second axis.
Examples
--------
>>> np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])]
array([1, 2, 3, 0, 0, 4, 5, 6])
>>> np.r_[-1:1:6j, [0]*3, 5, 6]
array([-1. , -0.6, -0.2, 0.2, 0.6, 1. , 0. , 0. , 0. , 5. , 6. ])
String integers specify the axis to concatenate along or the minimum
number of dimensions to force entries into.
>>> a = np.array([[0, 1, 2], [3, 4, 5]])
>>> np.r_['-1', a, a] # concatenate along last axis
array([[0, 1, 2, 0, 1, 2],
[3, 4, 5, 3, 4, 5]])
>>> np.r_['0,2', [1,2,3], [4,5,6]] # concatenate along first axis, dim>=2
array([[1, 2, 3],
[4, 5, 6]])
>>> np.r_['0,2,0', [1,2,3], [4,5,6]]
array([[1],
[2],
[3],
[4],
[5],
[6]])
>>> np.r_['1,2,0', [1,2,3], [4,5,6]]
array([[1, 4],
[2, 5],
[3, 6]])
Using 'r' or 'c' as a first string argument creates a matrix.
>>> np.r_['r',[1,2,3], [4,5,6]]
matrix([[1, 2, 3, 4, 5, 6]])
"""
def __init__(self):
AxisConcatenator.__init__(self, 0)
r_ = RClass()
class CClass(AxisConcatenator):
"""
Translates slice objects to concatenation along the second axis.
This is short-hand for ``np.r_['-1,2,0', index expression]``, which is
useful because of its common occurrence. In particular, arrays will be
stacked along their last axis after being upgraded to at least 2-D with
1's post-pended to the shape (column vectors made out of 1-D arrays).
For detailed documentation, see `r_`.
Examples
--------
>>> np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])]
array([[1, 2, 3, 0, 0, 4, 5, 6]])
"""
def __init__(self):
AxisConcatenator.__init__(self, -1, ndmin=2, trans1d=0)
c_ = CClass()
class ndenumerate(object):
"""
Multidimensional index iterator.
Return an iterator yielding pairs of array coordinates and values.
Parameters
----------
arr : ndarray
Input array.
See Also
--------
ndindex, flatiter
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> for index, x in np.ndenumerate(a):
... print(index, x)
(0, 0) 1
(0, 1) 2
(1, 0) 3
(1, 1) 4
"""
def __init__(self, arr):
self.iter = asarray(arr).flat
def __next__(self):
"""
Standard iterator method, returns the index tuple and array value.
Returns
-------
coords : tuple of ints
The indices of the current iteration.
val : scalar
The array element of the current iteration.
"""
return self.iter.coords, next(self.iter)
def __iter__(self):
return self
next = __next__
class ndindex(object):
"""
An N-dimensional iterator object to index arrays.
Given the shape of an array, an `ndindex` instance iterates over
the N-dimensional index of the array. At each iteration a tuple
of indices is returned, the last dimension is iterated over first.
Parameters
----------
`*args` : ints
The size of each dimension of the array.
See Also
--------
ndenumerate, flatiter
Examples
--------
>>> for index in np.ndindex(3, 2, 1):
... print(index)
(0, 0, 0)
(0, 1, 0)
(1, 0, 0)
(1, 1, 0)
(2, 0, 0)
(2, 1, 0)
"""
def __init__(self, *shape):
if len(shape) == 1 and isinstance(shape[0], tuple):
shape = shape[0]
x = as_strided(_nx.zeros(1), shape=shape,
strides=_nx.zeros_like(shape))
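# the zero-strided dummy view gives nditer the requested shape
# without allocating prod(shape) elements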
self._it = _nx.nditer(x, flags=['multi_index', 'zerosize_ok'],
order='C')
def __iter__(self):
return self
def ndincr(self):
"""
Increment the multi-dimensional index by one.
This method is for backward compatibility only: do not use.
"""
next(self)
def __next__(self):
"""
Standard iterator method, updates the index and returns the index
tuple.
Returns
-------
val : tuple of ints
Returns a tuple containing the indices of the current
iteration.
"""
next(self._it)
return self._it.multi_index
next = __next__
# You can do all this with slice() plus a few special objects,
# but there's a lot to remember. This version is simpler because
# it uses the standard array indexing syntax.
#
# Written by Konrad Hinsen <hinsen@cnrs-orleans.fr>
# last revision: 1999-7-23
#
# Cosmetic changes by T. Oliphant 2001
#
#
class IndexExpression(object):
"""
A nicer way to build up index tuples for arrays.
.. note::
Use one of the two predefined instances `index_exp` or `s_`
rather than directly using `IndexExpression`.
For any index combination, including slicing and axis insertion,
``a[indices]`` is the same as ``a[np.index_exp[indices]]`` for any
array `a`. However, ``np.index_exp[indices]`` can be used anywhere
in Python code and returns a tuple of slice objects that can be
used in the construction of complex index expressions.
Parameters
----------
maketuple : bool
If True, always returns a tuple.
See Also
--------
index_exp : Predefined instance that always returns a tuple:
`index_exp = IndexExpression(maketuple=True)`.
s_ : Predefined instance without tuple conversion:
`s_ = IndexExpression(maketuple=False)`.
Notes
-----
You can do all this with `slice()` plus a few special objects,
but there's a lot to remember and this version is simpler because
it uses the standard array indexing syntax.
Examples
--------
>>> np.s_[2::2]
slice(2, None, 2)
>>> np.index_exp[2::2]
(slice(2, None, 2),)
>>> np.array([0, 1, 2, 3, 4])[np.s_[2::2]]
array([2, 4])
"""
def __init__(self, maketuple):
self.maketuple = maketuple
def __getitem__(self, item):
if self.maketuple and not isinstance(item, tuple):
return (item,)
else:
return item
index_exp = IndexExpression(maketuple=True)
s_ = IndexExpression(maketuple=False)
# End contribution from Konrad.
# The following functions complement those in twodim_base, but are
# applicable to N-dimensions.
def fill_diagonal(a, val, wrap=False):
"""Fill the main diagonal of the given array of any dimensionality.
For an array `a` with ``a.ndim > 2``, the diagonal is the list of
locations with indices ``a[i, i, ..., i]`` all identical. This function
modifies the input array in-place; it does not return a value.
Parameters
----------
a : array, at least 2-D.
Array whose diagonal is to be filled, it gets modified in-place.
val : scalar
Value to be written on the diagonal, its type must be compatible with
that of the array a.
wrap : bool
For tall matrices in NumPy versions up to 1.6.2, the
diagonal "wrapped" after N columns. You can have this behavior
with this option. This affects only tall matrices.
See also
--------
diag_indices, diag_indices_from
Notes
-----
.. versionadded:: 1.4.0
This functionality can be obtained via `diag_indices`, but internally
this version uses a much faster implementation that never constructs the
indices and uses simple slicing.
Examples
--------
>>> a = np.zeros((3, 3), int)
>>> np.fill_diagonal(a, 5)
>>> a
array([[5, 0, 0],
[0, 5, 0],
[0, 0, 5]])
The same function can operate on a 4-D array:
>>> a = np.zeros((3, 3, 3, 3), int)
>>> np.fill_diagonal(a, 4)
We only show a few blocks for clarity:
>>> a[0, 0]
array([[4, 0, 0],
[0, 0, 0],
[0, 0, 0]])
>>> a[1, 1]
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 0]])
>>> a[2, 2]
array([[0, 0, 0],
[0, 0, 0],
[0, 0, 4]])
The wrap option affects only tall matrices:
>>> # tall matrices no wrap
>>> a = np.zeros((5, 3),int)
>>> np.fill_diagonal(a, 4)
>>> a
array([[4, 0, 0],
[0, 4, 0],
[0, 0, 4],
[0, 0, 0],
[0, 0, 0]])
>>> # tall matrices wrap
>>> a = np.zeros((5, 3),int)
>>> np.fill_diagonal(a, 4, wrap=True)
>>> a
array([[4, 0, 0],
[0, 4, 0],
[0, 0, 4],
[0, 0, 0],
[4, 0, 0]])
>>> # wide matrices
>>> a = np.zeros((3, 5),int)
>>> np.fill_diagonal(a, 4, wrap=True)
>>> a
array([[4, 0, 0, 0, 0],
[0, 4, 0, 0, 0],
[0, 0, 4, 0, 0]])
"""
if a.ndim < 2:
raise ValueError("array must be at least 2-d")
end = None
if a.ndim == 2:
# Explicit, fast formula for the common case. For 2-d arrays, we
# accept rectangular ones.
step = a.shape[1] + 1
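# e.g. for a (3, 4) array step is 5, so flat indices 0, 5, 10
# land on (0, 0), (1, 1), (2, 2)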
# This is needed so that tall matrices don't have the diagonal wrap.
if not wrap:
end = a.shape[1] * a.shape[1]
else:
# For more than d=2, the strided formula is only valid for arrays with
# all dimensions equal, so we check first.
if not alltrue(diff(a.shape) == 0):
raise ValueError("All dimensions of input must be of equal length")
step = 1 + (cumprod(a.shape[:-1])).sum()
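# e.g. for shape (3, 3, 3) step is 1 + 3 + 9 = 13, so flat indices
# 0, 13, 26 hit a[0, 0, 0], a[1, 1, 1], a[2, 2, 2]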
# Write the value out into the diagonal.
a.flat[:end:step] = val
def diag_indices(n, ndim=2):
"""
Return the indices to access the main diagonal of an array.
This returns a tuple of indices that can be used to access the main
diagonal of an array `a` with ``a.ndim >= 2`` dimensions and shape
(n, n, ..., n). For ``a.ndim = 2`` this is the usual diagonal, for
``a.ndim > 2`` this is the set of indices to access ``a[i, i, ..., i]``
for ``i = [0..n-1]``.
Parameters
----------
n : int
The size, along each dimension, of the arrays for which the returned
indices can be used.
ndim : int, optional
The number of dimensions.
See also
--------
diag_indices_from
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Create a set of indices to access the diagonal of a (4, 4) array:
>>> di = np.diag_indices(4)
>>> di
(array([0, 1, 2, 3]), array([0, 1, 2, 3]))
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> a[di] = 100
>>> a
array([[100, 1, 2, 3],
[ 4, 100, 6, 7],
[ 8, 9, 100, 11],
[ 12, 13, 14, 100]])
Now, we create indices to manipulate a 3-D array:
>>> d3 = np.diag_indices(2, 3)
>>> d3
(array([0, 1]), array([0, 1]), array([0, 1]))
And use it to set the diagonal of an array of zeros to 1:
>>> a = np.zeros((2, 2, 2), dtype=np.int)
>>> a[d3] = 1
>>> a
array([[[1, 0],
[0, 0]],
[[0, 0],
[0, 1]]])
"""
idx = arange(n)
return (idx,) * ndim
def diag_indices_from(arr):
"""
Return the indices to access the main diagonal of an n-dimensional array.
See `diag_indices` for full details.
Parameters
----------
arr : array, at least 2-D
See Also
--------
diag_indices
Notes
-----
.. versionadded:: 1.4.0
"""
if not arr.ndim >= 2:
raise ValueError("input array must be at least 2-d")
# For more than d=2, the strided formula is only valid for arrays with
# all dimensions equal, so we check first.
if not alltrue(diff(arr.shape) == 0):
raise ValueError("All dimensions of input must be of equal length")
return diag_indices(arr.shape[0], arr.ndim)
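# A short usage sketch (assuming numpy is imported as np):
#   a = np.arange(9).reshape(3, 3)
#   a[np.diag_indices_from(a)] = 0   # zero the main diagonal in-place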
| mit |
srene/ns-3-inrpp | src/mpi/bindings/modulegen__gcc_LP64.py | 38 | 160936 | from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
def handle_error(self, wrapper, exception, traceback_):
warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
return True
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
root_module = Module('ns.mpi', cpp_namespace='::ns3')
return root_module
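# Sketch of how ns-3's build typically drives a modulegen script like this
# one (a later register_functions() pass and the stdout sink are assumptions
# for illustration -- the real build supplies its own output sink):
#   out = FileCodeSink(sys.stdout)
#   root_module = module_init()
#   register_types(root_module)
#   register_methods(root_module)
#   root_module.generate(out)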
def register_types(module):
root_module = module.get_root()
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
module.add_class('AttributeConstructionList', import_from_module='ns.core')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
## buffer.h (module 'network'): ns3::Buffer [class]
module.add_class('Buffer', import_from_module='ns.network')
## buffer.h (module 'network'): ns3::Buffer::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
## packet.h (module 'network'): ns3::ByteTagIterator [class]
module.add_class('ByteTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::ByteTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList [class]
module.add_class('ByteTagList', import_from_module='ns.network')
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase', import_from_module='ns.core')
## hash.h (module 'core'): ns3::Hasher [class]
module.add_class('Hasher', import_from_module='ns.core')
## mpi-interface.h (module 'mpi'): ns3::MpiInterface [class]
module.add_class('MpiInterface')
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
## object.h (module 'core'): ns3::ObjectDeleter [struct]
module.add_class('ObjectDeleter', import_from_module='ns.core')
## packet-metadata.h (module 'network'): ns3::PacketMetadata [class]
module.add_class('PacketMetadata', import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [enumeration]
module.add_enum('', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class]
module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet.h (module 'network'): ns3::PacketTagIterator [class]
module.add_class('PacketTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::PacketTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator'])
## packet-tag-list.h (module 'network'): ns3::PacketTagList [class]
module.add_class('PacketTagList', import_from_module='ns.network')
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct]
module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList'])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData_e [enumeration]
module.add_enum('TagData_e', ['MAX_SIZE'], outer_class=root_module['ns3::PacketTagList::TagData'], import_from_module='ns.network')
## parallel-communication-interface.h (module 'mpi'): ns3::ParallelCommunicationInterface [class]
module.add_class('ParallelCommunicationInterface', allow_subclassing=True)
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## tag.h (module 'network'): ns3::Tag [class]
module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
## tag-buffer.h (module 'network'): ns3::TagBuffer [class]
module.add_class('TagBuffer', import_from_module='ns.network')
## nstime.h (module 'core'): ns3::TimeWithUnit [class]
module.add_class('TimeWithUnit', import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId [class]
module.add_class('TypeId', import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## empty.h (module 'core'): ns3::empty [class]
module.add_class('empty', import_from_module='ns.core')
## int64x64-double.h (module 'core'): ns3::int64x64_t [class]
module.add_class('int64x64_t', import_from_module='ns.core')
## int64x64-double.h (module 'core'): ns3::int64x64_t::impl_type [enumeration]
module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core')
## chunk.h (module 'network'): ns3::Chunk [class]
module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
## header.h (module 'network'): ns3::Header [class]
module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
## object.h (module 'core'): ns3::Object [class]
module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
## object.h (module 'core'): ns3::Object::AggregateIterator [class]
module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## nstime.h (module 'core'): ns3::Time [class]
module.add_class('Time', import_from_module='ns.core')
## nstime.h (module 'core'): ns3::Time::Unit [enumeration]
module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
## nstime.h (module 'core'): ns3::Time [class]
root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
## trailer.h (module 'network'): ns3::Trailer [class]
module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
## attribute.h (module 'core'): ns3::AttributeAccessor [class]
module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeChecker [class]
module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
## attribute.h (module 'core'): ns3::AttributeValue [class]
module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
## callback.h (module 'core'): ns3::CallbackChecker [class]
module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## callback.h (module 'core'): ns3::CallbackImplBase [class]
module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
## callback.h (module 'core'): ns3::CallbackValue [class]
module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## mpi-receiver.h (module 'mpi'): ns3::MpiReceiver [class]
module.add_class('MpiReceiver', parent=root_module['ns3::Object'])
## nix-vector.h (module 'network'): ns3::NixVector [class]
module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
## packet.h (module 'network'): ns3::Packet [class]
module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
## nstime.h (module 'core'): ns3::TimeValue [class]
module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## type-id.h (module 'core'): ns3::TypeIdChecker [class]
module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## type-id.h (module 'core'): ns3::TypeIdValue [class]
module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
## Register a nested module for the namespace Hash
nested_module = module.add_cpp_namespace('Hash')
register_types_ns3_Hash(nested_module)
def register_types_ns3_FatalImpl(module):
root_module = module.get_root()
def register_types_ns3_Hash(module):
root_module = module.get_root()
## hash-function.h (module 'core'): ns3::Hash::Implementation [class]
module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash32Function_ptr')
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash32Function_ptr*')
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash32Function_ptr&')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash64Function_ptr')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash64Function_ptr*')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash64Function_ptr&')
## Register a nested module for the namespace Function
nested_module = module.add_cpp_namespace('Function')
register_types_ns3_Hash_Function(nested_module)
def register_types_ns3_Hash_Function(module):
root_module = module.get_root()
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a [class]
module.add_class('Fnv1a', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32 [class]
module.add_class('Hash32', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64 [class]
module.add_class('Hash64', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3 [class]
module.add_class('Murmur3', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
def register_methods(root_module):
register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
register_Ns3MpiInterface_methods(root_module, root_module['ns3::MpiInterface'])
register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
register_Ns3ParallelCommunicationInterface_methods(root_module, root_module['ns3::ParallelCommunicationInterface'])
register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
register_Ns3TimeWithUnit_methods(root_module, root_module['ns3::TimeWithUnit'])
register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
register_Ns3Header_methods(root_module, root_module['ns3::Header'])
register_Ns3Object_methods(root_module, root_module['ns3::Object'])
register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
register_Ns3Time_methods(root_module, root_module['ns3::Time'])
register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
register_Ns3MpiReceiver_methods(root_module, root_module['ns3::MpiReceiver'])
register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
return
def register_Ns3AttributeConstructionList_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function]
cls.add_method('Begin',
'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function]
cls.add_method('End',
'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('Find',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True)
return
def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
return
def register_Ns3Buffer_methods(root_module, cls):
## buffer.h (module 'network'): ns3::Buffer::Buffer() [constructor]
cls.add_constructor([])
## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize) [constructor]
cls.add_constructor([param('uint32_t', 'dataSize')])
## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize, bool initialize) [constructor]
cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')])
## buffer.h (module 'network'): ns3::Buffer::Buffer(ns3::Buffer const & o) [copy constructor]
cls.add_constructor([param('ns3::Buffer const &', 'o')])
## buffer.h (module 'network'): bool ns3::Buffer::AddAtEnd(uint32_t end) [member function]
cls.add_method('AddAtEnd',
'bool',
[param('uint32_t', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(ns3::Buffer const & o) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::Buffer const &', 'o')])
## buffer.h (module 'network'): bool ns3::Buffer::AddAtStart(uint32_t start) [member function]
cls.add_method('AddAtStart',
'bool',
[param('uint32_t', 'start')])
## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::Begin() const [member function]
cls.add_method('Begin',
'ns3::Buffer::Iterator',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::CopyData(std::ostream * os, uint32_t size) const [member function]
cls.add_method('CopyData',
'void',
[param('std::ostream *', 'os'), param('uint32_t', 'size')],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::CopyData(uint8_t * buffer, uint32_t size) const [member function]
cls.add_method('CopyData',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
is_const=True)
## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFragment(uint32_t start, uint32_t length) const [member function]
cls.add_method('CreateFragment',
'ns3::Buffer',
[param('uint32_t', 'start'), param('uint32_t', 'length')],
is_const=True)
## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFullCopy() const [member function]
cls.add_method('CreateFullCopy',
'ns3::Buffer',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::End() const [member function]
cls.add_method('End',
'ns3::Buffer::Iterator',
[],
is_const=True)
## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentEndOffset() const [member function]
cls.add_method('GetCurrentEndOffset',
'int32_t',
[],
is_const=True)
## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentStartOffset() const [member function]
cls.add_method('GetCurrentStartOffset',
'int32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint8_t const * ns3::Buffer::PeekData() const [member function]
cls.add_method('PeekData',
'uint8_t const *',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::RemoveAtEnd(uint32_t end) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::RemoveAtStart(uint32_t start) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'start')])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
def register_Ns3BufferIterator_methods(root_module, cls):
## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator(ns3::Buffer::Iterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')])
## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator() [constructor]
cls.add_constructor([])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size) [member function]
cls.add_method('CalculateIpChecksum',
'uint16_t',
[param('uint16_t', 'size')])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size, uint32_t initialChecksum) [member function]
cls.add_method('CalculateIpChecksum',
'uint16_t',
[param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetDistanceFrom(ns3::Buffer::Iterator const & o) const [member function]
cls.add_method('GetDistanceFrom',
'uint32_t',
[param('ns3::Buffer::Iterator const &', 'o')],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsEnd() const [member function]
cls.add_method('IsEnd',
'bool',
[],
is_const=True)
## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsStart() const [member function]
cls.add_method('IsStart',
'bool',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next() [member function]
cls.add_method('Next',
'void',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next(uint32_t delta) [member function]
cls.add_method('Next',
'void',
[param('uint32_t', 'delta')])
## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::PeekU8() [member function]
cls.add_method('PeekU8',
'uint8_t',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev() [member function]
cls.add_method('Prev',
'void',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev(uint32_t delta) [member function]
cls.add_method('Prev',
'void',
[param('uint32_t', 'delta')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(uint8_t * buffer, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(ns3::Buffer::Iterator start, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('ns3::Buffer::Iterator', 'start'), param('uint32_t', 'size')])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadLsbtohU16() [member function]
cls.add_method('ReadLsbtohU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadLsbtohU32() [member function]
cls.add_method('ReadLsbtohU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadLsbtohU64() [member function]
cls.add_method('ReadLsbtohU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadNtohU16() [member function]
cls.add_method('ReadNtohU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadNtohU32() [member function]
cls.add_method('ReadNtohU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadNtohU64() [member function]
cls.add_method('ReadNtohU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadU16() [member function]
cls.add_method('ReadU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadU32() [member function]
cls.add_method('ReadU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadU64() [member function]
cls.add_method('ReadU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::ReadU8() [member function]
cls.add_method('ReadU8',
'uint8_t',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Write',
'void',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
cls.add_method('Write',
'void',
[param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU16(uint16_t data) [member function]
cls.add_method('WriteHtolsbU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU32(uint32_t data) [member function]
cls.add_method('WriteHtolsbU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU64(uint64_t data) [member function]
cls.add_method('WriteHtolsbU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU16(uint16_t data) [member function]
cls.add_method('WriteHtonU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU32(uint32_t data) [member function]
cls.add_method('WriteHtonU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU64(uint64_t data) [member function]
cls.add_method('WriteHtonU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU16(uint16_t data) [member function]
cls.add_method('WriteU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU32(uint32_t data) [member function]
cls.add_method('WriteU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU64(uint64_t data) [member function]
cls.add_method('WriteU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data, uint32_t len) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'data'), param('uint32_t', 'len')])
return
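# Hedged usage sketch (comments only, not executed): once pybindgen compiles
# the registrations above, Buffer::Iterator surfaces in Python roughly as
# follows. Module names assume an ns-3 build with the modular python
# bindings (ns.network) on PYTHONPATH.
#
#   import ns.network
#   buf = ns.network.Buffer()
#   buf.AddAtStart(8)              # reserve 8 bytes at the front
#   it = buf.Begin()
#   it.WriteHtonU32(0xdeadbeef)    # mapped from WriteHtonU32 above
#   it.WriteU32(42)
#   it = buf.Begin()
#   net_value = it.ReadNtohU32()   # reads back 0xdeadbeef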
def register_Ns3ByteTagIterator_methods(root_module, cls):
## packet.h (module 'network'): ns3::ByteTagIterator::ByteTagIterator(ns3::ByteTagIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagIterator const &', 'arg0')])
## packet.h (module 'network'): bool ns3::ByteTagIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet.h (module 'network'): ns3::ByteTagIterator::Item ns3::ByteTagIterator::Next() [member function]
cls.add_method('Next',
'ns3::ByteTagIterator::Item',
[])
return
def register_Ns3ByteTagIteratorItem_methods(root_module, cls):
## packet.h (module 'network'): ns3::ByteTagIterator::Item::Item(ns3::ByteTagIterator::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')])
## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetEnd() const [member function]
cls.add_method('GetEnd',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetStart() const [member function]
cls.add_method('GetStart',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): void ns3::ByteTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
cls.add_method('GetTag',
'void',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::TypeId ns3::ByteTagIterator::Item::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
return
def register_Ns3ByteTagList_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList() [constructor]
cls.add_constructor([])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList(ns3::ByteTagList const & o) [copy constructor]
cls.add_constructor([param('ns3::ByteTagList const &', 'o')])
## byte-tag-list.h (module 'network'): ns3::TagBuffer ns3::ByteTagList::Add(ns3::TypeId tid, uint32_t bufferSize, int32_t start, int32_t end) [member function]
cls.add_method('Add',
'ns3::TagBuffer',
[param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Add(ns3::ByteTagList const & o) [member function]
cls.add_method('Add',
'void',
[param('ns3::ByteTagList const &', 'o')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtEnd(int32_t adjustment, int32_t appendOffset) [member function]
cls.add_method('AddAtEnd',
'void',
[param('int32_t', 'adjustment'), param('int32_t', 'appendOffset')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtStart(int32_t adjustment, int32_t prependOffset) [member function]
cls.add_method('AddAtStart',
'void',
[param('int32_t', 'adjustment'), param('int32_t', 'prependOffset')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator ns3::ByteTagList::Begin(int32_t offsetStart, int32_t offsetEnd) const [member function]
cls.add_method('Begin',
'ns3::ByteTagList::Iterator',
[param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')],
is_const=True)
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::RemoveAll() [member function]
cls.add_method('RemoveAll',
'void',
[])
return
def register_Ns3ByteTagListIterator_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Iterator(ns3::ByteTagList::Iterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')])
## byte-tag-list.h (module 'network'): uint32_t ns3::ByteTagList::Iterator::GetOffsetStart() const [member function]
cls.add_method('GetOffsetStart',
'uint32_t',
[],
is_const=True)
## byte-tag-list.h (module 'network'): bool ns3::ByteTagList::Iterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item ns3::ByteTagList::Iterator::Next() [member function]
cls.add_method('Next',
'ns3::ByteTagList::Iterator::Item',
[])
return
def register_Ns3ByteTagListIteratorItem_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::ByteTagList::Iterator::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::TagBuffer buf) [constructor]
cls.add_constructor([param('ns3::TagBuffer', 'buf')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::buf [variable]
cls.add_instance_attribute('buf', 'ns3::TagBuffer', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::end [variable]
cls.add_instance_attribute('end', 'int32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::size [variable]
cls.add_instance_attribute('size', 'uint32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::start [variable]
cls.add_instance_attribute('start', 'int32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3CallbackBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
cls.add_method('GetImpl',
'ns3::Ptr< ns3::CallbackImplBase >',
[],
is_const=True)
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
visibility='protected')
## callback.h (module 'core'): static std::string ns3::CallbackBase::Demangle(std::string const & mangled) [member function]
cls.add_method('Demangle',
'std::string',
[param('std::string const &', 'mangled')],
is_static=True, visibility='protected')
return
def register_Ns3Hasher_methods(root_module, cls):
## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Hasher const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hasher const &', 'arg0')])
## hash.h (module 'core'): ns3::Hasher::Hasher() [constructor]
cls.add_constructor([])
## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Ptr<ns3::Hash::Implementation> hp) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')])
## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')])
## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(std::string const s) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('std::string const', 's')])
## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')])
## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(std::string const s) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('std::string const', 's')])
## hash.h (module 'core'): ns3::Hasher & ns3::Hasher::clear() [member function]
cls.add_method('clear',
'ns3::Hasher &',
[])
return
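# Hedged usage sketch: the paired GetHash32/GetHash64 overloads registered
# above let Python callers hash either a (buffer, size) pair or a plain
# string, and clear() returns the Hasher itself so calls can be chained.
# Assuming ns.core from a built ns-3 tree:
#
#   import ns.core
#   h = ns.core.Hasher()
#   h32 = h.clear().GetHash32("some key")
#   h64 = h.clear().GetHash64("some key")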
def register_Ns3MpiInterface_methods(root_module, cls):
## mpi-interface.h (module 'mpi'): ns3::MpiInterface::MpiInterface() [constructor]
cls.add_constructor([])
## mpi-interface.h (module 'mpi'): ns3::MpiInterface::MpiInterface(ns3::MpiInterface const & arg0) [copy constructor]
cls.add_constructor([param('ns3::MpiInterface const &', 'arg0')])
## mpi-interface.h (module 'mpi'): static void ns3::MpiInterface::Destroy() [member function]
cls.add_method('Destroy',
'void',
[],
is_static=True)
## mpi-interface.h (module 'mpi'): static void ns3::MpiInterface::Disable() [member function]
cls.add_method('Disable',
'void',
[],
is_static=True)
## mpi-interface.h (module 'mpi'): static void ns3::MpiInterface::Enable(int * pargc, char * * * pargv) [member function]
cls.add_method('Enable',
'void',
[param('int *', 'pargc'), param('char * * *', 'pargv')],
is_static=True)
## mpi-interface.h (module 'mpi'): static uint32_t ns3::MpiInterface::GetSize() [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_static=True)
## mpi-interface.h (module 'mpi'): static uint32_t ns3::MpiInterface::GetSystemId() [member function]
cls.add_method('GetSystemId',
'uint32_t',
[],
is_static=True)
## mpi-interface.h (module 'mpi'): static bool ns3::MpiInterface::IsEnabled() [member function]
cls.add_method('IsEnabled',
'bool',
[],
is_static=True)
## mpi-interface.h (module 'mpi'): static void ns3::MpiInterface::SendPacket(ns3::Ptr<ns3::Packet> p, ns3::Time const & rxTime, uint32_t node, uint32_t dev) [member function]
cls.add_method('SendPacket',
'void',
[param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Time const &', 'rxTime'), param('uint32_t', 'node'), param('uint32_t', 'dev')],
is_static=True)
return
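# Hedged usage sketch: every MpiInterface method above is registered with
# is_static=True, so Python calls them on the class itself. The exact
# Enable() signature seen from Python depends on how pybindgen maps the
# C++ int*/char*** argc/argv parameters, so treat this as a sketch only:
#
#   import sys
#   import ns.mpi                          # requires a --enable-mpi build
#   ns.mpi.MpiInterface.Enable(sys.argv)   # hand over command-line args
#   rank = ns.mpi.MpiInterface.GetSystemId()
#   size = ns.mpi.MpiInterface.GetSize()
#   ns.mpi.MpiInterface.Disable()          # at simulation end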
def register_Ns3ObjectBase_methods(root_module, cls):
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
cls.add_constructor([])
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
cls.add_method('ConstructSelf',
'void',
[param('ns3::AttributeConstructionList const &', 'attributes')],
visibility='protected')
## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
cls.add_method('NotifyConstructionCompleted',
'void',
[],
visibility='protected', is_virtual=True)
return
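# Hedged usage sketch: GetAttribute/SetAttribute and their FailSafe variants
# registered above are the Python entry points to the ns-3 attribute system.
# "MyAttribute" below is a placeholder name, not a real attribute:
#
#   v = ns.core.UintegerValue(5)
#   obj.SetAttribute("MyAttribute", v)                 # raises on unknown names
#   out = ns.core.UintegerValue()
#   ok = obj.GetAttributeFailSafe("MyAttribute", out)  # returns False instead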
def register_Ns3ObjectDeleter_methods(root_module, cls):
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor]
cls.add_constructor([])
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::Object *', 'object')],
is_static=True)
return
def register_Ns3PacketMetadata_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(uint64_t uid, uint32_t size) [constructor]
cls.add_constructor([param('uint64_t', 'uid'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(ns3::PacketMetadata const & o) [copy constructor]
cls.add_constructor([param('ns3::PacketMetadata const &', 'o')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddAtEnd(ns3::PacketMetadata const & o) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::PacketMetadata const &', 'o')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddHeader(ns3::Header const & header, uint32_t size) [member function]
cls.add_method('AddHeader',
'void',
[param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddPaddingAtEnd(uint32_t end) [member function]
cls.add_method('AddPaddingAtEnd',
'void',
[param('uint32_t', 'end')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
cls.add_method('AddTrailer',
'void',
[param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::PacketMetadata::BeginItem(ns3::Buffer buffer) const [member function]
cls.add_method('BeginItem',
'ns3::PacketMetadata::ItemIterator',
[param('ns3::Buffer', 'buffer')],
is_const=True)
## packet-metadata.h (module 'network'): ns3::PacketMetadata ns3::PacketMetadata::CreateFragment(uint32_t start, uint32_t end) const [member function]
cls.add_method('CreateFragment',
'ns3::PacketMetadata',
[param('uint32_t', 'start'), param('uint32_t', 'end')],
is_const=True)
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::Enable() [member function]
cls.add_method('Enable',
'void',
[],
is_static=True)
## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::EnableChecking() [member function]
cls.add_method('EnableChecking',
'void',
[],
is_static=True)
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## packet-metadata.h (module 'network'): uint64_t ns3::PacketMetadata::GetUid() const [member function]
cls.add_method('GetUid',
'uint64_t',
[],
is_const=True)
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtEnd(uint32_t end) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'end')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtStart(uint32_t start) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'start')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveHeader(ns3::Header const & header, uint32_t size) [member function]
cls.add_method('RemoveHeader',
'void',
[param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
cls.add_method('RemoveTrailer',
'void',
[param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
def register_Ns3PacketMetadataItem_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item() [constructor]
cls.add_constructor([])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item(ns3::PacketMetadata::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketMetadata::Item const &', 'arg0')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::current [variable]
cls.add_instance_attribute('current', 'ns3::Buffer::Iterator', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentSize [variable]
cls.add_instance_attribute('currentSize', 'uint32_t', is_const=False)
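    # NOTE: 'Trimed' in the two attributes below reproduces the misspelled
    # member names from ns-3's packet-metadata.h; the binding strings must
    # match the C++ identifiers exactly.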
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromEnd [variable]
cls.add_instance_attribute('currentTrimedFromEnd', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromStart [variable]
cls.add_instance_attribute('currentTrimedFromStart', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::isFragment [variable]
cls.add_instance_attribute('isFragment', 'bool', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3PacketMetadataItemIterator_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata::ItemIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata const * metadata, ns3::Buffer buffer) [constructor]
cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'), param('ns3::Buffer', 'buffer')])
## packet-metadata.h (module 'network'): bool ns3::PacketMetadata::ItemIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item ns3::PacketMetadata::ItemIterator::Next() [member function]
cls.add_method('Next',
'ns3::PacketMetadata::Item',
[])
return
def register_Ns3PacketTagIterator_methods(root_module, cls):
## packet.h (module 'network'): ns3::PacketTagIterator::PacketTagIterator(ns3::PacketTagIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')])
## packet.h (module 'network'): bool ns3::PacketTagIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet.h (module 'network'): ns3::PacketTagIterator::Item ns3::PacketTagIterator::Next() [member function]
cls.add_method('Next',
'ns3::PacketTagIterator::Item',
[])
return
def register_Ns3PacketTagIteratorItem_methods(root_module, cls):
## packet.h (module 'network'): ns3::PacketTagIterator::Item::Item(ns3::PacketTagIterator::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketTagIterator::Item const &', 'arg0')])
## packet.h (module 'network'): void ns3::PacketTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
cls.add_method('GetTag',
'void',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::TypeId ns3::PacketTagIterator::Item::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
return
def register_Ns3PacketTagList_methods(root_module, cls):
## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList() [constructor]
cls.add_constructor([])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList(ns3::PacketTagList const & o) [copy constructor]
cls.add_constructor([param('ns3::PacketTagList const &', 'o')])
## packet-tag-list.h (module 'network'): void ns3::PacketTagList::Add(ns3::Tag const & tag) const [member function]
cls.add_method('Add',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData const * ns3::PacketTagList::Head() const [member function]
cls.add_method('Head',
'ns3::PacketTagList::TagData const *',
[],
is_const=True)
## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Peek(ns3::Tag & tag) const [member function]
cls.add_method('Peek',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Remove(ns3::Tag & tag) [member function]
cls.add_method('Remove',
'bool',
[param('ns3::Tag &', 'tag')])
## packet-tag-list.h (module 'network'): void ns3::PacketTagList::RemoveAll() [member function]
cls.add_method('RemoveAll',
'void',
[])
## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Replace(ns3::Tag & tag) [member function]
cls.add_method('Replace',
'bool',
[param('ns3::Tag &', 'tag')])
return
def register_Ns3PacketTagListTagData_methods(root_module, cls):
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData() [constructor]
cls.add_constructor([])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData(ns3::PacketTagList::TagData const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketTagList::TagData const &', 'arg0')])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::count [variable]
cls.add_instance_attribute('count', 'uint32_t', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::data [variable]
cls.add_instance_attribute('data', 'uint8_t [ 20 ]', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::next [variable]
cls.add_instance_attribute('next', 'ns3::PacketTagList::TagData *', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3ParallelCommunicationInterface_methods(root_module, cls):
## parallel-communication-interface.h (module 'mpi'): ns3::ParallelCommunicationInterface::ParallelCommunicationInterface() [constructor]
cls.add_constructor([])
## parallel-communication-interface.h (module 'mpi'): ns3::ParallelCommunicationInterface::ParallelCommunicationInterface(ns3::ParallelCommunicationInterface const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ParallelCommunicationInterface const &', 'arg0')])
## parallel-communication-interface.h (module 'mpi'): void ns3::ParallelCommunicationInterface::Destroy() [member function]
cls.add_method('Destroy',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## parallel-communication-interface.h (module 'mpi'): void ns3::ParallelCommunicationInterface::Disable() [member function]
cls.add_method('Disable',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## parallel-communication-interface.h (module 'mpi'): void ns3::ParallelCommunicationInterface::Enable(int * pargc, char * * * pargv) [member function]
cls.add_method('Enable',
'void',
[param('int *', 'pargc'), param('char * * *', 'pargv')],
is_pure_virtual=True, is_virtual=True)
## parallel-communication-interface.h (module 'mpi'): uint32_t ns3::ParallelCommunicationInterface::GetSize() [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_pure_virtual=True, is_virtual=True)
## parallel-communication-interface.h (module 'mpi'): uint32_t ns3::ParallelCommunicationInterface::GetSystemId() [member function]
cls.add_method('GetSystemId',
'uint32_t',
[],
is_pure_virtual=True, is_virtual=True)
## parallel-communication-interface.h (module 'mpi'): bool ns3::ParallelCommunicationInterface::IsEnabled() [member function]
cls.add_method('IsEnabled',
'bool',
[],
is_pure_virtual=True, is_virtual=True)
## parallel-communication-interface.h (module 'mpi'): void ns3::ParallelCommunicationInterface::SendPacket(ns3::Ptr<ns3::Packet> p, ns3::Time const & rxTime, uint32_t node, uint32_t dev) [member function]
cls.add_method('SendPacket',
'void',
[param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Time const &', 'rxTime'), param('uint32_t', 'node'), param('uint32_t', 'dev')],
is_pure_virtual=True, is_virtual=True)
return
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3Tag_methods(root_module, cls):
## tag.h (module 'network'): ns3::Tag::Tag() [constructor]
cls.add_constructor([])
## tag.h (module 'network'): ns3::Tag::Tag(ns3::Tag const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Tag const &', 'arg0')])
## tag.h (module 'network'): void ns3::Tag::Deserialize(ns3::TagBuffer i) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_pure_virtual=True, is_virtual=True)
## tag.h (module 'network'): uint32_t ns3::Tag::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## tag.h (module 'network'): static ns3::TypeId ns3::Tag::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## tag.h (module 'network'): void ns3::Tag::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## tag.h (module 'network'): void ns3::Tag::Serialize(ns3::TagBuffer i) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3TagBuffer_methods(root_module, cls):
## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')])
## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor]
cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function]
cls.add_method('CopyFrom',
'void',
[param('ns3::TagBuffer', 'o')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
## tag-buffer.h (module 'network'): double ns3::TagBuffer::ReadDouble() [member function]
cls.add_method('ReadDouble',
'double',
[])
## tag-buffer.h (module 'network'): uint16_t ns3::TagBuffer::ReadU16() [member function]
cls.add_method('ReadU16',
'uint16_t',
[])
## tag-buffer.h (module 'network'): uint32_t ns3::TagBuffer::ReadU32() [member function]
cls.add_method('ReadU32',
'uint32_t',
[])
## tag-buffer.h (module 'network'): uint64_t ns3::TagBuffer::ReadU64() [member function]
cls.add_method('ReadU64',
'uint64_t',
[])
## tag-buffer.h (module 'network'): uint8_t ns3::TagBuffer::ReadU8() [member function]
cls.add_method('ReadU8',
'uint8_t',
[])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::TrimAtEnd(uint32_t trim) [member function]
cls.add_method('TrimAtEnd',
'void',
[param('uint32_t', 'trim')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::Write(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Write',
'void',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteDouble(double v) [member function]
cls.add_method('WriteDouble',
'void',
[param('double', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU16(uint16_t data) [member function]
cls.add_method('WriteU16',
'void',
[param('uint16_t', 'data')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU32(uint32_t data) [member function]
cls.add_method('WriteU32',
'void',
[param('uint32_t', 'data')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU64(uint64_t v) [member function]
cls.add_method('WriteU64',
'void',
[param('uint64_t', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU8(uint8_t v) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'v')])
return
def register_Ns3TimeWithUnit_methods(root_module, cls):
cls.add_output_stream_operator()
## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::TimeWithUnit const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TimeWithUnit const &', 'arg0')])
## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::Time const time, ns3::Time::Unit const unit) [constructor]
cls.add_constructor([param('ns3::Time const', 'time'), param('ns3::Time::Unit const', 'unit')])
return
def register_Ns3TypeId_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
cls.add_constructor([param('char const *', 'name')])
## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
cls.add_constructor([param('ns3::TypeId const &', 'o')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')],
deprecated=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor, std::string callback) [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor'), param('std::string', 'callback')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function]
cls.add_method('GetAttribute',
'ns3::TypeId::AttributeInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
cls.add_method('GetAttributeFullName',
'std::string',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]
cls.add_method('GetAttributeN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
cls.add_method('GetConstructor',
'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
cls.add_method('GetGroupName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetHash() const [member function]
cls.add_method('GetHash',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
cls.add_method('GetParent',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
cls.add_method('GetRegistered',
'ns3::TypeId',
[param('uint32_t', 'i')],
is_static=True)
## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]
cls.add_method('GetRegisteredN',
'uint32_t',
[],
is_static=True)
## type-id.h (module 'core'): std::size_t ns3::TypeId::GetSize() const [member function]
cls.add_method('GetSize',
'std::size_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function]
cls.add_method('GetTraceSource',
'ns3::TypeId::TraceSourceInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
cls.add_method('GetTraceSourceN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
cls.add_method('GetUid',
'uint16_t',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
cls.add_method('HasConstructor',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
cls.add_method('HasParent',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
cls.add_method('HideFromDocumentation',
'ns3::TypeId',
[])
## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
cls.add_method('IsChildOf',
'bool',
[param('ns3::TypeId', 'other')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
cls.add_method('LookupAttributeByName',
'bool',
[param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByHash(uint32_t hash) [member function]
cls.add_method('LookupByHash',
'ns3::TypeId',
[param('uint32_t', 'hash')],
is_static=True)
## type-id.h (module 'core'): static bool ns3::TypeId::LookupByHashFailSafe(uint32_t hash, ns3::TypeId * tid) [member function]
cls.add_method('LookupByHashFailSafe',
'bool',
[param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')],
is_static=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
cls.add_method('LookupByName',
'ns3::TypeId',
[param('std::string', 'name')],
is_static=True)
## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
cls.add_method('MustHideFromDocumentation',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function]
cls.add_method('SetAttributeInitialValue',
'bool',
[param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
cls.add_method('SetGroupName',
'ns3::TypeId',
[param('std::string', 'groupName')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
cls.add_method('SetParent',
'ns3::TypeId',
[param('ns3::TypeId', 'tid')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetSize(std::size_t size) [member function]
cls.add_method('SetSize',
'ns3::TypeId',
[param('std::size_t', 'size')])
## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function]
cls.add_method('SetUid',
'void',
[param('uint16_t', 'tid')])
return
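# Hedged usage sketch: the static lookup methods registered above make the
# TypeId registry introspectable from Python, e.g. enumerating a type's
# attributes via the AttributeInformation fields registered just below:
#
#   import ns.core
#   tid = ns.core.TypeId.LookupByName("ns3::Object")
#   for i in range(tid.GetAttributeN()):
#       info = tid.GetAttribute(i)
#       print(info.name, info.help)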
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::callback [variable]
cls.add_instance_attribute('callback', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
return
def register_Ns3Empty_methods(root_module, cls):
## empty.h (module 'core'): ns3::empty::empty() [constructor]
cls.add_constructor([])
## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor]
cls.add_constructor([param('ns3::empty const &', 'arg0')])
return
def register_Ns3Int64x64_t_methods(root_module, cls):
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
cls.add_unary_numeric_operator('-')
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_binary_comparison_operator('!=')
cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', u'right'))
cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', u'right'))
cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', u'right'))
cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', u'right'))
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('>=')
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor]
cls.add_constructor([])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(double v) [constructor]
cls.add_constructor([param('double', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long double v) [constructor]
cls.add_constructor([param('long double', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int v) [constructor]
cls.add_constructor([param('int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long int v) [constructor]
cls.add_constructor([param('long int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int v) [constructor]
cls.add_constructor([param('long long int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int v) [constructor]
cls.add_constructor([param('unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int v) [constructor]
cls.add_constructor([param('long unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int v) [constructor]
cls.add_constructor([param('long long unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t hi, uint64_t lo) [constructor]
cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [copy constructor]
cls.add_constructor([param('ns3::int64x64_t const &', 'o')])
## int64x64-double.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function]
cls.add_method('GetDouble',
'double',
[],
is_const=True)
## int64x64-double.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function]
cls.add_method('GetHigh',
'int64_t',
[],
is_const=True)
## int64x64-double.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function]
cls.add_method('GetLow',
'uint64_t',
[],
is_const=True)
## int64x64-double.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t v) [member function]
cls.add_method('Invert',
'ns3::int64x64_t',
[param('uint64_t', 'v')],
is_static=True)
## int64x64-double.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function]
cls.add_method('MulByInvert',
'void',
[param('ns3::int64x64_t const &', 'o')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::implementation [variable]
cls.add_static_attribute('implementation', 'ns3::int64x64_t::impl_type const', is_const=True)
return
def register_Ns3Chunk_methods(root_module, cls):
## chunk.h (module 'network'): ns3::Chunk::Chunk() [constructor]
cls.add_constructor([])
## chunk.h (module 'network'): ns3::Chunk::Chunk(ns3::Chunk const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Chunk const &', 'arg0')])
## chunk.h (module 'network'): uint32_t ns3::Chunk::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_virtual=True)
## chunk.h (module 'network'): static ns3::TypeId ns3::Chunk::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## chunk.h (module 'network'): void ns3::Chunk::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3Header_methods(root_module, cls):
cls.add_output_stream_operator()
## header.h (module 'network'): ns3::Header::Header() [constructor]
cls.add_constructor([])
## header.h (module 'network'): ns3::Header::Header(ns3::Header const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Header const &', 'arg0')])
## header.h (module 'network'): uint32_t ns3::Header::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_virtual=True)
## header.h (module 'network'): uint32_t ns3::Header::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## header.h (module 'network'): static ns3::TypeId ns3::Header::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## header.h (module 'network'): void ns3::Header::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## header.h (module 'network'): void ns3::Header::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3Object_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::Object() [constructor]
cls.add_constructor([])
## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
cls.add_method('AggregateObject',
'void',
[param('ns3::Ptr< ns3::Object >', 'other')])
## object.h (module 'core'): void ns3::Object::Dispose() [member function]
cls.add_method('Dispose',
'void',
[])
## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
cls.add_method('GetAggregateIterator',
'ns3::Object::AggregateIterator',
[],
is_const=True)
## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object.h (module 'core'): void ns3::Object::Initialize() [member function]
cls.add_method('Initialize',
'void',
[])
## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor]
cls.add_constructor([param('ns3::Object const &', 'o')],
visibility='protected')
## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::DoInitialize() [member function]
cls.add_method('DoInitialize',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
cls.add_method('NotifyNewAggregate',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
cls.add_constructor([])
## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function]
cls.add_method('Next',
'ns3::Ptr< ns3::Object const >',
[])
return
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount(ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter< ns3::NixVector > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter< ns3::Packet > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3Time_methods(root_module, cls):
cls.add_binary_numeric_operator('*', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right'))
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_binary_comparison_operator('!=')
cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', u'right'))
cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', u'right'))
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('>=')
## nstime.h (module 'core'): ns3::Time::Time() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [copy constructor]
cls.add_constructor([param('ns3::Time const &', 'o')])
## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor]
cls.add_constructor([param('double', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor]
cls.add_constructor([param('int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor]
cls.add_constructor([param('long int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor]
cls.add_constructor([param('long long int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor]
cls.add_constructor([param('unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor]
cls.add_constructor([param('long unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor]
cls.add_constructor([param('long long unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & v) [constructor]
cls.add_constructor([param('ns3::int64x64_t const &', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor]
cls.add_constructor([param('std::string const &', 's')])
## nstime.h (module 'core'): ns3::TimeWithUnit ns3::Time::As(ns3::Time::Unit const unit) const [member function]
cls.add_method('As',
'ns3::TimeWithUnit',
[param('ns3::Time::Unit const', 'unit')],
is_const=True)
## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function]
cls.add_method('Compare',
'int',
[param('ns3::Time const &', 'o')],
is_const=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function]
cls.add_method('From',
'ns3::Time',
[param('ns3::int64x64_t const &', 'value')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value, ns3::Time::Unit unit) [member function]
cls.add_method('From',
'ns3::Time',
[param('ns3::int64x64_t const &', 'value'), param('ns3::Time::Unit', 'unit')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit unit) [member function]
cls.add_method('FromDouble',
'ns3::Time',
[param('double', 'value'), param('ns3::Time::Unit', 'unit')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit unit) [member function]
cls.add_method('FromInteger',
'ns3::Time',
[param('uint64_t', 'value'), param('ns3::Time::Unit', 'unit')],
is_static=True)
## nstime.h (module 'core'): double ns3::Time::GetDays() const [member function]
cls.add_method('GetDays',
'double',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function]
cls.add_method('GetDouble',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function]
cls.add_method('GetFemtoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetHours() const [member function]
cls.add_method('GetHours',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function]
cls.add_method('GetInteger',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function]
cls.add_method('GetMicroSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function]
cls.add_method('GetMilliSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetMinutes() const [member function]
cls.add_method('GetMinutes',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function]
cls.add_method('GetNanoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function]
cls.add_method('GetPicoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function]
cls.add_method('GetResolution',
'ns3::Time::Unit',
[],
is_static=True)
## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function]
cls.add_method('GetSeconds',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function]
cls.add_method('GetTimeStep',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetYears() const [member function]
cls.add_method('GetYears',
'double',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function]
cls.add_method('IsNegative',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function]
cls.add_method('IsPositive',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function]
cls.add_method('IsStrictlyNegative',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function]
cls.add_method('IsStrictlyPositive',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function]
cls.add_method('IsZero',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::Max() [member function]
cls.add_method('Max',
'ns3::Time',
[],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::Min() [member function]
cls.add_method('Min',
'ns3::Time',
[],
is_static=True)
## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function]
cls.add_method('SetResolution',
'void',
[param('ns3::Time::Unit', 'resolution')],
is_static=True)
## nstime.h (module 'core'): static bool ns3::Time::StaticInit() [member function]
cls.add_method('StaticInit',
'bool',
[],
is_static=True)
## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit unit) const [member function]
cls.add_method('To',
'ns3::int64x64_t',
[param('ns3::Time::Unit', 'unit')],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit unit) const [member function]
cls.add_method('ToDouble',
'double',
[param('ns3::Time::Unit', 'unit')],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit unit) const [member function]
cls.add_method('ToInteger',
'int64_t',
[param('ns3::Time::Unit', 'unit')],
is_const=True)
return
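# Illustrative use of the Time API registered above (sketch only; assumes a
# compiled ns-3 Python build exposing these bindings as ns.core):
#
#   import ns.core
#   t = ns.core.Time("1500ms")     # string constructor registered above
#   t.GetMilliSeconds()            # -> 1500
#   t.GetSeconds()                 # -> 1.5
#   t.IsStrictlyPositive()         # -> True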
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
cls.add_constructor([])
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Connect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('ConnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Disconnect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('DisconnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3Trailer_methods(root_module, cls):
cls.add_output_stream_operator()
## trailer.h (module 'network'): ns3::Trailer::Trailer() [constructor]
cls.add_constructor([])
## trailer.h (module 'network'): ns3::Trailer::Trailer(ns3::Trailer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Trailer const &', 'arg0')])
## trailer.h (module 'network'): uint32_t ns3::Trailer::Deserialize(ns3::Buffer::Iterator end) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'end')],
is_pure_virtual=True, is_virtual=True)
## trailer.h (module 'network'): uint32_t ns3::Trailer::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trailer.h (module 'network'): static ns3::TypeId ns3::Trailer::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## trailer.h (module 'network'): void ns3::Trailer::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trailer.h (module 'network'): void ns3::Trailer::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeAccessor_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeChecker_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
cls.add_method('CreateValidValue',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::AttributeValue const &', 'value')],
is_const=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3CallbackChecker_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
return
def register_Ns3CallbackImplBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3CallbackValue_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
cls.add_method('Set',
'void',
[param('ns3::CallbackBase', 'base')])
return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, visibility='private', is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
visibility='private', is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3MpiReceiver_methods(root_module, cls):
## mpi-receiver.h (module 'mpi'): ns3::MpiReceiver::MpiReceiver() [constructor]
cls.add_constructor([])
## mpi-receiver.h (module 'mpi'): ns3::MpiReceiver::MpiReceiver(ns3::MpiReceiver const & arg0) [copy constructor]
cls.add_constructor([param('ns3::MpiReceiver const &', 'arg0')])
## mpi-receiver.h (module 'mpi'): static ns3::TypeId ns3::MpiReceiver::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## mpi-receiver.h (module 'mpi'): void ns3::MpiReceiver::Receive(ns3::Ptr<ns3::Packet> p) [member function]
cls.add_method('Receive',
'void',
[param('ns3::Ptr< ns3::Packet >', 'p')])
## mpi-receiver.h (module 'mpi'): void ns3::MpiReceiver::SetReceiveCallback(ns3::Callback<void, ns3::Ptr<ns3::Packet>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
cls.add_method('SetReceiveCallback',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::Packet >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')])
## mpi-receiver.h (module 'mpi'): void ns3::MpiReceiver::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='private', is_virtual=True)
return
def register_Ns3NixVector_methods(root_module, cls):
cls.add_output_stream_operator()
## nix-vector.h (module 'network'): ns3::NixVector::NixVector() [constructor]
cls.add_constructor([])
## nix-vector.h (module 'network'): ns3::NixVector::NixVector(ns3::NixVector const & o) [copy constructor]
cls.add_constructor([param('ns3::NixVector const &', 'o')])
## nix-vector.h (module 'network'): void ns3::NixVector::AddNeighborIndex(uint32_t newBits, uint32_t numberOfBits) [member function]
cls.add_method('AddNeighborIndex',
'void',
[param('uint32_t', 'newBits'), param('uint32_t', 'numberOfBits')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::BitCount(uint32_t numberOfNeighbors) const [member function]
cls.add_method('BitCount',
'uint32_t',
[param('uint32_t', 'numberOfNeighbors')],
is_const=True)
## nix-vector.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::NixVector::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::NixVector >',
[],
is_const=True)
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Deserialize(uint32_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint32_t const *', 'buffer'), param('uint32_t', 'size')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::ExtractNeighborIndex(uint32_t numberOfBits) [member function]
cls.add_method('ExtractNeighborIndex',
'uint32_t',
[param('uint32_t', 'numberOfBits')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetRemainingBits() [member function]
cls.add_method('GetRemainingBits',
'uint32_t',
[])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Serialize(uint32_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
def register_Ns3Packet_methods(root_module, cls):
cls.add_output_stream_operator()
## packet.h (module 'network'): ns3::Packet::Packet() [constructor]
cls.add_constructor([])
## packet.h (module 'network'): ns3::Packet::Packet(ns3::Packet const & o) [copy constructor]
cls.add_constructor([param('ns3::Packet const &', 'o')])
## packet.h (module 'network'): ns3::Packet::Packet(uint32_t size) [constructor]
cls.add_constructor([param('uint32_t', 'size')])
## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size, bool magic) [constructor]
cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size'), param('bool', 'magic')])
## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size) [constructor]
cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::AddAtEnd(ns3::Ptr<const ns3::Packet> packet) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::Ptr< ns3::Packet const >', 'packet')])
## packet.h (module 'network'): void ns3::Packet::AddByteTag(ns3::Tag const & tag) const [member function]
cls.add_method('AddByteTag',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::AddHeader(ns3::Header const & header) [member function]
cls.add_method('AddHeader',
'void',
[param('ns3::Header const &', 'header')])
## packet.h (module 'network'): void ns3::Packet::AddPacketTag(ns3::Tag const & tag) const [member function]
cls.add_method('AddPacketTag',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::AddPaddingAtEnd(uint32_t size) [member function]
cls.add_method('AddPaddingAtEnd',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::AddTrailer(ns3::Trailer const & trailer) [member function]
cls.add_method('AddTrailer',
'void',
[param('ns3::Trailer const &', 'trailer')])
## packet.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::Packet::BeginItem() const [member function]
cls.add_method('BeginItem',
'ns3::PacketMetadata::ItemIterator',
[],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::Packet >',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::CopyData(uint8_t * buffer, uint32_t size) const [member function]
cls.add_method('CopyData',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::CopyData(std::ostream * os, uint32_t size) const [member function]
cls.add_method('CopyData',
'void',
[param('std::ostream *', 'os'), param('uint32_t', 'size')],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::CreateFragment(uint32_t start, uint32_t length) const [member function]
cls.add_method('CreateFragment',
'ns3::Ptr< ns3::Packet >',
[param('uint32_t', 'start'), param('uint32_t', 'length')],
is_const=True)
## packet.h (module 'network'): static void ns3::Packet::EnableChecking() [member function]
cls.add_method('EnableChecking',
'void',
[],
is_static=True)
## packet.h (module 'network'): static void ns3::Packet::EnablePrinting() [member function]
cls.add_method('EnablePrinting',
'void',
[],
is_static=True)
## packet.h (module 'network'): bool ns3::Packet::FindFirstMatchingByteTag(ns3::Tag & tag) const [member function]
cls.add_method('FindFirstMatchingByteTag',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::ByteTagIterator ns3::Packet::GetByteTagIterator() const [member function]
cls.add_method('GetByteTagIterator',
'ns3::ByteTagIterator',
[],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::Packet::GetNixVector() const [member function]
cls.add_method('GetNixVector',
'ns3::Ptr< ns3::NixVector >',
[],
is_const=True)
## packet.h (module 'network'): ns3::PacketTagIterator ns3::Packet::GetPacketTagIterator() const [member function]
cls.add_method('GetPacketTagIterator',
'ns3::PacketTagIterator',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint64_t ns3::Packet::GetUid() const [member function]
cls.add_method('GetUid',
'uint64_t',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::PeekHeader(ns3::Header & header) const [member function]
cls.add_method('PeekHeader',
'uint32_t',
[param('ns3::Header &', 'header')],
is_const=True)
## packet.h (module 'network'): bool ns3::Packet::PeekPacketTag(ns3::Tag & tag) const [member function]
cls.add_method('PeekPacketTag',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::PeekTrailer(ns3::Trailer & trailer) [member function]
cls.add_method('PeekTrailer',
'uint32_t',
[param('ns3::Trailer &', 'trailer')])
## packet.h (module 'network'): void ns3::Packet::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::PrintByteTags(std::ostream & os) const [member function]
cls.add_method('PrintByteTags',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::PrintPacketTags(std::ostream & os) const [member function]
cls.add_method('PrintPacketTags',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::RemoveAllByteTags() [member function]
cls.add_method('RemoveAllByteTags',
'void',
[])
## packet.h (module 'network'): void ns3::Packet::RemoveAllPacketTags() [member function]
cls.add_method('RemoveAllPacketTags',
'void',
[])
## packet.h (module 'network'): void ns3::Packet::RemoveAtEnd(uint32_t size) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::RemoveAtStart(uint32_t size) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): uint32_t ns3::Packet::RemoveHeader(ns3::Header & header) [member function]
cls.add_method('RemoveHeader',
'uint32_t',
[param('ns3::Header &', 'header')])
## packet.h (module 'network'): bool ns3::Packet::RemovePacketTag(ns3::Tag & tag) [member function]
cls.add_method('RemovePacketTag',
'bool',
[param('ns3::Tag &', 'tag')])
## packet.h (module 'network'): uint32_t ns3::Packet::RemoveTrailer(ns3::Trailer & trailer) [member function]
cls.add_method('RemoveTrailer',
'uint32_t',
[param('ns3::Trailer &', 'trailer')])
## packet.h (module 'network'): bool ns3::Packet::ReplacePacketTag(ns3::Tag & tag) [member function]
cls.add_method('ReplacePacketTag',
'bool',
[param('ns3::Tag &', 'tag')])
## packet.h (module 'network'): uint32_t ns3::Packet::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::SetNixVector(ns3::Ptr<ns3::NixVector> nixVector) [member function]
cls.add_method('SetNixVector',
'void',
[param('ns3::Ptr< ns3::NixVector >', 'nixVector')])
## packet.h (module 'network'): std::string ns3::Packet::ToString() const [member function]
cls.add_method('ToString',
'std::string',
[],
is_const=True)
return
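# Illustrative use of the Packet API registered above (sketch only; assumes
# the compiled ns.network module):
#
#   import ns.network
#   p = ns.network.Packet(64)          # uint32_t size constructor
#   frag = p.CreateFragment(0, 32)
#   p.GetSize(), frag.GetSize()        # -> (64, 32)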
def register_Ns3TimeValue_methods(root_module, cls):
## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TimeValue const &', 'arg0')])
## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor]
cls.add_constructor([param('ns3::Time const &', 'value')])
## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function]
cls.add_method('Get',
'ns3::Time',
[],
is_const=True)
## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## nstime.h (module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Time const &', 'value')])
return
def register_Ns3TypeIdChecker_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
return
def register_Ns3TypeIdValue_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
cls.add_constructor([param('ns3::TypeId const &', 'value')])
## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
cls.add_method('Get',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::TypeId const &', 'value')])
return
def register_Ns3HashImplementation_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation(ns3::Hash::Implementation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation() [constructor]
cls.add_constructor([])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Implementation::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_pure_virtual=True, is_virtual=True)
## hash-function.h (module 'core'): uint64_t ns3::Hash::Implementation::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Implementation::clear() [member function]
cls.add_method('clear',
'void',
[],
is_pure_virtual=True, is_virtual=True)
return
def register_Ns3HashFunctionFnv1a_methods(root_module, cls):
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a(ns3::Hash::Function::Fnv1a const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')])
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a() [constructor]
cls.add_constructor([])
## hash-fnv.h (module 'core'): uint32_t ns3::Hash::Function::Fnv1a::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-fnv.h (module 'core'): uint64_t ns3::Hash::Function::Fnv1a::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-fnv.h (module 'core'): void ns3::Hash::Function::Fnv1a::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionHash32_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Function::Hash32 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Hash32Function_ptr hp) [constructor]
cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash32::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Function::Hash32::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionHash64_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Function::Hash64 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Hash64Function_ptr hp) [constructor]
cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash64::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): uint64_t ns3::Hash::Function::Hash64::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Function::Hash64::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionMurmur3_methods(root_module, cls):
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3(ns3::Hash::Function::Murmur3 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')])
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3() [constructor]
cls.add_constructor([])
## hash-murmur3.h (module 'core'): uint32_t ns3::Hash::Function::Murmur3::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-murmur3.h (module 'core'): uint64_t ns3::Hash::Function::Murmur3::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-murmur3.h (module 'core'): void ns3::Hash::Function::Murmur3::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_functions(root_module):
module = root_module
register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
register_functions_ns3_Hash(module.get_submodule('Hash'), root_module)
return
def register_functions_ns3_FatalImpl(module, root_module):
return
def register_functions_ns3_Hash(module, root_module):
register_functions_ns3_Hash_Function(module.get_submodule('Function'), root_module)
return
def register_functions_ns3_Hash_Function(module, root_module):
return
def main():
out = FileCodeSink(sys.stdout)
root_module = module_init()
register_types(root_module)
register_methods(root_module)
register_functions(root_module)
root_module.generate(out)
if __name__ == '__main__':
main()
| gpl-2.0 |
renyi533/tensorflow | tensorflow/python/kernel_tests/bincount_op_test.py | 4 | 5627 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for math_ops.bincount."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
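# Semantics exercised below, in brief (illustrative values; bincount mirrors
# numpy.bincount for non-negative integer inputs):
#
#   math_ops.bincount([1, 1, 2])                   # -> [0, 2, 1]
#   math_ops.bincount([1], minlength=4)            # -> [0, 1, 0, 0]
#   math_ops.bincount([5], maxlength=3)            # -> [0, 0, 0]
#   math_ops.bincount([0, 1, 1], [1.0, 2.0, 0.5])  # -> [1.0, 2.5]
#
# output[i] is the count (or the sum of the weights) of occurrences of i.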
class BincountTest(test_util.TensorFlowTestCase):
def test_empty(self):
with self.session(use_gpu=True):
self.assertAllEqual(self.evaluate(math_ops.bincount([], minlength=5)),
[0, 0, 0, 0, 0])
self.assertAllEqual(self.evaluate(math_ops.bincount([], minlength=1)),
[0])
self.assertAllEqual(self.evaluate(math_ops.bincount([], minlength=0)),
[])
self.assertEqual(self.evaluate(math_ops.bincount([], minlength=0,
dtype=np.float32)).dtype,
np.float32)
self.assertEqual(self.evaluate(math_ops.bincount([], minlength=3,
dtype=np.float64)).dtype,
np.float64)
def test_values(self):
with self.session(use_gpu=True):
self.assertAllEqual(self.evaluate(math_ops.bincount([1, 1, 1, 2, 2, 3])),
[0, 3, 2, 1])
arr = [1, 1, 2, 1, 2, 3, 1, 2, 3, 4, 1, 2, 3, 4, 5]
self.assertAllEqual(self.evaluate(math_ops.bincount(arr)),
[0, 5, 4, 3, 2, 1])
arr += [0, 0, 0, 0, 0, 0]
self.assertAllEqual(self.evaluate(math_ops.bincount(arr)),
[6, 5, 4, 3, 2, 1])
self.assertAllEqual(self.evaluate(math_ops.bincount([])), [])
self.assertAllEqual(self.evaluate(math_ops.bincount([0, 0, 0])), [3])
self.assertAllEqual(self.evaluate(math_ops.bincount([5])),
[0, 0, 0, 0, 0, 1])
self.assertAllEqual(self.evaluate(math_ops.bincount(np.arange(10000))),
np.ones(10000))
def test_maxlength(self):
with self.session(use_gpu=True):
self.assertAllEqual(self.evaluate(math_ops.bincount([5], maxlength=3)),
[0, 0, 0])
self.assertAllEqual(self.evaluate(math_ops.bincount([1], maxlength=3)),
[0, 1])
self.assertAllEqual(self.evaluate(math_ops.bincount([], maxlength=3)),
[])
def test_random_with_weights(self):
num_samples = 10000
with self.session(use_gpu=True):
np.random.seed(42)
for dtype in [dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64]:
arr = np.random.randint(0, 1000, num_samples)
if dtype == dtypes.int32 or dtype == dtypes.int64:
weights = np.random.randint(-100, 100, num_samples)
else:
weights = np.random.random(num_samples)
self.assertAllClose(
self.evaluate(math_ops.bincount(arr, weights)),
np.bincount(arr, weights))
def test_random_without_weights(self):
num_samples = 10000
with self.session(use_gpu=True):
np.random.seed(42)
for dtype in [np.int32, np.float32]:
arr = np.random.randint(0, 1000, num_samples)
weights = np.ones(num_samples).astype(dtype)
self.assertAllClose(
self.evaluate(math_ops.bincount(arr, None)),
np.bincount(arr, weights))
def test_zero_weights(self):
with self.session(use_gpu=True):
self.assertAllEqual(
self.evaluate(math_ops.bincount(np.arange(1000), np.zeros(1000))),
np.zeros(1000))
def test_negative(self):
# unsorted_segment_sum will only report InvalidArgumentError on CPU
with self.cached_session(), ops.device("/CPU:0"):
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(math_ops.bincount([1, 2, 3, -1, 6, 8]))
@test_util.run_deprecated_v1
def test_shape_function(self):
# size must be scalar.
with self.assertRaisesRegexp(
ValueError, "Shape must be rank 0 but is rank 1 for .*Bincount"):
gen_math_ops.bincount([1, 2, 3, -1, 6, 8], [1], [])
# size must be positive.
with self.assertRaisesRegexp(ValueError, "must be non-negative"):
gen_math_ops.bincount([1, 2, 3, -1, 6, 8], -5, [])
# if size is a constant then the shape is known.
v1 = gen_math_ops.bincount([1, 2, 3, -1, 6, 8], 5, [])
self.assertAllEqual(v1.get_shape().as_list(), [5])
# if size is a placeholder then the shape is unknown.
s = array_ops.placeholder(dtype=dtypes.int32)
v2 = gen_math_ops.bincount([1, 2, 3, -1, 6, 8], s, [])
self.assertAllEqual(v2.get_shape().as_list(), [None])
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
kaoscoach/crits | crits/actors/handlers.py | 12 | 24902 | import json
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.template.loader import render_to_string
from crits.actors.actor import Actor, ActorIdentifier, ActorThreatIdentifier
from crits.core.crits_mongoengine import EmbeddedCampaign, json_handler
from crits.core.crits_mongoengine import create_embedded_source
from crits.core.forms import DownloadFileForm
from crits.core.handlers import build_jtable, jtable_ajax_list, jtable_ajax_delete
from crits.core.handlers import csv_export
from crits.core.user_tools import is_admin, is_user_subscribed, user_sources
from crits.core.user_tools import is_user_favorite
from crits.notifications.handlers import remove_user_from_notification
from crits.services.handlers import run_triage, get_supported_services
from crits.vocabulary.actors import (
ThreatTypes,
Motivations,
Sophistications,
IntendedEffects
)
def generate_actor_identifier_csv(request):
"""
    Generate a CSV file of the Actor Identifier information.
:param request: The request for this CSV.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
response = csv_export(request, ActorIdentifier)
return response
def generate_actor_csv(request):
"""
    Generate a CSV file of the Actor information.
:param request: The request for this CSV.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
response = csv_export(request, Actor)
return response
def generate_actor_identifier_jtable(request, option):
"""
Generate the jtable data for rendering in the list template.
:param request: The request for this jtable.
:type request: :class:`django.http.HttpRequest`
:param option: Action to take.
:type option: str of either 'jtlist', 'jtdelete', or 'inline'.
:returns: :class:`django.http.HttpResponse`
"""
obj_type = ActorIdentifier
type_ = "actor_identifier"
mapper = obj_type._meta['jtable_opts']
if option == "jtlist":
# Sets display url
details_url = mapper['details_url']
details_url_key = mapper['details_url_key']
fields = mapper['fields']
response = jtable_ajax_list(obj_type,
details_url,
details_url_key,
request,
includes=fields)
return HttpResponse(json.dumps(response,
default=json_handler),
content_type="application/json")
if option == "jtdelete":
response = {"Result": "ERROR"}
if jtable_ajax_delete(obj_type, request):
obj_id = request.POST.get('id', None)
if obj_id:
# Remove this identifier from any Actors who reference it.
Actor.objects(identifiers__identifier_id=obj_id)\
.update(pull__identifiers__identifier_id=obj_id)
response = {"Result": "OK"}
return HttpResponse(json.dumps(response,
default=json_handler),
content_type="application/json")
jtopts = {
'title': "Actor Identifiers",
'default_sort': mapper['default_sort'],
'listurl': reverse('crits.actors.views.%ss_listing' %
(type_), args=('jtlist',)),
'deleteurl': reverse('crits.actors.views.%ss_listing' %
(type_), args=('jtdelete',)),
'searchurl': reverse(mapper['searchurl']),
'fields': mapper['jtopts_fields'],
'hidden_fields': mapper['hidden_fields'],
'linked_fields': mapper['linked_fields'],
'details_link': mapper['details_link'],
'no_sort': mapper['no_sort']
}
jtable = build_jtable(jtopts, request)
for field in jtable['fields']:
if field['fieldname'] == "'name'":
url = reverse('crits.actors.views.actors_listing')
field['display'] = """ function (data) {
return '<a href="%s?q='+data.record.id+'&search_type=actor_identifier&force_full=1">'+data.record.name+'</a>';
}
""" % url
break
jtable['toolbar'] = [
{
'tooltip': "'Add Actor Identifier'",
'text': "'Add Actor Identifier'",
'click': "function () {$('#new-actor-identifier').click()}",
},
]
if option == "inline":
return render_to_response("jtable.html",
{'jtable': jtable,
'jtid': '%s_listing' % type_,
'button': '%ss_tab' % type_},
RequestContext(request))
else:
return render_to_response("%s_listing.html" % type_,
{'jtable': jtable,
'jtid': '%s_listing' % type_},
RequestContext(request))
def generate_actor_jtable(request, option):
"""
Generate the jtable data for rendering in the list template.
:param request: The request for this jtable.
:type request: :class:`django.http.HttpRequest`
:param option: Action to take.
:type option: str of either 'jtlist', 'jtdelete', or 'inline'.
:returns: :class:`django.http.HttpResponse`
"""
obj_type = Actor
type_ = "actor"
mapper = obj_type._meta['jtable_opts']
if option == "jtlist":
# Sets display url
details_url = mapper['details_url']
details_url_key = mapper['details_url_key']
fields = mapper['fields']
response = jtable_ajax_list(obj_type,
details_url,
details_url_key,
request,
includes=fields)
return HttpResponse(json.dumps(response,
default=json_handler),
content_type="application/json")
if option == "jtdelete":
response = {"Result": "ERROR"}
if jtable_ajax_delete(obj_type, request):
response = {"Result": "OK"}
return HttpResponse(json.dumps(response,
default=json_handler),
content_type="application/json")
jtopts = {
'title': "Actors",
'default_sort': mapper['default_sort'],
'listurl': reverse('crits.%ss.views.%ss_listing' %
(type_, type_), args=('jtlist',)),
'deleteurl': reverse('crits.%ss.views.%ss_listing' %
(type_, type_), args=('jtdelete',)),
'searchurl': reverse(mapper['searchurl']),
'fields': mapper['jtopts_fields'],
'hidden_fields': mapper['hidden_fields'],
'linked_fields': mapper['linked_fields'],
'details_link': mapper['details_link'],
'no_sort': mapper['no_sort']
}
jtable = build_jtable(jtopts, request)
jtable['toolbar'] = [
{
'tooltip': "'Add Actor'",
'text': "'Add Actor'",
'click': "function () {$('#new-actor').click()}",
},
]
if option == "inline":
return render_to_response("jtable.html",
{'jtable': jtable,
'jtid': '%s_listing' % type_,
'button': '%ss_tab' % type_},
RequestContext(request))
else:
return render_to_response("%s_listing.html" % type_,
{'jtable': jtable,
'jtid': '%s_listing' % type_},
RequestContext(request))
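# Both jtable handlers above follow CRITs' common listing protocol: 'jtlist'
# returns JSON records, 'jtdelete' removes one record, and 'inline' renders
# the table for embedding in a tab. A rough smoke test with Django's test
# client might look like this (sketch; assumes an authenticated session):
#
#   from django.test import Client
#   resp = Client().get(reverse('crits.actors.views.actors_listing',
#                               args=('jtlist',)))
#   data = json.loads(resp.content)  # expect keys like 'Result' and 'Records'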
def get_actor_details(id_, analyst):
"""
Generate the data to render the Actor details template.
:param id_: The Actor ObjectId to get details for.
    :type id_: str
:param analyst: The user requesting this information.
:type analyst: str
:returns: template (str), arguments (dict)
"""
allowed_sources = user_sources(analyst)
actor = Actor.objects(id=id_, source__name__in=allowed_sources).first()
template = None
args = {}
if not actor:
template = "error.html"
error = ('Either no data exists for this Actor or you do not have'
' permission to view it.')
args = {'error': error}
else:
actor.sanitize("%s" % analyst)
# remove pending notifications for user
remove_user_from_notification("%s" % analyst, actor.id, 'Actor')
download_form = DownloadFileForm(initial={"obj_type": 'Actor',
"obj_id": actor.id})
# generate identifiers
actor_identifiers = actor.generate_identifiers_list(analyst)
# subscription
subscription = {
'type': 'Actor',
'id': actor.id,
'subscribed': is_user_subscribed("%s" % analyst, 'Actor', actor.id),
}
#objects
objects = actor.sort_objects()
#relationships
relationships = actor.sort_relationships("%s" % analyst, meta=True)
# relationship
relationship = {
'type': 'Actor',
'value': actor.id
}
#comments
comments = {'comments': actor.get_comments(),
'url_key': actor.id}
#screenshots
screenshots = actor.get_screenshots(analyst)
# favorites
favorite = is_user_favorite("%s" % analyst, 'Actor', actor.id)
# services
service_list = get_supported_services('Actor')
# analysis results
service_results = actor.get_analysis_results()
args = {'actor_identifiers': actor_identifiers,
'objects': objects,
'download_form': download_form,
'relationships': relationships,
'relationship': relationship,
'subscription': subscription,
'favorite': favorite,
'service_list': service_list,
'service_results': service_results,
'screenshots': screenshots,
'actor': actor,
'actor_id': id_,
'comments': comments}
return template, args
def get_actor_by_name(allowed_sources, actor):
"""
Get an Actor from the database by name.
:param allowed_sources: The sources this Actor is allowed to have.
:type allowed_sources: list
    :param actor: The Actor name to find.
:type actor: str
:returns: :class:`crits.actors.actor.Actor`
"""
actor = Actor.objects(name=actor, source__name__in=allowed_sources).first()
return actor
def add_new_actor(name, aliases=None, description=None, source=None,
source_method='', source_reference='', campaign=None,
confidence=None, analyst=None, bucket_list=None, ticket=None):
"""
Add an Actor to CRITs.
:param name: The name of the Actor.
:type name: str
:param aliases: Aliases for the actor.
:type aliases: list or str
:param description: Description of the actor.
:type description: str
:param source: Name of the source which provided this information.
:type source: str
:param source_method: Method of acquiring this data.
:type source_method: str
:param source_reference: A reference to this data.
:type source_reference: str
:param campaign: A campaign to attribute to this actor.
:type campaign: str
:param confidence: Confidence level in the campaign attribution.
:type confidence: str ("low", "medium", "high")
:param analyst: The user adding this actor.
:type analyst: str
:param bucket_list: Buckets to assign to this actor.
:type bucket_list: str
:param ticket: Ticket to assign to this actor.
:type ticket: str
:returns: dict with keys:
"success" (boolean),
"message" (str),
"object" (if successful) :class:`crits.actors.actor.Actor`
"""
is_item_new = False
retVal = {}
actor = Actor.objects(name=name).first()
if not actor:
actor = Actor()
actor.name = name
if description:
actor.description = description.strip()
is_item_new = True
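    # Normalize a plain source string into a list containing one embedded source.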
if isinstance(source, basestring):
source = [create_embedded_source(source,
reference=source_reference,
method=source_method,
analyst=analyst)]
if isinstance(campaign, basestring):
c = EmbeddedCampaign(name=campaign, confidence=confidence, analyst=analyst)
campaign = [c]
if campaign:
for camp in campaign:
actor.add_campaign(camp)
if source:
for s in source:
actor.add_source(s)
else:
return {"success" : False, "message" : "Missing source information."}
    if aliases is None:
        aliases = []
    elif not isinstance(aliases, list):
        aliases = aliases.split(',')
    for alias in aliases:
        alias = alias.strip()
        if alias and alias not in actor.aliases:
            actor.aliases.append(alias)
if bucket_list:
actor.add_bucket_list(bucket_list, analyst)
if ticket:
actor.add_ticket(ticket, analyst)
actor.save(username=analyst)
# run actor triage
if is_item_new:
actor.reload()
run_triage(actor, analyst)
resp_url = reverse('crits.actors.views.actor_detail', args=[actor.id])
retVal['message'] = ('Success! Click here to view the new Actor: '
'<a href="%s">%s</a>' % (resp_url, actor.name))
retVal['success'] = True
retVal['object'] = actor
retVal['id'] = str(actor.id)
return retVal
def actor_remove(id_, username):
"""
Remove an Actor from CRITs.
:param id_: The ObjectId of the Actor to remove.
:type id_: str
:param username: The user removing this Actor.
:type username: str
:returns: dict with keys "success" (boolean) and "message" (str) if failed.
"""
if is_admin(username):
actor = Actor.objects(id=id_).first()
if actor:
actor.delete(username=username)
return {'success': True}
else:
return {'success': False, 'message': 'Could not find Actor.'}
else:
return {'success': False, 'message': 'Must be an admin to remove'}
def create_actor_identifier_type(username, identifier_type):
"""
Add a new Actor Identifier Type.
:param username: The CRITs user adding the identifier type.
:type username: str
:param identifier_type: The Identifier Type.
:type identifier_type: str
:returns: dict with keys:
"success" (boolean),
"message" (str) if failed.
"""
identifier = ActorThreatIdentifier.objects(name=identifier_type).first()
if identifier:
return {'success': False,
'message': 'Identifier Type already exists!'}
else:
identifier = ActorThreatIdentifier()
identifier.name = identifier_type
identifier.save(username=username)
return {'success': True,
'message': 'Identifier Type added successfully!'}
def get_actor_tags_by_type(tag_type):
"""
Get Actor tags based on type. These are tags that could be used for
attribution.
:param tag_type: The type of tags to get.
:type tag_type: str
    :returns: list
"""
if tag_type == 'ActorIntendedEffect':
return IntendedEffects.values(sort=True)
elif tag_type == 'ActorMotivation':
return Motivations.values(sort=True)
elif tag_type == 'ActorSophistication':
return Sophistications.values(sort=True)
elif tag_type == 'ActorThreatType':
return ThreatTypes.values(sort=True)
else:
return []
def update_actor_tags(id_, tag_type, tags, user, **kwargs):
"""
Update a subset of tags for an Actor.
:param id_: The ObjectId of the Actor to update.
:type id_: str
:param tag_type: The type of tag we are updating.
:type tag_type: str
:param tags: The tags we are setting.
    :type tags: list
    :param user: The user updating the tags.
    :type user: str
    :returns: dict
"""
actor = Actor.objects(id=id_).first()
if not actor:
return {'success': False,
'message': 'No actor could be found.'}
else:
actor.update_tags(tag_type, tags)
actor.save(username=user)
return {'success': True}
def add_new_actor_identifier(identifier_type, identifier=None, source=None,
source_method='', source_reference='',
analyst=None):
"""
Add an Actor Identifier to CRITs.
:param identifier_type: The Actor Identifier Type.
:type identifier_type: str
:param identifier: The Actor Identifier.
:type identifier: str
:param source: Name of the source which provided this information.
:type source: str
:param source_method: Method of acquiring this data.
:type source_method: str
:param source_reference: A reference to this data.
:type source_reference: str
:param analyst: The user adding this actor.
:type analyst: str
:returns: dict with keys:
"success" (boolean),
"message" (str),
"""
actor_identifier = ActorIdentifier.objects(identifier_type=identifier_type,
name=identifier).first()
if not actor_identifier:
actor_identifier = ActorIdentifier()
actor_identifier.set_identifier_type(identifier_type)
if not actor_identifier.identifier_type:
return {'success': False,
'message': "Unknown Identifier Type"}
if not identifier:
return {'success': False,
'message': "Missing Identifier"}
actor_identifier.name = identifier.strip()
if isinstance(source, basestring):
source = [create_embedded_source(source,
reference=source_reference,
method=source_method,
analyst=analyst)]
if source:
for s in source:
actor_identifier.add_source(s)
else:
return {"success" : False, "message" : "Missing source information."}
actor_identifier.save(username=analyst)
actor_identifier.reload()
return {'success': True,
'id': str(actor_identifier.id),
'message': "Actor Identifier added successfully!"}
def actor_identifier_types(active=True):
"""
Get the available Actor Identifier Types.
    :param active: If True, get only active types; otherwise only inactive ones.
    :type active: boolean
    :returns: dict with key "items" (list of type names)
"""
if active:
its = ActorThreatIdentifier.objects(active="on").order_by('+name')
else:
its = ActorThreatIdentifier.objects(active="off").order_by('+name')
it_list = [i.name for i in its]
return {'items': it_list}
def actor_identifier_type_values(type_=None, username=None):
"""
Get the available Actor Identifier Type values.
    :param type_: The Actor Identifier Type to get values for.
    :type type_: str
    :param username: The user requesting the values.
    :type username: str
    :returns: dict with key "items" (list of (name, name) tuples)
"""
result = {}
if username and type_:
sources = user_sources(username)
ids = ActorIdentifier.objects(active="on",
identifier_type=type_,
source__name__in=sources).order_by('+name')
result['items'] = [(i.name, i.name) for i in ids]
else:
result['items'] = []
return result
def attribute_actor_identifier(id_, identifier_type, identifier=None,
confidence="low", user=None, **kwargs):
"""
Attribute an Actor Identifier to an Actor in CRITs.
:param id_: The Actor ObjectId.
:type id_: str
:param identifier_type: The Actor Identifier Type.
:type identifier_type: str
:param identifier: The Actor Identifier.
:type identifier: str
:param user: The user attributing this identifier.
:type user: str
:returns: dict with keys:
"success" (boolean),
"message" (str),
"""
sources = user_sources(user)
admin = is_admin(user)
actor = Actor.objects(id=id_,
source__name__in=sources).first()
if not actor:
return {'success': False,
'message': "Could not find actor"}
c = len(actor.identifiers)
actor.attribute_identifier(identifier_type, identifier, confidence, user)
actor.save(username=user)
actor.reload()
actor_identifiers = actor.generate_identifiers_list(user)
if len(actor.identifiers) <= c:
return {'success': False,
'message': "Invalid data submitted or identifier is already attributed."}
html = render_to_string('actor_identifiers_widget.html',
{'actor_identifiers': actor_identifiers,
'admin': admin,
'actor_id': str(actor.id)})
return {'success': True,
'message': html}
def set_identifier_confidence(id_, identifier=None, confidence="low",
user=None, **kwargs):
"""
Set the Identifier attribution confidence.
:param id_: The ObjectId of the Actor.
:param identifier: The Actor Identifier ObjectId.
:type identifier: str
:param confidence: The confidence level.
:type confidence: str
:param user: The user editing this identifier.
:type user: str
:returns: dict with keys:
"success" (boolean),
"message" (str),
"""
sources = user_sources(user)
actor = Actor.objects(id=id_,
source__name__in=sources).first()
if not actor:
return {'success': False,
'message': "Could not find actor"}
actor.set_identifier_confidence(identifier, confidence)
actor.save(username=user)
return {'success': True}
def remove_attribution(id_, identifier=None, user=None, **kwargs):
"""
Remove an attributed identifier.
:param id_: The ObjectId of the Actor.
:param identifier: The Actor Identifier ObjectId.
:type identifier: str
:param user: The user removing this attribution.
:type user: str
:returns: dict with keys:
"success" (boolean),
"message" (str),
"""
sources = user_sources(user)
admin = is_admin(user)
actor = Actor.objects(id=id_,
source__name__in=sources).first()
if not actor:
return {'success': False,
'message': "Could not find actor"}
actor.remove_attribution(identifier)
actor.save(username=user)
actor.reload()
actor_identifiers = actor.generate_identifiers_list(user)
html = render_to_string('actor_identifiers_widget.html',
{'actor_identifiers': actor_identifiers,
'admin': admin,
'actor_id': str(actor.id)})
return {'success': True,
'message': html}
def set_actor_name(id_, name, user, **kwargs):
"""
Set an Actor name.
:param id_: Actor ObjectId.
:type id_: str
:param name: The new name.
:type name: str
:param user: The user updating the name.
:type user: str
:returns: dict with keys:
"success" (boolean),
"message" (str),
"""
sources = user_sources(user)
actor = Actor.objects(id=id_,
source__name__in=sources).first()
if not actor:
return {'success': False,
'message': "Could not find actor"}
actor.name = name.strip()
actor.save(username=user)
return {'success': True}
def update_actor_aliases(id_, aliases, user, **kwargs):
"""
Update aliases for an Actor.
:param id_: The ObjectId of the Actor to update.
:type id_: str
:param aliases: The aliases we are setting.
:type aliases: list
:param user: The user updating the aliases.
:type user: str
:returns: dict
"""
sources = user_sources(user)
actor = Actor.objects(id=id_,
source__name__in=sources).first()
if not actor:
return {'success': False,
'message': 'No actor could be found.'}
else:
actor.update_aliases(aliases)
actor.save(username=user)
return {'success': True}
| mit |
sunqm/pyscf | pyscf/df/test/test_df_hessian.py | 2 | 1644 | # Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
import os
import unittest
import tempfile
import numpy
from pyscf import lib
from pyscf import gto
from pyscf import scf
from pyscf import ao2mo
from pyscf import df
from pyscf import hessian
mol = gto.Mole()
mol.build(
verbose = 0,
atom = '''O 0 0. 0.
1 0 -0.757 0.587
1 0 0.757 0.587''',
basis = '6-31g',
)
def tearDownModule():
global mol
del mol
class KnownValues(unittest.TestCase):
def test_rhf_hess(self):
gref = scf.RHF(mol).run().Hessian().kernel()
g1 = scf.RHF(mol).density_fit().run().Hessian().kernel()
self.assertAlmostEqual(abs(gref - g1).max(), 0, 3)
def test_uks_hess(self):
gref = mol.UKS.run(xc='b3lyp').Hessian().kernel()
g1 = mol.UKS.density_fit().run(xc='b3lyp').Hessian().kernel()
self.assertAlmostEqual(abs(gref - g1).max(), 0, 3)
#
if __name__ == "__main__":
print("Full Tests for df.hessian")
unittest.main()
| apache-2.0 |
lebek/reversible-raytracer | test_1ball.py | 1 | 1798 | import os
import numpy as np
import theano.tensor as T
import theano
from scipy import misc
from autoencoder_1obj import Autoencoder_1obj
from transform import *
from scene import *
from shader import *
from optimize import *
if not os.path.exists('output'):
os.makedirs('output')
train_data = np.array([misc.imread('example.png').flatten()], dtype='float32')/255.0
N,D = train_data.shape
img_sz = int(np.sqrt(D))
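# Build a one-sphere scene lit by a single light and rendered with a depth-map shader.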
def scene(center1, scale1):
material1 = Material((0.2, 0.9, 0.4), 0.3, 0.7, 0.5, 50.)
t1 = translate(center1) * scale(scale1)
shapes = [
Sphere(t1, material1)
]
light = Light((-1., -1., 2.), (0.961, 1., 0.87))
camera = Camera(img_sz, img_sz)
shader = DepthMapShader(6.1)
scene = Scene(shapes, [light], camera, shader)
return scene.build()
ae = Autoencoder_1obj(scene, D, 300, 30, 10)
opt = MGDAutoOptimizer(ae)
recon = ae.get_reconstruct(train_data[0])[:,:,0].eval()
imsave('output/test0.png', recon)
epsilon = 0.0001
num_epoch = 100
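# Compile the Theano training step plus helpers for reconstruction and the latent scene variables.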
train_ae = opt.optimize(train_data)
get_recon = theano.function([], ae.get_reconstruct(train_data[0])[:,:,0])
get_rvars= theano.function([], ae.encoder(train_data[0]))
rvars = get_rvars()
print '...Initial center1 (%g,%g,%g) scale1 (%g,%g,%g)' % (
rvars[0], rvars[1], rvars[2], rvars[3], rvars[4], rvars[5])
print recon.sum()
n = 0
while n < num_epoch:
    n += 1
eps = get_epsilon(epsilon, num_epoch, n)
train_loss = train_ae(eps)
rvars = get_rvars()
print 'Epoch %d Train loss %g, Center (%g, %g, %g) Scale (%g, %g, %g)' \
% (n, train_loss, rvars[0], rvars[1], rvars[2],
rvars[3], rvars[4], rvars[5])
image = get_recon()
imsave('output/test%d.png' % (n,), image)
| mit |
blueburningcoder/pybrain | pybrain/tools/neuralnets.py | 26 | 13763 | # Neural network data analysis tool collection. Makes heavy use of the logging module.
# Can generate training curves during the run (from properly setup IPython and/or with
# TkAgg backend and interactive mode - see matplotlib documentation).
__author__ = "Martin Felder"
__version__ = "$Id$"
from pylab import ion, figure, draw
import csv
from numpy import Infinity
import logging
from pybrain.datasets import ClassificationDataSet, SequentialDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised import BackpropTrainer, RPropMinusTrainer, Trainer
from pybrain.structure import SoftmaxLayer, LSTMLayer
from pybrain.utilities import setAllArgs
from pybrain.tools.plotting import MultilinePlotter
from pybrain.tools.validation import testOnSequenceData, ModuleValidator, Validator
from pybrain.tools.customxml import NetworkWriter
class NNtools(object):
""" Abstract class providing basic functionality to make neural network training more comfortable """
def __init__(self, DS, **kwargs):
""" Initialize with the training data set DS. All keywords given are set as member variables.
The following are particularly important:
:key hidden: number of hidden units
:key TDS: test data set for checking convergence
:key VDS: validation data set for final performance evaluation
:key epoinc: number of epochs to train for, before checking convergence (default: 5)
"""
self.DS = DS
self.hidden = 10
self.maxepochs = 1000
self.Graph = None
self.TDS = None
self.VDS = None
self.epoinc = 5
setAllArgs(self, kwargs)
self.trainCurve = None
def initGraphics(self, ymax=10, xmax= -1):
""" initialize the interactive graphics output window, and return a handle to the plot """
if xmax < 0:
xmax = self.maxepochs
figure(figsize=[12, 8])
ion()
draw()
#self.Graph = MultilinePlotter(autoscale=1.2 ) #xlim=[0, self.maxepochs], ylim=[0, ymax])
self.Graph = MultilinePlotter(xlim=[0, xmax], ylim=[0, ymax])
self.Graph.setLineStyle([0, 1], linewidth=2)
return self.Graph
def set(self, **kwargs):
""" convenience method to set several member variables at once """
setAllArgs(self, kwargs)
def saveTrainingCurve(self, learnfname):
""" save the training curves into a file with the given name (CSV format) """
logging.info('Saving training curves into ' + learnfname)
        if self.trainCurve is None:
            logging.error('No training curve available for saving!')
            return
learnf = open(learnfname, "wb")
writer = csv.writer(learnf, dialect='excel')
nDataSets = len(self.trainCurve)
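        # Write one CSV row per recorded epoch, with one column per stored curve.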
for i in range(1, len(self.trainCurve[0]) - 1):
writer.writerow([self.trainCurve[k][i] for k in range(nDataSets)])
learnf.close()
def saveNetwork(self, fname):
""" save the trained network to a file """
NetworkWriter.writeToFile(self.Trainer.module, fname)
logging.info("Network saved to: " + fname)
#=======================================================================================================
class NNregression(NNtools):
""" Learns to numerically predict the targets of a set of data, with optional online progress plots. """
def setupNN(self, trainer=RPropMinusTrainer, hidden=None, **trnargs):
""" Constructs a 3-layer FNN for regression. Optional arguments are passed on to the Trainer class. """
if hidden is not None:
self.hidden = hidden
logging.info("Constructing FNN with following config:")
FNN = buildNetwork(self.DS.indim, self.hidden, self.DS.outdim)
logging.info(str(FNN) + "\n Hidden units:\n " + str(self.hidden))
logging.info("Training FNN with following special arguments:")
logging.info(str(trnargs))
self.Trainer = trainer(FNN, dataset=self.DS, **trnargs)
def runTraining(self, convergence=0, **kwargs):
""" Trains the network on the stored dataset. If convergence is >0, check after that many epoch increments
whether test error is going down again, and stop training accordingly.
CAVEAT: No support for Sequential datasets!"""
assert isinstance(self.Trainer, Trainer)
if self.Graph is not None:
self.Graph.setLabels(x='epoch', y='normalized regression error')
self.Graph.setLegend(['training', 'test'], loc='upper right')
epoch = 0
inc = self.epoinc
best_error = Infinity
best_epoch = 0
learncurve_x = [0]
learncurve_y = [0.0]
valcurve_y = [0.0]
converged = False
convtest = 0
if convergence > 0:
logging.info("Convergence criterion: %d batches of %d epochs w/o improvement" % (convergence, inc))
while epoch <= self.maxepochs and not converged:
self.Trainer.trainEpochs(inc)
epoch += inc
learncurve_x.append(epoch)
# calculate errors on TRAINING data
err_trn = ModuleValidator.validate(Validator.MSE, self.Trainer.module, self.DS)
learncurve_y.append(err_trn)
if self.TDS is None:
logging.info("epoch: %6d, err_trn: %10g" % (epoch, err_trn))
else:
# calculate same errors on TEST data
err_tst = ModuleValidator.validate(Validator.MSE, self.Trainer.module, self.TDS)
valcurve_y.append(err_tst)
if err_tst < best_error:
# store best error and parameters
best_epoch = epoch
best_error = err_tst
bestweights = self.Trainer.module.params.copy()
convtest = 0
else:
convtest += 1
logging.info("epoch: %6d, err_trn: %10g, err_tst: %10g, best_tst: %10g" % (epoch, err_trn, err_tst, best_error))
if self.Graph is not None:
self.Graph.addData(1, epoch, err_tst)
                # check if convergence criterion is fulfilled (no improvement after N epoincs)
if convtest >= convergence:
converged = True
if self.Graph is not None:
self.Graph.addData(0, epoch, err_trn)
self.Graph.update()
# training finished!
logging.info("Best epoch: %6d, with error: %10g" % (best_epoch, best_error))
if self.VDS is not None:
# calculate same errors on VALIDATION data
self.Trainer.module.params[:] = bestweights.copy()
err_val = ModuleValidator.validate(Validator.MSE, self.Trainer.module, self.VDS)
logging.info("Result on evaluation data: %10g" % err_val)
# store training curve for saving into file
self.trainCurve = (learncurve_x, learncurve_y, valcurve_y)
#=======================================================================================================
class NNclassifier(NNtools):
""" Learns to classify a set of data, with optional online progress plots. """
def __init__(self, DS, **kwargs):
""" Initialize the classifier: the least we need is the dataset to be classified. All keywords given are set as member variables. """
if not isinstance(DS, ClassificationDataSet):
raise TypeError('Need a ClassificationDataSet to do classification!')
NNtools.__init__(self, DS, **kwargs)
self.nClasses = self.DS.nClasses # need this because targets may be altered later
self.clsnames = None
self.targetsAreOneOfMany = False
def _convertAllDataToOneOfMany(self, values=[0, 1]):
""" converts all datasets associated with self into 1-out-of-many representations,
e.g. with original classes 0 to 4, the new target for class 1 would be [0,1,0,0,0],
or accordingly with other upper and lower bounds, as given by the values keyword """
if self.targetsAreOneOfMany:
return
else:
# convert all datasets to one-of-many ("winner takes all") representation
for dsname in ["DS", "TDS", "VDS"]:
d = getattr(self, dsname)
if d is not None:
if d.outdim < d.nClasses:
d._convertToOneOfMany(values)
self.targetsAreOneOfMany = True
def setupNN(self, trainer=RPropMinusTrainer, hidden=None, **trnargs):
""" Setup FNN and trainer for classification. """
self._convertAllDataToOneOfMany()
if hidden is not None:
self.hidden = hidden
FNN = buildNetwork(self.DS.indim, self.hidden, self.DS.outdim, outclass=SoftmaxLayer)
logging.info("Constructing classification FNN with following config:")
logging.info(str(FNN) + "\n Hidden units:\n " + str(self.hidden))
logging.info("Trainer received the following special arguments:")
logging.info(str(trnargs))
self.Trainer = trainer(FNN, dataset=self.DS, **trnargs)
def setupRNN(self, trainer=BackpropTrainer, hidden=None, **trnargs):
""" Setup an LSTM RNN and trainer for sequence classification. """
if hidden is not None:
self.hidden = hidden
self._convertAllDataToOneOfMany()
RNN = buildNetwork(self.DS.indim, self.hidden, self.DS.outdim, hiddenclass=LSTMLayer,
recurrent=True, outclass=SoftmaxLayer)
logging.info("Constructing classification RNN with following config:")
logging.info(str(RNN) + "\n Hidden units:\n " + str(self.hidden))
logging.info("Trainer received the following special arguments:")
logging.info(str(trnargs))
self.Trainer = trainer(RNN, dataset=self.DS, **trnargs)
def runTraining(self, convergence=0, **kwargs):
""" Trains the network on the stored dataset. If convergence is >0, check after that many epoch increments
whether test error is going down again, and stop training accordingly. """
assert isinstance(self.Trainer, Trainer)
if self.Graph is not None:
self.Graph.setLabels(x='epoch', y='% classification error')
self.Graph.setLegend(['training', 'test'], loc='lower right')
epoch = 0
inc = self.epoinc
best_error = 100.0
best_epoch = 0
learncurve_x = [0]
learncurve_y = [0.0]
valcurve_y = [0.0]
converged = False
convtest = 0
if convergence > 0:
logging.info("Convergence criterion: %d batches of %d epochs w/o improvement" % (convergence, inc))
while epoch <= self.maxepochs and not converged:
self.Trainer.trainEpochs(inc)
epoch += inc
learncurve_x.append(epoch)
# calculate errors on TRAINING data
if isinstance(self.DS, SequentialDataSet):
r_trn = 100. * (1.0 - testOnSequenceData(self.Trainer.module, self.DS))
else:
# FIXME: messy - validation does not belong into the Trainer...
out, trueclass = self.Trainer.testOnClassData(return_targets=True)
r_trn = 100. * (1.0 - Validator.classificationPerformance(out, trueclass))
learncurve_y.append(r_trn)
if self.TDS is None:
logging.info("epoch: %6d, err_trn: %5.2f%%" % (epoch, r_trn))
else:
# calculate errors on TEST data
if isinstance(self.DS, SequentialDataSet):
r_tst = 100. * (1.0 - testOnSequenceData(self.Trainer.module, self.TDS))
else:
# FIXME: messy - validation does not belong into the Trainer...
out, trueclass = self.Trainer.testOnClassData(return_targets=True, dataset=self.TDS)
r_tst = 100. * (1.0 - Validator.classificationPerformance(out, trueclass))
valcurve_y.append(r_tst)
if r_tst < best_error:
best_epoch = epoch
best_error = r_tst
bestweights = self.Trainer.module.params.copy()
convtest = 0
else:
convtest += 1
logging.info("epoch: %6d, err_trn: %5.2f%%, err_tst: %5.2f%%, best_tst: %5.2f%%" % (epoch, r_trn, r_tst, best_error))
if self.Graph is not None:
self.Graph.addData(1, epoch, r_tst)
                # check if convergence criterion is fulfilled (no improvement after N epoincs)
if convtest >= convergence:
converged = True
if self.Graph is not None:
self.Graph.addData(0, epoch, r_trn)
self.Graph.update()
logging.info("Best epoch: %6d, with error: %5.2f%%" % (best_epoch, best_error))
if self.VDS is not None:
# calculate errors on VALIDATION data
self.Trainer.module.params[:] = bestweights.copy()
if isinstance(self.DS, SequentialDataSet):
r_val = 100. * (1.0 - testOnSequenceData(self.Trainer.module, self.VDS))
else:
out, trueclass = self.Trainer.testOnClassData(return_targets=True, dataset=self.VDS)
r_val = 100. * (1.0 - Validator.classificationPerformance(out, trueclass))
logging.info("Result on evaluation data: %5.2f%%" % r_val)
self.trainCurve = (learncurve_x, learncurve_y, valcurve_y)
| bsd-3-clause |
moorescloud/holideck | examples/twitter/oauth.py | 3 | 2938 | from __future__ import print_function
from twitter.auth import Auth
from time import time
from random import getrandbits
try:
import urllib.parse as urllib_parse
from urllib.parse import urlencode
PY3 = True
except ImportError:
import urllib2 as urllib_parse
from urllib import urlencode
PY3 = False
import hashlib
import hmac
import base64
def write_token_file(filename, oauth_token, oauth_token_secret):
"""
Write a token file to hold the oauth token and oauth token secret.
"""
oauth_file = open(filename, 'w')
print(oauth_token, file=oauth_file)
print(oauth_token_secret, file=oauth_file)
oauth_file.close()
def read_token_file(filename):
"""
Read a token file and return the oauth token and oauth token secret.
"""
f = open(filename)
return f.readline().strip(), f.readline().strip()
class OAuth(Auth):
"""
An OAuth authenticator.
"""
def __init__(self, token, token_secret, consumer_key, consumer_secret):
"""
Create the authenticator. If you are in the initial stages of
the OAuth dance and don't yet have a token or token_secret,
pass empty strings for these params.
"""
self.token = token
self.token_secret = token_secret
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
def encode_params(self, base_url, method, params):
params = params.copy()
if self.token:
params['oauth_token'] = self.token
params['oauth_consumer_key'] = self.consumer_key
params['oauth_signature_method'] = 'HMAC-SHA1'
params['oauth_version'] = '1.0'
params['oauth_timestamp'] = str(int(time()))
params['oauth_nonce'] = str(getrandbits(64))
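        # Sign with OAuth 1.0 HMAC-SHA1: the base string is METHOD&URL&PARAMS
        # (each part percent-encoded) and the key is consumer_secret&token_secret.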
enc_params = urlencode_noplus(sorted(params.items()))
key = self.consumer_secret + "&" + urllib_parse.quote(self.token_secret, '')
message = '&'.join(
urllib_parse.quote(i, '') for i in [method.upper(), base_url, enc_params])
signature = (base64.b64encode(hmac.new(
key.encode('ascii'), message.encode('ascii'), hashlib.sha1)
.digest()))
return enc_params + "&" + "oauth_signature=" + urllib_parse.quote(signature, '')
def generate_headers(self):
return {}
# apparently contrary to the HTTP RFCs, spaces in arguments must be encoded as
# %20 rather than '+' when constructing an OAuth signature (and therefore
# also in the request itself.)
# So here is a specialized version which does exactly that.
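# For example, urlencode([('q', 'a b')]) yields 'q=a+b', whereas the OAuth
# base string requires 'q=a%20b'.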
def urlencode_noplus(query):
if not PY3:
new_query = []
for k,v in query:
if type(k) is unicode: k = k.encode('utf-8')
if type(v) is unicode: v = v.encode('utf-8')
new_query.append((k, v))
query = new_query
return urlencode(query).replace("+", "%20")
| mit |
Kitware/girder | plugins/autojoin/plugin_tests/autojoin_test.py | 2 | 2418 | from girder.constants import AccessType
from girder.models.group import Group
from girder.models.user import User
from tests import base
import json
def setUpModule():
base.enabledPlugins.append('autojoin')
base.startServer()
def tearDownModule():
base.stopServer()
class AutoJoinTest(base.TestCase):
def setUp(self):
super().setUp()
self.users = [User().createUser(
'usr%s' % num, 'passwd', 'tst', 'usr', 'u%s@girder4.test' % num)
for num in [0, 1]]
def testAutoJoinBehavior(self):
admin, user = self.users
# create some groups
g1 = Group().createGroup('g1', admin)
g2 = Group().createGroup('g2', admin)
g3 = Group().createGroup('g3', admin)
# set auto join rules
rules = [
{
'pattern': '@girder2.test',
'groupId': str(g1['_id']),
'level': AccessType.ADMIN
},
{
'pattern': '@girder1.test',
'groupId': str(g2['_id']),
'level': AccessType.READ
},
{
'pattern': '@girder1.test',
'groupId': str(g3['_id']),
'level': AccessType.WRITE
},
]
params = {
'list': json.dumps([{'key': 'autojoin', 'value': rules}])
}
resp = self.request('/system/setting', user=admin, method='PUT', params=params)
self.assertStatusOk(resp)
# create users
user1 = User().createUser('user1', 'password', 'John', 'Doe', 'user1@girder1.test')
user2 = User().createUser('user2', 'password', 'John', 'Doe', 'user2@girder2.test')
user3 = User().createUser('user3', 'password', 'John', 'Doe', 'user3@girder3.test')
# check correct groups were joined
self.assertEqual(user1['groups'], [g2['_id'], g3['_id']])
self.assertEqual(user2['groups'], [g1['_id']])
self.assertEqual(user3['groups'], [])
# check correct access levels
g1 = Group().load(g1['_id'], force=True)
g3 = Group().load(g3['_id'], force=True)
self.assertIn(
{'id': user2['_id'], 'level': AccessType.ADMIN, 'flags': []},
g1['access']['users'])
self.assertIn(
{'id': user1['_id'], 'level': AccessType.WRITE, 'flags': []},
g3['access']['users'])
| apache-2.0 |
apache/stratos | components/org.apache.stratos.python.cartridge.agent/src/main/python/cartridge.agent/cartridge.agent/modules/event/tenant/events.py | 11 | 2910 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
from entity import *
class CompleteTenantEvent:
def __init__(self):
self.tenants = []
""" :type : list[Tenant] """
self.tenant_list_json = None
""" :type : str """
@staticmethod
def create_from_json(json_str):
json_obj = json.loads(json_str)
instance = CompleteTenantEvent()
instance.tenants = []
tenants_str = json_obj["tenants"] if "tenants" in json_obj else None
instance.tenant_list_json = tenants_str
if tenants_str is not None:
for tenant_str in tenants_str:
tenant_obj = Tenant(int(tenant_str["tenantId"]), tenant_str["tenantDomain"])
instance.tenants.append(tenant_obj)
return instance
class TenantSubscribedEvent:
def __init__(self):
self.tenant_id = None
""" :type : int """
self.service_name = None
""" :type : str """
self.cluster_ids = None
""" :type : list[str] """
@staticmethod
def create_from_json(json_str):
json_obj = json.loads(json_str)
instance = TenantSubscribedEvent()
instance.tenant_id = json_obj["tenantId"] if "tenantId" in json_obj else None
instance.service_name = json_obj["serviceName"] if "serviceName" in json_obj else None
instance.cluster_ids = json_obj["clusterIds"] if "clusterIds" in json_obj else None
return instance
class TenantUnsubscribedEvent:
def __init__(self):
self.tenant_id = None
""" :type : int """
self.service_name = None
""" :type : str """
self.cluster_ids = None
""" :type : list[str] """
@staticmethod
def create_from_json(json_str):
json_obj = json.loads(json_str)
instance = TenantUnsubscribedEvent()
instance.tenant_id = json_obj["tenantId"] if "tenantId" in json_obj else None
instance.service_name = json_obj["serviceName"] if "serviceName" in json_obj else None
instance.cluster_ids = json_obj["clusterIds"] if "clusterIds" in json_obj else None
return instance
| apache-2.0 |
mahak/nova | nova/tests/functional/test_servers_provider_tree.py | 3 | 33234 | # Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import os_traits
from oslo_log import log as logging
from oslo_utils.fixture import uuidsentinel as uuids
from nova import context
from nova import exception
from nova.tests.functional import integrated_helpers
from nova.virt import fake
LOG = logging.getLogger(__name__)
class ProviderTreeTests(integrated_helpers.ProviderUsageBaseTestCase):
compute_driver = 'fake.MediumFakeDriver'
# These must match the capabilities in
# nova.virt.fake.FakeDriver.capabilities
expected_fake_driver_capability_traits = set([
trait for trait in [
os_traits.COMPUTE_ACCELERATORS,
os_traits.COMPUTE_IMAGE_TYPE_RAW,
os_traits.COMPUTE_DEVICE_TAGGING,
os_traits.COMPUTE_NET_ATTACH_INTERFACE,
os_traits.COMPUTE_NET_ATTACH_INTERFACE_WITH_TAG,
os_traits.COMPUTE_VOLUME_ATTACH_WITH_TAG,
os_traits.COMPUTE_VOLUME_EXTEND,
os_traits.COMPUTE_VOLUME_MULTI_ATTACH,
os_traits.COMPUTE_TRUSTED_CERTS,
]
])
def setUp(self):
super(ProviderTreeTests, self).setUp()
# Before starting compute, placement has no providers registered
self.assertEqual([], self._get_all_providers())
# Start compute without mocking update_provider_tree. The fake driver
# doesn't implement the method, so this will cause us to start with the
# legacy get_available_resource()-based inventory discovery and
        # bootstrapping of placement data.
self.compute = self._start_compute(host='host1')
# Mock out update_provider_tree *after* starting compute with the
# (unmocked, default, unimplemented) version from the fake driver.
_p = mock.patch.object(fake.MediumFakeDriver, 'update_provider_tree')
self.addCleanup(_p.stop)
self.mock_upt = _p.start()
# The compute host should have been created in placement with
# appropriate inventory and no traits
rps = self._get_all_providers()
self.assertEqual(1, len(rps))
self.assertEqual(self.compute.host, rps[0]['name'])
self.host_uuid = self._get_provider_uuid_by_host(self.compute.host)
self.assertEqual({
'DISK_GB': {
'total': 1028,
'allocation_ratio': 1.0,
'max_unit': 1028,
'min_unit': 1,
'reserved': 0,
'step_size': 1,
},
'MEMORY_MB': {
'total': 8192,
'allocation_ratio': 1.5,
'max_unit': 8192,
'min_unit': 1,
'reserved': 512,
'step_size': 1,
},
'VCPU': {
'total': 10,
'allocation_ratio': 16.0,
'max_unit': 10,
'min_unit': 1,
'reserved': 0,
'step_size': 1,
},
}, self._get_provider_inventory(self.host_uuid))
self.expected_compute_node_traits = (
self.expected_fake_driver_capability_traits.union(
# The COMPUTE_NODE trait is always added
[os_traits.COMPUTE_NODE]))
self.assertCountEqual(self.expected_compute_node_traits,
self._get_provider_traits(self.host_uuid))
def _run_update_available_resource(self, startup):
self.compute.rt.update_available_resource(
context.get_admin_context(), self.compute.host, startup=startup)
def _run_update_available_resource_and_assert_raises(
self, exc=exception.ResourceProviderSyncFailed, startup=False):
"""Invoke ResourceTracker.update_available_resource and assert that it
results in ResourceProviderSyncFailed.
        _run_periodics is a little too high up in the call stack to be useful
for this, because ResourceTracker.update_available_resource_for_node
swallows all exceptions.
"""
self.assertRaises(exc, self._run_update_available_resource, startup)
def test_update_provider_tree_associated_info(self):
"""Inventory in some standard and custom resource classes. Standard
and custom traits. Aggregates. Custom resource class and trait get
created; inventory, traits, and aggregates get set properly.
"""
inv = {
'VCPU': {
'total': 10,
'reserved': 0,
'min_unit': 1,
'max_unit': 2,
'step_size': 1,
'allocation_ratio': 10.0,
},
'MEMORY_MB': {
'total': 1048576,
'reserved': 2048,
'min_unit': 1024,
'max_unit': 131072,
'step_size': 1024,
'allocation_ratio': 1.0,
},
'CUSTOM_BANDWIDTH': {
'total': 1250000,
'reserved': 10000,
'min_unit': 5000,
'max_unit': 250000,
'step_size': 5000,
'allocation_ratio': 8.0,
},
}
traits = set(['HW_CPU_X86_AVX', 'HW_CPU_X86_AVX2', 'CUSTOM_GOLD'])
aggs = set([uuids.agg1, uuids.agg2])
def update_provider_tree(prov_tree, nodename):
prov_tree.update_inventory(self.compute.host, inv)
prov_tree.update_traits(self.compute.host, traits)
prov_tree.update_aggregates(self.compute.host, aggs)
self.mock_upt.side_effect = update_provider_tree
self.assertNotIn('CUSTOM_BANDWIDTH', self._get_all_resource_classes())
self.assertNotIn('CUSTOM_GOLD', self._get_all_traits())
self._run_periodics()
self.assertIn('CUSTOM_BANDWIDTH', self._get_all_resource_classes())
self.assertIn('CUSTOM_GOLD', self._get_all_traits())
self.assertEqual(inv, self._get_provider_inventory(self.host_uuid))
self.assertCountEqual(
traits.union(self.expected_compute_node_traits),
self._get_provider_traits(self.host_uuid)
)
self.assertEqual(aggs,
set(self._get_provider_aggregates(self.host_uuid)))
def _update_provider_tree_multiple_providers(self, startup=False,
do_reshape=False):
r"""Make update_provider_tree create multiple providers, including an
additional root as a sharing provider; and some descendants in the
compute node's tree.
+---------------------------+ +--------------------------+
|uuid: self.host_uuid | |uuid: uuids.ssp |
|name: self.compute.host | |name: 'ssp' |
|inv: (per MediumFakeDriver)| |inv: DISK_GB=500 |
| VCPU=10 |...|traits: [MISC_SHARES_..., |
| MEMORY_MB=8192 | | STORAGE_DISK_SSD]|
| DISK_GB=1028 | |aggs: [uuids.agg] |
|aggs: [uuids.agg] | +--------------------------+
+---------------------------+
/ \
+-----------------+ +-----------------+
|uuid: uuids.numa1| |uuid: uuids.numa2|
|name: 'numa1' | |name: 'numa2' |
|inv: VCPU=10 | |inv: VCPU=20 |
| MEMORY_MB=1G| | MEMORY_MB=2G|
+-----------------+ +-----------------+
/ \ / \
+------------+ +------------+ +------------+ +------------+
|uuid: | |uuid: | |uuid: | |uuid: |
| uuids.pf1_1| | uuids.pf1_2| | uuids.pf2_1| | uuids.pf2_2|
|name: | |name: | |name: | |name: |
| 'pf1_1' | | 'pf1_2' | | 'pf2_1' | | 'pf2_2' |
|inv: | |inv: | |inv: | |inv: |
| ..NET_VF: 2| | ..NET_VF: 3| | ..NET_VF: 3| | ..NET_VF: 4|
|traits: | |traits: | |traits: | |traits: |
| ..PHYSNET_0| | ..PHYSNET_1| | ..PHYSNET_0| | ..PHYSNET_1|
+------------+ +------------+ +------------+ +------------+
"""
def update_provider_tree(prov_tree, nodename, allocations=None):
if do_reshape and allocations is None:
raise exception.ReshapeNeeded()
# Create a shared storage provider as a root
prov_tree.new_root('ssp', uuids.ssp)
prov_tree.update_traits(
'ssp', ['MISC_SHARES_VIA_AGGREGATE', 'STORAGE_DISK_SSD'])
prov_tree.update_aggregates('ssp', [uuids.agg])
prov_tree.update_inventory('ssp', {'DISK_GB': {'total': 500}})
# Compute node is in the same aggregate
prov_tree.update_aggregates(self.compute.host, [uuids.agg])
# Create two NUMA nodes as children
prov_tree.new_child('numa1', self.host_uuid, uuid=uuids.numa1)
prov_tree.new_child('numa2', self.host_uuid, uuid=uuids.numa2)
# Give the NUMA nodes the proc/mem inventory. NUMA 2 has twice as
# much as NUMA 1 (so we can validate later that everything is where
# it should be).
for n in (1, 2):
inv = {
'VCPU': {
'total': 10 * n,
'reserved': 0,
'min_unit': 1,
'max_unit': 2,
'step_size': 1,
'allocation_ratio': 10.0,
},
'MEMORY_MB': {
'total': 1048576 * n,
'reserved': 2048,
'min_unit': 512,
'max_unit': 131072,
'step_size': 512,
'allocation_ratio': 1.0,
},
}
prov_tree.update_inventory('numa%d' % n, inv)
# Each NUMA node has two PFs providing VF inventory on one of two
# networks
for n in (1, 2):
for p in (1, 2):
name = 'pf%d_%d' % (n, p)
prov_tree.new_child(
name, getattr(uuids, 'numa%d' % n),
uuid=getattr(uuids, name))
trait = 'CUSTOM_PHYSNET_%d' % ((n + p) % 2)
prov_tree.update_traits(name, [trait])
inv = {
'SRIOV_NET_VF': {
'total': n + p,
'reserved': 0,
'min_unit': 1,
'max_unit': 1,
'step_size': 1,
'allocation_ratio': 1.0,
},
}
prov_tree.update_inventory(name, inv)
if do_reshape:
# Clear out the compute node's inventory. Its VCPU and
# MEMORY_MB "moved" to the NUMA RPs and its DISK_GB "moved" to
# the shared storage provider.
prov_tree.update_inventory(self.host_uuid, {})
# Move all the allocations
for consumer_uuid, alloc_info in allocations.items():
allocs = alloc_info['allocations']
# All allocations should belong to the compute node.
self.assertEqual([self.host_uuid], list(allocs))
new_allocs = {}
for rc, amt in allocs[self.host_uuid]['resources'].items():
# Move VCPU to NUMA1 and MEMORY_MB to NUMA2. Bogus, but
# lets us prove stuff ends up where we tell it to go.
if rc == 'VCPU':
rp_uuid = uuids.numa1
elif rc == 'MEMORY_MB':
rp_uuid = uuids.numa2
elif rc == 'DISK_GB':
rp_uuid = uuids.ssp
else:
self.fail("Unexpected resource on compute node: "
"%s=%d" % (rc, amt))
new_allocs[rp_uuid] = {
'resources': {rc: amt},
}
# Add a VF for the heck of it. Again bogus, but see above.
new_allocs[uuids.pf1_1] = {
'resources': {'SRIOV_NET_VF': 1}
}
# Now replace just the allocations, leaving the other stuff
# (proj/user ID and consumer generation) alone
alloc_info['allocations'] = new_allocs
self.mock_upt.side_effect = update_provider_tree
if startup:
self.restart_compute_service(self.compute)
else:
self._run_update_available_resource(False)
# Create a dict, keyed by provider UUID, of all the providers
rps_by_uuid = {}
for rp_dict in self._get_all_providers():
rps_by_uuid[rp_dict['uuid']] = rp_dict
# All and only the expected providers got created.
all_uuids = set([self.host_uuid, uuids.ssp, uuids.numa1, uuids.numa2,
uuids.pf1_1, uuids.pf1_2, uuids.pf2_1, uuids.pf2_2])
self.assertEqual(all_uuids, set(rps_by_uuid))
# Validate tree roots
tree_uuids = [self.host_uuid, uuids.numa1, uuids.numa2,
uuids.pf1_1, uuids.pf1_2, uuids.pf2_1, uuids.pf2_2]
for tree_uuid in tree_uuids:
self.assertEqual(self.host_uuid,
rps_by_uuid[tree_uuid]['root_provider_uuid'])
self.assertEqual(uuids.ssp,
rps_by_uuid[uuids.ssp]['root_provider_uuid'])
# SSP has the right traits
self.assertEqual(
set(['MISC_SHARES_VIA_AGGREGATE', 'STORAGE_DISK_SSD']),
set(self._get_provider_traits(uuids.ssp)))
# SSP has the right inventory
self.assertEqual(
500, self._get_provider_inventory(uuids.ssp)['DISK_GB']['total'])
# SSP and compute are in the same aggregate
agg_uuids = set([self.host_uuid, uuids.ssp])
for uuid in agg_uuids:
self.assertEqual(set([uuids.agg]),
set(self._get_provider_aggregates(uuid)))
# The rest aren't in aggregates
for uuid in (all_uuids - agg_uuids):
self.assertEqual(set(), set(self._get_provider_aggregates(uuid)))
# NUMAs have the right inventory and parentage
for n in (1, 2):
numa_uuid = getattr(uuids, 'numa%d' % n)
self.assertEqual(self.host_uuid,
rps_by_uuid[numa_uuid]['parent_provider_uuid'])
inv = self._get_provider_inventory(numa_uuid)
self.assertEqual(10 * n, inv['VCPU']['total'])
self.assertEqual(1048576 * n, inv['MEMORY_MB']['total'])
# PFs have the right inventory, physnet, and parentage
self.assertEqual(uuids.numa1,
rps_by_uuid[uuids.pf1_1]['parent_provider_uuid'])
self.assertEqual(['CUSTOM_PHYSNET_0'],
self._get_provider_traits(uuids.pf1_1))
self.assertEqual(
2,
self._get_provider_inventory(uuids.pf1_1)['SRIOV_NET_VF']['total'])
self.assertEqual(uuids.numa1,
rps_by_uuid[uuids.pf1_2]['parent_provider_uuid'])
self.assertEqual(['CUSTOM_PHYSNET_1'],
self._get_provider_traits(uuids.pf1_2))
self.assertEqual(
3,
self._get_provider_inventory(uuids.pf1_2)['SRIOV_NET_VF']['total'])
self.assertEqual(uuids.numa2,
rps_by_uuid[uuids.pf2_1]['parent_provider_uuid'])
self.assertEqual(['CUSTOM_PHYSNET_1'],
self._get_provider_traits(uuids.pf2_1))
self.assertEqual(
3,
self._get_provider_inventory(uuids.pf2_1)['SRIOV_NET_VF']['total'])
self.assertEqual(uuids.numa2,
rps_by_uuid[uuids.pf2_2]['parent_provider_uuid'])
self.assertEqual(['CUSTOM_PHYSNET_0'],
self._get_provider_traits(uuids.pf2_2))
self.assertEqual(
4,
self._get_provider_inventory(uuids.pf2_2)['SRIOV_NET_VF']['total'])
# Compute don't have any extra traits
self.assertCountEqual(self.expected_compute_node_traits,
self._get_provider_traits(self.host_uuid))
# NUMAs don't have any traits
for uuid in (uuids.numa1, uuids.numa2):
self.assertEqual([], self._get_provider_traits(uuid))
def test_update_provider_tree_multiple_providers(self):
self._update_provider_tree_multiple_providers()
def test_update_provider_tree_multiple_providers_startup(self):
"""The above works the same for startup when no reshape requested."""
self._update_provider_tree_multiple_providers(startup=True)
def test_update_provider_tree_bogus_resource_class(self):
def update_provider_tree(prov_tree, nodename):
prov_tree.update_inventory(self.compute.host, {'FOO': {}})
self.mock_upt.side_effect = update_provider_tree
rcs = self._get_all_resource_classes()
self.assertIn('VCPU', rcs)
self.assertNotIn('FOO', rcs)
self._run_update_available_resource_and_assert_raises()
rcs = self._get_all_resource_classes()
self.assertIn('VCPU', rcs)
self.assertNotIn('FOO', rcs)
def test_update_provider_tree_bogus_trait(self):
def update_provider_tree(prov_tree, nodename):
prov_tree.update_traits(self.compute.host, ['FOO'])
self.mock_upt.side_effect = update_provider_tree
traits = self._get_all_traits()
self.assertIn('HW_CPU_X86_AVX', traits)
self.assertNotIn('FOO', traits)
self._run_update_available_resource_and_assert_raises()
traits = self._get_all_traits()
self.assertIn('HW_CPU_X86_AVX', traits)
self.assertNotIn('FOO', traits)
def _create_instance(self, flavor):
return self._create_server(
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
flavor_id=flavor['id'],
networks='none', az='nova:host1')
def test_reshape(self):
"""On startup, virt driver signals it needs to reshape, then does so.
This test creates a couple of instances so there are allocations to be
moved by the reshape operation. Then we do the reshape and make sure
the inventories and allocations end up where they should.
"""
# First let's create some instances so we have allocations to move.
flavors = self.api.get_flavors()
inst1 = self._create_instance(flavors[0])
inst2 = self._create_instance(flavors[1])
# Instance create calls RT._update, which calls
# driver.update_provider_tree, which is currently mocked to a no-op.
self.assertEqual(2, self.mock_upt.call_count)
self.mock_upt.reset_mock()
# Hit the reshape.
self._update_provider_tree_multiple_providers(startup=True,
do_reshape=True)
# Check the final allocations
# The compute node provider should have *no* allocations.
self.assertEqual(
{}, self._get_allocations_by_provider_uuid(self.host_uuid))
# And no inventory
self.assertEqual({}, self._get_provider_inventory(self.host_uuid))
# NUMA1 got all the VCPU
self.assertEqual(
{inst1['id']: {'resources': {'VCPU': 1}},
inst2['id']: {'resources': {'VCPU': 1}}},
self._get_allocations_by_provider_uuid(uuids.numa1))
# NUMA2 got all the memory
self.assertEqual(
{inst1['id']: {'resources': {'MEMORY_MB': 512}},
inst2['id']: {'resources': {'MEMORY_MB': 2048}}},
self._get_allocations_by_provider_uuid(uuids.numa2))
# Disk resource ended up on the shared storage provider
self.assertEqual(
{inst1['id']: {'resources': {'DISK_GB': 1}},
inst2['id']: {'resources': {'DISK_GB': 20}}},
self._get_allocations_by_provider_uuid(uuids.ssp))
# We put VFs on the first PF in NUMA1
self.assertEqual(
{inst1['id']: {'resources': {'SRIOV_NET_VF': 1}},
inst2['id']: {'resources': {'SRIOV_NET_VF': 1}}},
self._get_allocations_by_provider_uuid(uuids.pf1_1))
self.assertEqual(
{}, self._get_allocations_by_provider_uuid(uuids.pf1_2))
self.assertEqual(
{}, self._get_allocations_by_provider_uuid(uuids.pf2_1))
self.assertEqual(
{}, self._get_allocations_by_provider_uuid(uuids.pf2_2))
# This is *almost* redundant - but it makes sure the instances don't
# have extra allocations from some other provider.
self.assertEqual(
{
uuids.numa1: {
'resources': {'VCPU': 1},
# Don't care about the generations - rely on placement db
# tests to validate that those behave properly.
'generation': mock.ANY,
},
uuids.numa2: {
'resources': {'MEMORY_MB': 512},
'generation': mock.ANY,
},
uuids.ssp: {
'resources': {'DISK_GB': 1},
'generation': mock.ANY,
},
uuids.pf1_1: {
'resources': {'SRIOV_NET_VF': 1},
'generation': mock.ANY,
},
}, self._get_allocations_by_server_uuid(inst1['id']))
self.assertEqual(
{
uuids.numa1: {
'resources': {'VCPU': 1},
'generation': mock.ANY,
},
uuids.numa2: {
'resources': {'MEMORY_MB': 2048},
'generation': mock.ANY,
},
uuids.ssp: {
'resources': {'DISK_GB': 20},
'generation': mock.ANY,
},
uuids.pf1_1: {
'resources': {'SRIOV_NET_VF': 1},
'generation': mock.ANY,
},
}, self._get_allocations_by_server_uuid(inst2['id']))
# The first call raises ReshapeNeeded, resulting in the second.
self.assertEqual(2, self.mock_upt.call_count)
# The expected value of the allocations kwarg to update_provider_tree
# for that second call:
exp_allocs = {
inst1['id']: {
'allocations': {
uuids.numa1: {'resources': {'VCPU': 1}},
uuids.numa2: {'resources': {'MEMORY_MB': 512}},
uuids.ssp: {'resources': {'DISK_GB': 1}},
uuids.pf1_1: {'resources': {'SRIOV_NET_VF': 1}},
},
'consumer_generation': mock.ANY,
'project_id': mock.ANY,
'user_id': mock.ANY,
},
inst2['id']: {
'allocations': {
uuids.numa1: {'resources': {'VCPU': 1}},
uuids.numa2: {'resources': {'MEMORY_MB': 2048}},
uuids.ssp: {'resources': {'DISK_GB': 20}},
uuids.pf1_1: {'resources': {'SRIOV_NET_VF': 1}},
},
'consumer_generation': mock.ANY,
'project_id': mock.ANY,
'user_id': mock.ANY,
},
}
self.mock_upt.assert_has_calls([
mock.call(mock.ANY, 'host1'),
mock.call(mock.ANY, 'host1', allocations=exp_allocs),
])
class TraitsTrackingTests(integrated_helpers.ProviderUsageBaseTestCase):
compute_driver = 'fake.SmallFakeDriver'
fake_caps = {
'supports_attach_interface': True,
'supports_device_tagging': False,
}
def _mock_upt(self, traits_to_add, traits_to_remove):
"""Set up the compute driver with a fake update_provider_tree()
which injects the given traits into the provider tree
"""
original_upt = fake.SmallFakeDriver.update_provider_tree
def fake_upt(self2, ptree, nodename, allocations=None):
original_upt(self2, ptree, nodename, allocations)
LOG.debug("injecting traits via fake update_provider_tree(): %s",
traits_to_add)
ptree.add_traits(nodename, *traits_to_add)
LOG.debug("removing traits via fake update_provider_tree(): %s",
traits_to_remove)
ptree.remove_traits(nodename, *traits_to_remove)
self.stub_out('nova.virt.fake.FakeDriver.update_provider_tree',
fake_upt)
@mock.patch.dict(fake.SmallFakeDriver.capabilities, clear=True,
values=fake_caps)
def test_resource_provider_traits(self):
"""Test that the compute service reports traits via driver
capabilities and registers them on the compute host resource
provider in the placement API.
"""
custom_trait = 'CUSTOM_FOO'
ptree_traits = [custom_trait, 'HW_CPU_X86_VMX']
global_traits = self._get_all_traits()
self.assertNotIn(custom_trait, global_traits)
self.assertIn(os_traits.COMPUTE_NET_ATTACH_INTERFACE, global_traits)
self.assertIn(os_traits.COMPUTE_DEVICE_TAGGING, global_traits)
self.assertIn(os_traits.COMPUTE_NODE, global_traits)
self.assertEqual([], self._get_all_providers())
self._mock_upt(ptree_traits, [])
self.compute = self._start_compute(host='host1')
rp_uuid = self._get_provider_uuid_by_host('host1')
expected_traits = set(
ptree_traits +
[os_traits.COMPUTE_NET_ATTACH_INTERFACE, os_traits.COMPUTE_NODE]
)
self.assertCountEqual(expected_traits,
self._get_provider_traits(rp_uuid))
global_traits = self._get_all_traits()
# CUSTOM_FOO is now a registered trait because the virt driver
# reported it.
self.assertIn(custom_trait, global_traits)
# Now simulate user deletion of driver-provided traits from
# the compute node provider.
expected_traits.remove(custom_trait)
expected_traits.remove(os_traits.COMPUTE_NET_ATTACH_INTERFACE)
self._set_provider_traits(rp_uuid, list(expected_traits))
self.assertCountEqual(expected_traits,
self._get_provider_traits(rp_uuid))
# The above trait deletions are simulations of an out-of-band
# placement operation, as if the operator used the CLI. So
# now we have to "SIGHUP the compute process" to clear the
# report client cache so the subsequent update picks up the
# changes.
self.compute.manager.reset()
# Add the traits back so that the mock update_provider_tree()
# can reinject them.
expected_traits.update(
[custom_trait, os_traits.COMPUTE_NET_ATTACH_INTERFACE])
# Now when we run the periodic update task, the trait should
# reappear in the provider tree and get synced back to
# placement.
self._run_periodics()
self.assertCountEqual(expected_traits,
self._get_provider_traits(rp_uuid))
global_traits = self._get_all_traits()
self.assertIn(custom_trait, global_traits)
self.assertIn(os_traits.COMPUTE_NET_ATTACH_INTERFACE, global_traits)
@mock.patch.dict(fake.SmallFakeDriver.capabilities, clear=True,
values=fake_caps)
def test_admin_traits_preserved(self):
"""Test that if admin externally sets traits on the resource provider
then the compute periodic doesn't remove them from placement.
"""
admin_trait = 'CUSTOM_TRAIT_FROM_ADMIN'
self._create_trait(admin_trait)
global_traits = self._get_all_traits()
self.assertIn(admin_trait, global_traits)
self.compute = self._start_compute(host='host1')
rp_uuid = self._get_provider_uuid_by_host('host1')
traits = self._get_provider_traits(rp_uuid)
traits.append(admin_trait)
self._set_provider_traits(rp_uuid, traits)
self.assertIn(admin_trait, self._get_provider_traits(rp_uuid))
# SIGHUP the compute process to clear the report client
# cache, so the subsequent periodic update recalculates everything.
self.compute.manager.reset()
self._run_periodics()
self.assertIn(admin_trait, self._get_provider_traits(rp_uuid))
@mock.patch.dict(fake.SmallFakeDriver.capabilities, clear=True,
values=fake_caps)
def test_driver_removing_support_for_trait_via_capability(self):
"""Test that if a driver initially reports a trait via a supported
capability, then at the next periodic update doesn't report
support for it again, it gets removed from the provider in the
placement service.
"""
self.compute = self._start_compute(host='host1')
rp_uuid = self._get_provider_uuid_by_host('host1')
trait = os_traits.COMPUTE_NET_ATTACH_INTERFACE
self.assertIn(trait, self._get_provider_traits(rp_uuid))
new_caps = dict(fake.SmallFakeDriver.capabilities,
**{'supports_attach_interface': False})
with mock.patch.dict(fake.SmallFakeDriver.capabilities, new_caps):
self._run_periodics()
self.assertNotIn(trait, self._get_provider_traits(rp_uuid))
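        # Note the capability-to-trait pairing exercised above:
        # supports_attach_interface <-> COMPUTE_NET_ATTACH_INTERFACE.
        # Flipping the capability flag is enough for the next periodic
        # update to drop the trait; no provider-cache reset is needed
        # because the change comes from the driver, not from an
        # out-of-band placement edit.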
def test_driver_removing_trait_via_upt(self):
"""Test that if a driver reports a trait via update_provider_tree()
initially, but at the next periodic update doesn't report it
        again, it gets removed from placement.
"""
custom_trait = "CUSTOM_TRAIT_FROM_DRIVER"
standard_trait = os_traits.HW_CPU_X86_SGX
self._mock_upt([custom_trait, standard_trait], [])
self.compute = self._start_compute(host='host1')
rp_uuid = self._get_provider_uuid_by_host('host1')
self.assertIn(custom_trait, self._get_provider_traits(rp_uuid))
self.assertIn(standard_trait, self._get_provider_traits(rp_uuid))
# Now change the fake update_provider_tree() from injecting the
# traits to removing them, and run the periodic update.
self._mock_upt([], [custom_trait, standard_trait])
self._run_periodics()
self.assertNotIn(custom_trait, self._get_provider_traits(rp_uuid))
self.assertNotIn(standard_trait, self._get_provider_traits(rp_uuid))
@mock.patch.dict(fake.SmallFakeDriver.capabilities, clear=True,
values=fake_caps)
def test_driver_removes_unsupported_trait_from_admin(self):
"""Test that if an admin adds a trait corresponding to a
capability which is unsupported, then if the provider cache is
reset, the driver will remove it during the next update.
"""
self.compute = self._start_compute(host='host1')
rp_uuid = self._get_provider_uuid_by_host('host1')
traits = self._get_provider_traits(rp_uuid)
trait = os_traits.COMPUTE_DEVICE_TAGGING
self.assertNotIn(trait, traits)
# Simulate an admin associating the trait with the host via
# the placement API.
traits.append(trait)
self._set_provider_traits(rp_uuid, traits)
# Check that worked.
traits = self._get_provider_traits(rp_uuid)
self.assertIn(trait, traits)
# SIGHUP the compute process to clear the report client
# cache, so the subsequent periodic update recalculates everything.
self.compute.manager.reset()
self._run_periodics()
self.assertNotIn(trait, self._get_provider_traits(rp_uuid))
| apache-2.0 |
jirikuncar/invenio | invenio/legacy/bibformat/adminlib.py | 13 | 10217 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Handle requests from the web interface to configure BibFormat."""
__revision__ = "$Id$"
import os
import re
from invenio.modules.formatter.config import \
CFG_BIBFORMAT_TEMPLATES_PATH, \
CFG_BIBFORMAT_FORMAT_TEMPLATE_EXTENSION
from invenio.modules.formatter import engine as bibformat_engine
def get_outputs_that_use_template(filename):
"""Return a list of output formats that call the given format template.
The returned output formats also give their dependencies on tags.
We don't return the complete output formats but some reference to
them (filename + names)::
[ {'filename':"filename_1.bfo"
'names': {'en':"a name", 'fr': "un nom", 'generic':"a name"}
'tags': ['710__a', '920__']
},
...
]
:param filename: a format template filename
:return: output formats references sorted by (generic) name
"""
output_formats_list = {}
tags = []
output_formats = bibformat_engine.get_output_formats(with_attributes=True)
for output_format in output_formats:
name = output_formats[output_format]['attrs']['names']['generic']
# First look at default template, and add it if necessary
if output_formats[output_format]['default'] == filename:
output_formats_list[name] = {'filename':output_format,
'names':output_formats[output_format]['attrs']['names'],
'tags':[]}
# Second look at each rule
found = False
for rule in output_formats[output_format]['rules']:
if rule['template'] == filename:
found = True
tags.append(rule['field']) #Also build dependencies on tags
# Finally add dependency on template from rule (overwrite default dependency,
# which is weaker in term of tag)
if found:
output_formats_list[name] = {'filename':output_format,
'names':output_formats[output_format]['attrs']['names'],
'tags':tags}
keys = output_formats_list.keys()
keys.sort()
return map(output_formats_list.get, keys)
def get_elements_used_by_template(filename):
"""
Returns a list of format elements that are called by the given format template.
The returned elements also give their dependencies on tags.
    Dependencies on tags might be approximate. See the
    get_tags_used_by_element() docstring.
    We must handle usage of bfe_field in a special way if we want to retrieve
    the used tag: the tag is given in the "tag" parameter, not inside the element code.
The list is returned sorted by name::
[ {'filename':"filename_1.py"
'name':"filename_1"
'tags': ['710__a', '920__']
},
...
]
:param filename: a format template filename
:return: elements sorted by name
"""
format_elements = {}
format_template = bibformat_engine.get_format_template(filename=filename, with_attributes=True)
code = format_template['code']
format_elements_iter = bibformat_engine.pattern_tag.finditer(code)
for result in format_elements_iter:
function_name = result.group("function_name").lower()
if function_name is not None and function_name not in format_elements \
and not function_name == "field":
filename = bibformat_engine.resolve_format_element_filename("BFE_"+function_name)
if filename is not None:
tags = get_tags_used_by_element(filename)
format_elements[function_name] = {'name':function_name.lower(),
'filename':filename,
'tags':tags}
elif function_name == "field":
# Handle bfe_field element in a special way
if function_name not in format_elements:
#Indicate usage of bfe_field if not already done
filename = bibformat_engine.resolve_format_element_filename("BFE_"+function_name)
format_elements[function_name] = {'name':function_name.lower(),
'filename':filename,
'tags':[]}
# Retrieve value of parameter "tag"
all_params = result.group('params')
function_params_iterator = bibformat_engine.pattern_function_params.finditer(all_params)
for param_match in function_params_iterator:
name = param_match.group('param')
if name == "tag":
value = param_match.group('value')
                    if value not in format_elements[function_name]['tags']:
format_elements[function_name]['tags'].append(value)
break
keys = format_elements.keys()
keys.sort()
return map(format_elements.get, keys)
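# Hedged illustration (template snippet assumed, not from the original code):
# a template line such as
#     <BFE_FIELD tag="245__a" />
# is reported as the pseudo-element 'field' with tag dependency '245__a',
# since for bfe_field the tag lives in the call parameters rather than in
# the element code -- the special case handled above.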
# Format Elements Dependencies
#
def get_tags_used_by_element(filename):
"""
    Returns a list of tags used by the given format element.
    APPROXIMATE RESULTS: the tags are retrieved from the field(), fields()
    and control_field() calls. If they are computed, or saved
    in a variable somewhere else, they are not retrieved.
    @TODO: There is room for improvement. For example, catch
    calls to BibRecord functions.
:param filename: a format element filename
:return: tags sorted by value
"""
tags = {}
format_element = bibformat_engine.get_format_element(filename)
if format_element is None:
return []
elif format_element['type']=="field":
tags = format_element['attrs']['tags']
return tags
filename = bibformat_engine.resolve_format_element_filename(filename)
path = bibformat_engine.get_format_element_path(filename)
format = open(path, 'r')
code = format.read()
    format.close()
tags_pattern = re.compile('''
(field|fields|control_field)\s* #Function call
\(\s* #Opening parenthesis
[\'"]+ #Single or double quote
(?P<tag>.+?) #Tag
[\'"]+\s* #Single or double quote
(,[^\)]+)* #Additional function param
\) #Closing parenthesis
''', re.VERBOSE | re.MULTILINE)
tags_iter = tags_pattern.finditer(code)
for result in tags_iter:
tags[result.group("tag")] = result.group("tag")
return tags.values()
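# Hedged illustration of the pattern above (element code assumed): for source
# containing
#     title = field('245__a')
#     recid = control_field("001")
# the regex captures '245__a' and '001', while a computed call such as
# field(tag_variable) is missed -- hence the "approximate" caveat above.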
def get_templates_that_use_element(name):
"""Return a list of format templates that call the given format element.
The returned format templates also give their dependencies on tags::
[ {'filename':"filename_1.bft"
'name': "a name"
'tags': ['710__a', '920__']
},
...
]
:param name: a format element name
:return: templates sorted by name
"""
format_templates = {}
tags = []
files = os.listdir(CFG_BIBFORMAT_TEMPLATES_PATH) #Retrieve all templates
for possible_template in files:
if possible_template.endswith(CFG_BIBFORMAT_FORMAT_TEMPLATE_EXTENSION):
format_elements = get_elements_used_by_template(possible_template) #Look for elements used in template
format_elements = map(lambda x: x['name'].lower(), format_elements)
try: #Look for element
format_elements.index(name.lower()) #If not found, get out of "try" statement
format_template = bibformat_engine.get_format_template(filename=possible_template, with_attributes=True)
template_name = format_template['attrs']['name']
format_templates[template_name] = {'name':template_name,
'filename':possible_template}
            except ValueError:
pass
keys = format_templates.keys()
keys.sort()
return map(format_templates.get, keys)
# Output Formats Dependencies
#
def get_templates_used_by_output(code):
"""Return a list of templates used inside an output format give by its code.
The returned format templates also give their dependencies on elements and tags::
[ {'filename':"filename_1.bft"
'name': "a name"
'elements': [{'filename':"filename_1.py", 'name':"filename_1", 'tags': ['710__a', '920__']
}, ...]
},
...
]
    :param code: output format code
:return: templates sorted by name
"""
format_templates = {}
output_format = bibformat_engine.get_output_format(code, with_attributes=True)
filenames = map(lambda x: x['template'], output_format['rules'])
if output_format['default'] != "":
filenames.append(output_format['default'])
for filename in filenames:
template = bibformat_engine.get_format_template(filename, with_attributes=True)
name = template['attrs']['name']
elements = get_elements_used_by_template(filename)
format_templates[name] = {'name':name,
'filename':filename,
'elements':elements}
keys = format_templates.keys()
keys.sort()
return map(format_templates.get, keys)
| gpl-2.0 |
kimjam/pkaers | tests/test.py | 1 | 1275 | # flake8: noqa
import pandas as pd
from pkaers.khan_elo import khan_elo
import unittest
class ObjectTest(unittest.TestCase):
def configure(self):
self.student = pd.read_csv('student.csv').T.to_dict().values()
self.map_data = pd.read_csv('map.csv').T.to_dict().values()
self.khanstudent = pd.read_csv('khanstudent.csv').T.to_dict().values()
self.exerstates = pd.read_csv('exerstates.csv').T.to_dict().values()
        self.khanpred = []
class StudentTest(ObjectTest):
def setUp(self):
self.configure()
def test_studentpredict(self):
predict = khan_elo(
student=self.student,
map_data=self.map_data,
khanstudent=self.khanstudent,
exerstates=self.exerstates,
update='students',
khanpred=self.khanpred)
self.assertEqual(len(predict), 2)
self.assertEqual(predict[0]['rit_prediction'], None)
class ExerciseTest(ObjectTest):
def setUp(self):
self.configure()
def test_exerciseupdate(self):
items = khan_elo(
student=self.student,
map_data=self.map_data,
khanstudent=self.khanstudent,
exerstates=self.exerstates,
update='items')
self.assertEqual(len(items), 1124)
sample = [
item['matches']
for item in items
if item['slug'] == 'adding_and_subtracting_negative_numbers'
][0]
self.assertEqual(sample, 1)
| mit |
scalable-networks/gnuradio-3.7.2.1 | gnuradio-runtime/python/gnuradio/gru/hexint.py | 78 | 1351 | #
# Copyright 2005 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
def hexint(mask):
"""
Convert unsigned masks into signed ints.
This allows us to use hex constants like 0xf0f0f0f2 when talking to
our hardware and not get screwed by them getting treated as python
longs.
"""
if mask >= 2**31:
return int(mask-2**32)
return mask
def hexshort(mask):
"""
Convert unsigned masks into signed shorts.
This allows us to use hex constants like 0x8000 when talking to
our hardware and not get screwed by them getting treated as python
longs.
"""
if mask >= 2**15:
return int(mask-2**16)
return mask
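# Hedged sanity check (example masks assumed, not part of the original
# module): 0xf0f0f0f2 folds to the signed 32-bit value -252645134 and
# 0x8000 folds to the signed 16-bit value -32768.
if __name__ == '__main__':
    assert hexint(0xf0f0f0f2) == -252645134
    assert hexint(0x7fffffff) == 0x7fffffff  # below 2**31: unchanged
    assert hexshort(0x8000) == -32768
    print("hexint/hexshort sanity checks passed")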
| gpl-3.0 |
nburn42/tensorflow | tensorflow/contrib/autograph/converters/asserts_test.py | 4 | 1349 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for asserts module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.contrib.autograph.converters import asserts
from tensorflow.contrib.autograph.converters import converter_test_base
from tensorflow.python.platform import test
class AssertsTest(converter_test_base.TestCase):
def test_transform(self):
def test_fn(a):
assert a > 0
node = self.parse_and_analyze(test_fn, {})
node = asserts.transform(node, self.ctx)
self.assertTrue(isinstance(node.body[0].body[0].value, gast.Call))
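    # A minimal sketch of what is being asserted (behaviour assumed from the
    # check above): the plain Python statement `assert a > 0` has been
    # rewritten into a call expression, so it can run as a graph-level
    # assertion rather than a CPython assert.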
if __name__ == '__main__':
test.main()
| apache-2.0 |
alfanugraha/LUMENS-repo | processing/algs/ftools/ExportGeometryInfo.py | 5 | 4922 | # -*- coding: utf-8 -*-
"""
***************************************************************************
ExportGeometryInfo.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtCore import *
from qgis.core import *
from processing import interface
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.parameters.ParameterVector import ParameterVector
from processing.parameters.ParameterSelection import ParameterSelection
from processing.outputs.OutputVector import OutputVector
from processing.tools import dataobjects, vector
class ExportGeometryInfo(GeoAlgorithm):
INPUT = 'INPUT'
METHOD = 'CALC_METHOD'
OUTPUT = 'OUTPUT'
CALC_METHODS = ['Layer CRS', 'Project CRS', 'Ellipsoidal']
#==========================================================================
#def getIcon(self):
# return QIcon(os.path.dirname(__file__) + "/icons/export_geometry.png")
#=========================================================================
def defineCharacteristics(self):
self.name = 'Export/Add geometry columns'
self.group = 'Vector table tools'
self.addParameter(ParameterVector(self.INPUT, 'Input layer',
[ParameterVector.VECTOR_TYPE_ANY]))
self.addParameter(ParameterSelection(self.METHOD, 'Calculate using',
self.CALC_METHODS, 0))
self.addOutput(OutputVector(self.OUTPUT, 'Output layer'))
def processAlgorithm(self, progress):
layer = dataobjects.getObjectFromUri(
self.getParameterValue(self.INPUT))
method = self.getParameterValue(self.METHOD)
geometryType = layer.geometryType()
idx1 = -1
idx2 = -1
fields = layer.pendingFields()
if geometryType == QGis.Polygon:
(idx1, fields) = vector.findOrCreateField(layer, fields, 'area',
21, 6)
(idx2, fields) = vector.findOrCreateField(layer, fields,
'perimeter', 21, 6)
elif geometryType == QGis.Line:
(idx1, fields) = vector.findOrCreateField(layer, fields, 'length',
21, 6)
idx2 = idx1
else:
(idx1, fields) = vector.findOrCreateField(layer, fields, 'xcoord',
21, 6)
(idx2, fields) = vector.findOrCreateField(layer, fields, 'ycoord',
21, 6)
writer = self.getOutputFromName(
self.OUTPUT).getVectorWriter(fields.toList(),
layer.dataProvider().geometryType(), layer.crs())
ellips = None
crs = None
coordTransform = None
# Calculate with:
# 0 - layer CRS
# 1 - project CRS
# 2 - ellipsoidal
if method == 2:
ellips = QgsProject.instance().readEntry('Measure', '/Ellipsoid',
'NONE')[0]
crs = layer.crs().srsid()
elif method == 1:
mapCRS = interface.iface.mapCanvas().mapRenderer().destinationCrs()
layCRS = layer.crs()
coordTransform = QgsCoordinateTransform(layCRS, mapCRS)
outFeat = QgsFeature()
inGeom = QgsGeometry()
outFeat.initAttributes(len(fields))
outFeat.setFields(fields)
current = 0
features = vector.features(layer)
total = 100.0 / float(len(features))
for f in features:
inGeom = f.geometry()
if method == 1:
inGeom.transform(coordTransform)
(attr1, attr2) = vector.simpleMeasure(inGeom, method, ellips, crs)
outFeat.setGeometry(inGeom)
attrs = f.attributes()
attrs.insert(idx1, attr1)
if attr2 is not None:
attrs.insert(idx2, attr2)
outFeat.setAttributes(attrs)
writer.addFeature(outFeat)
current += 1
progress.setPercentage(int(current * total))
del writer
| gpl-2.0 |
vitmod/enigma2 | lib/python/Plugins/Extensions/DVDBurn/Title.py | 50 | 6408 | from Components.config import ConfigSubsection, ConfigSubList, ConfigInteger, ConfigText, ConfigSelection
import TitleCutter
class ConfigFixedText(ConfigText):
def __init__(self, text, visible_width=60):
ConfigText.__init__(self, default = text, fixed_size = True, visible_width = visible_width)
def handleKey(self, key):
pass
class Title:
def __init__(self, project):
self.properties = ConfigSubsection()
self.properties.menutitle = ConfigText(fixed_size = False, visible_width = 80)
self.properties.menusubtitle = ConfigText(fixed_size = False, visible_width = 80)
self.properties.aspect = ConfigSelection(choices = [("4:3", _("4:3")), ("16:9", _("16:9"))])
self.properties.widescreen = ConfigSelection(choices = [("nopanscan", "nopanscan"), ("noletterbox", "noletterbox")])
self.properties.autochapter = ConfigInteger(default = 0, limits = (0, 60))
self.properties.audiotracks = ConfigSubList()
self.DVBname = _("Title")
self.DVBdescr = _("Description")
self.DVBchannel = _("Channel")
self.cuesheet = [ ]
self.source = None
self.filesize = 0
self.estimatedDiskspace = 0
self.inputfile = ""
self.cutlist = [ ]
self.chaptermarks = [ ]
self.timeCreate = None
self.project = project
self.length = 0
self.VideoType = -1
self.VideoPID = -1
self.framerate = 0
self.progressive = -1
self.resolution = (-1,-1)
def addService(self, service):
from os import path
from enigma import eServiceCenter, iServiceInformation
from ServiceReference import ServiceReference
from time import localtime, time
self.source = service
serviceHandler = eServiceCenter.getInstance()
info = serviceHandler.info(service)
sDescr = info and info.getInfoString(service, iServiceInformation.sDescription) or ""
self.DVBdescr = sDescr
sTimeCreate = info.getInfo(service, iServiceInformation.sTimeCreate)
if sTimeCreate > 1:
self.timeCreate = localtime(sTimeCreate)
serviceref = ServiceReference(info.getInfoString(service, iServiceInformation.sServiceref))
name = info and info.getName(service) or "Title" + sDescr
self.DVBname = name
self.DVBchannel = serviceref.getServiceName()
self.inputfile = service.getPath()
self.filesize = path.getsize(self.inputfile)
self.estimatedDiskspace = self.filesize
self.length = info.getLength(service)
def addFile(self, filename):
from enigma import eServiceReference
ref = eServiceReference(1, 0, filename)
self.addService(ref)
self.project.session.openWithCallback(self.titleEditDone, TitleCutter.CutlistReader, self)
def titleEditDone(self, cutlist):
self.initDVDmenuText(len(self.project.titles))
self.cuesheet = cutlist
self.produceFinalCuesheet()
def initDVDmenuText(self, track):
s = self.project.menutemplate.settings
self.properties.menutitle.setValue(self.formatDVDmenuText(s.titleformat.getValue(), track))
self.properties.menusubtitle.setValue(self.formatDVDmenuText(s.subtitleformat.getValue(), track))
def formatDVDmenuText(self, template, track):
template = template.replace("$i", str(track))
template = template.replace("$t", self.DVBname)
template = template.replace("$d", self.DVBdescr)
template = template.replace("$c", str(len(self.chaptermarks)+1))
template = template.replace("$f", self.inputfile)
template = template.replace("$C", self.DVBchannel)
#if template.find("$A") >= 0:
audiolist = [ ]
for audiotrack in self.properties.audiotracks:
active = audiotrack.active.getValue()
if active:
trackstring = audiotrack.format.getValue()
trackstring += ' (' + audiotrack.language.getValue() + ')'
audiolist.append(trackstring)
audiostring = ', '.join(audiolist)
template = template.replace("$A", audiostring)
if template.find("$l") >= 0:
l = self.length
lengthstring = "%d:%02d:%02d" % (l/3600, l%3600/60, l%60)
template = template.replace("$l", lengthstring)
if self.timeCreate:
template = template.replace("$Y", str(self.timeCreate[0]))
template = template.replace("$M", str(self.timeCreate[1]))
template = template.replace("$D", str(self.timeCreate[2]))
timestring = "%d:%02d" % (self.timeCreate[3], self.timeCreate[4])
template = template.replace("$T", timestring)
else:
template = template.replace("$Y", "").replace("$M", "").replace("$D", "").replace("$T", "")
return template
def produceFinalCuesheet(self):
CUT_TYPE_IN = 0
CUT_TYPE_OUT = 1
CUT_TYPE_MARK = 2
CUT_TYPE_LAST = 3
accumulated_in = 0
accumulated_at = 0
last_in = 0
self.cutlist = [ ]
self.chaptermarks = [ ]
# our demuxer expects *strictly* IN,OUT lists.
currently_in = not any(type == CUT_TYPE_IN for pts, type in self.cuesheet)
if currently_in:
self.cutlist.append(0) # emulate "in" at first
for (pts, type) in self.cuesheet:
#print "pts=", pts, "type=", type, "accumulated_in=", accumulated_in, "accumulated_at=", accumulated_at, "last_in=", last_in
if type == CUT_TYPE_IN and not currently_in:
self.cutlist.append(pts)
last_in = pts
currently_in = True
if type == CUT_TYPE_OUT and currently_in:
self.cutlist.append(pts)
# accumulate the segment
accumulated_in += pts - last_in
accumulated_at = pts
currently_in = False
if type == CUT_TYPE_MARK and currently_in:
# relocate chaptermark against "in" time. This is not 100% accurate,
# as the in/out points are not.
reloc_pts = pts - last_in + accumulated_in
self.chaptermarks.append(reloc_pts)
if len(self.cutlist) > 1:
part = accumulated_in / (self.length*90000.0)
usedsize = int ( part * self.filesize )
self.estimatedDiskspace = usedsize
self.length = accumulated_in / 90000
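	# Hedged worked example (cuesheet values assumed): for
	# [(0, IN), (900000, OUT), (1800000, IN), (2700000, MARK), (3600000, OUT)]
	# the cutlist becomes [0, 900000, 1800000, 3600000] and the mark at
	# 2700000 PTS is relocated to 2700000 - 1800000 + 900000 = 1800000,
	# i.e. 20 seconds into the kept footage at 90kHz resolution.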
def getChapterMarks(self, template="$h:$m:$s.$t"):
timestamps = [ ]
chapters = [ ]
minutes = self.properties.autochapter.getValue()
if len(self.chaptermarks) < 1 and minutes > 0:
chapterpts = 0
while chapterpts < (self.length-60*minutes)*90000:
chapterpts += 90000 * 60 * minutes
chapters.append(chapterpts)
else:
chapters = self.chaptermarks
for p in chapters:
timestring = template.replace("$h", str(p / (90000 * 3600)))
timestring = timestring.replace("$m", ("%02d" % (p % (90000 * 3600) / (90000 * 60))))
timestring = timestring.replace("$s", ("%02d" % (p % (90000 * 60) / 90000)))
timestring = timestring.replace("$t", ("%03d" % ((p % 90000) / 90)))
timestamps.append(timestring)
return timestamps
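	# Hedged illustration (PTS value assumed): with the default template
	# "$h:$m:$s.$t", a chapter mark at p = 90000 * 3661 + 450 ticks
	# expands to "1:01:01.005".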
| gpl-2.0 |
goliveirab/odoo | addons/account/product.py | 374 | 2897 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class product_category(osv.osv):
_inherit = "product.category"
_columns = {
'property_account_income_categ': fields.property(
type='many2one',
relation='account.account',
string="Income Account",
help="This account will be used for invoices to value sales."),
'property_account_expense_categ': fields.property(
type='many2one',
relation='account.account',
string="Expense Account",
help="This account will be used for invoices to value expenses."),
}
#----------------------------------------------------------
# Products
#----------------------------------------------------------
class product_template(osv.osv):
_inherit = "product.template"
_columns = {
'taxes_id': fields.many2many('account.tax', 'product_taxes_rel',
'prod_id', 'tax_id', 'Customer Taxes',
domain=[('parent_id','=',False),('type_tax_use','in',['sale','all'])]),
'supplier_taxes_id': fields.many2many('account.tax',
'product_supplier_taxes_rel', 'prod_id', 'tax_id',
'Supplier Taxes', domain=[('parent_id', '=', False),('type_tax_use','in',['purchase','all'])]),
'property_account_income': fields.property(
type='many2one',
relation='account.account',
string="Income Account",
help="This account will be used for invoices instead of the default one to value sales for the current product."),
'property_account_expense': fields.property(
type='many2one',
relation='account.account',
string="Expense Account",
help="This account will be used for invoices instead of the default one to value expenses for the current product."),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
gangadhar-kadam/adb-erp | stock/report/item_prices/item_prices.py | 3 | 4795 | # ERPNext - web based ERP (http://erpnext.com)
# Copyright (C) 2012 Web Notes Technologies Pvt Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import webnotes
from webnotes.utils import flt
def execute(filters=None):
if not filters: filters = {}
columns = get_columns(filters)
item_map = get_item_details()
pl = get_price_list()
last_purchase_rate = get_last_purchase_rate()
bom_rate = get_item_bom_rate()
val_rate_map = get_valuation_rate()
precision = webnotes.conn.get_value("Global Defaults", None, "float_precision") or 2
data = []
for item in sorted(item_map):
data.append([item, item_map[item]["item_name"],
item_map[item]["description"], item_map[item]["stock_uom"],
flt(last_purchase_rate.get(item, 0), precision),
flt(val_rate_map.get(item, 0), precision),
pl.get(item, {}).get("selling"),
pl.get(item, {}).get("buying"),
flt(bom_rate.get(item, 0), precision),
flt(item_map[item]["standard_rate"], precision)
])
return columns, data
def get_columns(filters):
"""return columns based on filters"""
columns = ["Item:Link/Item:100", "Item Name::150", "Description::150", "UOM:Link/UOM:80",
"Last Purchase Rate:Currency:90", "Valuation Rate:Currency:80", "Sales Price List::80",
"Purchase Price List::80", "BOM Rate:Currency:90", "Standard Rate:Currency:100"]
return columns
def get_item_details():
"""returns all items details"""
item_map = {}
for i in webnotes.conn.sql("select name, item_name, description, \
stock_uom, standard_rate from tabItem \
order by item_code", as_dict=1):
item_map.setdefault(i.name, i)
return item_map
def get_price_list():
"""Get selling & buying price list of every item"""
rate = {}
price_list = webnotes.conn.sql("""select parent, selling, buying,
concat(price_list_name, " - ", ref_currency, " ", ref_rate) as price
from `tabItem Price` where docstatus<2""", as_dict=1)
for j in price_list:
if j.price:
if j.selling:
rate.setdefault(j.parent, {}).setdefault("selling", []).append(j.price)
if j.buying:
rate.setdefault(j.parent, {}).setdefault("buying", []).append(j.price)
item_rate_map = {}
for item in rate:
item_rate_map.setdefault(item, {}).setdefault("selling",
", ".join(rate[item].get("selling", [])))
item_rate_map[item]["buying"] = ", ".join(rate[item].get("buying", []))
return item_rate_map
def get_last_purchase_rate():
item_last_purchase_rate_map = {}
query = """select * from (select
result.item_code,
result.purchase_rate
from (
(select
po_item.item_code,
po_item.item_name,
po.transaction_date as posting_date,
po_item.purchase_ref_rate,
po_item.discount_rate,
po_item.purchase_rate
from `tabPurchase Order` po, `tabPurchase Order Item` po_item
where po.name = po_item.parent and po.docstatus = 1)
union
(select
pr_item.item_code,
pr_item.item_name,
pr.posting_date,
pr_item.purchase_ref_rate,
pr_item.discount_rate,
pr_item.purchase_rate
from `tabPurchase Receipt` pr, `tabPurchase Receipt Item` pr_item
where pr.name = pr_item.parent and pr.docstatus = 1)
) result
order by result.item_code asc, result.posting_date desc) result_wrapper
group by item_code"""
for d in webnotes.conn.sql(query, as_dict=1):
item_last_purchase_rate_map.setdefault(d.item_code, d.purchase_rate)
return item_last_purchase_rate_map
def get_item_bom_rate():
"""Get BOM rate of an item from BOM"""
item_bom_map = {}
for b in webnotes.conn.sql("""select item, (total_cost/quantity) as bom_rate
from `tabBOM` where is_active=1 and is_default=1""", as_dict=1):
item_bom_map.setdefault(b.item, flt(b.bom_rate))
return item_bom_map
def get_valuation_rate():
"""Get an average valuation rate of an item from all warehouses"""
item_val_rate_map = {}
for d in webnotes.conn.sql("""select item_code,
sum(actual_qty*valuation_rate)/sum(actual_qty) as val_rate
from tabBin where actual_qty > 0 group by item_code""", as_dict=1):
item_val_rate_map.setdefault(d.item_code, d.val_rate)
return item_val_rate_map
| agpl-3.0 |
erwilan/ansible | contrib/inventory/zabbix.py | 24 | 4205 | #!/usr/bin/env python
# (c) 2013, Greg Buehler
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
"""
Zabbix Server external inventory script.
========================================
Returns hosts and hostgroups from Zabbix Server.
Configuration is read from `zabbix.ini`.
Tested with Zabbix Server 2.0.6.
"""
from __future__ import print_function
import os
import sys
import argparse
import ConfigParser
try:
from zabbix_api import ZabbixAPI
except ImportError:
print("Error: Zabbix API library must be installed: pip install zabbix-api.",
file=sys.stderr)
sys.exit(1)
try:
import json
except ImportError:
import simplejson as json
class ZabbixInventory(object):
def read_settings(self):
config = ConfigParser.SafeConfigParser()
conf_path = './zabbix.ini'
if not os.path.exists(conf_path):
conf_path = os.path.dirname(os.path.realpath(__file__)) + '/zabbix.ini'
if os.path.exists(conf_path):
config.read(conf_path)
# server
if config.has_option('zabbix', 'server'):
self.zabbix_server = config.get('zabbix', 'server')
# login
if config.has_option('zabbix', 'username'):
self.zabbix_username = config.get('zabbix', 'username')
if config.has_option('zabbix', 'password'):
self.zabbix_password = config.get('zabbix', 'password')
def read_cli(self):
parser = argparse.ArgumentParser()
parser.add_argument('--host')
parser.add_argument('--list', action='store_true')
self.options = parser.parse_args()
def hoststub(self):
return {
'hosts': []
}
def get_host(self, api, name):
data = {'ansible_ssh_host': name}
return data
def get_list(self, api):
hostsData = api.host.get({'output': 'extend', 'selectGroups': 'extend'})
data = {}
data[self.defaultgroup] = self.hoststub()
for host in hostsData:
hostname = host['name']
data[self.defaultgroup]['hosts'].append(hostname)
for group in host['groups']:
groupname = group['name']
if groupname not in data:
data[groupname] = self.hoststub()
data[groupname]['hosts'].append(hostname)
return data
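    # Hedged sketch of the structure returned by get_list() (hostnames
    # assumed for illustration):
    #   {'group_all': {'hosts': ['web01', 'db01']},
    #    'Linux servers': {'hosts': ['web01']}}
    # which is the JSON shape Ansible expects from `--list`.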
def __init__(self):
self.defaultgroup = 'group_all'
self.zabbix_server = None
self.zabbix_username = None
self.zabbix_password = None
self.read_settings()
self.read_cli()
if self.zabbix_server and self.zabbix_username:
try:
api = ZabbixAPI(server=self.zabbix_server)
api.login(user=self.zabbix_username, password=self.zabbix_password)
except BaseException as e:
print("Error: Could not login to Zabbix server. Check your zabbix.ini.", file=sys.stderr)
sys.exit(1)
if self.options.host:
data = self.get_host(api, self.options.host)
print(json.dumps(data, indent=2))
elif self.options.list:
data = self.get_list(api)
print(json.dumps(data, indent=2))
else:
print("usage: --list ..OR.. --host <hostname>", file=sys.stderr)
sys.exit(1)
else:
print("Error: Configuration of server and credentials are required. See zabbix.ini.", file=sys.stderr)
sys.exit(1)
ZabbixInventory()
| gpl-3.0 |
waytai/odoo | addons/mrp_byproduct/mrp_byproduct.py | 150 | 8285 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields
from openerp.osv import osv
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
class mrp_subproduct(osv.osv):
_name = 'mrp.subproduct'
_description = 'Byproduct'
_columns={
'product_id': fields.many2one('product.product', 'Product', required=True),
'product_qty': fields.float('Product Qty', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'subproduct_type': fields.selection([('fixed','Fixed'),('variable','Variable')], 'Quantity Type', required=True, help="Define how the quantity of byproducts will be set on the production orders using this BoM.\
'Fixed' depicts a situation where the quantity of created byproduct is always equal to the quantity set on the BoM, regardless of how many are created in the production order.\
By opposition, 'Variable' means that the quantity will be computed as\
'(quantity of byproduct set on the BoM / quantity of manufactured product set on the BoM * quantity of manufactured product in the production order.)'"),
'bom_id': fields.many2one('mrp.bom', 'BoM', ondelete='cascade'),
}
_defaults={
'subproduct_type': 'variable',
'product_qty': lambda *a: 1.0,
}
def onchange_product_id(self, cr, uid, ids, product_id, context=None):
""" Changes UoM if product_id changes.
@param product_id: Changed product_id
@return: Dictionary of changed values
"""
if product_id:
prod = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
v = {'product_uom': prod.uom_id.id}
return {'value': v}
return {}
def onchange_uom(self, cr, uid, ids, product_id, product_uom, context=None):
res = {'value':{}}
if not product_uom or not product_id:
return res
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
uom = self.pool.get('product.uom').browse(cr, uid, product_uom, context=context)
if uom.category_id.id != product.uom_id.category_id.id:
res['warning'] = {'title': _('Warning'), 'message': _('The Product Unit of Measure you chose has a different category than in the product form.')}
res['value'].update({'product_uom': product.uom_id.id})
return res
class mrp_bom(osv.osv):
_name = 'mrp.bom'
_description = 'Bill of Material'
_inherit='mrp.bom'
_columns={
'sub_products':fields.one2many('mrp.subproduct', 'bom_id', 'Byproducts', copy=True),
}
class mrp_production(osv.osv):
_description = 'Production'
_inherit= 'mrp.production'
def action_confirm(self, cr, uid, ids, context=None):
""" Confirms production order and calculates quantity based on subproduct_type.
@return: Newly generated picking Id.
"""
move_obj = self.pool.get('stock.move')
picking_id = super(mrp_production,self).action_confirm(cr, uid, ids, context=context)
product_uom_obj = self.pool.get('product.uom')
for production in self.browse(cr, uid, ids):
source = production.product_id.property_stock_production.id
if not production.bom_id:
continue
for sub_product in production.bom_id.sub_products:
product_uom_factor = product_uom_obj._compute_qty(cr, uid, production.product_uom.id, production.product_qty, production.bom_id.product_uom.id)
qty1 = sub_product.product_qty
if sub_product.subproduct_type == 'variable':
if production.product_qty:
qty1 *= product_uom_factor / (production.bom_id.product_qty or 1.0)
data = {
'name': 'PROD:'+production.name,
'date': production.date_planned,
'product_id': sub_product.product_id.id,
'product_uom_qty': qty1,
'product_uom': sub_product.product_uom.id,
'location_id': source,
'location_dest_id': production.location_dest_id.id,
'move_dest_id': production.move_prod_id.id,
'production_id': production.id
}
move_id = move_obj.create(cr, uid, data, context=context)
move_obj.action_confirm(cr, uid, [move_id], context=context)
return picking_id
def _get_subproduct_factor(self, cr, uid, production_id, move_id=None, context=None):
"""Compute the factor to compute the qty of procucts to produce for the given production_id. By default,
it's always equal to the quantity encoded in the production order or the production wizard, but with
the module mrp_byproduct installed it can differ for byproducts having type 'variable'.
:param production_id: ID of the mrp.order
:param move_id: ID of the stock move that needs to be produced. Identify the product to produce.
:return: The factor to apply to the quantity that we should produce for the given production order and stock move.
"""
sub_obj = self.pool.get('mrp.subproduct')
move_obj = self.pool.get('stock.move')
production_obj = self.pool.get('mrp.production')
production_browse = production_obj.browse(cr, uid, production_id, context=context)
move_browse = move_obj.browse(cr, uid, move_id, context=context)
subproduct_factor = 1
sub_id = sub_obj.search(cr, uid,[('product_id', '=', move_browse.product_id.id),('bom_id', '=', production_browse.bom_id.id), ('subproduct_type', '=', 'variable')], context=context)
if sub_id:
subproduct_record = sub_obj.browse(cr ,uid, sub_id[0], context=context)
if subproduct_record.bom_id.product_qty:
subproduct_factor = subproduct_record.product_qty / subproduct_record.bom_id.product_qty
return subproduct_factor
return super(mrp_production, self)._get_subproduct_factor(cr, uid, production_id, move_id, context=context)
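    # Hedged worked example (quantities assumed): a BoM that produces 10
    # units and lists a 'variable' byproduct line of 3 units gives a factor
    # of 3 / 10 = 0.3, so changing a production order to 50 units sets the
    # byproduct move to 50 * 0.3 = 15 units.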
class change_production_qty(osv.osv_memory):
_inherit = 'change.production.qty'
def _update_product_to_produce(self, cr, uid, prod, qty, context=None):
bom_obj = self.pool.get('mrp.bom')
move_lines_obj = self.pool.get('stock.move')
prod_obj = self.pool.get('mrp.production')
for m in prod.move_created_ids:
if m.product_id.id == prod.product_id.id:
move_lines_obj.write(cr, uid, [m.id], {'product_uom_qty': qty})
else:
for sub_product_line in prod.bom_id.sub_products:
if sub_product_line.product_id.id == m.product_id.id:
factor = prod_obj._get_subproduct_factor(cr, uid, prod.id, m.id, context=context)
subproduct_qty = sub_product_line.subproduct_type == 'variable' and qty * factor or sub_product_line.product_qty
move_lines_obj.write(cr, uid, [m.id], {'product_uom_qty': subproduct_qty})
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
joaduo/mepinta | developer_tools/mepinta_devtools/python_project/shedskin_project/ShedskinProjectCreator.py | 1 | 5395 | # -*- coding: utf-8 -*-
'''
Mepinta
Copyright (c) 2011-2012, Joaquin G. Duo
This file is part of Mepinta.
Mepinta is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Mepinta is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Mepinta. If not, see <http://www.gnu.org/licenses/>.
'''
from common.abstract.FrameworkBase import FrameworkBase
from mepinta_devtools.python_project.PackageCreator import PackageCreator
from mepinta_devtools.ide_projects.FileManager import FileManager
from common.path import joinPath
import os
class ShedskinProjectCreator(FrameworkBase):
def __post_init__(self):
self.file_manager = FileManager(self.context)
self.package_creator = PackageCreator(self.context)
def __linkPipelineBackend(self, mepinta_src, project_path, overwrite):
# Create the new shedskin library path (external, cpp and hpp are
# fixed)
external_path = joinPath(project_path, 'shedskin_builtin_lib')
self.file_manager.makedirs(external_path)
# Link the library to the source folder
backend_rel = ('core', 'pipeline_backend', 'pipeline_backend_c_and_cpp',
'pipeline_backend')
backend_src = joinPath(mepinta_src, backend_rel)
backend_dst = joinPath(external_path, 'pipeline_backend')
self.file_manager.symlink(backend_src, backend_dst, overwrite)
# Link the includes for external lib
includes_dst = joinPath(
external_path, 'pipeline_backend_implementation')
self.file_manager.makedirs(includes_dst)
includes_src = joinPath(mepinta_src, 'backend',
'c_and_cpp', 'backend_api_c')
list_path = joinPath(backend_src, 'implementation', 'files_to_link')
files_list = self.file_manager.loadTextFile(list_path).splitlines()
for basename in files_list:
self.file_manager.symlink(joinPath(includes_src, basename),
joinPath(includes_dst, basename),
overwrite)
def __linkPipelineAndPipelineLoFacade(self, mepinta_src, python_src_path, overwrite):
# Create the mepinta package (alone)
package_path = joinPath(python_src_path, 'mepinta')
self.package_creator.createSimple(package_path, overwrite)
# Link the pipeline package
package_src = joinPath(mepinta_src, 'core', 'python_core',
'mepinta', 'pipeline')
package_dst = joinPath(package_path, 'pipeline')
self.file_manager.symlink(package_src, package_dst, overwrite)
        # Link pipeline_lo_facade.py so that it's importable from the source root
facade_src = joinPath(
'mepinta', 'pipeline', 'lo', 'pipeline_lo_facade.py')
facade_dst = joinPath(python_src_path, 'pipeline_lo_facade.py')
self.file_manager.symlink(facade_src, facade_dst, overwrite)
def __linkLoadLibraryStandAlone(self, mepinta_src, python_src_path, overwrite):
load_dst = joinPath(python_src_path, 'load_library_stand_alone.py')
load_src = joinPath(
'mepinta', 'pipeline', 'lo', 'load_library_stand_alone.py')
self.file_manager.symlink(load_src, load_dst, overwrite)
def __copyScripts(self, python_src_path):
# Copy scripts necessary for building the pipeline_lo_facade.so
# shedskin module
repo_path = joinPath(os.path.dirname(__file__), 'templates_repository')
scripts_names = os.listdir(repo_path)
self.file_manager.copyFiles(repo_path, python_src_path, scripts_names)
        mk_cmds = [s for s in scripts_names if 'build' in s and
                   'common' not in s]
def strip(s):
if s.endswith('.py'):
return s[:-len('.py')]
else:
return s
mk_targets = [(strip(s), joinPath(python_src_path, s))
for s in mk_cmds]
return mk_targets
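    # Hedged sketch of the returned build targets (script names assumed):
    #   [('build_debug', '<project>/src/build_debug.py'), ...]
    # i.e. (command name, script path) pairs the caller can run to build
    # the shedskin modules.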
def createProject(self, project_path, overwrite=False):
# Get the code source path
mepinta_src = self.context.deployment_config.mepinta_src
# Create the mepinta package
python_src_path = joinPath(project_path, 'src')
# Where all the python code goes
self.file_manager.makedirs(python_src_path)
# Link pipeline package and pipeline_lo_facade module
self.__linkPipelineAndPipelineLoFacade(
mepinta_src, python_src_path, overwrite)
# Link pipeline_backend stuff
self.__linkPipelineBackend(
mepinta_src, project_path, overwrite)
# Link the load_library_stand_alone
self.__linkLoadLibraryStandAlone(
mepinta_src, python_src_path, overwrite)
# Copy the scripts to generate the skedskin modules
mk_targets = self.__copyScripts(python_src_path)
# return the build scripts
return mk_targets
def testModule():
pass
if __name__ == "__main__":
testModule()
| gpl-3.0 |
noroutine/ansible | lib/ansible/module_utils/service.py | 37 | 8222 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) Ansible Inc, 2016
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import glob
import os
import pickle
import platform
import select
import shlex
import subprocess
import traceback
from ansible.module_utils.six import PY2, b
from ansible.module_utils._text import to_bytes, to_text
def sysv_is_enabled(name):
'''
This function will check if the service name supplied
is enabled in any of the sysv runlevels
:arg name: name of the service to test for
'''
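    # e.g. an enabled service typically appears as a link such as
    # /etc/rc3.d/S20nginx (example path assumed for illustration).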
return bool(glob.glob('/etc/rc?.d/S??%s' % name))
def get_sysv_script(name):
'''
This function will return the expected path for an init script
corresponding to the service name supplied.
:arg name: name or path of the service to test for
'''
if name.startswith('/'):
result = name
else:
result = '/etc/init.d/%s' % name
return result
def sysv_exists(name):
'''
This function will return True or False depending on
the existence of an init script corresponding to the service name supplied.
:arg name: name of the service to test for
'''
return os.path.exists(get_sysv_script(name))
def fail_if_missing(module, found, service, msg=''):
'''
This function will return an error or exit gracefully depending on check mode status
and if the service is missing or not.
    :arg module: is an AnsibleModule object, used for its utility methods
:arg found: boolean indicating if services was found or not
:arg service: name of service
:kw msg: extra info to append to error/success msg when missing
'''
if not found:
if module.check_mode:
module.exit_json(msg="Service %s not found on %s, assuming it will exist on full run" % (service, msg), changed=True)
else:
module.fail_json(msg='Could not find the requested service %s: %s' % (service, msg))
def fork_process():
'''
    This function performs the classic double fork so the caller can detach
    from the parent process and execute work as a daemon.
'''
pid = os.fork()
if pid == 0:
# Set stdin/stdout/stderr to /dev/null
fd = os.open(os.devnull, os.O_RDWR)
# clone stdin/out/err
for num in range(3):
if fd != num:
os.dup2(fd, num)
# close otherwise
if fd not in range(3):
os.close(fd)
# Make us a daemon
pid = os.fork()
# end if not in child
if pid > 0:
os._exit(0)
# get new process session and detach
sid = os.setsid()
if sid == -1:
raise Exception("Unable to detach session while daemonizing")
# avoid possible problems with cwd being removed
os.chdir("/")
pid = os.fork()
if pid > 0:
os._exit(0)
return pid
def daemonize(module, cmd):
'''
Execute a command while detaching as a daemon, returns rc, stdout, and stderr.
    :arg module: is an AnsibleModule object, used for its utility methods
:arg cmd: is a list or string representing the command and options to run
This is complex because daemonization is hard for people.
What we do is daemonize a part of this module, the daemon runs the command,
picks up the return code and output, and returns it to the main process.
'''
# init some vars
chunk = 4096 # FIXME: pass in as arg?
errors = 'surrogate_or_strict'
# start it!
try:
pipe = os.pipe()
pid = fork_process()
except OSError:
module.fail_json(msg="Error while attempting to fork: %s", exception=traceback.format_exc())
except Exception as exc:
module.fail_json(msg=to_text(exc), exception=traceback.format_exc())
# we don't do any locking as this should be a unique module/process
if pid == 0:
os.close(pipe[0])
# if command is string deal with py2 vs py3 conversions for shlex
if not isinstance(cmd, list):
if PY2:
cmd = shlex.split(to_bytes(cmd, errors=errors))
else:
cmd = shlex.split(to_text(cmd, errors=errors))
# make sure we always use byte strings
run_cmd = []
for c in cmd:
run_cmd.append(to_bytes(c, errors=errors))
# execute the command in forked process
p = subprocess.Popen(run_cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=lambda: os.close(pipe[1]))
fds = [p.stdout, p.stderr]
# loop reading output till its done
output = {p.stdout: b(""), p.sterr: b("")}
while fds:
rfd, wfd, efd = select.select(fds, [], fds, 1)
if (rfd + wfd + efd) or p.poll():
for out in fds:
if out in rfd:
data = os.read(out.fileno(), chunk)
if not data:
fds.remove(out)
                        output[out] += data  # os.read() already returns bytes
# even after fds close, we might want to wait for pid to die
p.wait()
# Return a pickled data of parent
return_data = pickle.dumps([p.returncode, to_text(output[p.stdout]), to_text(output[p.stderr])], protocol=pickle.HIGHEST_PROTOCOL)
os.write(pipe[1], to_bytes(return_data, errors=errors))
# clean up
os.close(pipe[1])
os._exit(0)
elif pid == -1:
module.fail_json(msg="Unable to fork, no exception thrown, probably due to lack of resources, check logs.")
else:
# in parent
os.close(pipe[1])
os.waitpid(pid, 0)
# Grab response data after child finishes
return_data = b("")
while True:
rfd, wfd, efd = select.select([pipe[0]], [], [pipe[0]])
if pipe[0] in rfd:
data = os.read(pipe[0], chunk)
if not data:
break
                return_data += data  # os.read() already returns bytes
# Note: no need to specify encoding on py3 as this module sends the
# pickle to itself (thus same python interpreter so we aren't mixing
# py2 and py3)
return pickle.loads(to_bytes(return_data, errors=errors))
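# Hedged usage sketch (module object and command assumed):
#
#     rc, out, err = daemonize(module, '/usr/sbin/myservice --start')
#
# The command runs detached from the caller's session; the forked child
# waits for it and ships the pickled (rc, stdout, stderr) triple back to
# the parent through the pipe read above.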
def check_ps(module, pattern):
# Set ps flags
if platform.system() == 'SunOS':
psflags = '-ef'
else:
psflags = 'auxww'
# Find ps binary
psbin = module.get_bin_path('ps', True)
(rc, out, err) = module.run_command('%s %s' % (psbin, psflags))
# If rc is 0, set running as appropriate
if rc == 0:
for line in out.split('\n'):
if pattern in line:
return True
return False
| gpl-3.0 |
Radium-Devices/android_kernel_motorola_msm8916 | tools/perf/scripts/python/event_analyzing_sample.py | 4719 | 7393 | # event_analyzing_sample.py: general event handler in python
#
# Current perf report is already very powerful with the annotation integrated,
# and this script is not trying to be as powerful as perf report, but
# providing end user/developer a flexible way to analyze the events other
# than trace points.
#
# The 2 database related functions in this script just show how to gather
# the basic information, and users can modify and write their own functions
# according to their specific requirement.
#
# The first function "show_general_events" just does a basic grouping for all
# generic events with the help of sqlite, and the 2nd one "show_pebs_ll" is
# for a x86 HW PMU event: PEBS with load latency data.
#
import os
import sys
import math
import struct
import sqlite3
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from EventClass import *
#
# If the perf.data has a big number of samples, then the insert operation
# will be very time consuming (about 10+ minutes for 10000 samples) if the
# .db database is on disk. Move the .db file to RAM based FS to speedup
# the handling, which will cut the time down to several seconds.
#
con = sqlite3.connect("/dev/shm/perf.db")
con.isolation_level = None
def trace_begin():
print "In trace_begin:\n"
#
# Will create several tables at the start, pebs_ll is for PEBS data with
# load latency info, while gen_events is for general event.
#
con.execute("""
create table if not exists gen_events (
name text,
symbol text,
comm text,
dso text
);""")
con.execute("""
create table if not exists pebs_ll (
name text,
symbol text,
comm text,
dso text,
flags integer,
ip integer,
status integer,
dse integer,
dla integer,
lat integer
);""")
#
# Create and insert event object to a database so that user could
# do more analysis with simple database commands.
#
def process_event(param_dict):
event_attr = param_dict["attr"]
sample = param_dict["sample"]
raw_buf = param_dict["raw_buf"]
comm = param_dict["comm"]
name = param_dict["ev_name"]
# Symbol and dso info are not always resolved
if (param_dict.has_key("dso")):
dso = param_dict["dso"]
else:
dso = "Unknown_dso"
if (param_dict.has_key("symbol")):
symbol = param_dict["symbol"]
else:
symbol = "Unknown_symbol"
# Create the event object and insert it to the right table in database
event = create_event(name, comm, dso, symbol, raw_buf)
insert_db(event)
def insert_db(event):
if event.ev_type == EVTYPE_GENERIC:
con.execute("insert into gen_events values(?, ?, ?, ?)",
(event.name, event.symbol, event.comm, event.dso))
elif event.ev_type == EVTYPE_PEBS_LL:
event.ip &= 0x7fffffffffffffff
event.dla &= 0x7fffffffffffffff
con.execute("insert into pebs_ll values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
(event.name, event.symbol, event.comm, event.dso, event.flags,
event.ip, event.status, event.dse, event.dla, event.lat))
def trace_end():
print "In trace_end:\n"
    # We show the basic info for the 2 types of event classes
show_general_events()
show_pebs_ll()
con.close()
#
# As the event count may be very large, we can't show the histogram on a
# linear scale, so we use a log2 scale instead.
#
def num2sym(num):
# Each number will have at least one '#'
snum = '#' * (int)(math.log(num, 2) + 1)
return snum
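# Worked examples of the log2 scaling (illustrative comments only):
#   num2sym(1)    -> '#'           (int(log2(1) + 1) == 1)
#   num2sym(8)    -> '####'        (int(log2(8) + 1) == 4)
#   num2sym(1000) -> '##########'  (int(log2(1000) + 1) == 10)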
def show_general_events():
# Check the total record number in the table
count = con.execute("select count(*) from gen_events")
for t in count:
        print "There are %d records in the gen_events table" % t[0]
if t[0] == 0:
return
print "Statistics about the general events grouped by thread/symbol/dso: \n"
# Group by thread
commq = con.execute("select comm, count(comm) from gen_events group by comm order by -count(comm)")
print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
for row in commq:
print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by symbol
print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
symbolq = con.execute("select symbol, count(symbol) from gen_events group by symbol order by -count(symbol)")
for row in symbolq:
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by dso
print "\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74)
dsoq = con.execute("select dso, count(dso) from gen_events group by dso order by -count(dso)")
for row in dsoq:
print "%40s %8d %s" % (row[0], row[1], num2sym(row[1]))
#
# This function just shows the basic info; we could do more with the
# data in the tables, like checking the function parameters when
# big-latency events occur.
#
def show_pebs_ll():
count = con.execute("select count(*) from pebs_ll")
for t in count:
        print "There are %d records in the pebs_ll table" % t[0]
if t[0] == 0:
return
print "Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n"
# Group by thread
commq = con.execute("select comm, count(comm) from pebs_ll group by comm order by -count(comm)")
print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
for row in commq:
print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by symbol
print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
symbolq = con.execute("select symbol, count(symbol) from pebs_ll group by symbol order by -count(symbol)")
for row in symbolq:
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by dse
dseq = con.execute("select dse, count(dse) from pebs_ll group by dse order by -count(dse)")
print "\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58)
for row in dseq:
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by latency
latq = con.execute("select lat, count(lat) from pebs_ll group by lat order by lat")
print "\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58)
for row in latq:
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
def trace_unhandled(event_name, context, event_fields_dict):
print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
| gpl-2.0 |
NeuralProsthesisLab/unlock | unlock/util/__init__.py | 1 | 1966 | # Copyright (c) James Percent, Byron Galbraith and Unlock contributors.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Unlock nor the names of its contributors may be used
# to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from unlock.util.dispatcher import *
from unlock.util.decorator import *
from unlock.util.misc import *
from unlock.util.observable import *
from unlock.util.saferef import *
from unlock.util.sockets import *
from unlock.util.signal import *
from unlock.util.factory import *
from unlock.util.injector import *
from unlock.util.runtime import *
__author__ = 'jpercent'
| bsd-3-clause |
anandsimmy/ecommerce | tests/integration/core/test_customisation.py | 6 | 2799 | import pytest
from django.conf import settings
from django.test import TestCase
from oscar.core import customisation
VALID_FOLDER_PATH = 'tests/_site/apps'
class TestUtilities(TestCase):
def test_subfolder_extraction(self):
folders = list(customisation.subfolders('/var/www/eggs'))
self.assertEqual(folders, ['/var', '/var/www', '/var/www/eggs'])
def test_raises_exception_for_nonexistant_app_label():
with pytest.raises(ValueError):
customisation.fork_app('sillytown', 'somefolder')
def test_raises_exception_if_app_has_already_been_forked():
# We piggyback on another test which means a custom app is already in
# the settings we use for the test suite. We just check that's still
# the case here.
assert 'tests._site.apps.partner' in settings.INSTALLED_APPS
with pytest.raises(ValueError):
customisation.fork_app('partner', VALID_FOLDER_PATH)
def test_creates_new_folder(tmpdir):
path = tmpdir.mkdir('fork')
customisation.fork_app('order', str(path))
path.join('order').ensure_dir()
def test_creates_init_file(tmpdir):
path = tmpdir.mkdir('fork')
customisation.fork_app('order', str(path))
path.join('order').join('__init__.py').ensure()
def test_handles_dashboard_app(tmpdir):
# Dashboard apps are fiddly as they aren't identified by a single app
# label.
path = tmpdir.mkdir('fork')
customisation.fork_app('dashboard.catalogue', str(path))
# Check __init__.py created (and supporting folders)
path.join('dashboard').join('catalogue').join('__init__.py').ensure()
def test_creates_models_and_admin_file(tmpdir):
path = tmpdir.mkdir('fork')
customisation.fork_app('order', str(path))
for module, expected_string in [
('models', 'from oscar.apps.order.models import *'),
('admin', 'from oscar.apps.order.admin import *'),
('config', 'OrderConfig')
]:
filepath = path.join('order').join('%s.py' % module)
filepath.ensure()
contents = filepath.read()
assert expected_string in contents
def test_copies_in_migrations_when_needed(tmpdir):
path = tmpdir.mkdir('fork')
for app, has_models in [('order', True), ('search', False)]:
customisation.fork_app(app, str(path))
native_migration_path = path.join(app).join('migrations')
assert has_models == native_migration_path.check()
def test_dashboard_app_config(tmpdir, monkeypatch):
path = tmpdir.mkdir('fork')
customisation.fork_app('dashboard', str(path))
path.join('__init__.py').write('')
monkeypatch.syspath_prepend(str(tmpdir))
config_module = __import__(
'%s.dashboard.config' % path.basename, fromlist=['DashboardConfig']
)
assert hasattr(config_module, 'DashboardConfig')
| bsd-3-clause |
plumgrid/plumgrid-nova | nova/api/openstack/compute/contrib/hypervisors.py | 11 | 9862 | # Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The hypervisors admin extension."""
import webob.exc
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova import exception
from nova.openstack.common.gettextutils import _
authorize = extensions.extension_authorizer('compute', 'hypervisors')
def make_hypervisor(elem, detail):
elem.set('hypervisor_hostname')
elem.set('id')
if detail:
elem.set('vcpus')
elem.set('memory_mb')
elem.set('local_gb')
elem.set('vcpus_used')
elem.set('memory_mb_used')
elem.set('local_gb_used')
elem.set('hypervisor_type')
elem.set('hypervisor_version')
elem.set('free_ram_mb')
elem.set('free_disk_gb')
elem.set('current_workload')
elem.set('running_vms')
elem.set('cpu_info')
elem.set('disk_available_least')
service = xmlutil.SubTemplateElement(elem, 'service',
selector='service')
service.set('id')
service.set('host')
class HypervisorIndexTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('hypervisors')
elem = xmlutil.SubTemplateElement(root, 'hypervisor',
selector='hypervisors')
make_hypervisor(elem, False)
return xmlutil.MasterTemplate(root, 1)
class HypervisorDetailTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('hypervisors')
elem = xmlutil.SubTemplateElement(root, 'hypervisor',
selector='hypervisors')
make_hypervisor(elem, True)
return xmlutil.MasterTemplate(root, 1)
class HypervisorTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('hypervisor', selector='hypervisor')
make_hypervisor(root, True)
return xmlutil.MasterTemplate(root, 1)
class HypervisorUptimeTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('hypervisor', selector='hypervisor')
make_hypervisor(root, False)
root.set('uptime')
return xmlutil.MasterTemplate(root, 1)
class HypervisorServersTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('hypervisors')
elem = xmlutil.SubTemplateElement(root, 'hypervisor',
selector='hypervisors')
make_hypervisor(elem, False)
servers = xmlutil.SubTemplateElement(elem, 'servers')
server = xmlutil.SubTemplateElement(servers, 'server',
selector='servers')
server.set('name')
server.set('uuid')
return xmlutil.MasterTemplate(root, 1)
class HypervisorStatisticsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('hypervisor_statistics',
selector='hypervisor_statistics')
root.set('count')
root.set('vcpus')
root.set('memory_mb')
root.set('local_gb')
root.set('vcpus_used')
root.set('memory_mb_used')
root.set('local_gb_used')
root.set('free_ram_mb')
root.set('free_disk_gb')
root.set('current_workload')
root.set('running_vms')
root.set('disk_available_least')
return xmlutil.MasterTemplate(root, 1)
class HypervisorsController(object):
"""The Hypervisors API controller for the OpenStack API."""
def __init__(self):
self.host_api = compute.HostAPI()
super(HypervisorsController, self).__init__()
def _view_hypervisor(self, hypervisor, detail, servers=None, **kwargs):
hyp_dict = {
'id': hypervisor['id'],
'hypervisor_hostname': hypervisor['hypervisor_hostname'],
}
if detail and not servers:
for field in ('vcpus', 'memory_mb', 'local_gb', 'vcpus_used',
'memory_mb_used', 'local_gb_used',
'hypervisor_type', 'hypervisor_version',
'free_ram_mb', 'free_disk_gb', 'current_workload',
'running_vms', 'cpu_info', 'disk_available_least'):
hyp_dict[field] = hypervisor[field]
hyp_dict['service'] = {
'id': hypervisor['service_id'],
'host': hypervisor['service']['host'],
}
if servers:
hyp_dict['servers'] = [dict(name=serv['name'], uuid=serv['uuid'])
for serv in servers]
# Add any additional info
if kwargs:
hyp_dict.update(kwargs)
return hyp_dict
@wsgi.serializers(xml=HypervisorIndexTemplate)
def index(self, req):
context = req.environ['nova.context']
authorize(context)
compute_nodes = self.host_api.compute_node_get_all(context)
return dict(hypervisors=[self._view_hypervisor(hyp, False)
for hyp in compute_nodes])
@wsgi.serializers(xml=HypervisorDetailTemplate)
def detail(self, req):
context = req.environ['nova.context']
authorize(context)
compute_nodes = self.host_api.compute_node_get_all(context)
return dict(hypervisors=[self._view_hypervisor(hyp, True)
for hyp in compute_nodes])
@wsgi.serializers(xml=HypervisorTemplate)
def show(self, req, id):
context = req.environ['nova.context']
authorize(context)
try:
hyp = self.host_api.compute_node_get(context, id)
except (ValueError, exception.ComputeHostNotFound):
msg = _("Hypervisor with ID '%s' could not be found.") % id
raise webob.exc.HTTPNotFound(explanation=msg)
return dict(hypervisor=self._view_hypervisor(hyp, True))
@wsgi.serializers(xml=HypervisorUptimeTemplate)
def uptime(self, req, id):
context = req.environ['nova.context']
authorize(context)
try:
hyp = self.host_api.compute_node_get(context, id)
except (ValueError, exception.ComputeHostNotFound):
msg = _("Hypervisor with ID '%s' could not be found.") % id
raise webob.exc.HTTPNotFound(explanation=msg)
# Get the uptime
try:
host = hyp['service']['host']
uptime = self.host_api.get_host_uptime(context, host)
except NotImplementedError:
msg = _("Virt driver does not implement uptime function.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
return dict(hypervisor=self._view_hypervisor(hyp, False,
uptime=uptime))
@wsgi.serializers(xml=HypervisorIndexTemplate)
def search(self, req, id):
context = req.environ['nova.context']
authorize(context)
hypervisors = self.host_api.compute_node_search_by_hypervisor(
context, id)
if hypervisors:
return dict(hypervisors=[self._view_hypervisor(hyp, False)
for hyp in hypervisors])
else:
msg = _("No hypervisor matching '%s' could be found.") % id
raise webob.exc.HTTPNotFound(explanation=msg)
@wsgi.serializers(xml=HypervisorServersTemplate)
def servers(self, req, id):
context = req.environ['nova.context']
authorize(context)
compute_nodes = self.host_api.compute_node_search_by_hypervisor(
context, id)
if not compute_nodes:
msg = _("No hypervisor matching '%s' could be found.") % id
raise webob.exc.HTTPNotFound(explanation=msg)
hypervisors = []
for compute_node in compute_nodes:
instances = self.host_api.instance_get_all_by_host(context,
compute_node['service']['host'])
hyp = self._view_hypervisor(compute_node, False, instances)
hypervisors.append(hyp)
return dict(hypervisors=hypervisors)
@wsgi.serializers(xml=HypervisorStatisticsTemplate)
def statistics(self, req):
context = req.environ['nova.context']
authorize(context)
stats = self.host_api.compute_node_statistics(context)
return dict(hypervisor_statistics=stats)
class Hypervisors(extensions.ExtensionDescriptor):
"""Admin-only hypervisor administration."""
name = "Hypervisors"
alias = "os-hypervisors"
namespace = "http://docs.openstack.org/compute/ext/hypervisors/api/v1.1"
updated = "2012-06-21T00:00:00+00:00"
def get_resources(self):
resources = [extensions.ResourceExtension('os-hypervisors',
HypervisorsController(),
collection_actions={'detail': 'GET',
'statistics': 'GET'},
member_actions={'uptime': 'GET',
'search': 'GET',
'servers': 'GET'})]
return resources
| apache-2.0 |
kxliugang/edx-platform | lms/djangoapps/courseware/features/word_cloud.py | 94 | 1516 | # pylint: disable=missing-docstring
from lettuce import world, step
from common import i_am_registered_for_the_course, section_location, visit_scenario_item
@step('I view the word cloud and it has rendered')
def word_cloud_is_rendered(_step):
assert world.is_css_present('.word_cloud')
@step('the course has a Word Cloud component')
def view_word_cloud(_step):
coursenum = 'test_course'
i_am_registered_for_the_course(_step, coursenum)
add_word_cloud_to_course(coursenum)
visit_scenario_item('SECTION')
@step('I press the Save button')
def press_the_save_button(_step):
button_css = '.input_cloud_section input.save'
world.css_click(button_css)
@step('I see the empty result')
def see_empty_result(_step):
assert world.css_text('.your_words', 0) == ''
@step('I fill inputs')
def fill_inputs(_step):
input_css = '.input_cloud_section .input-cloud'
world.css_fill(input_css, 'text1', 0)
for index in range(1, 4):
world.css_fill('.input_cloud_section .input-cloud', 'text2', index)
@step('I see the result with words count')
def see_result(_step):
strong_css = '.your_words strong'
target_text = set([world.css_text(strong_css, i) for i in range(2)])
assert set(['text1', 'text2']) == target_text
def add_word_cloud_to_course(course):
category = 'word_cloud'
world.ItemFactory.create(parent_location=section_location(course),
category=category,
display_name='Word Cloud')
| agpl-3.0 |
garnaat/boto | boto/sqs/messageattributes.py | 159 | 2487 | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2014 Amazon.com, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents an SQS MessageAttribute Name/Value set
"""
class MessageAttributes(dict):
def __init__(self, parent):
self.parent = parent
self.current_key = None
self.current_value = None
def startElement(self, name, attrs, connection):
if name == 'Value':
self.current_value = MessageAttributeValue(self)
return self.current_value
def endElement(self, name, value, connection):
if name == 'MessageAttribute':
self[self.current_key] = self.current_value
elif name == 'Name':
self.current_key = value
elif name == 'Value':
pass
else:
setattr(self, name, value)
class MessageAttributeValue(dict):
def __init__(self, parent):
self.parent = parent
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'DataType':
self['data_type'] = value
elif name == 'StringValue':
self['string_value'] = value
elif name == 'BinaryValue':
self['binary_value'] = value
elif name == 'StringListValue':
self['string_list_value'] = value
elif name == 'BinaryListValue':
self['binary_list_value'] = value
| mit |
StratusLab/client | api/code/src/main/python/stratuslab/marketplace/Downloader.py | 1 | 4491 | #
# Created as part of the StratusLab project (http://stratuslab.eu),
# co-funded by the European Commission under the Grant Agreement
# INFSO-RI-261552.
#
# Copyright (c) 2011, SixSq Sarl
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shutil
import tempfile
from stratuslab import Util
from stratuslab import Defaults
from stratuslab.ManifestInfo import ManifestInfo
from stratuslab.Compressor import Compressor
from stratuslab.ConfigHolder import ConfigHolder
from stratuslab.Exceptions import InputException
from stratuslab.marketplace.ImageValidator import ImageValidator
from stratuslab.marketplace.ManifestValidator import ManifestValidator
from stratuslab.marketplace.ManifestDownloader import ManifestDownloader
etree = Util.importETree()
class Downloader(object):
ENDPOINT = Defaults.marketplaceEndpoint
LOCAL_IMAGE_FILENAME = '/tmp/image.img'
def __init__(self, configHolder=ConfigHolder()):
self.localImageFilename = ''
self.configHolder = configHolder
self.imageUrl = ''
self.compression = ''
self.marketplaceEndpoint = Downloader.ENDPOINT
configHolder.assign(self)
self.localImageFilename = os.path.abspath(self.localImageFilename)
self.manifestObject = None
def download(self, uri):
"""uri is the full resource uri uniquely identifying a single manifest entry"""
tempMetadataFilename = tempfile.mktemp()
ManifestDownloader(self.configHolder).getManifestAsFile(uri, tempMetadataFilename)
manifestInfo = ManifestInfo(self.configHolder)
manifestInfo.parseManifestFromFile(tempMetadataFilename)
tempImageFilename = self._downloadFromLocations(manifestInfo)
self._verifySignature(tempImageFilename, tempMetadataFilename)
tempImageFilename = self._inflateImage(tempImageFilename)
if not os.path.exists(tempImageFilename):
raise InputException('Failed to find image matching image resource uri: %s' % uri)
self._verifyHash(tempImageFilename, manifestInfo.sha1)
shutil.copy2(tempImageFilename, self.localImageFilename)
os.remove(tempImageFilename)
os.remove(tempMetadataFilename)
return self.localImageFilename
def _downloadFromLocations(self, manifestInfo):
tempImageFilename = ''
for location in manifestInfo.locations:
self._printDetail('Looking for image: %s' % location)
try:
tempImageFilename = self._downloadImage(location)
break
except KeyboardInterrupt:
raise
except:
pass
return tempImageFilename
def _downloadImage(self, url):
compressionExtension = self._extractCompressionExtension(url)
localFilename = tempfile.mktemp()
localImageName = localFilename + compressionExtension
Util.wget(url, localImageName)
return localImageName
def _extractCompressionExtension(self, url):
compression = url.split('.')[-1]
if compression in Compressor.compressionFormats:
return '.' + compression
else:
return ''
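    # Illustrative behaviour (a sketch; assumes Compressor.compressionFormats
    # includes 'gz'): '.../image.img.gz' -> '.gz', '.../image.img' -> ''.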
def _verifySignature(self, imageFilename, metadataFilename):
ManifestValidator(self.configHolder).verifySignature(imageFilename, metadataFilename)
def _inflateImage(self, imageFilename):
extension = self._extractCompressionExtension(imageFilename)
inflatedFilename = imageFilename
if extension:
self._printDetail('Inflating image %s' % imageFilename)
Compressor.inflate(imageFilename)
inflatedFilename = imageFilename[:-len(extension)]
return inflatedFilename
def _verifyHash(self, imageFilename, hashFromManifest):
ImageValidator().verifyHash(imageFilename, hashFromManifest)
def _printDetail(self, message):
Util.printDetail(message, self.verboseLevel, Util.VERBOSE_LEVEL_NORMAL)
| apache-2.0 |
vikas-parashar/zulip | zerver/tests/webhooks/test_crashlytics.py | 31 | 1114 | # -*- coding: utf-8 -*-
from zerver.lib.test_classes import WebhookTestCase
class CrashlyticsHookTests(WebhookTestCase):
STREAM_NAME = 'crashlytics'
URL_TEMPLATE = u"/api/v1/external/crashlytics?stream={stream}&api_key={api_key}"
FIXTURE_DIR_NAME = 'crashlytics'
def test_crashlytics_verification_message(self):
# type: () -> None
last_message_before_request = self.get_last_message()
payload = self.get_body('verification')
url = self.build_webhook_url()
result = self.client_post(url, payload, content_type="application/json")
last_message_after_request = self.get_last_message()
self.assert_json_success(result)
self.assertEqual(last_message_after_request.pk, last_message_before_request.pk)
def test_crashlytics_build_in_success_status(self):
# type: () -> None
expected_subject = u"123: Issue Title"
expected_message = u"[Issue](http://crashlytics.com/full/url/to/issue) impacts at least 16 device(s)."
self.send_and_test_stream_message('issue_message', expected_subject, expected_message)
| apache-2.0 |
XiaosongWei/chromium-crosswalk | tools/telemetry/third_party/pyserial/serial/serialwin32.py | 147 | 18260 | #! python
# Python Serial Port Extension for Win32, Linux, BSD, Jython
# serial driver for win32
# see __init__.py
#
# (C) 2001-2011 Chris Liechti <cliechti@gmx.net>
# this is distributed under a free software license, see license.txt
#
# Initial patch to use ctypes by Giovanni Bajo <rasky@develer.com>
import ctypes
import sys   # used by the self-test at the bottom of this file
import time  # used by flush() and sendBreak()
from serial import win32
from serial.serialutil import *
def device(portnum):
"""Turn a port number into a device name"""
return 'COM%d' % (portnum+1) # numbers are transformed to a string
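# Illustrative mapping (comments only): device(0) -> 'COM1',
# device(9) -> 'COM10'.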
class Win32Serial(SerialBase):
"""Serial port implementation for Win32 based on ctypes."""
BAUDRATES = (50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
9600, 19200, 38400, 57600, 115200)
def __init__(self, *args, **kwargs):
self.hComPort = None
self._overlappedRead = None
self._overlappedWrite = None
self._rtsToggle = False
self._rtsState = win32.RTS_CONTROL_ENABLE
self._dtrState = win32.DTR_CONTROL_ENABLE
SerialBase.__init__(self, *args, **kwargs)
def open(self):
"""Open port with current settings. This may throw a SerialException
if the port cannot be opened."""
if self._port is None:
raise SerialException("Port must be configured before it can be used.")
if self._isOpen:
raise SerialException("Port is already open.")
        # the "\\.\COMx" format is required for devices other than COM1-COM8;
        # not all versions of windows seem to support this properly,
        # so the first few ports are used with the DOS device name
port = self.portstr
try:
if port.upper().startswith('COM') and int(port[3:]) > 8:
port = '\\\\.\\' + port
except ValueError:
            # for ports like COMnotanumber
pass
self.hComPort = win32.CreateFile(port,
win32.GENERIC_READ | win32.GENERIC_WRITE,
0, # exclusive access
None, # no security
win32.OPEN_EXISTING,
win32.FILE_ATTRIBUTE_NORMAL | win32.FILE_FLAG_OVERLAPPED,
0)
if self.hComPort == win32.INVALID_HANDLE_VALUE:
self.hComPort = None # 'cause __del__ is called anyway
raise SerialException("could not open port %r: %r" % (self.portstr, ctypes.WinError()))
try:
self._overlappedRead = win32.OVERLAPPED()
self._overlappedRead.hEvent = win32.CreateEvent(None, 1, 0, None)
self._overlappedWrite = win32.OVERLAPPED()
#~ self._overlappedWrite.hEvent = win32.CreateEvent(None, 1, 0, None)
self._overlappedWrite.hEvent = win32.CreateEvent(None, 0, 0, None)
# Setup a 4k buffer
win32.SetupComm(self.hComPort, 4096, 4096)
# Save original timeout values:
self._orgTimeouts = win32.COMMTIMEOUTS()
win32.GetCommTimeouts(self.hComPort, ctypes.byref(self._orgTimeouts))
self._reconfigurePort()
# Clear buffers:
# Remove anything that was there
win32.PurgeComm(self.hComPort,
win32.PURGE_TXCLEAR | win32.PURGE_TXABORT |
win32.PURGE_RXCLEAR | win32.PURGE_RXABORT)
except:
try:
self._close()
except:
# ignore any exception when closing the port
# also to keep original exception that happened when setting up
pass
self.hComPort = None
raise
else:
self._isOpen = True
def _reconfigurePort(self):
"""Set communication parameters on opened port."""
if not self.hComPort:
raise SerialException("Can only operate on a valid port handle")
# Set Windows timeout values
# timeouts is a tuple with the following items:
# (ReadIntervalTimeout,ReadTotalTimeoutMultiplier,
# ReadTotalTimeoutConstant,WriteTotalTimeoutMultiplier,
# WriteTotalTimeoutConstant)
if self._timeout is None:
timeouts = (0, 0, 0, 0, 0)
elif self._timeout == 0:
timeouts = (win32.MAXDWORD, 0, 0, 0, 0)
else:
timeouts = (0, 0, int(self._timeout*1000), 0, 0)
if self._timeout != 0 and self._interCharTimeout is not None:
timeouts = (int(self._interCharTimeout * 1000),) + timeouts[1:]
if self._writeTimeout is None:
pass
elif self._writeTimeout == 0:
timeouts = timeouts[:-2] + (0, win32.MAXDWORD)
else:
timeouts = timeouts[:-2] + (0, int(self._writeTimeout*1000))
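        # Worked example (a sketch; the values are assumptions): with
        # timeout=1.5, interCharTimeout=0.1 and writeTimeout=2 the tuple
        # becomes (100, 0, 1500, 0, 2000) -- all values in milliseconds.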
win32.SetCommTimeouts(self.hComPort, ctypes.byref(win32.COMMTIMEOUTS(*timeouts)))
win32.SetCommMask(self.hComPort, win32.EV_ERR)
# Setup the connection info.
# Get state and modify it:
comDCB = win32.DCB()
win32.GetCommState(self.hComPort, ctypes.byref(comDCB))
comDCB.BaudRate = self._baudrate
if self._bytesize == FIVEBITS:
comDCB.ByteSize = 5
elif self._bytesize == SIXBITS:
comDCB.ByteSize = 6
elif self._bytesize == SEVENBITS:
comDCB.ByteSize = 7
elif self._bytesize == EIGHTBITS:
comDCB.ByteSize = 8
else:
raise ValueError("Unsupported number of data bits: %r" % self._bytesize)
if self._parity == PARITY_NONE:
comDCB.Parity = win32.NOPARITY
comDCB.fParity = 0 # Disable Parity Check
elif self._parity == PARITY_EVEN:
comDCB.Parity = win32.EVENPARITY
comDCB.fParity = 1 # Enable Parity Check
elif self._parity == PARITY_ODD:
comDCB.Parity = win32.ODDPARITY
comDCB.fParity = 1 # Enable Parity Check
elif self._parity == PARITY_MARK:
comDCB.Parity = win32.MARKPARITY
comDCB.fParity = 1 # Enable Parity Check
elif self._parity == PARITY_SPACE:
comDCB.Parity = win32.SPACEPARITY
comDCB.fParity = 1 # Enable Parity Check
else:
raise ValueError("Unsupported parity mode: %r" % self._parity)
if self._stopbits == STOPBITS_ONE:
comDCB.StopBits = win32.ONESTOPBIT
elif self._stopbits == STOPBITS_ONE_POINT_FIVE:
comDCB.StopBits = win32.ONE5STOPBITS
elif self._stopbits == STOPBITS_TWO:
comDCB.StopBits = win32.TWOSTOPBITS
else:
raise ValueError("Unsupported number of stop bits: %r" % self._stopbits)
comDCB.fBinary = 1 # Enable Binary Transmission
# Char. w/ Parity-Err are replaced with 0xff (if fErrorChar is set to TRUE)
if self._rtscts:
comDCB.fRtsControl = win32.RTS_CONTROL_HANDSHAKE
elif self._rtsToggle:
comDCB.fRtsControl = win32.RTS_CONTROL_TOGGLE
else:
comDCB.fRtsControl = self._rtsState
if self._dsrdtr:
comDCB.fDtrControl = win32.DTR_CONTROL_HANDSHAKE
else:
comDCB.fDtrControl = self._dtrState
if self._rtsToggle:
comDCB.fOutxCtsFlow = 0
else:
comDCB.fOutxCtsFlow = self._rtscts
comDCB.fOutxDsrFlow = self._dsrdtr
comDCB.fOutX = self._xonxoff
comDCB.fInX = self._xonxoff
comDCB.fNull = 0
comDCB.fErrorChar = 0
comDCB.fAbortOnError = 0
comDCB.XonChar = XON
comDCB.XoffChar = XOFF
if not win32.SetCommState(self.hComPort, ctypes.byref(comDCB)):
raise ValueError("Cannot configure port, some setting was wrong. Original message: %r" % ctypes.WinError())
#~ def __del__(self):
#~ self.close()
def _close(self):
"""internal close port helper"""
if self.hComPort:
# Restore original timeout values:
win32.SetCommTimeouts(self.hComPort, self._orgTimeouts)
# Close COM-Port:
win32.CloseHandle(self.hComPort)
if self._overlappedRead is not None:
win32.CloseHandle(self._overlappedRead.hEvent)
self._overlappedRead = None
if self._overlappedWrite is not None:
win32.CloseHandle(self._overlappedWrite.hEvent)
self._overlappedWrite = None
self.hComPort = None
def close(self):
"""Close port"""
if self._isOpen:
self._close()
self._isOpen = False
def makeDeviceName(self, port):
return device(port)
# - - - - - - - - - - - - - - - - - - - - - - - -
def inWaiting(self):
"""Return the number of characters currently in the input buffer."""
flags = win32.DWORD()
comstat = win32.COMSTAT()
if not win32.ClearCommError(self.hComPort, ctypes.byref(flags), ctypes.byref(comstat)):
raise SerialException('call to ClearCommError failed')
return comstat.cbInQue
def read(self, size=1):
"""Read size bytes from the serial port. If a timeout is set it may
        return fewer characters than requested. With no timeout it will block
until the requested number of bytes is read."""
if not self.hComPort: raise portNotOpenError
if size > 0:
win32.ResetEvent(self._overlappedRead.hEvent)
flags = win32.DWORD()
comstat = win32.COMSTAT()
if not win32.ClearCommError(self.hComPort, ctypes.byref(flags), ctypes.byref(comstat)):
raise SerialException('call to ClearCommError failed')
if self.timeout == 0:
n = min(comstat.cbInQue, size)
if n > 0:
buf = ctypes.create_string_buffer(n)
rc = win32.DWORD()
err = win32.ReadFile(self.hComPort, buf, n, ctypes.byref(rc), ctypes.byref(self._overlappedRead))
if not err and win32.GetLastError() != win32.ERROR_IO_PENDING:
raise SerialException("ReadFile failed (%r)" % ctypes.WinError())
err = win32.WaitForSingleObject(self._overlappedRead.hEvent, win32.INFINITE)
read = buf.raw[:rc.value]
else:
read = bytes()
else:
buf = ctypes.create_string_buffer(size)
rc = win32.DWORD()
err = win32.ReadFile(self.hComPort, buf, size, ctypes.byref(rc), ctypes.byref(self._overlappedRead))
if not err and win32.GetLastError() != win32.ERROR_IO_PENDING:
raise SerialException("ReadFile failed (%r)" % ctypes.WinError())
err = win32.GetOverlappedResult(self.hComPort, ctypes.byref(self._overlappedRead), ctypes.byref(rc), True)
read = buf.raw[:rc.value]
else:
read = bytes()
return bytes(read)
def write(self, data):
"""Output the given string over the serial port."""
if not self.hComPort: raise portNotOpenError
#~ if not isinstance(data, (bytes, bytearray)):
#~ raise TypeError('expected %s or bytearray, got %s' % (bytes, type(data)))
# convert data (needed in case of memoryview instance: Py 3.1 io lib), ctypes doesn't like memoryview
data = to_bytes(data)
if data:
#~ win32event.ResetEvent(self._overlappedWrite.hEvent)
n = win32.DWORD()
err = win32.WriteFile(self.hComPort, data, len(data), ctypes.byref(n), self._overlappedWrite)
if not err and win32.GetLastError() != win32.ERROR_IO_PENDING:
raise SerialException("WriteFile failed (%r)" % ctypes.WinError())
if self._writeTimeout != 0: # if blocking (None) or w/ write timeout (>0)
# Wait for the write to complete.
#~ win32.WaitForSingleObject(self._overlappedWrite.hEvent, win32.INFINITE)
err = win32.GetOverlappedResult(self.hComPort, self._overlappedWrite, ctypes.byref(n), True)
if n.value != len(data):
raise writeTimeoutError
return n.value
else:
return 0
def flush(self):
"""Flush of file like objects. In this case, wait until all data
is written."""
while self.outWaiting():
time.sleep(0.05)
# XXX could also use WaitCommEvent with mask EV_TXEMPTY, but it would
        # require overlapped IO and it's also only possible to set a single
        # mask on the port.
def flushInput(self):
"""Clear input buffer, discarding all that is in the buffer."""
if not self.hComPort: raise portNotOpenError
win32.PurgeComm(self.hComPort, win32.PURGE_RXCLEAR | win32.PURGE_RXABORT)
def flushOutput(self):
"""Clear output buffer, aborting the current output and
discarding all that is in the buffer."""
if not self.hComPort: raise portNotOpenError
win32.PurgeComm(self.hComPort, win32.PURGE_TXCLEAR | win32.PURGE_TXABORT)
def sendBreak(self, duration=0.25):
"""Send break condition. Timed, returns to idle state after given duration."""
if not self.hComPort: raise portNotOpenError
win32.SetCommBreak(self.hComPort)
time.sleep(duration)
win32.ClearCommBreak(self.hComPort)
    def setBreak(self, level=1):
        """Set break: Controls TXD. When active, no transmitting is possible."""
if not self.hComPort: raise portNotOpenError
if level:
win32.SetCommBreak(self.hComPort)
else:
win32.ClearCommBreak(self.hComPort)
def setRTS(self, level=1):
"""Set terminal status line: Request To Send"""
# remember level for reconfigure
if level:
self._rtsState = win32.RTS_CONTROL_ENABLE
else:
self._rtsState = win32.RTS_CONTROL_DISABLE
# also apply now if port is open
if self.hComPort:
if level:
win32.EscapeCommFunction(self.hComPort, win32.SETRTS)
else:
win32.EscapeCommFunction(self.hComPort, win32.CLRRTS)
def setDTR(self, level=1):
"""Set terminal status line: Data Terminal Ready"""
# remember level for reconfigure
if level:
self._dtrState = win32.DTR_CONTROL_ENABLE
else:
self._dtrState = win32.DTR_CONTROL_DISABLE
# also apply now if port is open
if self.hComPort:
if level:
win32.EscapeCommFunction(self.hComPort, win32.SETDTR)
else:
win32.EscapeCommFunction(self.hComPort, win32.CLRDTR)
def _GetCommModemStatus(self):
stat = win32.DWORD()
win32.GetCommModemStatus(self.hComPort, ctypes.byref(stat))
return stat.value
def getCTS(self):
"""Read terminal status line: Clear To Send"""
if not self.hComPort: raise portNotOpenError
return win32.MS_CTS_ON & self._GetCommModemStatus() != 0
def getDSR(self):
"""Read terminal status line: Data Set Ready"""
if not self.hComPort: raise portNotOpenError
return win32.MS_DSR_ON & self._GetCommModemStatus() != 0
def getRI(self):
"""Read terminal status line: Ring Indicator"""
if not self.hComPort: raise portNotOpenError
return win32.MS_RING_ON & self._GetCommModemStatus() != 0
def getCD(self):
"""Read terminal status line: Carrier Detect"""
if not self.hComPort: raise portNotOpenError
return win32.MS_RLSD_ON & self._GetCommModemStatus() != 0
# - - platform specific - - - -
def setBufferSize(self, rx_size=4096, tx_size=None):
"""\
        Recommend a buffer size to the driver (the device driver can ignore
        this value). Must be called before the port is opened.
"""
if tx_size is None: tx_size = rx_size
win32.SetupComm(self.hComPort, rx_size, tx_size)
def setXON(self, level=True):
"""\
Manually control flow - when software flow control is enabled.
This will send XON (true) and XOFF (false) to the other device.
WARNING: this function is not portable to different platforms!
"""
if not self.hComPort: raise portNotOpenError
if level:
win32.EscapeCommFunction(self.hComPort, win32.SETXON)
else:
win32.EscapeCommFunction(self.hComPort, win32.SETXOFF)
    def outWaiting(self):
        """Return how many characters are in the outgoing buffer."""
flags = win32.DWORD()
comstat = win32.COMSTAT()
if not win32.ClearCommError(self.hComPort, ctypes.byref(flags), ctypes.byref(comstat)):
raise SerialException('call to ClearCommError failed')
return comstat.cbOutQue
# functions useful for RS-485 adapters
def setRtsToggle(self, rtsToggle):
"""Change RTS toggle control setting."""
self._rtsToggle = rtsToggle
if self._isOpen: self._reconfigurePort()
def getRtsToggle(self):
"""Get the current RTS toggle control setting."""
return self._rtsToggle
rtsToggle = property(getRtsToggle, setRtsToggle, doc="RTS toggle control setting")
# assemble Serial class with the platform specific implementation and the base
# for file-like behavior. for Python 2.6 and newer, that provide the new I/O
# library, derive from io.RawIOBase
try:
import io
except ImportError:
# classic version with our own file-like emulation
class Serial(Win32Serial, FileLike):
pass
else:
# io library present
class Serial(Win32Serial, io.RawIOBase):
pass
# Test function only!!
if __name__ == '__main__':
s = Serial(0)
sys.stdout.write("%s\n" % s)
s = Serial()
sys.stdout.write("%s\n" % s)
s.baudrate = 19200
s.databits = 7
s.close()
s.port = 0
s.open()
sys.stdout.write("%s\n" % s)
| bsd-3-clause |
108coin/108coin | share/qt/make_spinner.py | 4415 | 1035 | #!/usr/bin/env python
# W.J. van der Laan, 2011
# Make spinning .mng animation from a .png
# Requires imagemagick 6.7+
from __future__ import division
from os import path
from PIL import Image
from subprocess import Popen
SRC='img/reload_scaled.png'
DST='../../src/qt/res/movies/update_spinner.mng'
TMPDIR='/tmp'
TMPNAME='tmp-%03i.png'
NUMFRAMES=35
FRAMERATE=10.0
CONVERT='convert'
CLOCKWISE=True
DSIZE=(16,16)
im_src = Image.open(SRC)
if CLOCKWISE:
im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT)
def frame_to_filename(frame):
return path.join(TMPDIR, TMPNAME % frame)
frame_files = []
for frame in xrange(NUMFRAMES):
rotation = (frame + 0.5) / NUMFRAMES * 360.0
if CLOCKWISE:
rotation = -rotation
im_new = im_src.rotate(rotation, Image.BICUBIC)
im_new.thumbnail(DSIZE, Image.ANTIALIAS)
outfile = frame_to_filename(frame)
im_new.save(outfile, 'png')
frame_files.append(outfile)
p = Popen([CONVERT, "-delay", str(FRAMERATE), "-dispose", "2"] + frame_files + [DST])
p.communicate()
| mit |
appop/bitcoin | contrib/devtools/update-translations.py | 1 | 8076 | #!/usr/bin/env python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
'''
from __future__ import division, print_function
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'nealcoin_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
# Minimum number of messages for translation to be considered at all
MIN_NUM_MESSAGES = 10
def check_at_repository_root():
if not os.path.exists('.git'):
print('No .git directory found')
print('Execute this script at the root of the repository', file=sys.stderr)
exit(1)
def fetch_all_translations():
if subprocess.call([TX, 'pull', '-f', '-a']):
print('Error while fetching translations', file=sys.stderr)
exit(1)
def find_format_specifiers(s):
'''Find all format specifiers in a string.'''
pos = 0
specifiers = []
while True:
percent = s.find('%', pos)
if percent < 0:
break
specifiers.append(s[percent+1])
pos = percent+2
return specifiers
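# Illustrative call (comments only; the input string is an assumption):
#   find_format_specifiers('%1 of %2 done, %s left') -> ['1', '2', 's']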
def split_format_specifiers(specifiers):
'''Split format specifiers between numeric (Qt) and others (strprintf)'''
numeric = []
other = []
for s in specifiers:
if s in {'1','2','3','4','5','6','7','8','9'}:
numeric.append(s)
else:
other.append(s)
# If both numeric format specifiers and "others" are used, assume we're dealing
# with a Qt-formatted message. In the case of Qt formatting (see https://doc.qt.io/qt-5/qstring.html#arg)
# only numeric formats are replaced at all. This means "(percentage: %1%)" is valid, without needing
# any kind of escaping that would be necessary for strprintf. Without this, this function
# would wrongly detect '%)' as a printf format specifier.
if numeric:
other = []
# numeric (Qt) can be present in any order, others (strprintf) must be in specified order
return set(numeric),other
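# Continuing the example above (illustrative): ['1', '2', 's'] splits into
# ({'1', '2'}, []) -- the strprintf part is dropped because numeric (Qt)
# specifiers are present, per the rationale explained above.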
def sanitize_string(s):
'''Sanitize string for printing'''
return s.replace('\n',' ')
def check_format_specifiers(source, translation, errors, numerus):
source_f = split_format_specifiers(find_format_specifiers(source))
# assert that no source messages contain both Qt and strprintf format specifiers
# if this fails, go change the source as this is hacky and confusing!
assert(not(source_f[0] and source_f[1]))
try:
translation_f = split_format_specifiers(find_format_specifiers(translation))
except IndexError:
errors.append("Parse error in translation for '%s': '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
else:
if source_f != translation_f:
if numerus and source_f == (set(), ['n']) and translation_f == (set(), []) and translation.find('%') == -1:
# Allow numerus translations to omit %n specifier (usually when it only has one possible value)
return True
errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
return True
def all_ts_files(suffix=''):
for filename in os.listdir(LOCALE_DIR):
# process only language files, and do not process source language
if not filename.endswith('.ts'+suffix) or filename == SOURCE_LANG+suffix:
continue
if suffix: # remove provided suffix
filename = filename[0:-len(suffix)]
filepath = os.path.join(LOCALE_DIR, filename)
yield(filename, filepath)
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
def remove_invalid_characters(s):
'''Remove invalid characters from translation string'''
return FIX_RE.sub(b'', s)
# Override cdata escape function to make our output match Qt's (optional, just for cleaner diffs for
# comparison, disable by default)
_orig_escape_cdata = None
def escape_cdata(text):
text = _orig_escape_cdata(text)
text = text.replace("'", ''')
text = text.replace('"', '"')
return text
def postprocess_translations(reduce_diff_hacks=False):
print('Checking and postprocessing...')
if reduce_diff_hacks:
global _orig_escape_cdata
_orig_escape_cdata = ET._escape_cdata
ET._escape_cdata = escape_cdata
for (filename,filepath) in all_ts_files():
os.rename(filepath, filepath+'.orig')
have_errors = False
for (filename,filepath) in all_ts_files('.orig'):
# pre-fixups to cope with transifex output
        parser = ET.XMLParser(encoding='utf-8') # need to override encoding because 'utf8' is not understood, only 'utf-8'
with open(filepath + '.orig', 'rb') as f:
data = f.read()
# remove control characters; this must be done over the entire file otherwise the XML parser will fail
data = remove_invalid_characters(data)
tree = ET.parse(io.BytesIO(data), parser=parser)
# iterate over all messages in file
root = tree.getroot()
for context in root.findall('context'):
for message in context.findall('message'):
numerus = message.get('numerus') == 'yes'
source = message.find('source').text
translation_node = message.find('translation')
# pick all numerusforms
if numerus:
translations = [i.text for i in translation_node.findall('numerusform')]
else:
translations = [translation_node.text]
for translation in translations:
if translation is None:
continue
errors = []
valid = check_format_specifiers(source, translation, errors, numerus)
for error in errors:
print('%s: %s' % (filename, error))
if not valid: # set type to unfinished and clear string if invalid
translation_node.clear()
translation_node.set('type', 'unfinished')
have_errors = True
# Remove location tags
for location in message.findall('location'):
message.remove(location)
# Remove entire message if it is an unfinished translation
if translation_node.get('type') == 'unfinished':
context.remove(message)
# check if document is (virtually) empty, and remove it if so
num_messages = 0
for context in root.findall('context'):
for message in context.findall('message'):
num_messages += 1
if num_messages < MIN_NUM_MESSAGES:
print('Removing %s, as it contains only %i messages' % (filepath, num_messages))
continue
# write fixed-up tree
# if diff reduction requested, replace some XML to 'sanitize' to qt formatting
if reduce_diff_hacks:
out = io.BytesIO()
tree.write(out, encoding='utf-8')
out = out.getvalue()
out = out.replace(b' />', b'/>')
with open(filepath, 'wb') as f:
f.write(out)
else:
tree.write(filepath, encoding='utf-8')
return have_errors
if __name__ == '__main__':
check_at_repository_root()
fetch_all_translations()
postprocess_translations()
| mit |
zygmuntz/pybrain | pybrain/rl/explorers/discrete/egreedy.py | 31 | 1191 | __author__ = "Thomas Rueckstiess, ruecksti@in.tum.de"
from scipy import random, array
from pybrain.rl.explorers.discrete.discrete import DiscreteExplorer
class EpsilonGreedyExplorer(DiscreteExplorer):
""" A discrete explorer, that executes the original policy in most cases,
but sometimes returns a random action (uniformly drawn) instead. The
randomness is controlled by a parameter 0 <= epsilon <= 1. The closer
epsilon gets to 0, the more greedy (and less explorative) the agent
behaves.
"""
def __init__(self, epsilon = 0.3, decay = 0.9999):
DiscreteExplorer.__init__(self)
self.epsilon = epsilon
self.decay = decay
def _forwardImplementation(self, inbuf, outbuf):
""" Draws a random number between 0 and 1. If the number is less
than epsilon, a random action is chosen. If it is equal or
larger than epsilon, the greedy action is returned.
"""
assert self.module
if random.random() < self.epsilon:
outbuf[:] = array([random.randint(self.module.numActions)])
else:
outbuf[:] = inbuf
self.epsilon *= self.decay
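# Illustrative decay (comments only): with epsilon=0.3 and decay=0.9999,
# epsilon after 10000 action selections is roughly 0.3 * 0.9999**10000,
# i.e. about 0.11, so exploration gradually gives way to greedy behaviour.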
| bsd-3-clause |
auferack08/edx-platform | common/lib/capa/capa/tests/__init__.py | 29 | 2220 | """Tools for helping with testing capa."""
import gettext
import os
import os.path
import fs.osfs
from capa.capa_problem import LoncapaProblem, LoncapaSystem
from capa.inputtypes import Status
from mock import Mock, MagicMock
import xml.sax.saxutils as saxutils
TEST_DIR = os.path.dirname(os.path.realpath(__file__))
def tst_render_template(template, context):
"""
A test version of render to template. Renders to the repr of the context, completely ignoring
the template name. To make the output valid xml, quotes the content, and wraps it in a <div>
"""
return '<div>{0}</div>'.format(saxutils.escape(repr(context)))
def calledback_url(dispatch='score_update'):
return dispatch
xqueue_interface = MagicMock()
xqueue_interface.send_to_queue.return_value = (0, 'Success!')
def test_capa_system():
"""
Construct a mock LoncapaSystem instance.
"""
the_system = Mock(
spec=LoncapaSystem,
ajax_url='/dummy-ajax-url',
anonymous_student_id='student',
cache=None,
can_execute_unsafe_code=lambda: False,
DEBUG=True,
filestore=fs.osfs.OSFS(os.path.join(TEST_DIR, "test_files")),
i18n=gettext.NullTranslations(),
node_path=os.environ.get("NODE_PATH", "/usr/local/lib/node_modules"),
render_template=tst_render_template,
seed=0,
STATIC_URL='/dummy-static/',
STATUS_CLASS=Status,
xqueue={'interface': xqueue_interface, 'construct_callback': calledback_url, 'default_queuename': 'testqueue', 'waittime': 10},
)
return the_system
def new_loncapa_problem(xml, capa_system=None, seed=723):
"""Construct a `LoncapaProblem` suitable for unit tests."""
return LoncapaProblem(xml, id='1', seed=seed, capa_system=capa_system or test_capa_system())
def load_fixture(relpath):
"""
Return a `unicode` object representing the contents
of the fixture file at the given path within a test_files directory
in the same directory as the test file.
"""
abspath = os.path.join(os.path.dirname(__file__), 'test_files', relpath)
with open(abspath) as fixture_file:
contents = fixture_file.read()
return contents.decode('utf8')
| agpl-3.0 |
cavaunpeu/vanilla-neural-nets | vanilla_neural_nets/neural_network/network.py | 1 | 3546 | import numpy as np
from vanilla_neural_nets.neural_network.training_batch_generator import MiniBatchGenerator
from vanilla_neural_nets.neural_network.optimization_algorithm import GradientDescent
from vanilla_neural_nets.neural_network.loss_function import MeanSquaredError
from vanilla_neural_nets.neural_network.activation_function import SigmoidActivationFunction
from vanilla_neural_nets.neural_network.layer_object import NetworkLayersCollection
class VanillaNeuralNetwork:
def __init__(self, layer_sizes, training_batch_generator_class, loss_function_class,
activation_function_class, optimization_algorithm_class, learning_rate, n_epochs,
training_batch_size, weight_initializer, bias_initializer,
output_layer_activation_function_class=None, holdout_data=None, random_state=123):
self.training_batch_generator_class = training_batch_generator_class
self.loss_function_class = loss_function_class
self.activation_function_class = activation_function_class
self.output_layer_activation_function_class = output_layer_activation_function_class or activation_function_class
self.optimization_algorithm_class = optimization_algorithm_class
self.learning_rate = learning_rate
self.n_epochs = n_epochs
self.training_batch_size = training_batch_size
self.holdout_data = holdout_data
self.random_number_generator = np.random.RandomState(random_state)
self.parameters = NetworkLayersCollection(
layer_sizes=layer_sizes,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer
)
def fit(self, X, y):
for epoch in range(self.n_epochs):
training_batch_generator = self.training_batch_generator_class(X=X, y=y, batch_size=self.training_batch_size,
random_number_generator=self.random_number_generator)
for training_batch in training_batch_generator:
self.parameters = self._update_network_layers_with_training_batch(training_batch)
if self.holdout_data:
holdout_accuracy = self._validate_on_holdout_set()
print('Epoch: {} | Accuracy: {}'.format(epoch, np.round(holdout_accuracy, 5)))
def predict(self, x):
activation_matrix = x
for layer in self.parameters.layers:
activation_function_class = self.output_layer_activation_function_class if layer.is_output_layer\
else self.activation_function_class
linear_combination = np.dot(activation_matrix, layer.weight_parameter.value.T) + layer.bias_parameter.value
activation_matrix = activation_function_class.activation_function(linear_combination)
return activation_matrix
def _update_network_layers_with_training_batch(self, training_batch):
return self.optimization_algorithm_class(
training_batch=training_batch,
network_layers=self.parameters,
loss_function_class=self.loss_function_class,
activation_function_class=self.activation_function_class,
output_layer_activation_function_class=self.output_layer_activation_function_class,
learning_rate=self.learning_rate
).run()
def _validate_on_holdout_set(self):
holdout_predictions = self.predict(self.holdout_data.X)
return self.loss_function_class.accuracy(
y_true=self.holdout_data.y,
y_predicted=holdout_predictions
)
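# A minimal usage sketch (hedged: the layer sizes, hyperparameters and the
# initializer placeholders below are assumptions, not values from this repo):
#   network = VanillaNeuralNetwork(
#       layer_sizes=[784, 30, 10],
#       training_batch_generator_class=MiniBatchGenerator,
#       loss_function_class=MeanSquaredError,
#       activation_function_class=SigmoidActivationFunction,
#       optimization_algorithm_class=GradientDescent,
#       learning_rate=3.0,
#       n_epochs=30,
#       training_batch_size=10,
#       weight_initializer=...,  # project-specific initializer objects
#       bias_initializer=...,
#   )
#   network.fit(X_train, y_train)
#   predictions = network.predict(X_test)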
| mit |
samdroid-apps/sugar | extensions/globalkey/speech.py | 8 | 1036 | # Copyright (C) 2011 One Laptop Per Child
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from jarabe.model import speech
BOUND_KEYS = ['<alt><shift>s']
def handle_key_press(key):
manager = speech.get_speech_manager()
if manager.is_paused:
manager.restart()
elif not manager.is_playing:
manager.say_selected_text()
else:
manager.pause()
| gpl-2.0 |
Alexander-M-Waldman/local_currency_site | lib/python2.7/site-packages/easy_thumbnails/models.py | 6 | 2906 | from __future__ import unicode_literals
from django.db import models
from django.utils import timezone
from easy_thumbnails import utils, signal_handlers
from easy_thumbnails.conf import settings
class FileManager(models.Manager):
def get_file(self, storage, name, create=False, update_modified=None,
check_cache_miss=False, **kwargs):
kwargs.update(dict(storage_hash=utils.get_storage_hash(storage),
name=name))
if create:
if update_modified:
defaults = kwargs.setdefault('defaults', {})
defaults['modified'] = update_modified
obj, created = self.get_or_create(**kwargs)
else:
created = False
kwargs.pop('defaults', None)
try:
manager = self._get_thumbnail_manager()
obj = manager.get(**kwargs)
except self.model.DoesNotExist:
if check_cache_miss and storage.exists(name):
# File already in storage, update cache. Using
# get_or_create again in case this was updated while
# storage.exists was running.
obj, created = self.get_or_create(**kwargs)
else:
return
if update_modified and not created:
if obj.modified != update_modified:
self.filter(pk=obj.pk).update(modified=update_modified)
return obj
def _get_thumbnail_manager(self):
return self
class ThumbnailManager(FileManager):
def _get_thumbnail_manager(self):
if settings.THUMBNAIL_CACHE_DIMENSIONS:
return self.select_related("dimensions")
return self
class File(models.Model):
storage_hash = models.CharField(max_length=40, db_index=True)
name = models.CharField(max_length=255, db_index=True)
modified = models.DateTimeField(default=timezone.now)
objects = FileManager()
class Meta:
abstract = True
unique_together = (('storage_hash', 'name'),)
def __unicode__(self):
return self.name
class Source(File):
pass
class Thumbnail(File):
source = models.ForeignKey(Source, related_name='thumbnails')
objects = ThumbnailManager()
class Meta:
unique_together = (('storage_hash', 'name', 'source'),)
class ThumbnailDimensions(models.Model):
thumbnail = models.OneToOneField(Thumbnail, related_name="dimensions")
width = models.PositiveIntegerField(null=True)
height = models.PositiveIntegerField(null=True)
def __unicode__(self):
return "%sx%s" % (self.width, self.height)
@property
def size(self):
return self.width, self.height
models.signals.pre_save.connect(signal_handlers.find_uncommitted_filefields)
models.signals.post_save.connect(signal_handlers.signal_committed_filefields)
| gpl-3.0 |
motion2015/edx-platform | lms/djangoapps/courseware/tabs.py | 7 | 10289 | """
This module is essentially a broker to xmodule/tabs.py -- it was originally introduced to
perform some LMS-specific tab display gymnastics for the Entrance Exams feature
"""
from django.conf import settings
from django.utils.translation import ugettext as _, ugettext_noop
from courseware.access import has_access
from courseware.entrance_exams import user_must_complete_entrance_exam
from openedx.core.lib.course_tabs import CourseTabPluginManager
from student.models import CourseEnrollment
from xmodule.tabs import CourseTab, CourseTabList, key_checker
class EnrolledTab(CourseTab):
"""
A base class for any view types that require a user to be enrolled.
"""
@classmethod
def is_enabled(cls, course, user=None):
if user is None:
return True
return bool(CourseEnrollment.is_enrolled(user, course.id) or has_access(user, 'staff', course, course.id))
class CoursewareTab(EnrolledTab):
"""
The main courseware view.
"""
type = 'courseware'
title = ugettext_noop('Courseware')
priority = 10
view_name = 'courseware'
is_movable = False
is_default = False
class CourseInfoTab(CourseTab):
"""
The course info view.
"""
type = 'course_info'
title = ugettext_noop('Course Info')
priority = 20
view_name = 'info'
tab_id = 'info'
is_movable = False
is_default = False
@classmethod
def is_enabled(cls, course, user=None):
return True
class SyllabusTab(EnrolledTab):
"""
A tab for the course syllabus.
"""
type = 'syllabus'
title = ugettext_noop('Syllabus')
priority = 30
view_name = 'syllabus'
allow_multiple = True
is_default = False
@classmethod
def is_enabled(cls, course, user=None): # pylint: disable=unused-argument
if not super(SyllabusTab, cls).is_enabled(course, user=user):
return False
return getattr(course, 'syllabus_present', False)
class ProgressTab(EnrolledTab):
"""
The course progress view.
"""
type = 'progress'
title = ugettext_noop('Progress')
priority = 40
view_name = 'progress'
is_hideable = True
is_default = False
@classmethod
def is_enabled(cls, course, user=None): # pylint: disable=unused-argument
if not super(ProgressTab, cls).is_enabled(course, user=user):
return False
return not course.hide_progress_tab
class TextbookTabsBase(CourseTab):
"""
Abstract class for textbook collection tabs classes.
"""
    # Translators: 'Textbooks' refers to the tab in the course that leads to the course's textbooks
title = ugettext_noop("Textbooks")
is_collection = True
is_default = False
@classmethod
def is_enabled(cls, course, user=None): # pylint: disable=unused-argument
return user is None or user.is_authenticated()
@classmethod
def items(cls, course):
"""
A generator for iterating through all the SingleTextbookTab book objects associated with this
collection of textbooks.
"""
raise NotImplementedError()
class TextbookTabs(TextbookTabsBase):
"""
A tab representing the collection of all textbook tabs.
"""
type = 'textbooks'
priority = None
view_name = 'book'
@classmethod
def is_enabled(cls, course, user=None): # pylint: disable=unused-argument
parent_is_enabled = super(TextbookTabs, cls).is_enabled(course, user)
return settings.FEATURES.get('ENABLE_TEXTBOOK') and parent_is_enabled
@classmethod
def items(cls, course):
for index, textbook in enumerate(course.textbooks):
yield SingleTextbookTab(
name=textbook.title,
tab_id='textbook/{0}'.format(index),
view_name=cls.view_name,
index=index
)
class PDFTextbookTabs(TextbookTabsBase):
"""
A tab representing the collection of all PDF textbook tabs.
"""
type = 'pdf_textbooks'
priority = None
view_name = 'pdf_book'
@classmethod
def items(cls, course):
for index, textbook in enumerate(course.pdf_textbooks):
yield SingleTextbookTab(
name=textbook['tab_title'],
tab_id='pdftextbook/{0}'.format(index),
view_name=cls.view_name,
index=index
)
class HtmlTextbookTabs(TextbookTabsBase):
"""
A tab representing the collection of all Html textbook tabs.
"""
type = 'html_textbooks'
priority = None
view_name = 'html_book'
@classmethod
def items(cls, course):
for index, textbook in enumerate(course.html_textbooks):
yield SingleTextbookTab(
name=textbook['tab_title'],
tab_id='htmltextbook/{0}'.format(index),
view_name=cls.view_name,
index=index
)
class LinkTab(CourseTab):
"""
Abstract class for tabs that contain external links.
"""
link_value = ''
def __init__(self, tab_dict=None, name=None, link=None):
self.link_value = tab_dict['link'] if tab_dict else link
def link_value_func(_course, _reverse_func):
""" Returns the link_value as the link. """
return self.link_value
self.type = tab_dict['type']
tab_dict['link_func'] = link_value_func
super(LinkTab, self).__init__(tab_dict)
def __getitem__(self, key):
if key == 'link':
return self.link_value
else:
return super(LinkTab, self).__getitem__(key)
def __setitem__(self, key, value):
if key == 'link':
self.link_value = value
else:
super(LinkTab, self).__setitem__(key, value)
def to_json(self):
to_json_val = super(LinkTab, self).to_json()
to_json_val.update({'link': self.link_value})
return to_json_val
def __eq__(self, other):
if not super(LinkTab, self).__eq__(other):
return False
return self.link_value == other.get('link')
@classmethod
def is_enabled(cls, course, user=None): # pylint: disable=unused-argument
return True
class ExternalDiscussionCourseTab(LinkTab):
"""
A course tab that links to an external discussion service.
"""
type = 'external_discussion'
# Translators: 'Discussion' refers to the tab in the courseware that leads to the discussion forums
title = ugettext_noop('Discussion')
priority = None
is_default = False
@classmethod
def validate(cls, tab_dict, raise_error=True):
""" Validate that the tab_dict for this course tab has the necessary information to render. """
return (super(ExternalDiscussionCourseTab, cls).validate(tab_dict, raise_error) and
key_checker(['link'])(tab_dict, raise_error))
@classmethod
def is_enabled(cls, course, user=None): # pylint: disable=unused-argument
if not super(ExternalDiscussionCourseTab, cls).is_enabled(course, user=user):
return False
return course.discussion_link
class ExternalLinkCourseTab(LinkTab):
"""
A course tab containing an external link.
"""
type = 'external_link'
priority = None
is_default = False # An external link tab is not added to a course by default
allow_multiple = True
@classmethod
def validate(cls, tab_dict, raise_error=True):
""" Validate that the tab_dict for this course tab has the necessary information to render. """
return (super(ExternalLinkCourseTab, cls).validate(tab_dict, raise_error) and
key_checker(['link', 'name'])(tab_dict, raise_error))
class SingleTextbookTab(CourseTab):
"""
A tab representing a single textbook. It is created temporarily when enumerating all textbooks within a
Textbook collection tab. It should not be serialized or persisted.
"""
type = 'single_textbook'
is_movable = False
is_collection_item = True
priority = None
def __init__(self, name, tab_id, view_name, index):
def link_func(course, reverse_func, index=index):
""" Constructs a link for textbooks from a view name, a course, and an index. """
return reverse_func(view_name, args=[unicode(course.id), index])
tab_dict = dict()
tab_dict['name'] = name
tab_dict['tab_id'] = tab_id
tab_dict['link_func'] = link_func
super(SingleTextbookTab, self).__init__(tab_dict)
def to_json(self):
raise NotImplementedError('SingleTextbookTab should not be serialized.')
def get_course_tab_list(request, course):
"""
Retrieves the course tab list from xmodule.tabs and manipulates the set as necessary
"""
user = request.user
xmodule_tab_list = CourseTabList.iterate_displayable(course, user=user)
# Now that we've loaded the tabs for this course, perform the Entrance Exam work.
# If the user has to take an entrance exam, we'll need to hide away all but the
# "Courseware" tab. The tab is then renamed as "Entrance Exam".
course_tab_list = []
for tab in xmodule_tab_list:
if user_must_complete_entrance_exam(request, user, course):
# Hide all of the tabs except for 'Courseware'
# Rename 'Courseware' tab to 'Entrance Exam'
            if tab.type != 'courseware':
continue
tab.name = _("Entrance Exam")
course_tab_list.append(tab)
# Add in any dynamic tabs, i.e. those that are not persisted
course_tab_list += _get_dynamic_tabs(course, user)
return course_tab_list
def _get_dynamic_tabs(course, user):
"""
Returns the dynamic tab types for the current user.
Note: dynamic tabs are those that are not persisted in the course, but are
instead added dynamically based upon the user's role.
"""
dynamic_tabs = list()
for tab_type in CourseTabPluginManager.get_tab_types():
if getattr(tab_type, "is_dynamic", False):
tab = tab_type(dict())
if tab.is_enabled(course, user=user):
dynamic_tabs.append(tab)
dynamic_tabs.sort(key=lambda dynamic_tab: dynamic_tab.name)
return dynamic_tabs
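# A hedged sketch of how a new tab type would slot into the pattern above
# (the 'notes' type and the ENABLE_NOTES feature flag are hypothetical,
# for illustration only):
#
# class NotesTab(EnrolledTab):
#     """A hypothetical tab for course notes."""
#     type = 'notes'
#     title = ugettext_noop('Notes')
#     priority = 50
#     view_name = 'notes'
#     is_default = False
#
#     @classmethod
#     def is_enabled(cls, course, user=None):
#         if not super(NotesTab, cls).is_enabled(course, user=user):
#             return False
#         return settings.FEATURES.get('ENABLE_NOTES', False)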
| agpl-3.0 |
candlepin/virt-who | virtwho/virt/ahv/ahv_interface.py | 1 | 27979 | import json
import math
import time
import sys
from . import ahv_constants
from requests import Session
from requests.exceptions import ConnectionError, ReadTimeout
from virtwho import virt
class AhvInterface(object):
""" AHV REST Api interface class"""
NO_RETRY_HTTP_CODES = [400, 404, 500, 502, 503]
event_types = ['node', 'vm']
def __init__(self, logger, url, username, password, port, **kwargs):
"""
Args:
logger (Log): Logger.
url (str): Rest server url.
username (str): Username.
password (str): Password for rest client.
port (int): Port number for ssp.
kwargs(dict): Accepts following arguments:
        timeout (optional, int): Max seconds to wait before the HTTP connection
          times out. Default: 30 seconds.
        retries (optional, int): Maximum number of retries. Default: 5.
        retry_interval (optional, int): Time to sleep between retries.
          Default: 30 seconds.
        internal_debug (optional, bool): Detailed logging of the rest calls.
          Default: False.
"""
self._session = Session()
self._timeout = kwargs.get('timeout', 30)
self._retries = kwargs.get('retries', 5)
self._retry_interval = kwargs.get('retry_interval', 30)
self._logger = logger
self._url = url
self._user = username
self._password = password
self._port = port
self._internal_debug = kwargs.get('internal_debug', False)
self._create_session(self._user, self._password)
def _create_session(self, user=None, password=None):
"""
Creates rest session.
Args:
user (str): Username.
password (str): Password for rest session.
Returns:
None.
"""
if user is None:
user = self._user
if password is None:
password = self._password
self._session.auth = (user, password)
def _make_url(self, uri, *args):
"""
Creates base url.
uri would always begin with a slash
Args:
uri (str): Uri.
args (list): Args.
Returns:
url (str): Url with uri.
"""
if not uri.startswith("/"):
uri = "/%s" % uri
url = "%s%s" % (self._url, uri)
for arg in args:
url += "/%s" % str(arg)
return url
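  # For illustration (hypothetical values): with self._url set to
  # 'https://pc.example.com:9440/api/nutanix/v3', the helper yields:
  #   self._make_url('vms/list')           -> .../v3/vms/list
  #   self._make_url('/vms', 'uuid-1234')  -> .../v3/vms/uuid-1234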
def _format_response(self, data):
"""
Format the data based on the response's version.
Args:
data (dict): Data dictionary.
Returns:
formatted_data (dict): Formatted dictionary.
"""
if 'entities' in data:
return self._process_entities_list(data['entities'])
else:
return self._process_dict_response(data)
def _process_dict_response(self, data):
"""
Format the data when we only have a dictionary.
Args:
data (dict): Data dictionary.
Returns:
formatted_data (dict): Formatted data.
"""
formatted_data = data
if 'status' in data and 'metadata' in data:
formatted_data = dict(data['status'], **data['metadata'])
if 'resources' in formatted_data:
if 'power_state' in formatted_data['resources']:
formatted_data['power_state'] = \
formatted_data['resources']['power_state']
if 'num_cpu_sockets' in formatted_data['resources']:
formatted_data['num_cpu_sockets'] = \
formatted_data['resources']['num_cpu_sockets']
return formatted_data
def _process_entities_list(self, data):
"""
Format data for the list of entities.
Args:
data (list): List of entities dictionary.
Returns:
      formatted_data (dict): Formatted data after processing the list of entities.
"""
formatted_data = data
initial = True
for entity in data:
if 'status' in entity and 'metadata' in entity:
if initial:
formatted_data = []
initial = False
formatted_data.append(dict(entity['status'], **entity['metadata']))
for ent_obj in formatted_data:
if 'resources' in ent_obj:
if 'nodes' in ent_obj['resources']:
nodes = ent_obj['resources']['nodes']
if 'hypervisor_server_list' in nodes:
ent_obj['hypervisor_types'] = []
for server in nodes['hypervisor_server_list']:
ent_obj['hypervisor_types'].append(server['type'])
if 'kind' in ent_obj:
if ent_obj['kind'] == 'cluster':
if 'uuid' in ent_obj:
ent_obj['cluster_uuid'] = ent_obj['uuid']
return formatted_data
def _progressbar(self, it, prefix="", size=60, file=sys.stdout, total=0, is_pc=False):
count = total
cursor = 0
def show(j):
x = int(size*j/count)
file.write("%s[%s%s] %i/%i\r" % (prefix, "#"*x, "."*(size-x), j, count))
file.flush()
show(0)
for i, item in enumerate(it):
if is_pc:
yield item
for i in range(20):
show(cursor+1)
cursor += 1
if cursor == count:
break
time.sleep(0.1)
else:
show(i+1)
yield item
file.write("\n")
file.flush()
def login(self, version):
"""
Login to the rest server and ensure connection succeeds.
Args:
version (Str): Interface version.
Returns:
None.
"""
(url, cmd_method) = self.get_diff_ver_url_and_method(
cmd_key='list_clusters', intf_version=version)
self.make_rest_call(method=cmd_method, uri=url)
self._logger.info("Successfully logged into the AHV REST server")
def get_hypervisor_type(self, version, host_entity=None, vm_entity=None):
"""
Get the hypervisor type of the guest vm.
Args:
version (Str): API version.
host_entity (Dict): Host info dict.
vm_entity (Dict): Vm info dict.
Returns:
hypervisor_type (str): Vm hypervisor type.
"""
hypervisor_type = None
if version == 'v2.0':
if host_entity:
hypervisor_type = host_entity['hypervisor_type']
else:
self._logger.warning("Cannot retrieve the host type. Version:%s"
% version)
else:
if vm_entity:
if 'resources' in vm_entity:
if 'hypervisor_type' in vm_entity['resources']:
hypervisor_type = vm_entity['resources']['hypervisor_type']
else:
self._logger.debug("Hypervisor type of the %s is not available"
% vm_entity['uuid'])
else:
        self._logger.warning("No vm entity is provided for version %s, "
                             "so the hypervisor type cannot be retrieved"
                             % version)
return hypervisor_type
def get_common_ver_url_and_method(self, cmd_key):
"""
Gets the correct cmd name based on its corresponding version.
Args:
cmd_key (str): Key name to search for in the command dict.
Returns:
(str, str) : Tuple of (command, rest_type).
"""
return (ahv_constants.CMN_RST_CMD[cmd_key]['url'],
ahv_constants.CMN_RST_CMD[cmd_key]['method'])
def get_diff_ver_url_and_method(self, cmd_key, intf_version):
"""
Gets the correct cmd name based on its corresponding version
Args:
cmd_key (str): Key name to search for in the command dict.
intf_version (str): Interface version.
Returns:
(str, str) : Tuple of (command, rest_type).
"""
return (ahv_constants.REST_CMD[intf_version][cmd_key]['url'],
ahv_constants.REST_CMD[intf_version][cmd_key]['method'])
def get(self, uri, *args, **kwargs):
"""
Args are appended to the url as components.
/arg1/arg2/arg3
Send a get request with kwargs to the server.
Args:
uri (str): Uri.
args (list): Args.
kwargs (dict): Dictionary of params.
Returns:
Response (requests.Response): rsp.
"""
url = self._make_url(uri, *args)
return self._send('get', url, **kwargs)
def post(self, uri, **kwargs):
"""
Send a Post request to the server.
Body can be either the dict or passed as kwargs
headers is a dict.
Args:
uri (str): Uri.
kwargs (dict): Dictionary of params.
Returns:
Response (requests.Response): rsp.
"""
url = self._make_url(uri)
return self._send('post', url, **kwargs)
def make_rest_call(self, method, uri, *args, **kwargs):
"""This method calls the appropriate rest method based on the arguments.
Args:
method (str): HTTP method.
uri (str): Relative_uri.
args(any): Arguments.
kwargs(dict): Key value pair for the additional args.
Returns:
rsp (dict): The response content loaded as a JSON.
"""
func = getattr(self, method)
return func(uri, *args, **kwargs)
def _send(self, method, url, **kwargs):
"""This private method acting as proxy for all http methods.
Args:
method (str): The http method type.
url (str): The URL to for the Request
kwargs (dict): Keyword args to be passed to the requests call.
retries (int): The retry count in case of HTTP errors.
Except the codes in the list NO_RETRY_HTTP_CODES.
Returns:
Response (requests.Response): The response object.
"""
kwargs['verify'] = kwargs.get('verify', False)
if 'timeout' not in kwargs:
kwargs['timeout'] = self._timeout
if 'data' not in kwargs:
body = {}
kwargs['data'] = json.dumps(body)
content_dict = {'content-type': 'application/json'}
kwargs.setdefault('headers', {})
kwargs['headers'].update(content_dict)
func = getattr(self._session, method)
response = None
retries = kwargs.pop("retries", None)
retry_interval = kwargs.pop("retry_interval", self._retry_interval)
retry_count = retries if retries else self._retries
for ii in range(retry_count):
try:
response = func(url, **kwargs)
if self._internal_debug:
self._logger.debug("%s method The request url sent: %s" % (
method.upper(), response.request.url))
self._logger.debug('Response status: %d' % response.status_code)
self._logger.debug('Response: %s' % json.dumps(response.json(),
indent=4))
except (ConnectionError, ReadTimeout) as e:
self._logger.warning("Request failed with error: %s" % e)
if ii != retry_count - 1:
time.sleep(retry_interval)
continue
finally:
self._session.close()
      if response is None:
        # every attempt raised before a response was received
        continue
      if response.ok:
        return response
if response.status_code in [401, 403]:
raise virt.VirtError('HTTP Auth Failed %s %s. \n res: response: %s' %
(method, url, response))
elif response.status_code == 409:
raise virt.VirtError('HTTP conflict with the current state of the '
'target resource %s %s. \n res: %s' %
(method, url, response))
elif response.status_code in self.NO_RETRY_HTTP_CODES:
break
if ii != retry_count - 1:
time.sleep(retry_interval)
if response is not None:
msg = 'HTTP %s %s failed: ' % (method, url)
if hasattr(response, "text") and response.text:
msg = "\n".join([msg, response.text]).encode('utf-8')
self._logger.error(msg)
else:
self._logger.error("Failed to make the HTTP request (%s, %s)" %
(method, url))
def get_tasks(self, timestamp, version, is_pc=False):
"""
Returns a list of AHV tasks which happened after timestamp.
Args:
timestamp (int): Current timestamp.
version (str): Interface version.
      is_pc (bool): Flag to determine if we need to poll for PC tasks.
Returns:
Task list (list): list of tasks.
"""
ahv_clusters = self.get_ahv_cluster_uuids(version)
(uri, cmd_method) = self.get_common_ver_url_and_method(cmd_key='get_tasks')
    # For task retrieval, use v2.0 for now: rewrite the url to use v2.0.
url = self._url[:(self._url).rfind('v')] + 'v2.0' + uri
res = self._send(method=cmd_method, url=url)
data = res.json()
if is_pc:
return self.get_pc_tasks(data, timestamp, ahv_clusters)
else:
return self.get_pe_tasks(data, timestamp, ahv_clusters)
def get_pc_tasks(self, data, timestamp, ahv_clusters):
"""
Returns a list of AHV tasks on PC which happened after timestamp.
Args:
data (json): Rest response in json format.
timestamp (str): Current timestamp.
ahv_clusters (list): List of ahv clusters uuid.
Returns:
task_list (list): list of tasks on PC.
"""
(uri, cmd_method) = self.get_common_ver_url_and_method(cmd_key='get_task')
    # For task retrieval, use v2.0 for now: rewrite the url to use v2.0.
url = self._url[:(self._url).rfind('v')] + 'v2.0' + uri
task_completed = False
task_list = []
if 'entities' in data:
for task in data['entities']:
if 'start_time_usecs' in task:
if task['start_time_usecs'] > timestamp:
if 'progress_status' in task:
if task['progress_status'] in ahv_constants.TASK_COMPLETE_MSG:
task_completed = True
elif 'status' in task:
if task['status'] in ahv_constants.TASK_COMPLETE_MSG:
task_completed = True
if task_completed:
              task_completed = False
if 'subtask_uuid_list' in task:
for subtask in task['subtask_uuid_list']:
                subtask_url = url % subtask
                subtask_resp = self._send(cmd_method, subtask_url)
subtask_data = subtask_resp.json()
if 'progress_status' in subtask_data:
if subtask_data['progress_status'] in \
ahv_constants.TASK_COMPLETE_MSG:
if 'cluster_uuid' in subtask_data:
cluster_uuid = subtask_data['cluster_uuid']
else:
# Task does not have any cluster associated with it,
# skip it.
continue
if cluster_uuid in ahv_clusters:
if 'entity_list' in task:
entity_type_list = task['entity_list']
else:
# Task doesn't have any entity list, skip it.
continue
if entity_type_list:
for ent_type in entity_type_list:
if 'entity_type' in ent_type:
if (str(ent_type['entity_type'])).lower() \
in self.event_types:
task_list.append(task)
task_list.append(subtask_data)
else:
# Task has not finished or it failed, skip it and continue
# the loop
continue
return task_list
def get_pe_tasks(self, data, timestamp, ahv_clusters):
"""
Returns a list of AHV tasks on PE which happened after timestamp.
Args:
data (json): rest response in json format.
timestamp (str): Current timestamp.
ahv_clusters (list): list of ahv clusters uuid.
Returns:
task_list (list): list of tasks on PE.
"""
task_completed = False
task_list = []
if 'entities' in data:
for task in data['entities']:
if 'start_time_usecs' in task:
if task['start_time_usecs'] > timestamp:
if 'progress_status' in task:
if task['progress_status'] in ahv_constants.TASK_COMPLETE_MSG:
task_completed = True
elif 'status' in task:
if task['status'] in ahv_constants.TASK_COMPLETE_MSG:
task_completed = True
if task_completed:
task_completed = False
if 'cluster_reference' in task:
if 'uuid' in task['cluster_reference']:
cluster_uuid = task['cluster_reference']['uuid']
elif 'cluster_uuid' in task:
cluster_uuid = task['cluster_uuid']
else:
# Task does not have any cluster associated with it, skip it.
continue
if cluster_uuid in ahv_clusters:
if 'entity_list' in task:
entity_type_list = task['entity_list']
elif 'entity_reference_list' in task:
entity_type_list = task['entity_reference_list']
else:
# Task doesn't have any entity list, skip it.
continue
for ent_type in entity_type_list:
if 'entity_type' in ent_type:
if (str(ent_type['entity_type'])).lower() \
in self.event_types:
task_list.append(task)
elif 'kind' in ent_type:
if (str(ent_type['kind'])).lower() in self.event_types:
task_list.append(task)
else:
# Task doesn't have any event type associated to it.
continue
return task_list
def get_vms_uuid(self, version):
"""
Returns the list of vms uuid.
Args:
version (str): Interface version.
Returns:
vm_uuid_list (list): list of vm's uuid.
"""
self._logger.info("Getting the list of available vms")
    is_pc = (version == 'v3')
vm_uuid_list = []
length = 0
offset = 0
total_matches = 0
count = 1
current = 0
(url, cmd_method) = self.get_diff_ver_url_and_method(
cmd_key='list_vms', intf_version=version)
res = self.make_rest_call(method=cmd_method, uri=url)
data = res.json()
if 'metadata' in data:
if 'total_matches' in data['metadata'] and 'length' in data['metadata']:
length = data['metadata']['length']
total_matches = data['metadata']['total_matches']
elif 'count' in data['metadata'] and \
'grand_total_entities' in data['metadata'] and \
'total_entities' in data['metadata']:
total_matches = data['metadata']['grand_total_entities']
count = data['metadata']['count']
length = data['metadata']['total_entities']
if length < total_matches:
      self._logger.debug('Number of vms %s returned from REST is less than the '
                         'total number: %s. Adjusting the offset and iterating '
                         'over all vms until every vm is returned from the '
                         'server.' % (length, total_matches))
count = math.ceil(total_matches/float(length))
body = {'length': length, 'offset': offset}
for i in self._progressbar(range(int(count)), "Finding vms uuid: ", total=int(total_matches), is_pc=is_pc):
if 'entities' in data:
for vm_entity in data['entities']:
if 'metadata' in vm_entity:
vm_uuid_list.append(vm_entity['metadata']['uuid'])
elif 'uuid' in vm_entity:
vm_uuid_list.append(vm_entity['uuid'])
else:
self._logger.warning("Cannot access the uuid for the vm %s. "
"vm object: %s" % (vm_entity['name'],
vm_entity))
body['offset'] = body['offset'] + length
body_data = json.dumps(body, indent=4)
self._logger.debug('next vm list call has this body: %s' % body)
res = self.make_rest_call(method=cmd_method, uri=url, data=body_data)
data = res.json()
current += 1
self._logger.info("Total number of vms uuids found and saved for processing %s" % len(vm_uuid_list))
return vm_uuid_list
def get_hosts_uuid(self, version):
"""
Returns the list of host uuid.
Args:
version (str): Interface version.
Returns:
host_uuid_list (list): list of host's uuid.
"""
host_uuid_list = []
(url, cmd_method) = self.get_diff_ver_url_and_method(
cmd_key='list_hosts', intf_version=version)
res = self.make_rest_call(method=cmd_method, uri=url)
data = res.json()
if 'entities' in data:
for host_entity in data['entities']:
        if 'status' in host_entity and 'metadata' in host_entity:
          # Check if a physical host, not a cluster.
          if 'cpu_model' in host_entity['status']:
            host_uuid_list.append(host_entity['metadata']['uuid'])
        elif 'uuid' in host_entity:
          host_uuid_list.append(host_entity['uuid'])
        else:
          self._logger.warning("Cannot access the uuid for the "
                               "host object: %s" % (host_entity))
    return host_uuid_list
def get_host_cluster_uuid(self, host_info):
"""
Returns host's cluster UUID.
Args:
host_info (dict): Host info dict.
Returns:
host_cluster_uuid (uuid): host's cluster uuid.
"""
if 'cluster_uuid' in host_info:
return host_info['cluster_uuid']
elif 'cluster_reference' in host_info:
return host_info['cluster_reference']['uuid']
def get_ahv_cluster_uuids(self, version):
"""
Returns list of ahv cluster uuids.
Args:
version (str): Interface version.
Returns:
ahv_host_cluster_uuids (List): Returns list of ahv cluster uuids.
"""
ahv_host_cluster_uuids = []
seen = set(ahv_host_cluster_uuids)
(url, cmd_method) = self.get_diff_ver_url_and_method(
cmd_key='list_clusters', intf_version=version)
res = self.make_rest_call(method=cmd_method, uri=url)
data = res.json()
formatted_data = self._format_response(data)
for cluster in formatted_data:
if 'hypervisor_types' in cluster and 'cluster_uuid' in cluster:
        for hypervisor_type in cluster['hypervisor_types']:
          if hypervisor_type in ahv_constants.AHV_HYPERVIRSOR:
cluster_uuid = cluster['cluster_uuid']
if cluster_uuid not in seen:
seen.add(cluster_uuid)
ahv_host_cluster_uuids.append(cluster['cluster_uuid'])
break
return ahv_host_cluster_uuids
def get_host_version(self, host_info):
"""
Returns host's version.
Args:
host_info (dict): Host info dict.
Returns:
host_version (Str): Host version if found, None otherwise.
"""
host_version = None
if 'resources' in host_info:
host_resources = host_info['resources']
if 'hypervisor' in host_resources:
if 'hypervisor_full_name' in host_resources['hypervisor']:
host_version = host_resources['hypervisor']['hypervisor_full_name']
elif 'hypervisor_full_name' in host_info:
host_version = host_info['hypervisor_full_name']
else:
self._logger.warning("Cannot get host version for %s"
% host_info['uuid'])
return host_version
def get_vm(self, uuid):
"""
Returns vm information
Args:
uuid (str): Vm uuid.
Return:
data (dict): Vm information.
"""
(url, cmd_method) = self.get_common_ver_url_and_method(cmd_key='get_vm')
url = url % uuid
res = self.make_rest_call(method=cmd_method, uri=url)
if res:
data = res.json()
return self._format_response(data)
return None
def get_host(self, uuid):
"""
Returns host information
Args:
uuid (str): Host uuid.
Return:
data (dict): Host information.
"""
(url, cmd_method) = self.get_common_ver_url_and_method(cmd_key='get_host')
url = url % uuid
res = self.make_rest_call(method=cmd_method, uri=url)
if res:
data = res.json()
return self._format_response(data)
else:
return None
def get_vm_host_uuid_from_vm(self, vm_entity):
"""
Get the host uuid from the vm_entity response
Args:
vm_entity (dict): Vm info.
Returns:
host uuid (str): Vm host uuid if found, none otherwise.
"""
if 'resources' in vm_entity:
if 'host_reference' in vm_entity['resources']:
return vm_entity['resources']['host_reference']['uuid']
else:
self._logger.warning("Did not find any host information for vm:%s"
% vm_entity['uuid'])
elif 'host_uuid' in vm_entity:
return vm_entity['host_uuid']
else:
# Vm is off therefore no host is assigned to it.
self._logger.debug('Cannot get the host uuid of the vm:%s. '
'perhaps the vm is powered off' % vm_entity['uuid'])
return None
def is_ahv_host(self, version, host_uuid, vm_entity=None):
"""
Determine if a given host is a AHV host.
host uuid should match the host uuid in vm_entity.
Args:
version (str): API version.
host_uuid (str): uuid of a host.
vm_entity (dict): For v3
Returns:
      bool : True if host is ahv; false otherwise.
"""
if version == 'v2.0':
host = self.get_host(host_uuid)
if 'hypervisor_type' in host:
return host['hypervisor_type'] in ahv_constants.AHV_HYPERVIRSOR
else:
if 'resources' in vm_entity:
if 'hypervisor_type' in vm_entity['resources']:
return vm_entity['resources']['hypervisor_type'] in \
ahv_constants.AHV_HYPERVIRSOR
self._logger.debug('Hypervisor type not found. \nversion:%s, '
'\nhost_uuid:%s, \nvm_entity:%s'
% (version, host_uuid, vm_entity))
return False
def build_host_to_uvm_map(self, version):
"""
Builds a dictionary of every ahv host along with the vms they are hosting
Args:
version (Str): API version
Returns:
host_uvm_map (dict): Dict of ahv host with its uvms.
"""
host_uvm_map = {}
vm_entity = None
host_uuid = None
vm_uuids = self.get_vms_uuid(version)
self._logger.info("Processing hosts for each vm.")
if len(vm_uuids) > 0:
for vm_uuid in vm_uuids:
vm_entity = self.get_vm(vm_uuid)
if vm_entity:
host_uuid = self.get_vm_host_uuid_from_vm(vm_entity)
if host_uuid:
if self.is_ahv_host(version, host_uuid, vm_entity):
host = self.get_host(host_uuid)
if host:
if host_uuid not in host_uvm_map:
host_uvm_map[host_uuid] = host
if 'guest_list' in host_uvm_map[host_uuid]:
host_uvm_map[host_uuid]['guest_list'].append(vm_entity)
else:
host_uvm_map[host_uuid]['guest_list'] = []
host_uvm_map[host_uuid]['guest_list'].append(vm_entity)
else:
self._logger.warning("unable to read information for host %s" % host_uuid)
continue
else:
self._logger.debug("Host %s is not ahv, skipping it." % host_uuid)
continue
host_type = self.get_hypervisor_type(version, host, vm_entity)
host_uvm_map[host_uuid]['hypervisor_type'] = host_type
else:
self._logger.warning("No available vms found")
try:
host_uuids = self.get_hosts_uuid(version)
if len(host_uuids) > 0:
for host_uuid in host_uuids:
host = self.get_host(host_uuid)
if host_uuid not in host_uvm_map:
host_uvm_map[host_uuid] = host
host_uvm_map[host_uuid]['guest_list'] = []
else:
self._logger.warning("No Available AHV host found")
except TypeError:
# In case there is no cluster registered to the PC.
self._logger.warning("Unable to find any AHV hosts.")
return host_uvm_map
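  # Shape of the returned mapping, sketched with hypothetical uuids (for
  # illustration only): each value is the dict from get_host(), extended
  # with the two keys added above.
  #   {
  #     'host-uuid-1': { ...fields from get_host()...,
  #                      'hypervisor_type': 'AHV',
  #                      'guest_list': [vm_entity, vm_entity] },
  #   }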
class Failure(Exception):
def __init__(self, details):
self.details = details
def __str__(self):
try:
return str(self.details)
except Exception as exn:
import sys
print(exn)
return "AHV-API failure: %s" % str(self.details)
def _details_map(self):
return dict([(str(i), self.details[i]) for i in range(len(self.details))])
| gpl-2.0 |
snlab-freedom/sdntest | sdntest/exception.py | 1 | 1179 | #!/usr/bin/env python
REASON = {
'OUTDIR': 1
}
class PlatformException(Exception):
"""
When involve invalid platform, this exception will be raised.
"""
def __init__(self, platform):
self.platform = platform
def __str__(self):
return "Unknown or unsupported SDN controller platform: %s" % self.platform
class WorkspaceException(Exception):
"""
    When the workspace is not configured or the workspace does not exist,
    this exception will be raised.
"""
def __init__(self, workspace="", reason=None):
self.workspace = workspace
self.reason = reason
def __str__(self):
        if self.workspace:
            if not self.reason:
                return "Workspace %s does not exist or is not a directory." % self.workspace
            elif REASON['OUTDIR'] == self.reason:
                return "'output' in workspace %s already exists, but is not a directory" % self.workspace
            else:
                return "An exception with an unknown reason occurred in workspace %s" % self.workspace
        else:
            return "Missing workspace. You need to set an experiment workspace directory to run the testcase."
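# Illustrative use from a hypothetical caller (the os module and the paths
# are assumptions, not part of this module):
#
#   if not os.path.isdir(workspace):
#       raise WorkspaceException(workspace)
#   outdir = os.path.join(workspace, 'output')
#   if os.path.exists(outdir) and not os.path.isdir(outdir):
#       raise WorkspaceException(workspace, reason=REASON['OUTDIR'])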
| mit |
jreinhardt/manufac | src/manufac/yaml_loader.py | 1 | 1438 | # manufac - a commandline tool for step-by-step instructions
# Copyright (C) 2014 Johannes Reinhardt <jreinhardt@ist-dein-freund.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from os.path import join,dirname, exists, basename
from os import makedirs
from pkg_resources import iter_entry_points
import pkg_resources
from manufac.utils import FileCache
from manufac.importers.common import GraphScaffolding
def load_graph(input_file,store):
importers = []
basedir = dirname(input_file)
for ep in iter_entry_points('importers'):
importers.append(ep.load()(basedir))
cachedir = join(basedir,'.mlcache')
if not exists(cachedir):
makedirs(cachedir)
scaf = GraphScaffolding(input_file,store,FileCache(cachedir),importers)
return scaf.get_graph()
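# Minimal usage sketch (the YAML path and store object are illustrative
# assumptions):
#
#   graph = load_graph('instructions/bookcase.yaml', store)
#
# load_graph discovers importer plugins through the 'importers' entry point
# group and caches intermediate files under <basedir>/.mlcache.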
| gpl-2.0 |
mmasaki/trove | trove/tests/unittests/datastore/base.py | 4 | 3604 | # Copyright (c) 2014 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from trove.datastore import models as datastore_models
from trove.datastore.models import Capability
from trove.datastore.models import Datastore
from trove.datastore.models import DatastoreVersion
from trove.datastore.models import DatastoreVersionMetadata
from trove.datastore.models import DBCapabilityOverrides
from trove.tests.unittests import trove_testtools
from trove.tests.unittests.util import util
class TestDatastoreBase(trove_testtools.TestCase):
def setUp(self):
# Basic setup and mock/fake structures for testing only
super(TestDatastoreBase, self).setUp()
util.init_db()
self.rand_id = str(uuid.uuid4())
self.ds_name = "my-test-datastore" + self.rand_id
self.ds_version = "my-test-version" + self.rand_id
self.capability_name = "root_on_create" + self.rand_id
self.capability_desc = "Enables root on create"
self.capability_enabled = True
self.datastore_version_id = str(uuid.uuid4())
self.flavor_id = 1
datastore_models.update_datastore(self.ds_name, False)
self.datastore = Datastore.load(self.ds_name)
datastore_models.update_datastore_version(
self.ds_name, self.ds_version, "mysql", "", "", True)
DatastoreVersionMetadata.add_datastore_version_flavor_association(
self.ds_name, self.ds_version, [self.flavor_id])
self.datastore_version = DatastoreVersion.load(self.datastore,
self.ds_version)
self.test_id = self.datastore_version.id
self.cap1 = Capability.create(self.capability_name,
self.capability_desc, True)
self.cap2 = Capability.create("require_volume" + self.rand_id,
"Require external volume", True)
self.cap3 = Capability.create("test_capability" + self.rand_id,
"Test capability", False)
def tearDown(self):
super(TestDatastoreBase, self).tearDown()
capabilities_overridden = DBCapabilityOverrides.find_all(
datastore_version_id=self.datastore_version.id).all()
for ce in capabilities_overridden:
ce.delete()
self.cap1.delete()
self.cap2.delete()
self.cap3.delete()
datastore = datastore_models.Datastore.load(self.ds_name)
ds_version = datastore_models.DatastoreVersion.load(datastore,
self.ds_version)
datastore_models.DBDatastoreVersionMetadata.find_by(
datastore_version_id=ds_version.id).delete()
Datastore.load(self.ds_name).delete()
def capability_name_filter(self, capabilities):
new_capabilities = []
for capability in capabilities:
if self.rand_id in capability.name:
new_capabilities.append(capability)
return new_capabilities
| apache-2.0 |
edisonlz/fruit | web_project/base/site-packages/django/contrib/sessions/backends/cache.py | 122 | 2505 | from django.conf import settings
from django.contrib.sessions.backends.base import SessionBase, CreateError
from django.core.cache import get_cache
from django.utils.six.moves import xrange
KEY_PREFIX = "django.contrib.sessions.cache"
class SessionStore(SessionBase):
"""
A cache-based session store.
"""
def __init__(self, session_key=None):
self._cache = get_cache(settings.SESSION_CACHE_ALIAS)
super(SessionStore, self).__init__(session_key)
@property
def cache_key(self):
return KEY_PREFIX + self._get_or_create_session_key()
def load(self):
try:
session_data = self._cache.get(self.cache_key, None)
except Exception:
# Some backends (e.g. memcache) raise an exception on invalid
# cache keys. If this happens, reset the session. See #17810.
session_data = None
if session_data is not None:
return session_data
self.create()
return {}
def create(self):
# Because a cache can fail silently (e.g. memcache), we don't know if
# we are failing to create a new session because of a key collision or
# because the cache is missing. So we try for a (large) number of times
# and then raise an exception. That's the risk you shoulder if using
# cache backing.
for i in xrange(10000):
self._session_key = self._get_new_session_key()
try:
self.save(must_create=True)
except CreateError:
continue
self.modified = True
return
raise RuntimeError(
"Unable to create a new session key. "
"It is likely that the cache is unavailable.")
def save(self, must_create=False):
if must_create:
func = self._cache.add
else:
func = self._cache.set
result = func(self.cache_key,
self._get_session(no_load=must_create),
self.get_expiry_age())
if must_create and not result:
raise CreateError
def exists(self, session_key):
return (KEY_PREFIX + session_key) in self._cache
def delete(self, session_key=None):
if session_key is None:
if self.session_key is None:
return
session_key = self.session_key
self._cache.delete(KEY_PREFIX + session_key)
@classmethod
def clear_expired(cls):
pass
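# Round-trip sketch (illustrative only; assumes SESSION_CACHE_ALIAS names a
# configured cache backend):
#
#   store = SessionStore()
#   store['cart'] = [42]
#   store.save()                       # writes under KEY_PREFIX + session_key
#   again = SessionStore(session_key=store.session_key)
#   again.load()                       # -> {'cart': [42]}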
| apache-2.0 |
narthollis/eve-api-proxy | eveproxy/char.py | 1 | 4453 |
from core import EveProxyBaseRequestHandler
class AccountBalance(EveProxyBaseRequestHandler):
requiredParamaters = ['keyID', 'vCode', 'characterID']
urlPart = "/char/AccountBalance.xml"
class AssetList(EveProxyBaseRequestHandler):
requiredParamaters = ['keyID', 'vCode', 'characterID']
urlPart = "/char/AssetList.xml"
class CalendarEventAttendees(EveProxyBaseRequestHandler):
requiredParamaters = ['keyID', 'vCode', 'characterID', 'eventIDs']
urlPart = "/char/CalendarEventAttendees.xml"
class CharacterSheet(EveProxyBaseRequestHandler):
requiredParamaters = ['keyID', 'vCode', 'characterID']
urlPart = "/char/CharacterSheet.xml"
class ContactList(EveProxyBaseRequestHandler):
requiredParamaters = ['keyID', 'vCode', 'characterID']
urlPart = "/char/ContactList.xml"
class ContactNotifications(EveProxyBaseRequestHandler):
requiredParamaters = ['keyID', 'vCode', 'characterID']
urlPart = "/char/ContactNotifications.xml"
class Contracts(EveProxyBaseRequestHandler):
requiredParamaters = ['keyID', 'vCode', 'characterID']
optionalParamaters = ['contractID']
urlPart = "/char/Contracts.xml"
class ContractItems(EveProxyBaseRequestHandler):
requiredParamaters = ['keyID', 'vCode', 'characterID', 'contractID']
urlPart = "/char/ContractItems.xml"
class ContractBids(EveProxyBaseRequestHandler):
requiredParamaters = ['keyID', 'vCode', 'characterID']
urlPart = "/char/ContractBids.xml"
class FacWarStats(EveProxyBaseRequestHandler):
requiredParamaters = ['keyID', 'vCode', 'characterID']
urlPart = "/char/FacWarStats.xml"
class IndustryJobs(EveProxyBaseRequestHandler):
requiredParamaters = ['keyID', 'vCode', 'characterID']
urlPart = "/char/IndustryJobs.xml"
class Killlog(EveProxyBaseRequestHandler):
requiredParamaters = ['keyID', 'vCode', 'characterID']
optionalParamaters = ['beforeKillID']
urlPart = "/char/Killlog.xml"
class Locations(EveProxyBaseRequestHandler):
requiredParamaters = ['keyID', 'vCode', 'characterID', 'IDs']
urlPart = "/char/Locations.xml"
class MailBodies(EveProxyBaseRequestHandler):
requiredParamaters = ['keyID', 'vCode', 'characterID', 'ids']
urlPart = "/char/MailBodies.xml"
class MailingLists(EveProxyBaseRequestHandler):
requiredParamaters = ['keyID', 'vCode', 'characterID']
urlPart = "/char/MailingLists.xml"
class MailMessages(EveProxyBaseRequestHandler):
requiredParamaters = ['keyID', 'vCode', 'characterID']
urlPart = "/char/MailMessages.xml"
class MarketOrders(EveProxyBaseRequestHandler):
requiredParamaters = ['keyID', 'vCode', 'characterID']
optionalParamaters = ['orderID']
urlPart = "/char/MarketOrders.xml"
class Medals(EveProxyBaseRequestHandler):
requiredParamaters = ['keyID', 'vCode', 'characterID']
urlPart = "/char/Medals.xml"
class Notifications(EveProxyBaseRequestHandler):
requiredParamaters = ['keyID', 'vCode', 'characterID']
urlPart = "/char/Notifications.xml"
class NotificationTexts(EveProxyBaseRequestHandler):
requiredParamaters = ['keyID', 'vCode', 'characterID', 'IDs']
urlPart = "/char/NotificationTexts.xml"
class Research(EveProxyBaseRequestHandler):
requiredParamaters = ['keyID', 'vCode', 'characterID']
urlPart = "/char/Research.xml"
class SkillInTraining(EveProxyBaseRequestHandler):
requiredParamaters = ['keyID', 'vCode', 'characterID']
urlPart = "/char/SkillInTraining.xml"
class SkillQueue(EveProxyBaseRequestHandler):
requiredParamaters = ['keyID', 'vCode', 'characterID']
urlPart = "/char/SkillQueue.xml"
class Standings(EveProxyBaseRequestHandler):
requiredParamaters = ['keyID', 'vCode', 'characterID']
urlPart = "/char/Standings.xml"
class UpcomingCalendarEvents(EveProxyBaseRequestHandler):
requiredParamaters = ['keyID', 'vCode', 'characterID']
urlPart = "/char/UpcomingCalendarEvents.xml"
class WalletJournal(EveProxyBaseRequestHandler):
paramaterDefaults = {'accountKey': 1000}
requiredParamaters = ['keyID', 'vCode', 'characterID', 'accountKey']
optionalParamaters = ['fromID', 'rowCount']
urlPart = "/char/WalletJournal.xml"
class WalletTransactions(EveProxyBaseRequestHandler):
paramaterDefaults = {'accountKey': 1000}
requiredParamaters = ['keyID', 'vCode', 'characterID', 'accountKey']
optionalParamaters = ['fromID', 'rowCount']
urlPart = "/char/WalletTransactions.xml"
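# Each endpoint above follows the same declarative pattern: parameter lists
# plus the EVE API url fragment, with the shared request logic living in
# EveProxyBaseRequestHandler (core.py). A hypothetical new endpoint would be
# declared the same way (endpoint name for illustration only):
#
# class Clones(EveProxyBaseRequestHandler):
#     requiredParamaters = ['keyID', 'vCode', 'characterID']
#     urlPart = "/char/Clones.xml"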
| bsd-2-clause |
dufresnedavid/hr | __unported__/hr_experience/hr_experience.py | 4 | 1563 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2014 Savoir-faire Linux
# (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, orm
class hr_experience(orm.Model):
_name = 'hr.experience'
_inherit = 'hr.curriculum'
_columns = {
'category': fields.selection((('professional', 'Professional'),
('academic', 'Academic'),
('certification', 'Certification')),
'Category', required=True,
help='category'),
}
_defaults = {
'category': 'professional',
}
| agpl-3.0 |
jrennie/weewx-jrennie | bin/user/schemas.py | 1 | 4285 | #
# Copyright (c) 2011 Tom Keffer <tkeffer@gmail.com>
#
# See the file LICENSE.txt for your full rights.
#
# $Revision$
# $Author$
# $Date$
#
"""Database schemas used by weewx"""
#===============================================================================
# This is a list containing the default schema of the archive database. It is
# identical to what is used by wview. It is only used for
# initialization --- afterwards, the schema is obtained dynamically from the
# database. Although a type may be listed here, it may not necessarily be
# supported by your weather station hardware.
#
# You may trim this list of any unused types if you wish, but it will not result
# in as much space saving as you may think.
# ===============================================================================
defaultArchiveSchema = [('dateTime', 'INTEGER NOT NULL UNIQUE PRIMARY KEY'),
('usUnits', 'INTEGER NOT NULL'),
('interval', 'INTEGER NOT NULL'),
('barometer', 'REAL'),
('pressure', 'REAL'),
('altimeter', 'REAL'),
('inTemp', 'REAL'),
('outTemp', 'REAL'),
('inHumidity', 'REAL'),
('outHumidity', 'REAL'),
('windSpeed', 'REAL'),
('windDir', 'REAL'),
('windGust', 'REAL'),
('windGustDir', 'REAL'),
('rainRate', 'REAL'),
('rain', 'REAL'),
('dewpoint', 'REAL'),
('windchill', 'REAL'),
('heatindex', 'REAL'),
('ET', 'REAL'),
('radiation', 'REAL'),
('UV', 'REAL'),
('extraTemp1', 'REAL'),
('extraTemp2', 'REAL'),
('extraTemp3', 'REAL'),
('soilTemp1', 'REAL'),
('soilTemp2', 'REAL'),
('soilTemp3', 'REAL'),
('soilTemp4', 'REAL'),
('leafTemp1', 'REAL'),
('leafTemp2', 'REAL'),
('extraHumid1', 'REAL'),
('extraHumid2', 'REAL'),
('soilMoist1', 'REAL'),
('soilMoist2', 'REAL'),
('soilMoist3', 'REAL'),
('soilMoist4', 'REAL'),
('leafWet1', 'REAL'),
('leafWet2', 'REAL'),
('rxCheckPercent', 'REAL'),
('txBatteryStatus', 'REAL'),
('consBatteryVoltage', 'REAL'),
('hail', 'REAL'),
('hailRate', 'REAL'),
('heatingTemp', 'REAL'),
('heatingVoltage', 'REAL'),
('supplyVoltage', 'REAL'),
('referenceVoltage', 'REAL'),
('windBatteryStatus', 'REAL'),
('rainBatteryStatus', 'REAL'),
('outTempBatteryStatus', 'REAL'),
('inTempBatteryStatus', 'REAL')]
# The default types for which statistics will be kept is pretty much all of the types
# above. We drop a few of the wind related types and replace them with special type 'wind'
drop_list = ['dateTime', 'usUnits', 'interval', 'windSpeed', 'windDir', 'windGust', 'windGustDir']
defaultStatsTypes = filter(lambda x : x not in drop_list, [_tuple[0] for _tuple in defaultArchiveSchema]) + ['wind']
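# For illustration, with the schema above:
#   'barometer' in defaultStatsTypes  -> True
#   'windSpeed' in defaultStatsTypes  -> False (covered by the composite 'wind')
#   'wind' in defaultStatsTypes       -> True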
| gpl-3.0 |
qyou/flaskweb | flask_demo.py | 1 | 2342 | # -*- coding: utf-8 -*-
import os
from werkzeug import secure_filename
from flask import Flask
from flask import url_for # for static files
from flask import render_template
from flask import request
from flask import make_response
from flask import session
from flask import redirect
from flask import flash
from models import Album
app = Flask(__name__)
app.secret_key = os.urandom(24) # use for login
@app.route('/')
def index():
return render_template('index.html', title=u'主页')
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
if request.form['username'] == 'summer' and request.form['password'] == 'youqiang':
session['login'] = True
return redirect(url_for('index'))
else:
flash('Wrong username or password!')
return redirect(url_for('index'))
else:
return redirect(url_for('index'))
@app.route('/logout')
def logout():
session.pop('login', None)
return redirect(url_for('index'))
@app.route('/sign', methods=['GET', 'POST'])
def sign():
if request.method=='POST':
flash("注册成功")
return redirect(url_for('index'))
else:
return render_template('sign.html', title=u'注册')
@app.route('/contact')
def contact():
return render_template('contact.html', title=u'联系我们')
# TODO: need to add the next code
@app.route('/album', methods=['GET', 'POST'])
def album():
upload = 'upload-file'
savedir = os.path.join(os.path.dirname(__file__), 'static', 'upload')
if not os.path.exists(savedir):
os.mkdir(savedir)
if request.method=='POST':
f = request.files[upload]
f.save(os.path.join(savedir, secure_filename(f.filename)))
return render_template('upload.html', filename=secure_filename(f.filename), title=u"上传")
album = Album()
return render_template('album.html', upload=upload, album=album, title=u'相簿')
# return render_template('album.html', upload=upload, title=u'相簿')
# TODO:
@app.route('/message')
def message():
return render_template('message.html', title=u'留言簿')
# TODO:
@app.route('/email')
def email():
return render_template('email.html', title=u'邮件系统')
def main():
app.run(debug=False)
if __name__ == '__main__':
main()
| mit |
wahlmanj/OpenPlex3 | update/OSX/icon/icon/WebServer.py | 2 | 15174 | #!/usr/bin/env python
"""
Sources:
http://fragments.turtlemeat.com/pythonwebserver.php
http://www.linuxjournal.com/content/tech-tip-really-simple-http-server-python
...stackoverflow.com and such
after 27Aug - Apple's switch to https:
- added https WebServer with SSL encryption - needs valid (private) certificate on aTV and server
- for additional information see http://langui.sh/2013/08/27/appletv-ssl-plexconnect/
Thanks to reaperhulk for showing this solution!
"""
import sys
import string, cgi, time
from os import sep, path
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from SocketServer import ThreadingMixIn
import ssl
from multiprocessing import Pipe # inter process communication
import urllib, StringIO, gzip
import signal
import traceback
import Settings, ATVSettings
from Debug import * # dprint()
import XMLConverter # XML_PMS2aTV, XML_PlayVideo
import re
import Localize
import Subtitle
g_param = {}
def setParams(param):
global g_param
g_param = param
def JSConverter(file, options):
f = open(sys.path[0] + "/assets/js/" + file)
JS = f.read()
f.close()
# PlexConnect {{URL()}}->baseURL
for path in set(re.findall(r'\{\{URL\((.*?)\)\}\}', JS)):
JS = JS.replace('{{URL(%s)}}' % path, g_param['baseURL']+path)
# localization
JS = Localize.replaceTEXT(JS, options['aTVLanguage']).encode('utf-8')
return JS
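# For illustration (hypothetical values): with g_param['baseURL'] set to
# 'http://192.168.178.20:8000', a template line such as
#   atv.loadURL('{{URL(/library/sections)}}');
# leaves JSConverter as
#   atv.loadURL('http://192.168.178.20:8000/library/sections');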
class MyHandler(BaseHTTPRequestHandler):
# Fixes slow serving speed under Windows
def address_string(self):
host, port = self.client_address[:2]
#return socket.getfqdn(host)
return host
def log_message(self, format, *args):
pass
def compress(self, data):
buf = StringIO.StringIO()
zfile = gzip.GzipFile(mode='wb', fileobj=buf, compresslevel=9)
zfile.write(data)
zfile.close()
return buf.getvalue()
def sendResponse(self, data, type, enableGzip):
self.send_response(200)
self.send_header('Server', 'PlexConnect')
self.send_header('Content-type', type)
try:
accept_encoding = map(string.strip, string.split(self.headers["accept-encoding"], ","))
except KeyError:
accept_encoding = []
if enableGzip and \
g_param['CSettings'].getSetting('allow_gzip_atv')=='True' and \
'gzip' in accept_encoding:
self.send_header('Content-encoding', 'gzip')
self.end_headers()
self.wfile.write(self.compress(data))
else:
self.end_headers()
self.wfile.write(data)
def do_GET(self):
global g_param
try:
dprint(__name__, 2, "http request header:\n{0}", self.headers)
dprint(__name__, 2, "http request path:\n{0}", self.path)
# check for PMS address
PMSaddress = ''
pms_end = self.path.find(')')
if self.path.startswith('/PMS(') and pms_end>-1:
PMSaddress = urllib.unquote_plus(self.path[5:pms_end])
self.path = self.path[pms_end+1:]
# break up path, separate PlexConnect options
# clean path needed for filetype decoding
parts = re.split(r'[?&]', self.path, 1) # should be '?' only, but we do some things different :-)
if len(parts)==1:
self.path = parts[0]
options = {}
query = ''
else:
self.path = parts[0]
# break up query string
options = {}
query = ''
parts = parts[1].split('&')
for part in parts:
if part.startswith('PlexConnect'):
# get options[]
opt = part.split('=', 1)
if len(opt)==1:
options[opt[0]] = ''
else:
options[opt[0]] = urllib.unquote(opt[1])
else:
# recreate query string (non-PlexConnect) - has to be merged back when forwarded
if query=='':
query = '?' + part
else:
query += '&' + part
# get aTV language setting
options['aTVLanguage'] = Localize.pickLanguage(self.headers.get('Accept-Language', 'en'))
# add client address - to be used in case UDID is unknown
if 'X-Forwarded-For' in self.headers:
options['aTVAddress'] = self.headers['X-Forwarded-For'].split(',', 1)[0]
else:
options['aTVAddress'] = self.client_address[0]
# get aTV hard-/software parameters
options['aTVFirmwareVersion'] = self.headers.get('X-Apple-TV-Version', '5.1')
options['aTVScreenResolution'] = self.headers.get('X-Apple-TV-Resolution', '720')
dprint(__name__, 2, "pms address:\n{0}", PMSaddress)
dprint(__name__, 2, "cleaned path:\n{0}", self.path)
dprint(__name__, 2, "PlexConnect options:\n{0}", options)
dprint(__name__, 2, "additional arguments:\n{0}", query)
if 'User-Agent' in self.headers and \
'AppleTV' in self.headers['User-Agent']:
# recieve simple logging messages from the ATV
if 'PlexConnectATVLogLevel' in options:
dprint('ATVLogger', int(options['PlexConnectATVLogLevel']), options['PlexConnectLog'])
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.end_headers()
return
# serve "*.cer" - Serve up certificate file to atv
if self.path.endswith(".cer"):
dprint(__name__, 1, "serving *.cer: "+self.path)
if g_param['CSettings'].getSetting('certfile').startswith('.'):
# relative to current path
cfg_certfile = sys.path[0] + sep + g_param['CSettings'].getSetting('certfile')
else:
# absolute path
cfg_certfile = g_param['CSettings'].getSetting('certfile')
cfg_certfile = path.normpath(cfg_certfile)
cfg_certfile = path.splitext(cfg_certfile)[0] + '.cer'
try:
f = open(cfg_certfile, "rb")
except:
dprint(__name__, 0, "Failed to access certificate: {0}", cfg_certfile)
return
self.sendResponse(f.read(), 'text/xml', False)
f.close()
return
# serve .js files to aTV
# application, main: ignore path, send /assets/js/application.js
# otherwise: path should be '/js', send /assets/js/*.js
dirname = path.dirname(self.path)
basename = path.basename(self.path)
if basename in ("application.js", "main.js", "javascript-packed.js", "bootstrap.js") or \
basename.endswith(".js") and dirname == '/js':
if basename in ("main.js", "javascript-packed.js", "bootstrap.js"):
basename = "application.js"
dprint(__name__, 1, "serving /js/{0}", basename)
JS = JSConverter(basename, options)
self.sendResponse(JS, 'text/javascript', True)
return
# proxy phobos.apple.com to support PlexConnect main icon
if "a1.phobos.apple.com" in self.headers['Host']:
resource = self.headers['Host']+self.path
icon = g_param['CSettings'].getSetting('icon')
if basename.startswith(icon):
icon_res = basename[len(icon):] # cut string from settings, keeps @720.png/@1080.png
resource = './assets/icons/icon'+icon_res
dprint(__name__, 1, "serving "+self.headers['Host']+self.path+" with "+resource)
r = open(resource, "rb")
else:
r = urllib.urlopen('http://'+resource)
self.sendResponse(r.read(), 'image/png', False)
r.close()
return
# serve "*.jpg" - thumbnails for old-style mainpage
if self.path.endswith(".jpg"):
dprint(__name__, 1, "serving *.jpg: "+self.path)
f = open(sys.path[0] + sep + "assets" + self.path, "rb")
self.sendResponse(f.read(), 'image/jpeg', False)
f.close()
return
# serve "*.png" - only png's support transparent colors
if self.path.endswith(".png"):
dprint(__name__, 1, "serving *.png: "+self.path)
f = open(sys.path[0] + sep + "assets" + self.path, "rb")
self.sendResponse(f.read(), 'image/png', False)
f.close()
return
# serve subtitle file - transcoded to aTV subtitle json
if 'PlexConnect' in options and \
options['PlexConnect']=='Subtitle':
dprint(__name__, 1, "serving subtitle: "+self.path)
XML = Subtitle.getSubtitleJSON(PMSaddress, self.path + query, options)
self.sendResponse(XML, 'application/json', True)
return
# get everything else from XMLConverter - formerly limited to trailing "/" and &PlexConnect Cmds
if True:
dprint(__name__, 1, "serving .xml: "+self.path)
XML = XMLConverter.XML_PMS2aTV(PMSaddress, self.path + query, options)
self.sendResponse(XML, 'text/xml', True)
return
"""
# unexpected request
self.send_error(403,"Access denied: %s" % self.path)
"""
else:
self.send_error(403,"Not Serving Client %s" % self.client_address[0])
except IOError:
dprint(__name__, 0, 'File Not Found:\n{0}', traceback.format_exc())
self.send_error(404,"File Not Found: %s" % self.path)
except:
dprint(__name__, 0, 'Internal Server Error:\n{0}', traceback.format_exc())
self.send_error(500,"Internal Server Error: %s" % self.path)
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
"""Handle requests in a separate thread."""
def Run(cmdPipe, param):
if not __name__ == '__main__':
signal.signal(signal.SIGINT, signal.SIG_IGN)
dinit(__name__, param) # init logging, WebServer process
cfg_IP_WebServer = param['IP_self']
cfg_Port_WebServer = param['CSettings'].getSetting('port_webserver')
try:
server = ThreadedHTTPServer((cfg_IP_WebServer,int(cfg_Port_WebServer)), MyHandler)
server.timeout = 1
except Exception, e:
dprint(__name__, 0, "Failed to connect to HTTP on {0} port {1}: {2}", cfg_IP_WebServer, cfg_Port_WebServer, e)
sys.exit(1)
socketinfo = server.socket.getsockname()
dprint(__name__, 0, "***")
dprint(__name__, 0, "WebServer: Serving HTTP on {0} port {1}.", socketinfo[0], socketinfo[1])
dprint(__name__, 0, "***")
setParams(param)
XMLConverter.setParams(param)
XMLConverter.setATVSettings(param['CATVSettings'])
try:
while True:
# check command
if cmdPipe.poll():
cmd = cmdPipe.recv()
if cmd=='shutdown':
break
# do your work (with timeout)
server.handle_request()
except KeyboardInterrupt:
signal.signal(signal.SIGINT, signal.SIG_IGN) # we heard you!
dprint(__name__, 0,"^C received.")
finally:
dprint(__name__, 0, "Shutting down (HTTP).")
server.socket.close()
def Run_SSL(cmdPipe, param):
if not __name__ == '__main__':
signal.signal(signal.SIGINT, signal.SIG_IGN)
dinit(__name__, param) # init logging, WebServer process
cfg_IP_WebServer = param['IP_self']
cfg_Port_SSL = param['CSettings'].getSetting('port_ssl')
if param['CSettings'].getSetting('certfile').startswith('.'):
# relative to current path
cfg_certfile = sys.path[0] + sep + param['CSettings'].getSetting('certfile')
else:
# absolute path
cfg_certfile = param['CSettings'].getSetting('certfile')
cfg_certfile = path.normpath(cfg_certfile)
try:
certfile = open(cfg_certfile, 'r')
except:
dprint(__name__, 0, "Failed to access certificate: {0}", cfg_certfile)
sys.exit(1)
certfile.close()
try:
server = ThreadedHTTPServer((cfg_IP_WebServer,int(cfg_Port_SSL)), MyHandler)
server.socket = ssl.wrap_socket(server.socket, certfile=cfg_certfile, server_side=True)
server.timeout = 1
except Exception, e:
dprint(__name__, 0, "Failed to connect to HTTPS on {0} port {1}: {2}", cfg_IP_WebServer, cfg_Port_SSL, e)
sys.exit(1)
socketinfo = server.socket.getsockname()
dprint(__name__, 0, "***")
dprint(__name__, 0, "WebServer: Serving HTTPS on {0} port {1}.", socketinfo[0], socketinfo[1])
dprint(__name__, 0, "***")
setParams(param)
XMLConverter.setParams(param)
XMLConverter.setATVSettings(param['CATVSettings'])
try:
while True:
# check command
if cmdPipe.poll():
cmd = cmdPipe.recv()
if cmd=='shutdown':
break
# do your work (with timeout)
server.handle_request()
except KeyboardInterrupt:
signal.signal(signal.SIGINT, signal.SIG_IGN) # we heard you!
dprint(__name__, 0,"^C received.")
finally:
dprint(__name__, 0, "Shutting down (HTTPS).")
server.socket.close()
if __name__=="__main__":
cmdPipe = Pipe()
cfg = Settings.CSettings()
param = {}
param['CSettings'] = cfg
param['CATVSettings'] = ATVSettings.CATVSettings()
param['IP_self'] = '192.168.178.20' # IP_self?
param['baseURL'] = 'http://'+ param['IP_self'] +':'+ cfg.getSetting('port_webserver')
param['HostToIntercept'] = 'trailers.apple.com'
if len(sys.argv)==1:
Run(cmdPipe[1], param)
elif len(sys.argv)==2 and sys.argv[1]=='SSL':
Run_SSL(cmdPipe[1], param)
| bsd-3-clause |
fuzeman/trakt.py | examples/search.py | 1 | 1610 | from __future__ import absolute_import, division, print_function
from trakt import Trakt
from trakt.objects import Episode
import logging
import os
logging.basicConfig(level=logging.DEBUG)
def print_lookup(id, service):
print("Trakt['search'].lookup(%r, %r)" % (id, service))
items = Trakt['search'].lookup(id, service, per_page=10)
item = items[0]
if type(item) is Episode and item.show:
sk, ek = item.pk
print('\t%s (%s) - S%02dE%02d %r' % (item.show.title, item.show.year, sk, ek, item.title))
else:
print('\t%s (%s)' % (item.title, item.year))
def print_query(query, media=None, year=None):
print("Trakt['search'].query(%r, %r, %r)" % (query, media, year))
items = Trakt['search'].query(query, media, year, pagination=True, per_page=10)
for item in items.get(1): # Retrieve first page
if type(item) is Episode and item.show:
sk, ek = item.pk
print('\t[%.2d%%] %s (%s) - S%02dE%02d %r' % (item.score, item.show.title, item.show.year,
sk, ek, item.title))
else:
print('\t[%.2d%%] %s (%s)' % (item.score, item.title, item.year))
if __name__ == '__main__':
# Configure
Trakt.configuration.defaults.client(
id=os.environ.get('CLIENT_ID')
)
# Lookup by id
print_lookup('tt0848228', 'imdb')
print_lookup('tt0903747', 'imdb')
print_lookup('tt0959621', 'imdb')
# Search by name
print_query('The Avengers', 'movie')
print_query('Breaking Bad', 'show')
print_query('Fly', 'episode')
| mit |
borysiasty/QGIS | tests/code_layout/test_qgssipcoverage.py | 45 | 5000 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for SIP binding coverage.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '15/10/2015'
__copyright__ = 'Copyright 2015, The QGIS Project'
import os
from qgis.testing import unittest
from utilities import printImportant
from doxygen_parser import DoxygenParser
from termcolor import colored
# Import all the things!
from qgis.analysis import * # NOQA
from qgis.core import * # NOQA
from qgis.gui import * # NOQA
try:
from qgis.server import * # NOQA
except:
pass
class TestQgsSipCoverage(unittest.TestCase):
def testCoverage(self):
print('CTEST_FULL_OUTPUT')
prefixPath = os.environ['QGIS_PREFIX_PATH']
docPath = os.path.join(prefixPath, '..', 'doc', 'api', 'xml')
parser = DoxygenParser(docPath)
# first look for objects without any bindings
objects = set([m[0] for m in parser.bindable_members])
missing_objects = []
bound_objects = {}
for o in objects:
try:
if '::' in o:
bound_objects[o] = getattr(globals()[o.split('::')[0]], o.split('::')[1])
else:
bound_objects[o] = globals()[o]
except:
missing_objects.append(o)
missing_objects.sort()
# next check for individual members
parser.bindable_members.sort()
missing_members = []
for m in parser.bindable_members:
if m[0] in bound_objects:
obj = bound_objects[m[0]]
if "::" in m[0] and m[0].split("::")[1] == m[1]:
# skip constructors of nested classes
continue
# try two different methods of checking for member existence
try:
if hasattr(obj, m[1]):
continue
except:
pass
try:
if m[1] in dir(obj):
continue
except:
printImportant("SIP coverage test: something strange happened in {}.{}, obj={}".format(m[0], m[1], obj))
missing_members.append('{}.{}'.format(m[0], m[1]))
missing_members.sort()
if missing_objects:
print("---------------------------------")
print((colored('Missing classes:', 'yellow')))
print((' ' + '\n '.join([colored(obj, 'yellow', attrs=['bold']) for obj in missing_objects])))
if missing_members:
print("---------------------------------")
print((colored('Missing members:', 'yellow')))
print((' ' + '\n '.join([colored(mem, 'yellow', attrs=['bold']) for mem in missing_members])))
# print summaries
missing_class_count = len(missing_objects)
present_count = len(objects) - missing_class_count
coverage = 100.0 * present_count / len(objects)
print("---------------------------------")
printImportant("{} total bindable classes".format(len(objects)))
printImportant("{} total have bindings".format(present_count))
printImportant("Binding coverage by classes {}%".format(coverage))
printImportant("---------------------------------")
printImportant("{} classes missing bindings".format(missing_class_count))
print("---------------------------------")
missing_member_count = len(missing_members)
present_count = len(parser.bindable_members) - missing_member_count
coverage = 100.0 * present_count / len(parser.bindable_members)
print("---------------------------------")
printImportant("{} total bindable members".format(len(parser.bindable_members)))
printImportant("{} total have bindings".format(present_count))
printImportant("Binding coverage by members {}%".format(coverage))
printImportant("---------------------------------")
printImportant("{} members missing bindings".format(missing_member_count))
self.assertEqual(missing_class_count, 0, """\n\nFAIL: new unbound classes have been introduced, please add SIP bindings for these classes
If these classes are not suitable for the Python bindings, please add the Doxygen tag
"\\note not available in Python bindings" to the CLASS Doxygen comments""")
self.assertEqual(missing_member_count, 0, """\n\nFAIL: new unbound members have been introduced, please add SIP bindings for these members
If these members are not suitable for the Python bindings, please add the Doxygen tag
"\\note not available in Python bindings" to the MEMBER Doxygen comments""")
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
duh386/django-mailer | mailer/engine.py | 1 | 7959 | import time
import smtplib
import logging
from lockfile import FileLock, AlreadyLocked, LockTimeout
from socket import error as socket_error
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.mail import send_mail as core_send_mail
try:
# Django 1.2
from django.core.mail import get_connection
except ImportError:
# ImportError: cannot import name get_connection
from django.core.mail import SMTPConnection
get_connection = lambda backend=None, fail_silently=False, **kwds: SMTPConnection(fail_silently=fail_silently)
from mailer.models import Message, DontSendEntry, MessageLog
# when queue is empty, how long to wait (in seconds) before checking again
EMPTY_QUEUE_SLEEP = getattr(settings, "MAILER_EMPTY_QUEUE_SLEEP", 30)
# lock timeout value. how long to wait for the lock to become available.
# default behavior is to never wait for the lock to be available.
LOCK_WAIT_TIMEOUT = getattr(settings, "MAILER_LOCK_WAIT_TIMEOUT", -1)
# The actual backend to use for sending, defaulting to the Django default.
EMAIL_BACKEND = getattr(settings, "MAILER_EMAIL_BACKEND", "django.core.mail.backends.smtp.EmailBackend")
if not hasattr(settings, 'EMAIL_HOST_USER_MASS') or not hasattr(settings, 'EMAIL_HOST_PASSWORD_MASS'):
raise ImproperlyConfigured('Please define settings EMAIL_HOST_USER_MASS and EMAIL_HOST_PASSWORD_MASS in settings.py')
if not hasattr(settings, 'MAILER_MASS_QUEUE_SIZE') \
or not hasattr(settings, 'MAILER_MASS_QUEUE_INTERVAL') \
or not hasattr(settings, 'MAILER_MASS_QUEUE_ATTEMPTS'):
raise ImproperlyConfigured('Please define settings MAILER_MASS_QUEUE_SIZE, MAILER_MASS_QUEUE_INTERVAL and MAILER_MASS_QUEUE_ATTEMPTS in settings.py')
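# Illustrative settings.py fragment (editor's sketch; the names mirror the
# checks above, the values are examples only). send_mass() sends
# MAILER_MASS_QUEUE_SIZE messages per burst, sleeps MAILER_MASS_QUEUE_INTERVAL
# minutes between bursts, and stops after MAILER_MASS_QUEUE_ATTEMPTS bursts:
#
#     EMAIL_HOST_USER_MASS = 'newsletter@example.com'
#     EMAIL_HOST_PASSWORD_MASS = 'secret'
#     MAILER_MASS_QUEUE_SIZE = 50
#     MAILER_MASS_QUEUE_INTERVAL = 5
#     MAILER_MASS_QUEUE_ATTEMPTS = 10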
def prioritize(is_mass=False):
"""
Yield the messages in the queue in the order they should be sent.
"""
while True:
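        # Drain high-priority messages first, then medium (re-checking for new
        # high-priority mail each pass), then low; stop once no non-deferred
        # messages remain.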
while Message.objects.high_priority().filter(is_mass=is_mass).count() or Message.objects.medium_priority().filter(is_mass=is_mass).count():
while Message.objects.high_priority().filter(is_mass=is_mass).count():
for message in Message.objects.high_priority().filter(is_mass=is_mass).order_by("when_added"):
yield message
while Message.objects.high_priority().filter(is_mass=is_mass).count() == 0 and Message.objects.medium_priority().filter(is_mass=is_mass).count():
yield Message.objects.medium_priority().filter(is_mass=is_mass).order_by("when_added")[0]
while Message.objects.high_priority().filter(is_mass=is_mass).count() == 0 and Message.objects.medium_priority().filter(is_mass=is_mass).count() == 0 and Message.objects.low_priority().filter(is_mass=is_mass).count():
yield Message.objects.low_priority().filter(is_mass=is_mass).order_by("when_added")[0]
if Message.objects.non_deferred().filter(is_mass=is_mass).count() == 0:
break
def send_all():
"""
Send all eligible messages in the queue.
"""
lock = FileLock("send_mail")
logging.debug("acquiring lock...")
try:
lock.acquire(LOCK_WAIT_TIMEOUT)
except AlreadyLocked:
logging.debug("lock already in place. quitting.")
return
except LockTimeout:
logging.debug("waiting for the lock timed out. quitting.")
return
logging.debug("acquired.")
start_time = time.time()
dont_send = 0
deferred = 0
sent = 0
try:
connection = None
for message in prioritize():
try:
if connection is None:
connection = get_connection(backend=EMAIL_BACKEND)
logging.info("sending message '%s' to %s" % (message.subject.encode("utf-8"), u", ".join(message.to_addresses).encode("utf-8")))
email = message.email
email.connection = connection
email.send()
MessageLog.objects.log(message, 1) # @@@ avoid using literal result code
message.delete()
sent += 1
except (socket_error, smtplib.SMTPSenderRefused, smtplib.SMTPRecipientsRefused, smtplib.SMTPAuthenticationError), err:
message.defer()
logging.info("message deferred due to failure: %s" % err)
MessageLog.objects.log(message, 3, log_message=str(err)) # @@@ avoid using literal result code
deferred += 1
                # Get a new connection, in case the connection itself has an error.
connection = None
finally:
logging.debug("releasing lock...")
lock.release()
logging.debug("released.")
logging.info("")
logging.info("%s sent; %s deferred;" % (sent, deferred))
logging.info("done in %.2f seconds" % (time.time() - start_time))
def send_mass():
"""
Send mass mails according to settings
"""
lock = FileLock("send_mass_mail")
logging.debug("acquiring mass lock...")
try:
lock.acquire(LOCK_WAIT_TIMEOUT)
except AlreadyLocked:
logging.debug("mass lock already in place. quitting.")
return
except LockTimeout:
logging.debug("waiting for the mass lock timed out. quitting.")
return
logging.debug("acquired.")
start_time = time.time()
dont_send = 0
deferred = 0
sent = 0
try:
queue_size = settings.MAILER_MASS_QUEUE_SIZE
queue_interval = settings.MAILER_MASS_QUEUE_INTERVAL
queue_attempts = settings.MAILER_MASS_QUEUE_ATTEMPTS
connection = None
messages_count = 0
for message in prioritize(is_mass=True):
try:
if connection is None:
connection = get_connection(backend=EMAIL_BACKEND, username=settings.EMAIL_HOST_USER_MASS,
password=settings.EMAIL_HOST_PASSWORD_MASS)
logging.info("sending message '%s' to %s" % (message.subject.encode("utf-8"), u", ".join(message.to_addresses).encode("utf-8")))
email = message.email
email.connection = connection
email.send()
MessageLog.objects.log(message, 1) # @@@ avoid using literal result code
message.delete()
sent += 1
except (socket_error, smtplib.SMTPSenderRefused, smtplib.SMTPRecipientsRefused, smtplib.SMTPAuthenticationError, smtplib.SMTPDataError), err:
message.defer()
logging.info("mass message deferred due to failure: %s" % err)
MessageLog.objects.log(message, 3, log_message=str(err)) # @@@ avoid using literal result code
deferred += 1
                # Get a new connection, in case the connection itself has an error.
connection = None
messages_count += 1
if messages_count == queue_size:
queue_attempts -= 1
                logging.debug('%s emails were sent. %s attempts remaining. Sleeping %s min before the next attempt.' % (messages_count, queue_attempts, queue_interval))
messages_count = 0
if queue_attempts == 0:
break
time.sleep(60*queue_interval)
finally:
logging.debug("releasing mass lock...")
lock.release()
logging.debug("released.")
logging.info("")
logging.info("%s sent; %s deferred;" % (sent, deferred))
logging.info("done in %.2f seconds" % (time.time() - start_time))
# def send_loop():
# """
# Loop indefinitely, checking queue at intervals of EMPTY_QUEUE_SLEEP and
# sending messages if any are on queue.
# """
#
# while True:
# while not Message.objects.all():
# logging.debug("sleeping for %s seconds before checking queue again" % EMPTY_QUEUE_SLEEP)
# time.sleep(EMPTY_QUEUE_SLEEP)
# send_all()
| mit |
JCA-Developpement/Odoo | addons/account_bank_statement_extensions/report/__init__.py | 415 | 1128 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import bank_statement_balance_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Midafi/scikit-image | skimage/viewer/widgets/core.py | 35 | 10580 | from ..qt import QtWidgets, QtCore, Qt, QtGui
from ..utils import RequiredAttr
__all__ = ['BaseWidget', 'Slider', 'ComboBox', 'CheckBox', 'Text', 'Button']
class BaseWidget(QtWidgets.QWidget):
plugin = RequiredAttr("Widget is not attached to a Plugin.")
def __init__(self, name, ptype=None, callback=None):
super(BaseWidget, self).__init__()
self.name = name
self.ptype = ptype
self.callback = callback
self.plugin = None
@property
def val(self):
msg = "Subclass of BaseWidget requires `val` property"
raise NotImplementedError(msg)
def _value_changed(self, value):
self.callback(self.name, value)
class Text(BaseWidget):
def __init__(self, name=None, text=''):
super(Text, self).__init__(name)
self._label = QtWidgets.QLabel()
self.text = text
self.layout = QtWidgets.QHBoxLayout(self)
if name is not None:
name_label = QtWidgets.QLabel()
name_label.setText(name)
self.layout.addWidget(name_label)
self.layout.addWidget(self._label)
@property
def text(self):
return self._label.text()
@text.setter
def text(self, text_str):
self._label.setText(text_str)
class Slider(BaseWidget):
"""Slider widget for adjusting numeric parameters.
Parameters
----------
name : str
Name of slider parameter. If this parameter is passed as a keyword
argument, it must match the name of that keyword argument (spaces are
replaced with underscores). In addition, this name is displayed as the
name of the slider.
low, high : float
Range of slider values.
value : float
Default slider value. If None, use midpoint between `low` and `high`.
value_type : {'float' | 'int'}, optional
Numeric type of slider value.
ptype : {'kwarg' | 'arg' | 'plugin'}, optional
Parameter type.
callback : callable f(widget_name, value), optional
Callback function called in response to slider changes.
*Note:* This function is typically set (overridden) when the widget is
added to a plugin.
orientation : {'horizontal' | 'vertical'}, optional
Slider orientation.
update_on : {'release' | 'move'}, optional
Control when callback function is called: on slider move or release.
"""
def __init__(self, name, low=0.0, high=1.0, value=None, value_type='float',
ptype='kwarg', callback=None, max_edit_width=60,
orientation='horizontal', update_on='release'):
super(Slider, self).__init__(name, ptype, callback)
if value is None:
value = (high - low) / 2.
# Set widget orientation
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if orientation == 'vertical':
self.slider = QtWidgets.QSlider(Qt.Vertical)
alignment = QtCore.Qt.AlignHCenter
align_text = QtCore.Qt.AlignHCenter
align_value = QtCore.Qt.AlignHCenter
self.layout = QtWidgets.QVBoxLayout(self)
elif orientation == 'horizontal':
self.slider = QtWidgets.QSlider(Qt.Horizontal)
alignment = QtCore.Qt.AlignVCenter
align_text = QtCore.Qt.AlignLeft
align_value = QtCore.Qt.AlignRight
self.layout = QtWidgets.QHBoxLayout(self)
else:
msg = "Unexpected value %s for 'orientation'"
raise ValueError(msg % orientation)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Set slider behavior for float and int values.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if value_type == 'float':
# divide slider into 1000 discrete values
slider_max = 1000
self._scale = float(high - low) / slider_max
self.slider.setRange(0, slider_max)
self.value_fmt = '%2.2f'
elif value_type == 'int':
self.slider.setRange(low, high)
self.value_fmt = '%d'
else:
msg = "Expected `value_type` to be 'float' or 'int'; received: %s"
raise ValueError(msg % value_type)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.value_type = value_type
self._low = low
self._high = high
# Update slider position to default value
self.val = value
if update_on == 'move':
self.slider.valueChanged.connect(self._on_slider_changed)
elif update_on == 'release':
self.slider.sliderReleased.connect(self._on_slider_changed)
else:
raise ValueError("Unexpected value %s for 'update_on'" % update_on)
self.slider.setFocusPolicy(QtCore.Qt.StrongFocus)
self.name_label = QtWidgets.QLabel()
self.name_label.setText(self.name)
self.name_label.setAlignment(align_text)
self.editbox = QtWidgets.QLineEdit()
self.editbox.setMaximumWidth(max_edit_width)
self.editbox.setText(self.value_fmt % self.val)
self.editbox.setAlignment(align_value)
self.editbox.editingFinished.connect(self._on_editbox_changed)
self.layout.addWidget(self.name_label)
self.layout.addWidget(self.slider)
self.layout.addWidget(self.editbox)
def _on_slider_changed(self):
"""Call callback function with slider's name and value as parameters"""
value = self.val
self.editbox.setText(str(value)[:4])
self.callback(self.name, value)
def _on_editbox_changed(self):
"""Validate input and set slider value"""
try:
value = float(self.editbox.text())
except ValueError:
self._bad_editbox_input()
return
if not self._low <= value <= self._high:
self._bad_editbox_input()
return
self.val = value
self._good_editbox_input()
self.callback(self.name, value)
def _good_editbox_input(self):
self.editbox.setStyleSheet("background-color: rgb(255, 255, 255)")
def _bad_editbox_input(self):
self.editbox.setStyleSheet("background-color: rgb(255, 200, 200)")
@property
def val(self):
value = self.slider.value()
if self.value_type == 'float':
value = value * self._scale + self._low
return value
@val.setter
def val(self, value):
if self.value_type == 'float':
value = (value - self._low) / self._scale
self.slider.setValue(value)
class ComboBox(BaseWidget):
"""ComboBox widget for selecting among a list of choices.
Parameters
----------
name : str
Name of ComboBox parameter. If this parameter is passed as a keyword
argument, it must match the name of that keyword argument (spaces are
replaced with underscores). In addition, this name is displayed as the
name of the ComboBox.
items: list of str
Allowed parameter values.
ptype : {'arg' | 'kwarg' | 'plugin'}, optional
Parameter type.
callback : callable f(widget_name, value), optional
Callback function called in response to combobox changes.
*Note:* This function is typically set (overridden) when the widget is
added to a plugin.
"""
def __init__(self, name, items, ptype='kwarg', callback=None):
super(ComboBox, self).__init__(name, ptype, callback)
self.name_label = QtWidgets.QLabel()
self.name_label.setText(self.name)
self.name_label.setAlignment(QtCore.Qt.AlignLeft)
self._combo_box = QtWidgets.QComboBox()
self._combo_box.addItems(list(items))
self.layout = QtWidgets.QHBoxLayout(self)
self.layout.addWidget(self.name_label)
self.layout.addWidget(self._combo_box)
self._combo_box.currentIndexChanged.connect(self._value_changed)
@property
def val(self):
return self._combo_box.currentText()
@property
def index(self):
return self._combo_box.currentIndex()
@index.setter
def index(self, i):
self._combo_box.setCurrentIndex(i)
class CheckBox(BaseWidget):
"""CheckBox widget
Parameters
----------
name : str
Name of CheckBox parameter. If this parameter is passed as a keyword
argument, it must match the name of that keyword argument (spaces are
replaced with underscores). In addition, this name is displayed as the
name of the CheckBox.
value: {False, True}, optional
Initial state of the CheckBox.
alignment: {'center','left','right'}, optional
Checkbox alignment
ptype : {'arg' | 'kwarg' | 'plugin'}, optional
Parameter type
callback : callable f(widget_name, value), optional
Callback function called in response to checkbox changes.
*Note:* This function is typically set (overridden) when the widget is
added to a plugin.
"""
def __init__(self, name, value=False, alignment='center', ptype='kwarg',
callback=None):
super(CheckBox, self).__init__(name, ptype, callback)
self._check_box = QtWidgets.QCheckBox()
self._check_box.setChecked(value)
self._check_box.setText(self.name)
self.layout = QtWidgets.QHBoxLayout(self)
if alignment == 'center':
self.layout.setAlignment(QtCore.Qt.AlignCenter)
elif alignment == 'left':
self.layout.setAlignment(QtCore.Qt.AlignLeft)
elif alignment == 'right':
self.layout.setAlignment(QtCore.Qt.AlignRight)
else:
raise ValueError("Unexpected value %s for 'alignment'" % alignment)
self.layout.addWidget(self._check_box)
self._check_box.stateChanged.connect(self._value_changed)
@property
def val(self):
return self._check_box.isChecked()
@val.setter
def val(self, i):
self._check_box.setChecked(i)
class Button(BaseWidget):
"""Button which calls callback upon click.
Parameters
----------
name : str
Name of button.
callback : callable f()
Function to call when button is clicked.
"""
def __init__(self, name, callback):
        super(Button, self).__init__(name)
self._button = QtGui.QPushButton(name)
self._button.clicked.connect(callback)
self.layout = QtGui.QHBoxLayout(self)
self.layout.addWidget(self._button)
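# ---------------------------------------------------------------------------
# Illustrative standalone usage (editor's sketch, not part of the original
# module). Widgets are normally attached to a viewer Plugin, which replaces
# `callback`; driving one directly only needs a QApplication and a callable.
# Assumes the `..qt` compatibility layer exposes QApplication on QtWidgets.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    app = QtWidgets.QApplication([])

    def report(name, value):
        # Stand-in for the plugin callback: just echo widget changes.
        print('%s -> %r' % (name, value))

    slider = Slider('sigma', low=0, high=5, value=1, value_type='float',
                    callback=report, update_on='move')
    slider.show()
    app.exec_()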
| bsd-3-clause |
birdsarah/bokeh | bokeh/util/serialization.py | 4 | 4029 | """ Functions for helping with serialization and deserialization of
Bokeh objects.
"""
from __future__ import absolute_import
import logging
log = logging.getLogger(__name__)
_simple_id = 1000
def make_id():
""" Return a new unique ID for a Bokeh object.
Normally this function will return UUIDs to use for identifying Bokeh
objects. This is especally important for Bokeh objects stored on a
Bokeh server. However, it is convenient to have more human-readable
IDs during development, so this behavior can be overridden by
setting the environment variable ``BOKEH_SIMPLE_IDS=yes``.
"""
global _simple_id
import uuid
from ..settings import settings
if settings.simple_ids(False):
_simple_id += 1
new_id = _simple_id
else:
new_id = uuid.uuid4()
return str(new_id)
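# Editor's note (illustrative, not in the original module): with
# BOKEH_SIMPLE_IDS=yes the counter yields '1001', '1002', ...; otherwise a
# uuid4 string is returned.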
def urljoin(*args):
""" Construct an absolute URL from several URL components.
Args:
*args (str) : URL components to join
Returns:
str : joined URL
"""
from six.moves.urllib.parse import urljoin as sys_urljoin
from functools import reduce
return reduce(sys_urljoin, args)
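# Editor's note (illustrative, not in the original module): reduce() applies
# the stdlib urljoin pairwise, so trailing slashes matter just as they do in
# the stdlib:
#
#     urljoin('http://host/', 'bokeh/', 'static/')  # 'http://host/bokeh/static/'
#     urljoin('http://host/', 'bokeh', 'static')    # 'http://host/static'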
def get_json(response):
""" Unify retrieving JSON responses from different sources.
Works correctly for HTTP responses from requests <=1.0, >1.0, and
the Flask test client.
Args:
response (Flask or requests response) : a response to process
Returns:
JSON
"""
import json
try:
import flask
except ImportError:
flask = None
if flask and isinstance(response, flask.Response):
# flask testing
return json.loads(response.data.decode('utf-8'))
else:
# requests
if hasattr(response.json, '__call__'):
return response.json()
else:
return response.json
def dump(objs, docid, changed_only=True):
""" Serialize a sequence of Bokeh objects into JSON
Args:
objs (seq[obj]) : a sequence of Bokeh object to dump
docid (str) : an ID for a Bokeh Document to dump relative to
changed_only (bool, optional) : whether to dump only attributes
that have had their values changed at some point (default: True)
Returns:
list[json]
"""
json_objs = []
for obj in objs:
ref = obj.ref
ref["attributes"] = obj.vm_serialize(changed_only=changed_only)
ref["attributes"].update({"id": ref["id"], "doc" : docid})
json_objs.append(ref)
return json_objs
def is_ref(frag):
""" Test whether a given Bokeh object graph fragment is a reference.
A Bokeh "reference" is a ``dict`` with ``"type"`` and ``"id"`` keys.
Args:
frag (dict) : a fragment of a Bokeh object graph
Returns:
True, if the fragment is a reference, otherwise False
"""
return isinstance(frag, dict) and \
frag.get('type') and \
frag.get('id')
def json_apply(fragment, check_func, func):
""" Apply a function to JSON fragments that match the given predicate
and return the collected results.
Recursively traverses a nested collection of ``dict`` and ``list``,
applying ``check_func`` to each fragment. If True, then collect
``func(fragment)`` in the final output
Args:
fragment (JSON-like) : the fragment to apply ``func`` to recursively
check_func (callable) : the predicate to test fragments with
func (callable) : the conversion function to apply
Returns:
converted fragments
"""
if check_func(fragment):
return func(fragment)
elif isinstance(fragment, list):
output = []
for val in fragment:
output.append(json_apply(val, check_func, func))
return output
elif isinstance(fragment, dict):
output = {}
for k, val in fragment.items():
output[k] = json_apply(val, check_func, func)
return output
else:
        return fragment
| bsd-3-clause |
Itxaka/st2 | st2api/tests/unit/controllers/v1/test_base.py | 1 | 2888 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from tests import FunctionalTest
class TestBase(FunctionalTest):
def test_defaults(self):
response = self.app.get('/')
self.assertEqual(response.status_int, 200)
self.assertEqual(response.headers['Access-Control-Allow-Origin'],
'http://localhost')
self.assertEqual(response.headers['Access-Control-Allow-Methods'],
'GET,POST,PUT,DELETE,OPTIONS')
self.assertEqual(response.headers['Access-Control-Allow-Headers'],
'Content-Type,Authorization,X-Auth-Token,X-Request-ID')
self.assertEqual(response.headers['Access-Control-Expose-Headers'],
'Content-Type,X-Limit,X-Total-Count,X-Request-ID')
def test_origin(self):
response = self.app.get('/', headers={
'origin': 'http://localhost:3000'
})
self.assertEqual(response.status_int, 200)
self.assertEqual(response.headers['Access-Control-Allow-Origin'],
'http://localhost:3000')
def test_additional_origin(self):
response = self.app.get('/', headers={
'origin': 'http://dev'
})
self.assertEqual(response.status_int, 200)
self.assertEqual(response.headers['Access-Control-Allow-Origin'],
'http://dev')
def test_wrong_origin(self):
response = self.app.get('/', headers={
'origin': 'http://xss'
})
self.assertEqual(response.status_int, 200)
self.assertEqual(response.headers['Access-Control-Allow-Origin'],
'null')
def test_wildcard_origin(self):
try:
cfg.CONF.set_override('allow_origin', ['*'], 'api')
response = self.app.get('/', headers={
'origin': 'http://xss'
})
finally:
cfg.CONF.clear_override('allow_origin', 'api')
self.assertEqual(response.status_int, 200)
self.assertEqual(response.headers['Access-Control-Allow-Origin'],
'*')
| apache-2.0 |
jamezpolley/pip | pip/_vendor/requests/packages/chardet/compat.py | 2943 | 1157 | ######################## BEGIN LICENSE BLOCK ########################
# Contributor(s):
# Ian Cordasco - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
if sys.version_info < (3, 0):
base_str = (str, unicode)
else:
base_str = (bytes, str)
def wrap_ord(a):
if sys.version_info < (3, 0) and isinstance(a, base_str):
return ord(a)
else:
return a
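# Editor's note (illustrative, not in the original module): under Python 2,
# wrap_ord('A') and wrap_ord(65) both return 65; under Python 3, iterating a
# bytes object already yields ints, so values pass straight through.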
| mit |
michaelaye/iuvs | iuvs/profile_movie_plotter.py | 1 | 4393 | import os
import sys
import matplotlib
matplotlib.use('Agg')  # select the backend before pyplot is imported, or the call has no effect
import matplotlib.animation as animation
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from . import io, scaling
from .multitools import IntProgress, display
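# Editor's note (assumption inferred from the usage below): a Scaler class is
# expected to accept (dark_subframe, raw_subframe), expose do_fit(), and
# provide apply_fit(array) returning a rescaled dark frame of the same shape.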
def apply_and_compare(l1b, Scaler=scaling.PolyScaler1):
plt.ioff()
plt.rcParams['image.cmap'] = 'cubehelix'
plt.rcParams['image.interpolation'] = None
# set up ani writer
Writer = animation.writers['ffmpeg']
metadata = dict(title='IUVS dark subtracted profiles',
artist='K.-Michael Aye')
writer = Writer(fps=3, metadata=metadata, bitrate=1800)
# determine visual max and min for image plots
min_, max_ = np.percentile(l1b.dds_dn_s, (1, 99))
# now calculate min/max for profiles but with rows as index
# profmin, profmax = np.percentile(l1b.dds_dn_s, (0.5,99.5), axis=(0,2))
profmin = {5: -1, 31: -1, 55: -1}
profmax = {5: 3, 31: 3, 55: 3}
for ndark in range(l1b.n_darks):
prog = IntProgress(min=0, max=l1b.n_integrations)
display(prog)
fulldark = l1b.get_integration('dark_dn_s', ndark)
for nraw in range(l1b.n_integrations):
fullraw = l1b.get_integration('raw_dn_s', nraw)
spa_slice, spe_slice = l1b.find_scaling_window(fullraw)
raw_subframe = fullraw[spa_slice, spe_slice]
dark_subframe = fulldark[spa_slice, spe_slice]
scaler = Scaler(dark_subframe, raw_subframe)
scaler.do_fit()
newdark = scaler.apply_fit(fulldark)
subtracted = fullraw - newdark
# plotting
fig, ax = plt.subplots(nrows=3)
rawmin, rawmax = np.percentile(fullraw, (1, 99))
ax[0].imshow(fullraw, vmin=rawmin, vmax=rawmax)
ax[0].set_title('Raw, {}_{} (NOT same colormap!)'.format(ndark, nraw))
current = l1b.get_integration('dds_dn_s', nraw)
ax[1].imshow(current, vmin=min_, vmax=max_)
ax[1].set_title('Current dark subtraction')
im = ax[2].imshow(subtracted, vmin=min_, vmax=max_)
ax[2].set_title("{} scaled dark subtracted".format(Scaler))
ax[2].set_xlabel('Spectral pixel number')
fig.tight_layout()
fig.subplots_adjust(top=0.9, bottom=0.1)
cb = plt.colorbar(im, ax=ax.ravel().tolist())
cb.set_label(' DN/s', fontsize=13, rotation=0)
fig.savefig(os.path.join(str(io.plotfolder),
'compare_{}_{}.png'.format(ndark, str(nraw).zfill(2))),
dpi=120)
plt.close(fig)
with sns.axes_style('whitegrid'):
fig, ax = plt.subplots(nrows=4, sharex=True)
with writer.saving(fig,
'/Users/klay6683/plots/profiles_dark{}.mp4'
.format(ndark), 100):
for row in [5, 31, 55]:
ax[0].plot(fullraw[row], lw=1, label='Raw, row{}'.format(row))
ax[0].set_ylim(0, 4)
ax[0].legend()
ax[0].set_title('Raw profiles', fontsize=10)
for row, myaxis in zip([5, 31, 55], ax[1:]):
myaxis.plot(current[row], 'r-', label='Current, row{}'.format(row),
lw=1, alpha=0.7)
myaxis.plot(subtracted[row], 'g-', label='ScaledDark, row{}'.format(row),
lw=1, alpha=0.7)
myaxis.set_ylim(profmin[row], profmax[row])
myaxis.legend()
myaxis.set_ylabel('DN / s')
myaxis.set_title('Row {}'.format(row), fontsize=10)
ax[3].set_xlabel('Spectral pixel number')
fig.suptitle('Profile comparison')
fig.savefig(os.path.join(str(io.plotfolder),
'mean_profs_compare_{}_{}.png'
.format(ndark, str(nraw).zfill(2))),
dpi=120)
writer.grab_frame()
plt.close(fig)
prog.value = nraw + 1
if __name__ == '__main__':
l1b = io.L1BReader(sys.argv[1])
sys.exit(apply_and_compare(l1b))
| isc |
mgit-at/ansible | test/units/modules/network/f5/test_bigip_gtm_global.py | 21 | 3688 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_gtm_global import ApiParameters
from library.modules.bigip_gtm_global import ModuleParameters
from library.modules.bigip_gtm_global import ModuleManager
from library.modules.bigip_gtm_global import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_gtm_global import ApiParameters
from ansible.modules.network.f5.bigip_gtm_global import ModuleParameters
from ansible.modules.network.f5.bigip_gtm_global import ModuleManager
from ansible.modules.network.f5.bigip_gtm_global import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
synchronization=True,
synchronization_group_name='foo',
synchronize_zone_files=True
)
p = ModuleParameters(params=args)
assert p.synchronization is True
assert p.synchronization_group_name == 'foo'
assert p.synchronize_zone_files is True
def test_api_parameters(self):
args = load_fixture('load_gtm_global_settings_general_1.json')
p = ApiParameters(params=args)
assert p.synchronization is False
assert p.synchronization_group_name == 'default'
assert p.synchronize_zone_files is False
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def update(self, *args):
set_module_args(dict(
synchronization="yes",
synchronization_group_name='foo',
synchronize_zone_files="yes",
server='localhost',
password='password',
user='admin'
))
current = ApiParameters(params=load_fixture('load_gtm_global_settings_general_1.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.update_on_device = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
results = mm.exec_module()
assert results['changed'] is True
assert results['synchronization'] == 'yes'
assert results['synchronization_group_name'] == 'foo'
assert results['synchronize_zone_files'] == 'yes'
| gpl-3.0 |
phenoxim/cinder | cinder/scheduler/filters/extra_specs_ops.py | 13 | 2432 | # Copyright (c) 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import operator
from oslo_utils import strutils
# 1. The following operations are supported:
# =, s==, s!=, s>=, s>, s<=, s<, <in>, <is>, <or>, ==, !=, >=, <=
# 2. Note that <or> is handled in a different way below.
# 3. If the first word in the extra_specs is not one of the operators,
# it is ignored.
_op_methods = {'=': lambda x, y: float(x) >= float(y),
'<in>': lambda x, y: y in x,
'<is>': lambda x, y: (strutils.bool_from_string(x) is
strutils.bool_from_string(y)),
'==': lambda x, y: float(x) == float(y),
'!=': lambda x, y: float(x) != float(y),
'>=': lambda x, y: float(x) >= float(y),
'<=': lambda x, y: float(x) <= float(y),
's==': operator.eq,
's!=': operator.ne,
's<': operator.lt,
's<=': operator.le,
's>': operator.gt,
's>=': operator.ge}
def match(value, req):
if req is None:
if value is None:
return True
else:
return False
words = req.split()
op = method = None
if words:
op = words.pop(0)
method = _op_methods.get(op)
if op != '<or>' and not method:
return value == req
if value is None:
return False
if op == '<or>': # Ex: <or> v1 <or> v2 <or> v3
while True:
if words.pop(0) == value:
return True
if not words:
break
op = words.pop(0) # remove a keyword <or>
if not words:
break
return False
try:
if words and method(value, words[0]):
return True
except ValueError:
pass
return False
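# ---------------------------------------------------------------------------
# Illustrative checks (editor's sketch, not part of the original module):
# the first token of the requirement string selects the comparison.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    assert match('123', '== 123')          # numeric equality
    assert match('ssd', 's== ssd')         # string equality
    assert match('2', '<or> 1 <or> 2')     # membership in alternatives
    assert match('plain', 'plain')         # no operator: literal comparison
    assert not match(None, '>= 1')         # a missing capability never matches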
| apache-2.0 |
Omegaphora/external_chromium_org | tools/telemetry/telemetry/results/page_run_unittest.py | 33 | 2090 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from telemetry.page import page_set
from telemetry.results import page_run
from telemetry.value import failure
from telemetry.value import scalar
from telemetry.value import skip
class PageRunTest(unittest.TestCase):
def setUp(self):
self.page_set = page_set.PageSet(file_path=os.path.dirname(__file__))
self.page_set.AddPageWithDefaultRunNavigate("http://www.bar.com/")
@property
def pages(self):
return self.page_set.pages
def testPageRunFailed(self):
run = page_run.PageRun(self.pages[0])
run.AddValue(failure.FailureValue.FromMessage(self.pages[0], 'test'))
self.assertFalse(run.ok)
self.assertTrue(run.failed)
self.assertFalse(run.skipped)
run = page_run.PageRun(self.pages[0])
run.AddValue(scalar.ScalarValue(self.pages[0], 'a', 's', 1))
run.AddValue(failure.FailureValue.FromMessage(self.pages[0], 'test'))
self.assertFalse(run.ok)
self.assertTrue(run.failed)
self.assertFalse(run.skipped)
def testPageRunSkipped(self):
run = page_run.PageRun(self.pages[0])
run.AddValue(failure.FailureValue.FromMessage(self.pages[0], 'test'))
run.AddValue(skip.SkipValue(self.pages[0], 'test'))
self.assertFalse(run.ok)
self.assertFalse(run.failed)
self.assertTrue(run.skipped)
run = page_run.PageRun(self.pages[0])
run.AddValue(scalar.ScalarValue(self.pages[0], 'a', 's', 1))
run.AddValue(skip.SkipValue(self.pages[0], 'test'))
self.assertFalse(run.ok)
self.assertFalse(run.failed)
self.assertTrue(run.skipped)
def testPageRunSucceeded(self):
run = page_run.PageRun(self.pages[0])
self.assertTrue(run.ok)
self.assertFalse(run.failed)
self.assertFalse(run.skipped)
run = page_run.PageRun(self.pages[0])
run.AddValue(scalar.ScalarValue(self.pages[0], 'a', 's', 1))
self.assertTrue(run.ok)
self.assertFalse(run.failed)
self.assertFalse(run.skipped)
| bsd-3-clause |
ToontownUprising/src | toontown/suit/DistributedCashbotBoss.py | 1 | 43368 | from direct.directnotify import DirectNotifyGlobal
from direct.fsm import FSM
from direct.interval.IntervalGlobal import *
from direct.task.Task import Task
from direct.task.TaskManagerGlobal import *
import math
from pandac.PandaModules import *
import random
import DistributedBossCog
import DistributedCashbotBossGoon
import SuitDNA
from otp.otpbase import OTPGlobals
from toontown.battle import MovieToonVictory
from toontown.battle import RewardPanel
from toontown.battle import SuitBattleGlobals
from toontown.building import ElevatorConstants
from toontown.building import ElevatorUtils
from toontown.chat import ResistanceChat
from toontown.chat.ChatGlobals import *
from toontown.coghq import CogDisguiseGlobals
from toontown.distributed import DelayDelete
from toontown.nametag import NametagGlobals
from toontown.nametag.NametagGlobals import *
from toontown.toon import Toon
from toontown.toon import ToonDNA
from toontown.toonbase import TTLocalizer
from toontown.toonbase import ToontownGlobals
OneBossCog = None
TTL = TTLocalizer
class DistributedCashbotBoss(DistributedBossCog.DistributedBossCog, FSM.FSM):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedCashbotBoss')
numFakeGoons = 3
def __init__(self, cr):
DistributedBossCog.DistributedBossCog.__init__(self, cr)
FSM.FSM.__init__(self, 'DistributedSellbotBoss')
self.resistanceToon = None
self.resistanceToonOnstage = 0
self.cranes = {}
self.safes = {}
self.goons = []
self.bossMaxDamage = ToontownGlobals.CashbotBossMaxDamage
self.elevatorType = ElevatorConstants.ELEVATOR_CFO
base.boss = self
return
def announceGenerate(self):
DistributedBossCog.DistributedBossCog.announceGenerate(self)
self.setName(TTLocalizer.CashbotBossName)
nameInfo = TTLocalizer.BossCogNameWithDept % {'name': self.name,
'dept': SuitDNA.getDeptFullname(self.style.dept)}
self.setDisplayName(nameInfo)
target = CollisionSphere(2, 0, 0, 3)
targetNode = CollisionNode('headTarget')
targetNode.addSolid(target)
targetNode.setCollideMask(ToontownGlobals.PieBitmask)
self.headTarget = self.neck.attachNewNode(targetNode)
shield = CollisionSphere(0, 0, 0.8, 7)
shieldNode = CollisionNode('shield')
shieldNode.addSolid(shield)
shieldNode.setCollideMask(ToontownGlobals.PieBitmask)
shieldNodePath = self.pelvis.attachNewNode(shieldNode)
self.heldObject = None
self.bossDamage = 0
self.loadEnvironment()
self.__makeResistanceToon()
self.physicsMgr = PhysicsManager()
integrator = LinearEulerIntegrator()
self.physicsMgr.attachLinearIntegrator(integrator)
fn = ForceNode('gravity')
self.fnp = self.geom.attachNewNode(fn)
gravity = LinearVectorForce(0, 0, -32)
fn.addForce(gravity)
self.physicsMgr.addLinearForce(gravity)
base.localAvatar.chatMgr.chatInputSpeedChat.addCFOMenu()
global OneBossCog
if OneBossCog != None:
self.notify.warning('Multiple BossCogs visible.')
OneBossCog = self
return
def disable(self):
global OneBossCog
DistributedBossCog.DistributedBossCog.disable(self)
self.demand('Off')
self.unloadEnvironment()
self.__cleanupResistanceToon()
self.fnp.removeNode()
self.physicsMgr.clearLinearForces()
self.battleThreeMusic.stop()
self.epilogueMusic.stop()
base.localAvatar.chatMgr.chatInputSpeedChat.removeCFOMenu()
if OneBossCog == self:
OneBossCog = None
return
def __makeResistanceToon(self):
if self.resistanceToon:
return
npc = Toon.Toon()
npc.setName(TTLocalizer.ResistanceToonName)
npc.setPickable(0)
npc.setPlayerType(NametagGlobals.CCNonPlayer)
dna = ToonDNA.ToonDNA()
dna.newToonRandom(11237, 'f', 1)
dna.head = 'pls'
npc.setDNAString(dna.makeNetString())
npc.animFSM.request('neutral')
self.resistanceToon = npc
self.resistanceToon.setPosHpr(*ToontownGlobals.CashbotRTBattleOneStartPosHpr)
state = random.getstate()
random.seed(self.doId)
self.resistanceToon.suitType = SuitDNA.getRandomSuitByDept('m')
random.setstate(state)
self.fakeGoons = []
for i in xrange(self.numFakeGoons):
goon = DistributedCashbotBossGoon.DistributedCashbotBossGoon(base.cr)
goon.doId = -1 - i
goon.setBossCogId(self.doId)
goon.generate()
goon.announceGenerate()
self.fakeGoons.append(goon)
self.__hideFakeGoons()
def __cleanupResistanceToon(self):
self.__hideResistanceToon()
if self.resistanceToon:
self.resistanceToon.removeActive()
self.resistanceToon.delete()
self.resistanceToon = None
for i in xrange(self.numFakeGoons):
self.fakeGoons[i].disable()
self.fakeGoons[i].delete()
self.fakeGoons[i] = None
return
def __showResistanceToon(self, withSuit):
if not self.resistanceToonOnstage:
self.resistanceToon.addActive()
self.resistanceToon.reparentTo(self.geom)
self.resistanceToonOnstage = 1
if withSuit:
suit = self.resistanceToon.suitType
self.resistanceToon.putOnSuit(suit, False)
else:
self.resistanceToon.takeOffSuit()
def __hideResistanceToon(self):
if self.resistanceToonOnstage:
self.resistanceToon.removeActive()
self.resistanceToon.detachNode()
self.resistanceToonOnstage = 0
def __hideFakeGoons(self):
if self.fakeGoons:
for goon in self.fakeGoons:
goon.request('Off')
def __showFakeGoons(self, state):
if self.fakeGoons:
for goon in self.fakeGoons:
goon.request(state)
def loadEnvironment(self):
DistributedBossCog.DistributedBossCog.loadEnvironment(self)
self.midVault = loader.loadModel('phase_10/models/cogHQ/MidVault.bam')
self.endVault = loader.loadModel('phase_10/models/cogHQ/EndVault.bam')
self.lightning = loader.loadModel('phase_10/models/cogHQ/CBLightning.bam')
self.magnet = loader.loadModel('phase_10/models/cogHQ/CBMagnet.bam')
self.craneArm = loader.loadModel('phase_10/models/cogHQ/CBCraneArm.bam')
self.controls = loader.loadModel('phase_10/models/cogHQ/CBCraneControls.bam')
self.stick = loader.loadModel('phase_10/models/cogHQ/CBCraneStick.bam')
self.safe = loader.loadModel('phase_10/models/cogHQ/CBSafe.bam')
self.eyes = loader.loadModel('phase_10/models/cogHQ/CashBotBossEyes.bam')
self.cableTex = self.craneArm.findTexture('MagnetControl')
self.eyes.setPosHprScale(4.5, 0, -2.5, 90, 90, 0, 0.4, 0.4, 0.4)
self.eyes.reparentTo(self.neck)
self.eyes.hide()
self.midVault.setPos(0, -222, -70.7)
self.endVault.setPos(84, -201, -6)
self.geom = NodePath('geom')
self.midVault.reparentTo(self.geom)
self.endVault.reparentTo(self.geom)
self.endVault.findAllMatches('**/MagnetArms').detach()
self.endVault.findAllMatches('**/Safes').detach()
self.endVault.findAllMatches('**/MagnetControlsAll').detach()
cn = self.endVault.find('**/wallsCollision').node()
cn.setIntoCollideMask(OTPGlobals.WallBitmask | ToontownGlobals.PieBitmask)
self.door1 = self.midVault.find('**/SlidingDoor1/')
self.door2 = self.midVault.find('**/SlidingDoor/')
self.door3 = self.endVault.find('**/SlidingDoor/')
elevatorModel = loader.loadModel('phase_10/models/cogHQ/CFOElevator')
elevatorOrigin = self.midVault.find('**/elevator_origin')
elevatorOrigin.setScale(1)
elevatorModel.reparentTo(elevatorOrigin)
leftDoor = elevatorModel.find('**/left_door')
leftDoor.setName('left-door')
rightDoor = elevatorModel.find('**/right_door')
rightDoor.setName('right-door')
self.setupElevator(elevatorOrigin)
ElevatorUtils.closeDoors(leftDoor, rightDoor, ElevatorConstants.ELEVATOR_CFO)
walls = self.endVault.find('**/RollUpFrameCillison')
walls.detachNode()
self.evWalls = self.replaceCollisionPolysWithPlanes(walls)
self.evWalls.reparentTo(self.endVault)
self.evWalls.stash()
floor = self.endVault.find('**/EndVaultFloorCollision')
floor.detachNode()
self.evFloor = self.replaceCollisionPolysWithPlanes(floor)
self.evFloor.reparentTo(self.endVault)
self.evFloor.setName('floor')
plane = CollisionPlane(Plane(Vec3(0, 0, 1), Point3(0, 0, -50)))
planeNode = CollisionNode('dropPlane')
planeNode.addSolid(plane)
planeNode.setCollideMask(ToontownGlobals.PieBitmask)
self.geom.attachNewNode(planeNode)
self.geom.reparentTo(render)
def unloadEnvironment(self):
DistributedBossCog.DistributedBossCog.unloadEnvironment(self)
self.geom.removeNode()
def replaceCollisionPolysWithPlanes(self, model):
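        # Rebuild the model's collision geometry as infinite planes: collect
        # the plane of every CollisionPolygon, sort them so near-duplicates
        # become adjacent, then keep a single CollisionPlane per group of
        # planes that compare equal within `threshold`.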
newCollisionNode = CollisionNode('collisions')
newCollideMask = BitMask32(0)
planes = []
collList = model.findAllMatches('**/+CollisionNode')
if not collList:
collList = [model]
for cnp in collList:
cn = cnp.node()
if not isinstance(cn, CollisionNode):
self.notify.warning('Not a collision node: %s' % repr(cnp))
break
newCollideMask = newCollideMask | cn.getIntoCollideMask()
for i in xrange(cn.getNumSolids()):
solid = cn.getSolid(i)
if isinstance(solid, CollisionPolygon):
plane = Plane(solid.getPlane())
planes.append(plane)
else:
self.notify.warning('Unexpected collision solid: %s' % repr(solid))
newCollisionNode.addSolid(plane)
newCollisionNode.setIntoCollideMask(newCollideMask)
threshold = 0.1
planes.sort(lambda p1, p2: p1.compareTo(p2, threshold))
lastPlane = None
for plane in planes:
if lastPlane == None or plane.compareTo(lastPlane, threshold) != 0:
cp = CollisionPlane(plane)
newCollisionNode.addSolid(cp)
lastPlane = plane
return NodePath(newCollisionNode)
def __makeGoonMovieForIntro(self):
goonTrack = Parallel()
goon = self.fakeGoons[0]
goonTrack.append(Sequence(
goon.posHprInterval(0, Point3(111, -287, 0), VBase3(165, 0, 0)),
goon.posHprInterval(9, Point3(101, -323, 0), VBase3(165, 0, 0)),
goon.hprInterval(1, VBase3(345, 0, 0)),
goon.posHprInterval(9, Point3(111, -287, 0), VBase3(345, 0, 0)),
goon.hprInterval(1, VBase3(165, 0, 0)),
goon.posHprInterval(9.5, Point3(104, -316, 0), VBase3(165, 0, 0)),
Func(goon.request, 'Stunned'),
Wait(1)))
goon = self.fakeGoons[1]
goonTrack.append(Sequence(
goon.posHprInterval(0, Point3(119, -315, 0), VBase3(357, 0, 0)),
goon.posHprInterval(9, Point3(121, -280, 0), VBase3(357, 0, 0)),
goon.hprInterval(1, VBase3(177, 0, 0)),
goon.posHprInterval(9, Point3(119, -315, 0), VBase3(177, 0, 0)),
goon.hprInterval(1, VBase3(357, 0, 0)),
goon.posHprInterval(9, Point3(121, -280, 0), VBase3(357, 0, 0))))
goon = self.fakeGoons[2]
goonTrack.append(Sequence(
goon.posHprInterval(0, Point3(102, -320, 0), VBase3(231, 0, 0)),
goon.posHprInterval(9, Point3(127, -337, 0), VBase3(231, 0, 0)),
goon.hprInterval(1, VBase3(51, 0, 0)),
goon.posHprInterval(9, Point3(102, -320, 0), VBase3(51, 0, 0)),
goon.hprInterval(1, VBase3(231, 0, 0)),
goon.posHprInterval(9, Point3(127, -337, 0), VBase3(231, 0, 0))))
return Sequence(Func(self.__showFakeGoons, 'Walk'), goonTrack, Func(self.__hideFakeGoons))
def makeIntroductionMovie(self, delayDeletes):
for toonId in self.involvedToons:
toon = self.cr.doId2do.get(toonId)
if toon:
delayDeletes.append(DelayDelete.DelayDelete(toon, 'CashbotBoss.makeIntroductionMovie'))
rtTrack = Sequence()
startPos = Point3(ToontownGlobals.CashbotBossOffstagePosHpr[0], ToontownGlobals.CashbotBossOffstagePosHpr[1], ToontownGlobals.CashbotBossOffstagePosHpr[2])
battlePos = Point3(ToontownGlobals.CashbotBossBattleOnePosHpr[0], ToontownGlobals.CashbotBossBattleOnePosHpr[1], ToontownGlobals.CashbotBossBattleOnePosHpr[2])
battleHpr = VBase3(ToontownGlobals.CashbotBossBattleOnePosHpr[3], ToontownGlobals.CashbotBossBattleOnePosHpr[4], ToontownGlobals.CashbotBossBattleOnePosHpr[5])
bossTrack = Sequence()
bossTrack.append(Func(self.reparentTo, render))
bossTrack.append(Func(self.getGeomNode().setH, 180))
bossTrack.append(Func(self.pelvis.setHpr, self.pelvisForwardHpr))
bossTrack.append(Func(self.loop, 'Ff_neutral'))
track, hpr = self.rollBossToPoint(startPos, None, battlePos, None, 0)
bossTrack.append(track)
track, hpr = self.rollBossToPoint(battlePos, hpr, battlePos, battleHpr, 0)
bossTrack.append(track)
bossTrack.append(Func(self.getGeomNode().setH, 0))
bossTrack.append(Func(self.pelvis.setHpr, self.pelvisReversedHpr))
goonTrack = self.__makeGoonMovieForIntro()
attackToons = TTL.CashbotBossCogAttack
rToon = self.resistanceToon
rToon.setPosHpr(*ToontownGlobals.CashbotRTBattleOneStartPosHpr)
track = Sequence(
Func(base.camera.setPosHpr, 82, -219, 5, 267, 0, 0),
Func(rToon.setChatAbsolute, TTL.ResistanceToonWelcome, CFSpeech),
Wait(3),
Sequence(goonTrack, duration=0),
Parallel(
base.camera.posHprInterval(4, Point3(108, -244, 4), VBase3(211.5, 0, 0)),
Sequence(
Func(rToon.suit.setPlayRate, 1.4, 'walk'),
Func(rToon.suit.loop, 'walk'),
Parallel(
rToon.hprInterval(1, VBase3(180, 0, 0)),
rToon.posInterval(3, VBase3(120, -255, 0)),
Sequence(
Wait(2),
Func(rToon.clearChat))),
Func(rToon.suit.loop, 'neutral'),
self.door2.posInterval(3, VBase3(0, 0, 30)))),
Func(rToon.setHpr, 0, 0, 0),
Func(rToon.setChatAbsolute, TTL.ResistanceToonTooLate, CFSpeech),
Func(base.camera.reparentTo, render),
Func(base.camera.setPosHpr, 61.1, -228.8, 10.2, -90, 0, 0),
self.door1.posInterval(2, VBase3(0, 0, 30)),
Parallel(
bossTrack,
Sequence(
Wait(3),
Func(rToon.clearChat),
self.door1.posInterval(3, VBase3(0, 0, 0)))),
Func(self.setChatAbsolute, TTL.CashbotBossDiscoverToons1, CFSpeech),
base.camera.posHprInterval(1.5, Point3(93.3, -230, 0.7), VBase3(-92.9, 39.7, 8.3)),
Func(self.setChatAbsolute, TTL.CashbotBossDiscoverToons2, CFSpeech),
Wait(4),
Func(self.clearChat),
self.loseCogSuits(self.toonsA + self.toonsB, render, (113, -228, 10, 90, 0, 0)),
Wait(1),
Func(rToon.setHpr, 0, 0, 0),
self.loseCogSuits([rToon], render, (133, -243, 5, 143, 0, 0), True),
Func(rToon.setChatAbsolute, TTL.ResistanceToonKeepHimBusy, CFSpeech),
Wait(1),
Func(self.__showResistanceToon, False),
Sequence(
Func(rToon.animFSM.request, 'run'),
rToon.hprInterval(1, VBase3(180, 0, 0)),
Parallel(
Sequence(
rToon.posInterval(1.5, VBase3(109, -294, 0)),
Parallel(Func(rToon.animFSM.request, 'jump')),
rToon.posInterval(1.5, VBase3(93.935, -341.065, 2))),
self.door2.posInterval(3, VBase3(0, 0, 0))),
Func(rToon.animFSM.request, 'neutral')),
self.toonNormalEyes(self.involvedToons),
self.toonNormalEyes([self.resistanceToon], True),
Func(rToon.clearChat),
Func(base.camera.setPosHpr, 93.3, -230, 0.7, -92.9, 39.7, 8.3),
Func(self.setChatAbsolute, attackToons, CFSpeech),
Wait(2),
Func(self.clearChat))
return Sequence(Func(base.camera.reparentTo, render), track)
def __makeGoonMovieForBattleThree(self):
goonPosHprs = [[Point3(111, -287, 0),
VBase3(165, 0, 0),
Point3(101, -323, 0),
VBase3(165, 0, 0)], [Point3(119, -315, 0),
VBase3(357, 0, 0),
Point3(121, -280, 0),
VBase3(357, 0, 0)], [Point3(102, -320, 0),
VBase3(231, 0, 0),
Point3(127, -337, 0),
VBase3(231, 0, 0)]]
mainGoon = self.fakeGoons[0]
goonLoop = Parallel()
for i in xrange(1, self.numFakeGoons):
goon = self.fakeGoons[i]
goonLoop.append(Sequence(goon.posHprInterval(8, goonPosHprs[i][0], goonPosHprs[i][1]), goon.posHprInterval(8, goonPosHprs[i][2], goonPosHprs[i][3])))
goonTrack = Sequence(Func(self.__showFakeGoons, 'Walk'), Func(mainGoon.request, 'Stunned'), Func(goonLoop.loop), Wait(20))
return goonTrack
def makePrepareBattleThreeMovie(self, delayDeletes, crane, safe):
for toonId in self.involvedToons:
toon = self.cr.doId2do.get(toonId)
if toon:
delayDeletes.append(DelayDelete.DelayDelete(toon, 'CashbotBoss.makePrepareBattleThreeMovie'))
startPos = Point3(ToontownGlobals.CashbotBossBattleOnePosHpr[0], ToontownGlobals.CashbotBossBattleOnePosHpr[1], ToontownGlobals.CashbotBossBattleOnePosHpr[2])
battlePos = Point3(ToontownGlobals.CashbotBossBattleThreePosHpr[0], ToontownGlobals.CashbotBossBattleThreePosHpr[1], ToontownGlobals.CashbotBossBattleThreePosHpr[2])
startHpr = Point3(ToontownGlobals.CashbotBossBattleOnePosHpr[3], ToontownGlobals.CashbotBossBattleOnePosHpr[4], ToontownGlobals.CashbotBossBattleOnePosHpr[5])
battleHpr = VBase3(ToontownGlobals.CashbotBossBattleThreePosHpr[3], ToontownGlobals.CashbotBossBattleThreePosHpr[4], ToontownGlobals.CashbotBossBattleThreePosHpr[5])
finalHpr = VBase3(135, 0, 0)
bossTrack = Sequence()
bossTrack.append(Func(self.reparentTo, render))
bossTrack.append(Func(self.getGeomNode().setH, 180))
bossTrack.append(Func(self.pelvis.setHpr, self.pelvisForwardHpr))
bossTrack.append(Func(self.loop, 'Ff_neutral'))
track, hpr = self.rollBossToPoint(startPos, startHpr, startPos, battleHpr, 0)
bossTrack.append(track)
track, hpr = self.rollBossToPoint(startPos, None, battlePos, None, 0)
bossTrack.append(track)
track, hpr = self.rollBossToPoint(battlePos, battleHpr, battlePos, finalHpr, 0)
bossTrack.append(track)
rToon = self.resistanceToon
rToon.setPosHpr(93.935, -341.065, 0, -45, 0, 0)
goon = self.fakeGoons[0]
crane = self.cranes[0]
track = Sequence(
Func(self.__hideToons),
Func(crane.request, 'Movie'),
Func(crane.accomodateToon, rToon),
Func(goon.request, 'Stunned'),
Func(goon.setPosHpr, 104, -316, 0, 165, 0, 0),
Parallel(
self.door2.posInterval(4.5, VBase3(0, 0, 30)),
self.door3.posInterval(4.5, VBase3(0, 0, 30)),
bossTrack),
Func(rToon.loop, 'leverNeutral'),
Func(base.camera.reparentTo, self.geom),
Func(base.camera.setPosHpr, 105, -326, 5, 136.3, 0, 0),
Func(rToon.setChatAbsolute, TTL.ResistanceToonWatchThis, CFSpeech),
Wait(2),
Func(rToon.clearChat),
Func(base.camera.setPosHpr, 105, -326, 20, -45.3, 11, 0),
Func(self.setChatAbsolute, TTL.CashbotBossGetAwayFromThat, CFSpeech),
Wait(2),
Func(self.clearChat),
base.camera.posHprInterval(1.5, Point3(105, -326, 5), Point3(136.3, 0, 0), blendType='easeInOut'),
Func(rToon.setChatAbsolute, TTL.ResistanceToonCraneInstructions1, CFSpeech),
Wait(4),
Func(rToon.setChatAbsolute, TTL.ResistanceToonCraneInstructions2, CFSpeech),
Wait(4),
Func(rToon.setChatAbsolute, TTL.ResistanceToonCraneInstructions3, CFSpeech),
Wait(4),
Func(rToon.setChatAbsolute, TTL.ResistanceToonCraneInstructions4, CFSpeech),
Wait(4),
Func(rToon.clearChat),
Func(base.camera.setPosHpr, 102, -323.6, 0.9, -10.6, 14, 0),
Func(goon.request, 'Recovery'),
Wait(2),
Func(base.camera.setPosHpr, 95.4, -332.6, 4.2, 167.1, -13.2, 0),
Func(rToon.setChatAbsolute, TTL.ResistanceToonGetaway, CFSpeech),
Func(rToon.animFSM.request, 'jump'),
Wait(1.8),
Func(rToon.clearChat),
Func(base.camera.setPosHpr, 109.1, -300.7, 13.9, -15.6, -13.6, 0),
Func(rToon.animFSM.request, 'run'),
Func(goon.request, 'Walk'),
Parallel(
self.door3.posInterval(3, VBase3(0, 0, 0)),
rToon.posHprInterval(3, Point3(136, -212.9, 0), VBase3(-14, 0, 0), startPos=Point3(110.8, -292.7, 0), startHpr=VBase3(-14, 0, 0)),
goon.posHprInterval(3, Point3(125.2, -243.5, 0), VBase3(-14, 0, 0), startPos=Point3(104.8, -309.5, 0), startHpr=VBase3(-14, 0, 0))),
Func(self.__hideFakeGoons),
Func(crane.request, 'Free'),
Func(self.getGeomNode().setH, 0),
self.moveToonsToBattleThreePos(self.involvedToons),
Func(self.__showToons))
return Sequence(Func(base.camera.reparentTo, self), Func(base.camera.setPosHpr, 0, -27, 25, 0, -18, 0), track)
def moveToonsToBattleThreePos(self, toons):
track = Parallel()
for i in xrange(len(toons)):
toon = base.cr.doId2do.get(toons[i])
if toon:
posHpr = ToontownGlobals.CashbotToonsBattleThreeStartPosHpr[i]
pos = Point3(*posHpr[0:3])
hpr = VBase3(*posHpr[3:6])
track.append(toon.posHprInterval(0.2, pos, hpr))
return track
def makeBossFleeMovie(self):
hadEnough = TTLocalizer.CashbotBossHadEnough
outtaHere = TTLocalizer.CashbotBossOuttaHere
loco = loader.loadModel('phase_10/models/cogHQ/CashBotLocomotive')
car1 = loader.loadModel('phase_10/models/cogHQ/CashBotBoxCar')
car2 = loader.loadModel('phase_10/models/cogHQ/CashBotTankCar')
trainPassingSfx = base.loadSfx('phase_10/audio/sfx/CBHQ_TRAIN_pass.ogg')
boomSfx = loader.loadSfx('phase_3.5/audio/sfx/ENC_cogfall_apart.ogg')
rollThroughDoor = self.rollBossToPoint(fromPos=Point3(120, -280, 0), fromHpr=None, toPos=Point3(120, -250, 0), toHpr=None, reverse=0)
rollTrack = Sequence(Func(self.getGeomNode().setH, 180), rollThroughDoor[0], Func(self.getGeomNode().setH, 0))
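# Stagger the train cars: each interval below moves a car 150 units in 0.5 s
# (300 units/s), and the cars are spaced 80 units apart, so each car starts
# 80/300 s after the previous one.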
g = 80.0 / 300.0
trainTrack = Track(
(0 * g, loco.posInterval(0.5, Point3(0, -242, 0), startPos=Point3(150, -242, 0))),
(1 * g, car2.posInterval(0.5, Point3(0, -242, 0), startPos=Point3(150, -242, 0))),
(2 * g, car1.posInterval(0.5, Point3(0, -242, 0), startPos=Point3(150, -242, 0))),
(3 * g, car2.posInterval(0.5, Point3(0, -242, 0), startPos=Point3(150, -242, 0))),
(4 * g, car1.posInterval(0.5, Point3(0, -242, 0), startPos=Point3(150, -242, 0))),
(5 * g, car2.posInterval(0.5, Point3(0, -242, 0), startPos=Point3(150, -242, 0))),
(6 * g, car1.posInterval(0.5, Point3(0, -242, 0), startPos=Point3(150, -242, 0))),
(7 * g, car2.posInterval(0.5, Point3(0, -242, 0), startPos=Point3(150, -242, 0))),
(8 * g, car1.posInterval(0.5, Point3(0, -242, 0), startPos=Point3(150, -242, 0))),
(9 * g, car2.posInterval(0.5, Point3(0, -242, 0), startPos=Point3(150, -242, 0))),
(10 * g, car1.posInterval(0.5, Point3(0, -242, 0), startPos=Point3(150, -242, 0))),
(11 * g, car2.posInterval(0.5, Point3(0, -242, 0), startPos=Point3(150, -242, 0))),
(12 * g, car1.posInterval(0.5, Point3(0, -242, 0), startPos=Point3(150, -242, 0))),
(13 * g, car2.posInterval(0.5, Point3(0, -242, 0), startPos=Point3(150, -242, 0))),
(14 * g, car1.posInterval(0.5, Point3(0, -242, 0), startPos=Point3(150, -242, 0))))
bossTrack = Track(
(0.0, Sequence(
Func(base.camera.reparentTo, render),
Func(base.camera.setPosHpr, 105, -280, 20, -158, -3, 0),
Func(self.reparentTo, render),
Func(self.show),
Func(self.clearChat),
Func(self.setPosHpr, *ToontownGlobals.CashbotBossBattleThreePosHpr),
Func(self.reverseHead),
ActorInterval(self, 'Fb_firstHit'),
ActorInterval(self, 'Fb_down2Up'))),
(1.0, Func(self.setChatAbsolute, hadEnough, CFSpeech)),
(5.5, Parallel(
Func(base.camera.setPosHpr, 100, -315, 16, -20, 0, 0),
Func(self.hideBattleThreeObjects),
Func(self.forwardHead),
Func(self.loop, 'Ff_neutral'),
rollTrack,
self.door3.posInterval(2.5, Point3(0, 0, 25), startPos=Point3(0, 0, 18)))),
(5.5, Func(self.setChatAbsolute, outtaHere, CFSpeech)),
(5.5, SoundInterval(trainPassingSfx)),
(8.1, Func(self.clearChat)),
(9.4, Sequence(
Func(loco.reparentTo, render),
Func(car1.reparentTo, render),
Func(car2.reparentTo, render),
trainTrack,
Func(loco.detachNode),
Func(car1.detachNode),
Func(car2.detachNode),
Wait(2))),
(9.5, SoundInterval(boomSfx)),
(9.5, Sequence(
self.posInterval(0.4, Point3(0, -250, 0)),
Func(self.stash))))
return bossTrack
def grabObject(self, obj):
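# Attach a grabbed object (the safe or goon held by the magnet) to the
# boss's neck and lerp it into carrying position, revealing the eyes.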
obj.wrtReparentTo(self.neck)
obj.hideShadows()
obj.stashCollisions()
if obj.lerpInterval:
obj.lerpInterval.finish()
obj.lerpInterval = Parallel(obj.posInterval(ToontownGlobals.CashbotBossToMagnetTime, Point3(-1, 0, 0.2)), obj.quatInterval(ToontownGlobals.CashbotBossToMagnetTime, VBase3(0, -90, 90)), Sequence(Wait(ToontownGlobals.CashbotBossToMagnetTime), ShowInterval(self.eyes)), obj.toMagnetSoundInterval)
obj.lerpInterval.start()
self.heldObject = obj
def dropObject(self, obj):
if obj.lerpInterval:
obj.lerpInterval.finish()
obj.lerpInterval = None
obj = self.heldObject
obj.wrtReparentTo(render)
obj.setHpr(obj.getH(), 0, 0)
self.eyes.hide()
obj.showShadows()
obj.unstashCollisions()
self.heldObject = None
return
def setBossDamage(self, bossDamage):
if bossDamage > self.bossDamage:
delta = bossDamage - self.bossDamage
self.flashRed()
self.doAnimate('hit', now=1)
self.showHpText(-delta, scale=5)
self.bossDamage = bossDamage
self.updateHealthBar()
def setRewardId(self, rewardId):
self.rewardId = rewardId
def d_applyReward(self):
self.sendUpdate('applyReward', [])
def stunAllGoons(self):
for goon in self.goons:
if goon.state == 'Walk' or goon.state == 'Battle':
goon.demand('Stunned')
goon.sendUpdate('requestStunned', [0])
def destroyAllGoons(self):
for goon in self.goons:
if goon.state != 'Off' and not goon.isDead:
goon.b_destroyGoon()
def deactivateCranes(self):
for crane in self.cranes.values():
crane.demand('Free')
def hideBattleThreeObjects(self):
for goon in self.goons:
goon.demand('Off')
for safe in self.safes.values():
safe.demand('Off')
for crane in self.cranes.values():
crane.demand('Off')
def __doPhysics(self, task):
dt = globalClock.getDt()
self.physicsMgr.doPhysics(dt)
return Task.cont
def __hideToons(self):
for toonId in self.involvedToons:
toon = self.cr.doId2do.get(toonId)
if toon:
toon.hide()
def __showToons(self):
for toonId in self.involvedToons:
toon = self.cr.doId2do.get(toonId)
if toon:
toon.show()
def __arrangeToonsAroundResistanceToon(self):
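# Fan the toons out in an arc facing the resistance toon: 15 degrees
# apart, centered on the 90-degree mark, at a fixed radius.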
radius = 7
numToons = len(self.involvedToons)
center = (numToons - 1) / 2.0
for i in xrange(numToons):
toon = self.cr.doId2do.get(self.involvedToons[i])
if toon:
angle = 90 - 15 * (i - center)
radians = angle * math.pi / 180.0
x = math.cos(radians) * radius
y = math.sin(radians) * radius
toon.setPos(self.resistanceToon, x, y, 0)
toon.headsUp(self.resistanceToon)
toon.loop('neutral')
toon.show()
def __talkAboutPromotion(self, speech):
if self.prevCogSuitLevel < ToontownGlobals.MaxCogSuitLevel:
deptIndex = CogDisguiseGlobals.dept2deptIndex(self.style.dept)
cogLevels = base.localAvatar.getCogLevels()
newCogSuitLevel = cogLevels[deptIndex]
cogTypes = base.localAvatar.getCogTypes()
maxCogSuitLevel = (SuitDNA.levelsPerSuit-1) + cogTypes[deptIndex]
if self.prevCogSuitLevel != maxCogSuitLevel:
speech += TTLocalizer.ResistanceToonLevelPromotion
if newCogSuitLevel == maxCogSuitLevel:
if newCogSuitLevel != ToontownGlobals.MaxCogSuitLevel:
suitIndex = (SuitDNA.suitsPerDept*deptIndex) + cogTypes[deptIndex]
cogTypeStr = SuitDNA.suitHeadTypes[suitIndex]
cogName = SuitBattleGlobals.SuitAttributes[cogTypeStr]['name']
speech += TTLocalizer.ResistanceToonSuitPromotion % cogName
else:
speech += TTLocalizer.ResistanceToonMaxed % (ToontownGlobals.MaxCogSuitLevel + 1)
return speech
def enterOff(self):
DistributedBossCog.DistributedBossCog.enterOff(self)
if self.resistanceToon:
self.resistanceToon.clearChat()
def enterWaitForToons(self):
DistributedBossCog.DistributedBossCog.enterWaitForToons(self)
self.detachNode()
self.geom.hide()
self.resistanceToon.removeActive()
def exitWaitForToons(self):
DistributedBossCog.DistributedBossCog.exitWaitForToons(self)
self.geom.show()
self.resistanceToon.addActive()
def enterElevator(self):
DistributedBossCog.DistributedBossCog.enterElevator(self)
self.detachNode()
self.resistanceToon.removeActive()
self.endVault.stash()
self.midVault.unstash()
self.__showResistanceToon(True)
base.camLens.setMinFov(ToontownGlobals.CFOElevatorFov/(4./3.))
def exitElevator(self):
DistributedBossCog.DistributedBossCog.exitElevator(self)
self.resistanceToon.addActive()
def enterIntroduction(self):
self.detachNode()
self.stopAnimate()
self.endVault.unstash()
self.evWalls.stash()
self.midVault.unstash()
self.__showResistanceToon(True)
base.playMusic(self.stingMusic, looping=1, volume=0.9)
DistributedBossCog.DistributedBossCog.enterIntroduction(self)
def exitIntroduction(self):
DistributedBossCog.DistributedBossCog.exitIntroduction(self)
self.stingMusic.stop()
def enterBattleOne(self):
DistributedBossCog.DistributedBossCog.enterBattleOne(self)
self.reparentTo(render)
self.setPosHpr(*ToontownGlobals.CashbotBossBattleOnePosHpr)
self.show()
self.pelvis.setHpr(self.pelvisReversedHpr)
self.doAnimate()
self.endVault.stash()
self.midVault.unstash()
self.__hideResistanceToon()
def exitBattleOne(self):
DistributedBossCog.DistributedBossCog.exitBattleOne(self)
def enterPrepareBattleThree(self):
self.controlToons()
NametagGlobals.setWant2dNametags(False)
intervalName = 'PrepareBattleThreeMovie'
delayDeletes = []
self.movieCrane = self.cranes[0]
self.movieSafe = self.safes[1]
self.movieCrane.request('Movie')
seq = Sequence(self.makePrepareBattleThreeMovie(delayDeletes, self.movieCrane, self.movieSafe), Func(self.__beginBattleThree), name=intervalName)
seq.delayDeletes = delayDeletes
seq.start()
self.storeInterval(seq, intervalName)
self.endVault.unstash()
self.evWalls.stash()
self.midVault.unstash()
self.__showResistanceToon(False)
taskMgr.add(self.__doPhysics, self.uniqueName('physics'), priority=25)
def __beginBattleThree(self):
intervalName = 'PrepareBattleThreeMovie'
self.clearInterval(intervalName)
self.doneBarrier('PrepareBattleThree')
def exitPrepareBattleThree(self):
intervalName = 'PrepareBattleThreeMovie'
self.clearInterval(intervalName)
self.unstickToons()
self.releaseToons()
if self.newState == 'BattleThree':
self.movieCrane.request('Free')
self.movieSafe.request('Initial')
NametagGlobals.setWant2dNametags(True)
ElevatorUtils.closeDoors(self.leftDoor, self.rightDoor, ElevatorConstants.ELEVATOR_CFO)
taskMgr.remove(self.uniqueName('physics'))
def enterBattleThree(self):
DistributedBossCog.DistributedBossCog.enterBattleThree(self)
self.clearChat()
self.resistanceToon.clearChat()
self.reparentTo(render)
self.setPosHpr(*ToontownGlobals.CashbotBossBattleThreePosHpr)
self.happy = 1
self.raised = 1
self.forward = 1
self.doAnimate()
self.endVault.unstash()
self.evWalls.unstash()
self.midVault.stash()
self.__hideResistanceToon()
localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov)
self.generateHealthBar()
self.updateHealthBar()
base.playMusic(self.battleThreeMusic, looping=1, volume=0.9)
taskMgr.add(self.__doPhysics, self.uniqueName('physics'), priority=25)
def exitBattleThree(self):
DistributedBossCog.DistributedBossCog.exitBattleThree(self)
bossDoneEventName = self.uniqueName('DestroyedBoss')
self.ignore(bossDoneEventName)
self.stopAnimate()
self.cleanupAttacks()
self.setDizzy(0)
self.removeHealthBar()
localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov)
if self.newState != 'Victory':
self.battleThreeMusic.stop()
taskMgr.remove(self.uniqueName('physics'))
def enterVictory(self):
self.cleanupIntervals()
self.reparentTo(render)
self.setPosHpr(*ToontownGlobals.CashbotBossBattleThreePosHpr)
self.stopAnimate()
self.endVault.unstash()
self.evWalls.unstash()
self.midVault.unstash()
self.__hideResistanceToon()
self.__hideToons()
self.clearChat()
self.resistanceToon.clearChat()
self.deactivateCranes()
if self.cranes:
self.cranes[1].demand('Off')
self.releaseToons(finalBattle=1)
if self.hasLocalToon():
self.toMovieMode()
intervalName = 'VictoryMovie'
seq = Sequence(self.makeBossFleeMovie(), Func(self.__continueVictory), name=intervalName)
seq.start()
self.storeInterval(seq, intervalName)
if self.oldState != 'BattleThree':
base.playMusic(self.battleThreeMusic, looping=1, volume=0.9)
def __continueVictory(self):
self.doneBarrier('Victory')
def exitVictory(self):
self.cleanupIntervals()
if self.newState != 'Reward':
if self.hasLocalToon():
self.toWalkMode()
self.__showToons()
self.door3.setPos(0, 0, 0)
if self.newState != 'Reward':
self.battleThreeMusic.stop()
def enterReward(self):
self.cleanupIntervals()
self.clearChat()
self.resistanceToon.clearChat()
self.stash()
self.stopAnimate()
self.controlToons()
panelName = self.uniqueName('reward')
self.rewardPanel = RewardPanel.RewardPanel(panelName)
victory, camVictory, skipper = MovieToonVictory.doToonVictory(1, self.involvedToons, self.toonRewardIds, self.toonRewardDicts, self.deathList, self.rewardPanel, allowGroupShot=0, uberList=self.uberList, noSkip=True)
ival = Sequence(Parallel(victory, camVictory), Func(self.__doneReward))
intervalName = 'RewardMovie'
delayDeletes = []
for toonId in self.involvedToons:
toon = self.cr.doId2do.get(toonId)
if toon:
delayDeletes.append(DelayDelete.DelayDelete(toon, 'CashbotBoss.enterReward'))
ival.delayDeletes = delayDeletes
ival.start()
self.storeInterval(ival, intervalName)
if self.oldState != 'Victory':
base.playMusic(self.battleThreeMusic, looping=1, volume=0.9)
def __doneReward(self):
self.doneBarrier('Reward')
self.toWalkMode()
def exitReward(self):
intervalName = 'RewardMovie'
self.clearInterval(intervalName)
if self.newState != 'Epilogue':
self.releaseToons()
self.unstash()
self.rewardPanel.destroy()
del self.rewardPanel
self.battleThreeMusic.stop()
def enterEpilogue(self):
self.cleanupIntervals()
self.clearChat()
self.resistanceToon.clearChat()
self.stash()
self.stopAnimate()
self.controlToons()
self.__showResistanceToon(False)
self.resistanceToon.setPosHpr(*ToontownGlobals.CashbotBossBattleThreePosHpr)
self.resistanceToon.loop('neutral')
self.__arrangeToonsAroundResistanceToon()
base.camera.reparentTo(render)
base.camera.setPos(self.resistanceToon, -9, 12, 6)
base.camera.lookAt(self.resistanceToon, 0, 0, 3)
intervalName = 'EpilogueMovie'
text = ResistanceChat.getChatText(self.rewardId)
menuIndex, itemIndex = ResistanceChat.decodeId(self.rewardId)
value = ResistanceChat.getItemValue(self.rewardId)
if menuIndex == ResistanceChat.RESISTANCE_TOONUP:
if value == -1:
instructions = TTLocalizer.ResistanceToonToonupAllInstructions
else:
instructions = TTLocalizer.ResistanceToonToonupInstructions % value
elif menuIndex == ResistanceChat.RESISTANCE_MONEY:
if value == -1:
instructions = TTLocalizer.ResistanceToonMoneyAllInstructions
else:
instructions = TTLocalizer.ResistanceToonMoneyInstructions % value
elif menuIndex == ResistanceChat.RESISTANCE_RESTOCK:
if value == -1:
instructions = TTLocalizer.ResistanceToonRestockAllInstructions
else:
trackName = TTLocalizer.BattleGlobalTracks[value]
instructions = TTLocalizer.ResistanceToonRestockInstructions % trackName
speech = TTLocalizer.ResistanceToonCongratulations % (text, instructions)
speech = self.__talkAboutPromotion(speech)
self.resistanceToon.setLocalPageChat(speech, 0)
self.accept('nextChatPage', self.__epilogueChatNext)
self.accept('doneChatPage', self.__epilogueChatDone)
base.playMusic(self.epilogueMusic, looping=1, volume=0.9)
def __epilogueChatNext(self, pageNumber, elapsed):
if pageNumber == 1:
toon = self.resistanceToon
playRate = 0.75
track = Sequence(ActorInterval(toon, 'victory', playRate=playRate, startFrame=0, endFrame=9), ActorInterval(toon, 'victory', playRate=playRate, startFrame=9, endFrame=0), Func(self.resistanceToon.loop, 'neutral'))
intervalName = 'EpilogueMovieToonAnim'
self.storeInterval(track, intervalName)
track.start()
elif pageNumber == 3:
self.d_applyReward()
ResistanceChat.doEffect(self.rewardId, self.resistanceToon, self.involvedToons)
def __epilogueChatDone(self, elapsed):
self.resistanceToon.setChatAbsolute(TTLocalizer.CagedToonGoodbye, CFSpeech)
self.ignore('nextChatPage')
self.ignore('doneChatPage')
intervalName = 'EpilogueMovieToonAnim'
self.clearInterval(intervalName)
track = Parallel(Sequence(ActorInterval(self.resistanceToon, 'wave'), Func(self.resistanceToon.loop, 'neutral')), Sequence(Wait(0.5), Func(self.localToonToSafeZone)))
self.storeInterval(track, intervalName)
track.start()
def exitEpilogue(self):
self.clearInterval('EpilogueMovieToonAnim')
self.unstash()
self.epilogueMusic.stop()
def enterFrolic(self):
DistributedBossCog.DistributedBossCog.enterFrolic(self)
self.setPosHpr(*ToontownGlobals.CashbotBossBattleOnePosHpr)
self.releaseToons()
if self.hasLocalToon():
self.toWalkMode()
self.door3.setZ(25)
self.door2.setZ(25)
self.endVault.unstash()
self.evWalls.stash()
self.midVault.unstash()
self.__hideResistanceToon()
def exitFrolic(self):
self.door3.setZ(0)
self.door2.setZ(0)
| mit |
rbalint/xbmc | tools/EventClients/lib/python/zeroconf.py | 181 | 4874 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2013 Team XBMC
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Simple wrapper around Avahi
"""
__author__ = "d4rk@xbmc.org"
__version__ = "0.1"
try:
import time
import dbus, gobject, avahi
from dbus import DBusException
from dbus.mainloop.glib import DBusGMainLoop
except Exception, e:
print "Zeroconf support disabled. To enable, install the following Python modules:"
print " dbus, gobject, avahi"
pass
SERVICE_FOUND = 1
SERVICE_LOST = 2
class Browser:
""" Simple Zeroconf Browser """
def __init__( self, service_types = {} ):
"""
service_types - dictionary of services => handlers
"""
self._stop = False
self.loop = DBusGMainLoop()
self.bus = dbus.SystemBus( mainloop=self.loop )
self.server = dbus.Interface( self.bus.get_object( avahi.DBUS_NAME, '/' ),
'org.freedesktop.Avahi.Server')
self.handlers = {}
for type in service_types.keys():
self.add_service( type, service_types[ type ] )
def add_service( self, type, handler = None ):
"""
Add a service that the browser should watch for
"""
self.sbrowser = dbus.Interface(
self.bus.get_object(
avahi.DBUS_NAME,
self.server.ServiceBrowserNew(
avahi.IF_UNSPEC,
avahi.PROTO_UNSPEC,
type,
'local',
dbus.UInt32(0)
)
),
avahi.DBUS_INTERFACE_SERVICE_BROWSER)
self.handlers[ type ] = handler
self.sbrowser.connect_to_signal("ItemNew", self._new_item_handler)
self.sbrowser.connect_to_signal("ItemRemove", self._remove_item_handler)
def run(self):
"""
Run the gobject event loop
"""
# Don't use loop.run() because Python's GIL will block all threads
loop = gobject.MainLoop()
context = loop.get_context()
while not self._stop:
if context.pending():
context.iteration( True )
else:
time.sleep(1)
def stop(self):
"""
Stop the gobject event loop
"""
self._stop = True
def _new_item_handler(self, interface, protocol, name, stype, domain, flags):
if flags & avahi.LOOKUP_RESULT_LOCAL:
# local service, skip
return
self.server.ResolveService(
interface,
protocol,
name,
stype,
domain,
avahi.PROTO_UNSPEC,
dbus.UInt32(0),
reply_handler = self._service_resolved_handler,
error_handler = self._error_handler
)
return
def _remove_item_handler(self, interface, protocol, name, stype, domain, flags):
if self.handlers[ stype ]:
# FIXME: more details needed here
try:
self.handlers[ stype ]( SERVICE_LOST, { 'type' : stype, 'name' : name } )
except:
pass
def _service_resolved_handler( self, *args ):
service = {}
service['type'] = str( args[3] )
service['name'] = str( args[2] )
service['address'] = str( args[7] )
service['hostname'] = str( args[5] )
service['port'] = int( args[8] )
# if the service type has a handler call it
try:
if self.handlers[ args[3] ]:
self.handlers[ args[3] ]( SERVICE_FOUND, service )
except:
pass
def _error_handler( self, *args ):
print 'ERROR: %s ' % str( args[0] )
if __name__ == "__main__":
def service_handler( found, service ):
print "---------------------"
print ['Found Service', 'Lost Service'][found-1]
for key in service.keys():
print key+" : "+str( service[key] )
browser = Browser( {
'_xbmc-events._udp' : service_handler,
'_xbmc-web._tcp' : service_handler
} )
browser.run()
| gpl-2.0 |
bcoca/ansible | lib/ansible/modules/uri.py | 19 | 28788 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, Romeo Theriault <romeot () hawaii.edu>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: uri
short_description: Interacts with webservices
description:
- Interacts with HTTP and HTTPS web services and supports Digest, Basic and WSSE
HTTP authentication mechanisms.
- For Windows targets, use the M(ansible.windows.win_uri) module instead.
version_added: "1.1"
options:
url:
description:
- HTTP or HTTPS URL in the form (http|https)://host.domain[:port]/path
type: str
required: true
dest:
description:
- A path of where to download the file to (if desired). If I(dest) is a
directory, the basename of the file on the remote server will be used.
type: path
url_username:
description:
- A username for the module to use for Digest, Basic or WSSE authentication.
type: str
aliases: [ user ]
url_password:
description:
- A password for the module to use for Digest, Basic or WSSE authentication.
type: str
aliases: [ password ]
body:
description:
- The body of the http request/response to the web service. If C(body_format) is set
to 'json' it will take an already formatted JSON string or convert a data structure
into JSON.
- If C(body_format) is set to 'form-urlencoded' it will convert a dictionary
or list of tuples into an 'application/x-www-form-urlencoded' string. (Added in v2.7)
- If C(body_format) is set to 'form-multipart' it will convert a dictionary
into a 'multipart/form-data' body. (Added in v2.10)
type: raw
body_format:
description:
- The serialization format of the body. When set to C(json), C(form-multipart), or C(form-urlencoded), encodes
the body argument, if needed, and automatically sets the Content-Type header accordingly.
- As of C(2.3) it is possible to override the `Content-Type` header, when
set to C(json) or C(form-urlencoded) via the I(headers) option.
- The 'Content-Type' header cannot be overridden when using C(form-multipart)
- C(form-urlencoded) was added in v2.7.
- C(form-multipart) was added in v2.10.
type: str
choices: [ form-urlencoded, json, raw, form-multipart ]
default: raw
version_added: "2.0"
method:
description:
- The HTTP method of the request or response.
- In more recent versions we do not restrict the method at the module level anymore
but it still must be a valid method accepted by the service handling the request.
type: str
default: GET
return_content:
description:
- Whether or not to return the body of the response as a "content" key in
the dictionary result, whether the request succeeded or failed.
- Independently of this option, if the reported Content-type is "application/json", then the JSON is
always loaded into a key called C(json) in the dictionary results.
type: bool
default: no
force_basic_auth:
description:
- Force the sending of the Basic authentication header upon initial request.
- The library used by the uri module only sends authentication information when a webservice
responds to an initial request with a 401 status. Since some basic auth services do not properly
send a 401, logins will fail.
type: bool
default: no
follow_redirects:
description:
- Whether or not the URI module should follow redirects. C(all) will follow all redirects.
C(safe) will follow only "safe" redirects, where "safe" means that the client is only
doing a GET or HEAD on the URI to which it is being redirected. C(none) will not follow
any redirects. Note that C(yes) and C(no) choices are accepted for backwards compatibility,
where C(yes) is the equivalent of C(all) and C(no) is the equivalent of C(safe). C(yes) and C(no)
are deprecated and will be removed in some future version of Ansible.
type: str
choices: ['all', 'no', 'none', 'safe', 'urllib2', 'yes']
default: safe
creates:
description:
- A filename, when it already exists, this step will not be run.
type: path
removes:
description:
- A filename, when it does not exist, this step will not be run.
type: path
status_code:
description:
- A list of valid, numeric, HTTP status codes that signifies success of the request.
type: list
elements: int
default: [ 200 ]
timeout:
description:
- The socket level timeout in seconds
type: int
default: 30
headers:
description:
- Add custom HTTP headers to a request in the format of a YAML hash. As
of C(2.3) supplying C(Content-Type) here will override the header
generated by supplying C(json) or C(form-urlencoded) for I(body_format).
type: dict
version_added: '2.1'
validate_certs:
description:
- If C(no), SSL certificates will not be validated.
- This should only be set to C(no) on personally controlled sites using self-signed certificates.
- Prior to 1.9.2 the code defaulted to C(no).
type: bool
default: yes
version_added: '1.9.2'
client_cert:
description:
- PEM formatted certificate chain file to be used for SSL client authentication.
- This file can also include the key; if the key is included, I(client_key) is not required.
type: path
version_added: '2.4'
client_key:
description:
- PEM formatted file that contains your private key to be used for SSL client authentication.
- If I(client_cert) contains both the certificate and key, this option is not required.
type: path
version_added: '2.4'
ca_path:
description:
- PEM formatted file that contains a CA certificate to be used for validation
type: path
version_added: '2.11'
src:
description:
- Path to file to be submitted to the remote server.
- Cannot be used with I(body).
type: path
version_added: '2.7'
remote_src:
description:
- If C(no), the module will search for the C(src) on the controller node.
- If C(yes), the module will search for the C(src) on the managed (remote) node.
type: bool
default: no
version_added: '2.7'
force:
description:
- If C(yes) do not get a cached copy.
- Alias C(thirsty) has been deprecated and will be removed in 2.13.
type: bool
default: no
aliases: [ thirsty ]
use_proxy:
description:
- If C(no), it will not use a proxy, even if one is defined in an environment variable on the target hosts.
type: bool
default: yes
unix_socket:
description:
- Path to Unix domain socket to use for connection
type: path
version_added: '2.8'
http_agent:
description:
- Header to identify as, generally appears in web server logs.
type: str
default: ansible-httpget
use_gssapi:
description:
- Use GSSAPI to perform the authentication, typically this is for Kerberos or Kerberos through Negotiate
authentication.
- Requires the Python library L(gssapi,https://github.com/pythongssapi/python-gssapi) to be installed.
- Credentials for GSSAPI can be specified with I(url_username)/I(url_password) or with the GSSAPI env var
C(KRB5CCNAME) that specifies a custom Kerberos credential cache.
- NTLM authentication is C(not) supported even if the GSSAPI mech for NTLM has been installed.
type: bool
default: no
version_added: '2.11'
notes:
- The dependency on httplib2 was removed in Ansible 2.1.
- The module returns all the HTTP headers in lower-case.
- For Windows targets, use the M(ansible.windows.win_uri) module instead.
seealso:
- module: ansible.builtin.get_url
- module: ansible.windows.win_uri
author:
- Romeo Theriault (@romeotheriault)
extends_documentation_fragment: files
'''
EXAMPLES = r'''
- name: Check that you can connect (GET) to a page and it returns a status 200
uri:
url: http://www.example.com
- name: Check that a page returns a status 200 and fail if the word AWESOME is not in the page contents
uri:
url: http://www.example.com
return_content: yes
register: this
failed_when: "'AWESOME' not in this.content"
- name: Create a JIRA issue
uri:
url: https://your.jira.example.com/rest/api/2/issue/
user: your_username
password: your_pass
method: POST
body: "{{ lookup('file','issue.json') }}"
force_basic_auth: yes
status_code: 201
body_format: json
- name: Login to a form based webpage, then use the returned cookie to access the app in later tasks
uri:
url: https://your.form.based.auth.example.com/index.php
method: POST
body_format: form-urlencoded
body:
name: your_username
password: your_password
enter: Sign in
status_code: 302
register: login
- name: Login to a form based webpage using a list of tuples
uri:
url: https://your.form.based.auth.example.com/index.php
method: POST
body_format: form-urlencoded
body:
- [ name, your_username ]
- [ password, your_password ]
- [ enter, Sign in ]
status_code: 302
register: login
- name: Upload a file via multipart/form-multipart
uri:
url: https://httpbin.org/post
method: POST
body_format: form-multipart
body:
file1:
filename: /bin/true
mime_type: application/octet-stream
file2:
content: text based file content
filename: fake.txt
mime_type: text/plain
text_form_field: value
- name: Connect to website using a previously stored cookie
uri:
url: https://your.form.based.auth.example.com/dashboard.php
method: GET
return_content: yes
headers:
Cookie: "{{ login.cookies_string }}"
- name: Queue build of a project in Jenkins
uri:
url: http://{{ jenkins.host }}/job/{{ jenkins.job }}/build?token={{ jenkins.token }}
user: "{{ jenkins.user }}"
password: "{{ jenkins.password }}"
method: GET
force_basic_auth: yes
status_code: 201
- name: POST from contents of local file
uri:
url: https://httpbin.org/post
method: POST
src: file.json
- name: POST from contents of remote file
uri:
url: https://httpbin.org/post
method: POST
src: /path/to/my/file.json
remote_src: yes
- name: Create workspaces in Log analytics Azure
uri:
url: https://www.mms.microsoft.com/Embedded/Api/ConfigDataSources/LogManagementData/Save
method: POST
body_format: json
status_code: [200, 202]
return_content: true
headers:
Content-Type: application/json
x-ms-client-workspace-path: /subscriptions/{{ sub_id }}/resourcegroups/{{ res_group }}/providers/microsoft.operationalinsights/workspaces/{{ w_spaces }}
x-ms-client-platform: ibiza
x-ms-client-auth-token: "{{ token_az }}"
body:
- name: Pause play until a URL is reachable from this host
uri:
url: "http://192.0.2.1/some/test"
follow_redirects: none
method: GET
register: _result
until: _result.status == 200
retries: 720 # 720 * 5 seconds = 1hour (60*60/5)
delay: 5 # Every 5 seconds
# There is an issue in a supporting Python library, discussed in
# https://github.com/ansible/ansible/issues/52705, where a proxy is defined
# but you want to bypass proxy use on CIDR masks by using no_proxy.
- name: Work around a python issue that doesn't support no_proxy envvar
uri:
follow_redirects: none
validate_certs: false
timeout: 5
url: "http://{{ ip_address }}:{{ port | default(80) }}"
register: uri_data
failed_when: false
changed_when: false
vars:
ip_address: 192.0.2.1
environment: |
{
{% for no_proxy in (lookup('env', 'no_proxy') | regex_replace('\s*,\s*', ' ') ).split() %}
{% if no_proxy | regex_search('\/') and
no_proxy | ipaddr('net') != '' and
no_proxy | ipaddr('net') != false and
ip_address | ipaddr(no_proxy) is not none and
ip_address | ipaddr(no_proxy) != false %}
'no_proxy': '{{ ip_address }}'
{% elif no_proxy | regex_search(':') != '' and
no_proxy | regex_search(':') != false and
no_proxy == ip_address + ':' + (port | default(80)) %}
'no_proxy': '{{ ip_address }}:{{ port | default(80) }}'
{% elif no_proxy | ipaddr('host') != '' and
no_proxy | ipaddr('host') != false and
no_proxy == ip_address %}
'no_proxy': '{{ ip_address }}'
{% elif no_proxy | regex_search('^(\*|)\.') != '' and
no_proxy | regex_search('^(\*|)\.') != false and
no_proxy | regex_replace('\*', '') in ip_address %}
'no_proxy': '{{ ip_address }}'
{% endif %}
{% endfor %}
}
'''
RETURN = r'''
# The return information includes all the HTTP headers in lower-case.
content:
description: The response body content.
returned: status not in status_code or return_content is true
type: str
sample: "{}"
cookies:
description: The cookie values placed in cookie jar.
returned: on success
type: dict
sample: {"SESSIONID": "[SESSIONID]"}
version_added: "2.4"
cookies_string:
description: The value for future request Cookie headers.
returned: on success
type: str
sample: "SESSIONID=[SESSIONID]"
version_added: "2.6"
elapsed:
description: The number of seconds that elapsed while performing the download.
returned: on success
type: int
sample: 23
msg:
description: The HTTP message from the request.
returned: always
type: str
sample: OK (unknown bytes)
redirected:
description: Whether the request was redirected.
returned: on success
type: bool
sample: false
status:
description: The HTTP status code from the request.
returned: always
type: int
sample: 200
url:
description: The actual URL used for the request.
returned: always
type: str
sample: https://www.ansible.com/
'''
import cgi
import datetime
import json
import os
import re
import shutil
import sys
import tempfile
from ansible.module_utils.basic import AnsibleModule, sanitize_keys
from ansible.module_utils.six import PY2, iteritems, string_types
from ansible.module_utils.six.moves.urllib.parse import urlencode, urlsplit
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.common._collections_compat import Mapping, Sequence
from ansible.module_utils.urls import fetch_url, prepare_multipart, url_argument_spec
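# Substrings of the reported Content-Type that suggest a JSON payload.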
JSON_CANDIDATES = ('text', 'json', 'javascript')
# List of response key names we do not want sanitize_keys() to change.
NO_MODIFY_KEYS = frozenset(
('msg', 'exception', 'warnings', 'deprecations', 'failed', 'skipped',
'changed', 'rc', 'stdout', 'stderr', 'elapsed', 'path', 'location',
'content_type')
)
def format_message(err, resp):
msg = resp.pop('msg')
return err + (' %s' % msg if msg else '')
def write_file(module, url, dest, content, resp):
# write the fetched response content to a temporary file first
fd, tmpsrc = tempfile.mkstemp(dir=module.tmpdir)
f = open(tmpsrc, 'wb')
try:
f.write(content)
except Exception as e:
os.remove(tmpsrc)
msg = format_message("Failed to create temporary content file: %s" % to_native(e), resp)
module.fail_json(msg=msg, **resp)
f.close()
checksum_src = None
checksum_dest = None
# raise an error if there is no tmpsrc file
if not os.path.exists(tmpsrc):
os.remove(tmpsrc)
msg = format_message("Source '%s' does not exist" % tmpsrc, resp)
module.fail_json(msg=msg, **resp)
if not os.access(tmpsrc, os.R_OK):
os.remove(tmpsrc)
msg = format_message("Source '%s' not readable" % tmpsrc, resp)
module.fail_json(msg=msg, **resp)
checksum_src = module.sha1(tmpsrc)
# check whether the dest file already exists
if os.path.exists(dest):
# raise an error if we lack permission to overwrite dest
if not os.access(dest, os.W_OK):
os.remove(tmpsrc)
msg = format_message("Destination '%s' not writable" % dest, resp)
module.fail_json(msg=msg, **resp)
if not os.access(dest, os.R_OK):
os.remove(tmpsrc)
msg = format_message("Destination '%s' not readable" % dest, resp)
module.fail_json(msg=msg, **resp)
checksum_dest = module.sha1(dest)
else:
if not os.access(os.path.dirname(dest), os.W_OK):
os.remove(tmpsrc)
msg = format_message("Destination dir '%s' not writable" % os.path.dirname(dest), resp)
module.fail_json(msg=msg, **resp)
if checksum_src != checksum_dest:
try:
shutil.copyfile(tmpsrc, dest)
except Exception as e:
os.remove(tmpsrc)
msg = format_message("failed to copy %s to %s: %s" % (tmpsrc, dest, to_native(e)), resp)
module.fail_json(msg=msg, **resp)
os.remove(tmpsrc)
def url_filename(url):
fn = os.path.basename(urlsplit(url)[2])
if fn == '':
return 'index.html'
return fn
def absolute_location(url, location):
"""Attempts to create an absolute URL based on initial URL, and
next URL, specifically in the case of a ``Location`` header.
"""
if '://' in location:
return location
elif location.startswith('/'):
parts = urlsplit(url)
base = url.replace(parts[2], '')
return '%s%s' % (base, location)
elif not location.startswith('/'):
base = os.path.dirname(url)
return '%s/%s' % (base, location)
else:
return location
def kv_list(data):
''' Convert data into a list of key-value tuples '''
if data is None:
return None
if isinstance(data, Sequence):
return list(data)
if isinstance(data, Mapping):
return list(data.items())
raise TypeError('cannot form-urlencode body, expect list or dict')
def form_urlencoded(body):
''' Convert data into a form-urlencoded string '''
if isinstance(body, string_types):
return body
if isinstance(body, (Mapping, Sequence)):
result = []
# Turn a list of lists into a list of tuples that urlencode accepts
for key, values in kv_list(body):
if isinstance(values, string_types) or not isinstance(values, (Mapping, Sequence)):
values = [values]
for value in values:
if value is not None:
result.append((to_text(key), to_text(value)))
return urlencode(result, doseq=True)
return body
def uri(module, url, dest, body, body_format, method, headers, socket_timeout, ca_path):
# if dest is set and is a directory, check whether we get redirected and
# set the filename from that url
redirected = False
redir_info = {}
r = {}
src = module.params['src']
if src:
try:
headers.update({
'Content-Length': os.stat(src).st_size
})
data = open(src, 'rb')
except OSError:
module.fail_json(msg='Unable to open source file %s' % src, elapsed=0)
else:
data = body
kwargs = {}
if dest is not None:
# Stash follow_redirects; in this block we don't want to follow redirects.
# We'll reset it back to the supplied value soon.
follow_redirects = module.params['follow_redirects']
module.params['follow_redirects'] = False
if os.path.isdir(dest):
# first check if we are redirected to a file download
_, redir_info = fetch_url(module, url, data=body,
headers=headers,
method=method,
timeout=socket_timeout, unix_socket=module.params['unix_socket'])
# if we are redirected, update the url with the location header,
# and update dest with the new url filename
if redir_info['status'] in (301, 302, 303, 307):
url = redir_info['location']
redirected = True
dest = os.path.join(dest, url_filename(url))
# if destination file already exist, only download if file newer
if os.path.exists(dest):
kwargs['last_mod_time'] = datetime.datetime.utcfromtimestamp(os.path.getmtime(dest))
# Reset follow_redirects back to the stashed value
module.params['follow_redirects'] = follow_redirects
resp, info = fetch_url(module, url, data=data, headers=headers,
method=method, timeout=socket_timeout, unix_socket=module.params['unix_socket'],
ca_path=ca_path,
**kwargs)
try:
content = resp.read()
except AttributeError:
# there was no content; the body of an error response
# may have been stored in the info as 'body'
content = info.pop('body', '')
if src:
# Try to close the open file handle
try:
data.close()
except Exception:
pass
r['redirected'] = redirected or info['url'] != url
r.update(redir_info)
r.update(info)
return r, content, dest
def main():
argument_spec = url_argument_spec()
argument_spec.update(
dest=dict(type='path'),
url_username=dict(type='str', aliases=['user']),
url_password=dict(type='str', aliases=['password'], no_log=True),
body=dict(type='raw'),
body_format=dict(type='str', default='raw', choices=['form-urlencoded', 'json', 'raw', 'form-multipart']),
src=dict(type='path'),
method=dict(type='str', default='GET'),
return_content=dict(type='bool', default=False),
follow_redirects=dict(type='str', default='safe', choices=['all', 'no', 'none', 'safe', 'urllib2', 'yes']),
creates=dict(type='path'),
removes=dict(type='path'),
status_code=dict(type='list', elements='int', default=[200]),
timeout=dict(type='int', default=30),
headers=dict(type='dict', default={}),
unix_socket=dict(type='path'),
remote_src=dict(type='bool', default=False),
ca_path=dict(type='path', default=None),
)
module = AnsibleModule(
argument_spec=argument_spec,
add_file_common_args=True,
mutually_exclusive=[['body', 'src']],
)
if module.params.get('thirsty'):
module.deprecate('The alias "thirsty" has been deprecated and will be removed, use "force" instead',
version='2.13', collection_name='ansible.builtin')
url = module.params['url']
body = module.params['body']
body_format = module.params['body_format'].lower()
method = module.params['method'].upper()
dest = module.params['dest']
return_content = module.params['return_content']
creates = module.params['creates']
removes = module.params['removes']
status_code = [int(x) for x in list(module.params['status_code'])]
socket_timeout = module.params['timeout']
ca_path = module.params['ca_path']
dict_headers = module.params['headers']
if not re.match('^[A-Z]+$', method):
module.fail_json(msg="Parameter 'method' needs to be a single word in uppercase, like GET or POST.")
if body_format == 'json':
# Encode the body unless it is already a string, in which case we assume it is pre-formatted JSON
if not isinstance(body, string_types):
body = json.dumps(body)
if 'content-type' not in [header.lower() for header in dict_headers]:
dict_headers['Content-Type'] = 'application/json'
elif body_format == 'form-urlencoded':
if not isinstance(body, string_types):
try:
body = form_urlencoded(body)
except ValueError as e:
module.fail_json(msg='failed to parse body as form_urlencoded: %s' % to_native(e), elapsed=0)
if 'content-type' not in [header.lower() for header in dict_headers]:
dict_headers['Content-Type'] = 'application/x-www-form-urlencoded'
elif body_format == 'form-multipart':
try:
content_type, body = prepare_multipart(body)
except (TypeError, ValueError) as e:
module.fail_json(msg='failed to parse body as form-multipart: %s' % to_native(e))
dict_headers['Content-Type'] = content_type
if creates is not None:
# do not run the request if creates=filename is set
# and the filename already exists. This allows idempotence
# of uri executions.
if os.path.exists(creates):
module.exit_json(stdout="skipped, since '%s' exists" % creates, changed=False)
if removes is not None:
# do not run the request if removes=filename is set
# and the filename does not exist. This allows idempotence
# of uri executions.
if not os.path.exists(removes):
module.exit_json(stdout="skipped, since '%s' does not exist" % removes, changed=False)
# Make the request
start = datetime.datetime.utcnow()
resp, content, dest = uri(module, url, dest, body, body_format, method,
dict_headers, socket_timeout, ca_path)
resp['elapsed'] = (datetime.datetime.utcnow() - start).seconds
resp['status'] = int(resp['status'])
resp['changed'] = False
# Write the file out if requested
if dest is not None:
if resp['status'] in status_code and resp['status'] != 304:
write_file(module, url, dest, content, resp)
# allow file attribute changes
resp['changed'] = True
module.params['path'] = dest
file_args = module.load_file_common_arguments(module.params, path=dest)
resp['changed'] = module.set_fs_attributes_if_different(file_args, resp['changed'])
resp['path'] = dest
# Transmogrify the headers, replacing '-' with '_', since variables don't
# work with dashes.
# In python3, the headers are title cased. Lowercase them to be
# compatible with the python2 behaviour.
uresp = {}
for key, value in iteritems(resp):
ukey = key.replace("-", "_").lower()
uresp[ukey] = value
if 'location' in uresp:
uresp['location'] = absolute_location(url, uresp['location'])
# Default content_encoding to try
content_encoding = 'utf-8'
if 'content_type' in uresp:
# Handle multiple Content-Type headers
charsets = []
content_types = []
for value in uresp['content_type'].split(','):
ct, params = cgi.parse_header(value)
if ct not in content_types:
content_types.append(ct)
if 'charset' in params:
if params['charset'] not in charsets:
charsets.append(params['charset'])
if content_types:
content_type = content_types[0]
if len(content_types) > 1:
module.warn(
'Received multiple conflicting Content-Type values (%s), using %s' % (', '.join(content_types), content_type)
)
if charsets:
content_encoding = charsets[0]
if len(charsets) > 1:
module.warn(
'Received multiple conflicting charset values (%s), using %s' % (', '.join(charsets), content_encoding)
)
u_content = to_text(content, encoding=content_encoding)
if any(candidate in content_type for candidate in JSON_CANDIDATES):
try:
js = json.loads(u_content)
uresp['json'] = js
except Exception:
if PY2:
sys.exc_clear() # Avoid false positive traceback in fail_json() on Python 2
else:
u_content = to_text(content, encoding=content_encoding)
if module.no_log_values:
uresp = sanitize_keys(uresp, module.no_log_values, NO_MODIFY_KEYS)
if resp['status'] not in status_code:
uresp['msg'] = 'Status code was %s and not %s: %s' % (resp['status'], status_code, uresp.get('msg', ''))
if return_content:
module.fail_json(content=u_content, **uresp)
else:
module.fail_json(**uresp)
elif return_content:
module.exit_json(content=u_content, **uresp)
else:
module.exit_json(**uresp)
if __name__ == '__main__':
main()
| gpl-3.0 |
NTesla/pattern | examples/05-vector/07-slp.py | 21 | 5861 | import os, sys; sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
import random
from codecs import open
from collections import defaultdict
from pattern.text import Model
from pattern.vector import shuffled, SLP
from pattern.en import lexicon, parsetree
from random import seed
# This example demonstrates how a Perceptron classifier
# can be used to construct an English language model
# (i.e., a classifier that predicts part-of-speech tags),
# by learning from a training set of tagged sentences.
# First we need training data: a corpus of manually annotated (= tagged) sentences.
# Typically, Penn Treebank is used, which contains texts from the Wall Street Journal (WSJ).
# In this example we will use the freely available Open American National Corpus (OANC).
print "load training data..."
def corpus(path, encoding="utf-8"):
""" Yields sentences of (word, tag)-tuples from the given corpus,
which is a .txt file with a sentence on each line,
with slash-encoded tokens (e.g., the/DT cat/NN).
"""
for s in open(path, encoding=encoding):
s = map(lambda w: w.split("/"), s.strip().split(" "))
s = map(lambda w: (w[0].replace("&slash;", "/"), w[1]), s)
yield s
# The corpus is included in the Pattern download zip, in pattern/test/corpora:
path = os.path.join(os.path.dirname(__file__), "..", "..", "test", "corpora", "tagged-en-oanc.txt")
data = list(corpus(path))
# A parser is typically based on a lexicon of known words (aka a tag dictionary),
# that contains frequent words and their most frequent part-of-speech tag.
# This approach is fast. However, some words can have more than one tag,
# depending on their context in the sentence (e.g., "a can" vs. "can I").
# When we train a language model (i.e., a classifier),
# we want to make sure that it captures all ambiguity,
# ignoring ambiguous entries in the lexicon,
# handling them with the classifier instead.
# For example, the lexicon in pattern.en will always tag "about" as IN (preposition),
# even though it can also be used as RB (adverb) in about 25% of the cases.
# We will add "about" to the set of words in the lexicon to ignore
# when using a language model.
print "load training lexicon..."
f = defaultdict(lambda: defaultdict(int)) # {word1: {tag1: count, tag2: count, ...}}
for s in data:
for w, tag in s:
f[w][tag] += 1
known, unknown = set(), set()
for w, tags in f.items():
n = sum(tags.values()) # total count
m = sorted(tags, key=tags.__getitem__, reverse=True)[0] # most frequent tag
if float(tags[m]) / n >= 0.97 and n > 1:
# Words that are always handled by the lexicon.
known.add(w)
if float(tags[m]) / n < 0.92 and w in lexicon:
# Words in the lexicon that should be ignored and handled by the model.
unknown.add(w)
# A language model is a classifier (e.g., NB, KNN, SVM, SLP)
# trained on words and their context (= words to the left & right in sentence),
# that predicts the part-of-speech tag of unknown words.
# Take a look at the Model class in pattern/text/__init__.py.
# You'll see an internal Model._v() method
# that creates a training vector from a given word and its context,
# using information such as word suffix, first letter (i.e., for proper nouns),
# the part-of-speech tags of preceding words, surrounding tags, etc.
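# As a rough illustration only (this is NOT the actual Model._v() code,
# just a hypothetical sketch of the kind of features it derives):
def example_features(w, prev=None, next=None):
    """ Returns a dict of illustrative context features for word w,
        where prev and next are (word, tag)-tuples or None.
    """
    f = {}
    f["suffix=" + w[-3:]] = 1 # word suffix, e.g., -ly suggests an adverb
    f["upper=%s" % w[:1].isupper()] = 1 # capitalized => possible proper noun
    f["digit=%s" % w.isdigit()] = 1 # numeric token
    if prev:
        f["prev word=" + prev[0]] = 1 # word to the left
        f["prev tag=" + prev[1]] = 1 # its part-of-speech tag
    if next:
        f["next word=" + next[0]] = 1 # word to the right
    return f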
# Perceptron (SLP, single-layer averaged perceptron) works well for language models.
# Perceptron is an error-driven classifier.
# When given a training example (e.g., tagged word + surrounding words),
# it will check if it could correctly predict this example.
# If not, it will adjust its weights.
# So the accuracy of the perceptron can be improved significantly
# by training in multiple iterations, averaging out all weights.
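# A minimal, self-contained sketch of that update rule (hypothetical; the
# real SLP class in pattern.vector differs in its internals and efficiency):
def averaged_perceptron(examples, tags, iterations=5):
    """ examples = list of (feature_dict, gold_tag) pairs.
        Returns averaged weights as {tag: {feature: weight}}.
    """
    weights = defaultdict(lambda: defaultdict(float))
    totals = defaultdict(lambda: defaultdict(float))
    n = 0
    for iteration in range(iterations):
        for features, gold in examples:
            n += 1
            # Predict the highest-scoring tag for these features.
            guess = max(tags, key=lambda t: sum(weights[t][f] * v for f, v in features.items()))
            if guess != gold:
                # Error-driven update: promote the correct tag, demote the guess.
                for f, v in features.items():
                    weights[gold][f] += v
                    weights[guess][f] -= v
            # Accumulate every weight after each example (slow but clear),
            # so the final weights are an average over all updates.
            for t in weights:
                for f in weights[t]:
                    totals[t][f] += weights[t][f]
    return dict((t, dict((f, totals[t][f] / n) for f in totals[t])) for t in totals)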
# This will take several minutes.
# If you want it to run faster for experimentation,
# use less iterations or less data in the code below:
print "training model..."
seed(0) # Lock random list shuffling so we can compare.
m = Model(known=known, unknown=unknown, classifier=SLP())
for iteration in range(5):
for s in shuffled(data[:20000]):
prev = None
next = None
for i, (w, tag) in enumerate(s):
if i < len(s) - 1:
next = s[i+1]
m.train(w, tag, prev, next)
prev = (w, tag)
next = None
f = os.path.join(os.path.dirname(__file__), "en-model.slp")
m.save(f, final=True)
# Each parser in Pattern (pattern.en, pattern.es, pattern.it, ...)
# assumes that a lexicon of known words and their most frequent tag is available,
# along with some rules for morphology (suffixes, e.g., -ly = adverb)
# and context (surrounding words) for unknown words.
# If a language model is also available, it overrides these (simpler) rules.
# For English, this can raise accuracy from about 94% up to about 97%,
# and makes the parses about 3x faster.
print "loading model..."
f = os.path.join(os.path.dirname(__file__), "en-model.slp")
lexicon.model = Model.load(lexicon, f)
# To test the accuracy of the language model,
# we can compare a tagged corpus to the predicted tags.
# This corpus must be different from the one used for training.
# Typically, sections 22, 23 and 24 of the WSJ are used.
# Note that the WSJ contains standardized English.
# The accuracy will be lower when tested on, for example, informal tweets.
# A different classifier could be trained for informal language use.
print "testing..."
i, n = 0, 0
for s1 in data[-5000:]:
s2 = " ".join(w for w, tag in s1)
s2 = parsetree(s2, tokenize=False)
s2 = ((w.string, w.tag or "") for w in s2[0])
for (w1, tag1), (w2, tag2) in zip(s1, s2):
if tag1 == tag2.split("-")[0]: # NNP-PERS => NNP
i += 1
n += 1
print float(i) / n # accuracy | bsd-3-clause |
arangodb/arangodb | 3rdParty/V8/v7.9.317/tools/release/common_includes.py | 6 | 30305 | #!/usr/bin/env python
# Copyright 2013 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# for py2/py3 compatibility
from __future__ import print_function
import argparse
import datetime
import httplib
import glob
import imp
import json
import os
import re
import shutil
import subprocess
import sys
import textwrap
import time
import urllib
import urllib2
from git_recipes import GitRecipesMixin
from git_recipes import GitFailedException
CHANGELOG_FILE = "ChangeLog"
DAY_IN_SECONDS = 24 * 60 * 60
PUSH_MSG_GIT_RE = re.compile(r".* \(based on (?P<git_rev>[a-fA-F0-9]+)\)$")
PUSH_MSG_NEW_RE = re.compile(r"^Version \d+\.\d+\.\d+$")
VERSION_FILE = os.path.join("include", "v8-version.h")
WATCHLISTS_FILE = "WATCHLISTS"
RELEASE_WORKDIR = "/tmp/v8-release-scripts-work-dir/"
# V8 base directory.
V8_BASE = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Add our copy of depot_tools to the PATH as many scripts use tools from there,
# e.g. git-cl, fetch, git-new-branch etc., and we cannot depend on depot_tools
# being in the PATH on the LUCI bots.
path_to_depot_tools = os.path.join(V8_BASE, 'third_party', 'depot_tools')
new_path = path_to_depot_tools + os.pathsep + os.environ.get('PATH')
os.environ['PATH'] = new_path
def TextToFile(text, file_name):
with open(file_name, "w") as f:
f.write(text)
def AppendToFile(text, file_name):
with open(file_name, "a") as f:
f.write(text)
def LinesInFile(file_name):
with open(file_name) as f:
for line in f:
yield line
def FileToText(file_name):
with open(file_name) as f:
return f.read()
def MSub(rexp, replacement, text):
return re.sub(rexp, replacement, text, flags=re.MULTILINE)
def Fill80(line):
# Replace tabs and remove surrounding space.
line = re.sub(r"\t", r" ", line.strip())
# Format with 8 characters indentation and line width 80.
return textwrap.fill(line, width=80, initial_indent="        ",
subsequent_indent="        ")
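# For example (illustrative, assuming the eight-space indent described in
# the comment above):
#
# >>> Fill80("Fix a bug in the changelog formatter")
# '        Fix a bug in the changelog formatter'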
def MakeComment(text):
return MSub(r"^( ?)", "#", text)
def StripComments(text):
# Use split not splitlines to keep terminal newlines.
return "\n".join(filter(lambda x: not x.startswith("#"), text.split("\n")))
def MakeChangeLogBody(commit_messages, auto_format=False):
result = ""
added_titles = set()
for (title, body, author) in commit_messages:
# TODO(machenbach): Better check for reverts. A revert should remove the
# original CL from the actual log entry.
title = title.strip()
if auto_format:
# Only add commits that set the LOG flag correctly.
log_exp = r"^[ \t]*LOG[ \t]*=[ \t]*(?:(?:Y(?:ES)?)|TRUE)"
if not re.search(log_exp, body, flags=re.I | re.M):
continue
# Never include reverts.
if title.startswith("Revert "):
continue
# Don't include duplicates.
if title in added_titles:
continue
# Add and format the commit's title and bug reference. Move dot to the end.
added_titles.add(title)
raw_title = re.sub(r"(\.|\?|!)$", "", title)
bug_reference = MakeChangeLogBugReference(body)
space = " " if bug_reference else ""
result += "%s\n" % Fill80("%s%s%s." % (raw_title, space, bug_reference))
# Append the commit's author for reference if not in auto-format mode.
if not auto_format:
result += "%s\n" % Fill80("(%s)" % author.strip())
result += "\n"
return result
def MakeChangeLogBugReference(body):
"""Grep for "BUG=xxxx" lines in the commit message and convert them to
"(issue xxxx)".
"""
crbugs = []
v8bugs = []
def AddIssues(text):
ref = re.match(r"^BUG[ \t]*=[ \t]*(.+)$", text.strip())
if not ref:
return
for bug in ref.group(1).split(","):
bug = bug.strip()
match = re.match(r"^v8:(\d+)$", bug)
if match: v8bugs.append(int(match.group(1)))
else:
match = re.match(r"^(?:chromium:)?(\d+)$", bug)
if match: crbugs.append(int(match.group(1)))
# Add issues to crbugs and v8bugs.
map(AddIssues, body.splitlines())
# Filter duplicates, sort, stringify.
crbugs = map(str, sorted(set(crbugs)))
v8bugs = map(str, sorted(set(v8bugs)))
bug_groups = []
def FormatIssues(prefix, bugs):
if len(bugs) > 0:
plural = "s" if len(bugs) > 1 else ""
bug_groups.append("%sissue%s %s" % (prefix, plural, ", ".join(bugs)))
FormatIssues("", v8bugs)
FormatIssues("Chromium ", crbugs)
if len(bug_groups) > 0:
return "(%s)" % ", ".join(bug_groups)
else:
return ""
def SortingKey(version):
"""Key for sorting version number strings: '3.11' > '3.2.1.1'"""
version_keys = map(int, version.split("."))
# Fill up to full version numbers to normalize comparison.
while len(version_keys) < 4: # pragma: no cover
version_keys.append(0)
# Fill digits.
return ".".join(map("{0:04d}".format, version_keys))
# Some commands don't like the pipe, e.g. calling vi from within the script or
# from subscripts like git cl upload.
def Command(cmd, args="", prefix="", pipe=True, cwd=None):
cwd = cwd or os.getcwd()
# TODO(machenbach): Use timeout.
cmd_line = "%s %s %s" % (prefix, cmd, args)
print("Command: %s" % cmd_line)
print("in %s" % cwd)
sys.stdout.flush()
try:
if pipe:
return subprocess.check_output(cmd_line, shell=True, cwd=cwd)
else:
return subprocess.check_call(cmd_line, shell=True, cwd=cwd)
except subprocess.CalledProcessError:
return None
finally:
sys.stdout.flush()
sys.stderr.flush()
def SanitizeVersionTag(tag):
version_without_prefix = re.compile(r"^\d+\.\d+\.\d+(?:\.\d+)?$")
version_with_prefix = re.compile(r"^tags\/\d+\.\d+\.\d+(?:\.\d+)?$")
if version_without_prefix.match(tag):
return tag
elif version_with_prefix.match(tag):
return tag[len("tags/"):]
else:
return None
def NormalizeVersionTags(version_tags):
normalized_version_tags = []
# Remove tags/ prefix because of packed refs.
for current_tag in version_tags:
version_tag = SanitizeVersionTag(current_tag)
if version_tag != None:
normalized_version_tags.append(version_tag)
return normalized_version_tags
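# Example behaviour (hypothetical tag names): tags read from packed refs
# carry a "tags/" prefix, which is stripped, and non-version tags are
# dropped entirely.
#
# >>> NormalizeVersionTags(["4.9.385", "tags/4.9.386", "lkgr"])
# ['4.9.385', '4.9.386']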
# Wrapper for side effects.
class SideEffectHandler(object): # pragma: no cover
def Call(self, fun, *args, **kwargs):
return fun(*args, **kwargs)
def Command(self, cmd, args="", prefix="", pipe=True, cwd=None):
return Command(cmd, args, prefix, pipe, cwd=cwd)
def ReadLine(self):
return sys.stdin.readline().strip()
def ReadURL(self, url, params=None):
# pylint: disable=E1121
url_fh = urllib2.urlopen(url, params, 60)
try:
return url_fh.read()
finally:
url_fh.close()
def ReadClusterFuzzAPI(self, api_key, **params):
params["api_key"] = api_key.strip()
params = urllib.urlencode(params)
headers = {"Content-type": "application/x-www-form-urlencoded"}
conn = httplib.HTTPSConnection("backend-dot-cluster-fuzz.appspot.com")
conn.request("POST", "/_api/", params, headers)
response = conn.getresponse()
data = response.read()
try:
return json.loads(data)
except:
print(data)
print("ERROR: Could not read response. Is your key valid?")
raise
def Sleep(self, seconds):
time.sleep(seconds)
def GetDate(self):
return datetime.date.today().strftime("%Y-%m-%d")
def GetUTCStamp(self):
return time.mktime(datetime.datetime.utcnow().timetuple())
DEFAULT_SIDE_EFFECT_HANDLER = SideEffectHandler()
class NoRetryException(Exception):
pass
class VCInterface(object):
def InjectStep(self, step):
self.step=step
def Pull(self):
raise NotImplementedError()
def Fetch(self):
raise NotImplementedError()
def GetTags(self):
raise NotImplementedError()
def GetBranches(self):
raise NotImplementedError()
def MasterBranch(self):
raise NotImplementedError()
def CandidateBranch(self):
raise NotImplementedError()
def RemoteMasterBranch(self):
raise NotImplementedError()
def RemoteCandidateBranch(self):
raise NotImplementedError()
def RemoteBranch(self, name):
raise NotImplementedError()
def CLLand(self):
raise NotImplementedError()
def Tag(self, tag, remote, message):
"""Sets a tag for the current commit.
Assumptions: The commit already landed and the commit message is unique.
"""
raise NotImplementedError()
class GitInterface(VCInterface):
def Pull(self):
self.step.GitPull()
def Fetch(self):
self.step.Git("fetch")
def GetTags(self):
return self.step.Git("tag").strip().splitlines()
def GetBranches(self):
# Get relevant remote branches, e.g. "branch-heads/3.25".
branches = filter(
lambda s: re.match(r"^branch\-heads/\d+\.\d+$", s),
self.step.GitRemotes())
# Remove 'branch-heads/' prefix.
return map(lambda s: s[13:], branches)
def MasterBranch(self):
return "master"
def CandidateBranch(self):
return "candidates"
def RemoteMasterBranch(self):
return "origin/master"
def RemoteCandidateBranch(self):
return "origin/candidates"
def RemoteBranch(self, name):
# Assume that if someone "fully qualified" the ref, they know what they
# want.
if name.startswith('refs/'):
return name
if name in ["candidates", "master"]:
return "refs/remotes/origin/%s" % name
try:
# Check if branch is in heads.
if self.step.Git("show-ref refs/remotes/origin/%s" % name).strip():
return "refs/remotes/origin/%s" % name
except GitFailedException:
pass
try:
# Check if branch is in branch-heads.
if self.step.Git("show-ref refs/remotes/branch-heads/%s" % name).strip():
return "refs/remotes/branch-heads/%s" % name
except GitFailedException:
pass
self.Die("Can't find remote of %s" % name)
def Tag(self, tag, remote, message):
# Wait for the commit to appear. Assumes unique commit message titles (this
# is the case for all automated merge and push commits - also, no title is
# a prefix of another title).
commit = None
for wait_interval in [10, 30, 60, 60, 60, 60, 60]:
self.step.Git("fetch")
commit = self.step.GitLog(n=1, format="%H", grep=message, branch=remote)
if commit:
break
print("The commit has not replicated to git. Waiting for %s seconds." %
wait_interval)
self.step._side_effect_handler.Sleep(wait_interval)
else:
self.step.Die("Couldn't determine commit for setting the tag. Maybe the "
"git updater is lagging behind?")
self.step.Git("tag %s %s" % (tag, commit))
self.step.Git("push origin refs/tags/%s:refs/tags/%s" % (tag, tag))
def CLLand(self):
self.step.GitCLLand()
class Step(GitRecipesMixin):
def __init__(self, text, number, config, state, options, handler):
self._text = text
self._number = number
self._config = config
self._state = state
self._options = options
self._side_effect_handler = handler
self.vc = GitInterface()
self.vc.InjectStep(self)
# The testing configuration might set a different default cwd.
self.default_cwd = (self._config.get("DEFAULT_CWD") or
os.path.join(self._options.work_dir, "v8"))
assert self._number >= 0
assert self._config is not None
assert self._state is not None
assert self._side_effect_handler is not None
def __getitem__(self, key):
# Convenience method to allow direct [] access on step classes for
# manipulating the backed state dict.
return self._state.get(key)
def __setitem__(self, key, value):
# Convenience method to allow direct [] access on step classes for
# manipulating the backed state dict.
self._state[key] = value
def Config(self, key):
return self._config[key]
def Run(self):
# Restore state.
state_file = "%s-state.json" % self._config["PERSISTFILE_BASENAME"]
if not self._state and os.path.exists(state_file):
self._state.update(json.loads(FileToText(state_file)))
print(">>> Step %d: %s" % (self._number, self._text))
try:
return self.RunStep()
finally:
# Persist state.
TextToFile(json.dumps(self._state), state_file)
def RunStep(self): # pragma: no cover
raise NotImplementedError
def Retry(self, cb, retry_on=None, wait_plan=None):
""" Retry a function.
Params:
cb: The function to retry.
retry_on: A callback that takes the result of the function and returns
True if the function should be retried. A function throwing an
exception is always retried.
wait_plan: A list of waiting delays between retries in seconds. The
maximum number of retries is len(wait_plan).
"""
retry_on = retry_on or (lambda x: False)
wait_plan = list(wait_plan or [])
wait_plan.reverse()
while True:
got_exception = False
try:
result = cb()
except NoRetryException as e:
raise e
except Exception as e:
got_exception = e
if got_exception or retry_on(result):
if not wait_plan: # pragma: no cover
raise Exception("Retried too often. Giving up. Reason: %s" %
str(got_exception))
wait_time = wait_plan.pop()
print("Waiting for %f seconds." % wait_time)
self._side_effect_handler.Sleep(wait_time)
print("Retrying...")
else:
return result
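# Minimal usage sketch (illustrative only; do_flaky_thing is a made-up
# callable): retry at most twice, waiting 5 then 30 seconds, and treat a
# falsy result as a failure in addition to exceptions.
#
# result = self.Retry(lambda: do_flaky_thing(),
#                     retry_on=lambda r: not r,
#                     wait_plan=[5, 30])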
def ReadLine(self, default=None):
# Don't prompt in forced mode.
if self._options.force_readline_defaults and default is not None:
print("%s (forced)" % default)
return default
else:
return self._side_effect_handler.ReadLine()
def Command(self, name, args, cwd=None):
cmd = lambda: self._side_effect_handler.Command(
name, args, "", True, cwd=cwd or self.default_cwd)
return self.Retry(cmd, None, [5])
def Git(self, args="", prefix="", pipe=True, retry_on=None, cwd=None):
cmd = lambda: self._side_effect_handler.Command(
"git", args, prefix, pipe, cwd=cwd or self.default_cwd)
result = self.Retry(cmd, retry_on, [5, 30])
if result is None:
raise GitFailedException("'git %s' failed." % args)
return result
def Editor(self, args):
if self._options.requires_editor:
return self._side_effect_handler.Command(
os.environ["EDITOR"],
args,
pipe=False,
cwd=self.default_cwd)
def ReadURL(self, url, params=None, retry_on=None, wait_plan=None):
wait_plan = wait_plan or [3, 60, 600]
cmd = lambda: self._side_effect_handler.ReadURL(url, params)
return self.Retry(cmd, retry_on, wait_plan)
def GetDate(self):
return self._side_effect_handler.GetDate()
def Die(self, msg=""):
if msg != "":
print("Error: %s" % msg)
print("Exiting")
raise Exception(msg)
def DieNoManualMode(self, msg=""):
if not self._options.manual: # pragma: no cover
msg = msg or "Only available in manual mode."
self.Die(msg)
def Confirm(self, msg):
print("%s [Y/n] " % msg, end=' ')
answer = self.ReadLine(default="Y")
return answer == "" or answer == "Y" or answer == "y"
def DeleteBranch(self, name, cwd=None):
for line in self.GitBranch(cwd=cwd).splitlines():
if re.match(r"\*?\s*%s$" % re.escape(name), line):
msg = "Branch %s exists, do you want to delete it?" % name
if self.Confirm(msg):
self.GitDeleteBranch(name, cwd=cwd)
print("Branch %s deleted." % name)
else:
msg = "Can't continue. Please delete branch %s and try again." % name
self.Die(msg)
def InitialEnvironmentChecks(self, cwd):
# Cancel if this is not a git checkout.
if not os.path.exists(os.path.join(cwd, ".git")): # pragma: no cover
self.Die("%s is not a git checkout. If you know what you're doing, try "
"deleting it and rerunning this script." % cwd)
# Cancel if EDITOR is unset or not executable.
if (self._options.requires_editor and (not os.environ.get("EDITOR") or
self.Command(
"which", os.environ["EDITOR"]) is None)): # pragma: no cover
self.Die("Please set your EDITOR environment variable, you'll need it.")
def CommonPrepare(self):
# Check for a clean workdir.
if not self.GitIsWorkdirClean(): # pragma: no cover
self.Die("Workspace is not clean. Please commit or undo your changes.")
# Checkout master in case the script was left on a work branch.
self.GitCheckout('origin/master')
# Fetch unfetched revisions.
self.vc.Fetch()
def PrepareBranch(self):
# Delete the branch that will be created later if it exists already.
self.DeleteBranch(self._config["BRANCHNAME"])
def CommonCleanup(self):
self.GitCheckout('origin/master')
self.GitDeleteBranch(self._config["BRANCHNAME"])
# Clean up all temporary files.
for f in glob.iglob("%s*" % self._config["PERSISTFILE_BASENAME"]):
if os.path.isfile(f):
os.remove(f)
if os.path.isdir(f):
shutil.rmtree(f)
def ReadAndPersistVersion(self, prefix=""):
def ReadAndPersist(var_name, def_name):
match = re.match(r"^#define %s\s+(\d*)" % def_name, line)
if match:
value = match.group(1)
self["%s%s" % (prefix, var_name)] = value
for line in LinesInFile(os.path.join(self.default_cwd, VERSION_FILE)):
for (var_name, def_name) in [("major", "V8_MAJOR_VERSION"),
("minor", "V8_MINOR_VERSION"),
("build", "V8_BUILD_NUMBER"),
("patch", "V8_PATCH_LEVEL")]:
ReadAndPersist(var_name, def_name)
def WaitForLGTM(self):
print ("Please wait for an LGTM, then type \"LGTM<Return>\" to commit "
"your change. (If you need to iterate on the patch or double check "
"that it's sane, do so in another shell, but remember to not "
"change the headline of the uploaded CL.")
answer = ""
while answer != "LGTM":
print("> ", end=' ')
answer = self.ReadLine(None if self._options.wait_for_lgtm else "LGTM")
if answer != "LGTM":
print("That was not 'LGTM'.")
def WaitForResolvingConflicts(self, patch_file):
print("Applying the patch \"%s\" failed. Either type \"ABORT<Return>\", "
"or resolve the conflicts, stage *all* touched files with "
"'git add', and type \"RESOLVED<Return>\"" % (patch_file))
self.DieNoManualMode()
answer = ""
while answer != "RESOLVED":
if answer == "ABORT":
self.Die("Applying the patch failed.")
if answer != "":
print("That was not 'RESOLVED' or 'ABORT'.")
print("> ", end=' ')
answer = self.ReadLine()
# Takes a file containing the patch to apply as first argument.
def ApplyPatch(self, patch_file, revert=False):
try:
self.GitApplyPatch(patch_file, revert)
except GitFailedException:
self.WaitForResolvingConflicts(patch_file)
def GetVersionTag(self, revision):
tag = self.Git("describe --tags %s" % revision).strip()
return SanitizeVersionTag(tag)
def GetRecentReleases(self, max_age):
# Make sure tags are fetched.
self.Git("fetch origin +refs/tags/*:refs/tags/*")
# Current timestamp.
time_now = int(self._side_effect_handler.GetUTCStamp())
# List every tag from a given period.
revisions = self.Git("rev-list --max-age=%d --tags" %
int(time_now - max_age)).strip()
# Filter out revisions whose tag is off by one or more commits.
return filter(lambda r: self.GetVersionTag(r), revisions.splitlines())
def GetLatestVersion(self):
# Use cached version if available.
if self["latest_version"]:
return self["latest_version"]
# Make sure tags are fetched.
self.Git("fetch origin +refs/tags/*:refs/tags/*")
all_tags = self.vc.GetTags()
only_version_tags = NormalizeVersionTags(all_tags)
version = sorted(only_version_tags,
key=SortingKey, reverse=True)[0]
self["latest_version"] = version
return version
def GetLatestRelease(self):
"""The latest release is the git hash of the latest tagged version.
This revision should be rolled into chromium.
"""
latest_version = self.GetLatestVersion()
# The latest release.
latest_hash = self.GitLog(n=1, format="%H", branch=latest_version)
assert latest_hash
return latest_hash
def GetLatestReleaseBase(self, version=None):
"""The latest release base is the latest revision that is covered in the
last change log file. It doesn't include cherry-picked patches.
"""
latest_version = version or self.GetLatestVersion()
# Strip patch level if it exists.
latest_version = ".".join(latest_version.split(".")[:3])
# The latest release base.
latest_hash = self.GitLog(n=1, format="%H", branch=latest_version)
assert latest_hash
title = self.GitLog(n=1, format="%s", git_hash=latest_hash)
match = PUSH_MSG_GIT_RE.match(title)
if match:
# Legacy: In the old process there's one level of indirection. The
# version is on the candidates branch and points to the real release
# base on master through the commit message.
return match.group("git_rev")
match = PUSH_MSG_NEW_RE.match(title)
if match:
# This is a new-style v8 version branched from master. The commit
# "latest_hash" is the version-file change. Its parent is the release
# base on master.
return self.GitLog(n=1, format="%H", git_hash="%s^" % latest_hash)
self.Die("Unknown latest release: %s" % latest_hash)
def ArrayToVersion(self, prefix):
return ".".join([self[prefix + "major"],
self[prefix + "minor"],
self[prefix + "build"],
self[prefix + "patch"]])
def StoreVersion(self, version, prefix):
version_parts = version.split(".")
if len(version_parts) == 3:
version_parts.append("0")
major, minor, build, patch = version_parts
self[prefix + "major"] = major
self[prefix + "minor"] = minor
self[prefix + "build"] = build
self[prefix + "patch"] = patch
def SetVersion(self, version_file, prefix):
output = ""
for line in FileToText(version_file).splitlines():
if line.startswith("#define V8_MAJOR_VERSION"):
line = re.sub("\d+$", self[prefix + "major"], line)
elif line.startswith("#define V8_MINOR_VERSION"):
line = re.sub("\d+$", self[prefix + "minor"], line)
elif line.startswith("#define V8_BUILD_NUMBER"):
line = re.sub("\d+$", self[prefix + "build"], line)
elif line.startswith("#define V8_PATCH_LEVEL"):
line = re.sub("\d+$", self[prefix + "patch"], line)
elif (self[prefix + "candidate"] and
line.startswith("#define V8_IS_CANDIDATE_VERSION")):
line = re.sub("\d+$", self[prefix + "candidate"], line)
output += "%s\n" % line
TextToFile(output, version_file)
class BootstrapStep(Step):
MESSAGE = "Bootstrapping checkout and state."
def RunStep(self):
# Reserve state entry for json output.
self['json_output'] = {}
if os.path.realpath(self.default_cwd) == os.path.realpath(V8_BASE):
self.Die("Can't use v8 checkout with calling script as work checkout.")
# Directory containing the working v8 checkout.
if not os.path.exists(self._options.work_dir):
os.makedirs(self._options.work_dir)
if not os.path.exists(self.default_cwd):
self.Command("fetch", "v8", cwd=self._options.work_dir)
class UploadStep(Step):
MESSAGE = "Upload for code review."
def RunStep(self):
reviewer = None
if self._options.reviewer:
print("Using account %s for review." % self._options.reviewer)
reviewer = self._options.reviewer
tbr_reviewer = None
if self._options.tbr_reviewer:
print("Using account %s for TBR review." % self._options.tbr_reviewer)
tbr_reviewer = self._options.tbr_reviewer
if not reviewer and not tbr_reviewer:
print(
"Please enter the email address of a V8 reviewer for your patch: ",
end=' ')
self.DieNoManualMode("A reviewer must be specified in forced mode.")
reviewer = self.ReadLine()
self.GitUpload(reviewer, self._options.force_upload,
bypass_hooks=self._options.bypass_upload_hooks,
cc=self._options.cc, tbr_reviewer=tbr_reviewer)
def MakeStep(step_class=Step, number=0, state=None, config=None,
options=None, side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER):
# Allow passing in empty dictionaries.
state = state if state is not None else {}
config = config if config is not None else {}
try:
message = step_class.MESSAGE
except AttributeError:
message = step_class.__name__
return step_class(message, number=number, config=config,
state=state, options=options,
handler=side_effect_handler)
class ScriptsBase(object):
def __init__(self,
config=None,
side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER,
state=None):
self._config = config or self._Config()
self._side_effect_handler = side_effect_handler
self._state = state if state is not None else {}
def _Description(self):
return None
def _PrepareOptions(self, parser):
pass
def _ProcessOptions(self, options):
return True
def _Steps(self): # pragma: no cover
raise Exception("Not implemented.")
def _Config(self):
return {}
def MakeOptions(self, args=None):
parser = argparse.ArgumentParser(description=self._Description())
parser.add_argument("-a", "--author", default="",
help="The author email used for code review.")
parser.add_argument("--dry-run", default=False, action="store_true",
help="Perform only read-only actions.")
parser.add_argument("--json-output",
help="File to write results summary to.")
parser.add_argument("-r", "--reviewer", default="",
help="The account name to be used for reviews.")
parser.add_argument("--tbr-reviewer", "--tbr", default="",
help="The account name to be used for TBR reviews.")
parser.add_argument("-s", "--step",
help="Specify the step where to start work. Default: 0.",
default=0, type=int)
parser.add_argument("--work-dir",
help=("Location where to bootstrap a working v8 "
"checkout."))
self._PrepareOptions(parser)
if args is None: # pragma: no cover
options = parser.parse_args()
else:
options = parser.parse_args(args)
# Process common options.
if options.step < 0: # pragma: no cover
print("Bad step number %d" % options.step)
parser.print_help()
return None
# Defaults for options, common to all scripts.
options.manual = getattr(options, "manual", True)
options.force = getattr(options, "force", False)
options.bypass_upload_hooks = False
# Derived options.
options.requires_editor = not options.force
options.wait_for_lgtm = not options.force
options.force_readline_defaults = not options.manual
options.force_upload = not options.manual
# Process script specific options.
if not self._ProcessOptions(options):
parser.print_help()
return None
if not options.work_dir:
options.work_dir = "/tmp/v8-release-scripts-work-dir"
return options
def RunSteps(self, step_classes, args=None):
options = self.MakeOptions(args)
if not options:
return 1
# Ensure temp dir exists for state files.
state_dir = os.path.dirname(self._config["PERSISTFILE_BASENAME"])
if not os.path.exists(state_dir):
os.makedirs(state_dir)
state_file = "%s-state.json" % self._config["PERSISTFILE_BASENAME"]
if options.step == 0 and os.path.exists(state_file):
os.remove(state_file)
steps = []
for (number, step_class) in enumerate([BootstrapStep] + step_classes):
steps.append(MakeStep(step_class, number, self._state, self._config,
options, self._side_effect_handler))
try:
for step in steps[options.step:]:
if step.Run():
return 0
finally:
if options.json_output:
with open(options.json_output, "w") as f:
json.dump(self._state['json_output'], f)
return 0
def Run(self, args=None):
return self.RunSteps(self._Steps(), args)
| apache-2.0 |
appsembler/edx-platform | openedx/core/djangoapps/user_api/migrations/0001_initial.py | 13 | 2793 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
from django.conf import settings
import model_utils.fields
import django.core.validators
from opaque_keys.edx.django.models import CourseKeyField
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='UserCourseTag',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('key', models.CharField(max_length=255, db_index=True)),
('course_id', CourseKeyField(max_length=255, db_index=True)),
('value', models.TextField()),
('user', models.ForeignKey(related_name='+', to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
],
),
migrations.CreateModel(
name='UserOrgTag',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('key', models.CharField(max_length=255, db_index=True)),
('org', models.CharField(max_length=255, db_index=True)),
('value', models.TextField()),
('user', models.ForeignKey(related_name='+', to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
],
),
migrations.CreateModel(
name='UserPreference',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('key', models.CharField(db_index=True, max_length=255, validators=[django.core.validators.RegexValidator(b'[-_a-zA-Z0-9]+')])),
('value', models.TextField()),
('user', models.ForeignKey(related_name='preferences', to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
],
),
migrations.AlterUniqueTogether(
name='userpreference',
unique_together=set([('user', 'key')]),
),
migrations.AlterUniqueTogether(
name='userorgtag',
unique_together=set([('user', 'org', 'key')]),
),
migrations.AlterUniqueTogether(
name='usercoursetag',
unique_together=set([('user', 'course_id', 'key')]),
),
]
| agpl-3.0 |
JamesClough/networkx | networkx/algorithms/components/attracting.py | 5 | 3852 | # -*- coding: utf-8 -*-
# Copyright (C) 2004-2016 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
#
# Authors: Christopher Ellison
"""Attracting components."""
import networkx as nx
from networkx.utils.decorators import not_implemented_for
__all__ = ['number_attracting_components',
'attracting_components',
'is_attracting_component',
'attracting_component_subgraphs',
]
@not_implemented_for('undirected')
def attracting_components(G):
"""Generates a list of attracting components in `G`.
An attracting component in a directed graph `G` is a strongly connected
component with the property that a random walker on the graph will never
leave the component, once it enters the component.
The nodes in attracting components can also be thought of as recurrent
nodes. If a random walker enters the attractor containing the node, then
the node will be visited infinitely often.
Parameters
----------
G : DiGraph, MultiDiGraph
The graph to be analyzed.
Returns
-------
attractors : generator of sets
A generator of sets of nodes, one for each attracting component of G.
Raises
------
NetworkXNotImplemented :
If the input graph is undirected.
See Also
--------
number_attracting_components
is_attracting_component
attracting_component_subgraphs
"""
scc = list(nx.strongly_connected_components(G))
cG = nx.condensation(G, scc)
for n in cG:
if cG.out_degree(n) == 0:
yield scc[n]
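# A small illustrative example (not part of the module): with a tail edge
# 0 -> 1 feeding the cycle 1 -> 2 -> 3 -> 1, the cycle is the single
# attracting component, since a walker that reaches it can never leave.
#
# >>> G = nx.DiGraph([(0, 1), (1, 2), (2, 3), (3, 1)])
# >>> list(attracting_components(G))
# [{1, 2, 3}]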
@not_implemented_for('undirected')
def number_attracting_components(G):
"""Returns the number of attracting components in `G`.
Parameters
----------
G : DiGraph, MultiDiGraph
The graph to be analyzed.
Returns
-------
n : int
The number of attracting components in G.
Raises
------
NetworkXNotImplemented :
If the input graph is undirected.
See Also
--------
attracting_components
is_attracting_component
attracting_component_subgraphs
"""
n = len(list(attracting_components(G)))
return n
@not_implemented_for('undirected')
def is_attracting_component(G):
"""Returns True if `G` consists of a single attracting component.
Parameters
----------
G : DiGraph, MultiDiGraph
The graph to be analyzed.
Returns
-------
attracting : bool
True if `G` has a single attracting component. Otherwise, False.
Raises
------
NetworkXNotImplemented :
If the input graph is undirected.
See Also
--------
attracting_components
number_attracting_components
attracting_component_subgraphs
"""
ac = list(attracting_components(G))
return len(ac[0]) == len(G)
@not_implemented_for('undirected')
def attracting_component_subgraphs(G, copy=True):
"""Generates a list of attracting component subgraphs from `G`.
Parameters
----------
G : DiGraph, MultiDiGraph
The graph to be analyzed.
copy : bool
If copy is True, graph, node, and edge attributes are copied to the
subgraphs.
Returns
-------
subgraphs : list
A list of node-induced subgraphs of the attracting components of `G`.
Raises
------
NetworkXNotImplemented :
If the input graph is undirected.
See Also
--------
attracting_components
number_attracting_components
is_attracting_component
"""
for ac in attracting_components(G):
if copy:
yield G.subgraph(ac).copy()
else:
yield G.subgraph(ac)
| bsd-3-clause |
Gamebasis/3DGamebasisServer | GameData/blender-2.71-windows64/2.71/python/lib/encodings/tis_620.py | 272 | 12300 | """ Python Character Mapping Codec tis_620 generated from 'python-mappings/TIS-620.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='tis-620',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\x80' # 0x80 -> <control>
'\x81' # 0x81 -> <control>
'\x82' # 0x82 -> <control>
'\x83' # 0x83 -> <control>
'\x84' # 0x84 -> <control>
'\x85' # 0x85 -> <control>
'\x86' # 0x86 -> <control>
'\x87' # 0x87 -> <control>
'\x88' # 0x88 -> <control>
'\x89' # 0x89 -> <control>
'\x8a' # 0x8A -> <control>
'\x8b' # 0x8B -> <control>
'\x8c' # 0x8C -> <control>
'\x8d' # 0x8D -> <control>
'\x8e' # 0x8E -> <control>
'\x8f' # 0x8F -> <control>
'\x90' # 0x90 -> <control>
'\x91' # 0x91 -> <control>
'\x92' # 0x92 -> <control>
'\x93' # 0x93 -> <control>
'\x94' # 0x94 -> <control>
'\x95' # 0x95 -> <control>
'\x96' # 0x96 -> <control>
'\x97' # 0x97 -> <control>
'\x98' # 0x98 -> <control>
'\x99' # 0x99 -> <control>
'\x9a' # 0x9A -> <control>
'\x9b' # 0x9B -> <control>
'\x9c' # 0x9C -> <control>
'\x9d' # 0x9D -> <control>
'\x9e' # 0x9E -> <control>
'\x9f' # 0x9F -> <control>
'\ufffe'
'\u0e01' # 0xA1 -> THAI CHARACTER KO KAI
'\u0e02' # 0xA2 -> THAI CHARACTER KHO KHAI
'\u0e03' # 0xA3 -> THAI CHARACTER KHO KHUAT
'\u0e04' # 0xA4 -> THAI CHARACTER KHO KHWAI
'\u0e05' # 0xA5 -> THAI CHARACTER KHO KHON
'\u0e06' # 0xA6 -> THAI CHARACTER KHO RAKHANG
'\u0e07' # 0xA7 -> THAI CHARACTER NGO NGU
'\u0e08' # 0xA8 -> THAI CHARACTER CHO CHAN
'\u0e09' # 0xA9 -> THAI CHARACTER CHO CHING
'\u0e0a' # 0xAA -> THAI CHARACTER CHO CHANG
'\u0e0b' # 0xAB -> THAI CHARACTER SO SO
'\u0e0c' # 0xAC -> THAI CHARACTER CHO CHOE
'\u0e0d' # 0xAD -> THAI CHARACTER YO YING
'\u0e0e' # 0xAE -> THAI CHARACTER DO CHADA
'\u0e0f' # 0xAF -> THAI CHARACTER TO PATAK
'\u0e10' # 0xB0 -> THAI CHARACTER THO THAN
'\u0e11' # 0xB1 -> THAI CHARACTER THO NANGMONTHO
'\u0e12' # 0xB2 -> THAI CHARACTER THO PHUTHAO
'\u0e13' # 0xB3 -> THAI CHARACTER NO NEN
'\u0e14' # 0xB4 -> THAI CHARACTER DO DEK
'\u0e15' # 0xB5 -> THAI CHARACTER TO TAO
'\u0e16' # 0xB6 -> THAI CHARACTER THO THUNG
'\u0e17' # 0xB7 -> THAI CHARACTER THO THAHAN
'\u0e18' # 0xB8 -> THAI CHARACTER THO THONG
'\u0e19' # 0xB9 -> THAI CHARACTER NO NU
'\u0e1a' # 0xBA -> THAI CHARACTER BO BAIMAI
'\u0e1b' # 0xBB -> THAI CHARACTER PO PLA
'\u0e1c' # 0xBC -> THAI CHARACTER PHO PHUNG
'\u0e1d' # 0xBD -> THAI CHARACTER FO FA
'\u0e1e' # 0xBE -> THAI CHARACTER PHO PHAN
'\u0e1f' # 0xBF -> THAI CHARACTER FO FAN
'\u0e20' # 0xC0 -> THAI CHARACTER PHO SAMPHAO
'\u0e21' # 0xC1 -> THAI CHARACTER MO MA
'\u0e22' # 0xC2 -> THAI CHARACTER YO YAK
'\u0e23' # 0xC3 -> THAI CHARACTER RO RUA
'\u0e24' # 0xC4 -> THAI CHARACTER RU
'\u0e25' # 0xC5 -> THAI CHARACTER LO LING
'\u0e26' # 0xC6 -> THAI CHARACTER LU
'\u0e27' # 0xC7 -> THAI CHARACTER WO WAEN
'\u0e28' # 0xC8 -> THAI CHARACTER SO SALA
'\u0e29' # 0xC9 -> THAI CHARACTER SO RUSI
'\u0e2a' # 0xCA -> THAI CHARACTER SO SUA
'\u0e2b' # 0xCB -> THAI CHARACTER HO HIP
'\u0e2c' # 0xCC -> THAI CHARACTER LO CHULA
'\u0e2d' # 0xCD -> THAI CHARACTER O ANG
'\u0e2e' # 0xCE -> THAI CHARACTER HO NOKHUK
'\u0e2f' # 0xCF -> THAI CHARACTER PAIYANNOI
'\u0e30' # 0xD0 -> THAI CHARACTER SARA A
'\u0e31' # 0xD1 -> THAI CHARACTER MAI HAN-AKAT
'\u0e32' # 0xD2 -> THAI CHARACTER SARA AA
'\u0e33' # 0xD3 -> THAI CHARACTER SARA AM
'\u0e34' # 0xD4 -> THAI CHARACTER SARA I
'\u0e35' # 0xD5 -> THAI CHARACTER SARA II
'\u0e36' # 0xD6 -> THAI CHARACTER SARA UE
'\u0e37' # 0xD7 -> THAI CHARACTER SARA UEE
'\u0e38' # 0xD8 -> THAI CHARACTER SARA U
'\u0e39' # 0xD9 -> THAI CHARACTER SARA UU
'\u0e3a' # 0xDA -> THAI CHARACTER PHINTHU
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\u0e3f' # 0xDF -> THAI CURRENCY SYMBOL BAHT
'\u0e40' # 0xE0 -> THAI CHARACTER SARA E
'\u0e41' # 0xE1 -> THAI CHARACTER SARA AE
'\u0e42' # 0xE2 -> THAI CHARACTER SARA O
'\u0e43' # 0xE3 -> THAI CHARACTER SARA AI MAIMUAN
'\u0e44' # 0xE4 -> THAI CHARACTER SARA AI MAIMALAI
'\u0e45' # 0xE5 -> THAI CHARACTER LAKKHANGYAO
'\u0e46' # 0xE6 -> THAI CHARACTER MAIYAMOK
'\u0e47' # 0xE7 -> THAI CHARACTER MAITAIKHU
'\u0e48' # 0xE8 -> THAI CHARACTER MAI EK
'\u0e49' # 0xE9 -> THAI CHARACTER MAI THO
'\u0e4a' # 0xEA -> THAI CHARACTER MAI TRI
'\u0e4b' # 0xEB -> THAI CHARACTER MAI CHATTAWA
'\u0e4c' # 0xEC -> THAI CHARACTER THANTHAKHAT
'\u0e4d' # 0xED -> THAI CHARACTER NIKHAHIT
'\u0e4e' # 0xEE -> THAI CHARACTER YAMAKKAN
'\u0e4f' # 0xEF -> THAI CHARACTER FONGMAN
'\u0e50' # 0xF0 -> THAI DIGIT ZERO
'\u0e51' # 0xF1 -> THAI DIGIT ONE
'\u0e52' # 0xF2 -> THAI DIGIT TWO
'\u0e53' # 0xF3 -> THAI DIGIT THREE
'\u0e54' # 0xF4 -> THAI DIGIT FOUR
'\u0e55' # 0xF5 -> THAI DIGIT FIVE
'\u0e56' # 0xF6 -> THAI DIGIT SIX
'\u0e57' # 0xF7 -> THAI DIGIT SEVEN
'\u0e58' # 0xF8 -> THAI DIGIT EIGHT
'\u0e59' # 0xF9 -> THAI DIGIT NINE
'\u0e5a' # 0xFA -> THAI CHARACTER ANGKHANKHU
'\u0e5b' # 0xFB -> THAI CHARACTER KHOMUT
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
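# Usage sketch (illustrative): the codec is registered under 'tis-620' by
# the standard encodings machinery, so bytes round-trip through it:
#
# >>> b'\xa1'.decode('tis-620')   # THAI CHARACTER KO KAI
# 'ก'
# >>> 'ก'.encode('tis-620')
# b'\xa1'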
| gpl-3.0 |
0todd0000/spm1d | spm1d/data/uv0d/anova2nested.py | 1 | 1275 |
import numpy as np
from .. import _base
class QIMacros(_base.DatasetANOVA2nested): #nested
def _set_values(self):
self.www = 'https://www.qimacros.com/hypothesis-testing/anova-two-way-nested-excel/'
self.Y = np.array([3,4,7.1,7.1, 6,5,8.1,8.1, 3,4,7.1,9.1, 3,3,6,8.1, 1,2,5,9.9, 2,3,6,9.9, 2,4,5,8.9, 2,3,6,10.8])
self.A = np.array([0]*16 + [1]*16)
self.B = np.array([0,1,2,3]*4 + [4,5,6,7]*4)
self.z = 0.111, 45.726
self.df = (1, 6), (6, 24)
self.p = (0.742, 0.000)
self._rtol = 0.01
class SouthamptonNested1(_base.DatasetANOVA2nested):
def _set_values(self):
self.www = 'http://www.southampton.ac.uk/~cpd/anovas/datasets/Doncaster&Davey%20-%20Model%202_1%20Two%20factor%20nested.txt'
self.A = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3])
self.B = np.array([1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2])
self.Y = np.array([4.5924, -0.5488, 6.1605, 2.3374, 5.1873, 3.3579, 6.3092, 3.2831, 7.3809, 9.2085, 13.1147, 15.2654, 12.4188, 14.3951, 8.5986, 3.4945, 21.3220, 25.0426, 22.6600, 24.1283, 16.5927, 10.2129, 9.8934, 10.0203])
self.z = 4.02, 9.26
self.df = (2, 3), (3, 18)
self.p = (0.142, 0.001)
| gpl-3.0 |
Zhongqilong/kbengine | kbe/res/scripts/common/Lib/encodings/cp1251.py | 272 | 13361 | """ Python Character Mapping Codec cp1251 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1251.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp1251',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\u0402' # 0x80 -> CYRILLIC CAPITAL LETTER DJE
'\u0403' # 0x81 -> CYRILLIC CAPITAL LETTER GJE
'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
'\u0453' # 0x83 -> CYRILLIC SMALL LETTER GJE
'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
'\u2020' # 0x86 -> DAGGER
'\u2021' # 0x87 -> DOUBLE DAGGER
'\u20ac' # 0x88 -> EURO SIGN
'\u2030' # 0x89 -> PER MILLE SIGN
'\u0409' # 0x8A -> CYRILLIC CAPITAL LETTER LJE
'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
'\u040a' # 0x8C -> CYRILLIC CAPITAL LETTER NJE
'\u040c' # 0x8D -> CYRILLIC CAPITAL LETTER KJE
'\u040b' # 0x8E -> CYRILLIC CAPITAL LETTER TSHE
'\u040f' # 0x8F -> CYRILLIC CAPITAL LETTER DZHE
'\u0452' # 0x90 -> CYRILLIC SMALL LETTER DJE
'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
'\u2022' # 0x95 -> BULLET
'\u2013' # 0x96 -> EN DASH
'\u2014' # 0x97 -> EM DASH
'\ufffe' # 0x98 -> UNDEFINED
'\u2122' # 0x99 -> TRADE MARK SIGN
'\u0459' # 0x9A -> CYRILLIC SMALL LETTER LJE
'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
'\u045a' # 0x9C -> CYRILLIC SMALL LETTER NJE
'\u045c' # 0x9D -> CYRILLIC SMALL LETTER KJE
'\u045b' # 0x9E -> CYRILLIC SMALL LETTER TSHE
'\u045f' # 0x9F -> CYRILLIC SMALL LETTER DZHE
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\u040e' # 0xA1 -> CYRILLIC CAPITAL LETTER SHORT U
'\u045e' # 0xA2 -> CYRILLIC SMALL LETTER SHORT U
'\u0408' # 0xA3 -> CYRILLIC CAPITAL LETTER JE
'\xa4' # 0xA4 -> CURRENCY SIGN
'\u0490' # 0xA5 -> CYRILLIC CAPITAL LETTER GHE WITH UPTURN
'\xa6' # 0xA6 -> BROKEN BAR
'\xa7' # 0xA7 -> SECTION SIGN
'\u0401' # 0xA8 -> CYRILLIC CAPITAL LETTER IO
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\u0404' # 0xAA -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xAC -> NOT SIGN
'\xad' # 0xAD -> SOFT HYPHEN
'\xae' # 0xAE -> REGISTERED SIGN
'\u0407' # 0xAF -> CYRILLIC CAPITAL LETTER YI
'\xb0' # 0xB0 -> DEGREE SIGN
'\xb1' # 0xB1 -> PLUS-MINUS SIGN
'\u0406' # 0xB2 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
'\u0456' # 0xB3 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
'\u0491' # 0xB4 -> CYRILLIC SMALL LETTER GHE WITH UPTURN
'\xb5' # 0xB5 -> MICRO SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xb7' # 0xB7 -> MIDDLE DOT
'\u0451' # 0xB8 -> CYRILLIC SMALL LETTER IO
'\u2116' # 0xB9 -> NUMERO SIGN
'\u0454' # 0xBA -> CYRILLIC SMALL LETTER UKRAINIAN IE
'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u0458' # 0xBC -> CYRILLIC SMALL LETTER JE
'\u0405' # 0xBD -> CYRILLIC CAPITAL LETTER DZE
'\u0455' # 0xBE -> CYRILLIC SMALL LETTER DZE
'\u0457' # 0xBF -> CYRILLIC SMALL LETTER YI
'\u0410' # 0xC0 -> CYRILLIC CAPITAL LETTER A
'\u0411' # 0xC1 -> CYRILLIC CAPITAL LETTER BE
'\u0412' # 0xC2 -> CYRILLIC CAPITAL LETTER VE
'\u0413' # 0xC3 -> CYRILLIC CAPITAL LETTER GHE
'\u0414' # 0xC4 -> CYRILLIC CAPITAL LETTER DE
'\u0415' # 0xC5 -> CYRILLIC CAPITAL LETTER IE
'\u0416' # 0xC6 -> CYRILLIC CAPITAL LETTER ZHE
'\u0417' # 0xC7 -> CYRILLIC CAPITAL LETTER ZE
'\u0418' # 0xC8 -> CYRILLIC CAPITAL LETTER I
'\u0419' # 0xC9 -> CYRILLIC CAPITAL LETTER SHORT I
'\u041a' # 0xCA -> CYRILLIC CAPITAL LETTER KA
'\u041b' # 0xCB -> CYRILLIC CAPITAL LETTER EL
'\u041c' # 0xCC -> CYRILLIC CAPITAL LETTER EM
'\u041d' # 0xCD -> CYRILLIC CAPITAL LETTER EN
'\u041e' # 0xCE -> CYRILLIC CAPITAL LETTER O
'\u041f' # 0xCF -> CYRILLIC CAPITAL LETTER PE
'\u0420' # 0xD0 -> CYRILLIC CAPITAL LETTER ER
'\u0421' # 0xD1 -> CYRILLIC CAPITAL LETTER ES
'\u0422' # 0xD2 -> CYRILLIC CAPITAL LETTER TE
'\u0423' # 0xD3 -> CYRILLIC CAPITAL LETTER U
'\u0424' # 0xD4 -> CYRILLIC CAPITAL LETTER EF
'\u0425' # 0xD5 -> CYRILLIC CAPITAL LETTER HA
'\u0426' # 0xD6 -> CYRILLIC CAPITAL LETTER TSE
'\u0427' # 0xD7 -> CYRILLIC CAPITAL LETTER CHE
'\u0428' # 0xD8 -> CYRILLIC CAPITAL LETTER SHA
'\u0429' # 0xD9 -> CYRILLIC CAPITAL LETTER SHCHA
'\u042a' # 0xDA -> CYRILLIC CAPITAL LETTER HARD SIGN
'\u042b' # 0xDB -> CYRILLIC CAPITAL LETTER YERU
'\u042c' # 0xDC -> CYRILLIC CAPITAL LETTER SOFT SIGN
'\u042d' # 0xDD -> CYRILLIC CAPITAL LETTER E
'\u042e' # 0xDE -> CYRILLIC CAPITAL LETTER YU
'\u042f' # 0xDF -> CYRILLIC CAPITAL LETTER YA
'\u0430' # 0xE0 -> CYRILLIC SMALL LETTER A
'\u0431' # 0xE1 -> CYRILLIC SMALL LETTER BE
'\u0432' # 0xE2 -> CYRILLIC SMALL LETTER VE
'\u0433' # 0xE3 -> CYRILLIC SMALL LETTER GHE
'\u0434' # 0xE4 -> CYRILLIC SMALL LETTER DE
'\u0435' # 0xE5 -> CYRILLIC SMALL LETTER IE
'\u0436' # 0xE6 -> CYRILLIC SMALL LETTER ZHE
'\u0437' # 0xE7 -> CYRILLIC SMALL LETTER ZE
'\u0438' # 0xE8 -> CYRILLIC SMALL LETTER I
'\u0439' # 0xE9 -> CYRILLIC SMALL LETTER SHORT I
'\u043a' # 0xEA -> CYRILLIC SMALL LETTER KA
'\u043b' # 0xEB -> CYRILLIC SMALL LETTER EL
'\u043c' # 0xEC -> CYRILLIC SMALL LETTER EM
'\u043d' # 0xED -> CYRILLIC SMALL LETTER EN
'\u043e' # 0xEE -> CYRILLIC SMALL LETTER O
'\u043f' # 0xEF -> CYRILLIC SMALL LETTER PE
'\u0440' # 0xF0 -> CYRILLIC SMALL LETTER ER
'\u0441' # 0xF1 -> CYRILLIC SMALL LETTER ES
'\u0442' # 0xF2 -> CYRILLIC SMALL LETTER TE
'\u0443' # 0xF3 -> CYRILLIC SMALL LETTER U
'\u0444' # 0xF4 -> CYRILLIC SMALL LETTER EF
'\u0445' # 0xF5 -> CYRILLIC SMALL LETTER HA
'\u0446' # 0xF6 -> CYRILLIC SMALL LETTER TSE
'\u0447' # 0xF7 -> CYRILLIC SMALL LETTER CHE
'\u0448' # 0xF8 -> CYRILLIC SMALL LETTER SHA
'\u0449' # 0xF9 -> CYRILLIC SMALL LETTER SHCHA
'\u044a' # 0xFA -> CYRILLIC SMALL LETTER HARD SIGN
'\u044b' # 0xFB -> CYRILLIC SMALL LETTER YERU
'\u044c' # 0xFC -> CYRILLIC SMALL LETTER SOFT SIGN
'\u044d' # 0xFD -> CYRILLIC SMALL LETTER E
'\u044e' # 0xFE -> CYRILLIC SMALL LETTER YU
'\u044f' # 0xFF -> CYRILLIC SMALL LETTER YA
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| lgpl-3.0 |
edk0/IAmARelayBotAMA | tx_redis.py | 1 | 6135 | from __future__ import print_function
import json
from twisted.internet import protocol
class Node(object):
def __init__(self, length=None, parent=None, data=None):
self.data = data or []
self.parent = parent
self.length = length or (len(self.data) if isinstance(self.data, list) else None) or 1
@property
def full(self):
if isinstance(self.data, list):
return len(self.data) >= self.length
else:
return bool(self.data)
def append(self, child):
if isinstance(child, Node):
child.parent = self
self.data.append(child)
def serialize(self):
if isinstance(self.data, list):
return [c.serialize() if isinstance(c, Node) else c for c in self.data]
else:
return self.data
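# Tiny sketch of how the Node tree is used (illustrative values): a
# two-element multi-bulk reply fills a Node of length 2, and serialize()
# flattens it back into plain Python objects.
#
# >>> n = Node(length=2)
# >>> n.append('message'); n.append('chan')
# >>> n.full, n.serialize()
# (True, ['message', 'chan'])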
class _RedisProtocol(protocol.Protocol):
def connectionMade(self):
self.parent.connectionMade(self)
def request(self, *args):
self.transport.write(self.encode_request(args))
def encode_request(self, args):
lines = []
lines.append('*' + str(len(args)))
for a in args:
if isinstance(a, unicode):
a = a.encode('utf8')
lines.append('$' + str(len(a)))
lines.append(a)
lines.append('')
return '\r\n'.join(lines)
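# Wire format produced by encode_request (the RESP unified request
# protocol), shown for a hypothetical command:
#
#   ("SUBSCRIBE", "minecraft")
#   -> '*2\r\n$9\r\nSUBSCRIBE\r\n$9\r\nminecraft\r\n'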
class HiRedisProtocol(_RedisProtocol):
def __init__(self, factory):
self.parent = factory
self.reader = hiredis.Reader()
def dataReceived(self, data):
self.reader.feed(data)
response = self.reader.gets()
while response:
self.parent.handle(response)
response = self.reader.gets()
class PythonRedisProtocol(_RedisProtocol):
decode_next = -1 # the number of bytes we require to decode the next thing
# -1 is "until CRLF"
decode_state = 'type'
decode_type = '-'
buf = ''
def __init__(self, factory):
self.parent = factory
self.decode_node = Node(length=1)
def reset(self):
self.decode_node = Node(length=1)
def add(self, thing):
while self.decode_node and self.decode_node.full:
assert self.decode_node != self.decode_node.parent
self.decode_node = self.decode_node.parent
assert self.decode_node
self.decode_node.append(thing)
if isinstance(thing, Node) and not thing.full:
self.decode_node = thing
else:
n = self.decode_node
while n.parent and n.full:
n = n.parent
if not n.parent:
d = n.data[0].serialize()
self.parent.handle(d)
self.reset()
def add_node(self, *a, **kw):
n = Node(*a, **kw)
self.add(n)
if not n.full:
self.decode_node = n
def decoder(self, data):
if self.decode_state == 'type':
self.decode_type = {
'$': 'bulk',
'*': 'multi_bulk',
':': 'integer',
'+': 'status',
'-': 'error'
}.get(data[0])
stuff = data[1:]
if self.decode_type in ('status', 'error'):
self.reset()
else:
stuff = int(stuff)
if self.decode_type == 'bulk':
if stuff == -1:
self.add(None)
self.decode_next = -1
self.decode_state = 'type'
else:
self.decode_next = stuff + 2
self.decode_state = 'read_bulk'
elif self.decode_type == 'multi_bulk':
self.add_node(length=stuff)
self.decode_next = -1
self.decode_state = 'type'
elif self.decode_type == 'integer':
self.add(stuff)
self.decode_next = -1
self.decode_state = 'type'
elif self.decode_state == 'read_bulk':
self.add(data)
self.decode_next = -1
self.decode_state = 'type'
def dataReceived(self, data):
self.buf += data
while True:
if self.decode_next >= 0 and len(self.buf) >= self.decode_next:
d = self.buf[:self.decode_next - 2]
self.buf = self.buf[self.decode_next:]
self.decoder(d)
elif self.decode_next < 0 and '\r\n' in self.buf:
d, self.buf = self.buf.split('\r\n', 1)
self.decoder(d)
else:
break
try:
import hiredis
RedisProtocol = HiRedisProtocol
print("using hiredis to parse incoming redis messages")
except ImportError:
RedisProtocol = PythonRedisProtocol
print("using pure python to parse incoming redis messages - slow")
class RedisFactory(protocol.ReconnectingClientFactory):
def __init__(self, parent, channels):
self.parent = parent
self.channels = channels
def buildProtocol(self, addr):
self.protocol = RedisProtocol(self)
return self.protocol
def handle(self, thing):
if isinstance(thing, list) and len(thing) >= 1:
cmd, args = thing[0], thing[1:]
handler = getattr(self.parent, 'handle_' + cmd, None)
if handler:
handler(*args)
else:
print("warning: nothing handles '{}'".format(cmd))
else:
print("I don't understand: {}".format(repr(thing)))
def connectionMade(self, protocol):
self.resetDelay()
self.subscribe(self.channels)
def publish(self, data, channel=None):
# NOTE: self.channel is never set in __init__ (only self.channels is),
# so callers must pass an explicit channel or assign factory.channel
# before publishing.
channel = channel or self.channel
self.protocol.request("PUBLISH", channel, json.dumps(data))
def subscribe(self, channels):
self.protocol.request("SUBSCRIBE", *channels)
| mit |
cboling/SDNdbg | docs/old-stuff/pydzcvr/doc/neutron/plugins/mlnx/db/mlnx_db_v2.py | 15 | 10337 | # Copyright 2013 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from six import moves
from sqlalchemy.orm import exc
from neutron.common import exceptions as n_exc
import neutron.db.api as db
from neutron.db import models_v2
from neutron.db import securitygroups_db as sg_db
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.plugins.mlnx.common import config # noqa
from neutron.plugins.mlnx.db import mlnx_models_v2
LOG = logging.getLogger(__name__)
def _remove_non_allocatable_vlans(session, allocations,
physical_network, vlan_ids):
if physical_network in allocations:
for entry in allocations[physical_network]:
try:
# see if vlan is allocatable
vlan_ids.remove(entry.segmentation_id)
except KeyError:
# it's not allocatable, so check if its allocated
if not entry.allocated:
# it's not, so remove it from table
LOG.debug(_(
"Removing vlan %(seg_id)s on "
"physical network "
"%(net)s from pool"),
{'seg_id': entry.segmentation_id,
'net': physical_network})
session.delete(entry)
del allocations[physical_network]
def _add_missing_allocatable_vlans(session, physical_network, vlan_ids):
for vlan_id in sorted(vlan_ids):
entry = mlnx_models_v2.SegmentationIdAllocation(physical_network,
vlan_id)
session.add(entry)
def _remove_unconfigured_vlans(session, allocations):
for entries in allocations.itervalues():
for entry in entries:
if not entry.allocated:
LOG.debug(_("Removing vlan %(seg_id)s on physical "
"network %(net)s from pool"),
{'seg_id': entry.segmentation_id,
'net': entry.physical_network})
session.delete(entry)
def sync_network_states(network_vlan_ranges):
"""Synchronize network_states table with current configured VLAN ranges."""
session = db.get_session()
with session.begin():
# get existing allocations for all physical networks
allocations = dict()
entries = (session.query(mlnx_models_v2.SegmentationIdAllocation).
all())
for entry in entries:
allocations.setdefault(entry.physical_network, set()).add(entry)
# process vlan ranges for each configured physical network
for physical_network, vlan_ranges in network_vlan_ranges.iteritems():
# determine current configured allocatable vlans for this
# physical network
vlan_ids = set()
for vlan_range in vlan_ranges:
vlan_ids |= set(moves.xrange(vlan_range[0], vlan_range[1] + 1))
# remove from table unallocated vlans not currently allocatable
_remove_non_allocatable_vlans(session, allocations,
physical_network, vlan_ids)
# add missing allocatable vlans to table
_add_missing_allocatable_vlans(session, physical_network, vlan_ids)
# remove from table unallocated vlans for any unconfigured physical
# networks
_remove_unconfigured_vlans(session, allocations)
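# Example shape of `network_vlan_ranges` (an assumption inferred from how the
# ranges are consumed above):
#   {'physnet1': [(100, 110), (200, 205)]}
# makes VLANs 100-110 and 200-205 allocatable on physical network 'physnet1'.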
def get_network_state(physical_network, segmentation_id):
"""Get entry of specified network."""
session = db.get_session()
qry = session.query(mlnx_models_v2.SegmentationIdAllocation)
qry = qry.filter_by(physical_network=physical_network,
segmentation_id=segmentation_id)
return qry.first()
def reserve_network(session):
with session.begin(subtransactions=True):
entry = (session.query(mlnx_models_v2.SegmentationIdAllocation).
filter_by(allocated=False).
with_lockmode('update').
first())
if not entry:
raise n_exc.NoNetworkAvailable()
LOG.debug(_("Reserving vlan %(seg_id)s on physical network "
"%(net)s from pool"),
{'seg_id': entry.segmentation_id,
'net': entry.physical_network})
entry.allocated = True
return (entry.physical_network, entry.segmentation_id)
def reserve_specific_network(session, physical_network, segmentation_id):
with session.begin(subtransactions=True):
log_args = {'seg_id': segmentation_id, 'phy_net': physical_network}
try:
entry = (session.query(mlnx_models_v2.SegmentationIdAllocation).
filter_by(physical_network=physical_network,
segmentation_id=segmentation_id).
with_lockmode('update').one())
if entry.allocated:
raise n_exc.VlanIdInUse(vlan_id=segmentation_id,
physical_network=physical_network)
LOG.debug(_("Reserving specific vlan %(seg_id)s "
"on physical network %(phy_net)s from pool"),
log_args)
entry.allocated = True
except exc.NoResultFound:
LOG.debug(_("Reserving specific vlan %(seg_id)s on "
"physical network %(phy_net)s outside pool"),
log_args)
entry = mlnx_models_v2.SegmentationIdAllocation(physical_network,
segmentation_id)
entry.allocated = True
session.add(entry)
def release_network(session, physical_network,
segmentation_id, network_vlan_ranges):
with session.begin(subtransactions=True):
log_args = {'seg_id': segmentation_id, 'phy_net': physical_network}
try:
state = (session.query(mlnx_models_v2.SegmentationIdAllocation).
filter_by(physical_network=physical_network,
segmentation_id=segmentation_id).
with_lockmode('update').
one())
state.allocated = False
inside = False
for vlan_range in network_vlan_ranges.get(physical_network, []):
if (segmentation_id >= vlan_range[0] and
segmentation_id <= vlan_range[1]):
inside = True
break
if inside:
LOG.debug(_("Releasing vlan %(seg_id)s "
"on physical network "
"%(phy_net)s to pool"),
log_args)
else:
LOG.debug(_("Releasing vlan %(seg_id)s "
"on physical network "
"%(phy_net)s outside pool"),
log_args)
session.delete(state)
except exc.NoResultFound:
LOG.warning(_("vlan_id %(seg_id)s on physical network "
"%(phy_net)s not found"),
log_args)
def add_network_binding(session, network_id, network_type,
physical_network, vlan_id):
with session.begin(subtransactions=True):
binding = mlnx_models_v2.NetworkBinding(network_id, network_type,
physical_network, vlan_id)
session.add(binding)
def get_network_binding(session, network_id):
return (session.query(mlnx_models_v2.NetworkBinding).
filter_by(network_id=network_id).first())
def add_port_profile_binding(session, port_id, vnic_type):
with session.begin(subtransactions=True):
binding = mlnx_models_v2.PortProfileBinding(port_id, vnic_type)
session.add(binding)
def get_port_profile_binding(session, port_id):
return (session.query(mlnx_models_v2.PortProfileBinding).
filter_by(port_id=port_id).first())
def get_port_from_device(device):
"""Get port from database."""
LOG.debug(_("get_port_from_device() called"))
session = db.get_session()
sg_binding_port = sg_db.SecurityGroupPortBinding.port_id
query = session.query(models_v2.Port,
sg_db.SecurityGroupPortBinding.security_group_id)
query = query.outerjoin(sg_db.SecurityGroupPortBinding,
models_v2.Port.id == sg_binding_port)
query = query.filter(models_v2.Port.id.startswith(device))
port_and_sgs = query.all()
if not port_and_sgs:
return
port = port_and_sgs[0][0]
plugin = manager.NeutronManager.get_plugin()
port_dict = plugin._make_port_dict(port)
port_dict['security_groups'] = [
sg_id for port_in_db, sg_id in port_and_sgs if sg_id
]
port_dict['security_group_rules'] = []
port_dict['security_group_source_groups'] = []
port_dict['fixed_ips'] = [ip['ip_address']
for ip in port['fixed_ips']]
return port_dict
def get_port_from_device_mac(device_mac):
"""Get port from database."""
LOG.debug(_("Get_port_from_device_mac() called"))
session = db.get_session()
qry = session.query(models_v2.Port).filter_by(mac_address=device_mac)
return qry.first()
def set_port_status(port_id, status):
"""Set the port status."""
LOG.debug(_("Set_port_status as %s called"), status)
session = db.get_session()
try:
port = session.query(models_v2.Port).filter_by(id=port_id).one()
port['status'] = status
session.merge(port)
session.flush()
except exc.NoResultFound:
raise n_exc.PortNotFound(port_id=port_id)
| apache-2.0 |
rowillia/buck | src/com/facebook/buck/python/__test_main__.py | 1 | 13102 | #!/usr/local/bin/python2.6 -tt
#
# Copyright 2004-present Facebook. All rights reserved.
#
"""
This file contains the main module code for buck python test programs.
By default, this is the main module for all python_test() rules. However,
rules can also specify their own custom main_module. If you write your own
main module, you can import this module as tools.test.stubs.fbpyunit, to access
any of its code to help implement your main module.
"""
from __future__ import print_function
import contextlib
import json
import optparse
import os
import re
import sys
import time
import traceback
import unittest
class TestStatus(object):
ABORTED = 'FAILURE'
PASSED = 'SUCCESS'
FAILED = 'FAILURE'
EXPECTED_FAILURE = 'SUCCESS'
UNEXPECTED_SUCCESS = 'FAILURE'
SKIPPED = 'ASSUMPTION_VIOLATION'
class TeeStream(object):
def __init__(self, *streams):
self._streams = streams
def write(self, data):
for stream in self._streams:
stream.write(data)
def flush(self):
for stream in self._streams:
stream.flush()
def isatty(self):
return False
class CallbackStream(object):
def __init__(self, callback):
self._callback = callback
def write(self, data):
self._callback(data)
def flush(self):
pass
def isatty(self):
return False
class FbJsonTestResult(unittest._TextTestResult):
"""
Our own TestResult class that outputs data in a format that can be easily
parsed by fbmake's test runner.
"""
def __init__(self, stream, descriptions, verbosity, suite):
super(FbJsonTestResult, self).__init__(stream, descriptions, verbosity)
self._suite = suite
self._results = []
self._current_test = None
self._saved_stdout = sys.stdout
self._saved_stderr = sys.stderr
def getResults(self):
return self._results
def startTest(self, test):
super(FbJsonTestResult, self).startTest(test)
sys.stdout = CallbackStream(self.addStdout)
sys.stderr = CallbackStream(self.addStderr)
self._current_test = test
self._test_start_time = time.time()
self._current_status = TestStatus.ABORTED
self._messages = []
self._stacktrace = None
self._stdout = ''
self._stderr = ''
def _find_next_test(self, suite):
"""
Find the next test that has not been run.
"""
for test in suite:
# We identify test suites by test that are iterable (as is done in
# the builtin python test harness). If we see one, recurse on it.
if hasattr(test, '__iter__'):
test = self._find_next_test(test)
# The builtin python test harness sets test references to `None`
# after they have run, so we know we've found the next test up
# if it's not `None`.
if test is not None:
return test
def stopTest(self, test):
sys.stdout = self._saved_stdout
sys.stderr = self._saved_stderr
super(FbJsonTestResult, self).stopTest(test)
        # If a failure occurred during module/class setup, then this "test" may
# actually be a `_ErrorHolder`, which doesn't contain explicit info
# about the upcoming test. Since we really only care about the test
# name field (i.e. `_testMethodName`), we use that to detect an actual
# test cases, and fall back to looking the test up from the suite
# otherwise.
if not hasattr(test, '_testMethodName'):
test = self._find_next_test(self._suite)
self._results.append({
'testCaseName': '{0}.{1}'.format(
test.__class__.__module__,
test.__class__.__name__),
'testCase': test._testMethodName,
'type': self._current_status,
'time': int((time.time() - self._test_start_time) * 1000),
'message': os.linesep.join(self._messages),
'stacktrace': self._stacktrace,
'stdOut': self._stdout,
'stdErr': self._stderr,
})
self._current_test = None
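    # A single emitted record looks roughly like (illustrative values):
    #   {"testCaseName": "mypkg.test_mod.MyTest", "testCase": "test_ok",
    #    "type": "SUCCESS", "time": 12, "message": "", "stacktrace": None,
    #    "stdOut": "", "stdErr": ""}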
@contextlib.contextmanager
def _withTest(self, test):
self.startTest(test)
yield
self.stopTest(test)
def _setStatus(self, test, status, message=None, stacktrace=None):
assert test == self._current_test
self._current_status = status
self._stacktrace = stacktrace
if message is not None:
if message.endswith(os.linesep):
message = message[:-1]
self._messages.append(message)
def setStatus(self, test, status, message=None, stacktrace=None):
# addError() may be called outside of a test if one of the shared
# fixtures (setUpClass/tearDownClass/setUpModule/tearDownModule)
# throws an error.
#
# In this case, create a fake test result to record the error.
if self._current_test is None:
with self._withTest(test):
self._setStatus(test, status, message, stacktrace)
else:
self._setStatus(test, status, message, stacktrace)
def setException(self, test, status, excinfo):
exctype, value, tb = excinfo
self.setStatus(
test, status,
'{0}: {1}'.format(exctype.__name__, value),
''.join(traceback.format_tb(tb)))
def addSuccess(self, test):
super(FbJsonTestResult, self).addSuccess(test)
self.setStatus(test, TestStatus.PASSED)
def addError(self, test, err):
super(FbJsonTestResult, self).addError(test, err)
self.setException(test, TestStatus.ABORTED, err)
def addFailure(self, test, err):
super(FbJsonTestResult, self).addFailure(test, err)
self.setException(test, TestStatus.FAILED, err)
def addSkip(self, test, reason):
super(FbJsonTestResult, self).addSkip(test, reason)
self.setStatus(test, TestStatus.SKIPPED, 'Skipped: %s' % (reason,))
def addExpectedFailure(self, test, err):
super(FbJsonTestResult, self).addExpectedFailure(test, err)
self.setException(test, TestStatus.EXPECTED_FAILURE, err)
def addUnexpectedSuccess(self, test):
super(FbJsonTestResult, self).addUnexpectedSuccess(test)
self.setStatus(test, TestStatus.UNEXPECTED_SUCCESS,
'Unexpected success')
def addStdout(self, val):
self._stdout += val
def addStderr(self, val):
self._stderr += val
class FbJsonTestRunner(unittest.TextTestRunner):
def __init__(self, suite, **kwargs):
super(FbJsonTestRunner, self).__init__(**kwargs)
self._suite = suite
def _makeResult(self):
return FbJsonTestResult(
self.stream,
self.descriptions,
self.verbosity,
self._suite)
def _format_test_name(test_class, attrname):
"""
Format the name of the test buck-style.
"""
return '{0}.{1}#{2}'.format(
test_class.__module__,
test_class.__name__,
attrname)
class RegexTestLoader(unittest.TestLoader):
def __init__(self, regex=None):
        # super() must name this class, not the base, or TestLoader's own
        # initialization is skipped
        super(RegexTestLoader, self).__init__()
self.regex = regex
def getTestCaseNames(self, testCaseClass):
"""
Return a sorted sequence of method names found within testCaseClass
"""
testFnNames = super(RegexTestLoader, self).getTestCaseNames(
testCaseClass)
matched = []
for attrname in testFnNames:
fullname = _format_test_name(testCaseClass, attrname)
if self.regex is None or re.search(self.regex, fullname):
matched.append(attrname)
return matched
class Loader(object):
def __init__(self, modules, regex=None):
self.modules = modules
self.regex = regex
def load_all(self):
loader = RegexTestLoader(self.regex)
test_suite = unittest.TestSuite()
for module_name in self.modules:
__import__(module_name, level=0)
module = sys.modules[module_name]
module_suite = loader.loadTestsFromModule(module)
test_suite.addTest(module_suite)
return test_suite
def load_args(self, args):
loader = RegexTestLoader(self.regex)
suites = []
for arg in args:
suite = loader.loadTestsFromName(arg)
# loadTestsFromName() can only process names that refer to
# individual test functions or modules. It can't process package
# names. If there were no module/function matches, check to see if
# this looks like a package name.
if suite.countTestCases() != 0:
suites.append(suite)
continue
# Load all modules whose name is <arg>.<something>
prefix = arg + '.'
for module in self.modules:
if module.startswith(prefix):
suite = loader.loadTestsFromName(module)
suites.append(suite)
return loader.suiteClass(suites)
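# Example: with modules ['pkg.test_a', 'pkg.test_b'], load_args(['pkg'])
# finds no direct module/function match and falls back to loading both
# 'pkg.*' modules via the prefix scan above.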
class MainProgram(object):
'''
This class implements the main program. It can be subclassed by
users who wish to customize some parts of the main program.
(Adding additional command line options, customizing test loading, etc.)
'''
DEFAULT_VERBOSITY = 2
def __init__(self, argv):
self.init_option_parser()
self.parse_options(argv)
def init_option_parser(self):
usage = '%prog [options] [TEST] ...'
op = optparse.OptionParser(usage=usage, add_help_option=False)
self.option_parser = op
op.add_option(
'-o', '--output',
help='Write results to a file in a JSON format to be read by Buck')
op.add_option(
'-l', '--list-tests', action='store_true', dest='list',
default=False, help='List tests and exit')
op.add_option(
'-L', '--list-format', dest='list_format',
choices=['buck', 'python'], default='python',
help='List tests format')
op.add_option(
'-q', '--quiet', action='count', default=0,
help='Decrease the verbosity (may be specified multiple times)')
op.add_option(
'-v', '--verbosity',
action='count', default=self.DEFAULT_VERBOSITY,
help='Increase the verbosity (may be specified multiple times)')
op.add_option(
'-r', '--regex', default=None,
help='Regex to apply to tests, to only run those tests')
op.add_option(
'-?', '--help', action='help',
help='Show this help message and exit')
def parse_options(self, argv):
self.options, self.test_args = self.option_parser.parse_args(argv[1:])
self.options.verbosity -= self.options.quiet
def create_loader(self):
import __test_modules__
return Loader(__test_modules__.TEST_MODULES, self.options.regex)
def load_tests(self):
loader = self.create_loader()
if self.test_args:
return loader.load_args(self.test_args)
else:
return loader.load_all()
def get_tests(self, test_suite):
tests = []
for test in test_suite:
if isinstance(test, unittest.TestSuite):
tests.extend(self.get_tests(test))
else:
tests.append(test)
return tests
def run(self):
test_suite = self.load_tests()
if self.options.list:
for test in self.get_tests(test_suite):
if self.options.list_format == 'python':
name = str(test)
elif self.options.list_format == 'buck':
method_name = getattr(test, '_testMethodName', '')
name = _format_test_name(test.__class__, method_name)
else:
raise Exception('Bad test list format: %s' % (
self.options.list_format,))
print(name)
return 0
else:
result = self.run_tests(test_suite)
if self.options.output is not None:
with open(self.options.output, 'w') as f:
json.dump(result.getResults(), f, indent=4, sort_keys=True)
return 0
#return 0 if result.wasSuccessful() else 1
def run_tests(self, test_suite):
# Install a signal handler to catch Ctrl-C and display the results
        # (but only when running on Python > 2.6).
if sys.version_info[0] > 2 or sys.version_info[1] > 6:
unittest.installHandler()
# Run the tests
runner = FbJsonTestRunner(test_suite, verbosity=self.options.verbosity)
result = runner.run(test_suite)
return result
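# Subclassing sketch (hypothetical option; illustrates the customization
# hook described in the MainProgram docstring):
#   class MyMainProgram(MainProgram):
#       def init_option_parser(self):
#           super(MyMainProgram, self).init_option_parser()
#           self.option_parser.add_option(
#               '--shard', type='int', default=0,
#               help='Run only every Nth test (hypothetical)')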
def main(argv):
    return MainProgram(argv).run()
if __name__ == '__main__':
sys.exit(main(sys.argv))
| apache-2.0 |
NORMA-Inc/AtEar | module/NetBIOS.py | 4 | 3922 | __author__ = 'root'
import random, socket, time, select
import struct, string, re
import types, errno
class NetBIOS():
def __init__(self, broadcast=True, listen_port=0):
self.broadcast = broadcast
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.HEADER_STRUCT_FORMAT = '>HHHHHH'
self.HEADER_STRUCT_SIZE = struct.calcsize(self.HEADER_STRUCT_FORMAT)
if self.broadcast:
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
if listen_port:
self.sock.bind(( '', listen_port ))
def write(self, data, ip, port):
self.sock.sendto(data, ( ip, port ))
def queryIPForName(self, ip, port=137, timeout=5):
TYPE_SERVER = 0x20
trn_id = random.randint(1, 0xFFFF)
data = self.prepareNetNameQuery(trn_id)
self.write(data, ip, port)
ret = self._pollForQueryPacket(trn_id, timeout)
if ret:
return map(lambda s: s[0], filter(lambda s: s[1] == TYPE_SERVER, ret))
else:
return None
#
# Contributed by Jason Anderson
#
def _pollForQueryPacket(self, wait_trn_id, timeout):
end_time = time.time() + timeout
while True:
try:
_timeout = end_time - time.time()
if _timeout <= 0:
return None
ready, _, _ = select.select([ self.sock.fileno() ], [ ], [ ], _timeout)
if not ready:
return None
data, _ = self.sock.recvfrom(0xFFFF)
if len(data) == 0:
return None
trn_id, ret = self.decodeIPQueryPacket(data)
if trn_id == wait_trn_id:
return ret
except select.error, ex:
if type(ex) is types.TupleType:
if ex[0] != errno.EINTR and ex[0] != errno.EAGAIN:
return None
else:
return None
def prepareNetNameQuery(self, trn_id):
header = struct.pack(self.HEADER_STRUCT_FORMAT,
trn_id, 0x0010, 1, 0, 0, 0)
payload = self.encode_name('*', 0) + '\x00\x21\x00\x01'
return header + payload
def decodeIPQueryPacket(self, data):
if len(data) < self.HEADER_STRUCT_SIZE:
            # keep the (trn_id, result) shape that the caller unpacks
            return None, None
trn_id, code, question_count, answer_count, authority_count, additional_count = struct.unpack(self.HEADER_STRUCT_FORMAT, data[:self.HEADER_STRUCT_SIZE])
is_response = bool((code >> 15) & 0x01)
opcode = (code >> 11) & 0x0F
flags = (code >> 4) & 0x7F
rcode = code & 0x0F
numnames = struct.unpack('B', data[self.HEADER_STRUCT_SIZE + 44])[0]
if numnames > 0:
ret = [ ]
offset = self.HEADER_STRUCT_SIZE + 45
for i in range(0, numnames):
mynme = data[offset:offset + 15]
mynme = mynme.strip()
ret.append(( mynme, ord(data[offset+15]) ))
offset += 18
return trn_id, ret
else:
return trn_id, None
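    # Usage sketch (assumes a host with the NetBIOS name service on UDP/137):
    #   nb = NetBIOS()
    #   names = nb.queryIPForName('192.168.1.10', timeout=2)
    # encode_name() below applies RFC 1001/1002 first-level encoding: each
    # byte of the 16-byte padded name splits into two nibbles, each mapped
    # to chr(ord('A') + nibble), so the wildcard '*' (0x2A) becomes 'CK'
    # followed by thirty 'A's.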
def encode_name(self, name, type, scope = None):
if name == '*':
name = name + '\0' * 15
elif len(name) > 15:
name = name[:15] + chr(type)
else:
name = string.ljust(name, 15) + chr(type)
def _do_first_level_encoding(m):
s = ord(m.group(0))
return string.uppercase[s >> 4] + string.uppercase[s & 0x0f]
encoded_name = chr(len(name) * 2) + re.sub('.', _do_first_level_encoding, name)
if scope:
encoded_scope = ''
for s in string.split(scope, '.'):
encoded_scope = encoded_scope + chr(len(s)) + s
return encoded_name + encoded_scope + '\0'
else:
            return encoded_name + '\0'
 | apache-2.0 |
blossomica/airmozilla | mysql2postgres/py-mysql2pgsql-0.1.5/tests/test_writer.py | 26 | 4467 | from __future__ import with_statement, absolute_import
import os
import sys
import re
import tempfile
import unittest
from . import WithReader
sys.path.append(os.path.abspath('../'))
from mysql2pgsql.lib.postgres_writer import PostgresWriter
from mysql2pgsql.lib.postgres_file_writer import PostgresFileWriter
from mysql2pgsql.lib.postgres_db_writer import PostgresDbWriter
def squeeze(val):
    # collapse runs of ASCII control characters and spaces to single spaces
    return re.sub(r"[\x00-\x20]+", " ", val).strip()
class WithTables(WithReader):
def setUp(self):
super(WithTables, self).setUp()
self.table1 = next((t for t in self.reader.tables if t.name == 'type_conversion_test_1'), None)
self.table2 = next((t for t in self.reader.tables if t.name == 'type_conversion_test_2'), None)
assert self.table1
assert self.table2
class TestPostgresWriter(WithTables):
def setUp(self):
super(self.__class__, self).setUp()
self.writer = PostgresWriter()
assert self.writer
def test_truncate(self):
trunc_cmds = self.writer.truncate(self.table1)
assert len(trunc_cmds) == 2
trunc_stmt, reset_seq = trunc_cmds
assert squeeze(trunc_stmt) == 'TRUNCATE "%s" CASCADE;' % self.table1.name
if reset_seq:
self.assertRegexpMatches(squeeze(reset_seq),
"^SELECT pg_catalog.setval\(pg_get_serial_sequence\('%s', 'id'\), \d+, true\);$" % self.table1.name)
def test_write_table(self):
write_table_cmds = self.writer.write_table(self.table1)
assert len(write_table_cmds) == 2
table_cmds, seq_cmds = write_table_cmds
assert len(table_cmds) == 2
assert squeeze(table_cmds[0]) == 'DROP TABLE IF EXISTS "%s" CASCADE;' % self.table1.name
assert 'CREATE TABLE "%s"' % self.table1.name in table_cmds[1]
# assert self.assertRegexpMatches(squeeze(table_cmds[1]),
# '^CREATE TABLE "%s" \(.*\) WITHOUT OIDS;$' % self.table1.name)
if seq_cmds:
assert len(seq_cmds) == 3
self.assertRegexpMatches(squeeze(seq_cmds[0]),
'^DROP SEQUENCE IF EXISTS %s_([^\s]+)_seq CASCADE;$' % self.table1.name)
self.assertRegexpMatches(squeeze(seq_cmds[1]),
'^CREATE SEQUENCE %s_([^\s]+)_seq INCREMENT BY 1 NO MAXVALUE NO MINVALUE CACHE 1;$' % self.table1.name)
self.assertRegexpMatches(squeeze(seq_cmds[2]),
"^SELECT pg_catalog.setval\('%s_([^\s]+)_seq', \d+, true\);$" % self.table1.name)
    def test_write_indexes(self):
index_cmds = self.writer.write_indexes(self.table1)
assert len(index_cmds) == 9
def test_write_constraints(self):
constraint_cmds = self.writer.write_constraints(self.table2)
assert constraint_cmds
class WithOutput(WithTables):
def setUp(self):
super(WithOutput, self).setUp()
def tearDown(self):
super(WithOutput, self).tearDown()
class TestPostgresFileWriter(WithOutput):
def setUp(self):
super(self.__class__, self).setUp()
self.outfile = tempfile.NamedTemporaryFile()
self.writer = PostgresFileWriter(self.outfile)
def tearDown(self):
super(self.__class__, self).tearDown()
self.writer.close()
def test_truncate(self):
self.writer.truncate(self.table1)
def test_write_table(self):
self.writer.write_table(self.table1)
def test_write_indexes(self):
self.writer.write_indexes(self.table1)
def test_write_constraints(self):
self.writer.write_constraints(self.table2)
def test_write_contents(self):
self.writer.write_contents(self.table1, self.reader)
class TestPostgresDbWriter(WithOutput):
def setUp(self):
super(self.__class__, self).setUp()
self.writer = PostgresDbWriter(self.config.options['destination']['postgres'], True)
def tearDown(self):
super(self.__class__, self).tearDown()
self.writer.close()
def test_truncate(self):
self.writer.truncate(self.table1)
def test_write_table_indexes_and_constraints(self):
self.writer.write_table(table=self.table1)
self.writer.write_indexes(self.table1)
self.writer.write_constraints(self.table2)
def test_write_contents(self):
self.writer.write_contents(self.table1, self.reader)
| bsd-3-clause |
aperigault/ansible | lib/ansible/modules/remote_management/oneview/oneview_san_manager.py | 146 | 7717 | #!/usr/bin/python
# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: oneview_san_manager
short_description: Manage OneView SAN Manager resources
description:
- Provides an interface to manage SAN Manager resources. Can create, update, or delete.
version_added: "2.4"
requirements:
- hpOneView >= 3.1.1
author:
- Felipe Bulsoni (@fgbulsoni)
- Thiago Miotto (@tmiotto)
- Adriane Cardozo (@adriane-cardozo)
options:
state:
description:
      - Indicates the desired state for the SAN Manager resource.
- C(present) ensures data properties are compliant with OneView.
- C(absent) removes the resource from OneView, if it exists.
- C(connection_information_set) updates the connection information for the SAN Manager. This operation is non-idempotent.
default: present
choices: [present, absent, connection_information_set]
data:
description:
- List with SAN Manager properties.
required: true
extends_documentation_fragment:
- oneview
- oneview.validateetag
'''
EXAMPLES = '''
- name: Creates a Device Manager for the Brocade SAN provider with the given hostname and credentials
oneview_san_manager:
config: /etc/oneview/oneview_config.json
state: present
data:
providerDisplayName: Brocade Network Advisor
connectionInfo:
- name: Host
value: 172.18.15.1
- name: Port
value: 5989
- name: Username
value: username
- name: Password
value: password
- name: UseSsl
value: true
delegate_to: localhost
- name: Ensure a Device Manager for the Cisco SAN Provider is present
oneview_san_manager:
config: /etc/oneview/oneview_config.json
state: present
data:
name: 172.18.20.1
providerDisplayName: Cisco
connectionInfo:
- name: Host
value: 172.18.20.1
- name: SnmpPort
value: 161
- name: SnmpUserName
value: admin
- name: SnmpAuthLevel
value: authnopriv
- name: SnmpAuthProtocol
value: sha
- name: SnmpAuthString
value: password
delegate_to: localhost
- name: Sets the SAN Manager connection information
oneview_san_manager:
config: /etc/oneview/oneview_config.json
state: connection_information_set
data:
connectionInfo:
- name: Host
value: '172.18.15.1'
- name: Port
value: '5989'
- name: Username
value: 'username'
- name: Password
value: 'password'
- name: UseSsl
value: true
delegate_to: localhost
- name: Refreshes the SAN Manager
oneview_san_manager:
config: /etc/oneview/oneview_config.json
state: present
data:
name: 172.18.15.1
refreshState: RefreshPending
delegate_to: localhost
- name: Delete the SAN Manager recently created
oneview_san_manager:
config: /etc/oneview/oneview_config.json
state: absent
data:
name: '172.18.15.1'
delegate_to: localhost
'''
RETURN = '''
san_manager:
description: Has the OneView facts about the SAN Manager.
returned: On state 'present'. Can be null.
type: dict
'''
from ansible.module_utils.oneview import OneViewModuleBase, OneViewModuleValueError
class SanManagerModule(OneViewModuleBase):
MSG_CREATED = 'SAN Manager created successfully.'
MSG_UPDATED = 'SAN Manager updated successfully.'
MSG_DELETED = 'SAN Manager deleted successfully.'
MSG_ALREADY_PRESENT = 'SAN Manager is already present.'
MSG_ALREADY_ABSENT = 'SAN Manager is already absent.'
MSG_SAN_MANAGER_PROVIDER_DISPLAY_NAME_NOT_FOUND = "The provider '{0}' was not found."
argument_spec = dict(
state=dict(type='str', default='present', choices=['absent', 'present', 'connection_information_set']),
data=dict(type='dict', required=True)
)
def __init__(self):
super(SanManagerModule, self).__init__(additional_arg_spec=self.argument_spec, validate_etag_support=True)
self.resource_client = self.oneview_client.san_managers
def execute_module(self):
if self.data.get('connectionInfo'):
for connection_hash in self.data.get('connectionInfo'):
if connection_hash.get('name') == 'Host':
resource_name = connection_hash.get('value')
elif self.data.get('name'):
resource_name = self.data.get('name')
else:
msg = 'A "name" or "connectionInfo" must be provided inside the "data" field for this operation. '
msg += 'If a "connectionInfo" is provided, the "Host" name is considered as the "name" for the resource.'
            raise OneViewModuleValueError(msg)
resource = self.resource_client.get_by_name(resource_name)
if self.state == 'present':
changed, msg, san_manager = self._present(resource)
return dict(changed=changed, msg=msg, ansible_facts=dict(san_manager=san_manager))
elif self.state == 'absent':
return self.resource_absent(resource, method='remove')
elif self.state == 'connection_information_set':
changed, msg, san_manager = self._connection_information_set(resource)
return dict(changed=changed, msg=msg, ansible_facts=dict(san_manager=san_manager))
def _present(self, resource):
if not resource:
provider_uri = self.data.get('providerUri', self._get_provider_uri_by_display_name(self.data))
return True, self.MSG_CREATED, self.resource_client.add(self.data, provider_uri)
else:
merged_data = resource.copy()
merged_data.update(self.data)
# Remove 'connectionInfo' from comparison, since it is not possible to validate it.
resource.pop('connectionInfo', None)
merged_data.pop('connectionInfo', None)
if self.compare(resource, merged_data):
return False, self.MSG_ALREADY_PRESENT, resource
else:
updated_san_manager = self.resource_client.update(resource=merged_data, id_or_uri=resource['uri'])
return True, self.MSG_UPDATED, updated_san_manager
def _connection_information_set(self, resource):
if not resource:
return self._present(resource)
else:
merged_data = resource.copy()
merged_data.update(self.data)
merged_data.pop('refreshState', None)
if not self.data.get('connectionInfo', None):
raise OneViewModuleValueError('A connectionInfo field is required for this operation.')
updated_san_manager = self.resource_client.update(resource=merged_data, id_or_uri=resource['uri'])
return True, self.MSG_UPDATED, updated_san_manager
def _get_provider_uri_by_display_name(self, data):
display_name = data.get('providerDisplayName')
provider_uri = self.resource_client.get_provider_uri(display_name)
if not provider_uri:
raise OneViewModuleValueError(self.MSG_SAN_MANAGER_PROVIDER_DISPLAY_NAME_NOT_FOUND.format(display_name))
return provider_uri
def main():
SanManagerModule().run()
if __name__ == '__main__':
main()
| gpl-3.0 |
hsum/sqlalchemy | lib/sqlalchemy/engine/util.py | 81 | 2338 | # engine/util.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .. import util
def connection_memoize(key):
"""Decorator, memoize a function in a connection.info stash.
Only applicable to functions which take no arguments other than a
connection. The memo will be stored in ``connection.info[key]``.
"""
@util.decorator
def decorated(fn, self, connection):
connection = connection.connect()
try:
return connection.info[key]
except KeyError:
connection.info[key] = val = fn(self, connection)
return val
return decorated
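# Usage sketch (hypothetical method; illustrates the decorator above):
#   @connection_memoize('_server_version')
#   def server_version(self, connection):
#       return connection.scalar("select version()")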
def py_fallback():
def _distill_params(multiparams, params):
"""Given arguments from the calling form *multiparams, **params,
return a list of bind parameter structures, usually a list of
dictionaries.
In the case of 'raw' execution which accepts positional parameters,
it may be a list of tuples or lists.
"""
if not multiparams:
if params:
return [params]
else:
return []
elif len(multiparams) == 1:
zero = multiparams[0]
if isinstance(zero, (list, tuple)):
if not zero or hasattr(zero[0], '__iter__') and \
not hasattr(zero[0], 'strip'):
# execute(stmt, [{}, {}, {}, ...])
# execute(stmt, [(), (), (), ...])
return zero
else:
# execute(stmt, ("value", "value"))
return [zero]
elif hasattr(zero, 'keys'):
# execute(stmt, {"key":"value"})
return [zero]
else:
# execute(stmt, "value")
return [[zero]]
else:
if hasattr(multiparams[0], '__iter__') and \
not hasattr(multiparams[0], 'strip'):
return multiparams
else:
return [multiparams]
return locals()
try:
from sqlalchemy.cutils import _distill_params
except ImportError:
globals().update(py_fallback())
| mit |
campagnola/acq4 | acq4/pyqtgraph/graphicsItems/TextItem.py | 3 | 8282 | import numpy as np
from ..Qt import QtCore, QtGui
from ..Point import Point
from .. import functions as fn
from .GraphicsObject import GraphicsObject
class TextItem(GraphicsObject):
"""
GraphicsItem displaying unscaled text (the text will always appear normal even inside a scaled ViewBox).
"""
def __init__(self, text='', color=(200,200,200), html=None, anchor=(0,0),
border=None, fill=None, angle=0, rotateAxis=None):
"""
============== =================================================================================
**Arguments:**
*text* The text to display
*color* The color of the text (any format accepted by pg.mkColor)
*html* If specified, this overrides both *text* and *color*
*anchor* A QPointF or (x,y) sequence indicating what region of the text box will
be anchored to the item's position. A value of (0,0) sets the upper-left corner
of the text box to be at the position specified by setPos(), while a value of (1,1)
sets the lower-right corner.
*border* A pen to use when drawing the border
*fill* A brush to use when filling within the border
*angle* Angle in degrees to rotate text. Default is 0; text will be displayed upright.
*rotateAxis* If None, then a text angle of 0 always points along the +x axis of the scene.
If a QPointF or (x,y) sequence is given, then it represents a vector direction
in the parent's coordinate system that the 0-degree line will be aligned to. This
Allows text to follow both the position and orientation of its parent while still
discarding any scale and shear factors.
============== =================================================================================
The effects of the `rotateAxis` and `angle` arguments are added independently. So for example:
* rotateAxis=None, angle=0 -> normal horizontal text
* rotateAxis=None, angle=90 -> normal vertical text
* rotateAxis=(1, 0), angle=0 -> text aligned with x axis of its parent
* rotateAxis=(0, 1), angle=0 -> text aligned with y axis of its parent
* rotateAxis=(1, 0), angle=90 -> text orthogonal to x axis of its parent
"""
self.anchor = Point(anchor)
self.rotateAxis = None if rotateAxis is None else Point(rotateAxis)
#self.angle = 0
GraphicsObject.__init__(self)
self.textItem = QtGui.QGraphicsTextItem()
self.textItem.setParentItem(self)
self._lastTransform = None
self._lastScene = None
self._bounds = QtCore.QRectF()
if html is None:
self.setColor(color)
self.setText(text)
else:
self.setHtml(html)
self.fill = fn.mkBrush(fill)
self.border = fn.mkPen(border)
self.setAngle(angle)
def setText(self, text, color=None):
"""
Set the text of this item.
This method sets the plain text of the item; see also setHtml().
"""
if color is not None:
self.setColor(color)
self.setPlainText(text)
def setPlainText(self, text):
"""
Set the plain text to be rendered by this item.
See QtGui.QGraphicsTextItem.setPlainText().
"""
if text != self.toPlainText():
self.textItem.setPlainText(text)
self.updateTextPos()
def toPlainText(self):
return self.textItem.toPlainText()
def setHtml(self, html):
"""
Set the HTML code to be rendered by this item.
See QtGui.QGraphicsTextItem.setHtml().
"""
if self.toHtml() != html:
self.textItem.setHtml(html)
self.updateTextPos()
def toHtml(self):
return self.textItem.toHtml()
def setTextWidth(self, *args):
"""
Set the width of the text.
If the text requires more space than the width limit, then it will be
wrapped into multiple lines.
See QtGui.QGraphicsTextItem.setTextWidth().
"""
self.textItem.setTextWidth(*args)
self.updateTextPos()
def setFont(self, *args):
"""
Set the font for this text.
See QtGui.QGraphicsTextItem.setFont().
"""
self.textItem.setFont(*args)
self.updateTextPos()
def setAngle(self, angle):
self.angle = angle
self.updateTransform()
def setAnchor(self, anchor):
self.anchor = Point(anchor)
self.updateTextPos()
def setColor(self, color):
"""
Set the color for this text.
See QtGui.QGraphicsItem.setDefaultTextColor().
"""
self.color = fn.mkColor(color)
self.textItem.setDefaultTextColor(self.color)
def updateTextPos(self):
# update text position to obey anchor
r = self.textItem.boundingRect()
tl = self.textItem.mapToParent(r.topLeft())
br = self.textItem.mapToParent(r.bottomRight())
offset = (br - tl) * self.anchor
self.textItem.setPos(-offset)
### Needed to maintain font size when rendering to image with increased resolution
#self.textItem.resetTransform()
##self.textItem.rotate(self.angle)
#if self._exportOpts is not False and 'resolutionScale' in self._exportOpts:
#s = self._exportOpts['resolutionScale']
#self.textItem.scale(s, s)
def boundingRect(self):
return self.textItem.mapToParent(self.textItem.boundingRect()).boundingRect()
def viewTransformChanged(self):
# called whenever view transform has changed.
# Do this here to avoid double-updates when view changes.
self.updateTransform()
def paint(self, p, *args):
# this is not ideal because it requires the transform to be updated at every draw.
# ideally, we would have a sceneTransformChanged event to react to..
s = self.scene()
ls = self._lastScene
if s is not ls:
if ls is not None:
ls.sigPrepareForPaint.disconnect(self.updateTransform)
self._lastScene = s
if s is not None:
s.sigPrepareForPaint.connect(self.updateTransform)
self.updateTransform()
p.setTransform(self.sceneTransform())
if self.border.style() != QtCore.Qt.NoPen or self.fill.style() != QtCore.Qt.NoBrush:
p.setPen(self.border)
p.setBrush(self.fill)
p.setRenderHint(p.Antialiasing, True)
p.drawPolygon(self.textItem.mapToParent(self.textItem.boundingRect()))
def setVisible(self, v):
GraphicsObject.setVisible(self, v)
if v:
self.updateTransform()
def updateTransform(self):
if not self.isVisible():
return
# update transform such that this item has the correct orientation
# and scaling relative to the scene, but inherits its position from its
# parent.
# This is similar to setting ItemIgnoresTransformations = True, but
# does not break mouse interaction and collision detection.
p = self.parentItem()
if p is None:
pt = QtGui.QTransform()
else:
pt = p.sceneTransform()
if pt == self._lastTransform:
return
t = pt.inverted()[0]
# reset translation
t.setMatrix(t.m11(), t.m12(), t.m13(), t.m21(), t.m22(), t.m23(), 0, 0, t.m33())
# apply rotation
angle = -self.angle
if self.rotateAxis is not None:
d = pt.map(self.rotateAxis) - pt.map(Point(0, 0))
a = np.arctan2(d.y(), d.x()) * 180 / np.pi
angle += a
t.rotate(angle)
self.setTransform(t)
self._lastTransform = pt
self.updateTextPos()
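# Usage sketch (assumes a running Qt application and an existing plot item):
#   text = TextItem('peak', color=(255, 255, 0), anchor=(0.5, 1.0))
#   text.setPos(10, 2.5)
#   plot.addItem(text)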
| mit |
rahul67/hue | desktop/core/ext-py/Django-1.6.10/django/conf/locale/tr/formats.py | 118 | 1142 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd F Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'd F Y H:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'd F'
SHORT_DATE_FORMAT = 'd M Y'
SHORT_DATETIME_FORMAT = 'd M Y H:i:s'
FIRST_DAY_OF_WEEK = 1 # Pazartesi
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d/%m/%Y', '%d/%m/%y', # '25/10/2006', '25/10/06'
'%y-%m-%d', # '06-10-25'
# '%d %B %Y', '%d %b. %Y', # '25 Ekim 2006', '25 Eki. 2006'
)
DATETIME_INPUT_FORMATS = (
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| apache-2.0 |