repo_name | ref | path | copies | content
---|---|---|---|---|
ansible/ansible-modules-extras | refs/heads/devel | monitoring/datadog_monitor.py | 22 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Sebastian Kornehl <sebastian.kornehl@asideas.de>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# import module snippets
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: datadog_monitor
short_description: Manages Datadog monitors
description:
- "Manages monitors within Datadog"
- "Options like described on http://docs.datadoghq.com/api/"
version_added: "2.0"
author: "Sebastian Kornehl (@skornehl)"
requirements: [datadog]
options:
api_key:
description: ["Your DataDog API key."]
required: true
app_key:
description: ["Your DataDog app key."]
required: true
state:
description: ["The designated state of the monitor."]
required: true
choices: ['present', 'absent', 'mute', 'unmute']
tags:
description: ["A list of tags to associate with your monitor when creating or updating. This can help you categorize and filter monitors."]
required: false
default: None
version_added: "2.2"
type:
description:
- "The type of the monitor."
- "The 'event alert' type is available starting with Ansible 2.1."
required: false
default: null
choices: ['metric alert', 'service check', 'event alert']
query:
description: ["The monitor query to notify on with syntax varying depending on what type of monitor you are creating."]
required: false
default: null
name:
description: ["The name of the alert."]
required: true
message:
description: ["A message to include with notifications for this monitor. Email notifications can be sent to specific users by using the same '@username' notation as events. Monitor message template variables can be accessed by using double square brackets, i.e '[[' and ']]'."]
required: false
default: null
silenced:
description: ["Dictionary of scopes to timestamps or None. Each scope will be muted until the given POSIX timestamp or forever if the value is None. "]
required: false
default: ""
notify_no_data:
description: ["A boolean indicating whether this monitor will notify when data stops reporting.."]
required: false
default: False
no_data_timeframe:
description: ["The number of minutes before a monitor will notify when data stops reporting. Must be at least 2x the monitor timeframe for metric alerts or 2 minutes for service checks."]
required: false
default: 2x timeframe for metric, 2 minutes for service
timeout_h:
description: ["The number of hours of the monitor not reporting data before it will automatically resolve from a triggered state."]
required: false
default: null
renotify_interval:
description: ["The number of minutes after the last notification before a monitor will re-notify on the current status. It will only re-notify if it's not resolved."]
required: false
default: null
escalation_message:
description: ["A message to include with a re-notification. Supports the '@username' notification we allow elsewhere. Not applicable if renotify_interval is None"]
required: false
default: null
notify_audit:
description: ["A boolean indicating whether tagged users will be notified on changes to this monitor."]
required: false
default: False
thresholds:
description: ["A dictionary of thresholds by status. This option is only available for service checks and metric alerts. Because each of them can have multiple thresholds, we don't define them directly in the query."]
required: false
default: {'ok': 1, 'critical': 1, 'warning': 1}
locked:
description: ["A boolean indicating whether changes to this monitor should be restricted to the creator or admins."]
required: false
default: False
version_added: "2.2"
require_full_window:
description: ["A boolean indicating whether this monitor needs a full window of data before it's evaluated. We highly recommend you set this to False for sparse metrics, otherwise some evaluations will be skipped."]
required: false
default: null
version_added: "2.3"
id:
description: ["The id of the alert. If set, will be used instead of the name to locate the alert."]
required: false
default: null
version_added: "2.3"
'''
EXAMPLES = '''
# Create a metric monitor
datadog_monitor:
type: "metric alert"
name: "Test monitor"
state: "present"
query: "datadog.agent.up".over("host:host1").last(2).count_by_status()"
message: "Host [[host.name]] with IP [[host.ip]] is failing to report to datadog."
api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
# Deletes a monitor
datadog_monitor:
name: "Test monitor"
state: "absent"
api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
# Mutes a monitor
datadog_monitor:
name: "Test monitor"
state: "mute"
silenced: '{"*":None}'
api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
# Unmutes a monitor
datadog_monitor:
name: "Test monitor"
state: "unmute"
api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
'''
# Import Datadog
try:
from datadog import initialize, api
HAS_DATADOG = True
except ImportError:
HAS_DATADOG = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
def main():
module = AnsibleModule(
argument_spec=dict(
api_key=dict(required=True, no_log=True),
app_key=dict(required=True, no_log=True),
state=dict(required=True, choices=['present', 'absent', 'mute', 'unmute']),
type=dict(required=False, choices=['metric alert', 'service check', 'event alert']),
name=dict(required=True),
query=dict(required=False),
message=dict(required=False, default=None),
silenced=dict(required=False, default=None, type='dict'),
notify_no_data=dict(required=False, default=False, type='bool'),
no_data_timeframe=dict(required=False, default=None),
timeout_h=dict(required=False, default=None),
renotify_interval=dict(required=False, default=None),
escalation_message=dict(required=False, default=None),
notify_audit=dict(required=False, default=False, type='bool'),
thresholds=dict(required=False, type='dict', default=None),
tags=dict(required=False, type='list', default=None),
locked=dict(required=False, default=False, type='bool'),
require_full_window=dict(required=False, default=None, type='bool'),
id=dict(required=False)
)
)
# Prepare Datadog
if not HAS_DATADOG:
module.fail_json(msg='datadogpy required for this module')
options = {
'api_key': module.params['api_key'],
'app_key': module.params['app_key']
}
initialize(**options)
if module.params['state'] == 'present':
install_monitor(module)
elif module.params['state'] == 'absent':
delete_monitor(module)
elif module.params['state'] == 'mute':
mute_monitor(module)
elif module.params['state'] == 'unmute':
unmute_monitor(module)
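# Datadog monitor messages use '{{ }}' template variables; the module documents '[[' and ']]'
# so playbooks do not collide with Ansible's Jinja2 templating, and this helper converts them back.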
def _fix_template_vars(message):
if message:
return message.replace('[[', '{{').replace(']]', '}}')
return message
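# Look the monitor up by id when one was supplied, otherwise fall back to matching on name.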
def _get_monitor(module):
if module.params['id'] is not None:
monitor = api.Monitor.get(module.params['id'])
if 'errors' in monitor:
module.fail_json(msg="Failed to retrieve monitor with id %s, errors are %s" % (module.params['id'], str(monitor['errors'])))
return monitor
else:
monitors = api.Monitor.get_all()
for monitor in monitors:
if monitor['name'] == module.params['name']:
return monitor
return {}
def _post_monitor(module, options):
try:
kwargs = dict(type=module.params['type'], query=module.params['query'],
name=module.params['name'], message=_fix_template_vars(module.params['message']),
options=options)
if module.params['tags'] is not None:
kwargs['tags'] = module.params['tags']
msg = api.Monitor.create(**kwargs)
if 'errors' in msg:
module.fail_json(msg=str(msg['errors']))
else:
module.exit_json(changed=True, msg=msg)
except Exception:
e = get_exception()
module.fail_json(msg=str(e))
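# Compare two monitor dicts while ignoring volatile keys (creator, overall_state, modified)
# so that an update which changes nothing can report changed=False.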
def _equal_dicts(a, b, ignore_keys):
ka = set(a).difference(ignore_keys)
kb = set(b).difference(ignore_keys)
return ka == kb and all(a[k] == b[k] for k in ka)
def _update_monitor(module, monitor, options):
try:
kwargs = dict(id=monitor['id'], query=module.params['query'],
name=module.params['name'], message=_fix_template_vars(module.params['message']),
options=options)
if module.params['tags'] is not None:
kwargs['tags'] = module.params['tags']
msg = api.Monitor.update(**kwargs)
if 'errors' in msg:
module.fail_json(msg=str(msg['errors']))
elif _equal_dicts(msg, monitor, ['creator', 'overall_state', 'modified']):
module.exit_json(changed=False, msg=msg)
else:
module.exit_json(changed=True, msg=msg)
except Exception:
e = get_exception()
module.fail_json(msg=str(e))
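# Create the monitor if it does not exist yet, otherwise update the existing one in place.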
def install_monitor(module):
options = {
"silenced": module.params['silenced'],
"notify_no_data": module.boolean(module.params['notify_no_data']),
"no_data_timeframe": module.params['no_data_timeframe'],
"timeout_h": module.params['timeout_h'],
"renotify_interval": module.params['renotify_interval'],
"escalation_message": module.params['escalation_message'],
"notify_audit": module.boolean(module.params['notify_audit']),
"locked": module.boolean(module.params['locked']),
"require_full_window" : module.params['require_full_window']
}
if module.params['type'] == "service check":
options["thresholds"] = module.params['thresholds'] or {'ok': 1, 'critical': 1, 'warning': 1}
if module.params['type'] == "metric alert" and module.params['thresholds'] is not None:
options["thresholds"] = module.params['thresholds']
monitor = _get_monitor(module)
if not monitor:
_post_monitor(module, options)
else:
_update_monitor(module, monitor, options)
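# Deleting a monitor that does not exist is treated as a no-op (changed=False).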
def delete_monitor(module):
monitor = _get_monitor(module)
if not monitor:
module.exit_json(changed=False)
try:
msg = api.Monitor.delete(monitor['id'])
module.exit_json(changed=True, msg=msg)
except Exception:
e = get_exception()
module.fail_json(msg=str(e))
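# Muting fails if the monitor is already muted; if the requested scopes are already silenced
# the task exits with changed=False instead of calling the API again.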
def mute_monitor(module):
monitor = _get_monitor(module)
if not monitor:
module.fail_json(msg="Monitor %s not found!" % module.params['name'])
elif monitor['options']['silenced']:
module.fail_json(msg="Monitor is already muted. Datadog does not allow to modify muted alerts, consider unmuting it first.")
elif (module.params['silenced'] is not None
and len(set(monitor['options']['silenced']) - set(module.params['silenced'])) == 0):
module.exit_json(changed=False)
try:
if module.params['silenced'] is None or module.params['silenced'] == "":
msg = api.Monitor.mute(id=monitor['id'])
else:
msg = api.Monitor.mute(id=monitor['id'], silenced=module.params['silenced'])
module.exit_json(changed=True, msg=msg)
except Exception:
e = get_exception()
module.fail_json(msg=str(e))
def unmute_monitor(module):
monitor = _get_monitor(module)
if not monitor:
module.fail_json(msg="Monitor %s not found!" % module.params['name'])
elif not monitor['options']['silenced']:
module.exit_json(changed=False)
try:
msg = api.Monitor.unmute(monitor['id'])
module.exit_json(changed=True, msg=msg)
except Exception:
e = get_exception()
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
|
GPCsolutions/mod-webui | refs/heads/master | module/plugins/problems/__init__.py | 288 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2012:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
|
HerlanAssis/Django-AulaOsvandoSantana | refs/heads/master | lib/python2.7/site-packages/setuptools/tests/test_packageindex.py | 377 | """Package Index Tests
"""
import sys
import os
import unittest
import pkg_resources
from setuptools.compat import urllib2, httplib, HTTPError, unicode, pathname2url
import distutils.errors
import setuptools.package_index
from setuptools.tests.server import IndexServer
class TestPackageIndex(unittest.TestCase):
def test_bad_url_bad_port(self):
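# Port 0 is never listening, so open_url should fail; the error should either
# mention the URL or be an HTTPError instance.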
index = setuptools.package_index.PackageIndex()
url = 'http://127.0.0.1:0/nonesuch/test_package_index'
try:
v = index.open_url(url)
except Exception:
v = sys.exc_info()[1]
self.assertTrue(url in str(v))
else:
self.assertTrue(isinstance(v, HTTPError))
def test_bad_url_typo(self):
# issue 16
# easy_install inquant.contentmirror.plone breaks because of a typo
# in its home URL
index = setuptools.package_index.PackageIndex(
hosts=('www.example.com',)
)
url = 'url:%20https://svn.plone.org/svn/collective/inquant.contentmirror.plone/trunk'
try:
v = index.open_url(url)
except Exception:
v = sys.exc_info()[1]
self.assertTrue(url in str(v))
else:
self.assertTrue(isinstance(v, HTTPError))
def test_bad_url_bad_status_line(self):
index = setuptools.package_index.PackageIndex(
hosts=('www.example.com',)
)
def _urlopen(*args):
raise httplib.BadStatusLine('line')
index.opener = _urlopen
url = 'http://example.com'
try:
v = index.open_url(url)
except Exception:
v = sys.exc_info()[1]
self.assertTrue('line' in str(v))
else:
raise AssertionError('Should have raised here!')
def test_bad_url_double_scheme(self):
"""
A bad URL with a double scheme should raise a DistutilsError.
"""
index = setuptools.package_index.PackageIndex(
hosts=('www.example.com',)
)
# issue 20
url = 'http://http://svn.pythonpaste.org/Paste/wphp/trunk'
try:
index.open_url(url)
except distutils.errors.DistutilsError:
error = sys.exc_info()[1]
msg = unicode(error)
assert 'nonnumeric port' in msg or 'getaddrinfo failed' in msg or 'Name or service not known' in msg
return
raise RuntimeError("Did not raise")
def test_bad_url_screwy_href(self):
index = setuptools.package_index.PackageIndex(
hosts=('www.example.com',)
)
# issue #160
if sys.version_info[0] == 2 and sys.version_info[1] == 7:
# this should not fail
url = 'http://example.com'
page = ('<a href="http://www.famfamfam.com]('
'http://www.famfamfam.com/">')
index.process_index(url, page)
def test_url_ok(self):
index = setuptools.package_index.PackageIndex(
hosts=('www.example.com',)
)
url = 'file:///tmp/test_package_index'
self.assertTrue(index.url_ok(url, True))
def test_links_priority(self):
"""
Download links from the pypi simple index should be used before
external download links.
https://bitbucket.org/tarek/distribute/issue/163
Usecase :
- someone uploads a package on pypi, a md5 is generated
- someone manually copies this link (with the md5 in the url) onto an
external page accessible from the package page.
- someone reuploads the package (with a different md5)
- while easy_installing, an MD5 error occurs because the external link
is used
-> Setuptools should use the link from pypi, not the external one.
"""
if sys.platform.startswith('java'):
# Skip this test on jython because binding to :0 fails
return
# start an index server
server = IndexServer()
server.start()
index_url = server.base_url() + 'test_links_priority/simple/'
# scan a test index
pi = setuptools.package_index.PackageIndex(index_url)
requirement = pkg_resources.Requirement.parse('foobar')
pi.find_packages(requirement)
server.stop()
# the distribution has been found
self.assertTrue('foobar' in pi)
# we have only one link, because links are compared without md5
self.assertTrue(len(pi['foobar'])==1)
# the link should be from the index
self.assertTrue('correct_md5' in pi['foobar'][0].location)
def test_parse_bdist_wininst(self):
self.assertEqual(setuptools.package_index.parse_bdist_wininst(
'reportlab-2.5.win32-py2.4.exe'), ('reportlab-2.5', '2.4', 'win32'))
self.assertEqual(setuptools.package_index.parse_bdist_wininst(
'reportlab-2.5.win32.exe'), ('reportlab-2.5', None, 'win32'))
self.assertEqual(setuptools.package_index.parse_bdist_wininst(
'reportlab-2.5.win-amd64-py2.7.exe'), ('reportlab-2.5', '2.7', 'win-amd64'))
self.assertEqual(setuptools.package_index.parse_bdist_wininst(
'reportlab-2.5.win-amd64.exe'), ('reportlab-2.5', None, 'win-amd64'))
def test__vcs_split_rev_from_url(self):
"""
Test the basic usage of _vcs_split_rev_from_url
"""
vsrfu = setuptools.package_index.PackageIndex._vcs_split_rev_from_url
url, rev = vsrfu('https://example.com/bar@2995')
self.assertEqual(url, 'https://example.com/bar')
self.assertEqual(rev, '2995')
def test_local_index(self):
"""
local_open should be able to read an index from the file system.
"""
f = open('index.html', 'w')
f.write('<div>content</div>')
f.close()
try:
url = 'file:' + pathname2url(os.getcwd()) + '/'
res = setuptools.package_index.local_open(url)
finally:
os.remove('index.html')
assert 'content' in res.read()
class TestContentCheckers(unittest.TestCase):
def test_md5(self):
checker = setuptools.package_index.HashChecker.from_url(
'http://foo/bar#md5=f12895fdffbd45007040d2e44df98478')
checker.feed('You should probably not be using MD5'.encode('ascii'))
self.assertEqual(checker.hash.hexdigest(),
'f12895fdffbd45007040d2e44df98478')
self.assertTrue(checker.is_valid())
def test_other_fragment(self):
"Content checks should succeed silently if no hash is present"
checker = setuptools.package_index.HashChecker.from_url(
'http://foo/bar#something%20completely%20different')
checker.feed('anything'.encode('ascii'))
self.assertTrue(checker.is_valid())
def test_blank_md5(self):
"Content checks should succeed if a hash is empty"
checker = setuptools.package_index.HashChecker.from_url(
'http://foo/bar#md5=')
checker.feed('anything'.encode('ascii'))
self.assertTrue(checker.is_valid())
def test_get_hash_name_md5(self):
checker = setuptools.package_index.HashChecker.from_url(
'http://foo/bar#md5=f12895fdffbd45007040d2e44df98478')
self.assertEqual(checker.hash_name, 'md5')
def test_report(self):
checker = setuptools.package_index.HashChecker.from_url(
'http://foo/bar#md5=f12895fdffbd45007040d2e44df98478')
rep = checker.report(lambda x: x, 'My message about %s')
self.assertEqual(rep, 'My message about md5')
|
kalikaneko/euler | refs/heads/master | 003/primefactor.py | 1 | import math
def is_prime(x):
d = 2
while d * d <= x:
if x % d == 0:
return False
d += 1
return x > 1
def factors(x):
yield 1
for i in xrange(2, int(math.sqrt(x))):
if x % i == 0:
yield i
yield x/i
yield x
print max(filter(is_prime, factors(600851475143)))
|
pombredanne/django-rest-swagger | refs/heads/master | tests/compat/mock.py | 2 | # pylint: disable=W0614,W0401
from __future__ import absolute_import
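# Prefer the stdlib unittest.mock (Python 3); fall back to the external 'mock' package on Python 2.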
try:
from unittest.mock import *
except ImportError:
from mock import *
|
UWPCE-PythonCert/IntroPython2016 | refs/heads/master | students/spencer_mcghin/session5/Exception_Lab.py | 3 | def safe_input(input_string=""):
try:
n = input(input_string)
return n
except (KeyboardInterrupt, EOFError):
return None
if __name__ == "__main__":
n = safe_input("Provide a string: ")
if n is None:
print("Program terminated by user.")
else:
print("User responded with:", n)
|
apache/incubator-metron | refs/heads/master | metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/service_check.py | 11 | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
from resource_management.core.logger import Logger
from resource_management.libraries.script import Script
from parser_commands import ParserCommands
from enrichment_commands import EnrichmentCommands
from indexing_commands import IndexingCommands
from profiler_commands import ProfilerCommands
from rest_commands import RestCommands
from management_ui_commands import ManagementUICommands
from alerts_ui_commands import AlertsUICommands
class ServiceCheck(Script):
def service_check(self, env):
from params import params
# check the parsers
Logger.info("Performing Parser service check")
parser_cmds = ParserCommands(params)
parser_cmds.service_check(env)
# check enrichment
Logger.info("Performing Enrichment service check")
enrichment_cmds = EnrichmentCommands(params)
enrichment_cmds.service_check(env)
# check indexing
Logger.info("Performing Indexing service check")
indexing_cmds = IndexingCommands(params)
indexing_cmds.service_check(env)
# check the profiler
Logger.info("Performing Profiler service check")
profiler_cmds = ProfilerCommands(params)
profiler_cmds.service_check(env)
# check the rest api
Logger.info("Performing REST application service check")
rest_cmds = RestCommands(params)
rest_cmds.service_check(env)
# check the management UI
Logger.info("Performing Management UI service check")
mgmt_cmds = ManagementUICommands(params)
mgmt_cmds.service_check(env)
# check the alerts UI
Logger.info("Performing Alerts UI service check")
alerts_cmds = AlertsUICommands(params)
alerts_cmds.service_check(env)
Logger.info("Metron service check completed successfully")
exit(0)
if __name__ == "__main__":
ServiceCheck().execute()
|
PlayUAV/MissionPlanner | refs/heads/master | Lib/encodings/cp865.py | 93 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP865.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp865',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x0098: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
0x009e: 0x20a7, # PESETA SIGN
0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x00a8: 0x00bf, # INVERTED QUESTION MARK
0x00a9: 0x2310, # REVERSED NOT SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00a4, # CURRENCY SIGN
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00e3: 0x03c0, # GREEK SMALL LETTER PI
0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
0x00ec: 0x221e, # INFINITY
0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00ef: 0x2229, # INTERSECTION
0x00f0: 0x2261, # IDENTICAL TO
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x2320, # TOP HALF INTEGRAL
0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
u'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
u'\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xec' # 0x008d -> LATIN SMALL LETTER I WITH GRAVE
u'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
u'\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
u'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE
u'\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
u'\xff' # 0x0098 -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE
u'\xa3' # 0x009c -> POUND SIGN
u'\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE
u'\u20a7' # 0x009e -> PESETA SIGN
u'\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
u'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
u'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
u'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
u'\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
u'\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR
u'\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR
u'\xbf' # 0x00a8 -> INVERTED QUESTION MARK
u'\u2310' # 0x00a9 -> REVERSED NOT SIGN
u'\xac' # 0x00aa -> NOT SIGN
u'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
u'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
u'\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xa4' # 0x00af -> CURRENCY SIGN
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
u'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
u'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
u'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
u'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
u'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
u'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
u'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
u'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
u'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
u'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
u'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
u'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
u'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
u'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\u258c' # 0x00dd -> LEFT HALF BLOCK
u'\u2590' # 0x00de -> RIGHT HALF BLOCK
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
u'\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
u'\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
u'\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
u'\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
u'\xb5' # 0x00e6 -> MICRO SIGN
u'\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
u'\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
u'\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
u'\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
u'\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
u'\u221e' # 0x00ec -> INFINITY
u'\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
u'\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
u'\u2229' # 0x00ef -> INTERSECTION
u'\u2261' # 0x00f0 -> IDENTICAL TO
u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
u'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
u'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
u'\u2320' # 0x00f4 -> TOP HALF INTEGRAL
u'\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
u'\xf7' # 0x00f6 -> DIVISION SIGN
u'\u2248' # 0x00f7 -> ALMOST EQUAL TO
u'\xb0' # 0x00f8 -> DEGREE SIGN
u'\u2219' # 0x00f9 -> BULLET OPERATOR
u'\xb7' # 0x00fa -> MIDDLE DOT
u'\u221a' # 0x00fb -> SQUARE ROOT
u'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
u'\xb2' # 0x00fd -> SUPERSCRIPT TWO
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
0x00a3: 0x009c, # POUND SIGN
0x00a4: 0x00af, # CURRENCY SIGN
0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b5: 0x00e6, # MICRO SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00bf: 0x00a8, # INVERTED QUESTION MARK
0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE
0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ec: 0x008d, # LATIN SMALL LETTER I WITH GRAVE
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS
0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE
0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x00f6, # DIVISION SIGN
0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE
0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x00ff: 0x0098, # LATIN SMALL LETTER Y WITH DIAERESIS
0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA
0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA
0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA
0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI
0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA
0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA
0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA
0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON
0x03c0: 0x00e3, # GREEK SMALL LETTER PI
0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA
0x03c4: 0x00e7, # GREEK SMALL LETTER TAU
0x03c6: 0x00ed, # GREEK SMALL LETTER PHI
0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
0x20a7: 0x009e, # PESETA SIGN
0x2219: 0x00f9, # BULLET OPERATOR
0x221a: 0x00fb, # SQUARE ROOT
0x221e: 0x00ec, # INFINITY
0x2229: 0x00ef, # INTERSECTION
0x2248: 0x00f7, # ALMOST EQUAL TO
0x2261: 0x00f0, # IDENTICAL TO
0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
0x2310: 0x00a9, # REVERSED NOT SIGN
0x2320: 0x00f4, # TOP HALF INTEGRAL
0x2321: 0x00f5, # BOTTOM HALF INTEGRAL
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
|
smallyear/linuxLearn | refs/heads/master | salt/salt/daemons/test/test_raetkey.py | 3 | # -*- coding: utf-8 -*-
'''
Tests to try out salt key.RaetKey Potentially ephemeral
'''
from __future__ import absolute_import
# pylint: skip-file
# pylint: disable=C0103
import sys
from salt.ext.six.moves import map
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
import os
import stat
import time
import tempfile
import shutil
from ioflo.aid.odicting import odict
from ioflo.aid.timing import Timer, StoreTimer
from ioflo.base import storing
from ioflo.base.consoling import getConsole
console = getConsole()
from raet import raeting, nacling
from raet.road import estating, keeping, stacking
from salt.key import RaetKey
def setUpModule():
console.reinit(verbosity=console.Wordage.concise)
def tearDownModule():
pass
class BasicTestCase(unittest.TestCase):
""""""
def setUp(self):
self.store = storing.Store(stamp=0.0)
self.timer = StoreTimer(store=self.store, duration=1.0)
self.saltDirpath = tempfile.mkdtemp(prefix="salt", suffix="main", dir='/tmp')
pkiDirpath = os.path.join(self.saltDirpath, 'pki')
if not os.path.exists(pkiDirpath):
os.makedirs(pkiDirpath)
acceptedDirpath = os.path.join(pkiDirpath, 'accepted')
if not os.path.exists(acceptedDirpath):
os.makedirs(acceptedDirpath)
pendingDirpath = os.path.join(pkiDirpath, 'pending')
if not os.path.exists(pendingDirpath):
os.makedirs(pendingDirpath)
rejectedDirpath = os.path.join(pkiDirpath, 'rejected')
if not os.path.exists(rejectedDirpath):
os.makedirs(rejectedDirpath)
self.localFilepath = os.path.join(pkiDirpath, 'local.key')
if os.path.exists(self.localFilepath):
mode = os.stat(self.localFilepath).st_mode
os.chmod(self.localFilepath, mode | stat.S_IWUSR | stat.S_IWUSR)
self.cacheDirpath = os.path.join(self.saltDirpath, 'cache')
self.sockDirpath = os.path.join(self.saltDirpath, 'sock')
self.opts = dict(
__role='master',
id='master',
pki_dir=pkiDirpath,
sock_dir=self.sockDirpath,
cachedir=self.cacheDirpath,
open_mode=False,
auto_accept=True,
transport='raet',
)
self.mainKeeper = RaetKey(opts=self.opts)
self.baseDirpath = tempfile.mkdtemp(prefix="salt", suffix="base", dir='/tmp')
def tearDown(self):
if os.path.exists(self.saltDirpath):
shutil.rmtree(self.saltDirpath)
def createRoadData(self, name, base):
'''
Creates odict and populates with data to setup road stack
{
name: stack name local estate name
dirpath: dirpath for keep files
sighex: signing key
verhex: verify key
prihex: private key
pubhex: public key
}
'''
data = odict()
data['name'] = name
data['dirpath'] = os.path.join(base, 'road', 'keep', name)
signer = nacling.Signer()
data['sighex'] = signer.keyhex
data['verhex'] = signer.verhex
privateer = nacling.Privateer()
data['prihex'] = privateer.keyhex
data['pubhex'] = privateer.pubhex
return data
def testAutoAccept(self):
'''
Basic function of RaetKey in auto accept mode
'''
console.terse("{0}\n".format(self.testAutoAccept.__doc__))
self.opts['auto_accept'] = True
self.assertTrue(self.opts['auto_accept'])
self.assertDictEqual(self.mainKeeper.all_keys(), {'accepted': [],
'local': [],
'rejected': [],
'pending': []})
localkeys = self.mainKeeper.read_local()
self.assertDictEqual(localkeys, {})
main = self.createRoadData(name='main', base=self.baseDirpath)
self.mainKeeper.write_local(main['prihex'], main['sighex'])
localkeys = self.mainKeeper.read_local()
self.assertDictEqual(localkeys, {'priv': main['prihex'],
'sign': main['sighex']})
allkeys = self.mainKeeper.all_keys()
self.assertDictEqual(allkeys, {'accepted': [],
'local': [self.localFilepath],
'rejected': [],
'pending': []})
other1 = self.createRoadData(name='other1', base=self.baseDirpath)
other2 = self.createRoadData(name='other2', base=self.baseDirpath)
status = self.mainKeeper.status(other1['name'], other1['pubhex'], other1['verhex'])
self.assertEqual(status, 'accepted')
status = self.mainKeeper.status(other2['name'], other2['pubhex'], other2['verhex'])
self.assertEqual(status, 'accepted')
allkeys = self.mainKeeper.all_keys()
self.assertDictEqual(allkeys, {'accepted': ['other1', 'other2'],
'local': [self.localFilepath],
'pending': [],
'rejected': []} )
remotekeys = self.mainKeeper.read_remote(other1['name'])
self.assertDictEqual(remotekeys, { 'minion_id': 'other1',
'pub': other1['pubhex'],
'verify': other1['verhex']} )
remotekeys = self.mainKeeper.read_remote(other2['name'])
self.assertDictEqual(remotekeys, { 'minion_id': 'other2',
'pub': other2['pubhex'],
'verify': other2['verhex']} )
listkeys = self.mainKeeper.list_keys()
self.assertDictEqual(listkeys, {'accepted': ['other1', 'other2'],
'rejected': [],
'pending': []})
allremotekeys = self.mainKeeper.read_all_remote()
self.assertDictEqual(allremotekeys, {'other1':
{'verify': other1['verhex'],
'minion_id': 'other1',
'acceptance': 'accepted',
'pub': other1['pubhex'],},
'other2':
{'verify': other2['verhex'],
'minion_id': 'other2',
'acceptance': 'accepted',
'pub': other2['pubhex'],}
})
def testManualAccept(self):
'''
Basic function of RaetKey in non auto accept mode
'''
console.terse("{0}\n".format(self.testAutoAccept.__doc__))
self.opts['auto_accept'] = False
self.assertFalse(self.opts['auto_accept'])
self.assertDictEqual(self.mainKeeper.all_keys(), {'accepted': [],
'local': [],
'rejected': [],
'pending': []})
localkeys = self.mainKeeper.read_local()
self.assertDictEqual(localkeys, {})
main = self.createRoadData(name='main', base=self.baseDirpath)
self.mainKeeper.write_local(main['prihex'], main['sighex'])
localkeys = self.mainKeeper.read_local()
self.assertDictEqual(localkeys, {'priv': main['prihex'],
'sign': main['sighex']})
allkeys = self.mainKeeper.all_keys()
self.assertDictEqual(allkeys, {'accepted': [],
'local': [self.localFilepath],
'rejected': [],
'pending': []})
other1 = self.createRoadData(name='other1', base=self.baseDirpath)
other2 = self.createRoadData(name='other2', base=self.baseDirpath)
status = self.mainKeeper.status(other1['name'], other1['pubhex'], other1['verhex'])
self.assertEqual(status, 'pending')
status = self.mainKeeper.status(other2['name'], other2['pubhex'], other2['verhex'])
self.assertEqual(status, 'pending')
allkeys = self.mainKeeper.all_keys()
self.assertDictEqual(allkeys, {'accepted': [],
'local': [self.localFilepath],
'pending': ['other1', 'other2'],
'rejected': []} )
remotekeys = self.mainKeeper.read_remote(other1['name'])
self.assertDictEqual(remotekeys, {})
remotekeys = self.mainKeeper.read_remote(other2['name'])
self.assertDictEqual(remotekeys, {})
listkeys = self.mainKeeper.list_keys()
self.assertDictEqual(listkeys, {'accepted': [],
'rejected': [],
'pending': ['other1', 'other2']})
allremotekeys = self.mainKeeper.read_all_remote()
self.assertDictEqual(allremotekeys, {'other1':
{'verify': other1['verhex'],
'minion_id': 'other1',
'acceptance': 'pending',
'pub': other1['pubhex'],},
'other2':
{'verify': other2['verhex'],
'minion_id': 'other2',
'acceptance': 'pending',
'pub': other2['pubhex'],}
})
self.mainKeeper.accept_all()
allkeys = self.mainKeeper.all_keys()
self.assertDictEqual(allkeys, {'accepted': ['other1', 'other2'],
'local': [self.localFilepath],
'pending': [],
'rejected': []} )
remotekeys = self.mainKeeper.read_remote(other1['name'])
self.assertDictEqual(remotekeys, { 'minion_id': 'other1',
'pub': other1['pubhex'],
'verify': other1['verhex']} )
remotekeys = self.mainKeeper.read_remote(other2['name'])
self.assertDictEqual(remotekeys, { 'minion_id': 'other2',
'pub': other2['pubhex'],
'verify': other2['verhex']} )
listkeys = self.mainKeeper.list_keys()
self.assertDictEqual(listkeys, {'accepted': ['other1', 'other2'],
'rejected': [],
'pending': []})
allremotekeys = self.mainKeeper.read_all_remote()
self.assertDictEqual(allremotekeys, {'other1':
{'verify': other1['verhex'],
'minion_id': 'other1',
'acceptance': 'accepted',
'pub': other1['pubhex'],},
'other2':
{'verify': other2['verhex'],
'minion_id': 'other2',
'acceptance': 'accepted',
'pub': other2['pubhex'],}
})
def testDelete(self):
'''
Basic function of RaetKey to delete key
'''
console.terse("{0}\n".format(self.testDelete.__doc__))
self.opts['auto_accept'] = True
self.assertTrue(self.opts['auto_accept'])
self.assertDictEqual(self.mainKeeper.all_keys(), {'accepted': [],
'local': [],
'rejected': [],
'pending': []})
localkeys = self.mainKeeper.read_local()
self.assertDictEqual(localkeys, {})
main = self.createRoadData(name='main', base=self.baseDirpath)
self.mainKeeper.write_local(main['prihex'], main['sighex'])
localkeys = self.mainKeeper.read_local()
self.assertDictEqual(localkeys, {'priv': main['prihex'],
'sign': main['sighex']})
allkeys = self.mainKeeper.all_keys()
self.assertDictEqual(allkeys, {'accepted': [],
'local': [self.localFilepath],
'rejected': [],
'pending': []})
other1 = self.createRoadData(name='other1', base=self.baseDirpath)
other2 = self.createRoadData(name='other2', base=self.baseDirpath)
status = self.mainKeeper.status(other1['name'], other1['pubhex'], other1['verhex'])
self.assertEqual(status, 'accepted')
status = self.mainKeeper.status(other2['name'], other2['pubhex'], other2['verhex'])
self.assertEqual(status, 'accepted')
allkeys = self.mainKeeper.all_keys()
self.assertDictEqual(allkeys, {'accepted': ['other1', 'other2'],
'local': [self.localFilepath],
'pending': [],
'rejected': []} )
remotekeys = self.mainKeeper.read_remote(other1['name'])
self.assertDictEqual(remotekeys, { 'minion_id': 'other1',
'pub': other1['pubhex'],
'verify': other1['verhex']} )
remotekeys = self.mainKeeper.read_remote(other2['name'])
self.assertDictEqual(remotekeys, { 'minion_id': 'other2',
'pub': other2['pubhex'],
'verify': other2['verhex']} )
listkeys = self.mainKeeper.list_keys()
self.assertDictEqual(listkeys, {'accepted': ['other1', 'other2'],
'rejected': [],
'pending': []})
allremotekeys = self.mainKeeper.read_all_remote()
self.assertDictEqual(allremotekeys, {'other1':
{'verify': other1['verhex'],
'minion_id': 'other1',
'acceptance': 'accepted',
'pub': other1['pubhex']},
'other2':
{'verify': other2['verhex'],
'minion_id': 'other2',
'acceptance': 'accepted',
'pub': other2['pubhex'],}
})
self.mainKeeper.delete_key(match=other1['name'])
allkeys = self.mainKeeper.all_keys()
self.assertDictEqual(allkeys, {'accepted': ['other2'],
'local': [self.localFilepath],
'pending': [],
'rejected': []} )
remotekeys = self.mainKeeper.read_remote(other1['name'])
self.assertDictEqual(remotekeys, {} )
remotekeys = self.mainKeeper.read_remote(other2['name'])
self.assertDictEqual(remotekeys, { 'minion_id': 'other2',
'pub': other2['pubhex'],
'verify': other2['verhex']} )
listkeys = self.mainKeeper.list_keys()
self.assertDictEqual(listkeys, {'accepted': [ 'other2'],
'rejected': [],
'pending': []})
allremotekeys = self.mainKeeper.read_all_remote()
self.assertDictEqual(allremotekeys, {
'other2':
{'verify': other2['verhex'],
'minion_id': 'other2',
'acceptance': 'accepted',
'pub': other2['pubhex'],}
})
def runOne(test):
'''
Unittest Runner
'''
test = BasicTestCase(test)
suite = unittest.TestSuite([test])
unittest.TextTestRunner(verbosity=2).run(suite)
def runSome():
'''
Unittest runner
'''
tests = []
names = ['testAutoAccept',
'testManualAccept',
'testDelete']
tests.extend(list(list(map(BasicTestCase, names))))
suite = unittest.TestSuite(tests)
unittest.TextTestRunner(verbosity=2).run(suite)
def runAll():
'''
Unittest runner
'''
suite = unittest.TestSuite()
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(BasicTestCase))
unittest.TextTestRunner(verbosity=2).run(suite)
if __name__ == '__main__' and __package__ is None:
#console.reinit(verbosity=console.Wordage.concise)
runAll() #run all unittests
#runSome()#only run some
#runOne('testDelete')
|
fxfitz/ansible | refs/heads/devel | test/units/plugins/connection/test_winrm.py | 3 | # -*- coding: utf-8 -*-
# (c) 2018, Jordan Borean <jborean@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
from io import StringIO
from ansible.compat.tests.mock import patch, MagicMock
from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils._text import to_bytes
from ansible.playbook.play_context import PlayContext
from ansible.plugins.loader import connection_loader
from ansible.plugins.connection import winrm
pytest.importorskip("winrm")
class TestConnectionWinRM(object):
OPTIONS_DATA = (
# default options
(
{},
{'_extras': {}},
{},
{
'_kerb_managed': False,
'_kinit_cmd': 'kinit',
'_winrm_connection_timeout': None,
'_winrm_host': 'inventory_hostname',
'_winrm_kwargs': {'username': None, 'password': ''},
'_winrm_pass': '',
'_winrm_path': '/wsman',
'_winrm_port': 5986,
'_winrm_scheme': 'https',
'_winrm_transport': ['ssl'],
'_winrm_user': None
},
False
),
# http through port
(
{},
{'_extras': {}, 'ansible_port': 5985},
{},
{
'_winrm_kwargs': {'username': None, 'password': ''},
'_winrm_port': 5985,
'_winrm_scheme': 'http',
'_winrm_transport': ['plaintext'],
},
False
),
# kerberos user with kerb present
(
{},
{'_extras': {}, 'ansible_user': 'user@domain.com'},
{},
{
'_kerb_managed': False,
'_kinit_cmd': 'kinit',
'_winrm_kwargs': {'username': 'user@domain.com',
'password': ''},
'_winrm_pass': '',
'_winrm_transport': ['kerberos', 'ssl'],
'_winrm_user': 'user@domain.com'
},
True
),
# kerberos user without kerb present
(
{},
{'_extras': {}, 'ansible_user': 'user@domain.com'},
{},
{
'_kerb_managed': False,
'_kinit_cmd': 'kinit',
'_winrm_kwargs': {'username': 'user@domain.com',
'password': ''},
'_winrm_pass': '',
'_winrm_transport': ['ssl'],
'_winrm_user': 'user@domain.com'
},
False
),
# kerberos user with managed ticket (implicit)
(
{'password': 'pass'},
{'_extras': {}, 'ansible_user': 'user@domain.com'},
{},
{
'_kerb_managed': True,
'_kinit_cmd': 'kinit',
'_winrm_kwargs': {'username': 'user@domain.com',
'password': 'pass'},
'_winrm_pass': 'pass',
'_winrm_transport': ['kerberos', 'ssl'],
'_winrm_user': 'user@domain.com'
},
True
),
# kerb with managed ticket (explicit)
(
{'password': 'pass'},
{'_extras': {}, 'ansible_user': 'user@domain.com',
'ansible_winrm_kinit_mode': 'managed'},
{},
{
'_kerb_managed': True,
},
True
),
        # kerb with unmanaged ticket (explicit)
(
{'password': 'pass'},
{'_extras': {}, 'ansible_user': 'user@domain.com',
'ansible_winrm_kinit_mode': 'manual'},
{},
{
'_kerb_managed': False,
},
True
),
# transport override (single)
(
{},
{'_extras': {}, 'ansible_user': 'user@domain.com',
'ansible_winrm_transport': 'ntlm'},
{},
{
'_winrm_kwargs': {'username': 'user@domain.com',
'password': ''},
'_winrm_pass': '',
'_winrm_transport': ['ntlm'],
},
False
),
# transport override (list)
(
{},
{'_extras': {}, 'ansible_user': 'user@domain.com',
'ansible_winrm_transport': ['ntlm', 'certificate']},
{},
{
'_winrm_kwargs': {'username': 'user@domain.com',
'password': ''},
'_winrm_pass': '',
'_winrm_transport': ['ntlm', 'certificate'],
},
False
),
# winrm extras
(
{},
{'_extras': {'ansible_winrm_server_cert_validation': 'ignore',
'ansible_winrm_service': 'WSMAN'}},
{},
{
'_winrm_kwargs': {'username': None, 'password': '',
'server_cert_validation': 'ignore',
'service': 'WSMAN'},
},
False
),
# direct override
(
{},
{'_extras': {}, 'ansible_winrm_connection_timeout': 5},
{'connection_timeout': 10},
{
'_winrm_connection_timeout': 10,
},
False
),
# user comes from option not play context
(
{'username': 'user1'},
{'_extras': {}, 'ansible_user': 'user2'},
{},
{
'_winrm_user': 'user2',
'_winrm_kwargs': {'username': 'user2', 'password': ''}
},
False
)
)
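    # Each OPTIONS_DATA entry is, in order: PlayContext attributes to set,
    # task/host variables passed as var_options, direct options, the expected
    # connection attributes after set_options(), and whether HAVE_KERBEROS is
    # simulated as present for that case (see test_set_options below).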
# pylint bug: https://github.com/PyCQA/pylint/issues/511
# pylint: disable=undefined-variable
@pytest.mark.parametrize('play, options, direct, expected, kerb',
((p, o, d, e, k) for p, o, d, e, k in OPTIONS_DATA))
def test_set_options(self, play, options, direct, expected, kerb):
winrm.HAVE_KERBEROS = kerb
pc = PlayContext()
for attr, value in play.items():
setattr(pc, attr, value)
new_stdin = StringIO()
conn = connection_loader.get('winrm', pc, new_stdin)
conn.set_options(var_options=options, direct=direct)
for attr, expected in expected.items():
actual = getattr(conn, attr)
assert actual == expected, \
"winrm attr '%s', actual '%s' != expected '%s'"\
% (attr, actual, expected)
class TestWinRMKerbAuth(object):
@pytest.mark.parametrize('options, expected', [
[{"_extras": {}},
(["kinit", "user@domain"],)],
[{"_extras": {}, 'ansible_winrm_kinit_cmd': 'kinit2'},
(["kinit2", "user@domain"],)],
[{"_extras": {'ansible_winrm_kerberos_delegation': True}},
(["kinit", "-f", "user@domain"],)],
])
def test_kinit_success_subprocess(self, monkeypatch, options, expected):
def mock_communicate(input=None, timeout=None):
return b"", b""
mock_popen = MagicMock()
mock_popen.return_value.communicate = mock_communicate
mock_popen.return_value.returncode = 0
monkeypatch.setattr("subprocess.Popen", mock_popen)
winrm.HAS_PEXPECT = False
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('winrm', pc, new_stdin)
conn.set_options(var_options=options)
conn._kerb_auth("user@domain", "pass")
mock_calls = mock_popen.mock_calls
assert len(mock_calls) == 1
assert mock_calls[0][1] == expected
actual_env = mock_calls[0][2]['env']
assert list(actual_env.keys()) == ['KRB5CCNAME']
assert actual_env['KRB5CCNAME'].startswith("FILE:/")
@pytest.mark.parametrize('options, expected', [
[{"_extras": {}},
("kinit", ["user@domain"],)],
[{"_extras": {}, 'ansible_winrm_kinit_cmd': 'kinit2'},
("kinit2", ["user@domain"],)],
[{"_extras": {'ansible_winrm_kerberos_delegation': True}},
("kinit", ["-f", "user@domain"],)],
])
def test_kinit_success_pexpect(self, monkeypatch, options, expected):
pytest.importorskip("pexpect")
mock_pexpect = MagicMock()
mock_pexpect.return_value.exitstatus = 0
monkeypatch.setattr("pexpect.spawn", mock_pexpect)
winrm.HAS_PEXPECT = True
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('winrm', pc, new_stdin)
conn.set_options(var_options=options)
conn._kerb_auth("user@domain", "pass")
mock_calls = mock_pexpect.mock_calls
assert mock_calls[0][1] == expected
actual_env = mock_calls[0][2]['env']
assert list(actual_env.keys()) == ['KRB5CCNAME']
assert actual_env['KRB5CCNAME'].startswith("FILE:/")
assert mock_calls[0][2]['echo'] is False
assert mock_calls[1][0] == "().expect"
assert mock_calls[1][1] == (".*:",)
assert mock_calls[2][0] == "().sendline"
assert mock_calls[2][1] == ("pass",)
assert mock_calls[3][0] == "().read"
assert mock_calls[4][0] == "().wait"
def test_kinit_with_missing_executable_subprocess(self, monkeypatch):
expected_err = "[Errno 2] No such file or directory: " \
"'/fake/kinit': '/fake/kinit'"
mock_popen = MagicMock(side_effect=OSError(expected_err))
monkeypatch.setattr("subprocess.Popen", mock_popen)
winrm.HAS_PEXPECT = False
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('winrm', pc, new_stdin)
options = {"_extras": {}, "ansible_winrm_kinit_cmd": "/fake/kinit"}
conn.set_options(var_options=options)
with pytest.raises(AnsibleConnectionFailure) as err:
conn._kerb_auth("user@domain", "pass")
assert str(err.value) == "Kerberos auth failure when calling " \
"kinit cmd '/fake/kinit': %s" % expected_err
def test_kinit_with_missing_executable_pexpect(self, monkeypatch):
pexpect = pytest.importorskip("pexpect")
expected_err = "The command was not found or was not " \
"executable: /fake/kinit"
mock_pexpect = \
MagicMock(side_effect=pexpect.ExceptionPexpect(expected_err))
monkeypatch.setattr("pexpect.spawn", mock_pexpect)
winrm.HAS_PEXPECT = True
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('winrm', pc, new_stdin)
options = {"_extras": {}, "ansible_winrm_kinit_cmd": "/fake/kinit"}
conn.set_options(var_options=options)
with pytest.raises(AnsibleConnectionFailure) as err:
conn._kerb_auth("user@domain", "pass")
assert str(err.value) == "Kerberos auth failure when calling " \
"kinit cmd '/fake/kinit': %s" % expected_err
def test_kinit_error_subprocess(self, monkeypatch):
expected_err = "kinit: krb5_parse_name: " \
"Configuration file does not specify default realm"
def mock_communicate(input=None, timeout=None):
return b"", to_bytes(expected_err)
mock_popen = MagicMock()
mock_popen.return_value.communicate = mock_communicate
mock_popen.return_value.returncode = 1
monkeypatch.setattr("subprocess.Popen", mock_popen)
winrm.HAS_PEXPECT = False
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('winrm', pc, new_stdin)
conn.set_options(var_options={"_extras": {}})
with pytest.raises(AnsibleConnectionFailure) as err:
conn._kerb_auth("invaliduser", "pass")
assert str(err.value) == \
"Kerberos auth failure for principal invaliduser with " \
"subprocess: %s" % (expected_err)
def test_kinit_error_pexpect(self, monkeypatch):
pytest.importorskip("pexpect")
expected_err = "Configuration file does not specify default realm"
mock_pexpect = MagicMock()
mock_pexpect.return_value.expect = MagicMock(side_effect=OSError)
mock_pexpect.return_value.read.return_value = to_bytes(expected_err)
mock_pexpect.return_value.exitstatus = 1
monkeypatch.setattr("pexpect.spawn", mock_pexpect)
winrm.HAS_PEXPECT = True
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('winrm', pc, new_stdin)
conn.set_options(var_options={"_extras": {}})
with pytest.raises(AnsibleConnectionFailure) as err:
conn._kerb_auth("invaliduser", "pass")
assert str(err.value) == \
"Kerberos auth failure for principal invaliduser with " \
"pexpect: %s" % (expected_err)
def test_kinit_error_pass_in_output_subprocess(self, monkeypatch):
def mock_communicate(input=None, timeout=None):
return b"", b"Error with kinit\n" + input
mock_popen = MagicMock()
mock_popen.return_value.communicate = mock_communicate
mock_popen.return_value.returncode = 1
monkeypatch.setattr("subprocess.Popen", mock_popen)
winrm.HAS_PEXPECT = False
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('winrm', pc, new_stdin)
conn.set_options(var_options={"_extras": {}})
with pytest.raises(AnsibleConnectionFailure) as err:
conn._kerb_auth("username", "password")
assert str(err.value) == \
"Kerberos auth failure for principal username with subprocess: " \
"Error with kinit\n<redacted>"
def test_kinit_error_pass_in_output_pexpect(self, monkeypatch):
pytest.importorskip("pexpect")
mock_pexpect = MagicMock()
mock_pexpect.return_value.expect = MagicMock()
mock_pexpect.return_value.read.return_value = \
b"Error with kinit\npassword\n"
mock_pexpect.return_value.exitstatus = 1
monkeypatch.setattr("pexpect.spawn", mock_pexpect)
winrm.HAS_PEXPECT = True
        pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('winrm', pc, new_stdin)
conn.set_options(var_options={"_extras": {}})
with pytest.raises(AnsibleConnectionFailure) as err:
conn._kerb_auth("username", "password")
assert str(err.value) == \
"Kerberos auth failure for principal username with pexpect: " \
"Error with kinit\n<redacted>"
|
Gutier14/CAAFinder | refs/heads/master | tests/test.py | 1 | # -*- coding: utf-8 -*-
from caafinder.database import database
from caafinder.workspace import *
import os
if __name__=='__main__':
print("Hello CAA Developer")
# db = database()
# db.initDatabase('~/Developer/CAAFinderffffff')
# print(len(db))
# print(db.querryByHeader('CATBoolean.h'))
# print(db.querryByType('CATBoolean'))
# print(db.querryByModuel('JS0FM'))
# db.insert('type','framework','header','moduel','method','fullname')
# a = workspace('LC')
# a.info
# cpp = '****Developer/CAAFinder/caafinder/******/******.cpp'
# header = '****Developer/CAAFinder/caafinder/******/*******.h'
# imakefile = '****Developer/CAAFinder/caafinder/******/Imakefile.mk'
# identityCard = '****Developer/CAAFinder/caafinder/******/IdentityCard.xml'
# res = parseCpp(cpp)
# print(len(res))
# print(res[0])
# print(res[1])
# res = parseHeader(header)
# print(len(res))
# print(res[0])
# print(res[1])
# modifyHeader(header,res[0],res[1])
# res = parseImakefile(imakefile)
# print(len(res))
# print(res[0])
# print(res[1])
# modifyHeader(header,res[0],res[1])
# res = parseIdentityCard(identityCard)
# print(len(res))
# print(res[0])
# print(res[1])
# modifyHeader(header,res[0],res[1])
# a.completeUnit('***')
# a.completeModuel('***')
# a.completeFramework('***')
# a.completeAll()
|
Akasurde/virt-manager | refs/heads/master | virtinst/idmap.py | 5 | #
# Copyright 2014 Fujitsu Limited.
# Chen Hanxiao <chenhanxiao at cn.fujitsu.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA.
from .xmlbuilder import XMLBuilder, XMLProperty
class IdMap(XMLBuilder):
"""
Class for generating user namespace related XML
"""
_XML_ROOT_NAME = "idmap"
_XML_PROP_ORDER = ["uid_start", "uid_target", "uid_count",
"gid_start", "gid_target", "gid_count"]
uid_start = XMLProperty("./uid/@start", is_int=True)
uid_target = XMLProperty("./uid/@target", is_int=True)
uid_count = XMLProperty("./uid/@count", is_int=True)
gid_start = XMLProperty("./gid/@start", is_int=True)
gid_target = XMLProperty("./gid/@target", is_int=True)
gid_count = XMLProperty("./gid/@count", is_int=True)
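# A minimal usage sketch (hedged: it assumes the usual virtinst XMLBuilder
# constructor taking a libvirt connection, and the attribute values below are
# purely illustrative):
#
#     idmap = IdMap(conn)
#     idmap.uid_start, idmap.uid_target, idmap.uid_count = 0, 1000, 10
#     idmap.gid_start, idmap.gid_target, idmap.gid_count = 0, 1000, 10
#
# which corresponds to libvirt domain XML along the lines of:
#
#     <idmap>
#       <uid start="0" target="1000" count="10"/>
#       <gid start="0" target="1000" count="10"/>
#     </idmap>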
|
Pinecast/pinecast | refs/heads/master | dashboard/migrations/0006_auto_20170209_1712.py | 3 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-02-09 17:12
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('dashboard', '0005_collaborator'),
]
operations = [
migrations.AlterField(
model_name='collaborator',
name='collaborator',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='collaborated_podcasts', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='collaborator',
name='podcast',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='collaborators', to='podcasts.Podcast'),
),
]
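# Applied like any other Django migration for the 'dashboard' app; the exact
# invocation below is shown for illustration only:
#
#     python manage.py migrate dashboard 0006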
|
CJ8664/servo | refs/heads/master | tests/wpt/web-platform-tests/fetch/api/resources/method.py | 161 | def main(request, response):
headers = []
if "cors" in request.GET:
headers.append(("Access-Control-Allow-Origin", "*"))
headers.append(("Access-Control-Allow-Credentials", "true"))
headers.append(("Access-Control-Allow-Methods", "GET, POST, PUT, FOO"))
headers.append(("Access-Control-Allow-Headers", "x-test, x-foo"))
headers.append(("Access-Control-Expose-Headers", "x-request-method"))
headers.append(("x-request-method", request.method))
return headers, request.body
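# The handler echoes the request method in the x-request-method response
# header and reflects the request body; CORS headers are only added when the
# "cors" query parameter is present. A hedged client-side sketch (the host
# name and the use of the requests library are illustrative, not part of this
# file):
#
#     import requests
#     r = requests.post(
#         "http://web-platform.test:8000/fetch/api/resources/method.py?cors",
#         data=b"payload")
#     assert r.headers["x-request-method"] == "POST"
#     assert r.content == b"payload"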
|
Amechi101/concepteur-market-app | refs/heads/master | venv/lib/python2.7/site-packages/pip/_vendor/pkg_resources.py | 160 | """
Package resource API
--------------------
A resource is a logical file contained within a package, or a logical
subdirectory thereof. The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is. Do not use os.path operations to manipulate resource
names being passed into the API.
The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files. It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
"""
import sys
import os
import time
import re
import imp
import zipfile
import zipimport
import warnings
import stat
import functools
import pkgutil
import token
import symbol
import operator
import platform
from pkgutil import get_importer
try:
from urlparse import urlparse, urlunparse
except ImportError:
from urllib.parse import urlparse, urlunparse
try:
frozenset
except NameError:
from sets import ImmutableSet as frozenset
try:
basestring
next = lambda o: o.next()
from cStringIO import StringIO as BytesIO
except NameError:
basestring = str
from io import BytesIO
def execfile(fn, globs=None, locs=None):
if globs is None:
globs = globals()
if locs is None:
locs = globs
exec(compile(open(fn).read(), fn, 'exec'), globs, locs)
# capture these to bypass sandboxing
from os import utime
try:
from os import mkdir, rename, unlink
WRITE_SUPPORT = True
except ImportError:
# no write support, probably under GAE
WRITE_SUPPORT = False
from os import open as os_open
from os.path import isdir, split
# Avoid try/except due to potential problems with delayed import mechanisms.
if sys.version_info >= (3, 3) and sys.implementation.name == "cpython":
import importlib._bootstrap as importlib_bootstrap
else:
importlib_bootstrap = None
try:
import parser
except ImportError:
pass
def _bypass_ensure_directory(name, mode=0x1FF): # 0777
# Sandbox-bypassing version of ensure_directory()
if not WRITE_SUPPORT:
raise IOError('"os.mkdir" not supported on this platform.')
dirname, filename = split(name)
if dirname and filename and not isdir(dirname):
_bypass_ensure_directory(dirname)
mkdir(dirname, mode)
_state_vars = {}
def _declare_state(vartype, **kw):
g = globals()
for name, val in kw.items():
g[name] = val
_state_vars[name] = vartype
def __getstate__():
state = {}
g = globals()
for k, v in _state_vars.items():
state[k] = g['_sget_'+v](g[k])
return state
def __setstate__(state):
g = globals()
for k, v in state.items():
g['_sset_'+_state_vars[k]](k, g[k], v)
return state
def _sget_dict(val):
return val.copy()
def _sset_dict(key, ob, state):
ob.clear()
ob.update(state)
def _sget_object(val):
return val.__getstate__()
def _sset_object(key, ob, state):
ob.__setstate__(state)
_sget_none = _sset_none = lambda *args: None
def get_supported_platform():
"""Return this platform's maximum compatible version.
distutils.util.get_platform() normally reports the minimum version
of Mac OS X that would be required to *use* extensions produced by
distutils. But what we want when checking compatibility is to know the
version of Mac OS X that we are *running*. To allow usage of packages that
explicitly require a newer version of Mac OS X, we must also know the
current version of the OS.
If this condition occurs for any other platform with a version in its
platform strings, this function should be extended accordingly.
"""
plat = get_build_platform()
m = macosVersionString.match(plat)
if m is not None and sys.platform == "darwin":
try:
plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
except ValueError:
pass # not Mac OS X
return plat
__all__ = [
# Basic resource access and distribution/entry point discovery
'require', 'run_script', 'get_provider', 'get_distribution',
'load_entry_point', 'get_entry_map', 'get_entry_info', 'iter_entry_points',
'resource_string', 'resource_stream', 'resource_filename',
'resource_listdir', 'resource_exists', 'resource_isdir',
# Environmental control
'declare_namespace', 'working_set', 'add_activation_listener',
'find_distributions', 'set_extraction_path', 'cleanup_resources',
'get_default_cache',
# Primary implementation classes
'Environment', 'WorkingSet', 'ResourceManager',
'Distribution', 'Requirement', 'EntryPoint',
# Exceptions
'ResolutionError','VersionConflict','DistributionNotFound','UnknownExtra',
'ExtractionError',
# Parsing functions and string utilities
'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
# filesystem utilities
'ensure_directory', 'normalize_path',
# Distribution "precedence" constants
'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
# "Provider" interfaces, implementations, and registration/lookup APIs
'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
'register_finder', 'register_namespace_handler', 'register_loader_type',
'fixup_namespace_packages', 'get_importer',
# Deprecated/backward compatibility only
'run_main', 'AvailableDistributions',
]
class ResolutionError(Exception):
"""Abstract base for dependency resolution errors"""
def __repr__(self):
return self.__class__.__name__+repr(self.args)
class VersionConflict(ResolutionError):
"""An already-installed version conflicts with the requested version"""
class DistributionNotFound(ResolutionError):
"""A requested distribution was not found"""
class UnknownExtra(ResolutionError):
"""Distribution doesn't have an "extra feature" of the given name"""
_provider_factories = {}
PY_MAJOR = sys.version[:3]
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(loader_type, provider_factory):
"""Register `provider_factory` to make providers for `loader_type`
`loader_type` is the type or class of a PEP 302 ``module.__loader__``,
and `provider_factory` is a function that, passed a *module* object,
returns an ``IResourceProvider`` for that module.
"""
_provider_factories[loader_type] = provider_factory
def get_provider(moduleOrReq):
"""Return an IResourceProvider for the named module or requirement"""
if isinstance(moduleOrReq,Requirement):
return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
try:
module = sys.modules[moduleOrReq]
except KeyError:
__import__(moduleOrReq)
module = sys.modules[moduleOrReq]
loader = getattr(module, '__loader__', None)
return _find_adapter(_provider_factories, loader)(module)
def _macosx_vers(_cache=[]):
if not _cache:
import platform
version = platform.mac_ver()[0]
# fallback for MacPorts
if version == '':
import plistlib
plist = '/System/Library/CoreServices/SystemVersion.plist'
if os.path.exists(plist):
if hasattr(plistlib, 'readPlist'):
plist_content = plistlib.readPlist(plist)
if 'ProductVersion' in plist_content:
version = plist_content['ProductVersion']
_cache.append(version.split('.'))
return _cache[0]
def _macosx_arch(machine):
return {'PowerPC':'ppc', 'Power_Macintosh':'ppc'}.get(machine,machine)
def get_build_platform():
"""Return this platform's string for platform-specific distributions
XXX Currently this is the same as ``distutils.util.get_platform()``, but it
needs some hacks for Linux and Mac OS X.
"""
try:
# Python 2.7 or >=3.2
from sysconfig import get_platform
except ImportError:
from distutils.util import get_platform
plat = get_platform()
if sys.platform == "darwin" and not plat.startswith('macosx-'):
try:
version = _macosx_vers()
machine = os.uname()[4].replace(" ", "_")
return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]),
_macosx_arch(machine))
except ValueError:
# if someone is running a non-Mac darwin system, this will fall
# through to the default implementation
pass
return plat
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
get_platform = get_build_platform # XXX backward compat
def compatible_platforms(provided,required):
"""Can code for the `provided` platform run on the `required` platform?
Returns true if either platform is ``None``, or the platforms are equal.
XXX Needs compatibility checks for Linux and other unixy OSes.
"""
if provided is None or required is None or provided==required:
return True # easy case
# Mac OS X special cases
reqMac = macosVersionString.match(required)
if reqMac:
provMac = macosVersionString.match(provided)
# is this a Mac package?
if not provMac:
# this is backwards compatibility for packages built before
# setuptools 0.6. All packages built after this point will
# use the new macosx designation.
provDarwin = darwinVersionString.match(provided)
if provDarwin:
dversion = int(provDarwin.group(1))
macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
if dversion == 7 and macosversion >= "10.3" or \
dversion == 8 and macosversion >= "10.4":
#import warnings
#warnings.warn("Mac eggs should be rebuilt to "
# "use the macosx designation instead of darwin.",
# category=DeprecationWarning)
return True
return False # egg isn't macosx or legacy darwin
# are they the same major version and machine type?
if provMac.group(1) != reqMac.group(1) or \
provMac.group(3) != reqMac.group(3):
return False
# is the required OS major update >= the provided one?
if int(provMac.group(2)) > int(reqMac.group(2)):
return False
return True
# XXX Linux and other platforms' special cases should go here
return False
def run_script(dist_spec, script_name):
"""Locate distribution `dist_spec` and run its `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
require(dist_spec)[0].run_script(script_name, ns)
run_main = run_script # backward compatibility
def get_distribution(dist):
"""Return a current distribution object for a Requirement or string"""
if isinstance(dist,basestring): dist = Requirement.parse(dist)
if isinstance(dist,Requirement): dist = get_provider(dist)
if not isinstance(dist,Distribution):
raise TypeError("Expected string, Requirement, or Distribution", dist)
return dist
def load_entry_point(dist, group, name):
"""Return `name` entry point of `group` for `dist` or raise ImportError"""
return get_distribution(dist).load_entry_point(group, name)
def get_entry_map(dist, group=None):
"""Return the entry point map for `group`, or the full entry map"""
return get_distribution(dist).get_entry_map(group)
def get_entry_info(dist, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return get_distribution(dist).get_entry_info(group, name)
class IMetadataProvider:
def has_metadata(name):
"""Does the package's distribution contain the named metadata?"""
def get_metadata(name):
"""The named metadata resource as a string"""
def get_metadata_lines(name):
"""Yield named metadata resource as list of non-blank non-comment lines
Leading and trailing whitespace is stripped from each line, and lines
with ``#`` as the first non-blank character are omitted."""
def metadata_isdir(name):
"""Is the named metadata a directory? (like ``os.path.isdir()``)"""
def metadata_listdir(name):
"""List of metadata names in the directory (like ``os.listdir()``)"""
def run_script(script_name, namespace):
"""Execute the named script in the supplied namespace dictionary"""
class IResourceProvider(IMetadataProvider):
"""An object that provides access to package resources"""
def get_resource_filename(manager, resource_name):
"""Return a true filesystem path for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_stream(manager, resource_name):
"""Return a readable file-like object for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_string(manager, resource_name):
"""Return a string containing the contents of `resource_name`
`manager` must be an ``IResourceManager``"""
def has_resource(resource_name):
"""Does the package contain the named resource?"""
def resource_isdir(resource_name):
"""Is the named resource a directory? (like ``os.path.isdir()``)"""
def resource_listdir(resource_name):
"""List of resource names in the directory (like ``os.listdir()``)"""
class WorkingSet(object):
"""A collection of active distributions on sys.path (or a similar list)"""
def __init__(self, entries=None):
"""Create working set from list of path entries (default=sys.path)"""
self.entries = []
self.entry_keys = {}
self.by_key = {}
self.callbacks = []
if entries is None:
entries = sys.path
for entry in entries:
self.add_entry(entry)
def add_entry(self, entry):
"""Add a path item to ``.entries``, finding any distributions on it
``find_distributions(entry, True)`` is used to find distributions
corresponding to the path entry, and they are added. `entry` is
always appended to ``.entries``, even if it is already present.
(This is because ``sys.path`` can contain the same value more than
once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
equal ``sys.path``.)
"""
self.entry_keys.setdefault(entry, [])
self.entries.append(entry)
for dist in find_distributions(entry, True):
self.add(dist, entry, False)
def __contains__(self,dist):
"""True if `dist` is the active distribution for its project"""
return self.by_key.get(dist.key) == dist
def find(self, req):
"""Find a distribution matching requirement `req`
If there is an active distribution for the requested project, this
returns it as long as it meets the version requirement specified by
`req`. But, if there is an active distribution for the project and it
does *not* meet the `req` requirement, ``VersionConflict`` is raised.
If there is no active distribution for the requested project, ``None``
is returned.
"""
dist = self.by_key.get(req.key)
if dist is not None and dist not in req:
raise VersionConflict(dist,req) # XXX add more info
else:
return dist
def iter_entry_points(self, group, name=None):
"""Yield entry point objects from `group` matching `name`
If `name` is None, yields all entry points in `group` from all
distributions in the working set, otherwise only ones matching
both `group` and `name` are yielded (in distribution order).
"""
for dist in self:
entries = dist.get_entry_map(group)
if name is None:
for ep in entries.values():
yield ep
elif name in entries:
yield entries[name]
def run_script(self, requires, script_name):
"""Locate distribution for `requires` and run `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
self.require(requires)[0].run_script(script_name, ns)
def __iter__(self):
"""Yield distributions for non-duplicate projects in the working set
The yield order is the order in which the items' path entries were
added to the working set.
"""
seen = {}
for item in self.entries:
if item not in self.entry_keys:
# workaround a cache issue
continue
for key in self.entry_keys[item]:
if key not in seen:
seen[key]=1
yield self.by_key[key]
def add(self, dist, entry=None, insert=True):
"""Add `dist` to working set, associated with `entry`
If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
On exit from this routine, `entry` is added to the end of the working
set's ``.entries`` (if it wasn't already present).
`dist` is only added to the working set if it's for a project that
doesn't already have a distribution in the set. If it's added, any
callbacks registered with the ``subscribe()`` method will be called.
"""
if insert:
dist.insert_on(self.entries, entry)
if entry is None:
entry = dist.location
keys = self.entry_keys.setdefault(entry,[])
keys2 = self.entry_keys.setdefault(dist.location,[])
if dist.key in self.by_key:
return # ignore hidden distros
self.by_key[dist.key] = dist
if dist.key not in keys:
keys.append(dist.key)
if dist.key not in keys2:
keys2.append(dist.key)
self._added_new(dist)
def resolve(self, requirements, env=None, installer=None):
"""List all distributions needed to (recursively) meet `requirements`
`requirements` must be a sequence of ``Requirement`` objects. `env`,
if supplied, should be an ``Environment`` instance. If
not supplied, it defaults to all distributions available within any
entry or distribution in the working set. `installer`, if supplied,
will be invoked with each requirement that cannot be met by an
already-installed distribution; it should return a ``Distribution`` or
``None``.
"""
requirements = list(requirements)[::-1] # set up the stack
processed = {} # set of processed requirements
best = {} # key -> dist
to_activate = []
while requirements:
req = requirements.pop(0) # process dependencies breadth-first
if req in processed:
# Ignore cyclic or redundant dependencies
continue
dist = best.get(req.key)
if dist is None:
# Find the best distribution and add it to the map
dist = self.by_key.get(req.key)
if dist is None:
if env is None:
env = Environment(self.entries)
dist = best[req.key] = env.best_match(req, self, installer)
if dist is None:
#msg = ("The '%s' distribution was not found on this "
# "system, and is required by this application.")
#raise DistributionNotFound(msg % req)
# unfortunately, zc.buildout uses a str(err)
# to get the name of the distribution here..
raise DistributionNotFound(req)
to_activate.append(dist)
if dist not in req:
# Oops, the "best" so far conflicts with a dependency
raise VersionConflict(dist,req) # XXX put more info here
requirements.extend(dist.requires(req.extras)[::-1])
processed[req] = True
return to_activate # return list of distros to activate
def find_plugins(self, plugin_env, full_env=None, installer=None,
fallback=True):
"""Find all activatable distributions in `plugin_env`
Example usage::
distributions, errors = working_set.find_plugins(
Environment(plugin_dirlist)
)
map(working_set.add, distributions) # add plugins+libs to sys.path
print 'Could not load', errors # display errors
The `plugin_env` should be an ``Environment`` instance that contains
only distributions that are in the project's "plugin directory" or
directories. The `full_env`, if supplied, should be an ``Environment``
contains all currently-available distributions. If `full_env` is not
supplied, one is created automatically from the ``WorkingSet`` this
method is called on, which will typically mean that every directory on
``sys.path`` will be scanned for distributions.
`installer` is a standard installer callback as used by the
``resolve()`` method. The `fallback` flag indicates whether we should
attempt to resolve older versions of a plugin if the newest version
cannot be resolved.
This method returns a 2-tuple: (`distributions`, `error_info`), where
`distributions` is a list of the distributions found in `plugin_env`
that were loadable, along with any other distributions that are needed
to resolve their dependencies. `error_info` is a dictionary mapping
unloadable plugin distributions to an exception instance describing the
error that occurred. Usually this will be a ``DistributionNotFound`` or
``VersionConflict`` instance.
"""
plugin_projects = list(plugin_env)
plugin_projects.sort() # scan project names in alphabetic order
error_info = {}
distributions = {}
if full_env is None:
env = Environment(self.entries)
env += plugin_env
else:
env = full_env + plugin_env
shadow_set = self.__class__([])
list(map(shadow_set.add, self)) # put all our entries in shadow_set
for project_name in plugin_projects:
for dist in plugin_env[project_name]:
req = [dist.as_requirement()]
try:
resolvees = shadow_set.resolve(req, env, installer)
except ResolutionError:
v = sys.exc_info()[1]
error_info[dist] = v # save error info
if fallback:
continue # try the next older version of project
else:
break # give up on this project, keep going
else:
list(map(shadow_set.add, resolvees))
distributions.update(dict.fromkeys(resolvees))
# success, no need to try any more versions of this project
break
distributions = list(distributions)
distributions.sort()
return distributions, error_info
def require(self, *requirements):
"""Ensure that distributions matching `requirements` are activated
`requirements` must be a string or a (possibly-nested) sequence
thereof, specifying the distributions and versions required. The
return value is a sequence of the distributions that needed to be
activated to fulfill the requirements; all relevant distributions are
included, even if they were already activated in this working set.
"""
needed = self.resolve(parse_requirements(requirements))
for dist in needed:
self.add(dist)
return needed
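    # A hedged usage sketch (the requirement string is illustrative): activate
    # a distribution on the module-level working_set and inspect what was
    # added.
    #
    #     for dist in working_set.require("SomeProject>=1.0"):
    #         print(dist.project_name, dist.version, dist.location)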
def subscribe(self, callback):
"""Invoke `callback` for all distributions (including existing ones)"""
if callback in self.callbacks:
return
self.callbacks.append(callback)
for dist in self:
callback(dist)
def _added_new(self, dist):
for callback in self.callbacks:
callback(dist)
def __getstate__(self):
return (
self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
self.callbacks[:]
)
def __setstate__(self, e_k_b_c):
entries, keys, by_key, callbacks = e_k_b_c
self.entries = entries[:]
self.entry_keys = keys.copy()
self.by_key = by_key.copy()
self.callbacks = callbacks[:]
class Environment(object):
"""Searchable snapshot of distributions on a search path"""
def __init__(self, search_path=None, platform=get_supported_platform(), python=PY_MAJOR):
"""Snapshot distributions available on a search path
Any distributions found on `search_path` are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used.
`platform` is an optional string specifying the name of the platform
that platform-specific distributions must be compatible with. If
unspecified, it defaults to the current platform. `python` is an
optional string naming the desired version of Python (e.g. ``'3.3'``);
it defaults to the current version.
You may explicitly set `platform` (and/or `python`) to ``None`` if you
wish to map *all* distributions, not just those compatible with the
running platform or Python version.
"""
self._distmap = {}
self._cache = {}
self.platform = platform
self.python = python
self.scan(search_path)
def can_add(self, dist):
"""Is distribution `dist` acceptable for this environment?
The distribution must match the platform and python version
requirements specified when this environment was created, or False
is returned.
"""
return (self.python is None or dist.py_version is None
or dist.py_version==self.python) \
and compatible_platforms(dist.platform,self.platform)
def remove(self, dist):
"""Remove `dist` from the environment"""
self._distmap[dist.key].remove(dist)
def scan(self, search_path=None):
"""Scan `search_path` for distributions usable in this environment
Any distributions found are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used. Only distributions conforming to
the platform/python version defined at initialization are added.
"""
if search_path is None:
search_path = sys.path
for item in search_path:
for dist in find_distributions(item):
self.add(dist)
def __getitem__(self,project_name):
"""Return a newest-to-oldest list of distributions for `project_name`
"""
try:
return self._cache[project_name]
except KeyError:
project_name = project_name.lower()
if project_name not in self._distmap:
return []
if project_name not in self._cache:
dists = self._cache[project_name] = self._distmap[project_name]
_sort_dists(dists)
return self._cache[project_name]
def add(self,dist):
"""Add `dist` if we ``can_add()`` it and it isn't already added"""
if self.can_add(dist) and dist.has_version():
dists = self._distmap.setdefault(dist.key,[])
if dist not in dists:
dists.append(dist)
if dist.key in self._cache:
_sort_dists(self._cache[dist.key])
def best_match(self, req, working_set, installer=None):
"""Find distribution best matching `req` and usable on `working_set`
This calls the ``find(req)`` method of the `working_set` to see if a
suitable distribution is already active. (This may raise
``VersionConflict`` if an unsuitable version of the project is already
active in the specified `working_set`.) If a suitable distribution
isn't active, this method returns the newest distribution in the
environment that meets the ``Requirement`` in `req`. If no suitable
distribution is found, and `installer` is supplied, then the result of
calling the environment's ``obtain(req, installer)`` method will be
returned.
"""
dist = working_set.find(req)
if dist is not None:
return dist
for dist in self[req.key]:
if dist in req:
return dist
return self.obtain(req, installer) # try and download/install
def obtain(self, requirement, installer=None):
"""Obtain a distribution matching `requirement` (e.g. via download)
Obtain a distro that matches requirement (e.g. via download). In the
base ``Environment`` class, this routine just returns
``installer(requirement)``, unless `installer` is None, in which case
None is returned instead. This method is a hook that allows subclasses
to attempt other ways of obtaining a distribution before falling back
to the `installer` argument."""
if installer is not None:
return installer(requirement)
def __iter__(self):
"""Yield the unique project names of the available distributions"""
for key in self._distmap.keys():
if self[key]: yield key
def __iadd__(self, other):
"""In-place addition of a distribution or environment"""
if isinstance(other,Distribution):
self.add(other)
elif isinstance(other,Environment):
for project in other:
for dist in other[project]:
self.add(dist)
else:
raise TypeError("Can't add %r to environment" % (other,))
return self
def __add__(self, other):
"""Add an environment or distribution to an environment"""
new = self.__class__([], platform=None, python=None)
for env in self, other:
new += env
return new
AvailableDistributions = Environment # XXX backward compatibility
class ExtractionError(RuntimeError):
"""An error occurred extracting a resource
The following attributes are available from instances of this exception:
manager
The resource manager that raised this exception
cache_path
The base directory for resource extraction
original_error
The exception instance that caused extraction to fail
"""
class ResourceManager:
"""Manage resource extraction and packages"""
extraction_path = None
def __init__(self):
self.cached_files = {}
def resource_exists(self, package_or_requirement, resource_name):
"""Does the named resource exist?"""
return get_provider(package_or_requirement).has_resource(resource_name)
def resource_isdir(self, package_or_requirement, resource_name):
"""Is the named resource an existing directory?"""
return get_provider(package_or_requirement).resource_isdir(
resource_name
)
def resource_filename(self, package_or_requirement, resource_name):
"""Return a true filesystem path for specified resource"""
return get_provider(package_or_requirement).get_resource_filename(
self, resource_name
)
def resource_stream(self, package_or_requirement, resource_name):
"""Return a readable file-like object for specified resource"""
return get_provider(package_or_requirement).get_resource_stream(
self, resource_name
)
def resource_string(self, package_or_requirement, resource_name):
"""Return specified resource as a string"""
return get_provider(package_or_requirement).get_resource_string(
self, resource_name
)
def resource_listdir(self, package_or_requirement, resource_name):
"""List the contents of the named resource directory"""
return get_provider(package_or_requirement).resource_listdir(
resource_name
)
def extraction_error(self):
"""Give an error message for problems extracting file(s)"""
old_exc = sys.exc_info()[1]
cache_path = self.extraction_path or get_default_cache()
err = ExtractionError("""Can't extract file(s) to egg cache
The following error occurred while trying to extract file(s) to the Python egg
cache:
%s
The Python egg cache directory is currently set to:
%s
Perhaps your account does not have write access to this directory? You can
change the cache directory by setting the PYTHON_EGG_CACHE environment
variable to point to an accessible directory.
""" % (old_exc, cache_path)
)
err.manager = self
err.cache_path = cache_path
err.original_error = old_exc
raise err
def get_cache_path(self, archive_name, names=()):
"""Return absolute location in cache for `archive_name` and `names`
The parent directory of the resulting path will be created if it does
not already exist. `archive_name` should be the base filename of the
enclosing egg (which may not be the name of the enclosing zipfile!),
including its ".egg" extension. `names`, if provided, should be a
sequence of path name parts "under" the egg's extraction location.
This method should only be called by resource providers that need to
obtain an extraction location, and only for names they intend to
extract, as it tracks the generated names for possible cleanup later.
"""
extract_path = self.extraction_path or get_default_cache()
target_path = os.path.join(extract_path, archive_name+'-tmp', *names)
try:
_bypass_ensure_directory(target_path)
except:
self.extraction_error()
self._warn_unsafe_extraction_path(extract_path)
self.cached_files[target_path] = 1
return target_path
@staticmethod
def _warn_unsafe_extraction_path(path):
"""
If the default extraction path is overridden and set to an insecure
location, such as /tmp, it opens up an opportunity for an attacker to
replace an extracted file with an unauthorized payload. Warn the user
if a known insecure location is used.
See Distribute #375 for more details.
"""
if os.name == 'nt' and not path.startswith(os.environ['windir']):
# On Windows, permissions are generally restrictive by default
# and temp directories are not writable by other users, so
# bypass the warning.
return
mode = os.stat(path).st_mode
if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
msg = ("%s is writable by group/others and vulnerable to attack "
"when "
"used with get_resource_filename. Consider a more secure "
"location (set with .set_extraction_path or the "
"PYTHON_EGG_CACHE environment variable)." % path)
warnings.warn(msg, UserWarning)
def postprocess(self, tempname, filename):
"""Perform any platform-specific postprocessing of `tempname`
This is where Mac header rewrites should be done; other platforms don't
have anything special they should do.
Resource providers should call this method ONLY after successfully
extracting a compressed resource. They must NOT call it on resources
that are already in the filesystem.
`tempname` is the current (temporary) name of the file, and `filename`
is the name it will be renamed to by the caller after this routine
returns.
"""
if os.name == 'posix':
# Make the resource executable
mode = ((os.stat(tempname).st_mode) | 0x16D) & 0xFFF # 0555, 07777
os.chmod(tempname, mode)
def set_extraction_path(self, path):
"""Set the base path where resources will be extracted to, if needed.
If you do not call this routine before any extractions take place, the
path defaults to the return value of ``get_default_cache()``. (Which
is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
platform-specific fallbacks. See that routine's documentation for more
details.)
Resources are extracted to subdirectories of this path based upon
information given by the ``IResourceProvider``. You may set this to a
temporary directory, but then you must call ``cleanup_resources()`` to
delete the extracted files when done. There is no guarantee that
``cleanup_resources()`` will be able to remove all extracted files.
(Note: you may not change the extraction path for a given resource
manager once resources have been extracted, unless you first call
``cleanup_resources()``.)
"""
if self.cached_files:
raise ValueError(
"Can't change extraction path, files already extracted"
)
self.extraction_path = path
def cleanup_resources(self, force=False):
"""
Delete all extracted resource files and directories, returning a list
of the file and directory names that could not be successfully removed.
This function does not have any concurrency protection, so it should
generally only be called when the extraction path is a temporary
directory exclusive to a single process. This method is not
automatically called; you must call it explicitly or register it as an
``atexit`` function if you wish to ensure cleanup of a temporary
directory used for extractions.
"""
# XXX
def get_default_cache():
"""Determine the default cache location
This returns the ``PYTHON_EGG_CACHE`` environment variable, if set.
Otherwise, on Windows, it returns a "Python-Eggs" subdirectory of the
"Application Data" directory. On all other systems, it's "~/.python-eggs".
"""
try:
return os.environ['PYTHON_EGG_CACHE']
except KeyError:
pass
if os.name!='nt':
return os.path.expanduser('~/.python-eggs')
app_data = 'Application Data' # XXX this may be locale-specific!
app_homes = [
(('APPDATA',), None), # best option, should be locale-safe
(('USERPROFILE',), app_data),
(('HOMEDRIVE','HOMEPATH'), app_data),
(('HOMEPATH',), app_data),
(('HOME',), None),
(('WINDIR',), app_data), # 95/98/ME
]
for keys, subdir in app_homes:
dirname = ''
for key in keys:
if key in os.environ:
dirname = os.path.join(dirname, os.environ[key])
else:
break
else:
if subdir:
dirname = os.path.join(dirname,subdir)
return os.path.join(dirname, 'Python-Eggs')
else:
raise RuntimeError(
"Please set the PYTHON_EGG_CACHE enviroment variable"
)
def safe_name(name):
"""Convert an arbitrary string to a standard distribution name
Any runs of non-alphanumeric/. characters are replaced with a single '-'.
"""
return re.sub('[^A-Za-z0-9.]+', '-', name)
def safe_version(version):
"""Convert an arbitrary string to a standard version string
Spaces become dots, and all other non-alphanumeric characters become
dashes, with runs of multiple dashes condensed to a single dash.
"""
version = version.replace(' ','.')
return re.sub('[^A-Za-z0-9.]+', '-', version)
def safe_extra(extra):
"""Convert an arbitrary string to a standard 'extra' name
Any runs of non-alphanumeric characters are replaced with a single '_',
and the result is always lowercased.
"""
return re.sub('[^A-Za-z0-9.]+', '_', extra).lower()
def to_filename(name):
"""Convert a project or version name to its filename-escaped form
Any '-' characters are currently replaced with '_'.
"""
return name.replace('-','_')
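# Hedged examples of the three helpers above; the outputs follow directly from
# the substitutions shown and are illustrative only:
#
#     safe_name("pip_vendor")      -> "pip-vendor"
#     safe_version("1.0 beta 2")   -> "1.0.beta.2"
#     to_filename("my-project")    -> "my_project"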
class MarkerEvaluation(object):
values = {
'os_name': lambda: os.name,
'sys_platform': lambda: sys.platform,
'python_full_version': lambda: sys.version.split()[0],
'python_version': lambda:'%s.%s' % (sys.version_info[0], sys.version_info[1]),
'platform_version': platform.version,
'platform_machine': platform.machine,
'python_implementation': platform.python_implementation,
}
@classmethod
def is_invalid_marker(cls, text):
"""
Validate text as a PEP 426 environment marker; return an exception
if invalid or False otherwise.
"""
try:
cls.evaluate_marker(text)
except SyntaxError:
return cls.normalize_exception(sys.exc_info()[1])
return False
@staticmethod
def normalize_exception(exc):
"""
Given a SyntaxError from a marker evaluation, normalize the error message:
- Remove indications of filename and line number.
- Replace platform-specific error messages with standard error messages.
"""
subs = {
'unexpected EOF while parsing': 'invalid syntax',
'parenthesis is never closed': 'invalid syntax',
}
exc.filename = None
exc.lineno = None
exc.msg = subs.get(exc.msg, exc.msg)
return exc
@classmethod
def and_test(cls, nodelist):
# MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
return functools.reduce(operator.and_, [cls.interpret(nodelist[i]) for i in range(1,len(nodelist),2)])
@classmethod
def test(cls, nodelist):
# MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
return functools.reduce(operator.or_, [cls.interpret(nodelist[i]) for i in range(1,len(nodelist),2)])
@classmethod
def atom(cls, nodelist):
t = nodelist[1][0]
if t == token.LPAR:
if nodelist[2][0] == token.RPAR:
raise SyntaxError("Empty parentheses")
return cls.interpret(nodelist[2])
raise SyntaxError("Language feature not supported in environment markers")
@classmethod
def comparison(cls, nodelist):
if len(nodelist)>4:
raise SyntaxError("Chained comparison not allowed in environment markers")
comp = nodelist[2][1]
cop = comp[1]
if comp[0] == token.NAME:
if len(nodelist[2]) == 3:
if cop == 'not':
cop = 'not in'
else:
cop = 'is not'
try:
cop = cls.get_op(cop)
except KeyError:
raise SyntaxError(repr(cop)+" operator not allowed in environment markers")
return cop(cls.evaluate(nodelist[1]), cls.evaluate(nodelist[3]))
@classmethod
def get_op(cls, op):
ops = {
symbol.test: cls.test,
symbol.and_test: cls.and_test,
symbol.atom: cls.atom,
symbol.comparison: cls.comparison,
'not in': lambda x, y: x not in y,
'in': lambda x, y: x in y,
'==': operator.eq,
'!=': operator.ne,
}
if hasattr(symbol, 'or_test'):
ops[symbol.or_test] = cls.test
return ops[op]
@classmethod
def evaluate_marker(cls, text, extra=None):
"""
Evaluate a PEP 426 environment marker on CPython 2.4+.
Return a boolean indicating the marker result in this environment.
Raise SyntaxError if marker is invalid.
This implementation uses the 'parser' module, which is not implemented on
Jython and has been superseded by the 'ast' module in Python 2.6 and
later.
"""
return cls.interpret(parser.expr(text).totuple(1)[1])
@classmethod
def _markerlib_evaluate(cls, text):
"""
Evaluate a PEP 426 environment marker using markerlib.
Return a boolean indicating the marker result in this environment.
Raise SyntaxError if marker is invalid.
"""
from pip._vendor import _markerlib
# markerlib implements Metadata 1.2 (PEP 345) environment markers.
# Translate the variables to Metadata 2.0 (PEP 426).
env = _markerlib.default_environment()
for key in env.keys():
new_key = key.replace('.', '_')
env[new_key] = env.pop(key)
try:
result = _markerlib.interpret(text, env)
except NameError:
e = sys.exc_info()[1]
raise SyntaxError(e.args[0])
return result
if 'parser' not in globals():
# Fall back to less-complete _markerlib implementation if 'parser' module
# is not available.
evaluate_marker = _markerlib_evaluate
@classmethod
def interpret(cls, nodelist):
while len(nodelist)==2: nodelist = nodelist[1]
try:
op = cls.get_op(nodelist[0])
except KeyError:
raise SyntaxError("Comparison or logical expression expected")
return op(nodelist)
@classmethod
def evaluate(cls, nodelist):
while len(nodelist)==2: nodelist = nodelist[1]
kind = nodelist[0]
name = nodelist[1]
if kind==token.NAME:
try:
op = cls.values[name]
except KeyError:
raise SyntaxError("Unknown name %r" % name)
return op()
if kind==token.STRING:
s = nodelist[1]
if s[:1] not in "'\"" or s.startswith('"""') or s.startswith("'''") \
or '\\' in s:
raise SyntaxError(
"Only plain strings allowed in environment markers")
return s[1:-1]
raise SyntaxError("Language feature not supported in environment markers")
invalid_marker = MarkerEvaluation.is_invalid_marker
evaluate_marker = MarkerEvaluation.evaluate_marker
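# Illustrative usage (editor's sketch, not part of the original module); only the
# ==, !=, 'in' and 'not in' operators are accepted by this evaluator:
#   invalid_marker("os_name=='posix'")   -> False (the marker is well-formed)
#   invalid_marker("os.name=='posix'")   -> a normalized SyntaxError instance
#   evaluate_marker("os_name=='posix'")  -> True on POSIX platforms, else False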
class NullProvider:
"""Try to implement resources and metadata for arbitrary PEP 302 loaders"""
egg_name = None
egg_info = None
loader = None
def __init__(self, module):
self.loader = getattr(module, '__loader__', None)
self.module_path = os.path.dirname(getattr(module, '__file__', ''))
def get_resource_filename(self, manager, resource_name):
return self._fn(self.module_path, resource_name)
def get_resource_stream(self, manager, resource_name):
return BytesIO(self.get_resource_string(manager, resource_name))
def get_resource_string(self, manager, resource_name):
return self._get(self._fn(self.module_path, resource_name))
def has_resource(self, resource_name):
return self._has(self._fn(self.module_path, resource_name))
def has_metadata(self, name):
return self.egg_info and self._has(self._fn(self.egg_info,name))
if sys.version_info <= (3,):
def get_metadata(self, name):
if not self.egg_info:
return ""
return self._get(self._fn(self.egg_info,name))
else:
def get_metadata(self, name):
if not self.egg_info:
return ""
return self._get(self._fn(self.egg_info,name)).decode("utf-8")
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
def resource_isdir(self,resource_name):
return self._isdir(self._fn(self.module_path, resource_name))
def metadata_isdir(self,name):
return self.egg_info and self._isdir(self._fn(self.egg_info,name))
def resource_listdir(self,resource_name):
return self._listdir(self._fn(self.module_path,resource_name))
def metadata_listdir(self,name):
if self.egg_info:
return self._listdir(self._fn(self.egg_info,name))
return []
def run_script(self,script_name,namespace):
script = 'scripts/'+script_name
if not self.has_metadata(script):
raise ResolutionError("No script named %r" % script_name)
script_text = self.get_metadata(script).replace('\r\n','\n')
script_text = script_text.replace('\r','\n')
script_filename = self._fn(self.egg_info,script)
namespace['__file__'] = script_filename
if os.path.exists(script_filename):
execfile(script_filename, namespace, namespace)
else:
from linecache import cache
cache[script_filename] = (
len(script_text), 0, script_text.split('\n'), script_filename
)
script_code = compile(script_text,script_filename,'exec')
exec(script_code, namespace, namespace)
def _has(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _isdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _listdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _fn(self, base, resource_name):
if resource_name:
return os.path.join(base, *resource_name.split('/'))
return base
def _get(self, path):
if hasattr(self.loader, 'get_data'):
return self.loader.get_data(path)
raise NotImplementedError(
"Can't perform this operation for loaders without 'get_data()'"
)
register_loader_type(object, NullProvider)
class EggProvider(NullProvider):
"""Provider based on a virtual filesystem"""
def __init__(self,module):
NullProvider.__init__(self,module)
self._setup_prefix()
def _setup_prefix(self):
# we assume here that our metadata may be nested inside a "basket"
# of multiple eggs; that's why we use module_path instead of .archive
path = self.module_path
old = None
while path!=old:
if path.lower().endswith('.egg'):
self.egg_name = os.path.basename(path)
self.egg_info = os.path.join(path, 'EGG-INFO')
self.egg_root = path
break
old = path
path, base = os.path.split(path)
class DefaultProvider(EggProvider):
"""Provides access to package resources in the filesystem"""
def _has(self, path):
return os.path.exists(path)
def _isdir(self,path):
return os.path.isdir(path)
def _listdir(self,path):
return os.listdir(path)
def get_resource_stream(self, manager, resource_name):
return open(self._fn(self.module_path, resource_name), 'rb')
def _get(self, path):
stream = open(path, 'rb')
try:
return stream.read()
finally:
stream.close()
register_loader_type(type(None), DefaultProvider)
if importlib_bootstrap is not None:
register_loader_type(importlib_bootstrap.SourceFileLoader, DefaultProvider)
class EmptyProvider(NullProvider):
"""Provider that returns nothing for all requests"""
_isdir = _has = lambda self,path: False
_get = lambda self,path: ''
_listdir = lambda self,path: []
module_path = None
def __init__(self):
pass
empty_provider = EmptyProvider()
def build_zipmanifest(path):
"""
This builds a similar dictionary to the zipimport directory
caches. However instead of tuples, ZipInfo objects are stored.
The translation of the tuple is as follows:
* [0] - zipinfo.filename; on stock Pythons this needs "/" --> os.sep, while
on PyPy it is already os.sep (one reason distribute happened to work
in some cases on PyPy and win32).
* [1] - zipinfo.compress_type
* [2] - zipinfo.compress_size
* [3] - zipinfo.file_size
* [4] - len(utf-8 encoding of filename) if the zipinfo flag bits include 0x800,
len(ascii encoding of filename) otherwise
* [5] - (zipinfo.date_time[0] - 1980) << 9 |
zipinfo.date_time[1] << 5 | zipinfo.date_time[2]
* [6] - (zipinfo.date_time[3] - 1980) << 11 |
zipinfo.date_time[4] << 5 | (zipinfo.date_time[5] // 2)
* [7] - zipinfo.CRC
"""
zipinfo = dict()
zfile = zipfile.ZipFile(path)
# ZipFile has no __exit__ on Python 3.1, so use try/finally instead of a with-statement
try:
for zitem in zfile.namelist():
zpath = zitem.replace('/', os.sep)
zipinfo[zpath] = zfile.getinfo(zitem)
assert zipinfo[zpath] is not None
finally:
zfile.close()
return zipinfo
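# Illustrative usage (editor's sketch; the archive path is hypothetical):
#   manifest = build_zipmanifest('/tmp/example-1.0-py2.7.egg')
#   info = manifest[os.path.join('EGG-INFO', 'PKG-INFO')]   # a zipfile.ZipInfo
#   info.file_size, info.date_time                          # size and timestamp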
class ZipProvider(EggProvider):
"""Resource support for zips and eggs"""
eagers = None
def __init__(self, module):
EggProvider.__init__(self,module)
self.zipinfo = build_zipmanifest(self.loader.archive)
self.zip_pre = self.loader.archive+os.sep
def _zipinfo_name(self, fspath):
# Convert a virtual filename (full path to file) into a zipfile subpath
# usable with the zipimport directory cache for our target archive
if fspath.startswith(self.zip_pre):
return fspath[len(self.zip_pre):]
raise AssertionError(
"%s is not a subpath of %s" % (fspath,self.zip_pre)
)
def _parts(self,zip_path):
# Convert a zipfile subpath into an egg-relative path part list
fspath = self.zip_pre+zip_path # pseudo-fs path
if fspath.startswith(self.egg_root+os.sep):
return fspath[len(self.egg_root)+1:].split(os.sep)
raise AssertionError(
"%s is not a subpath of %s" % (fspath,self.egg_root)
)
def get_resource_filename(self, manager, resource_name):
if not self.egg_name:
raise NotImplementedError(
"resource_filename() only supported for .egg, not .zip"
)
# no need to lock for extraction, since we use temp names
zip_path = self._resource_to_zip(resource_name)
eagers = self._get_eager_resources()
if '/'.join(self._parts(zip_path)) in eagers:
for name in eagers:
self._extract_resource(manager, self._eager_to_zip(name))
return self._extract_resource(manager, zip_path)
@staticmethod
def _get_date_and_size(zip_stat):
size = zip_stat.file_size
date_time = zip_stat.date_time + (0, 0, -1) # ymdhms+wday, yday, dst
#1980 offset already done
timestamp = time.mktime(date_time)
return timestamp, size
def _extract_resource(self, manager, zip_path):
if zip_path in self._index():
for name in self._index()[zip_path]:
last = self._extract_resource(
manager, os.path.join(zip_path, name)
)
return os.path.dirname(last) # return the extracted directory name
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not WRITE_SUPPORT:
raise IOError('"os.rename" and "os.unlink" are not supported '
'on this platform')
try:
real_path = manager.get_cache_path(
self.egg_name, self._parts(zip_path)
)
if self._is_current(real_path, zip_path):
return real_path
outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path))
os.write(outf, self.loader.get_data(zip_path))
os.close(outf)
utime(tmpnam, (timestamp,timestamp))
manager.postprocess(tmpnam, real_path)
try:
rename(tmpnam, real_path)
except os.error:
if os.path.isfile(real_path):
if self._is_current(real_path, zip_path):
# the file became current since it was checked above,
# so proceed.
return real_path
elif os.name=='nt': # Windows, del old file and retry
unlink(real_path)
rename(tmpnam, real_path)
return real_path
raise
except os.error:
manager.extraction_error() # report a user-friendly error
return real_path
def _is_current(self, file_path, zip_path):
"""
Return True if the file_path is current for this zip_path
"""
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not os.path.isfile(file_path):
return False
stat = os.stat(file_path)
if stat.st_size!=size or stat.st_mtime!=timestamp:
return False
# check that the contents match
zip_contents = self.loader.get_data(zip_path)
f = open(file_path, 'rb')
file_contents = f.read()
f.close()
return zip_contents == file_contents
def _get_eager_resources(self):
if self.eagers is None:
eagers = []
for name in ('native_libs.txt', 'eager_resources.txt'):
if self.has_metadata(name):
eagers.extend(self.get_metadata_lines(name))
self.eagers = eagers
return self.eagers
def _index(self):
try:
return self._dirindex
except AttributeError:
ind = {}
for path in self.zipinfo:
parts = path.split(os.sep)
while parts:
parent = os.sep.join(parts[:-1])
if parent in ind:
ind[parent].append(parts[-1])
break
else:
ind[parent] = [parts.pop()]
self._dirindex = ind
return ind
def _has(self, fspath):
zip_path = self._zipinfo_name(fspath)
return zip_path in self.zipinfo or zip_path in self._index()
def _isdir(self,fspath):
return self._zipinfo_name(fspath) in self._index()
def _listdir(self,fspath):
return list(self._index().get(self._zipinfo_name(fspath), ()))
def _eager_to_zip(self,resource_name):
return self._zipinfo_name(self._fn(self.egg_root,resource_name))
def _resource_to_zip(self,resource_name):
return self._zipinfo_name(self._fn(self.module_path,resource_name))
register_loader_type(zipimport.zipimporter, ZipProvider)
class FileMetadata(EmptyProvider):
"""Metadata handler for standalone PKG-INFO files
Usage::
metadata = FileMetadata("/path/to/PKG-INFO")
This provider rejects all data and metadata requests except for PKG-INFO,
which is treated as existing, and will be the contents of the file at
the provided location.
"""
def __init__(self,path):
self.path = path
def has_metadata(self,name):
return name=='PKG-INFO'
def get_metadata(self,name):
if name=='PKG-INFO':
f = open(self.path,'rU')
metadata = f.read()
f.close()
return metadata
raise KeyError("No metadata except PKG-INFO is available")
def get_metadata_lines(self,name):
return yield_lines(self.get_metadata(name))
class PathMetadata(DefaultProvider):
"""Metadata provider for egg directories
Usage::
# Development eggs:
egg_info = "/path/to/PackageName.egg-info"
base_dir = os.path.dirname(egg_info)
metadata = PathMetadata(base_dir, egg_info)
dist_name = os.path.splitext(os.path.basename(egg_info))[0]
dist = Distribution(base_dir, project_name=dist_name, metadata=metadata)
# Unpacked egg directories:
egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
dist = Distribution.from_filename(egg_path, metadata=metadata)
"""
def __init__(self, path, egg_info):
self.module_path = path
self.egg_info = egg_info
class EggMetadata(ZipProvider):
"""Metadata provider for .egg files"""
def __init__(self, importer):
"""Create a metadata provider from a zipimporter"""
self.zipinfo = build_zipmanifest(importer.archive)
self.zip_pre = importer.archive+os.sep
self.loader = importer
if importer.prefix:
self.module_path = os.path.join(importer.archive, importer.prefix)
else:
self.module_path = importer.archive
self._setup_prefix()
_declare_state('dict', _distribution_finders = {})
def register_finder(importer_type, distribution_finder):
"""Register `distribution_finder` to find distributions in sys.path items
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `distribution_finder` is a callable that, passed a path
item and the importer instance, yields ``Distribution`` instances found on
that path item. See ``pkg_resources.find_on_path`` for an example."""
_distribution_finders[importer_type] = distribution_finder
def find_distributions(path_item, only=False):
"""Yield distributions accessible via `path_item`"""
importer = get_importer(path_item)
finder = _find_adapter(_distribution_finders, importer)
return finder(importer, path_item, only)
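# Illustrative usage (editor's sketch; the directory is hypothetical):
#   for dist in find_distributions('/usr/lib/python2.7/site-packages'):
#       print(dist.project_name, dist.version)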
def find_eggs_in_zip(importer, path_item, only=False):
"""
Find eggs in zip files; possibly multiple nested eggs.
"""
if importer.archive.endswith('.whl'):
# wheels are not supported with this finder
# they don't have PKG-INFO metadata, and won't ever contain eggs
return
metadata = EggMetadata(importer)
if metadata.has_metadata('PKG-INFO'):
yield Distribution.from_filename(path_item, metadata=metadata)
if only:
return # don't yield nested distros
for subitem in metadata.resource_listdir('/'):
if subitem.endswith('.egg'):
subpath = os.path.join(path_item, subitem)
for dist in find_eggs_in_zip(zipimport.zipimporter(subpath), subpath):
yield dist
register_finder(zipimport.zipimporter, find_eggs_in_zip)
def find_nothing(importer, path_item, only=False):
return ()
register_finder(object,find_nothing)
def find_on_path(importer, path_item, only=False):
"""Yield distributions accessible on a sys.path directory"""
path_item = _normalize_cached(path_item)
if os.path.isdir(path_item) and os.access(path_item, os.R_OK):
if path_item.lower().endswith('.egg'):
# unpacked egg
yield Distribution.from_filename(
path_item, metadata=PathMetadata(
path_item, os.path.join(path_item,'EGG-INFO')
)
)
else:
# scan for .egg and .egg-info in directory
for entry in os.listdir(path_item):
lower = entry.lower()
if lower.endswith('.egg-info') or lower.endswith('.dist-info'):
fullpath = os.path.join(path_item, entry)
if os.path.isdir(fullpath):
# egg-info directory, allow getting metadata
metadata = PathMetadata(path_item, fullpath)
else:
metadata = FileMetadata(fullpath)
yield Distribution.from_location(
path_item,entry,metadata,precedence=DEVELOP_DIST
)
elif not only and lower.endswith('.egg'):
for dist in find_distributions(os.path.join(path_item, entry)):
yield dist
elif not only and lower.endswith('.egg-link'):
entry_file = open(os.path.join(path_item, entry))
try:
entry_lines = entry_file.readlines()
finally:
entry_file.close()
for line in entry_lines:
if not line.strip(): continue
for item in find_distributions(os.path.join(path_item,line.rstrip())):
yield item
break
register_finder(pkgutil.ImpImporter,find_on_path)
if importlib_bootstrap is not None:
register_finder(importlib_bootstrap.FileFinder, find_on_path)
_declare_state('dict', _namespace_handlers={})
_declare_state('dict', _namespace_packages={})
def register_namespace_handler(importer_type, namespace_handler):
"""Register `namespace_handler` to declare namespace packages
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `namespace_handler` is a callable like this::
def namespace_handler(importer,path_entry,moduleName,module):
# return a path_entry to use for child packages
Namespace handlers are only called if the importer object has already
agreed that it can handle the relevant path item, and they should only
return a subpath if the module __path__ does not already contain an
equivalent subpath. For an example namespace handler, see
``pkg_resources.file_ns_handler``.
"""
_namespace_handlers[importer_type] = namespace_handler
def _handle_ns(packageName, path_item):
"""Ensure that named package includes a subpath of path_item (if needed)"""
importer = get_importer(path_item)
if importer is None:
return None
loader = importer.find_module(packageName)
if loader is None:
return None
module = sys.modules.get(packageName)
if module is None:
module = sys.modules[packageName] = imp.new_module(packageName)
module.__path__ = []
_set_parent_ns(packageName)
elif not hasattr(module,'__path__'):
raise TypeError("Not a package:", packageName)
handler = _find_adapter(_namespace_handlers, importer)
subpath = handler(importer,path_item,packageName,module)
if subpath is not None:
path = module.__path__
path.append(subpath)
loader.load_module(packageName)
module.__path__ = path
return subpath
def declare_namespace(packageName):
"""Declare that package 'packageName' is a namespace package"""
imp.acquire_lock()
try:
if packageName in _namespace_packages:
return
path, parent = sys.path, None
if '.' in packageName:
parent = '.'.join(packageName.split('.')[:-1])
declare_namespace(parent)
if parent not in _namespace_packages:
__import__(parent)
try:
path = sys.modules[parent].__path__
except AttributeError:
raise TypeError("Not a package:", parent)
# Track what packages are namespaces, so when new path items are added,
# they can be updated
_namespace_packages.setdefault(parent,[]).append(packageName)
_namespace_packages.setdefault(packageName,[])
for path_item in path:
# Ensure all the parent's path items are reflected in the child,
# if they apply
_handle_ns(packageName, path_item)
finally:
imp.release_lock()
def fixup_namespace_packages(path_item, parent=None):
"""Ensure that previously-declared namespace packages include path_item"""
imp.acquire_lock()
try:
for package in _namespace_packages.get(parent,()):
subpath = _handle_ns(package, path_item)
if subpath: fixup_namespace_packages(subpath,package)
finally:
imp.release_lock()
def file_ns_handler(importer, path_item, packageName, module):
"""Compute an ns-package subpath for a filesystem or zipfile importer"""
subpath = os.path.join(path_item, packageName.split('.')[-1])
normalized = _normalize_cached(subpath)
for item in module.__path__:
if _normalize_cached(item)==normalized:
break
else:
# Only return the path if it's not already there
return subpath
register_namespace_handler(pkgutil.ImpImporter,file_ns_handler)
register_namespace_handler(zipimport.zipimporter,file_ns_handler)
if importlib_bootstrap is not None:
register_namespace_handler(importlib_bootstrap.FileFinder, file_ns_handler)
def null_ns_handler(importer, path_item, packageName, module):
return None
register_namespace_handler(object,null_ns_handler)
def normalize_path(filename):
"""Normalize a file/dir name for comparison purposes"""
return os.path.normcase(os.path.realpath(filename))
def _normalize_cached(filename,_cache={}):
try:
return _cache[filename]
except KeyError:
_cache[filename] = result = normalize_path(filename)
return result
def _set_parent_ns(packageName):
parts = packageName.split('.')
name = parts.pop()
if parts:
parent = '.'.join(parts)
setattr(sys.modules[parent], name, sys.modules[packageName])
def yield_lines(strs):
"""Yield non-empty/non-comment lines of a ``basestring`` or sequence"""
if isinstance(strs,basestring):
for s in strs.splitlines():
s = s.strip()
if s and not s.startswith('#'): # skip blank lines/comments
yield s
else:
for ss in strs:
for s in yield_lines(ss):
yield s
LINE_END = re.compile(r"\s*(#.*)?$").match # whitespace and comment
CONTINUE = re.compile(r"\s*\\\s*(#.*)?$").match # line continuation
DISTRO = re.compile(r"\s*((\w|[-.])+)").match # Distribution or extra
VERSION = re.compile(r"\s*(<=?|>=?|==|!=)\s*((\w|[-.])+)").match # ver. info
COMMA = re.compile(r"\s*,").match # comma between items
OBRACKET = re.compile(r"\s*\[").match
CBRACKET = re.compile(r"\s*\]").match
MODULE = re.compile(r"\w+(\.\w+)*$").match
EGG_NAME = re.compile(
r"(?P<name>[^-]+)"
r"( -(?P<ver>[^-]+) (-py(?P<pyver>[^-]+) (-(?P<plat>.+))? )? )?",
re.VERBOSE | re.IGNORECASE
).match
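# Illustrative example (editor's sketch) of how EGG_NAME splits an egg basename:
#   m = EGG_NAME('FooBar-1.2-py2.7-win32')
#   m.group('name', 'ver', 'pyver', 'plat') -> ('FooBar', '1.2', '2.7', 'win32')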
component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
replace = {'pre':'c', 'preview':'c','-':'final-','rc':'c','dev':'@'}.get
def _parse_version_parts(s):
for part in component_re.split(s):
part = replace(part,part)
if not part or part=='.':
continue
if part[:1] in '0123456789':
yield part.zfill(8) # pad for numeric comparison
else:
yield '*'+part
yield '*final' # ensure that alpha/beta/candidate are before final
def parse_version(s):
"""Convert a version string to a chronologically-sortable key
This is a rough cross between distutils' StrictVersion and LooseVersion;
if you give it versions that would work with StrictVersion, then it behaves
the same; otherwise it acts like a slightly-smarter LooseVersion. It is
*possible* to create pathological version coding schemes that will fool
this parser, but they should be very rare in practice.
The returned value will be a tuple of strings. Numeric portions of the
version are padded to 8 digits so they will compare numerically, but
without relying on how numbers compare relative to strings. Dots are
dropped, but dashes are retained. Trailing zeros between alpha segments
or dashes are suppressed, so that e.g. "2.4.0" is considered the same as
"2.4". Alphanumeric parts are lower-cased.
The algorithm assumes that strings like "-" and any alpha string that
alphabetically follows "final" represents a "patch level". So, "2.4-1"
is assumed to be a branch or patch of "2.4", and therefore "2.4.1" is
considered newer than "2.4-1", which in turn is newer than "2.4".
Strings like "a", "b", "c", "alpha", "beta", "candidate" and so on (that
come before "final" alphabetically) are assumed to be pre-release versions,
so that the version "2.4" is considered newer than "2.4a1".
Finally, to handle miscellaneous cases, the strings "pre", "preview", and
"rc" are treated as if they were "c", i.e. as though they were release
candidates, and therefore are not as new as a version string that does not
contain them, and "dev" is replaced with an '@' so that it sorts lower than
any other pre-release tag.
"""
parts = []
for part in _parse_version_parts(s.lower()):
if part.startswith('*'):
if part<'*final': # remove '-' before a prerelease tag
while parts and parts[-1]=='*final-': parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1]=='00000000':
parts.pop()
parts.append(part)
return tuple(parts)
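# Illustrative orderings (editor's sketch) produced by parse_version(), matching
# the rules described in its docstring:
#   parse_version('2.4.0') == parse_version('2.4')
#   parse_version('2.4a1') < parse_version('2.4')
#   parse_version('2.4')   < parse_version('2.4-1') < parse_version('2.4.1')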
class EntryPoint(object):
"""Object representing an advertised importable object"""
def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
if not MODULE(module_name):
raise ValueError("Invalid module name", module_name)
self.name = name
self.module_name = module_name
self.attrs = tuple(attrs)
self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras
self.dist = dist
def __str__(self):
s = "%s = %s" % (self.name, self.module_name)
if self.attrs:
s += ':' + '.'.join(self.attrs)
if self.extras:
s += ' [%s]' % ','.join(self.extras)
return s
def __repr__(self):
return "EntryPoint.parse(%r)" % str(self)
def load(self, require=True, env=None, installer=None):
if require: self.require(env, installer)
entry = __import__(self.module_name, globals(),globals(), ['__name__'])
for attr in self.attrs:
try:
entry = getattr(entry,attr)
except AttributeError:
raise ImportError("%r has no %r attribute" % (entry,attr))
return entry
def require(self, env=None, installer=None):
if self.extras and not self.dist:
raise UnknownExtra("Can't require() without a distribution", self)
list(map(working_set.add,
working_set.resolve(self.dist.requires(self.extras),env,installer)))
@classmethod
def parse(cls, src, dist=None):
"""Parse a single entry point from string `src`
Entry point syntax follows the form::
name = some.module:some.attr [extra1,extra2]
The entry name and module name are required, but the ``:attrs`` and
``[extras]`` parts are optional
"""
try:
attrs = extras = ()
name,value = src.split('=',1)
if '[' in value:
value,extras = value.split('[',1)
req = Requirement.parse("x["+extras)
if req.specs: raise ValueError
extras = req.extras
if ':' in value:
value,attrs = value.split(':',1)
if not MODULE(attrs.rstrip()):
raise ValueError
attrs = attrs.rstrip().split('.')
except ValueError:
raise ValueError(
"EntryPoint must be in 'name=module:attrs [extras]' format",
src
)
else:
return cls(name.strip(), value.strip(), attrs, extras, dist)
@classmethod
def parse_group(cls, group, lines, dist=None):
"""Parse an entry point group"""
if not MODULE(group):
raise ValueError("Invalid group name", group)
this = {}
for line in yield_lines(lines):
ep = cls.parse(line, dist)
if ep.name in this:
raise ValueError("Duplicate entry point", group, ep.name)
this[ep.name]=ep
return this
@classmethod
def parse_map(cls, data, dist=None):
"""Parse a map of entry point groups"""
if isinstance(data,dict):
data = data.items()
else:
data = split_sections(data)
maps = {}
for group, lines in data:
if group is None:
if not lines:
continue
raise ValueError("Entry points must be listed in groups")
group = group.strip()
if group in maps:
raise ValueError("Duplicate group name", group)
maps[group] = cls.parse_group(group, lines, dist)
return maps
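# Illustrative usage (editor's sketch; the module and extra names are hypothetical):
#   ep = EntryPoint.parse('serve = mypkg.cli:main [ssl]')
#   (ep.name, ep.module_name, ep.attrs, ep.extras)
#   -> ('serve', 'mypkg.cli', ('main',), ('ssl',))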
def _remove_md5_fragment(location):
if not location:
return ''
parsed = urlparse(location)
if parsed[-1].startswith('md5='):
return urlunparse(parsed[:-1] + ('',))
return location
class Distribution(object):
"""Wrap an actual or potential sys.path entry w/metadata"""
PKG_INFO = 'PKG-INFO'
def __init__(self, location=None, metadata=None, project_name=None,
version=None, py_version=PY_MAJOR, platform=None,
precedence=EGG_DIST):
self.project_name = safe_name(project_name or 'Unknown')
if version is not None:
self._version = safe_version(version)
self.py_version = py_version
self.platform = platform
self.location = location
self.precedence = precedence
self._provider = metadata or empty_provider
@classmethod
def from_location(cls,location,basename,metadata=None,**kw):
project_name, version, py_version, platform = [None]*4
basename, ext = os.path.splitext(basename)
if ext.lower() in _distributionImpl:
# .dist-info gets much metadata differently
match = EGG_NAME(basename)
if match:
project_name, version, py_version, platform = match.group(
'name','ver','pyver','plat'
)
cls = _distributionImpl[ext.lower()]
return cls(
location, metadata, project_name=project_name, version=version,
py_version=py_version, platform=platform, **kw
)
hashcmp = property(
lambda self: (
getattr(self,'parsed_version',()),
self.precedence,
self.key,
_remove_md5_fragment(self.location),
self.py_version,
self.platform
)
)
def __hash__(self): return hash(self.hashcmp)
def __lt__(self, other):
return self.hashcmp < other.hashcmp
def __le__(self, other):
return self.hashcmp <= other.hashcmp
def __gt__(self, other):
return self.hashcmp > other.hashcmp
def __ge__(self, other):
return self.hashcmp >= other.hashcmp
def __eq__(self, other):
if not isinstance(other, self.__class__):
# It's not a Distribution, so they are not equal
return False
return self.hashcmp == other.hashcmp
def __ne__(self, other):
return not self == other
# These properties have to be lazy so that we don't have to load any
# metadata until/unless it's actually needed. (i.e., some distributions
# may not know their name or version without loading PKG-INFO)
@property
def key(self):
try:
return self._key
except AttributeError:
self._key = key = self.project_name.lower()
return key
@property
def parsed_version(self):
try:
return self._parsed_version
except AttributeError:
self._parsed_version = pv = parse_version(self.version)
return pv
@property
def version(self):
try:
return self._version
except AttributeError:
for line in self._get_metadata(self.PKG_INFO):
if line.lower().startswith('version:'):
self._version = safe_version(line.split(':',1)[1].strip())
return self._version
else:
raise ValueError(
"Missing 'Version:' header and/or %s file" % self.PKG_INFO, self
)
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
dm = self.__dep_map = {None: []}
for name in 'requires.txt', 'depends.txt':
for extra,reqs in split_sections(self._get_metadata(name)):
if extra:
if ':' in extra:
extra, marker = extra.split(':',1)
if invalid_marker(marker):
reqs=[] # XXX warn
elif not evaluate_marker(marker):
reqs=[]
extra = safe_extra(extra) or None
dm.setdefault(extra,[]).extend(parse_requirements(reqs))
return dm
def requires(self,extras=()):
"""List of Requirements needed for this distro if `extras` are used"""
dm = self._dep_map
deps = []
deps.extend(dm.get(None,()))
for ext in extras:
try:
deps.extend(dm[safe_extra(ext)])
except KeyError:
raise UnknownExtra(
"%s has no such extra feature %r" % (self, ext)
)
return deps
def _get_metadata(self,name):
if self.has_metadata(name):
for line in self.get_metadata_lines(name):
yield line
def activate(self,path=None):
"""Ensure distribution is importable on `path` (default=sys.path)"""
if path is None: path = sys.path
self.insert_on(path)
if path is sys.path:
fixup_namespace_packages(self.location)
list(map(declare_namespace, self._get_metadata('namespace_packages.txt')))
def egg_name(self):
"""Return what this distribution's standard .egg filename should be"""
filename = "%s-%s-py%s" % (
to_filename(self.project_name), to_filename(self.version),
self.py_version or PY_MAJOR
)
if self.platform:
filename += '-'+self.platform
return filename
def __repr__(self):
if self.location:
return "%s (%s)" % (self,self.location)
else:
return str(self)
def __str__(self):
try: version = getattr(self,'version',None)
except ValueError: version = None
version = version or "[unknown version]"
return "%s %s" % (self.project_name,version)
def __getattr__(self,attr):
"""Delegate all unrecognized public attributes to .metadata provider"""
if attr.startswith('_'):
raise AttributeError(attr)
return getattr(self._provider, attr)
@classmethod
def from_filename(cls,filename,metadata=None, **kw):
return cls.from_location(
_normalize_cached(filename), os.path.basename(filename), metadata,
**kw
)
def as_requirement(self):
"""Return a ``Requirement`` that matches this distribution exactly"""
return Requirement.parse('%s==%s' % (self.project_name, self.version))
def load_entry_point(self, group, name):
"""Return the `name` entry point of `group` or raise ImportError"""
ep = self.get_entry_info(group,name)
if ep is None:
raise ImportError("Entry point %r not found" % ((group,name),))
return ep.load()
def get_entry_map(self, group=None):
"""Return the entry point map for `group`, or the full entry map"""
try:
ep_map = self._ep_map
except AttributeError:
ep_map = self._ep_map = EntryPoint.parse_map(
self._get_metadata('entry_points.txt'), self
)
if group is not None:
return ep_map.get(group,{})
return ep_map
def get_entry_info(self, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return self.get_entry_map(group).get(name)
def insert_on(self, path, loc = None):
"""Insert self.location in path before its nearest parent directory"""
loc = loc or self.location
if not loc:
return
nloc = _normalize_cached(loc)
bdir = os.path.dirname(nloc)
npath= [(p and _normalize_cached(p) or p) for p in path]
for p, item in enumerate(npath):
if item==nloc:
break
elif item==bdir and self.precedence==EGG_DIST:
# if it's an .egg, give it precedence over its directory
if path is sys.path:
self.check_version_conflict()
path.insert(p, loc)
npath.insert(p, nloc)
break
else:
if path is sys.path:
self.check_version_conflict()
path.append(loc)
return
# p is the spot where we found or inserted loc; now remove duplicates
while 1:
try:
np = npath.index(nloc, p+1)
except ValueError:
break
else:
del npath[np], path[np]
p = np # ha!
return
def check_version_conflict(self):
if self.key=='setuptools':
return # ignore the inevitable setuptools self-conflicts :(
nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
loc = normalize_path(self.location)
for modname in self._get_metadata('top_level.txt'):
if (modname not in sys.modules or modname in nsp
or modname in _namespace_packages):
continue
if modname in ('pkg_resources', 'setuptools', 'site'):
continue
fn = getattr(sys.modules[modname], '__file__', None)
if fn and (normalize_path(fn).startswith(loc) or
fn.startswith(self.location)):
continue
issue_warning(
"Module %s was already imported from %s, but %s is being added"
" to sys.path" % (modname, fn, self.location),
)
def has_version(self):
try:
self.version
except ValueError:
issue_warning("Unbuilt egg for "+repr(self))
return False
return True
def clone(self,**kw):
"""Copy this distribution, substituting in any changed keyword args"""
for attr in (
'project_name', 'version', 'py_version', 'platform', 'location',
'precedence'
):
kw.setdefault(attr, getattr(self,attr,None))
kw.setdefault('metadata', self._provider)
return self.__class__(**kw)
@property
def extras(self):
return [dep for dep in self._dep_map if dep]
class DistInfoDistribution(Distribution):
"""Wrap an actual or potential sys.path entry w/metadata, .dist-info style"""
PKG_INFO = 'METADATA'
EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
@property
def _parsed_pkg_info(self):
"""Parse and cache metadata"""
try:
return self._pkg_info
except AttributeError:
from email.parser import Parser
self._pkg_info = Parser().parsestr(self.get_metadata(self.PKG_INFO))
return self._pkg_info
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
self.__dep_map = self._compute_dependencies()
return self.__dep_map
def _preparse_requirement(self, requires_dist):
"""Convert 'Foobar (1); baz' to ('Foobar ==1', 'baz')
Split environment marker, add == prefix to version specifiers as
necessary, and remove parentheses.
"""
parts = requires_dist.split(';', 1) + ['']
distvers = parts[0].strip()
mark = parts[1].strip()
distvers = re.sub(self.EQEQ, r"\1==\2\3", distvers)
distvers = distvers.replace('(', '').replace(')', '')
return (distvers, mark)
def _compute_dependencies(self):
"""Recompute this distribution's dependencies."""
from pip._vendor._markerlib import compile as compile_marker
dm = self.__dep_map = {None: []}
reqs = []
# Including any condition expressions
for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
distvers, mark = self._preparse_requirement(req)
parsed = next(parse_requirements(distvers))
parsed.marker_fn = compile_marker(mark)
reqs.append(parsed)
def reqs_for_extra(extra):
for req in reqs:
if req.marker_fn(override={'extra':extra}):
yield req
common = frozenset(reqs_for_extra(None))
dm[None].extend(common)
for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
extra = safe_extra(extra.strip())
dm[extra] = list(frozenset(reqs_for_extra(extra)) - common)
return dm
_distributionImpl = {
'.egg': Distribution,
'.egg-info': Distribution,
'.dist-info': DistInfoDistribution,
}
def issue_warning(*args,**kw):
level = 1
g = globals()
try:
# find the first stack frame that is *not* code in
# the pkg_resources module, to use for the warning
while sys._getframe(level).f_globals is g:
level += 1
except ValueError:
pass
from warnings import warn
warn(stacklevel = level+1, *args, **kw)
def parse_requirements(strs):
"""Yield ``Requirement`` objects for each specification in `strs`
`strs` must be an instance of ``basestring``, or a (possibly-nested)
iterable thereof.
"""
# create a steppable iterator, so we can handle \-continuations
lines = iter(yield_lines(strs))
def scan_list(ITEM,TERMINATOR,line,p,groups,item_name):
items = []
while not TERMINATOR(line,p):
if CONTINUE(line,p):
try:
line = next(lines)
p = 0
except StopIteration:
raise ValueError(
"\\ must not appear on the last nonblank line"
)
match = ITEM(line,p)
if not match:
raise ValueError("Expected "+item_name+" in",line,"at",line[p:])
items.append(match.group(*groups))
p = match.end()
match = COMMA(line,p)
if match:
p = match.end() # skip the comma
elif not TERMINATOR(line,p):
raise ValueError(
"Expected ',' or end-of-list in",line,"at",line[p:]
)
match = TERMINATOR(line,p)
if match: p = match.end() # skip the terminator, if any
return line, p, items
for line in lines:
match = DISTRO(line)
if not match:
raise ValueError("Missing distribution spec", line)
project_name = match.group(1)
p = match.end()
extras = []
match = OBRACKET(line,p)
if match:
p = match.end()
line, p, extras = scan_list(
DISTRO, CBRACKET, line, p, (1,), "'extra' name"
)
line, p, specs = scan_list(VERSION,LINE_END,line,p,(1,2),"version spec")
specs = [(op,safe_version(val)) for op,val in specs]
yield Requirement(project_name, specs, extras)
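# Illustrative usage (editor's sketch; the project and extra names are hypothetical):
#   req = next(parse_requirements('FooBar[extra1]>=1.2,<2.0'))
#   req.project_name -> 'FooBar'
#   req.extras       -> ('extra1',)
#   req.specs        -> [('>=', '1.2'), ('<', '2.0')]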
def _sort_dists(dists):
tmp = [(dist.hashcmp,dist) for dist in dists]
tmp.sort()
dists[::-1] = [d for hc,d in tmp]
class Requirement:
def __init__(self, project_name, specs, extras):
"""DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
self.unsafe_name, project_name = project_name, safe_name(project_name)
self.project_name, self.key = project_name, project_name.lower()
index = [(parse_version(v),state_machine[op],op,v) for op,v in specs]
index.sort()
self.specs = [(op,ver) for parsed,trans,op,ver in index]
self.index, self.extras = index, tuple(map(safe_extra,extras))
self.hashCmp = (
self.key, tuple([(op,parsed) for parsed,trans,op,ver in index]),
frozenset(self.extras)
)
self.__hash = hash(self.hashCmp)
def __str__(self):
specs = ','.join([''.join(s) for s in self.specs])
extras = ','.join(self.extras)
if extras: extras = '[%s]' % extras
return '%s%s%s' % (self.project_name, extras, specs)
def __eq__(self,other):
return isinstance(other,Requirement) and self.hashCmp==other.hashCmp
def __contains__(self,item):
if isinstance(item,Distribution):
if item.key != self.key: return False
if self.index: item = item.parsed_version # only get if we need it
elif isinstance(item,basestring):
item = parse_version(item)
last = None
compare = lambda a, b: (a > b) - (a < b) # -1, 0, 1
for parsed,trans,op,ver in self.index:
action = trans[compare(item,parsed)] # Indexing: 0, 1, -1
if action=='F':
return False
elif action=='T':
return True
elif action=='+':
last = True
elif action=='-' or last is None: last = False
if last is None: last = True # no rules encountered
return last
def __hash__(self):
return self.__hash
def __repr__(self): return "Requirement.parse(%r)" % str(self)
@staticmethod
def parse(s):
reqs = list(parse_requirements(s))
if reqs:
if len(reqs)==1:
return reqs[0]
raise ValueError("Expected only one requirement", s)
raise ValueError("No requirements found", s)
state_machine = {
# =><
'<': '--T',
'<=': 'T-T',
'>': 'F+F',
'>=': 'T+F',
'==': 'T..',
'!=': 'F++',
}
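# The table above drives Requirement.__contains__: for each spec, the letter at
# index 0/1/-1 (equal/greater/less) says whether to fail ('F'), succeed ('T'),
# tentatively accept ('+') or tentatively reject ('-'). Illustrative results
# (editor's sketch):
#   '1.5' in Requirement.parse('Foo>=1.0,!=1.3,<2.0')  -> True
#   '1.3' in Requirement.parse('Foo>=1.0,!=1.3,<2.0')  -> False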
def _get_mro(cls):
"""Get an mro for a type or classic class"""
if not isinstance(cls,type):
class cls(cls,object): pass
return cls.__mro__[1:]
return cls.__mro__
def _find_adapter(registry, ob):
"""Return an adapter factory for `ob` from `registry`"""
for t in _get_mro(getattr(ob, '__class__', type(ob))):
if t in registry:
return registry[t]
def ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
def split_sections(s):
"""Split a string or iterable thereof into (section,content) pairs
Each ``section`` is a stripped version of the section header ("[section]")
and each ``content`` is a list of stripped lines excluding blank lines and
comment-only lines. If there are any such lines before the first section
header, they're returned in a first ``section`` of ``None``.
"""
section = None
content = []
for line in yield_lines(s):
if line.startswith("["):
if line.endswith("]"):
if section or content:
yield section, content
section = line[1:-1].strip()
content = []
else:
raise ValueError("Invalid section heading", line)
else:
content.append(line)
# wrap up last segment
yield section, content
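# Illustrative example (editor's sketch; the section and script names are hypothetical):
#   list(split_sections(['[console_scripts]', 'foo = pkg:main', '[gui_scripts]']))
#   -> [('console_scripts', ['foo = pkg:main']), ('gui_scripts', [])]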
def _mkstemp(*args,**kw):
from tempfile import mkstemp
old_open = os.open
try:
os.open = os_open # temporarily bypass sandboxing
return mkstemp(*args,**kw)
finally:
os.open = old_open # and then put it back
# Set up global resource manager (deliberately not state-saved)
_manager = ResourceManager()
def _initialize(g):
for name in dir(_manager):
if not name.startswith('_'):
g[name] = getattr(_manager, name)
_initialize(globals())
# Prepare the master working set and make the ``require()`` API available
_declare_state('object', working_set = WorkingSet())
try:
# Does the main program list any requirements?
from __main__ import __requires__
except ImportError:
pass # No: just use the default working set based on sys.path
else:
# Yes: ensure the requirements are met, by prefixing sys.path if necessary
try:
working_set.require(__requires__)
except VersionConflict: # try it without defaults already on sys.path
working_set = WorkingSet([]) # by starting with an empty path
for dist in working_set.resolve(
parse_requirements(__requires__), Environment()
):
working_set.add(dist)
for entry in sys.path: # add any missing entries from sys.path
if entry not in working_set.entries:
working_set.add_entry(entry)
sys.path[:] = working_set.entries # then copy back to sys.path
require = working_set.require
iter_entry_points = working_set.iter_entry_points
add_activation_listener = working_set.subscribe
run_script = working_set.run_script
run_main = run_script # backward compatibility
# Activate all distributions already on sys.path, and ensure that
# all distributions added to the working set in the future (e.g. by
# calling ``require()``) will get activated as well.
add_activation_listener(lambda dist: dist.activate())
working_set.entries=[]
list(map(working_set.add_entry,sys.path)) # match order
|
doismellburning/django | refs/heads/master | tests/template_backends/test_dummy.py | 14 | # coding: utf-8
from __future__ import unicode_literals
from django.http import HttpRequest
from django.middleware.csrf import CsrfViewMiddleware, get_token
from django.template import TemplateDoesNotExist, TemplateSyntaxError
from django.template.backends.dummy import TemplateStrings
from django.test import SimpleTestCase
class TemplateStringsTests(SimpleTestCase):
engine_class = TemplateStrings
backend_name = 'dummy'
options = {}
@classmethod
def setUpClass(cls):
params = {
'DIRS': [],
'APP_DIRS': True,
'NAME': cls.backend_name,
'OPTIONS': cls.options,
}
cls.engine = cls.engine_class(params)
def test_from_string(self):
template = self.engine.from_string("Hello!\n")
content = template.render()
self.assertEqual(content, "Hello!\n")
def test_get_template(self):
template = self.engine.get_template('template_backends/hello.html')
content = template.render({'name': 'world'})
self.assertEqual(content, "Hello world!\n")
def test_get_template_non_existing(self):
with self.assertRaises(TemplateDoesNotExist):
self.engine.get_template('template_backends/non_existing.html')
def test_get_template_syntax_error(self):
# There's no way to trigger a syntax error with the dummy backend.
# The test still lives here to factor it between other backends.
if self.backend_name == 'dummy':
return
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('template_backends/syntax_error.html')
def test_html_escaping(self):
template = self.engine.get_template('template_backends/hello.html')
context = {'name': '<script>alert("XSS!");</script>'}
content = template.render(context)
self.assertIn('&lt;script&gt;', content)
self.assertNotIn('<script>', content)
def test_csrf_token(self):
request = HttpRequest()
CsrfViewMiddleware().process_view(request, lambda r: None, (), {})
template = self.engine.get_template('template_backends/csrf.html')
content = template.render(request=request)
expected = (
'<input type="hidden" name="csrfmiddlewaretoken" '
'value="{}" />'.format(get_token(request)))
self.assertHTMLEqual(content, expected)
def test_no_directory_traversal(self):
with self.assertRaises(TemplateDoesNotExist):
self.engine.get_template('../forbidden/template_backends/hello.html')
def test_non_ascii_characters(self):
template = self.engine.get_template('template_backends/hello.html')
content = template.render({'name': 'Jérôme'})
self.assertEqual(content, "Hello Jérôme!\n")
|
jacklee0810/QMarkdowner | refs/heads/master | dpkt/smb.py | 15 | # $Id: smb.py 23 2006-11-08 15:45:33Z dugsong $
"""Server Message Block."""
import dpkt
class SMB(dpkt.Packet):
__hdr__ = [
('proto', '4s', ''),
('cmd', 'B', 0),
('err', 'I', 0),
('flags1', 'B', 0),
('flags2', 'B', 0),
('pad', '6s', ''),
('tid', 'H', 0),
('pid', 'H', 0),
('uid', 'H', 0),
('mid', 'H', 0)
]
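# Illustrative usage (editor's sketch, not part of the original module; `raw_bytes`
# is a hypothetical SMB header captured off the wire):
#   smb = SMB(raw_bytes)         # dpkt.Packet subclasses unpack __hdr__ on construction
#   smb.cmd, smb.tid, smb.uid    # fixed-header fields declared above
#   str(smb)                     # re-serialize the header (Python 2 idiom)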
|
GbalsaC/bitnamiP | refs/heads/master | venv/lib/python2.7/site-packages/pygments/lexers/html.py | 72 | # -*- coding: utf-8 -*-
"""
pygments.lexers.html
~~~~~~~~~~~~~~~~~~~~
Lexers for HTML, XML and related markup.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, ExtendedRegexLexer, include, bygroups, \
default, using
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Punctuation
from pygments.util import looks_like_xml, html_doctype_matches
from pygments.lexers.javascript import JavascriptLexer
from pygments.lexers.jvm import ScalaLexer
from pygments.lexers.css import CssLexer, _indentation, _starts_block
from pygments.lexers.ruby import RubyLexer
__all__ = ['HtmlLexer', 'DtdLexer', 'XmlLexer', 'XsltLexer', 'HamlLexer',
'ScamlLexer', 'JadeLexer']
class HtmlLexer(RegexLexer):
"""
For HTML 4 and XHTML 1 markup. Nested JavaScript and CSS is highlighted
by the appropriate lexer.
"""
name = 'HTML'
aliases = ['html']
filenames = ['*.html', '*.htm', '*.xhtml', '*.xslt']
mimetypes = ['text/html', 'application/xhtml+xml']
flags = re.IGNORECASE | re.DOTALL
tokens = {
'root': [
('[^<&]+', Text),
(r'&\S*?;', Name.Entity),
(r'\<\!\[CDATA\[.*?\]\]\>', Comment.Preproc),
('<!--', Comment, 'comment'),
(r'<\?.*?\?>', Comment.Preproc),
('<![^>]*>', Comment.Preproc),
(r'<\s*script\s*', Name.Tag, ('script-content', 'tag')),
(r'<\s*style\s*', Name.Tag, ('style-content', 'tag')),
# note: this allows tag names not used in HTML like <x:with-dash>,
# this is to support yet-unknown template engines and the like
(r'<\s*[\w:.-]+', Name.Tag, 'tag'),
(r'<\s*/\s*[\w:.-]+\s*>', Name.Tag),
],
'comment': [
('[^-]+', Comment),
('-->', Comment, '#pop'),
('-', Comment),
],
'tag': [
(r'\s+', Text),
(r'([\w:-]+\s*=)(\s*)', bygroups(Name.Attribute, Text), 'attr'),
(r'[\w:-]+', Name.Attribute),
(r'/?\s*>', Name.Tag, '#pop'),
],
'script-content': [
(r'<\s*/\s*script\s*>', Name.Tag, '#pop'),
(r'.+?(?=<\s*/\s*script\s*>)', using(JavascriptLexer)),
],
'style-content': [
(r'<\s*/\s*style\s*>', Name.Tag, '#pop'),
(r'.+?(?=<\s*/\s*style\s*>)', using(CssLexer)),
],
'attr': [
('".*?"', String, '#pop'),
("'.*?'", String, '#pop'),
(r'[^\s>]+', String, '#pop'),
],
}
def analyse_text(text):
if html_doctype_matches(text):
return 0.5
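# Illustrative usage (editor's sketch, not part of this module):
#   from pygments import highlight
#   from pygments.formatters import HtmlFormatter
#   highlight('<p class="greeting">hi</p>', HtmlLexer(), HtmlFormatter())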
class DtdLexer(RegexLexer):
"""
A lexer for DTDs (Document Type Definitions).
.. versionadded:: 1.5
"""
flags = re.MULTILINE | re.DOTALL
name = 'DTD'
aliases = ['dtd']
filenames = ['*.dtd']
mimetypes = ['application/xml-dtd']
tokens = {
'root': [
include('common'),
(r'(<!ELEMENT)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Tag), 'element'),
(r'(<!ATTLIST)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Tag), 'attlist'),
(r'(<!ENTITY)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Entity), 'entity'),
(r'(<!NOTATION)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Tag), 'notation'),
(r'(<!\[)([^\[\s]+)(\s*)(\[)', # conditional sections
bygroups(Keyword, Name.Entity, Text, Keyword)),
(r'(<!DOCTYPE)(\s+)([^>\s]+)',
bygroups(Keyword, Text, Name.Tag)),
(r'PUBLIC|SYSTEM', Keyword.Constant),
(r'[\[\]>]', Keyword),
],
'common': [
(r'\s+', Text),
(r'(%|&)[^;]*;', Name.Entity),
('<!--', Comment, 'comment'),
(r'[(|)*,?+]', Operator),
(r'"[^"]*"', String.Double),
(r'\'[^\']*\'', String.Single),
],
'comment': [
('[^-]+', Comment),
('-->', Comment, '#pop'),
('-', Comment),
],
'element': [
include('common'),
(r'EMPTY|ANY|#PCDATA', Keyword.Constant),
(r'[^>\s|()?+*,]+', Name.Tag),
(r'>', Keyword, '#pop'),
],
'attlist': [
include('common'),
(r'CDATA|IDREFS|IDREF|ID|NMTOKENS|NMTOKEN|ENTITIES|ENTITY|NOTATION',
Keyword.Constant),
(r'#REQUIRED|#IMPLIED|#FIXED', Keyword.Constant),
(r'xml:space|xml:lang', Keyword.Reserved),
(r'[^>\s|()?+*,]+', Name.Attribute),
(r'>', Keyword, '#pop'),
],
'entity': [
include('common'),
(r'SYSTEM|PUBLIC|NDATA', Keyword.Constant),
(r'[^>\s|()?+*,]+', Name.Entity),
(r'>', Keyword, '#pop'),
],
'notation': [
include('common'),
(r'SYSTEM|PUBLIC', Keyword.Constant),
(r'[^>\s|()?+*,]+', Name.Attribute),
(r'>', Keyword, '#pop'),
],
}
def analyse_text(text):
if not looks_like_xml(text) and \
('<!ELEMENT' in text or '<!ATTLIST' in text or '<!ENTITY' in text):
return 0.8
class XmlLexer(RegexLexer):
"""
Generic lexer for XML (eXtensible Markup Language).
"""
flags = re.MULTILINE | re.DOTALL | re.UNICODE
name = 'XML'
aliases = ['xml']
filenames = ['*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd',
'*.wsdl', '*.wsf']
mimetypes = ['text/xml', 'application/xml', 'image/svg+xml',
'application/rss+xml', 'application/atom+xml']
tokens = {
'root': [
('[^<&]+', Text),
(r'&\S*?;', Name.Entity),
(r'\<\!\[CDATA\[.*?\]\]\>', Comment.Preproc),
('<!--', Comment, 'comment'),
(r'<\?.*?\?>', Comment.Preproc),
('<![^>]*>', Comment.Preproc),
(r'<\s*[\w:.-]+', Name.Tag, 'tag'),
(r'<\s*/\s*[\w:.-]+\s*>', Name.Tag),
],
'comment': [
('[^-]+', Comment),
('-->', Comment, '#pop'),
('-', Comment),
],
'tag': [
(r'\s+', Text),
(r'[\w.:-]+\s*=', Name.Attribute, 'attr'),
(r'/?\s*>', Name.Tag, '#pop'),
],
'attr': [
('\s+', Text),
('".*?"', String, '#pop'),
("'.*?'", String, '#pop'),
(r'[^\s>]+', String, '#pop'),
],
}
def analyse_text(text):
if looks_like_xml(text):
return 0.45 # less than HTML
class XsltLexer(XmlLexer):
"""
A lexer for XSLT.
.. versionadded:: 0.10
"""
name = 'XSLT'
aliases = ['xslt']
filenames = ['*.xsl', '*.xslt', '*.xpl'] # xpl is XProc
mimetypes = ['application/xsl+xml', 'application/xslt+xml']
EXTRA_KEYWORDS = set((
'apply-imports', 'apply-templates', 'attribute',
'attribute-set', 'call-template', 'choose', 'comment',
'copy', 'copy-of', 'decimal-format', 'element', 'fallback',
'for-each', 'if', 'import', 'include', 'key', 'message',
'namespace-alias', 'number', 'otherwise', 'output', 'param',
'preserve-space', 'processing-instruction', 'sort',
'strip-space', 'stylesheet', 'template', 'text', 'transform',
'value-of', 'variable', 'when', 'with-param'
))
def get_tokens_unprocessed(self, text):
for index, token, value in XmlLexer.get_tokens_unprocessed(self, text):
m = re.match('</?xsl:([^>]*)/?>?', value)
if token is Name.Tag and m and m.group(1) in self.EXTRA_KEYWORDS:
yield index, Keyword, value
else:
yield index, token, value
def analyse_text(text):
if looks_like_xml(text) and '<xsl' in text:
return 0.8
class HamlLexer(ExtendedRegexLexer):
"""
For Haml markup.
.. versionadded:: 1.3
"""
name = 'Haml'
aliases = ['haml']
filenames = ['*.haml']
mimetypes = ['text/x-haml']
flags = re.IGNORECASE
# Haml can include " |\n" anywhere,
# which is ignored and used to wrap long lines.
# To accommodate this, use this custom faux dot instead.
_dot = r'(?: \|\n(?=.* \|)|.)'
# In certain places, a comma at the end of the line
# allows line wrapping as well.
_comma_dot = r'(?:,\s*\n|' + _dot + ')'
tokens = {
'root': [
(r'[ \t]*\n', Text),
(r'[ \t]*', _indentation),
],
'css': [
(r'\.[\w:-]+', Name.Class, 'tag'),
(r'\#[\w:-]+', Name.Function, 'tag'),
],
'eval-or-plain': [
(r'[&!]?==', Punctuation, 'plain'),
(r'([&!]?[=~])(' + _comma_dot + r'*\n)',
bygroups(Punctuation, using(RubyLexer)),
'root'),
default('plain'),
],
'content': [
include('css'),
(r'%[\w:-]+', Name.Tag, 'tag'),
(r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
(r'(/)(\[' + _dot + '*?\])(' + _dot + r'*\n)',
bygroups(Comment, Comment.Special, Comment),
'#pop'),
(r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
'#pop'),
(r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
'haml-comment-block'), '#pop'),
(r'(-)(' + _comma_dot + r'*\n)',
bygroups(Punctuation, using(RubyLexer)),
'#pop'),
(r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
'#pop'),
include('eval-or-plain'),
],
'tag': [
include('css'),
(r'\{(,\n|' + _dot + ')*?\}', using(RubyLexer)),
(r'\[' + _dot + '*?\]', using(RubyLexer)),
(r'\(', Text, 'html-attributes'),
(r'/[ \t]*\n', Punctuation, '#pop:2'),
(r'[<>]{1,2}(?=[ \t=])', Punctuation),
include('eval-or-plain'),
],
'plain': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
(r'(#\{)(' + _dot + '*?)(\})',
bygroups(String.Interpol, using(RubyLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
'html-attributes': [
(r'\s+', Text),
(r'[\w:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
(r'[\w:-]+', Name.Attribute),
(r'\)', Text, '#pop'),
],
'html-attribute-value': [
(r'[ \t]+', Text),
(r'\w+', Name.Variable, '#pop'),
(r'@\w+', Name.Variable.Instance, '#pop'),
(r'\$\w+', Name.Variable.Global, '#pop'),
(r"'(\\\\|\\'|[^'\n])*'", String, '#pop'),
(r'"(\\\\|\\"|[^"\n])*"', String, '#pop'),
],
'html-comment-block': [
(_dot + '+', Comment),
(r'\n', Text, 'root'),
],
'haml-comment-block': [
(_dot + '+', Comment.Preproc),
(r'\n', Text, 'root'),
],
'filter-block': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
(r'(#\{)(' + _dot + '*?)(\})',
bygroups(String.Interpol, using(RubyLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
}
class ScamlLexer(ExtendedRegexLexer):
"""
For `Scaml markup <http://scalate.fusesource.org/>`_. Scaml is Haml for Scala.
.. versionadded:: 1.4
"""
name = 'Scaml'
aliases = ['scaml']
filenames = ['*.scaml']
mimetypes = ['text/x-scaml']
flags = re.IGNORECASE
# Scaml does not yet support the " |\n" notation to
# wrap long lines. Once it does, use the custom faux
# dot instead.
# _dot = r'(?: \|\n(?=.* \|)|.)'
_dot = r'.'
tokens = {
'root': [
(r'[ \t]*\n', Text),
(r'[ \t]*', _indentation),
],
'css': [
(r'\.[\w:-]+', Name.Class, 'tag'),
(r'\#[\w:-]+', Name.Function, 'tag'),
],
'eval-or-plain': [
(r'[&!]?==', Punctuation, 'plain'),
(r'([&!]?[=~])(' + _dot + r'*\n)',
bygroups(Punctuation, using(ScalaLexer)),
'root'),
default('plain'),
],
'content': [
include('css'),
(r'%[\w:-]+', Name.Tag, 'tag'),
(r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
(r'(/)(\[' + _dot + '*?\])(' + _dot + r'*\n)',
bygroups(Comment, Comment.Special, Comment),
'#pop'),
(r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
'#pop'),
(r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
'scaml-comment-block'), '#pop'),
(r'(-@\s*)(import)?(' + _dot + r'*\n)',
bygroups(Punctuation, Keyword, using(ScalaLexer)),
'#pop'),
(r'(-)(' + _dot + r'*\n)',
bygroups(Punctuation, using(ScalaLexer)),
'#pop'),
(r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
'#pop'),
include('eval-or-plain'),
],
'tag': [
include('css'),
(r'\{(,\n|' + _dot + ')*?\}', using(ScalaLexer)),
(r'\[' + _dot + '*?\]', using(ScalaLexer)),
(r'\(', Text, 'html-attributes'),
(r'/[ \t]*\n', Punctuation, '#pop:2'),
(r'[<>]{1,2}(?=[ \t=])', Punctuation),
include('eval-or-plain'),
],
'plain': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
(r'(#\{)(' + _dot + '*?)(\})',
bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
'html-attributes': [
(r'\s+', Text),
(r'[\w:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
(r'[\w:-]+', Name.Attribute),
(r'\)', Text, '#pop'),
],
'html-attribute-value': [
(r'[ \t]+', Text),
(r'\w+', Name.Variable, '#pop'),
(r'@\w+', Name.Variable.Instance, '#pop'),
(r'\$\w+', Name.Variable.Global, '#pop'),
(r"'(\\\\|\\'|[^'\n])*'", String, '#pop'),
(r'"(\\\\|\\"|[^"\n])*"', String, '#pop'),
],
'html-comment-block': [
(_dot + '+', Comment),
(r'\n', Text, 'root'),
],
'scaml-comment-block': [
(_dot + '+', Comment.Preproc),
(r'\n', Text, 'root'),
],
'filter-block': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
(r'(#\{)(' + _dot + '*?)(\})',
bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
}
class JadeLexer(ExtendedRegexLexer):
"""
For Jade markup.
Jade is a variant of Scaml, see:
http://scalate.fusesource.org/documentation/scaml-reference.html
.. versionadded:: 1.4
"""
name = 'Jade'
aliases = ['jade']
filenames = ['*.jade']
mimetypes = ['text/x-jade']
flags = re.IGNORECASE
_dot = r'.'
tokens = {
'root': [
(r'[ \t]*\n', Text),
(r'[ \t]*', _indentation),
],
'css': [
(r'\.[\w:-]+', Name.Class, 'tag'),
(r'\#[\w:-]+', Name.Function, 'tag'),
],
'eval-or-plain': [
(r'[&!]?==', Punctuation, 'plain'),
(r'([&!]?[=~])(' + _dot + r'*\n)',
bygroups(Punctuation, using(ScalaLexer)), 'root'),
default('plain'),
],
'content': [
include('css'),
(r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
(r'(/)(\[' + _dot + '*?\])(' + _dot + r'*\n)',
bygroups(Comment, Comment.Special, Comment),
'#pop'),
(r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
'#pop'),
(r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
'scaml-comment-block'), '#pop'),
(r'(-@\s*)(import)?(' + _dot + r'*\n)',
bygroups(Punctuation, Keyword, using(ScalaLexer)),
'#pop'),
(r'(-)(' + _dot + r'*\n)',
bygroups(Punctuation, using(ScalaLexer)),
'#pop'),
(r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
'#pop'),
(r'[\w:-]+', Name.Tag, 'tag'),
(r'\|', Text, 'eval-or-plain'),
],
'tag': [
include('css'),
(r'\{(,\n|' + _dot + ')*?\}', using(ScalaLexer)),
(r'\[' + _dot + '*?\]', using(ScalaLexer)),
(r'\(', Text, 'html-attributes'),
(r'/[ \t]*\n', Punctuation, '#pop:2'),
(r'[<>]{1,2}(?=[ \t=])', Punctuation),
include('eval-or-plain'),
],
'plain': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
(r'(#\{)(' + _dot + '*?)(\})',
bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
'html-attributes': [
(r'\s+', Text),
(r'[\w:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
(r'[\w:-]+', Name.Attribute),
(r'\)', Text, '#pop'),
],
'html-attribute-value': [
(r'[ \t]+', Text),
(r'\w+', Name.Variable, '#pop'),
(r'@\w+', Name.Variable.Instance, '#pop'),
(r'\$\w+', Name.Variable.Global, '#pop'),
(r"'(\\\\|\\'|[^'\n])*'", String, '#pop'),
(r'"(\\\\|\\"|[^"\n])*"', String, '#pop'),
],
'html-comment-block': [
(_dot + '+', Comment),
(r'\n', Text, 'root'),
],
'scaml-comment-block': [
(_dot + '+', Comment.Preproc),
(r'\n', Text, 'root'),
],
'filter-block': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
(r'(#\{)(' + _dot + '*?)(\})',
bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
}
|
geekaia/edx-platform | refs/heads/master | common/lib/xmodule/xmodule/tests/test_conditional.py | 37 | import json
import unittest
from fs.memoryfs import MemoryFS
from mock import Mock, patch
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from xmodule.error_module import NonStaffErrorDescriptor
from opaque_keys.edx.locations import SlashSeparatedCourseKey, Location
from xmodule.modulestore.xml import ImportSystem, XMLModuleStore, CourseLocationGenerator
from xmodule.conditional_module import ConditionalDescriptor
from xmodule.tests import DATA_DIR, get_test_system, get_test_descriptor_system
from xmodule.x_module import STUDENT_VIEW
ORG = 'test_org'
COURSE = 'conditional' # name of directory with course data
class DummySystem(ImportSystem):
@patch('xmodule.modulestore.xml.OSFS', lambda directory: MemoryFS())
def __init__(self, load_error_modules):
xmlstore = XMLModuleStore("data_dir", course_dirs=[], load_error_modules=load_error_modules)
super(DummySystem, self).__init__(
xmlstore=xmlstore,
course_id=SlashSeparatedCourseKey(ORG, COURSE, 'test_run'),
course_dir='test_dir',
error_tracker=Mock(),
parent_tracker=Mock(),
load_error_modules=load_error_modules,
)
def render_template(self, template, context):
raise Exception("Shouldn't be called")
class ConditionalFactory(object):
"""
A helper class to create a conditional module and associated source and child modules
to allow for testing.
"""
@staticmethod
def create(system, source_is_error_module=False):
"""
return a dict of modules: the conditional with a single source and a single child.
Keys are 'cond_module', 'source_module', and 'child_module'.
if the source_is_error_module flag is set, create a real ErrorModule for the source.
"""
descriptor_system = get_test_descriptor_system()
# construct source descriptor and module:
source_location = Location("edX", "conditional_test", "test_run", "problem", "SampleProblem", None)
if source_is_error_module:
# Make an error descriptor and module
source_descriptor = NonStaffErrorDescriptor.from_xml(
'some random xml data',
system,
id_generator=CourseLocationGenerator(SlashSeparatedCourseKey('edX', 'conditional_test', 'test_run')),
error_msg='random error message'
)
else:
source_descriptor = Mock()
source_descriptor.location = source_location
source_descriptor.runtime = descriptor_system
source_descriptor.render = lambda view, context=None: descriptor_system.render(source_descriptor, view, context)
# construct other descriptors:
child_descriptor = Mock()
child_descriptor._xmodule.student_view.return_value.content = u'<p>This is a secret</p>'
child_descriptor.student_view = child_descriptor._xmodule.student_view
child_descriptor.displayable_items.return_value = [child_descriptor]
child_descriptor.runtime = descriptor_system
child_descriptor.xmodule_runtime = get_test_system()
child_descriptor.render = lambda view, context=None: descriptor_system.render(child_descriptor, view, context)
child_descriptor.location = source_location.replace(category='html', name='child')
descriptor_system.load_item = {
child_descriptor.location: child_descriptor,
source_location: source_descriptor
}.get
# construct conditional module:
cond_location = Location("edX", "conditional_test", "test_run", "conditional", "SampleConditional", None)
field_data = DictFieldData({
'data': '<conditional/>',
'xml_attributes': {'attempted': 'true'},
'children': [child_descriptor.location],
})
cond_descriptor = ConditionalDescriptor(
descriptor_system,
field_data,
ScopeIds(None, None, cond_location, cond_location)
)
cond_descriptor.xmodule_runtime = system
system.get_module = lambda desc: desc
cond_descriptor.get_required_module_descriptors = Mock(return_value=[source_descriptor])
# return dict:
return {'cond_module': cond_descriptor,
'source_module': source_descriptor,
'child_module': child_descriptor}
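# Illustrative usage of the factory above, mirroring the tests below (names are from this file):
#   modules = ConditionalFactory.create(get_test_system())
#   modules['source_module'].is_attempted = "true"
#   html = modules['cond_module'].render(STUDENT_VIEW).content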
class ConditionalModuleBasicTest(unittest.TestCase):
"""
Make sure that conditional module works, using mocks for
other modules.
"""
def setUp(self):
self.test_system = get_test_system()
def test_icon_class(self):
'''verify that get_icon_class works independent of condition satisfaction'''
modules = ConditionalFactory.create(self.test_system)
for attempted in ["false", "true"]:
for icon_class in ['other', 'problem', 'video']:
modules['source_module'].is_attempted = attempted
modules['child_module'].get_icon_class = lambda: icon_class
self.assertEqual(modules['cond_module'].get_icon_class(), icon_class)
def test_get_html(self):
modules = ConditionalFactory.create(self.test_system)
# because get_test_system returns the repr of the context dict passed to render_template,
# we reverse it here
html = modules['cond_module'].render(STUDENT_VIEW).content
expected = modules['cond_module'].xmodule_runtime.render_template('conditional_ajax.html', {
'ajax_url': modules['cond_module'].xmodule_runtime.ajax_url,
'element_id': u'i4x-edX-conditional_test-conditional-SampleConditional',
'depends': u'i4x-edX-conditional_test-problem-SampleProblem',
})
self.assertEquals(expected, html)
def test_handle_ajax(self):
modules = ConditionalFactory.create(self.test_system)
modules['source_module'].is_attempted = "false"
ajax = json.loads(modules['cond_module'].handle_ajax('', ''))
modules['cond_module'].save()
print "ajax: ", ajax
html = ajax['html']
self.assertFalse(any(['This is a secret' in item for item in html]))
# now change state of the capa problem to make it completed
modules['source_module'].is_attempted = "true"
ajax = json.loads(modules['cond_module'].handle_ajax('', ''))
modules['cond_module'].save()
print "post-attempt ajax: ", ajax
html = ajax['html']
self.assertTrue(any(['This is a secret' in item for item in html]))
def test_error_as_source(self):
'''
Check that handle_ajax works properly if the source is really an ErrorModule,
and that the condition is not satisfied.
'''
modules = ConditionalFactory.create(self.test_system, source_is_error_module=True)
ajax = json.loads(modules['cond_module'].handle_ajax('', ''))
modules['cond_module'].save()
html = ajax['html']
self.assertFalse(any(['This is a secret' in item for item in html]))
class ConditionalModuleXmlTest(unittest.TestCase):
"""
Make sure ConditionalModule works, by loading data in from an XML-defined course.
"""
@staticmethod
def get_system(load_error_modules=True):
'''Get a dummy system'''
return DummySystem(load_error_modules)
def setUp(self):
self.test_system = get_test_system()
def get_course(self, name):
"""Get a test course by directory name. If there's more than one, error."""
print "Importing {0}".format(name)
modulestore = XMLModuleStore(DATA_DIR, course_dirs=[name])
courses = modulestore.get_courses()
self.modulestore = modulestore
self.assertEquals(len(courses), 1)
return courses[0]
def test_conditional_module(self):
"""Make sure that conditional module works"""
print "Starting import"
course = self.get_course('conditional_and_poll')
print "Course: ", course
print "id: ", course.id
def inner_get_module(descriptor):
if isinstance(descriptor, Location):
location = descriptor
descriptor = self.modulestore.get_item(location, depth=None)
descriptor.xmodule_runtime = get_test_system()
descriptor.xmodule_runtime.get_module = inner_get_module
return descriptor
# edx - HarvardX
# cond_test - ER22x
location = Location("HarvardX", "ER22x", "2013_Spring", "conditional", "condone")
def replace_urls(text, staticfiles_prefix=None, replace_prefix='/static/', course_namespace=None):
return text
self.test_system.replace_urls = replace_urls
self.test_system.get_module = inner_get_module
module = inner_get_module(location)
print "module: ", module
print "module children: ", module.get_children()
print "module display items (children): ", module.get_display_items()
html = module.render(STUDENT_VIEW).content
print "html type: ", type(html)
print "html: ", html
html_expect = module.xmodule_runtime.render_template(
'conditional_ajax.html',
{
# Test ajax url is just usage-id / handler_name
'ajax_url': '{}/xmodule_handler'.format(location.to_deprecated_string()),
'element_id': u'i4x-HarvardX-ER22x-conditional-condone',
'depends': u'i4x-HarvardX-ER22x-problem-choiceprob'
}
)
self.assertEqual(html, html_expect)
gdi = module.get_display_items()
print "gdi=", gdi
ajax = json.loads(module.handle_ajax('', ''))
module.save()
print "ajax: ", ajax
html = ajax['html']
self.assertFalse(any(['This is a secret' in item for item in html]))
# Now change state of the capa problem to make it completed
inner_module = inner_get_module(location.replace(category="problem", name='choiceprob'))
inner_module.attempts = 1
# Save our modifications to the underlying KeyValueStore so they can be persisted
inner_module.save()
ajax = json.loads(module.handle_ajax('', ''))
module.save()
print "post-attempt ajax: ", ajax
html = ajax['html']
self.assertTrue(any(['This is a secret' in item for item in html]))
def test_conditional_module_with_empty_sources_list(self):
"""
If a ConditionalDescriptor is initialized with an empty sources_list, we assert that the sources_list is set
via generating UsageKeys from the values in xml_attributes['sources']
"""
dummy_system = Mock()
dummy_location = Location("edX", "conditional_test", "test_run", "conditional", "SampleConditional", None)
dummy_scope_ids = ScopeIds(None, None, dummy_location, dummy_location)
dummy_field_data = DictFieldData({
'data': '<conditional/>',
'xml_attributes': {'sources': 'i4x://HarvardX/ER22x/poll_question/T15_poll'},
'children': None,
})
conditional = ConditionalDescriptor(
dummy_system,
dummy_field_data,
dummy_scope_ids,
)
self.assertEqual(
conditional.sources_list[0],
conditional.location.course_key.make_usage_key_from_deprecated_string(conditional.xml_attributes['sources'])
)
def test_conditional_module_parse_sources(self):
dummy_system = Mock()
dummy_location = Location("edX", "conditional_test", "test_run", "conditional", "SampleConditional", None)
dummy_scope_ids = ScopeIds(None, None, dummy_location, dummy_location)
dummy_field_data = DictFieldData({
'data': '<conditional/>',
'xml_attributes': {'sources': 'i4x://HarvardX/ER22x/poll_question/T15_poll;i4x://HarvardX/ER22x/poll_question/T16_poll'},
'children': None,
})
conditional = ConditionalDescriptor(
dummy_system,
dummy_field_data,
dummy_scope_ids,
)
self.assertEqual(
conditional.parse_sources(conditional.xml_attributes),
['i4x://HarvardX/ER22x/poll_question/T15_poll', 'i4x://HarvardX/ER22x/poll_question/T16_poll']
)
|
RackHD-Mirror/RackHD | refs/heads/master | test/tests/rackhd20/test_rackhd20_api_lookups.py | 13 | '''
Copyright 2016, EMC, Inc.
Author(s):
George Paulos
'''
import fit_path # NOQA: unused import
import os
import sys
import subprocess
import fit_common
# Select test group here using @attr
from nose.plugins.attrib import attr
@attr(all=True, regression=True, smoke=True)
class rackhd20_api_lookups(fit_common.unittest.TestCase):
def setUp(self):
# delete any instance of test lookup
api_data = fit_common.rackhdapi("/api/2.0/lookups")
for item in api_data['json']:
if item['macAddress'] == "00:0a:0a:0a:0a:0a":
fit_common.rackhdapi("/api/2.0/lookups/" + item['id'], action="delete")
def test_api_20_lookups_ID(self):
api_data = fit_common.rackhdapi("/api/2.0/lookups")
self.assertEqual(api_data['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
for item in api_data['json']:
self.assertEqual(fit_common.rackhdapi("/api/2.0/lookups/" + item['id'])
['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
# this test cross-references node MAC addresses to lookup tables
def test_api_20_lookups_cross_reference(self):
nodecatalog = fit_common.rackhdapi("/api/2.0/nodes")['json']
lookuptable = fit_common.rackhdapi("/api/2.0/lookups")['json']
errorlist = ""
for node in nodecatalog:
# get list of compute nodes with sku
if node['type'] == "compute" and 'sku' in node and 'identifiers' in node:
# find node entry mac addresses
for macaddr in node['identifiers']:
# find mac address in lookup table
for lookupid in lookuptable:
#verify node ID for mac address
if macaddr in lookupid['macAddress']:
if fit_common.VERBOSITY >= 2:
print "*** Checking Node ID: " + node['id'] + " MAC: " + macaddr
if 'node' not in lookupid:
errorlist = errorlist + "Missing node ID: " + node['id'] + " MAC: " + macaddr + "\n"
if node['id'] != lookupid['node']:
errorlist = errorlist + "Wrong node in lookup table ID: " + lookupid['id'] + "\n"
if errorlist != "":
print "**** Lookup Errors:"
print errorlist
self.assertEqual(errorlist, "", "Errors in lookup table detected.")
def test_api_20_lookups_post_get_delete(self):
node = fit_common.node_select()[0]
data_payload = {
"macAddress": "00:0a:0a:0a:0a:0a",
"ipAddress": "128.128.128.128",
"node": node
}
api_data = fit_common.rackhdapi("/api/2.0/lookups", action="post", payload=data_payload)
self.assertEqual(api_data['status'], 201, 'Incorrect HTTP return code, expected 201, got:' + str(api_data['status']))
lookup_id = api_data['json']['id']
api_data = fit_common.rackhdapi("/api/2.0/lookups/" + lookup_id)
self.assertEqual(api_data['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
self.assertEqual(api_data['json']['macAddress'], "00:0a:0a:0a:0a:0a", "Bad lookup MAC Address")
self.assertEqual(api_data['json']['ipAddress'], "128.128.128.128", "Bad lookup IP Address")
self.assertEqual(api_data['json']['node'], node, "Bad lookup node ID")
api_data = fit_common.rackhdapi("/api/2.0/lookups/" + lookup_id, action="delete")
self.assertEqual(api_data['status'], 204, 'Incorrect HTTP return code, expected 204, got:' + str(api_data['status']))
if __name__ == '__main__':
fit_common.unittest.main()
|
lj020326/cloudify3-plugin-test | refs/heads/master | plugin/__init__.py | 1 | __author__ = 'Lee' |
dimroc/tensorflow-mnist-tutorial | refs/heads/master | lib/python3.6/site-packages/tensorflow/examples/tutorials/mnist/input_data.py | 165 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for downloading and reading MNIST data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import tempfile
import numpy
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets
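# Typical usage in the MNIST tutorials (illustrative):
#   mnist = read_data_sets("MNIST_data/", one_hot=True)
#   images, labels = mnist.train.next_batch(100)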
|
wanggang3333/scikit-learn | refs/heads/master | sklearn/covariance/tests/test_graph_lasso.py | 272 | """ Test the graph_lasso module.
"""
import sys
import numpy as np
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_less
from sklearn.covariance import (graph_lasso, GraphLasso, GraphLassoCV,
empirical_covariance)
from sklearn.datasets.samples_generator import make_sparse_spd_matrix
from sklearn.externals.six.moves import StringIO
from sklearn.utils import check_random_state
from sklearn import datasets
def test_graph_lasso(random_state=0):
# Sample data from a sparse multivariate normal
dim = 20
n_samples = 100
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.95,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
emp_cov = empirical_covariance(X)
for alpha in (0., .1, .25):
covs = dict()
icovs = dict()
for method in ('cd', 'lars'):
cov_, icov_, costs = graph_lasso(emp_cov, alpha=alpha, mode=method,
return_costs=True)
covs[method] = cov_
icovs[method] = icov_
costs, dual_gap = np.array(costs).T
# Check that the costs always decrease (doesn't hold if alpha == 0)
if not alpha == 0:
assert_array_less(np.diff(costs), 0)
# Check that the 2 approaches give similar results
assert_array_almost_equal(covs['cd'], covs['lars'], decimal=4)
assert_array_almost_equal(icovs['cd'], icovs['lars'], decimal=4)
# Smoke test the estimator
model = GraphLasso(alpha=.25).fit(X)
model.score(X)
assert_array_almost_equal(model.covariance_, covs['cd'], decimal=4)
assert_array_almost_equal(model.covariance_, covs['lars'], decimal=4)
# For a centered matrix, assume_centered could be chosen True or False
# Check that this returns indeed the same result for centered data
Z = X - X.mean(0)
precs = list()
for assume_centered in (False, True):
prec_ = GraphLasso(assume_centered=assume_centered).fit(Z).precision_
precs.append(prec_)
assert_array_almost_equal(precs[0], precs[1])
def test_graph_lasso_iris():
# Hard-coded solution from R glasso package for alpha=1.0
# The iris datasets in R and sklearn do not match in a few places, these
# values are for the sklearn version
cov_R = np.array([
[0.68112222, 0.0, 0.2651911, 0.02467558],
[0.00, 0.1867507, 0.0, 0.00],
[0.26519111, 0.0, 3.0924249, 0.28774489],
[0.02467558, 0.0, 0.2877449, 0.57853156]
])
icov_R = np.array([
[1.5188780, 0.0, -0.1302515, 0.0],
[0.0, 5.354733, 0.0, 0.0],
[-0.1302515, 0.0, 0.3502322, -0.1686399],
[0.0, 0.0, -0.1686399, 1.8123908]
])
X = datasets.load_iris().data
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=1.0, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R)
assert_array_almost_equal(icov, icov_R)
def test_graph_lasso_iris_singular():
# Small subset of rows to test the rank-deficient case
# Need to choose samples such that none of the variances are zero
indices = np.arange(10, 13)
# Hard-coded solution from R glasso package for alpha=0.01
cov_R = np.array([
[0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
[0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
[0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
[0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222]
])
icov_R = np.array([
[24.42244057, -16.831679593, 0.0, 0.0],
[-16.83168201, 24.351841681, -6.206896552, -12.5],
[0.0, -6.206896171, 153.103448276, 0.0],
[0.0, -12.499999143, 0.0, 462.5]
])
X = datasets.load_iris().data[indices, :]
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=0.01, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R, decimal=5)
assert_array_almost_equal(icov, icov_R, decimal=5)
def test_graph_lasso_cv(random_state=1):
# Sample data from a sparse multivariate normal
dim = 5
n_samples = 6
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.96,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
# Capture stdout, to smoke test the verbose mode
orig_stdout = sys.stdout
try:
sys.stdout = StringIO()
# We need verbose very high so that Parallel prints on stdout
GraphLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
finally:
sys.stdout = orig_stdout
# Smoke test with specified alphas
GraphLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X)
|
eval1749/elang | refs/heads/master | testing/PRESUBMIT.py | 134 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for testing.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts for
details on the presubmit API built into gcl.
"""
def CommonChecks(input_api, output_api):
output = []
blacklist = [r'gmock.*', r'gtest.*']
output.extend(input_api.canned_checks.RunPylint(
input_api, output_api, black_list=blacklist))
return output
def CheckChangeOnUpload(input_api, output_api):
return CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CommonChecks(input_api, output_api)
|
bdrung/audacity | refs/heads/master | lib-src/libsndfile/src/binheader_writef_check.py | 41 | #!/usr/bin/python
# Copyright (C) 2006-2011 Erik de Castro Lopo <erikd@mega-nerd.com>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the author nor the names of any contributors may be used
# to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This parses C code using regexes (yes, that's horrible) and makes sure
# that calling conventions to the function psf_binheader_writef are
# correct.
import re, string, sys
_whitespace_re = re.compile ("\s+", re.MULTILINE)
def find_binheader_writefs (data):
lst = re.findall ('psf_binheader_writef\s*\(\s*[a-zA-Z_]+\s*,\s*\"[^;]+;', data, re.MULTILINE)
return [_whitespace_re.sub (" ", x) for x in lst]
def find_format_string (s):
fmt = re.search ('"([^"]+)"', s)
if not fmt:
print "Bad format in :\n\n\t%s\n\n" % s
sys.exit (1)
fmt = fmt.groups ()
if len (fmt) != 1:
print "Bad format in :\n\n\t%s\n\n" % s
sys.exit (1)
return _whitespace_re.sub ("", fmt [0])
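# For example (illustrative call, hypothetical format string), given
#   psf_binheader_writef (psf, "Et m8", data, len) ;
# find_format_string returns "Etm8" with the embedded whitespace stripped.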
def get_param_list (data):
dlist = re.search ("\((.+)\)\s*;", data)
dlist = dlist.groups ()[0]
dlist = string.split (dlist, ",")
dlist = [string.strip (x) for x in dlist]
return dlist [2:]
def handle_file (fname):
errors = 0
data = open (fname, "r").read ()
writefs = find_binheader_writefs (data)
for item in writefs:
fmt = find_format_string (item)
params = get_param_list (item)
param_index = 0
# print item
for ch in fmt:
if ch in 'Eet ':
continue
# print " param [%d] %c : %s" % (param_index, ch, params [param_index])
if ch != 'b':
param_index += 1
continue
# print item
# print " param [%d] %c : %s <-> %s" % (param_index, ch, params [param_index], params [param_index + 1])
if string.find (params [param_index + 1], "sizeof") < 0 \
and string.find (params [param_index + 1], "make_size_t") < 0 \
and string.find (params [param_index + 1], "strlen") < 0:
if errors == 0: print
print "\n%s :" % fname
print " param [%d] %c : %s <-> %s" % (param_index, ch, params [param_index], params [param_index + 1])
print " %s" % item
errors += 1
param_index += 2
return errors
#===============================================================================
if len (sys.argv) > 1:
print "\n binheader_writef_check :",
sys.stdout.flush ()
errors = 0
for fname in sys.argv [1:]:
errors += handle_file (fname)
if errors > 0:
print "\nErrors : %d\n" % errors
sys.exit (1)
print "ok\n"
|
toontownfunserver/Panda3D-1.9.0 | refs/heads/master | samples/Solar-System/Tut-Step-5-Complete-Solar-System.py | 3 | # Author: Shao Zhang and Phil Saltzman
# Last Updated: 4/19/2005
#
# This tutorial is intended as an initial panda scripting lesson going over
# display initialization, loading models, placing objects, and the scene graph.
#
# Step 5: Here we put the finishing touches on our solar system model by
# making the planets move. The actual code for doing the movement is covered
# in the next tutorial, but watching it move really shows what inheritance on
# the scene graph is all about.
import direct.directbase.DirectStart
from direct.gui.DirectGui import *
from panda3d.core import Vec3, Vec4
import sys
class World:
def __init__(self):
#This is the initialization we had before
self.title = OnscreenText( #Create the title
text="Panda3D: Tutorial 1 - Solar System",
style=1, fg=(1,1,1,1), pos=(0.8,-0.95), scale = .07)
base.setBackgroundColor(0, 0, 0) #Set the background to black
base.disableMouse() #disable mouse control of the camera
camera.setPos ( 0, 0, 45 ) #Set the camera position (X, Y, Z)
camera.setHpr ( 0, -90, 0 ) #Set the camera orientation
#(heading, pitch, roll) in degrees
#Here again is where we put our global variables. Added this time are
#variables to control the relative speeds of spinning and orbits in the
#simulation
#Number of seconds a full rotation of Earth around the sun should take
self.yearscale = 60
#Number of seconds a day rotation of Earth should take.
        #It is scaled from its correct value for easier visibility
self.dayscale = self.yearscale / 365.0 * 5
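        #With yearscale = 60 this works out to 60 / 365.0 * 5, roughly 0.82 seconds
        #per Earth-day rotation, five times longer than a strictly to-scale day,
        #so the spin is easier to see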
self.orbitscale = 10 #Orbit scale
self.sizescale = 0.6 #Planet size scale
self.loadPlanets() #Load and position the models
#Finally, we call the rotatePlanets function which puts the planets,
#sun, and moon into motion.
self.rotatePlanets()
def loadPlanets(self):
#This is the same function that we completed in the previous step
#It is unchanged in this version
#Create the dummy nodes
self.orbit_root_mercury = render.attachNewNode('orbit_root_mercury')
self.orbit_root_venus = render.attachNewNode('orbit_root_venus')
self.orbit_root_mars = render.attachNewNode('orbit_root_mars')
self.orbit_root_earth = render.attachNewNode('orbit_root_earth')
#The moon orbits Earth, not the sun
self.orbit_root_moon = (
self.orbit_root_earth.attachNewNode('orbit_root_moon'))
###############################################################
#Load the sky
self.sky = loader.loadModel("models/solar_sky_sphere")
self.sky_tex = loader.loadTexture("models/stars_1k_tex.jpg")
self.sky.setTexture(self.sky_tex, 1)
self.sky.reparentTo(render)
self.sky.setScale(40)
#Load the Sun
self.sun = loader.loadModel("models/planet_sphere")
self.sun_tex = loader.loadTexture("models/sun_1k_tex.jpg")
self.sun.setTexture(self.sun_tex, 1)
self.sun.reparentTo(render)
self.sun.setScale(2 * self.sizescale)
#Load mercury
self.mercury = loader.loadModel("models/planet_sphere")
self.mercury_tex = loader.loadTexture("models/mercury_1k_tex.jpg")
self.mercury.setTexture(self.mercury_tex, 1)
self.mercury.reparentTo(self.orbit_root_mercury)
self.mercury.setPos( 0.38 * self.orbitscale, 0, 0)
self.mercury.setScale(0.385 * self.sizescale)
#Load Venus
self.venus = loader.loadModel("models/planet_sphere")
self.venus_tex = loader.loadTexture("models/venus_1k_tex.jpg")
self.venus.setTexture(self.venus_tex, 1)
self.venus.reparentTo(self.orbit_root_venus)
self.venus.setPos( 0.72 * self.orbitscale, 0, 0)
self.venus.setScale(0.923 * self.sizescale)
#Load Mars
self.mars = loader.loadModel("models/planet_sphere")
self.mars_tex = loader.loadTexture("models/mars_1k_tex.jpg")
self.mars.setTexture(self.mars_tex, 1)
self.mars.reparentTo(self.orbit_root_mars)
self.mars.setPos( 1.52 * self.orbitscale, 0, 0)
self.mars.setScale(0.515 * self.sizescale)
#Load Earth
self.earth = loader.loadModel("models/planet_sphere")
self.earth_tex = loader.loadTexture("models/earth_1k_tex.jpg")
self.earth.setTexture(self.earth_tex, 1)
self.earth.reparentTo(self.orbit_root_earth)
self.earth.setScale(self.sizescale)
self.earth.setPos( self.orbitscale, 0, 0)
        #Offset the moon dummy node so that it is positioned properly
self.orbit_root_moon.setPos( self.orbitscale, 0, 0)
#Load the moon
self.moon = loader.loadModel("models/planet_sphere")
self.moon_tex = loader.loadTexture("models/moon_1k_tex.jpg")
self.moon.setTexture(self.moon_tex, 1)
self.moon.reparentTo(self.orbit_root_moon)
self.moon.setScale(0.1 * self.sizescale)
self.moon.setPos(0.1 * self.orbitscale, 0, 0)
#end loadPlanets()
def rotatePlanets(self):
#rotatePlanets creates intervals to actually use the hierarchy we created
#to turn the sun, planets, and moon to give a rough representation of the
#solar system. The next lesson will go into more depth on intervals.
self.day_period_sun = self.sun.hprInterval(20, Vec3(360, 0, 0))
self.orbit_period_mercury = self.orbit_root_mercury.hprInterval(
(0.241 * self.yearscale), Vec3(360, 0, 0))
self.day_period_mercury = self.mercury.hprInterval(
(59 * self.dayscale), Vec3(360, 0, 0))
self.orbit_period_venus = self.orbit_root_venus.hprInterval(
(0.615 * self.yearscale), Vec3(360, 0, 0))
self.day_period_venus = self.venus.hprInterval(
(243 * self.dayscale), Vec3(360,0,0))
self.orbit_period_earth = self.orbit_root_earth.hprInterval(
self.yearscale, Vec3(360, 0, 0))
self.day_period_earth = self.earth.hprInterval(
self.dayscale, Vec3(360, 0, 0))
self.orbit_period_moon = self.orbit_root_moon.hprInterval(
(.0749 * self.yearscale), Vec3(360, 0, 0))
self.day_period_moon = self.moon.hprInterval(
(.0749 * self.yearscale), Vec3(360, 0, 0))
self.orbit_period_mars = self.orbit_root_mars.hprInterval(
(1.881 * self.yearscale), Vec3(360, 0, 0))
self.day_period_mars = self.mars.hprInterval(
(1.03 * self.dayscale), Vec3(360, 0, 0))
self.day_period_sun.loop()
self.orbit_period_mercury.loop()
self.day_period_mercury.loop()
self.orbit_period_venus.loop()
self.day_period_venus.loop()
self.orbit_period_earth.loop()
self.day_period_earth.loop()
self.orbit_period_moon.loop()
self.day_period_moon.loop()
self.orbit_period_mars.loop()
self.day_period_mars.loop()
#end RotatePlanets()
#end class world
w = World()
run()
|
balister/GNU-Radio | refs/heads/adap | gr-video-sdl/python/video_sdl/qa_video_sdl.py | 57 | #!/usr/bin/env python
#
# Copyright 2006,2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, video_sdl
class test_video_sdl (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_000_nop (self):
"""Just see if we can import the module...
They may not have video drivers, etc. Don't try to run anything"""
pass
if __name__ == '__main__':
gr_unittest.run(test_video_sdl, "test_video_sdl.xml")
|
tenggyut/PicScrapy | refs/heads/master | tutorial/items.py | 1 | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class JdPicture(scrapy.Item):
title = scrapy.Field()
image_urls = scrapy.Field()
catalog = scrapy.Field()
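# Illustrative usage in a spider callback (hypothetical values):
#   item = JdPicture(title=u'some picture set',
#                    image_urls=[u'http://example.com/1.jpg'],
#                    catalog=u'default')
#   yield item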
|
Fiware/ops.Sla-dashboard | refs/heads/master | manage.py | 3 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sladashboard.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
alimony/django | refs/heads/master | django/contrib/auth/migrations/0004_alter_user_username_opts.py | 134 | from django.contrib.auth import validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('auth', '0003_alter_user_email_max_length'),
]
# No database changes; modifies validators and error_messages (#13147).
operations = [
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(
error_messages={'unique': 'A user with that username already exists.'}, max_length=30,
validators=[validators.UnicodeUsernameValidator()],
help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.',
unique=True, verbose_name='username'
),
),
]
|
michaelaye/scikit-image | refs/heads/master | skimage/transform/hough_transform.py | 7 | import numpy as np
from scipy import ndimage as ndi
from .. import measure, morphology
from ._hough_transform import _hough_circle
def hough_line_peaks(hspace, angles, dists, min_distance=9, min_angle=10,
threshold=None, num_peaks=np.inf):
"""Return peaks in hough transform.
Identifies most prominent lines separated by a certain angle and distance
in a hough transform. Non-maximum suppression with different sizes is
applied separately in the first (distances) and second (angles) dimension
of the hough space to identify peaks.
Parameters
----------
hspace : (N, M) array
Hough space returned by the `hough_line` function.
angles : (M,) array
Angles returned by the `hough_line` function. Assumed to be continuous.
(`angles[-1] - angles[0] == PI`).
dists : (N, ) array
Distances returned by the `hough_line` function.
min_distance : int
Minimum distance separating lines (maximum filter size for first
dimension of hough space).
min_angle : int
Minimum angle separating lines (maximum filter size for second
dimension of hough space).
threshold : float
Minimum intensity of peaks. Default is `0.5 * max(hspace)`.
num_peaks : int
Maximum number of peaks. When the number of peaks exceeds `num_peaks`,
return `num_peaks` coordinates based on peak intensity.
Returns
-------
hspace, angles, dists : tuple of array
Peak values in hough space, angles and distances.
Examples
--------
>>> from skimage.transform import hough_line, hough_line_peaks
>>> from skimage.draw import line
>>> img = np.zeros((15, 15), dtype=np.bool_)
>>> rr, cc = line(0, 0, 14, 14)
>>> img[rr, cc] = 1
>>> rr, cc = line(0, 14, 14, 0)
>>> img[cc, rr] = 1
>>> hspace, angles, dists = hough_line(img)
>>> hspace, angles, dists = hough_line_peaks(hspace, angles, dists)
>>> len(angles)
2
"""
hspace = hspace.copy()
rows, cols = hspace.shape
if threshold is None:
threshold = 0.5 * np.max(hspace)
distance_size = 2 * min_distance + 1
angle_size = 2 * min_angle + 1
hspace_max = ndi.maximum_filter1d(hspace, size=distance_size, axis=0,
mode='constant', cval=0)
hspace_max = ndi.maximum_filter1d(hspace_max, size=angle_size, axis=1,
mode='constant', cval=0)
mask = (hspace == hspace_max)
hspace *= mask
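    # Only accumulator cells that are local maxima within their
    # (2 * min_distance + 1) x (2 * min_angle + 1) neighbourhood survive here;
    # every other cell has just been zeroed out by the mask.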
hspace_t = hspace > threshold
label_hspace = measure.label(hspace_t)
props = measure.regionprops(label_hspace)
coords = np.array([np.round(p.centroid) for p in props], dtype=int)
hspace_peaks = []
dist_peaks = []
angle_peaks = []
# relative coordinate grid for local neighbourhood suppression
dist_ext, angle_ext = np.mgrid[-min_distance:min_distance + 1,
-min_angle:min_angle + 1]
for dist_idx, angle_idx in coords:
accum = hspace[dist_idx, angle_idx]
if accum > threshold:
# absolute coordinate grid for local neighbourhood suppression
dist_nh = dist_idx + dist_ext
angle_nh = angle_idx + angle_ext
# no reflection for distance neighbourhood
dist_in = np.logical_and(dist_nh > 0, dist_nh < rows)
dist_nh = dist_nh[dist_in]
angle_nh = angle_nh[dist_in]
# reflect angles and assume angles are continuous, e.g.
# (..., 88, 89, -90, -89, ..., 89, -90, -89, ...)
angle_low = angle_nh < 0
dist_nh[angle_low] = rows - dist_nh[angle_low]
angle_nh[angle_low] += cols
angle_high = angle_nh >= cols
dist_nh[angle_high] = rows - dist_nh[angle_high]
angle_nh[angle_high] -= cols
# suppress neighbourhood
hspace[dist_nh, angle_nh] = 0
# add current line to peaks
hspace_peaks.append(accum)
dist_peaks.append(dists[dist_idx])
angle_peaks.append(angles[angle_idx])
hspace_peaks = np.array(hspace_peaks)
dist_peaks = np.array(dist_peaks)
angle_peaks = np.array(angle_peaks)
if num_peaks < len(hspace_peaks):
idx_maxsort = np.argsort(hspace_peaks)[::-1][:num_peaks]
hspace_peaks = hspace_peaks[idx_maxsort]
dist_peaks = dist_peaks[idx_maxsort]
angle_peaks = angle_peaks[idx_maxsort]
return hspace_peaks, angle_peaks, dist_peaks
def hough_circle(image, radius, normalize=True, full_output=False):
"""Perform a circular Hough transform.
Parameters
----------
image : (M, N) ndarray
Input image with nonzero values representing edges.
radius : ndarray
Radii at which to compute the Hough transform.
normalize : boolean, optional (default True)
Normalize the accumulator with the number
of pixels used to draw the radius.
full_output : boolean, optional (default False)
Extend the output size by twice the largest
radius in order to detect centers outside the
input picture.
Returns
-------
H : 3D ndarray (radius index, (M + 2R, N + 2R) ndarray)
Hough transform accumulator for each radius.
R designates the larger radius if full_output is True.
Otherwise, R = 0.
"""
return _hough_circle(image, radius.astype(np.intp),
normalize=normalize, full_output=full_output)
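# Illustrative usage (assumes a binary edge image `edges` and candidate radii):
#   radii = np.arange(15, 31)
#   accum = hough_circle(edges, radii)
#   # accum[i] is the vote image for radius radii[i]; its maximum locates a circle centre.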
|
MartinEnder/erpnext-de | refs/heads/develop | erpnext/selling/doctype/campaign/test_campaign.py | 121 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
test_records = frappe.get_test_records('Campaign') |
jmacmahon/invenio | refs/heads/elasticsearch_logging | modules/websearch/lib/websearch_external_collections.py | 2 | # -*- coding: utf-8 -*-
# This file is part of Invenio.
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""External collection 'core' file.
Perform search, database access."""
__revision__ = "$Id$"
import cgi
import sys
from copy import copy
if sys.hexversion < 0x2040000:
# pylint: disable=W0622
from sets import Set as set
# pylint: enable=W0622
from invenio.config import CFG_SITE_LANG
from invenio.dbquery import run_sql, OperationalError, ProgrammingError
from invenio.messages import gettext_set_language
from invenio.websearch_external_collections_config import CFG_EXTERNAL_COLLECTION_TIMEOUT
from invenio.websearch_external_collections_searcher import external_collections_dictionary
from invenio.websearch_external_collections_getter import HTTPAsyncPageGetter, async_download
from invenio.websearch_external_collections_templates import print_results, print_timeout
from invenio.websearch_external_collections_utils import get_collection_id, get_collection_descendants, \
warning, get_verbose_print
import invenio.template
# Global variables
template = invenio.template.load('websearch_external_collections')
external_collections_state = None
dico_collection_external_searches = None
dico_collection_seealso = None
#dico_collection_external_searches = {}
#dico_collection_seealso = {}
def print_external_results_overview(req, current_collection, pattern_list, field,
external_collection, verbosity_level=0, lang=CFG_SITE_LANG, print_overview=True):
"""Print the external collection overview box. Return the selected external collections and parsed query"""
from invenio.search_engine import create_basic_search_units
assert req
vprint = get_verbose_print(req, 'External collection (print_external_results_overview): ', verbosity_level)
pattern = bind_patterns(pattern_list)
vprint(3, 'pattern = %s' % cgi.escape(pattern))
if not pattern:
return (None, None, None, None)
basic_search_units = create_basic_search_units(None, pattern, field)
vprint(3, 'basic_search_units = %s' % cgi.escape(repr(basic_search_units)))
(search_engines, seealso_engines) = select_external_engines(current_collection, external_collection)
vprint(3, 'search_engines = ' + str(search_engines))
vprint(3, 'seealso_engines = ' + str(seealso_engines))
search_engines_list = external_collection_sort_engine_by_name(search_engines)
vprint(3, 'search_engines_list (sorted) : ' + str(search_engines_list))
if print_overview:
html = template.external_collection_overview(lang, search_engines_list)
req.write(html)
return (search_engines, seealso_engines, pattern, basic_search_units)
def perform_external_collection_search(req, current_collection, pattern_list, field,
external_collection, verbosity_level=0, lang=CFG_SITE_LANG,
selected_external_collections_infos=None, print_overview=True,
print_search_info=True, print_see_also_box=True, print_body=True):
"""Search external collection and print the seealso box."""
vprint = get_verbose_print(req, 'External collection: ', verbosity_level)
if selected_external_collections_infos:
(search_engines, seealso_engines, pattern, basic_search_units) = selected_external_collections_infos
else:
(search_engines, seealso_engines, pattern, basic_search_units) = print_external_results_overview(req,
current_collection, pattern_list, field, external_collection, verbosity_level, lang, print_overview=print_overview)
if not pattern:
return
do_external_search(req, lang, vprint, basic_search_units, search_engines, print_search_info, print_body)
if print_see_also_box:
create_seealso_box(req, lang, vprint, basic_search_units, seealso_engines, pattern)
vprint(3, 'end')
def bind_patterns(pattern_list):
"""Combine a list of patterns in an unique pattern.
pattern_list[0] should be the standart search pattern,
pattern_list[1:] are advanced search patterns."""
# just in case an empty list is fed to this function
try:
if pattern_list[0]:
return pattern_list[0]
except IndexError:
return None
pattern = ""
for pattern_part in pattern_list[1:]:
if pattern_part:
pattern += " " + pattern_part
return pattern.strip()
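# Illustrative behaviour of bind_patterns:
#   bind_patterns(['ellis']) -> 'ellis' (the simple search pattern wins)
#   bind_patterns(['', 'muon', None, 'decay']) -> 'muon decay' (advanced patterns are joined)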
# See also box
def create_seealso_box(req, lang, vprint, basic_search_units=None, seealso_engines=None, query=''):
"Create the box that proposes links to other useful search engines like Google."
vprint(3, 'Create seealso box')
seealso_engines_list = external_collection_sort_engine_by_name(seealso_engines)
vprint(3, 'seealso_engines_list = ' + str(seealso_engines_list))
links = build_seealso_links(basic_search_units, seealso_engines_list, req, lang, query)
html = template.external_collection_seealso_box(lang, links)
req.write(html)
def build_seealso_links(basic_search_units, seealso_engines, req, lang, query):
"""Build the links for the see also box."""
_ = gettext_set_language(lang)
links = []
for engine in seealso_engines:
url = engine.build_search_url(basic_search_units, req.args, lang)
user_url = engine.build_user_search_url(basic_search_units, req.args, lang)
url = user_url or url
if url:
links.append('<a class="google" href="%(url)s">%(query)s %(text_in)s %(name)s</a>' % \
{'url': cgi.escape(url),
'query': cgi.escape(query),
'text_in': _('in'),
'name': _(engine.name)})
return links
# Selection
def select_external_engines(collection_name, selected_external_searches):
"""Build a tuple of two sets. The first one is the list of engine to use for an external search and the
second one is for the seealso box."""
collection_id = get_collection_id(collection_name)
if not collection_id:
return (None, None)
if not type(selected_external_searches) is list:
selected_external_searches = [selected_external_searches]
seealso_engines = set()
search_engines = set()
if dico_collection_seealso.has_key(collection_id):
seealso_engines = copy(dico_collection_seealso[collection_id])
if dico_collection_external_searches.has_key(collection_id):
seealso_engines = seealso_engines.union(dico_collection_external_searches[collection_id])
for ext_search_name in selected_external_searches:
if external_collections_dictionary.has_key(ext_search_name):
engine = external_collections_dictionary[ext_search_name]
if engine.parser:
search_engines.add(engine)
else:
warning('select_external_engines: %(ext_search_name)s unknown.' % locals())
seealso_engines = seealso_engines.difference(search_engines)
return (search_engines, seealso_engines)
# Search
def do_external_search(req, lang, vprint, basic_search_units, search_engines, print_search_info=True, print_body=True):
"""Make the external search."""
_ = gettext_set_language(lang)
vprint(3, 'beginning external search')
engines_list = []
for engine in search_engines:
url = engine.build_search_url(basic_search_units, req.args, lang)
user_url = engine.build_user_search_url(basic_search_units, req.args, lang)
if url:
engines_list.append([url, engine, user_url])
pagegetters_list = [HTTPAsyncPageGetter(engine[0]) for engine in engines_list]
def finished(pagegetter, data, current_time, print_search_info=True, print_body=True):
"""Function called, each time the download of a web page finish.
Will parse and print the results of this page."""
print_results(req, lang, pagegetter, data, current_time, print_search_info, print_body)
finished_list = async_download(pagegetters_list, finished, engines_list, CFG_EXTERNAL_COLLECTION_TIMEOUT, print_search_info, print_body)
for (finished, engine) in zip(finished_list, engines_list):
if not finished:
url = engine[2] or engine[0]
name = engine[1].name
print_timeout(req, lang, engine[1], name, url)
# Database management
def external_collection_load_states():
global external_collections_state, dico_collection_external_searches, dico_collection_seealso
external_collections_state = {}
dico_collection_external_searches = {}
dico_collection_seealso = {}
query = "SELECT collection_externalcollection.id_collection, collection_externalcollection.type, externalcollection.name FROM collection_externalcollection, externalcollection WHERE collection_externalcollection.id_externalcollection = externalcollection.id;"
try:
results = run_sql(query)
except (OperationalError, ProgrammingError):
results = None
if results:
for result in results:
collection_id = int(result[0])
search_type = int(result[1])
engine_name = result[2]
if not external_collections_dictionary.has_key(engine_name):
warning("No search engine : " + engine_name)
continue
engine = external_collections_dictionary[engine_name]
if not external_collections_state.has_key(collection_id):
external_collections_state[collection_id] = {}
col_states = external_collections_state[collection_id]
col_states[engine] = search_type
dictionary = None
if search_type == 1:
dictionary = dico_collection_seealso
if search_type in [2, 3]:
dictionary = dico_collection_external_searches
if dictionary is None:
continue
if not dictionary.has_key(collection_id):
dictionary[collection_id] = set()
engine_set = dictionary[collection_id]
engine_set.add(engine)
def external_collection_get_state(external_collection, collection_id):
external_collection_load_states()
if not external_collections_state.has_key(collection_id):
return 0
col_states = external_collections_state[collection_id]
if not col_states.has_key(external_collection):
return 0
return col_states[external_collection]
def external_collection_get_update_state_list(external_collection, collection_id, state, recurse=False):
changes = []
if external_collection_get_state(external_collection, collection_id) != state:
changes = ['(%(collection_id)d, %(id_externalcollection)d, %(state)d)' %
{'collection_id': collection_id, 'id_externalcollection': external_collection_getid(external_collection), 'state': state}]
if not recurse:
return changes
for descendant_id in get_collection_descendants(collection_id):
changes += external_collection_get_update_state_list(external_collection, descendant_id, state)
return changes
def external_collection_apply_changes(changes_list):
if not changes_list:
return
sql_values = ", ".join(changes_list)
sql = 'INSERT INTO collection_externalcollection (id_collection, id_externalcollection, type) VALUES ' + sql_values + 'ON DUPLICATE KEY UPDATE type=VALUES(type);'
run_sql(sql)
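# Each entry of changes_list is a VALUES tuple string such as "(12, 3, 2)",
# i.e. (id_collection, id_externalcollection, type), as produced by
# external_collection_get_update_state_list() above.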
# Misc functions
def external_collection_sort_engine_by_name(engines_set):
"""Return a list of sorted (by name) search engines."""
if not engines_set:
return []
engines_list = [engine for engine in engines_set]
engines_list.sort(lambda x, y: cmp(x.name, y.name))
return engines_list
# External search ID
def external_collection_getid(external_collection):
"""Return the id of an external_collection. Will create a new entry in DB if needed."""
if external_collection.__dict__.has_key('id'):
return external_collection.id
query = 'SELECT id FROM externalcollection WHERE name="%(name)s";' % {'name': external_collection.name}
results = run_sql(query)
if not results:
query = 'INSERT INTO externalcollection (name) VALUES ("%(name)s");' % {'name': external_collection.name}
run_sql(query)
return external_collection_getid(external_collection)
external_collection.id = results[0][0]
return external_collection.id
def get_external_collection_engine(external_collection_name):
"""Return the external collection engine given its name"""
if external_collections_dictionary.has_key(external_collection_name):
return external_collections_dictionary[external_collection_name]
else:
return None
# Load db infos if it's not already done.
if external_collections_state is None:
external_collection_load_states()
# Hosted Collections related functions (the following functions should eventually be regrouped as above)
# These functions could eventually be placed into there own file, ex. websearch_hosted_collections.py
def calculate_hosted_collections_results(req, pattern_list, field, hosted_collections, verbosity_level=0,
lang=CFG_SITE_LANG, timeout=CFG_EXTERNAL_COLLECTION_TIMEOUT):
"""Ruturn a list of the various results for a every hosted collection organized in tuples"""
# normally, the following should be checked before even running this function so the following line could be removed
if not hosted_collections: return (None, None)
vprint = get_verbose_print(req, 'Hosted collections: ', verbosity_level)
vprint(3, 'pattern_list = %s, field = %s' % (cgi.escape(repr(pattern_list)), cgi.escape(field)))
# firstly we calculate the search parameters, i.e. the actual hosted search engines and the basic search units
(hosted_search_engines, basic_search_units) = \
calculate_hosted_collections_search_params(req,
pattern_list,
field,
hosted_collections,
verbosity_level)
# in case something went wrong with the above calculation just return None
# however, once we run this function no fail should be expected here
    # UPDATE : let search go on even if there are no basic search units (an empty pattern_list and field)
#if basic_search_units == None or len(hosted_search_engines) == 0: return (None, None)
if len(hosted_search_engines) == 0: return (None, None)
# finally return the list of tuples with the results
return do_calculate_hosted_collections_results(req, lang, vprint, verbosity_level, basic_search_units, hosted_search_engines, timeout)
vprint(3, 'end')
def calculate_hosted_collections_search_params(req,
pattern_list,
field,
hosted_collections,
verbosity_level=0):
"""Calculate the searching parameters for the selected hosted collections
i.e. the actual hosted search engines and the basic search units"""
from invenio.search_engine import create_basic_search_units
assert req
vprint = get_verbose_print(req, 'Hosted collections (calculate_hosted_collections_search_params): ', verbosity_level)
pattern = bind_patterns(pattern_list)
vprint(3, 'pattern = %s' % cgi.escape(pattern))
# if for any strange reason there is no pattern, just return
# UPDATE : let search go on even if there is no pattern (an empty pattern_list and field)
#if not pattern: return (None, None)
# calculate the basic search units
basic_search_units = create_basic_search_units(None, pattern, field)
vprint(3, 'basic_search_units = %s' % cgi.escape(repr(basic_search_units)))
# calculate the set of hosted search engines
hosted_search_engines = select_hosted_search_engines(hosted_collections)
vprint(3, 'hosted_search_engines = ' + str(hosted_search_engines))
# no need really to print out a sorted list of the hosted search engines, is there? I'll leave this commented out
#hosted_search_engines_list = external_collection_sort_engine_by_name(hosted_search_engines)
#vprint(3, 'hosted_search_engines_list (sorted) : ' + str(hosted_search_engines_list))
return (hosted_search_engines, basic_search_units)
def select_hosted_search_engines(selected_hosted_collections):
"""Build the set of engines to be used for the hosted collections"""
if not type(selected_hosted_collections) is list:
selected_hosted_collections = [selected_hosted_collections]
hosted_search_engines = set()
for hosted_collection_name in selected_hosted_collections:
if external_collections_dictionary.has_key(hosted_collection_name):
engine = external_collections_dictionary[hosted_collection_name]
# the hosted collection cannot present its results unless it has a parser implemented
if engine.parser:
hosted_search_engines.add(engine)
else:
warning('select_hosted_search_engines: %(hosted_collection_name)s unknown.' % locals())
return hosted_search_engines
def do_calculate_hosted_collections_results(req, lang, vprint, verbosity_level, basic_search_units, hosted_search_engines,
timeout=CFG_EXTERNAL_COLLECTION_TIMEOUT):
"""Actually search the hosted collections and return their results and information in a list of tuples.
One tuple for each hosted collection. Handles timeouts"""
_ = gettext_set_language(lang)
if not vprint:
vprint = get_verbose_print(req, 'Hosted collections (calculate_hosted_collections_search_params): ', verbosity_level)
# defining vprint at this moment probably means we'll just run this one function at this time, therefore the "verbose"
# end hosted search string will not be printed (it is normally printed by the initial calculate function)
# Therefore, either define a flag here to print it by the end of this function or redefine the whole "verbose"
# printing logic of the above functions
vprint(3, 'beginning hosted search')
# list to hold the hosted search engines and their respective search urls
engines_list = []
# list to hold the non timed out results
results_list = []
# list to hold all the results
full_results_list = []
# list to hold all the timeouts
timeout_list = []
# in case this is an engine-only list
if type(hosted_search_engines) is set:
for engine in hosted_search_engines:
url = engine.build_search_url(basic_search_units, req.args, lang)
user_url = engine.build_user_search_url(basic_search_units, req.args, lang)
if url:
engines_list.append([url, engine, user_url])
# in case we are iterating a pre calculated url+engine list
elif type(hosted_search_engines) is list:
for engine in hosted_search_engines:
engines_list.append(engine)
# in both the above cases we end up with a [[search url], [engine]] kind of list
# create the list of search urls to be handed to the asynchronous getter
pagegetters_list = [HTTPAsyncPageGetter(engine[0]) for engine in engines_list]
# function to be run on every result
def finished(pagegetter, data, current_time):
"""Function called, each time the download of a web page finish.
Will parse and print the results of this page."""
# each pagegetter that didn't timeout is added to this list
results_list.append((pagegetter, data, current_time))
# run the asynchronous getter
finished_list = async_download(pagegetters_list, finished, engines_list, timeout)
# create the complete list of tuples, one for each hosted collection, with the results and other information,
# including those that timed out
for (finished, engine) in zip(finished_list, engines_list): #finished_and_engines_list:
if finished:
for result in results_list:
if result[1] == engine:
# the engine is fed the results, it will be parsed later, at printing time
engine[1].parser.parse_and_get_results(result[0].data, feedonly=True)
## the list contains:
## * the engine itself: [ search url], [engine]
## * the parsed number of found results
## * the fetching time
full_results_list.append(
(engine, engine[1].parser.parse_num_results(), result[2])
)
break
elif not finished:
## the list contains:
## * the engine itself: [search url], [engine]
timeout_list.append(engine)
return (full_results_list, timeout_list)
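# Illustrative sketch (not part of the original module): one way a caller might
# summarize the (full_results_list, timeout_list) tuple returned above. The helper
# name is hypothetical; it only assumes the tuple layout documented in the comments
# above ([search url, engine, user url], parsed number of results, fetching time).
def summarize_hosted_results(full_results_list, timeout_list):
    """Return ([(engine name, result count, fetch time), ...], [timed-out engine names])."""
    summaries = [(engine[1].name, num_results, fetch_time)
                 for (engine, num_results, fetch_time) in full_results_list]
    timed_out = [engine[1].name for engine in timeout_list]
    return (summaries, timed_out)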
|
shakamunyi/neutron-vrrp | refs/heads/master | neutron/plugins/opencontrail/contrail_plugin.py | 10 | # Copyright 2014 Juniper Networks. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from oslo.config import cfg
import requests
from neutron.api.v2 import attributes as attr
from neutron.common import exceptions as exc
from neutron.db import portbindings_base
from neutron.extensions import external_net
from neutron.extensions import portbindings
from neutron.extensions import securitygroup
from neutron import neutron_plugin_base_v2
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log as logging
from neutron.plugins.opencontrail.common import exceptions as c_exc
LOG = logging.getLogger(__name__)
opencontrail_opts = [
cfg.StrOpt('api_server_ip', default='127.0.0.1',
help='IP address to connect to opencontrail controller'),
cfg.IntOpt('api_server_port', default=8082,
help='Port to connect to opencontrail controller'),
]
cfg.CONF.register_opts(opencontrail_opts, 'CONTRAIL')
CONTRAIL_EXCEPTION_MAP = {
requests.codes.not_found: c_exc.ContrailNotFoundError,
requests.codes.conflict: c_exc.ContrailConflictError,
requests.codes.bad_request: c_exc.ContrailBadRequestError,
requests.codes.service_unavailable: c_exc.ContrailServiceUnavailableError,
requests.codes.unauthorized: c_exc.ContrailNotAuthorizedError,
requests.codes.internal_server_error: c_exc.ContrailError,
}
class NeutronPluginContrailCoreV2(neutron_plugin_base_v2.NeutronPluginBaseV2,
securitygroup.SecurityGroupPluginBase,
portbindings_base.PortBindingBaseMixin,
external_net.External_net):
supported_extension_aliases = ["security-group", "router",
"port-security", "binding", "agent",
"quotas", "external-net"]
PLUGIN_URL_PREFIX = '/neutron'
__native_bulk_support = False
def __init__(self):
"""Initialize the plugin class."""
super(NeutronPluginContrailCoreV2, self).__init__()
portbindings_base.register_port_dict_function()
self.base_binding_dict = self._get_base_binding_dict()
def _get_base_binding_dict(self):
"""return VIF type and details."""
binding = {
portbindings.VIF_TYPE: portbindings.VIF_TYPE_VROUTER,
portbindings.VIF_DETAILS: {
# TODO(praneetb): Replace with new VIF security details
portbindings.CAP_PORT_FILTER:
'security-group' in self.supported_extension_aliases
}
}
return binding
def _request_api_server(self, url, data=None, headers=None):
"""Send received request to api server."""
return requests.post(url, data=data, headers=headers)
def _relay_request(self, url_path, data=None):
"""Send received request to api server."""
url = "http://%s:%d%s" % (cfg.CONF.CONTRAIL.api_server_ip,
cfg.CONF.CONTRAIL.api_server_port,
url_path)
return self._request_api_server(
url, data=data, headers={'Content-type': 'application/json'})
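# Illustrative note (assuming the default CONTRAIL options above): a request for
# the 'network' resource is POSTed to http://127.0.0.1:8082/neutron/network with
# a JSON body of the form {"context": {...}, "data": {...}} built by
# _request_backend below.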
def _request_backend(self, context, data_dict, obj_name, action):
"""Relays request to the controller."""
context_dict = self._encode_context(context, action, obj_name)
data = jsonutils.dumps({'context': context_dict, 'data': data_dict})
url_path = "%s/%s" % (self.PLUGIN_URL_PREFIX, obj_name)
response = self._relay_request(url_path, data=data)
if response.content:
return response.status_code, response.json()
else:
return response.status_code, response.content
def _encode_context(self, context, operation, apitype):
"""Encode the context to be sent to the controller."""
cdict = {'user_id': getattr(context, 'user_id', ''),
'is_admin': getattr(context, 'is_admin', False),
'operation': operation,
'type': apitype,
'tenant_id': getattr(context, 'tenant_id', None)}
if context.roles:
cdict['roles'] = context.roles
if context.tenant:
cdict['tenant'] = context.tenant
return cdict
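# For example (hypothetical values), the encoded context produced above looks like:
# {'user_id': 'u1', 'is_admin': False, 'operation': 'CREATE', 'type': 'network',
#  'tenant_id': 't1', 'roles': [...], 'tenant': '...'}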
def _encode_resource(self, resource_id=None, resource=None, fields=None,
filters=None):
"""Encode a resource to be sent to the controller."""
resource_dict = {}
if resource_id:
resource_dict['id'] = resource_id
if resource:
resource_dict['resource'] = resource
resource_dict['filters'] = filters
resource_dict['fields'] = fields
return resource_dict
def _prune(self, resource_dict, fields):
"""Prune the resource dictionary based in the fields."""
if fields:
return dict(((key, item) for key, item in resource_dict.items()
if key in fields))
return resource_dict
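# For example, _prune({'id': 'n1', 'name': 'net1', 'shared': False}, ['id', 'name'])
# returns {'id': 'n1', 'name': 'net1'}; with fields unset the dict is returned as-is.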
def _transform_response(self, status_code, info=None, obj_name=None,
fields=None):
"""Transform the response for a Resource API."""
if status_code == requests.codes.ok:
if not isinstance(info, list):
return self._prune(info, fields)
else:
return [self._prune(items, fields) for items in info]
self._raise_contrail_error(status_code, info, obj_name)
def _raise_contrail_error(self, status_code, info, obj_name):
"""Raises an error in handling of a Resource.
This method converts the returned error code into a neutron exception.
"""
if status_code == requests.codes.bad_request:
raise c_exc.ContrailBadRequestError(
msg=info['message'], resource=obj_name)
error_class = CONTRAIL_EXCEPTION_MAP.get(status_code,
c_exc.ContrailError)
raise error_class(msg=info['message'])
def _create_resource(self, res_type, context, res_data):
"""Create a resource in API server.
This method encodes neutron model, and sends it to the
contrail api server.
"""
for key, value in res_data[res_type].items():
if value == attr.ATTR_NOT_SPECIFIED:
res_data[res_type][key] = None
res_dict = self._encode_resource(resource=res_data[res_type])
status_code, res_info = self._request_backend(context, res_dict,
res_type, 'CREATE')
res_dicts = self._transform_response(status_code, info=res_info,
obj_name=res_type)
LOG.debug("create_%(res_type)s(): %(res_dicts)s",
{'res_type': res_type, 'res_dicts': res_dicts})
return res_dicts
def _get_resource(self, res_type, context, res_id, fields):
"""Get a resource from API server.
This method gets a resource from the contrail api server
"""
res_dict = self._encode_resource(resource_id=res_id, fields=fields)
status_code, res_info = self._request_backend(context, res_dict,
res_type, 'READ')
res_dicts = self._transform_response(status_code, info=res_info,
fields=fields, obj_name=res_type)
LOG.debug("get_%(res_type)s(): %(res_dicts)s",
{'res_type': res_type, 'res_dicts': res_dicts})
return res_dicts
def _update_resource(self, res_type, context, res_id, res_data):
"""Update a resource in API server.
This method updates a resource in the contrail api server
"""
res_dict = self._encode_resource(resource_id=res_id,
resource=res_data[res_type])
status_code, res_info = self._request_backend(context, res_dict,
res_type, 'UPDATE')
res_dicts = self._transform_response(status_code, info=res_info,
obj_name=res_type)
LOG.debug("update_%(res_type)s(): %(res_dicts)s",
{'res_type': res_type, 'res_dicts': res_dicts})
return res_dicts
def _delete_resource(self, res_type, context, res_id):
"""Delete a resource in API server
This method deletes a resource in the contrail api server
"""
res_dict = self._encode_resource(resource_id=res_id)
LOG.debug("delete_%(res_type)s(): %(res_id)s",
{'res_type': res_type, 'res_id': res_id})
status_code, res_info = self._request_backend(context, res_dict,
res_type, 'DELETE')
if status_code != requests.codes.ok:
self._raise_contrail_error(status_code, info=res_info,
obj_name=res_type)
def _list_resource(self, res_type, context, filters, fields):
"""Get the list of a Resource."""
res_dict = self._encode_resource(filters=filters, fields=fields)
status_code, res_info = self._request_backend(context, res_dict,
res_type, 'READALL')
res_dicts = self._transform_response(status_code, info=res_info,
fields=fields, obj_name=res_type)
LOG.debug(
"get_%(res_type)s(): filters: %(filters)r data: %(res_dicts)r",
{'res_type': res_type, 'filters': filters,
'res_dicts': res_dicts})
return res_dicts
def _count_resource(self, res_type, context, filters):
"""Get the count of a Resource."""
res_dict = self._encode_resource(filters=filters)
_, res_count = self._request_backend(context, res_dict, res_type,
'READCOUNT')
LOG.debug("get_%(res_type)s_count(): %(res_count)r",
{'res_type': res_type, 'res_count': res_count})
return res_count
def _get_network(self, context, res_id, fields=None):
"""Get the attributes of a Virtual Network."""
return self._get_resource('network', context, res_id, fields)
def create_network(self, context, network):
"""Creates a new Virtual Network."""
return self._create_resource('network', context, network)
def get_network(self, context, network_id, fields=None):
"""Get the attributes of a particular Virtual Network."""
return self._get_network(context, network_id, fields)
def update_network(self, context, network_id, network):
"""Updates the attributes of a particular Virtual Network."""
return self._update_resource('network', context, network_id,
network)
def delete_network(self, context, network_id):
"""Deletes the network with the specified network identifier."""
self._delete_resource('network', context, network_id)
def get_networks(self, context, filters=None, fields=None):
"""Get the list of Virtual Networks."""
return self._list_resource('network', context, filters,
fields)
def get_networks_count(self, context, filters=None):
"""Get the count of Virtual Network."""
networks_count = self._count_resource('network', context, filters)
return networks_count['count']
def create_subnet(self, context, subnet):
"""Creates a new subnet, and assigns it a symbolic name."""
if subnet['subnet']['gateway_ip'] is None:
subnet['subnet']['gateway_ip'] = '0.0.0.0'
if subnet['subnet']['host_routes'] != attr.ATTR_NOT_SPECIFIED:
if (len(subnet['subnet']['host_routes']) >
cfg.CONF.max_subnet_host_routes):
raise exc.HostRoutesExhausted(subnet_id=subnet[
'subnet'].get('id', _('new subnet')),
quota=cfg.CONF.max_subnet_host_routes)
subnet_created = self._create_resource('subnet', context, subnet)
return self._make_subnet_dict(subnet_created)
def _make_subnet_dict(self, subnet):
"""Fixes subnet attributes."""
if subnet.get('gateway_ip') == '0.0.0.0':
subnet['gateway_ip'] = None
return subnet
def _get_subnet(self, context, subnet_id, fields=None):
"""Get the attributes of a subnet."""
subnet = self._get_resource('subnet', context, subnet_id, fields)
return self._make_subnet_dict(subnet)
def get_subnet(self, context, subnet_id, fields=None):
"""Get the attributes of a particular subnet."""
return self._get_subnet(context, subnet_id, fields)
def update_subnet(self, context, subnet_id, subnet):
"""Updates the attributes of a particular subnet."""
subnet = self._update_resource('subnet', context, subnet_id, subnet)
return self._make_subnet_dict(subnet)
def delete_subnet(self, context, subnet_id):
"""
Deletes the subnet with the specified subnet identifier
belonging to the specified tenant.
"""
self._delete_resource('subnet', context, subnet_id)
def get_subnets(self, context, filters=None, fields=None):
"""Get the list of subnets."""
return [self._make_subnet_dict(s)
for s in self._list_resource(
'subnet', context, filters, fields)]
def get_subnets_count(self, context, filters=None):
"""Get the count of subnets."""
subnets_count = self._count_resource('subnet', context, filters)
return subnets_count['count']
def _make_port_dict(self, port, fields=None):
"""filters attributes of a port based on fields."""
if not fields:
port.update(self.base_binding_dict)
else:
for key in self.base_binding_dict:
if key in fields:
port.update(self.base_binding_dict[key])
return port
def _get_port(self, context, res_id, fields=None):
"""Get the attributes of a port."""
port = self._get_resource('port', context, res_id, fields)
return self._make_port_dict(port, fields)
def _update_ips_for_port(self, context, original_ips, new_ips):
"""Add or remove IPs from the port."""
# These ips are still on the port and haven't been removed
prev_ips = []
# the new_ips contain all of the fixed_ips that are to be updated
if len(new_ips) > cfg.CONF.max_fixed_ips_per_port:
msg = _('Exceeded maximum amount of fixed ips per port')
raise exc.InvalidInput(error_message=msg)
# Remove all of the intersecting elements
for original_ip in original_ips[:]:
for new_ip in new_ips[:]:
if ('ip_address' in new_ip and
original_ip['ip_address'] == new_ip['ip_address']):
original_ips.remove(original_ip)
new_ips.remove(new_ip)
prev_ips.append(original_ip)
return new_ips, prev_ips
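# Worked example (hypothetical addresses): with original_ips=[{'ip_address': '10.0.0.3'}]
# and new_ips=[{'ip_address': '10.0.0.3'}, {'ip_address': '10.0.0.4'}], the overlap is
# moved to prev_ips, so this returns ([{'ip_address': '10.0.0.4'}], [{'ip_address': '10.0.0.3'}])
# and update_port below sends prev_ips + added_ips to the backend.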
def create_port(self, context, port):
"""Creates a port on the specified Virtual Network."""
port = self._create_resource('port', context, port)
return self._make_port_dict(port)
def get_port(self, context, port_id, fields=None):
"""Get the attributes of a particular port."""
return self._get_port(context, port_id, fields)
def update_port(self, context, port_id, port):
"""Updates a port.
Updates the attributes of a port on the specified Virtual
Network.
"""
if 'fixed_ips' in port['port']:
original = self._get_port(context, port_id)
added_ips, prev_ips = self._update_ips_for_port(
context, original['fixed_ips'], port['port']['fixed_ips'])
port['port']['fixed_ips'] = prev_ips + added_ips
port = self._update_resource('port', context, port_id, port)
return self._make_port_dict(port)
def delete_port(self, context, port_id):
"""Deletes a port.
Deletes a port on a specified Virtual Network.
If the port contains a remote interface attachment,
the remote interface is first un-plugged and then the port
is deleted.
"""
self._delete_resource('port', context, port_id)
def get_ports(self, context, filters=None, fields=None):
"""Get all ports.
Retrieves all port identifiers belonging to the
specified Virtual Network with the specified filter.
"""
return [self._make_port_dict(p, fields)
for p in self._list_resource('port', context, filters, fields)]
def get_ports_count(self, context, filters=None):
"""Get the count of ports."""
ports_count = self._count_resource('port', context, filters)
return ports_count['count']
# Router API handlers
def create_router(self, context, router):
"""Creates a router.
Creates a new Logical Router, and assigns it
a symbolic name.
"""
return self._create_resource('router', context, router)
def get_router(self, context, router_id, fields=None):
"""Get the attributes of a router."""
return self._get_resource('router', context, router_id, fields)
def update_router(self, context, router_id, router):
"""Updates the attributes of a router."""
return self._update_resource('router', context, router_id,
router)
def delete_router(self, context, router_id):
"""Deletes a router."""
self._delete_resource('router', context, router_id)
def get_routers(self, context, filters=None, fields=None):
"""Retrieves all router identifiers."""
return self._list_resource('router', context, filters, fields)
def get_routers_count(self, context, filters=None):
"""Get the count of routers."""
routers_count = self._count_resource('router', context, filters)
return routers_count['count']
def _validate_router_interface_request(self, interface_info):
"""Validates parameters to the router interface requests."""
port_id_specified = interface_info and 'port_id' in interface_info
subnet_id_specified = interface_info and 'subnet_id' in interface_info
if not (port_id_specified or subnet_id_specified):
msg = _("Either subnet_id or port_id must be specified")
raise exc.BadRequest(resource='router', msg=msg)
def add_router_interface(self, context, router_id, interface_info):
"""Add interface to a router."""
self._validate_router_interface_request(interface_info)
if 'port_id' in interface_info:
if 'subnet_id' in interface_info:
msg = _("Cannot specify both subnet-id and port-id")
raise exc.BadRequest(resource='router', msg=msg)
res_dict = self._encode_resource(resource_id=router_id,
resource=interface_info)
status_code, res_info = self._request_backend(context, res_dict,
'router', 'ADDINTERFACE')
if status_code != requests.codes.ok:
self._raise_contrail_error(status_code, info=res_info,
obj_name='add_router_interface')
return res_info
def remove_router_interface(self, context, router_id, interface_info):
"""Delete interface from a router."""
self._validate_router_interface_request(interface_info)
res_dict = self._encode_resource(resource_id=router_id,
resource=interface_info)
status_code, res_info = self._request_backend(context, res_dict,
'router', 'DELINTERFACE')
if status_code != requests.codes.ok:
self._raise_contrail_error(status_code, info=res_info,
obj_name='remove_router_interface')
return res_info
# Floating IP API handlers
def create_floatingip(self, context, floatingip):
"""Creates a floating IP."""
return self._create_resource('floatingip', context, floatingip)
def update_floatingip(self, context, fip_id, floatingip):
"""Updates the attributes of a floating IP."""
return self._update_resource('floatingip', context, fip_id,
floatingip)
def get_floatingip(self, context, fip_id, fields=None):
"""Get the attributes of a floating ip."""
return self._get_resource('floatingip', context, fip_id, fields)
def delete_floatingip(self, context, fip_id):
"""Deletes a floating IP."""
self._delete_resource('floatingip', context, fip_id)
def get_floatingips(self, context, filters=None, fields=None):
"""Retrieves all floating ips identifiers."""
return self._list_resource('floatingip', context, filters, fields)
def get_floatingips_count(self, context, filters=None):
"""Get the count of floating IPs."""
fips_count = self._count_resource('floatingip', context, filters)
return fips_count['count']
# Security Group handlers
def create_security_group(self, context, security_group):
"""Creates a Security Group."""
return self._create_resource('security_group', context,
security_group)
def get_security_group(self, context, sg_id, fields=None, tenant_id=None):
"""Get the attributes of a security group."""
return self._get_resource('security_group', context, sg_id, fields)
def update_security_group(self, context, sg_id, security_group):
"""Updates the attributes of a security group."""
return self._update_resource('security_group', context, sg_id,
security_group)
def delete_security_group(self, context, sg_id):
"""Deletes a security group."""
self._delete_resource('security_group', context, sg_id)
def get_security_groups(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
"""Retrieves all security group identifiers."""
return self._list_resource('security_group', context,
filters, fields)
def create_security_group_rule(self, context, security_group_rule):
"""Creates a security group rule."""
return self._create_resource('security_group_rule', context,
security_group_rule)
def delete_security_group_rule(self, context, sg_rule_id):
"""Deletes a security group rule."""
self._delete_resource('security_group_rule', context, sg_rule_id)
def get_security_group_rule(self, context, sg_rule_id, fields=None):
"""Get the attributes of a security group rule."""
return self._get_resource('security_group_rule', context,
sg_rule_id, fields)
def get_security_group_rules(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
"""Retrieves all security group rules."""
return self._list_resource('security_group_rule', context,
filters, fields)
|
rrrichter/ufrgs | refs/heads/master | biologia_computacional/assignment7/e7-1a.py | 1 | import numpy as np
import copy
amountOfNodes = 0
class Node:
def __init__(self):
self.children = []
global amountOfNodes
self.id = amountOfNodes-1
amountOfNodes += 1
self.distanceFromParent = 0
# Initiate distance Mat
def initDistMat():
distMat = np.zeros((5,5))
distMat[0][1] = 0.189
distMat[0][2] = 0.11
distMat[0][3] = 0.113
distMat[0][4] = 0.215
distMat[1][0] = 0.189
distMat[1][2] = 0.179
distMat[1][3] = 0.192
distMat[1][4] = 0.211
distMat[2][0] = 0.11
distMat[2][1] = 0.179
distMat[2][3] = 0.09405
distMat[2][4] = 0.205
distMat[3][0] = 0.113
distMat[3][1] = 0.192
distMat[3][2] = 0.0940
distMat[3][4] = 0.2140
distMat[4][0] = 0.215
distMat[4][1] = 0.211
distMat[4][2] = 0.205
distMat[4][3] = 0.214
return distMat
# start nodes
def startNodes():
root = Node()
root.children = [Node(),Node(),Node(),Node(),Node()]
return root
# calculate new distMat
def updateDistMat(distMat, pair):
matSize = distMat.shape[0]
distMat = np.insert(copy.deepcopy(distMat),matSize,0,axis=1)
distMat = np.insert(copy.deepcopy(distMat),matSize,0,axis=0)
# find distance from the rest of taxa to the new node
matSize = distMat.shape[0]
for i in range(matSize):
distMat[matSize-1][i] = (1/2.0)*(distMat[pair[0]][i] + distMat[pair[1]][i] - distMat[pair[0]][pair[1]])
distMat[i][matSize-1] = copy.deepcopy(distMat[matSize-1][i])
print(distMat[i][matSize-1])
print(distMat)
distMat = np.delete(distMat,(pair[1]),axis=0)
distMat = np.delete(distMat,(pair[1]),axis=1)
distMat = np.delete(distMat,(pair[0]),axis=0)
distMat = np.delete(distMat,(pair[0]),axis=1)
return distMat
# find q Mat from distMat
def findQMat(distMat):
matSize = distMat.shape[0]
qMat = np.zeros((matSize,matSize))
for i in range(matSize):
for j in range(matSize):
s1 = 0
for k in range(matSize):
s1+=distMat[i][k]
s2 = 0
for k in range(matSize):
s2+=distMat[j][k]
qMat[i][j] = (5-2)*distMat[i][j] - s1 - s2
return qMat
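# Note: this implements the standard neighbour-joining criterion
# Q(i,j) = (n - 2) * d(i,j) - sum_k d(i,k) - sum_k d(j,k),
# with the taxon count n kept fixed at the initial five taxa here.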
# find pair of distinct taxa with lowest value in q Mat
def findSmallestPair(qMat):
smallest = 999999
smallestIJ = []
for i in range(0,qMat.shape[0]):
for j in range(0,qMat.shape[0]):
if i != j and qMat[i][j] < smallest:
smallest = qMat[i][j]
smallestIJ = [i,j]
return smallestIJ
# generate new node from pair and add to the tree
def updateTree(distMat, smallestPair, root):
newNode = Node()
newNode.id = distMat.shape[0]-2
i = 0
while i < len(root.children):
if root.children[i].id == smallestPair[0] or root.children[i].id == smallestPair[1]:
newNode.children.append(copy.deepcopy(root.children[i]))
print('found')
root.children.pop(i)
i-=1
i+=1
matSize = distMat.shape[0]
# find distance from pair to the newly created node
s1 = 0
for k in range(0,matSize):
s1+=distMat[smallestPair[0]][k]
s2 = 0
for k in range(0,matSize):
s2+=distMat[smallestPair[1]][k]
newNode.children[0].distanceFromParent = (1/2.0)*(distMat[smallestPair[0],smallestPair[1]])+(1/6.0)*(s1-s2)
newNode.children[1].distanceFromParent = distMat[smallestPair[0]][smallestPair[1]] - distMat[matSize-1][smallestPair[0]]
root.children.append(newNode)
# main
distMat = initDistMat()
distMatIdxs = {}
root = startNodes()
for i in range(len(root.children)):
qMat = findQMat(distMat)
smallestPair = findSmallestPair(qMat)
updateTree(distMat,smallestPair,root)
distMat = updateDistMat(distMat,smallestPair)
#print(distMat)
|
helldorado/ansible | refs/heads/devel | lib/ansible/module_utils/alicloud_ecs.py | 66 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2017 Alibaba Group Holding Limited. He Guimin <heguimin36@163.com>
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from ansible.module_utils.basic import env_fallback
try:
import footmark
import footmark.ecs
import footmark.slb
import footmark.vpc
import footmark.rds
import footmark.ess
HAS_FOOTMARK = True
except ImportError:
HAS_FOOTMARK = False
class AnsibleACSError(Exception):
pass
def acs_common_argument_spec():
return dict(
alicloud_access_key=dict(required=True, aliases=['access_key_id', 'access_key'], no_log=True,
fallback=(env_fallback, ['ALICLOUD_ACCESS_KEY', 'ALICLOUD_ACCESS_KEY_ID'])),
alicloud_secret_key=dict(required=True, aliases=['secret_access_key', 'secret_key'], no_log=True,
fallback=(env_fallback, ['ALICLOUD_SECRET_KEY', 'ALICLOUD_SECRET_ACCESS_KEY'])),
alicloud_security_token=dict(aliases=['security_token'], no_log=True,
fallback=(env_fallback, ['ALICLOUD_SECURITY_TOKEN'])),
)
def ecs_argument_spec():
spec = acs_common_argument_spec()
spec.update(
dict(
alicloud_region=dict(required=True, aliases=['region', 'region_id'],
fallback=(env_fallback, ['ALICLOUD_REGION', 'ALICLOUD_REGION_ID'])),
)
)
return spec
def get_acs_connection_info(module):
ecs_params = dict(acs_access_key_id=module.params.get('alicloud_access_key'),
acs_secret_access_key=module.params.get('alicloud_secret_key'),
security_token=module.params.get('alicloud_security_token'),
user_agent='Ansible-Provider-Alicloud')
return module.params.get('alicloud_region'), ecs_params
def connect_to_acs(acs_module, region, **params):
conn = acs_module.connect_to_region(region, **params)
if not conn:
if region not in [acs_module_region.id for acs_module_region in acs_module.regions()]:
raise AnsibleACSError(
"Region %s does not seem to be available for acs module %s." % (region, acs_module.__name__))
else:
raise AnsibleACSError(
"Unknown problem connecting to region %s for acs module %s." % (region, acs_module.__name__))
return conn
def ecs_connect(module):
""" Return an ecs connection"""
region, ecs_params = get_acs_connection_info(module)
# If we have a region specified, connect to its endpoint.
if region:
try:
ecs = connect_to_acs(footmark.ecs, region, **ecs_params)
except AnsibleACSError as e:
module.fail_json(msg=str(e))
# Otherwise, no region so we fallback to the old connection method
return ecs
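# Illustrative usage sketch (assumes a module built with AnsibleModule from
# ansible.module_utils.basic; the surrounding task code is hypothetical):
#
#     module = AnsibleModule(argument_spec=ecs_argument_spec())
#     ecs = ecs_connect(module)
#
# The slb/vpc/rds/ess helpers below follow the same pattern.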
def slb_connect(module):
""" Return an slb connection"""
region, slb_params = get_acs_connection_info(module)
# If we have a region specified, connect to its endpoint.
if region:
try:
slb = connect_to_acs(footmark.slb, region, **slb_params)
except AnsibleACSError as e:
module.fail_json(msg=str(e))
# Otherwise, no region so we fallback to the old connection method
return slb
def vpc_connect(module):
""" Return an vpc connection"""
region, vpc_params = get_acs_connection_info(module)
# If we have a region specified, connect to its endpoint.
if region:
try:
vpc = connect_to_acs(footmark.vpc, region, **vpc_params)
except AnsibleACSError as e:
module.fail_json(msg=str(e))
# Otherwise, no region so we fallback to the old connection method
return vpc
def rds_connect(module):
""" Return an rds connection"""
region, rds_params = get_acs_connection_info(module)
# If we have a region specified, connect to its endpoint.
if region:
try:
rds = connect_to_acs(footmark.rds, region, **rds_params)
except AnsibleACSError as e:
module.fail_json(msg=str(e))
# Otherwise, no region so we fallback to the old connection method
return rds
def ess_connect(module):
""" Return an ess connection"""
region, ess_params = get_acs_connection_info(module)
# If we have a region specified, connect to its endpoint.
if region:
try:
ess = connect_to_acs(footmark.ess, region, **ess_params)
except AnsibleACSError as e:
module.fail_json(msg=str(e))
# Otherwise, no region so we fallback to the old connection method
return ess
|
shahbazn/neutron | refs/heads/master | neutron/cmd/netns_cleanup.py | 24 | # Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import time
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from neutron.agent.common import config as agent_config
from neutron.agent.common import ovs_lib
from neutron.agent.dhcp import config as dhcp_config
from neutron.agent.l3 import agent as l3_agent
from neutron.agent.l3 import dvr
from neutron.agent.l3 import dvr_fip_ns
from neutron.agent.linux import dhcp
from neutron.agent.linux import external_process
from neutron.agent.linux import interface
from neutron.agent.linux import ip_lib
from neutron.api.v2 import attributes
from neutron.common import config
from neutron.i18n import _LE
LOG = logging.getLogger(__name__)
LB_NS_PREFIX = 'qlbaas-'
NS_MANGLING_PATTERN = ('(%s|%s|%s|%s|%s)' % (dhcp.NS_PREFIX,
l3_agent.NS_PREFIX,
dvr.SNAT_NS_PREFIX,
dvr_fip_ns.FIP_NS_PREFIX,
LB_NS_PREFIX) +
attributes.UUID_PATTERN)
class FakeDhcpPlugin(object):
"""Fake RPC plugin to bypass any RPC calls."""
def __getattribute__(self, name):
def fake_method(*args):
pass
return fake_method
def setup_conf():
"""Setup the cfg for the clean up utility.
Use separate setup_conf for the utility because there are many options
from the main config that do not apply during clean-up.
"""
cli_opts = [
cfg.BoolOpt('force',
default=False,
help=_('Delete the namespace by removing all devices.')),
]
conf = cfg.CONF
conf.register_cli_opts(cli_opts)
agent_config.register_interface_driver_opts_helper(conf)
agent_config.register_use_namespaces_opts_helper(conf)
conf.register_opts(dhcp_config.DHCP_AGENT_OPTS)
conf.register_opts(dhcp_config.DHCP_OPTS)
conf.register_opts(dhcp_config.DNSMASQ_OPTS)
conf.register_opts(interface.OPTS)
return conf
def _get_dhcp_process_monitor(config):
return external_process.ProcessMonitor(config=config,
resource_type='dhcp')
def kill_dhcp(conf, namespace):
"""Disable DHCP for a network if DHCP is still active."""
network_id = namespace.replace(dhcp.NS_PREFIX, '')
dhcp_driver = importutils.import_object(
conf.dhcp_driver,
conf=conf,
process_monitor=_get_dhcp_process_monitor(conf),
network=dhcp.NetModel(conf.use_namespaces, {'id': network_id}),
plugin=FakeDhcpPlugin())
if dhcp_driver.active:
dhcp_driver.disable()
def eligible_for_deletion(conf, namespace, force=False):
"""Determine whether a namespace is eligible for deletion.
Eligibility is determined by having only the lo device or if force
is passed as a parameter.
"""
# filter out namespaces without UUID as the name
if not re.match(NS_MANGLING_PATTERN, namespace):
return False
ip = ip_lib.IPWrapper(namespace=namespace)
return force or ip.namespace_is_empty()
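# For example, a namespace named like 'qdhcp-<uuid>' or 'qrouter-<uuid>' (assuming the
# usual dhcp/l3 prefixes) matches NS_MANGLING_PATTERN and is eligible once only the
# loopback device remains, or unconditionally when --force is given.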
def unplug_device(conf, device):
try:
device.link.delete()
except RuntimeError:
# Maybe the device is OVS port, so try to delete
ovs = ovs_lib.BaseOVS()
bridge_name = ovs.get_bridge_for_iface(device.name)
if bridge_name:
bridge = ovs_lib.OVSBridge(bridge_name)
bridge.delete_port(device.name)
else:
LOG.debug('Unable to find bridge for device: %s', device.name)
def destroy_namespace(conf, namespace, force=False):
"""Destroy a given namespace.
If force is True, then dhcp (if it exists) will be disabled and all
devices will be forcibly removed.
"""
try:
ip = ip_lib.IPWrapper(namespace=namespace)
if force:
kill_dhcp(conf, namespace)
# NOTE: The dhcp driver will remove the namespace if is it empty,
# so a second check is required here.
if ip.netns.exists(namespace):
for device in ip.get_devices(exclude_loopback=True):
unplug_device(conf, device)
ip.garbage_collect_namespace()
except Exception:
LOG.exception(_LE('Error unable to destroy namespace: %s'), namespace)
def cleanup_network_namespaces(conf):
# Identify namespaces that are candidates for deletion.
candidates = [ns for ns in
ip_lib.IPWrapper.get_namespaces()
if eligible_for_deletion(conf, ns, conf.force)]
if candidates:
time.sleep(2)
for namespace in candidates:
destroy_namespace(conf, namespace, conf.force)
def main():
"""Main method for cleaning up network namespaces.
This method will make two passes checking for namespaces to delete. The
process will identify candidates, sleep, and call garbage collect. The
garbage collection will re-verify that the namespace meets the criteria for
deletion (ie it is empty). The period of sleep and the 2nd pass allow
time for the namespace state to settle, so that the check prior deletion
will re-confirm the namespace is empty.
The utility is designed to clean-up after the forced or unexpected
termination of Neutron agents.
The --force flag should only be used as part of the cleanup of a devstack
installation as it will blindly purge namespaces and their devices. This
option also kills any lingering DHCP instances.
"""
conf = setup_conf()
conf()
config.setup_logging()
cleanup_network_namespaces(conf)
|
mega-sanke/mega-snake-python | refs/heads/master | src/notification.py | 1 | import globals
def __notify__(user, type, *message):
"""
This function sends a notification to the user
:param user: the user whom this function notifies
:type user: globals.User
:param type: the type of the message
:type type: str
:param message: the content of the message
:type message: list[str]
"""
message = list(message)
for (i, v) in enumerate(message):
message[i] = str(v)
message.insert(0, type)
message = '&&'.join(message)
user.socket.sendall(message + '\n')
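# For example, notify_variable(user, 'score', 10, 'int') below sends the line
# 'VALUE&&score&&10&&int\n' over the user's socket.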
def notify_error(user, msg):
__notify__(user, 'ERROR', msg)
def notify_message(user, msg):
__notify__(user, 'MESSAGE', msg)
def notify_variable(user, name, value, type):
__notify__(user, 'VALUE', name, value, type) |
rlbabyuk/integration_tests | refs/heads/master | fixtures/parallelizer/__init__.py | 1 | """Parallel testing, supporting arbitrary collection ordering
The Workflow
------------
- Master py.test process starts up, inspects config to decide how many slaves to start, if any
- env['parallel_base_urls'] is inspected first
- py.test config.option.appliances and the related --appliance cmdline flag are used
if env['parallel_base_urls'] isn't set
- if neither are set, no parallelization happens
- Slaves are started
- Master runs collection, blocks until slaves report their collections
- Slaves each run collection and submit them to the master, then block inside their runtest loop,
waiting for tests to run
- Master diffs slave collections against its own; the test ids are verified to match
across all nodes
- Master enters main runtest loop, uses a generator to build lists of test groups which are then
sent to slaves, one group at a time
- For each phase of each test, the slave serializes test reports, which are then unserialized on
the master and handed to the normal pytest reporting hooks, which is able to deal with test
reports arriving out of order
- Before running the last test in a group, the slave will request more tests from the master
- If more tests are received, they are run
- If no tests are received, the slave will shut down after running its final test
- After all slaves are shut down, the master will do its end-of-session reporting as usual, and
shut down
"""
from itertools import groupby
import difflib
import json
import os
import signal
import subprocess
from collections import defaultdict, deque, namedtuple
from datetime import datetime
from itertools import count
import attr
from threading import Thread
from time import sleep, time
from urlparse import urlparse
import pytest
import zmq
from _pytest import runner
from fixtures import terminalreporter
from fixtures.parallelizer import remote
from fixtures.pytest_store import store
from utils import at_exit, conf
from utils.appliance import IPAppliance
from utils.log import create_sublogger
from utils.path import conf_path
# Initialize slaveid to None, indicating this as the master process
# slaves will set this to a unique string when they're initialized
conf.runtime['env']['slaveid'] = None
if not conf.runtime['env'].get('ts'):
ts = str(time())
conf.runtime['env']['ts'] = ts
def pytest_addhooks(pluginmanager):
import hooks
pluginmanager.add_hookspecs(hooks)
@pytest.mark.trylast
def pytest_configure(config):
# configures the parallel session, then fires pytest_parallel_configured
if len(config.option.appliances) > 1:
session = ParallelSession(config)
config.pluginmanager.register(session, "parallel_session")
store.parallelizer_role = 'master'
config.hook.pytest_parallel_configured(parallel_session=session)
else:
config.hook.pytest_parallel_configured(parallel_session=None)
def handle_end_session(signal, frame):
# when signaled, end the current test session immediately
if store.parallel_session:
store.parallel_session.session_finished = True
signal.signal(signal.SIGQUIT, handle_end_session)
@attr.s(hash=False)
class SlaveDetail(object):
slaveid_generator = ('slave{:02d}'.format(i) for i in count())
url = attr.ib()
id = attr.ib(default=attr.Factory(
lambda: next(SlaveDetail.slaveid_generator)))
forbid_restart = attr.ib(default=False, init=False)
tests = attr.ib(default=attr.Factory(set), repr=False)
process = attr.ib(default=None, repr=False)
provider_allocation = attr.ib(default=attr.Factory(list), repr=False)
def start(self):
if self.forbid_restart:
return
devnull = open(os.devnull, 'w')
# worker output redirected to null; useful info comes via messages and logs
self.process = subprocess.Popen(
['python', remote.__file__, self.id, self.url, conf.runtime['env']['ts']],
stdout=devnull,
)
at_exit(self.process.kill)
def poll(self):
if self.process is not None:
return self.process.poll()
class ParallelSession(object):
def __init__(self, config):
self.config = config
self.session = None
self.session_finished = False
self.countfailures = 0
self.collection = []
self.sent_tests = 0
self.log = create_sublogger('master')
self.maxfail = config.getvalue("maxfail")
self._failed_collection_errors = {}
self.terminal = store.terminalreporter
self.trdist = None
self.slaves = {}
self.test_groups = self._test_item_generator()
self._pool = []
from utils.conf import cfme_data
self.provs = sorted(set(cfme_data['management_systems'].keys()),
key=len, reverse=True)
self.used_prov = set()
self.failed_slave_test_groups = deque()
self.slave_spawn_count = 0
self.appliances = self.config.option.appliances
# set up the ipc socket
zmq_endpoint = 'ipc://{}'.format(
config.cache.makedir('parallelize').join(str(os.getpid())))
ctx = zmq.Context.instance()
self.sock = ctx.socket(zmq.ROUTER)
self.sock.bind(zmq_endpoint)
# clean out old slave config if it exists
slave_config = conf_path.join('slave_config.yaml')
slave_config.check() and slave_config.remove()
# write out the slave config
conf.runtime['slave_config'] = {
'args': self.config.args,
'options': self.config.option.__dict__,
'zmq_endpoint': zmq_endpoint,
}
if hasattr(self, "slave_appliances_data"):
conf.runtime['slave_config']["appliance_data"] = self.slave_appliances_data
conf.runtime['slave_config']['options']['use_sprout'] = False # Slaves don't use sprout
conf.save('slave_config')
for base_url in self.appliances:
slave_data = SlaveDetail(url=base_url)
self.slaves[slave_data.id] = slave_data
for slave in sorted(self.slaves):
self.print_message("using appliance {}".format(self.slaves[slave].url),
slave, green=True)
def _slave_audit(self):
# XXX: There is currently no mechanism to add or remove slave_urls, short of
# firing up the debugger and doing it manually. This is making room for
# planned future abilities to dynamically add and remove slaves via automation
# check for unexpected slave shutdowns and redistribute tests
for slave in self.slaves.values():
returncode = slave.poll()
if returncode:
slave.process = None
if returncode == -9:
msg = '{} killed due to error, respawning'.format(slave.id)
else:
msg = '{} terminated unexpectedly with status {}, respawning'.format(
slave.id, returncode)
if slave.tests:
failed_tests, slave.tests = slave.tests, set()
num_failed_tests = len(failed_tests)
self.sent_tests -= num_failed_tests
msg += ' and redistributing {} tests'.format(num_failed_tests)
self.failed_slave_test_groups.append(failed_tests)
self.print_message(msg, purple=True)
# If a slave was terminated for any reason, kill that slave
# the terminated flag implies the appliance has died :(
for slave in list(self.slaves.values()):
if slave.forbid_restart:
if slave.process is None:
self.config.hook.pytest_miq_node_shutdown(
config=self.config, nodeinfo=slave.url)
del self.slaves[slave.id]
else:
# no hook call here, a future audit will handle the fallout
self.print_message(
"{}'s appliance has died, deactivating slave".format(slave.id))
self.interrupt(slave)
else:
if slave.process is None:
slave.start()
self.slave_spawn_count += 1
def send(self, slave, event_data):
"""Send data to slave.
``event_data`` will be serialized as JSON, and so must be JSON serializable
"""
event_json = json.dumps(event_data)
self.sock.send_multipart([slave.id, '', event_json])
def recv(self):
# poll the zmq socket, populate the recv queue deque with responses
events = zmq.zmq_poll([(self.sock, zmq.POLLIN)], 50)
if not events:
return None, None, None
slaveid, _, event_json = self.sock.recv_multipart(flags=zmq.NOBLOCK)
event_data = json.loads(event_json)
event_name = event_data.pop('_event_name')
if slaveid not in self.slaves:
self.log.error("message from terminated worker %s %s %s",
slaveid, event_name, event_data)
return None, None, None
return self.slaves[slaveid], event_data, event_name
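# The master/slave wire format used by send()/recv() above is a zmq ROUTER multipart
# message of [slave id, empty delimiter frame, JSON payload], where the payload
# carries an '_event_name' key plus the event-specific fields.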
def print_message(self, message, prefix='master', **markup):
"""Print a message from a node to the py.test console
Args:
message: The message to print
**markup: If set, overrides the default markup when printing the message
"""
# differentiate master and slave messages by default
prefix = getattr(prefix, 'id', prefix)
if not markup:
if prefix == 'master':
markup = {'blue': True}
else:
markup = {'cyan': True}
stamp = datetime.now().strftime("%Y%m%d %H:%M:%S")
self.terminal.write_ensure_prefix(
'({})[{}] '.format(prefix, stamp), message, **markup)
def ack(self, slave, event_name):
"""Acknowledge a slave's message"""
self.send(slave, 'ack {}'.format(event_name))
def monitor_shutdown(self, slave):
# non-daemon so slaves get every opportunity to shut down cleanly
shutdown_thread = Thread(target=self._monitor_shutdown_t,
args=(slave.id, slave.process))
shutdown_thread.start()
def _monitor_shutdown_t(self, slaveid, process):
# a KeyError here means self.slaves got mangled, indicating a problem elsewhere
if process is None:
self.log.warning('Slave was missing when trying to monitor shutdown')
def sleep_and_poll():
start_time = time()
# configure the polling logic
polls = 0
# how often to poll
poll_sleep_time = .5
# how often to report (calculated to be around once a minute based on poll_sleep_time)
poll_report_modulo = 60 / poll_sleep_time
# maximum time to wait
poll_num_sec = 300
while (time() - start_time) < poll_num_sec:
polls += 1
yield
if polls % poll_report_modulo == 0:
remaining_time = int(poll_num_sec - (time() - start_time))
self.print_message(
'{} still shutting down, '
'will continue polling for {} seconds '
.format(slaveid, remaining_time), blue=True)
sleep(poll_sleep_time)
# start the poll
for poll in sleep_and_poll():
ec = process.poll()
if ec is None:
continue
else:
if ec == 0:
self.print_message('{} exited'.format(slaveid), green=True)
else:
self.print_message('{} died'.format(slaveid), red=True)
break
else:
self.print_message('{} failed to shut down gracefully; killed'.format(slaveid),
red=True)
process.kill()
def interrupt(self, slave, **kwargs):
"""Nicely ask a slave to terminate"""
slave.forbid_restart = True
if slave.poll() is None:
slave.process.send_signal(subprocess.signal.SIGINT)
self.monitor_shutdown(slave, **kwargs)
def kill(self, slave, **kwargs):
"""Rudely kill a slave"""
slave.forbid_restart = True
if slave.poll() is None:
slave.process.kill()
self.monitor_shutdown(slave, **kwargs)
def send_tests(self, slave):
"""Send a slave a group of tests"""
try:
tests = list(self.failed_slave_test_groups.popleft())
except IndexError:
tests = self.get(slave)
self.send(slave, tests)
slave.tests.update(tests)
collect_len = len(self.collection)
tests_len = len(tests)
self.sent_tests += tests_len
if tests:
self.print_message('sent {} tests to {} ({}/{}, {:.1f}%)'.format(
tests_len, slave.id, self.sent_tests, collect_len,
self.sent_tests * 100. / collect_len
))
return tests
def pytest_sessionstart(self, session):
"""pytest sessionstart hook
- sets up distributed terminal reporter
- sets up the zmq ipc socket for the slaves to use
- writes pytest options and args to slave_config.yaml
- starts the slaves
- register atexit kill hooks to destroy slaves at the end if things go terribly wrong
"""
# If reporter() gave us a fake terminal reporter in __init__, the real
# terminal reporter is registered by now
self.terminal = store.terminalreporter
self.trdist = TerminalDistReporter(self.config, self.terminal)
self.config.pluginmanager.register(self.trdist, "terminaldistreporter")
self.session = session
def pytest_runtestloop(self):
"""pytest runtest loop
- Disable the master terminal reporter hooks, so we can add our own handlers
that include the slaveid in the output
- Send tests to slaves when they ask
- Log the starting of tests and test results, including slave id
- Handle clean slave shutdown when they finish their runtest loops
- Restore the master terminal reporter after testing so we get the final report
"""
# Build master collection for slave diffing and distribution
self.collection = [item.nodeid for item in self.session.items]
# Fire up the workers after master collection is complete
# master and the first slave share an appliance, this is a workaround to prevent a slave
# from altering an appliance while master collection is still taking place
for slave in self.slaves.values():
slave.start()
try:
self.print_message("Waiting for {} slave collections".format(len(self.slaves)),
red=True)
# Turn off the terminal reporter to suppress the builtin logstart printing
terminalreporter.disable()
while True:
# spawn/kill/replace slaves if needed
self._slave_audit()
if not self.slaves:
# All slaves are killed or errored, we're done with tests
self.print_message('all slaves have exited', yellow=True)
self.session_finished = True
if self.session_finished:
break
slave, event_data, event_name = self.recv()
if event_name == 'message':
message = event_data.pop('message')
markup = event_data.pop('markup')
# messages are special, handle them immediately
self.print_message(message, slave, **markup)
self.ack(slave, event_name)
elif event_name == 'collectionfinish':
slave_collection = event_data['node_ids']
# compare slave collection to the master, all test ids must be the same
self.log.debug('diffing {} collection'.format(slave.id))
diff_err = report_collection_diff(
slave.id, self.collection, slave_collection)
if diff_err:
self.print_message(
'collection differs, respawning', slave.id,
purple=True)
self.print_message(diff_err, purple=True)
self.log.error('{}'.format(diff_err))
self.kill(slave)
slave.start()
else:
self.ack(slave, event_name)
elif event_name == 'need_tests':
self.send_tests(slave)
self.log.info('starting master test distribution')
elif event_name == 'runtest_logstart':
self.ack(slave, event_name)
self.trdist.runtest_logstart(
slave.id,
event_data['nodeid'],
event_data['location'])
elif event_name == 'runtest_logreport':
self.ack(slave, event_name)
report = unserialize_report(event_data['report'])
if report.when in ('call', 'teardown'):
slave.tests.discard(report.nodeid)
self.trdist.runtest_logreport(slave.id, report)
elif event_name == 'internalerror':
self.ack(slave, event_name)
self.print_message(event_data['message'], slave, purple=True)
self.kill(slave)
elif event_name == 'shutdown':
self.config.hook.pytest_miq_node_shutdown(
config=self.config, nodeinfo=slave.url)
self.ack(slave, event_name)
del self.slaves[slave.id]
self.monitor_shutdown(slave)
# total slave spawn count * 3, to allow for each slave's initial spawn
# and then each slave (on average) can fail two times
if self.slave_spawn_count >= len(self.appliances) * 3:
self.print_message(
'too many slave respawns, exiting',
red=True, bold=True)
raise KeyboardInterrupt('Interrupted due to slave failures')
except Exception as ex:
self.log.error('Exception in runtest loop:')
self.log.exception(ex)
self.print_message(str(ex))
raise
finally:
terminalreporter.enable()
# Suppress other runtestloop calls
return True
def _test_item_generator(self):
for tests in self._modscope_item_generator():
yield tests
def _modscope_item_generator(self):
# breaks out tests by module, can work just about any way we want
# as long as it yields lists of tests id from the master collection
sent_tests = 0
collection_len = len(self.collection)
def get_fspart(nodeid):
return nodeid.split('::')[0]
for fspath, gen_moditems in groupby(self.collection, key=get_fspart):
for tests in self._modscope_id_splitter(gen_moditems):
sent_tests += len(tests)
self.log.info('{} tests remaining to send'.format(
collection_len - sent_tests))
yield list(tests)
def _modscope_id_splitter(self, module_items):
# given a list of item ids from one test module, break up tests into groups with the same id
parametrized_ids = defaultdict(list)
for item in module_items:
if '[' in item:
# split on the leftmost bracket, then strip everything after the rightmost bracket
# so 'test_module.py::test_name[parametrized_id]' becomes 'parametrized_id'
parametrized_id = item.split('[')[1].rstrip(']')
else:
# splits failed, item has no parametrized id
parametrized_id = 'no params'
parametrized_ids[parametrized_id].append(item)
for id, tests in parametrized_ids.items():
if tests:
self.log.info('sent tests with param {} {!r}'.format(id, tests))
yield tests
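# For example, 'test_mod.py::test_a[prov1]' and 'test_mod.py::test_b[prov1]' end up in
# the same group (parametrized id 'prov1'), while unparametrized tests from the module
# are grouped together under 'no params'.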
def get(self, slave):
def provs_of_tests(test_group):
found = set()
for test in test_group:
found.update(pv for pv in self.provs
if '[' in test and pv in test)
return sorted(found)
if not self._pool:
for test_group in self.test_groups:
self._pool.append(test_group)
self.used_prov.update(provs_of_tests(test_group))
if self.used_prov:
self.ratio = float(len(self.slaves)) / len(self.used_prov)
else:
self.ratio = 0.0
if not self._pool:
return []
appliance_num_limit = 1
for idx, test_group in enumerate(self._pool):
provs = provs_of_tests(test_group)
if provs:
prov = provs[0]
if prov in slave.provider_allocation:
# provider is already with the slave, so just return the tests
self._pool.remove(test_group)
return test_group
else:
if len(slave.provider_allocation) >= appliance_num_limit:
continue
else:
# Adding provider to slave since there are not too many
slave.provider_allocation.append(prov)
self._pool.remove(test_group)
return test_group
else:
# No providers - ie, not a provider parametrized test
# or no params, so not parametrized at all
self._pool.remove(test_group)
return test_group
# Here means no tests were able to be sent
for test_group in self._pool:
provs = provs_of_tests(test_group)
if provs:
prov = provs[0]
# Already too many slaves with provider
app_url = slave.url
app_ip = urlparse(app_url).netloc
app = IPAppliance(app_ip)
self.print_message(
'cleansing appliance', slave, purple=True)
try:
app.delete_all_providers()
except Exception as e:
self.print_message(
'could not cleanse', slave, red=True)
self.print_message('error: {}'.format(e), slave, red=True)
slave.provider_allocation = [prov]
self._pool.remove(test_group)
return test_group
assert not self._pool, self._pool
return []
def report_collection_diff(slaveid, from_collection, to_collection):
"""Report differences, if any exist, between master and a slave collection
    Returns a unified diff string if the collections differ, otherwise None
    Note:
        This function will sort the collections before comparing them.
"""
from_collection, to_collection = sorted(from_collection), sorted(to_collection)
if from_collection == to_collection:
# Well, that was easy.
return
# diff the two, so we get some idea of what's wrong
diff = difflib.unified_diff(
from_collection,
to_collection,
fromfile='master',
tofile=slaveid,
)
# diff is a line generator, stringify it
diff = '\n'.join([line.rstrip() for line in diff])
return '{slaveid} diff:\n{diff}\n'.format(slaveid=slaveid, diff=diff)
class TerminalDistReporter(object):
"""Terminal Reporter for Distributed Testing
trdist reporter exists to make sure we get good distributed logging during the runtest loop,
which means the normal terminal reporter should be disabled during the loop
This class is where we make sure the terminal reporter is made aware of whatever state it
needs to report properly once we turn it back on after the runtest loop
It has special versions of pytest reporting hooks that, where possible, try to include a
slave ID. These hooks are called in :py:class:`ParallelSession`'s runtestloop hook.
"""
def __init__(self, config, terminal):
self.config = config
self.tr = terminal
self.outcomes = {}
def runtest_logstart(self, slaveid, nodeid, location):
test = self.tr._locationline(nodeid, *location)
prefix = '({}) {}'.format(slaveid, test)
self.tr.write_ensure_prefix(prefix, 'running', blue=True)
self.config.hook.pytest_runtest_logstart(nodeid=nodeid, location=location)
def runtest_logreport(self, slaveid, report):
# Run all the normal logreport hooks
self.config.hook.pytest_runtest_logreport(report=report)
# Now do what the terminal reporter would normally do, but include parallelizer info
outcome, letter, word = self.config.hook.pytest_report_teststatus(report=report)
# Stash stats on the terminal reporter so it reports properly
# after it's reenabled at the end of runtestloop
self.tr.stats.setdefault(outcome, []).append(report)
test = self.tr._locationline(report.nodeid, *report.location)
prefix = '({}) {}'.format(slaveid, test)
try:
# for some reason, pytest_report_teststatus returns a word, markup tuple
# when the word would be 'XPASS', so unpack it here if that's the case
word, markup = word
except (TypeError, ValueError):
# word wasn't iterable or didn't have enough values, use it as-is
pass
if word in ('PASSED', 'xfail'):
markup = {'green': True}
elif word in ('ERROR', 'FAILED', 'XPASS'):
markup = {'red': True}
elif word:
markup = {'yellow': True}
# For every stage where we can report the outcome, stash it in the outcomes dict
if word:
self.outcomes[test] = Outcome(word, markup)
# Then, when we get to the teardown report, print the last outcome
        # This prevents reporting a test as 'PASSED' if its teardown phase fails, for example
if report.when == 'teardown':
word, markup = self.outcomes.pop(test)
self.tr.write_ensure_prefix(prefix, word, **markup)
Outcome = namedtuple('Outcome', ['word', 'markup'])
def unserialize_report(reportdict):
"""
Generate a :py:class:`TestReport <pytest:_pytest.runner.TestReport>` from a serialized report
"""
return runner.TestReport(**reportdict)
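# Illustrative sketch (not part of the original module): the serialized dict is
# expected to carry the usual TestReport fields; the exact keys depend on the
# pytest version in use, but look roughly like this:
#   reportdict = {'nodeid': 'test_mod.py::test_name',
#                 'location': ('test_mod.py', 3, 'test_name'),
#                 'keywords': {}, 'outcome': 'passed', 'longrepr': None,
#                 'when': 'call', 'sections': [], 'duration': 0.01}
#   report = unserialize_report(reportdict)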
|
tempbottle/kbengine | refs/heads/master | kbe/res/scripts/common/Lib/ctypes/test/test_byteswap.py | 71 | import sys, unittest, struct, math, ctypes
from binascii import hexlify
from ctypes import *
def bin(s):
return hexlify(memoryview(s)).decode().upper()
# Each *simple* type that supports different byte orders has an
# __ctype_be__ attribute that specifies the same type in BIG ENDIAN
# byte order, and a __ctype_le__ attribute that is the same type in
# LITTLE ENDIAN byte order.
#
# For Structures and Unions, these types are created on demand.
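# A tiny illustration of the attributes described above (a sketch, not part of
# the test suite; it reuses the bin() helper defined at the top of this module):
#   >>> bin(c_int.__ctype_be__(0x12345678))    # big-endian byte layout
#   '12345678'
#   >>> bin(c_int.__ctype_le__(0x12345678))    # little-endian byte layout
#   '78563412'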
class Test(unittest.TestCase):
@unittest.skip('test disabled')
def test_X(self):
print(sys.byteorder, file=sys.stderr)
for i in range(32):
bits = BITS()
setattr(bits, "i%s" % i, 1)
dump(bits)
def test_endian_short(self):
if sys.byteorder == "little":
self.assertIs(c_short.__ctype_le__, c_short)
self.assertIs(c_short.__ctype_be__.__ctype_le__, c_short)
else:
self.assertIs(c_short.__ctype_be__, c_short)
self.assertIs(c_short.__ctype_le__.__ctype_be__, c_short)
s = c_short.__ctype_be__(0x1234)
self.assertEqual(bin(struct.pack(">h", 0x1234)), "1234")
self.assertEqual(bin(s), "1234")
self.assertEqual(s.value, 0x1234)
s = c_short.__ctype_le__(0x1234)
self.assertEqual(bin(struct.pack("<h", 0x1234)), "3412")
self.assertEqual(bin(s), "3412")
self.assertEqual(s.value, 0x1234)
s = c_ushort.__ctype_be__(0x1234)
self.assertEqual(bin(struct.pack(">h", 0x1234)), "1234")
self.assertEqual(bin(s), "1234")
self.assertEqual(s.value, 0x1234)
s = c_ushort.__ctype_le__(0x1234)
self.assertEqual(bin(struct.pack("<h", 0x1234)), "3412")
self.assertEqual(bin(s), "3412")
self.assertEqual(s.value, 0x1234)
def test_endian_int(self):
if sys.byteorder == "little":
self.assertIs(c_int.__ctype_le__, c_int)
self.assertIs(c_int.__ctype_be__.__ctype_le__, c_int)
else:
self.assertIs(c_int.__ctype_be__, c_int)
self.assertIs(c_int.__ctype_le__.__ctype_be__, c_int)
s = c_int.__ctype_be__(0x12345678)
self.assertEqual(bin(struct.pack(">i", 0x12345678)), "12345678")
self.assertEqual(bin(s), "12345678")
self.assertEqual(s.value, 0x12345678)
s = c_int.__ctype_le__(0x12345678)
self.assertEqual(bin(struct.pack("<i", 0x12345678)), "78563412")
self.assertEqual(bin(s), "78563412")
self.assertEqual(s.value, 0x12345678)
s = c_uint.__ctype_be__(0x12345678)
self.assertEqual(bin(struct.pack(">I", 0x12345678)), "12345678")
self.assertEqual(bin(s), "12345678")
self.assertEqual(s.value, 0x12345678)
s = c_uint.__ctype_le__(0x12345678)
self.assertEqual(bin(struct.pack("<I", 0x12345678)), "78563412")
self.assertEqual(bin(s), "78563412")
self.assertEqual(s.value, 0x12345678)
def test_endian_longlong(self):
if sys.byteorder == "little":
self.assertIs(c_longlong.__ctype_le__, c_longlong)
self.assertIs(c_longlong.__ctype_be__.__ctype_le__, c_longlong)
else:
self.assertIs(c_longlong.__ctype_be__, c_longlong)
self.assertIs(c_longlong.__ctype_le__.__ctype_be__, c_longlong)
s = c_longlong.__ctype_be__(0x1234567890ABCDEF)
self.assertEqual(bin(struct.pack(">q", 0x1234567890ABCDEF)), "1234567890ABCDEF")
self.assertEqual(bin(s), "1234567890ABCDEF")
self.assertEqual(s.value, 0x1234567890ABCDEF)
s = c_longlong.__ctype_le__(0x1234567890ABCDEF)
self.assertEqual(bin(struct.pack("<q", 0x1234567890ABCDEF)), "EFCDAB9078563412")
self.assertEqual(bin(s), "EFCDAB9078563412")
self.assertEqual(s.value, 0x1234567890ABCDEF)
s = c_ulonglong.__ctype_be__(0x1234567890ABCDEF)
self.assertEqual(bin(struct.pack(">Q", 0x1234567890ABCDEF)), "1234567890ABCDEF")
self.assertEqual(bin(s), "1234567890ABCDEF")
self.assertEqual(s.value, 0x1234567890ABCDEF)
s = c_ulonglong.__ctype_le__(0x1234567890ABCDEF)
self.assertEqual(bin(struct.pack("<Q", 0x1234567890ABCDEF)), "EFCDAB9078563412")
self.assertEqual(bin(s), "EFCDAB9078563412")
self.assertEqual(s.value, 0x1234567890ABCDEF)
def test_endian_float(self):
if sys.byteorder == "little":
self.assertIs(c_float.__ctype_le__, c_float)
self.assertIs(c_float.__ctype_be__.__ctype_le__, c_float)
else:
self.assertIs(c_float.__ctype_be__, c_float)
self.assertIs(c_float.__ctype_le__.__ctype_be__, c_float)
s = c_float(math.pi)
self.assertEqual(bin(struct.pack("f", math.pi)), bin(s))
# Hm, what's the precision of a float compared to a double?
self.assertAlmostEqual(s.value, math.pi, places=6)
s = c_float.__ctype_le__(math.pi)
self.assertAlmostEqual(s.value, math.pi, places=6)
self.assertEqual(bin(struct.pack("<f", math.pi)), bin(s))
s = c_float.__ctype_be__(math.pi)
self.assertAlmostEqual(s.value, math.pi, places=6)
self.assertEqual(bin(struct.pack(">f", math.pi)), bin(s))
def test_endian_double(self):
if sys.byteorder == "little":
self.assertIs(c_double.__ctype_le__, c_double)
self.assertIs(c_double.__ctype_be__.__ctype_le__, c_double)
else:
self.assertIs(c_double.__ctype_be__, c_double)
self.assertIs(c_double.__ctype_le__.__ctype_be__, c_double)
s = c_double(math.pi)
self.assertEqual(s.value, math.pi)
self.assertEqual(bin(struct.pack("d", math.pi)), bin(s))
s = c_double.__ctype_le__(math.pi)
self.assertEqual(s.value, math.pi)
self.assertEqual(bin(struct.pack("<d", math.pi)), bin(s))
s = c_double.__ctype_be__(math.pi)
self.assertEqual(s.value, math.pi)
self.assertEqual(bin(struct.pack(">d", math.pi)), bin(s))
def test_endian_other(self):
self.assertIs(c_byte.__ctype_le__, c_byte)
self.assertIs(c_byte.__ctype_be__, c_byte)
self.assertIs(c_ubyte.__ctype_le__, c_ubyte)
self.assertIs(c_ubyte.__ctype_be__, c_ubyte)
self.assertIs(c_char.__ctype_le__, c_char)
self.assertIs(c_char.__ctype_be__, c_char)
def test_struct_fields_1(self):
if sys.byteorder == "little":
base = BigEndianStructure
else:
base = LittleEndianStructure
class T(base):
pass
_fields_ = [("a", c_ubyte),
("b", c_byte),
("c", c_short),
("d", c_ushort),
("e", c_int),
("f", c_uint),
("g", c_long),
("h", c_ulong),
("i", c_longlong),
("k", c_ulonglong),
("l", c_float),
("m", c_double),
("n", c_char),
("b1", c_byte, 3),
("b2", c_byte, 3),
("b3", c_byte, 2),
("a", c_int * 3 * 3 * 3)]
T._fields_ = _fields_
# these fields do not support different byte order:
for typ in c_wchar, c_void_p, POINTER(c_int):
_fields_.append(("x", typ))
class T(base):
pass
self.assertRaises(TypeError, setattr, T, "_fields_", [("x", typ)])
def test_struct_struct(self):
# nested structures with different byteorders
# create nested structures with given byteorders and set memory to data
for nested, data in (
(BigEndianStructure, b'\0\0\0\1\0\0\0\2'),
(LittleEndianStructure, b'\1\0\0\0\2\0\0\0'),
):
for parent in (
BigEndianStructure,
LittleEndianStructure,
Structure,
):
class NestedStructure(nested):
_fields_ = [("x", c_uint32),
("y", c_uint32)]
class TestStructure(parent):
_fields_ = [("point", NestedStructure)]
self.assertEqual(len(data), sizeof(TestStructure))
ptr = POINTER(TestStructure)
s = cast(data, ptr)[0]
del ctypes._pointer_type_cache[TestStructure]
self.assertEqual(s.point.x, 1)
self.assertEqual(s.point.y, 2)
def test_struct_fields_2(self):
# standard packing in struct uses no alignment.
# So, we have to align using pad bytes.
#
# Unaligned accesses will crash Python (on those platforms that
# don't allow it, like sparc solaris).
if sys.byteorder == "little":
base = BigEndianStructure
fmt = ">bxhid"
else:
base = LittleEndianStructure
fmt = "<bxhid"
class S(base):
_fields_ = [("b", c_byte),
("h", c_short),
("i", c_int),
("d", c_double)]
s1 = S(0x12, 0x1234, 0x12345678, 3.14)
s2 = struct.pack(fmt, 0x12, 0x1234, 0x12345678, 3.14)
self.assertEqual(bin(s1), bin(s2))
def test_unaligned_nonnative_struct_fields(self):
if sys.byteorder == "little":
base = BigEndianStructure
fmt = ">b h xi xd"
else:
base = LittleEndianStructure
fmt = "<b h xi xd"
class S(base):
_pack_ = 1
_fields_ = [("b", c_byte),
("h", c_short),
("_1", c_byte),
("i", c_int),
("_2", c_byte),
("d", c_double)]
s1 = S()
s1.b = 0x12
s1.h = 0x1234
s1.i = 0x12345678
s1.d = 3.14
s2 = struct.pack(fmt, 0x12, 0x1234, 0x12345678, 3.14)
self.assertEqual(bin(s1), bin(s2))
def test_unaligned_native_struct_fields(self):
if sys.byteorder == "little":
fmt = "<b h xi xd"
else:
fmt = ">b h xi xd"
class S(Structure):
_pack_ = 1
_fields_ = [("b", c_byte),
("h", c_short),
("_1", c_byte),
("i", c_int),
("_2", c_byte),
("d", c_double)]
s1 = S()
s1.b = 0x12
s1.h = 0x1234
s1.i = 0x12345678
s1.d = 3.14
s2 = struct.pack(fmt, 0x12, 0x1234, 0x12345678, 3.14)
self.assertEqual(bin(s1), bin(s2))
if __name__ == "__main__":
unittest.main()
|
rafaeltomesouza/frontend-class1 | refs/heads/master | aula2/a13/linkedin/client/.gradle/nodejs/node-v7.5.0-darwin-x64/lib/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/gypsh.py | 2779 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""gypsh output module
gypsh is a GYP shell. It's not really a generator per se. All it does is
fire up an interactive Python session with a few local variables set to the
variables passed to the generator. Like gypd, it's intended as a debugging
aid, to facilitate the exploration of .gyp structures after being processed
by the input module.
The expected usage is "gyp -f gypsh -D OS=desired_os".
"""
import code
import sys
# All of this stuff about generator variables was lovingly ripped from gypd.py.
# That module has a much better description of what's going on and why.
_generator_identity_variables = [
'EXECUTABLE_PREFIX',
'EXECUTABLE_SUFFIX',
'INTERMEDIATE_DIR',
'PRODUCT_DIR',
'RULE_INPUT_ROOT',
'RULE_INPUT_DIRNAME',
'RULE_INPUT_EXT',
'RULE_INPUT_NAME',
'RULE_INPUT_PATH',
'SHARED_INTERMEDIATE_DIR',
]
generator_default_variables = {
}
for v in _generator_identity_variables:
generator_default_variables[v] = '<(%s)' % v
def GenerateOutput(target_list, target_dicts, data, params):
locals = {
'target_list': target_list,
'target_dicts': target_dicts,
'data': data,
}
# Use a banner that looks like the stock Python one and like what
# code.interact uses by default, but tack on something to indicate what
# locals are available, and identify gypsh.
banner='Python %s on %s\nlocals.keys() = %s\ngypsh' % \
(sys.version, sys.platform, repr(sorted(locals.keys())))
code.interact(banner, local=locals)
|
sserrot/champion_relationships | refs/heads/master | venv/Lib/site-packages/win32comext/adsi/__init__.py | 1 | import win32com
import win32com.client
if type(__path__)==type(''):
# For freeze to work!
import sys
try:
from . import adsi
sys.modules['win32com.adsi.adsi'] = adsi
except ImportError:
pass
else:
# See if we have a special directory for the binaries (for developers)
win32com.__PackageSupportBuildPath__(__path__)
# Some helpers
# We want to _look_ like the ADSI module, but provide some additional
# helpers.
# Of specific note - most of the interfaces supported by ADSI
# derive from IDispatch - thus, you get the custom methods from the
# interface, as well as via IDispatch.
import pythoncom
from .adsi import *
LCID = 0
IDispatchType = pythoncom.TypeIIDs[pythoncom.IID_IDispatch]
IADsContainerType = pythoncom.TypeIIDs[adsi.IID_IADsContainer]
def _get_good_ret(ob,
# Named arguments used internally
resultCLSID = None):
assert resultCLSID is None, "Now have type info for ADSI objects - fix me!"
# See if the object supports IDispatch
if hasattr(ob, "Invoke"):
import win32com.client.dynamic
name = "Dispatch wrapper around %r" % ob
return win32com.client.dynamic.Dispatch(ob, name, ADSIDispatch)
return ob
class ADSIEnumerator:
def __init__(self, ob):
# Query the object for the container interface.
self._cont_ = ob.QueryInterface(IID_IADsContainer)
self._oleobj_ = ADsBuildEnumerator(self._cont_) # a PyIADsEnumVARIANT
self.index = -1
def __getitem__(self, index):
return self.__GetIndex(index)
def __call__(self, index):
return self.__GetIndex(index)
def __GetIndex(self, index):
if type(index)!=type(0): raise TypeError("Only integer indexes are supported for enumerators")
if index != self.index + 1:
# Index requested out of sequence.
raise ValueError("You must index this object sequentially")
self.index = index
result = ADsEnumerateNext(self._oleobj_, 1)
if len(result):
return _get_good_ret(result[0])
# Failed - reset for next time around.
self.index = -1
self._oleobj_ = ADsBuildEnumerator(self._cont_) # a PyIADsEnumVARIANT
raise IndexError("list index out of range")
class ADSIDispatch(win32com.client.CDispatch):
def _wrap_dispatch_(self, ob, userName = None, returnCLSID = None, UnicodeToString=None):
        assert UnicodeToString is None, "this is deprecated and will be removed"
if not userName:
userName = "ADSI-object"
olerepr = win32com.client.dynamic.MakeOleRepr(ob, None, None)
return ADSIDispatch(ob, olerepr, userName)
def _NewEnum(self):
try:
return ADSIEnumerator(self)
except pythoncom.com_error:
            # doesn't support it - let our base try!
return win32com.client.CDispatch._NewEnum(self)
def __getattr__(self, attr):
try:
return getattr(self._oleobj_, attr)
except AttributeError:
return win32com.client.CDispatch.__getattr__(self, attr)
def QueryInterface(self, iid):
ret = self._oleobj_.QueryInterface(iid)
return _get_good_ret(ret)
# We override the global methods to do the right thing.
_ADsGetObject = ADsGetObject # The one in the .pyd
def ADsGetObject(path, iid = pythoncom.IID_IDispatch):
ret = _ADsGetObject(path, iid)
return _get_good_ret(ret)
_ADsOpenObject = ADsOpenObject
def ADsOpenObject(path, username, password, reserved = 0, iid = pythoncom.IID_IDispatch):
ret = _ADsOpenObject(path, username, password, reserved, iid)
return _get_good_ret(ret)
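# A minimal usage sketch (not part of the original module; the LDAP path below is
# hypothetical and must point at an object that exists in your directory):
#   from win32com.adsi import ADsGetObject
#   ou = ADsGetObject("LDAP://OU=Sales,DC=example,DC=com")
#   print(ou.Name)   # attributes resolve through the Dispatch wrapper built above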
|
LighthouseHPC/lighthouse | refs/heads/master | src/lighthouseProject/dojango/decorators.py | 6 | from django import VERSION as django_version
if django_version >= (1, 5, 0):
import json
else:
from django.utils import simplejson as json
from django.http import HttpResponseNotAllowed, HttpResponseServerError
from util import to_json_response
from util import to_dojo_data
try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.3, 2.4 fallback.
def expect_post_request(func):
"""Allow only POST requests to come in, throw an exception otherwise.
    This relieves you from checking every time that the request is
really a POST request, which it should be when using this
decorator.
"""
    def _ret(*args, **kwargs):
        request = args[0]
        # Check the method before calling the view, so that non-POST requests
        # are rejected without executing the view's side effects.
        if not request.method == 'POST':
            return HttpResponseNotAllowed(['POST'])
        return func(*args, **kwargs)
    return _ret
def add_request_getdict(func):
"""Add the method getdict() to the request object.
This works just like getlist() only that it decodes any nested
JSON encoded object structure.
Since sending deep nested structures is not possible via
GET/POST by default, this enables it. Of course you need to
make sure that on the JavaScript side you are also sending
the data properly, which dojango.send() automatically does.
Example:
this is being sent:
one:1
two:{"three":3, "four":4}
using
request.POST.getdict('two')
returns a dict containing the values sent by the JavaScript.
"""
def _ret(*args, **kwargs):
args[0].POST.__class__.getdict = __getdict
ret = func(*args, **kwargs)
return ret
return _ret
def __getdict(self, key):
ret = self.get(key)
try:
ret = json.loads(ret)
except ValueError: # The value was not JSON encoded :-)
raise Exception('"%s" was not JSON encoded as expected (%s).' % (key, str(ret)))
return ret
def json_response(func):
"""
A simple json response decorator. Use it on views, where a python data object should be converted
to a json response:
@json_response
def my_view(request):
my_data = {'foo': 'bar'}
return my_data
"""
def inner(request, *args, **kwargs):
ret = func(request, *args, **kwargs)
return __prepare_json_ret(request, ret)
return wraps(func)(inner)
def jsonp_response_custom(callback_param_name):
"""
A jsonp (JSON with Padding) response decorator, where you can define your own callbackParamName.
It acts like the json_response decorator but with the difference, that it
wraps the returned json string into a client-specified function name (that is the Padding).
You can add this decorator to a function like that:
@jsonp_response_custom("my_callback_param")
def my_view(request):
my_data = {'foo': 'bar'}
return my_data
Your now can access this view from a foreign URL using JSONP.
An example with Dojo looks like that:
dojo.io.script.get({ url:"http://example.com/my_url/",
callbackParamName:"my_callback_param",
load: function(response){
console.log(response);
}
});
Note: the callback_param_name in the decorator and in your JavaScript JSONP call must be the same.
"""
def decorator(func):
def inner(request, *args, **kwargs):
ret = func(request, *args, **kwargs)
return __prepare_json_ret(request, ret, callback_param_name=callback_param_name)
return wraps(func)(inner)
return decorator
jsonp_response = jsonp_response_custom("jsonp_callback")
jsonp_response.__doc__ = "A predefined jsonp response decorator using 'jsonp_callback' as a fixed callback_param_name."
def json_iframe_response(func):
"""
A simple json response decorator but wrapping the json response into a html page.
It helps when doing a json request using an iframe (e.g. file up-/download):
    @json_iframe_response
def my_view(request):
my_data = {'foo': 'bar'}
return my_data
"""
def inner(request, *args, **kwargs):
ret = func(request, *args, **kwargs)
return __prepare_json_ret(request, ret, use_iframe=True)
return wraps(func)(inner)
def __prepare_json_ret(request, ret, callback_param_name=None, use_iframe=False):
if ret==False:
ret = {'success':False}
elif ret==None: # Sometimes there is no return.
ret = {}
    # Add 'success': True below, since it was obviously not set yet and we got valid data, no exception.
func_name = None
if callback_param_name:
func_name = request.GET.get(callback_param_name, "callbackParamName")
try:
if not ret.has_key('success'):
ret['success'] = True
except AttributeError, e:
raise Exception("The returned data of your function must be a dictionary!")
json_ret = ""
try:
# Sometimes the serialization fails, i.e. when there are too deeply nested objects or even classes inside
json_ret = to_json_response(ret, func_name, use_iframe)
except Exception, e:
print '\n\n===============Exception=============\n\n'+str(e)+'\n\n'
print ret
print '\n\n'
return HttpResponseServerError(content=str(e))
return json_ret
|
ContinuumIO/blaze | refs/heads/master | blaze/compute/tests/test_url_csv_compute.py | 2 | import pytest
import os
from blaze import data, compute
from blaze.utils import raises
from odo import URL, CSV
import pandas as pd
import pandas.util.testing as tm
from functools import partial
try:
from urllib2 import urlopen
from urllib2 import HTTPError, URLError
except ImportError:
from urllib.request import urlopen
from urllib.error import HTTPError, URLError
pytestmark = pytest.mark.skipif(raises(URLError,
partial(urlopen, "http://google.com")),
reason='unable to connect to google.com')
iris_url = ('https://raw.githubusercontent.com/'
'blaze/blaze/master/blaze/examples/data/iris.csv')
@pytest.fixture
def iris_local():
thisdir = os.path.abspath(os.path.dirname(__file__))
return data(os.path.join(thisdir, os.pardir, os.pardir, "examples", "data", "iris.csv"))
def test_url_csv_data(iris_local):
iris_remote = data(iris_url)
assert isinstance(iris_remote.data, URL(CSV))
iris_remote_df = compute(iris_remote)
assert isinstance(iris_remote_df, pd.DataFrame)
iris_local_df = compute(iris_local)
tm.assert_frame_equal(iris_remote_df, iris_local_df)
|
DailyActie/Surrogate-Model | refs/heads/master | 01-codes/scikit-learn-master/benchmarks/bench_plot_ward.py | 1 | """
Benchmark scikit-learn's Ward implementation compared to SciPy's
"""
import time
import numpy as np
import pylab as pl
from scipy.cluster import hierarchy
from sklearn.cluster import AgglomerativeClustering
ward = AgglomerativeClustering(n_clusters=3, linkage='ward')
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
n_features)
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)
for i, n in enumerate(n_samples):
for j, p in enumerate(n_features):
X = np.random.normal(size=(n, p))
t0 = time.time()
ward.fit(X)
scikits_time[j, i] = time.time() - t0
t0 = time.time()
hierarchy.ward(X)
scipy_time[j, i] = time.time() - t0
ratio = scikits_time / scipy_time
pl.figure("scikit-learn Ward's method benchmark results")
pl.imshow(np.log(ratio), aspect='auto', origin="lower")
pl.colorbar()
pl.contour(ratio, levels=[1, ], colors='k')
pl.yticks(range(len(n_features)), n_features.astype(np.int))
pl.ylabel('N features')
pl.xticks(range(len(n_samples)), n_samples.astype(np.int))
pl.xlabel('N samples')
pl.title("Scikit's time, in units of scipy time (log)")
pl.show()
|
khosrow/metpx | refs/heads/master | sarracenia/sarra/sr_shovel.py | 1 | #!/usr/bin/python3
#
# This file is part of sarracenia.
# The sarracenia suite is Free and is proudly provided by the Government of Canada
# Copyright (C) Her Majesty The Queen in Right of Canada, Environment Canada, 2008-2015
#
# Questions or bugs report: dps-client@ec.gc.ca
# sarracenia repository: git://git.code.sf.net/p/metpx/git
# Documentation: http://metpx.sourceforge.net/#SarraDocumentation
#
# sr_shovel.py : python3 program that shovels messages from one source broker
# to another destination broker (called post_broker)
#
#
# Code contributed by:
# Michel Grenier - Shared Services Canada
# Last Changed : Feb 8 16:14:12 EST 2016
#
########################################################################
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
#============================================================
# usage example
#
# sr_shovel [options] [config] [start|stop|restart|reload|status]
#
# sr_shovel consumes message, for each selected message it reannounces it.
# One usage of shovel is to acquire logs from source brokers.
# Another could be to avoid having servers announce to broker x; instead each
# server has its own broker, and all remote brokers interested in its announcements
# could shovel them down to themselves.
#
# broker = the remote broker...
# exchange = Mandatory
# topic_prefix = Mandatory
# subtopic = Mandatory
# accept/reject = default accept everything from previous settings
#
# post_broker = where sarra is running (manager)
# post_exchange = default to the value of exchange
#
# report_exchange = xreport (sent back to broker)
#
#============================================================
#
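# A minimal configuration sketch (illustrative only -- the hostnames, credentials
# and exchanges below are hypothetical and must match your own brokers):
#
#   broker         amqps://reader:password@pump.example.com/
#   exchange       xpublic
#   topic_prefix   v02.post
#   subtopic       #
#   post_broker    amqps://feeder:password@localhost/
#   post_exchange  xpublic
#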
import os,sys,time
try :
from sr_amqp import *
from sr_consumer import *
from sr_instances import *
from sr_message import *
except :
from sarra.sr_amqp import *
from sarra.sr_consumer import *
from sarra.sr_instances import *
from sarra.sr_message import *
class sr_shovel(sr_instances):
def __init__(self,config=None,args=None):
sr_instances.__init__(self,config,args)
def check(self):
if self.broker == None :
self.logger.error("no broker given")
sys.exit(1)
if self.exchange == None :
self.logger.error("no exchange given")
sys.exit(1)
if self.topic_prefix == None :
self.logger.error("no topic_prefix given")
sys.exit(1)
# bindings should be defined
if self.bindings == [] :
key = self.topic_prefix + '.#'
self.bindings.append( (self.exchange,key) )
self.logger.debug("*** BINDINGS %s"% self.bindings)
# accept/reject
self.use_pattern = self.masks != []
self.accept_unmatch = self.masks == []
# make a single list for clusters that we accept message for
self.accept_msg_for_clusters = [ self.cluster ]
self.accept_msg_for_clusters.extend ( self.cluster_aliases )
self.accept_msg_for_clusters.extend ( self.gateway_for )
self.logger.debug("accept_msg_for_clusters %s "% self.accept_msg_for_clusters)
# default queue name if not given
if self.queue_name == None :
self.queue_name = 'q_' + self.broker.username + '.'
self.queue_name += self.program_name + '.' + self.config_name
def close(self):
self.consumer.close()
self.hc_pst.close()
def connect(self):
# =============
# create message if needed
# =============
self.msg = sr_message(self)
# =============
# consumer
# =============
self.consumer = sr_consumer(self)
if self.reportback :
self.msg.report_publisher = self.consumer.publish_back()
self.msg.report_exchange = self.report_exchange
self.logger.info("reportback to %s@%s, exchange: %s" %
( self.broker.username, self.broker.hostname, self.msg.report_exchange ) )
else:
self.logger.info( "reportback suppressed" )
# =============
# publisher
# =============
# publisher host
self.hc_pst = HostConnect( logger = self.logger )
self.hc_pst.set_url( self.post_broker )
self.hc_pst.connect()
# publisher
self.publisher = Publisher(self.hc_pst)
self.publisher.build()
self.msg.publisher = self.publisher
self.msg.post_exchange_split = self.post_exchange_split
def help(self):
print("Usage: %s [OPTIONS] configfile [start|stop|restart|reload|status]\n" % self.program_name )
print("OPTIONS:")
print("instances <nb_of_instances> default 1")
print("\nAMQP consumer broker settings:")
print("\tbroker amqp{s}://<user>:<pw>@<brokerhost>[:port]/<vhost>")
print("\t\t(MANDATORY)")
print("\nAMQP Queue bindings:")
print("\texchange <name> (MANDATORY)")
print("\ttopic_prefix <amqp pattern> (default: v02.post)")
print("\tsubtopic <amqp pattern> (default: #)")
print("\t\t <amqp pattern> = <directory>.<directory>.<directory>...")
print("\t\t\t* single directory wildcard (matches one directory)")
print("\t\t\t# wildcard (matches rest)")
print("\treport_exchange <name> (default: xreport)")
print("\nAMQP Queue settings:")
print("\tdurable <boolean> (default: False)")
print("\texpire <minutes> (default: None)")
print("\tmessage-ttl <minutes> (default: None)")
print("\nMessage settings:")
print("\taccept <regexp pattern> (default: None)")
print("\treject <regexp pattern> (default: None)")
print("\ton_message <script> (default None)")
print("\nAMQP posting broker settings:")
print("\tpost_broker amqp{s}://<user>:<pw>@<brokerhost>[:port]/<vhost>")
print("\t\t(default: manager amqp broker in default.conf)")
print("\tpost_exchange <name> (default xpublic)")
print("\ton_post <script> (default None)")
print("DEBUG:")
print("-debug")
# =============
# __on_message__
# =============
def __on_message__(self):
# the message has not specified a source.
if not 'source' in self.msg.headers :
self.msg.report_publish(403,"Forbidden : message without a source amqp header['source']")
self.logger.error("message without a source amqp header['source']")
return False
# the message has not specified a from_cluster.
if not 'from_cluster' in self.msg.headers :
self.msg.report_publish(403,"Forbidden : message without a cluster amqp header['from_cluster']")
self.logger.error("message without a cluster amqp header['from_cluster']")
return False
# the message has not specified a destination.
if not 'to_clusters' in self.msg.headers :
self.msg.report_publish(403,"Forbidden : message without destination amqp header['to_clusters']")
self.logger.error("message without destination amqp header['to_clusters']")
return False
# this instances of sr_shovel runs,
# for cluster : self.cluster
# alias for the cluster are : self.cluster_aliases
# it is a gateway for : self.gateway_for
# all these cluster names were put in list self.accept_msg_for_clusters
# The message's target clusters self.msg.to_clusters should be in
# the self.accept_msg_for_clusters list
        # if this cluster is a valid destination then one of the "to_clusters" pumps
# will be present in self.accept_msg_for_clusters
ok = False
for target in self.msg.to_clusters :
if not target in self.accept_msg_for_clusters : continue
ok = True
break
if not ok :
self.logger.warning("skipped : not for this cluster...")
return False
# invoke user defined on_message when provided
if self.on_message : return self.on_message(self)
return True
# =============
# __on_post__ posting of message
# =============
def __on_post__(self):
# same exchange or overwrite with config one ?
if self.post_exchange : self.msg.exchange = self.post_exchange
# invoke on_post when provided
if self.on_post :
ok = self.on_post(self)
if not ok: return ok
# should always be ok
ok = self.msg.publish( )
return ok
def overwrite_defaults(self):
# overwrite defaults
# the default settings in most cases :
# sarra receives directly from sources onto itself
# or it consumes message from another pump
# we cannot define a default broker exchange
# default broker and exchange None
self.broker = None
self.exchange = None
self.topic_prefix = None
# FIX ME report_exchange set to NONE
# instead of xreport and make it mandatory perhaps ?
# since it can be xreport or xs_remotepumpUsername ?
self.report_exchange = 'xreport'
# in most cases, sarra downloads and repost for itself.
# default post_broker and post_exchange are
self.post_broker = None
self.post_exchange = None
if hasattr(self,'manager'):
self.post_broker = self.manager
# Should there be accept/reject option used unmatch are accepted
self.accept_unmatch = True
# =============
# process message
# =============
def process_message(self):
self.logger.debug("Received %s '%s' %s" % (self.msg.topic,self.msg.notice,self.msg.hdrstr))
#=================================
# now message is complete : invoke __on_message__
#=================================
ok = self.__on_message__()
if not ok : return ok
#=================================
# publish the message
#=================================
self.__on_post__()
self.msg.report_publish(201,'Published')
return True
def run(self):
# present basic config
        self.logger.info("sr_shovel run")
# loop/process messages
self.connect()
while True :
try :
# consume message
ok, self.msg = self.consumer.consume()
if not ok : continue
# process message (ok or not... go to the next)
ok = self.process_message()
except:
(stype, svalue, tb) = sys.exc_info()
self.logger.error("Type: %s, Value: %s, ..." % (stype, svalue))
def reload(self):
self.logger.info("%s reload" % self.program_name)
self.close()
self.configure()
self.run()
def start(self):
self.logger.info("%s start" % self.program_name)
self.run()
def stop(self):
self.logger.info("%s stop" % self.program_name)
self.close()
os._exit(0)
# ===================================
# MAIN
# ===================================
def main():
action = None
args = None
config = None
if len(sys.argv) >= 2 :
action = sys.argv[-1]
if len(sys.argv) >= 3 :
config = sys.argv[-2]
args = sys.argv[1:-2]
shovel = sr_shovel(config,args)
if action == 'foreground' : shovel.foreground_parent()
elif action == 'reload' : shovel.reload_parent()
elif action == 'restart' : shovel.restart_parent()
elif action == 'start' : shovel.start_parent()
elif action == 'stop' : shovel.stop_parent()
elif action == 'status' : shovel.status_parent()
else :
shovel.logger.error("action unknown %s" % action)
sys.exit(1)
sys.exit(0)
# =========================================
# direct invocation
# =========================================
if __name__=="__main__":
main()
|
jemekite/Dougpool | refs/heads/master | p2pool/test/bitcoin/test_getwork.py | 275 | import unittest
from p2pool.bitcoin import getwork, data as bitcoin_data
class Test(unittest.TestCase):
def test_all(self):
cases = [
{
'target': '0000000000000000000000000000000000000000000000f2b944000000000000',
'midstate': '5982f893102dec03e374b472647c4f19b1b6d21ae4b2ac624f3d2f41b9719404',
'hash1': '00000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000010000',
'data': '0000000163930d52a5ffca79b29b95a659a302cd4e1654194780499000002274000000002e133d9e51f45bc0886d05252038e421e82bff18b67dc14b90d9c3c2f422cd5c4dd4598e1a44b9f200000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000'
},
{
'midstate' : 'f4a9b048c0cb9791bc94b13ee0eec21e713963d524fd140b58bb754dd7b0955f',
'data' : '000000019a1d7342fb62090bda686b22d90f9f73d0f5c418b9c980cd0000011a00000000680b07c8a2f97ecd831f951806857e09f98a3b81cdef1fa71982934fef8dc3444e18585d1a0abbcf00000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000',
'hash1' : '00000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000010000',
'target' : '0000000000000000000000000000000000000000000000cfbb0a000000000000',
'extrathing': 'hi!',
},
{
'data' : '000000019a1d7342fb62090bda686b22d90f9f73d0f5c418b9c980cd0000011a00000000680b07c8a2f97ecd831f951806857e09f98a3b81cdef1fa71982934fef8dc3444e18585d1a0abbcf00000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000',
'hash1' : '00000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000010000',
'target' : '0000000000000000000000000000000000000000000000cfbb0a000000000000',
'extrathing': 'hi!',
},
]
for case in cases:
ba = getwork.BlockAttempt.from_getwork(case)
extra = dict(case)
del extra['data'], extra['hash1'], extra['target']
extra.pop('midstate', None)
getwork_check = ba.getwork(**extra)
assert getwork_check == case or dict((k, v) for k, v in getwork_check.iteritems() if k != 'midstate') == case
case2s = [
getwork.BlockAttempt(
1,
0x148135e10208db85abb62754341a392eab1f186aab077a831cf7,
0x534ea08be1ab529f484369344b6d5423ef5a0767db9b3ebb4e182bbb67962520,
1305759879,
bitcoin_data.FloatingInteger.from_target_upper_bound(0x44b9f20000000000000000000000000000000000000000000000),
0x44b9f20000000000000000000000000000000000000000000000,
),
getwork.BlockAttempt(
1,
0x148135e10208db85abb62754341a392eab1f186aab077a831cf7,
0x534ea08be1ab529f484369344b6d5423ef5a0767db9b3ebb4e182bbb67962520,
1305759879,
bitcoin_data.FloatingInteger.from_target_upper_bound(0x44b9f20000000000000000000000000000000000000000000000),
432*2**230,
),
getwork.BlockAttempt(
1,
0x148135e10208db85abb62754341a392eab1f186aab077a831cf7,
0x534ea08be1ab529f484369344b6d5423ef5a0767db9b3ebb4e182bbb67962520,
1305759879,
bitcoin_data.FloatingInteger.from_target_upper_bound(0x44b9f20000000000000000000000000000000000000000000000),
7*2**240,
)
]
for case2 in case2s:
assert getwork.BlockAttempt.from_getwork(case2.getwork()) == case2
assert getwork.BlockAttempt.from_getwork(case2.getwork(ident='hi')) == case2
case2 = case2.update(previous_block=case2.previous_block - 10)
assert getwork.BlockAttempt.from_getwork(case2.getwork()) == case2
assert getwork.BlockAttempt.from_getwork(case2.getwork(ident='hi')) == case2
|
peastman/deepchem | refs/heads/master | deepchem/feat/tests/test_atomic_coordinates.py | 2 | """
Test atomic coordinates and neighbor lists.
"""
import os
import logging
import numpy as np
import unittest
from deepchem.utils import conformers
from deepchem.feat import AtomicCoordinates
from deepchem.feat import NeighborListAtomicCoordinates
from deepchem.feat import NeighborListComplexAtomicCoordinates
logger = logging.getLogger(__name__)
class TestAtomicCoordinates(unittest.TestCase):
"""
Test AtomicCoordinates.
"""
def setUp(self):
"""
Set up tests.
"""
smiles = 'CC(=O)OC1=CC=CC=C1C(=O)O'
from rdkit import Chem
mol = Chem.MolFromSmiles(smiles)
engine = conformers.ConformerGenerator(max_conformers=1)
self.mol = engine.generate_conformers(mol)
self.get_angstrom_coords = AtomicCoordinates()._featurize
assert self.mol.GetNumConformers() > 0
def test_atomic_coordinates(self):
"""
Simple test that atomic coordinates returns ndarray of right shape.
"""
N = self.mol.GetNumAtoms()
atomic_coords_featurizer = AtomicCoordinates()
coords = atomic_coords_featurizer._featurize(self.mol)
assert isinstance(coords, np.ndarray)
assert coords.shape == (N, 3)
def test_neighbor_list_shape(self):
"""
Simple test that Neighbor Lists have right shape.
"""
nblist_featurizer = NeighborListAtomicCoordinates()
N = self.mol.GetNumAtoms()
coords = self.get_angstrom_coords(self.mol)
nblist_featurizer = NeighborListAtomicCoordinates()
nblist = nblist_featurizer._featurize(self.mol)[1]
assert isinstance(nblist, dict)
assert len(nblist.keys()) == N
for (atom, neighbors) in nblist.items():
assert isinstance(atom, int)
assert isinstance(neighbors, list)
assert len(neighbors) <= N
    # Do a manual distance computation and make sure it matches the neighbor list
for i in range(N):
for j in range(N):
dist = np.linalg.norm(coords[i] - coords[j])
logger.info("Distance(%d, %d) = %f" % (i, j, dist))
if dist < nblist_featurizer.neighbor_cutoff and i != j:
assert j in nblist[i]
else:
assert j not in nblist[i]
def test_neighbor_list_extremes(self):
"""
Test Neighbor Lists with large/small boxes.
"""
N = self.mol.GetNumAtoms()
    # Test with a tiny cutoff (0.1 angstroms). There should be no neighbors in this case.
nblist_featurizer = NeighborListAtomicCoordinates(neighbor_cutoff=.1)
nblist = nblist_featurizer._featurize(self.mol)[1]
for atom in range(N):
assert len(nblist[atom]) == 0
# Test with cutoff 100 angstroms. Everything should be neighbors now.
nblist_featurizer = NeighborListAtomicCoordinates(neighbor_cutoff=100)
nblist = nblist_featurizer._featurize(self.mol)[1]
for atom in range(N):
assert len(nblist[atom]) == N - 1
def test_neighbor_list_max_num_neighbors(self):
"""
Test that neighbor lists return only max_num_neighbors.
"""
N = self.mol.GetNumAtoms()
max_num_neighbors = 1
nblist_featurizer = NeighborListAtomicCoordinates(max_num_neighbors)
nblist = nblist_featurizer._featurize(self.mol)[1]
for atom in range(N):
assert len(nblist[atom]) <= max_num_neighbors
# Do a manual distance computation and ensure that selected neighbor is
# closest since we set max_num_neighbors = 1
coords = self.get_angstrom_coords(self.mol)
for i in range(N):
closest_dist = np.inf
closest_nbr = None
for j in range(N):
if i == j:
continue
dist = np.linalg.norm(coords[i] - coords[j])
logger.info("Distance(%d, %d) = %f" % (i, j, dist))
if dist < closest_dist:
closest_dist = dist
closest_nbr = j
logger.info("Closest neighbor to %d is %d" % (i, closest_nbr))
logger.info("Distance: %f" % closest_dist)
if closest_dist < nblist_featurizer.neighbor_cutoff:
assert nblist[i] == [closest_nbr]
else:
assert nblist[i] == []
def test_neighbor_list_periodic(self):
"""Test building a neighbor list with periodic boundary conditions."""
cutoff = 4.0
box_size = np.array([10.0, 8.0, 9.0])
N = self.mol.GetNumAtoms()
coords = self.get_angstrom_coords(self.mol)
featurizer = NeighborListAtomicCoordinates(
neighbor_cutoff=cutoff, periodic_box_size=box_size)
neighborlist = featurizer._featurize(self.mol)[1]
expected_neighbors = [set() for i in range(N)]
for i in range(N):
for j in range(i):
delta = coords[i] - coords[j]
delta -= np.round(delta / box_size) * box_size
if np.linalg.norm(delta) < cutoff:
expected_neighbors[i].add(j)
expected_neighbors[j].add(i)
for i in range(N):
assert (set(neighborlist[i]) == expected_neighbors[i])
def test_complex_featurization_simple(self):
"""Test Neighbor List computation on protein-ligand complex."""
dir_path = os.path.dirname(os.path.realpath(__file__))
ligand_file = os.path.join(dir_path, "data/3zso_ligand_hyd.pdb")
protein_file = os.path.join(dir_path, "data/3zso_protein.pdb")
max_num_neighbors = 4
complex_featurizer = NeighborListComplexAtomicCoordinates(max_num_neighbors)
system_coords, system_neighbor_list = complex_featurizer._featurize(
(ligand_file, protein_file))
N = system_coords.shape[0]
assert len(system_neighbor_list.keys()) == N
for atom in range(N):
assert len(system_neighbor_list[atom]) <= max_num_neighbors
|
Jgarcia-IAS/SAT | refs/heads/master | openerp/addons-extra/odoo-pruebas/odoo-server/addons/account/tests/test_search.py | 204 | from openerp.tests.common import TransactionCase
class TestSearch(TransactionCase):
"""Tests for search on name_search (account.account)
The name search on account.account is quite complexe, make sure
we have all the correct results
"""
def setUp(self):
super(TestSearch, self).setUp()
cr, uid = self.cr, self.uid
self.account_model = self.registry('account.account')
self.account_type_model = self.registry('account.account.type')
ac_ids = self.account_type_model.search(cr, uid, [], limit=1)
self.atax = (int(self.account_model.create(cr, uid, dict(
name="Tax Received",
code="121",
user_type=ac_ids[0],
))), "121 Tax Received")
self.apurchase = (int(self.account_model.create(cr, uid, dict(
name="Purchased Stocks",
code="1101",
user_type=ac_ids[0],
))), "1101 Purchased Stocks")
self.asale = (int(self.account_model.create(cr, uid, dict(
name="Product Sales",
code="200",
user_type=ac_ids[0],
))), "200 Product Sales")
self.all_ids = [self.atax[0], self.apurchase[0], self.asale[0]]
def test_name_search(self):
cr, uid = self.cr, self.uid
atax_ids = self.account_model.name_search(cr, uid, name="Tax", operator='ilike', args=[('id', 'in', self.all_ids)])
self.assertEqual(set([self.atax[0]]), set([a[0] for a in atax_ids]), "name_search 'ilike Tax' should have returned Tax Received account only")
atax_ids = self.account_model.name_search(cr, uid, name="Tax", operator='not ilike', args=[('id', 'in', self.all_ids)])
self.assertEqual(set([self.apurchase[0], self.asale[0]]), set([a[0] for a in atax_ids]), "name_search 'not ilike Tax' should have returned all but Tax Received account")
apur_ids = self.account_model.name_search(cr, uid, name='1101', operator='ilike', args=[('id', 'in', self.all_ids)])
self.assertEqual(set([self.apurchase[0]]), set([a[0] for a in apur_ids]), "name_search 'ilike 1101' should have returned Purchased Stocks account only")
apur_ids = self.account_model.name_search(cr, uid, name='1101', operator='not ilike', args=[('id', 'in', self.all_ids)])
self.assertEqual(set([self.atax[0], self.asale[0]]), set([a[0] for a in apur_ids]), "name_search 'not ilike 1101' should have returned all but Purchased Stocks account")
asale_ids = self.account_model.name_search(cr, uid, name='200 Sales', operator='ilike', args=[('id', 'in', self.all_ids)])
self.assertEqual(set([self.asale[0]]), set([a[0] for a in asale_ids]), "name_search 'ilike 200 Sales' should have returned Product Sales account only")
asale_ids = self.account_model.name_search(cr, uid, name='200 Sales', operator='not ilike', args=[('id', 'in', self.all_ids)])
self.assertEqual(set([self.atax[0], self.apurchase[0]]), set([a[0] for a in asale_ids]), "name_search 'not ilike 200 Sales' should have returned all but Product Sales account")
asale_ids = self.account_model.name_search(cr, uid, name='Product Sales', operator='ilike', args=[('id', 'in', self.all_ids)])
self.assertEqual(set([self.asale[0]]), set([a[0] for a in asale_ids]), "name_search 'ilike Product Sales' should have returned Product Sales account only")
asale_ids = self.account_model.name_search(cr, uid, name='Product Sales', operator='not ilike', args=[('id', 'in', self.all_ids)])
self.assertEqual(set([self.atax[0], self.apurchase[0]]), set([a[0] for a in asale_ids]), "name_search 'not ilike Product Sales' should have returned all but Product Sales account")
|
chros73/rtorrent-ps-ch | refs/heads/master | tasks.py | 2 | # -*- coding: utf-8 -*-
#
# Project Tasks
#
from __future__ import print_function, unicode_literals
import os
import re
import time
import glob
import shutil
import subprocess
from invoke import task
SPHINX_AUTOBUILD_PORT = 8340
def watchdog_pid(ctx):
"""Get watchdog PID via ``netstat``."""
result = ctx.run('netstat -tulpn 2>/dev/null | grep 127.0.0.1:{:d}'
.format(SPHINX_AUTOBUILD_PORT), warn=True, pty=False)
pid = result.stdout.strip()
pid = pid.split()[-1] if pid else None
pid = pid.split('/', 1)[0] if pid and pid != '-' else None
return pid
@task
def docs(ctx):
"""Start watchdog to build the Sphinx docs."""
build_dir = 'docs/_build'
index_html = build_dir + '/html/index.html'
stop(ctx)
if os.path.exists(build_dir):
shutil.rmtree(build_dir)
print("\n*** Generating HTML doc ***\n")
ctx.run('builtin cd docs'
' && . {pwd}/.pyvenv/*/bin/activate'
' && nohup {pwd}/docs/Makefile SPHINXBUILD="sphinx-autobuild -p {port:d}'
' -i \'.*\' -i \'*.log\' -i \'*.png\' -i \'*.txt\'" html >autobuild.log 2>&1 &'
.format(port=SPHINX_AUTOBUILD_PORT, pwd=os.getcwd()), pty=False)
for i in range(25):
time.sleep(2.5)
pid = watchdog_pid(ctx)
if pid:
ctx.run("touch docs/index.rst")
ctx.run('ps {}'.format(pid), pty=False)
url = 'http://localhost:{port:d}/'.format(port=SPHINX_AUTOBUILD_PORT)
print("\n*** Open '{}' in your browser...".format(url))
break
@task
def stop(ctx):
"Stop Sphinx watchdog"
print("\n*** Stopping watchdog ***\n")
for i in range(4):
pid = watchdog_pid(ctx)
if not pid:
break
else:
if not i:
ctx.run('ps {}'.format(pid), pty=False)
ctx.run('kill {}'.format(pid), pty=False)
time.sleep(.5)
@task(
help={
'name': "name of a specific group of tests to run",
},
)
def test(ctx, name=''):
"""Run command integration tests."""
test_dir = 'tests/commands'
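    # Test-file layout, roughly (a hypothetical sketch -- the command and the
    # expected substrings below are made up):
    #
    #   # comment lines are skipped
    #   $ some-command --version
    #   expected substring…another expected substring
    #
    # Lines starting with '$' are run through the shell (with '; echo RC=$?'
    # appended); every other line lists substrings, separated by '…', that must
    # all appear in that command's output.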
failures = 0
if name:
assert os.path.exists(os.path.join(test_dir, name + '.txt')), \
"Named test file does not exist!"
for test_file in glob.glob(os.path.join(test_dir, name + '.txt' if name else '*.txt')):
print("--- Running tests in '{}'...".format(test_file))
with open(test_file, 'r') as handle:
cmd, output = None, None
for line in handle:
line = line.strip()
if not line or line.startswith('#'):
continue
if line.startswith('$'):
cmd = (line[1:].strip()
.replace('as_bool', "sed -e 's/^0$/‹false›/' -e 's/^1$/‹true›/'"))
output = subprocess.check_output(cmd + '; echo RC=$?; exit 0',
shell=True, stderr=subprocess.STDOUT)
output = output.decode('utf-8')
elif all(x.strip() in output for x in line.split('…')):
print('.', end='')
else:
failures += 1
print('\nFAIL: »{l}« not found in output of »{cmd}«\n{d}\n{o}\n{d}\n'
.format(l=line, cmd=cmd, o=output.rstrip(), d='~'*78))
print('\n')
print('\n☹ ☹ ☹ {} TEST(S) FAILED. ☹ ☹ ☹ '.format(failures) if failures else '\n☺ ☺ ☺ ALL OK. ☺ ☺ ☺ ')
@task
def cmd_docs(ctx):
"""Generated customc command docs – invoke cmd_docs >docs/include-commands.rst"""
output = subprocess.check_output(
"egrep -nH '^ CMD2?_' patches/ui_pyroscope.cc patches/command_pyroscope.cc"
" | cut -f2 -d'\"' | sort ; exit 0", shell=True, stderr=subprocess.STDOUT)
url = 'https://rtorrent-docs.readthedocs.io/en/latest/cmd-ref.html'
commands = ['ui.color.alarm…title', 'ui.color.*.index', 'ui.color.*.set', 'ui.column.render']
commands.extend(output.decode('ascii').splitlines())
commands.sort()
print('.. ' + cmd_docs.__doc__)
print('')
for group in ('math.*', 'string.*', 'convert.*', 'system.*', 'd.*', 'network.*', 'ui.*', ''):
group_commands = []
for idx, name in reversed(list(enumerate(commands))):
if name.startswith(group.rstrip('*')):
group_commands.insert(0, name)
del commands[idx]
print('.. rubric:: `{}` Commands'.format(group or 'Other'))
print('')
print('.. hlist::')
print(' :columns: 3')
print('')
for name in group_commands:
print(' * `{}`_'.format(name))
print('')
for name in group_commands:
slug = re.sub(r'[^a-z0-9]+', '-', name)
print('.. _`{name}`: {url}#term-{slug}'.format(name=name, slug=slug, url=url)
.replace('ui-color-alarm-title', 'ui-color-custom1-9')
.replace('ui-color-index', 'ui-color-custom1-9')
.replace('ui-color-set', 'ui-color-custom1-9')
)
print('')
print('')
assert not commands, "Not all commands added!"
|
cloudcopy/seahub | refs/heads/master | seahub/signals.py | 5 | import django.dispatch
# Use org_id = -1 if it's not an org repo
repo_created = django.dispatch.Signal(providing_args=["org_id", "creator", "repo_id", "repo_name"])
repo_deleted = django.dispatch.Signal(providing_args=["org_id", "usernames", "repo_owner", "repo_id", "repo_name"])
share_file_to_user_successful = django.dispatch.Signal(providing_args=["priv_share_obj"])
upload_file_successful = django.dispatch.Signal(providing_args=["repo_id", "file_path", "owner"])
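# A minimal receiver sketch (illustrative only; the handler name is hypothetical):
#   def on_repo_created(sender, **kwargs):
#       pass  # e.g. send a notification for kwargs['repo_id']
#   repo_created.connect(on_repo_created)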
|
kawamon/hue | refs/heads/master | apps/filebrowser/src/filebrowser/templatetags/__init__.py | 646 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
hnakamur/saklient.python | refs/heads/master | saklient/cloud/facility.py | 1 | # -*- coding:utf-8 -*-
from .models.model_region import Model_Region
from .client import Client
from ..util import Util
import saklient
# module saklient.cloud.facility
class Facility:
    ## A class that gathers the models used to access facility information.
# (instance field) _region
## @return {saklient.cloud.models.model_region.Model_Region}
def get_region(self):
return self._region
    ## Region information.
region = property(get_region, None, None)
## @ignore
# @param {saklient.cloud.client.Client} client
def __init__(self, client):
Util.validate_type(client, "saklient.cloud.client.Client")
self._region = Model_Region(client)
|
alpayOnal/flj | refs/heads/master | flj/settings.py | 2 | """
Django settings for flj project.
Generated by 'django-admin startproject' using Django 1.9.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-zdpa7z9rl%-^x)tn((&wt!l3^cg%gf4+jjmq%sgb6njib_qf5'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
"posts.apps.PostsConfig",
"rest_framework_swagger",
"debug_toolbar",
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'flj.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'flj.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.mysql',
'NAME': 'flj',
'USER': 'root',
'PASSWORD': 'root',
'HOST': '0.0.0.0', # Or an IP Address that your DB is hosted on
'PORT': '3306',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.BasicAuthentication',
"posts.backends.TokenAuthREST",
),
"EXCEPTION_HANDLER": "posts.helpers.my_exception_handler",
}
AUTH_PROFILE_MODULE = 'posts.models.UserProfile'
AUTHENTICATION_BACKENDS = [
"django.contrib.auth.backends.ModelBackend",
"posts.backends.TokenAuth"
]
GEOPOSITION_GOOGLE_MAPS_API_KEY = "ff"
|
sgallagher/reviewboard | refs/heads/master | reviewboard/reviews/urls.py | 2 | from __future__ import unicode_literals
from django.conf.urls import include, patterns, url
from reviewboard.reviews.views import (ReviewsDiffFragmentView,
ReviewsDiffViewerView,
ReviewsDownloadPatchErrorBundleView)
download_diff_urls = patterns(
'reviewboard.reviews.views',
url(r'^orig/$', 'download_orig_file', name='download-orig-file'),
url(r'^new/$', 'download_modified_file', name='download-modified-file'),
)
diff_fragment_urls = patterns(
'',
url('^$', ReviewsDiffFragmentView.as_view(),
name='view-diff-fragment'),
url('^patch-error-bundle/$',
ReviewsDownloadPatchErrorBundleView.as_view(),
name='patch-error-bundle'),
)
diffviewer_revision_urls = patterns(
'reviewboard.reviews.views',
url(r'^$',
ReviewsDiffViewerView.as_view(),
name="view-diff-revision"),
url(r'^raw/$',
'raw_diff',
name='raw-diff-revision'),
url(r'^fragment/(?P<filediff_id>[0-9]+)/'
r'(chunk/(?P<chunk_index>[0-9]+)/)?',
include(diff_fragment_urls)),
url(r'^download/(?P<filediff_id>[0-9]+)/', include(download_diff_urls)),
)
diffviewer_interdiff_urls = patterns(
'reviewboard.reviews.views',
url(r'^$',
ReviewsDiffViewerView.as_view(),
name="view-interdiff"),
url(r'^fragment/(?P<filediff_id>[0-9]+)(-(?P<interfilediff_id>[0-9]+))?/'
r'(chunk/(?P<chunk_index>[0-9]+)/)?',
include(diff_fragment_urls)),
)
diffviewer_urls = patterns(
'reviewboard.reviews.views',
url(r'^$', ReviewsDiffViewerView.as_view(), name="view-diff"),
url(r'^raw/$', 'raw_diff', name='raw-diff'),
url(r'^(?P<revision>[0-9]+)/', include(diffviewer_revision_urls)),
url(r'^(?P<revision>[0-9]+)-(?P<interdiff_revision>[0-9]+)/',
include(diffviewer_interdiff_urls)),
)
bugs_urls = patterns(
'reviewboard.reviews.views',
url(r'^infobox/$', 'bug_infobox', name='bug_infobox'),
url(r'^$', 'bug_url', name='bug_url'),
)
review_request_urls = patterns(
'reviewboard.reviews.views',
# Review request detail
url(r'^$', 'review_detail', name="review-request-detail"),
# Review request diffs
url(r'^diff/', include(diffviewer_urls)),
# Fragments
url(r'^fragments/diff-comments/(?P<comment_ids>[0-9,]+)/$',
'comment_diff_fragments'),
# File attachments
url(r'^file/(?P<file_attachment_id>[0-9]+)/$',
'review_file_attachment',
name='file-attachment'),
url(r'^file/(?P<file_attachment_diff_id>[0-9]+)'
r'-(?P<file_attachment_id>[0-9]+)/$',
'review_file_attachment',
name='file-attachment'),
# Screenshots
url(r'^s/(?P<screenshot_id>[0-9]+)/$',
'view_screenshot',
name='screenshot'),
# Bugs
url(r'^bugs/(?P<bug_id>[A-Za-z0-9\-_.]+)/', include(bugs_urls)),
# E-mail previews
url(r'^preview-email/(?P<format>(text|html))/$',
'preview_review_request_email',
name='preview-review-request-email'),
url(r'^changes/(?P<changedesc_id>[0-9]+)/preview-email/'
r'(?P<format>(text|html))/$',
'preview_review_request_email',
name='preview-review-request-email'),
url(r'^reviews/(?P<review_id>[0-9]+)/preview-email/'
r'(?P<format>(text|html))/$',
'preview_review_email',
name='preview-review-email'),
url(r'^reviews/(?P<review_id>[0-9]+)/replies/(?P<reply_id>[0-9]+)/'
r'preview-email/(?P<format>(text|html))/$',
'preview_reply_email',
name='preview-review-reply-email'),
)
urlpatterns = patterns(
'reviewboard.reviews.views',
url(r'^new/$', 'new_review_request', name="new-review-request"),
url(r'^(?P<review_request_id>[0-9]+)/', include(review_request_urls)),
)
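# Illustrative resolution example (assumes the site mounts these patterns under
# /r/<review_request_id>/ as in a stock Review Board install; the numbers are made up):
# a request for /r/42/diff/3/fragment/7/chunk/2/ is handled by
# ReviewsDiffFragmentView under the 'view-diff-fragment' name, with revision=3,
# filediff_id=7 and chunk_index=2 captured from the URL.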
|
ict-felix/stack | refs/heads/master | vt_manager_kvm/src/python/vt_manager_kvm/communication/sfa/methods/Remove.py | 1 | from vt_manager_kvm.communication.sfa.util.xrn import Xrn
from vt_manager_kvm.communication.sfa.util.method import Method
from vt_manager_kvm.communication.sfa.trust.credential import Credential
from vt_manager_kvm.communication.sfa.util.parameter import Parameter, Mixed
class Remove(Method):
"""
Remove an object from the registry. If the object represents a PLC object,
then the PLC records will also be removed.
@param cred credential string
@param type record type
@param xrn human readable name of record to remove (hrn or urn)
@return 1 if successful, faults otherwise
"""
interfaces = ['registry']
accepts = [
Parameter(str, "Human readable name of slice to instantiate (hrn or urn)"),
Mixed(Parameter(str, "Credential string"),
Parameter(type([str]), "List of credentials")),
Mixed(Parameter(str, "Record type"),
Parameter(None, "Type not specified")),
]
returns = Parameter(int, "1 if successful")
def call(self, xrn, creds, type):
xrn=Xrn(xrn,type=type)
# validate the cred
valid_creds = self.api.auth.checkCredentials(creds, "remove")
self.api.auth.verify_object_permission(xrn.get_hrn())
#log the call
origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
self.api.logger.info("interface: %s\tmethod-name: %s\tcaller-hrn: %s\ttarget-urn: %s"%(
self.api.interface, self.name, origin_hrn, xrn.get_urn()))
return self.api.manager.Remove(self.api, xrn)
|
mikeing2001/LoopDetection | refs/heads/master | pox/forwarding/l3_learning.py | 3 | # Copyright 2011,2012 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
"""
A stupid L3 switch
For each switch:
1) Keep a table that maps IP addresses to MAC addresses and switch ports.
Stock this table using information from ARP and IP packets.
2) When you see an ARP query, try to answer it using information in the table
from step 1. If the info in the table is old, just flood the query.
3) Flood all other ARPs.
4) When you see an IP packet, if you know the destination port (because it's
in the table from step 1), install a flow for it.
"""
from pox.core import core
import pox
log = core.getLogger()
from pox.lib.packet.ethernet import ethernet, ETHER_BROADCAST
from pox.lib.packet.ipv4 import ipv4
from pox.lib.packet.arp import arp
from pox.lib.addresses import IPAddr, EthAddr
from pox.lib.util import str_to_bool, dpidToStr
from pox.lib.recoco import Timer
import pox.openflow.libopenflow_01 as of
from pox.lib.revent import *
import time
# Timeout for flows
FLOW_IDLE_TIMEOUT = 10
# Timeout for ARP entries
ARP_TIMEOUT = 60 * 2
# Maximum number of packet to buffer on a switch for an unknown IP
MAX_BUFFERED_PER_IP = 5
# Maximum time to hang on to a buffer for an unknown IP in seconds
MAX_BUFFER_TIME = 5
class Entry (object):
"""
Not strictly an ARP entry.
We use the port to determine which port to forward traffic out of.
We use the MAC to answer ARP replies.
We use the timeout so that if an entry is older than ARP_TIMEOUT, we
flood the ARP request rather than try to answer it ourselves.
"""
def __init__ (self, port, mac):
self.timeout = time.time() + ARP_TIMEOUT
self.port = port
self.mac = mac
def __eq__ (self, other):
if type(other) == tuple:
return (self.port,self.mac)==other
else:
return (self.port,self.mac)==(other.port,other.mac)
def __ne__ (self, other):
return not self.__eq__(other)
def isExpired (self):
if self.port == of.OFPP_NONE: return False
return time.time() > self.timeout
def dpid_to_mac (dpid):
return EthAddr("%012x" % (dpid & 0xffFFffFFffFF,))
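# For reference (behaviour implied by the format string above; the value is an
# assumed example): dpid_to_mac(1) returns EthAddr("000000000001"), i.e. the MAC
# 00:00:00:00:00:01, after masking the DPID to 48 bits.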
class l3_switch (EventMixin):
def __init__ (self, fakeways = [], arp_for_unknowns = False):
# These are "fake gateways" -- we'll answer ARPs for them with MAC
# of the switch they're connected to.
self.fakeways = set(fakeways)
# If this is true and we see a packet for an unknown
# host, we'll ARP for it.
self.arp_for_unknowns = arp_for_unknowns
# (dpid,IP) -> expire_time
# We use this to keep from spamming ARPs
self.outstanding_arps = {}
# (dpid,IP) -> [(expire_time,buffer_id,in_port), ...]
# These are buffers we've gotten at this datapath for this IP which
# we can't deliver because we don't know where they go.
self.lost_buffers = {}
# For each switch, we map IP addresses to Entries
self.arpTable = {}
# This timer handles expiring stuff
self._expire_timer = Timer(5, self._handle_expiration, recurring=True)
self.listenTo(core)
def _handle_expiration (self):
# Called by a timer so that we can remove old items.
empty = []
for k,v in self.lost_buffers.iteritems():
dpid,ip = k
for item in list(v):
expires_at,buffer_id,in_port = item
if expires_at < time.time():
# This packet is old. Tell this switch to drop it.
v.remove(item)
po = of.ofp_packet_out(buffer_id = buffer_id, in_port = in_port)
core.openflow.sendToDPID(dpid, po)
if len(v) == 0: empty.append(k)
# Remove empty buffer bins
for k in empty:
del self.lost_buffers[k]
def _send_lost_buffers (self, dpid, ipaddr, macaddr, port):
"""
We may have "lost" buffers -- packets we got but didn't know
where to send at the time. We may know now. Try and see.
"""
if (dpid,ipaddr) in self.lost_buffers:
# Yup!
bucket = self.lost_buffers[(dpid,ipaddr)]
del self.lost_buffers[(dpid,ipaddr)]
log.debug("Sending %i buffered packets to %s from %s"
% (len(bucket),ipaddr,dpidToStr(dpid)))
for _,buffer_id,in_port in bucket:
po = of.ofp_packet_out(buffer_id=buffer_id,in_port=in_port)
po.actions.append(of.ofp_action_dl_addr.set_dst(macaddr))
po.actions.append(of.ofp_action_output(port = port))
core.openflow.sendToDPID(dpid, po)
def _handle_GoingUpEvent (self, event):
self.listenTo(core.openflow)
log.debug("Up...")
def _handle_PacketIn (self, event):
dpid = event.connection.dpid
inport = event.port
packet = event.parsed
if not packet.parsed:
log.warning("%i %i ignoring unparsed packet", dpid, inport)
return
if dpid not in self.arpTable:
# New switch -- create an empty table
self.arpTable[dpid] = {}
for fake in self.fakeways:
self.arpTable[dpid][IPAddr(fake)] = Entry(of.OFPP_NONE,
dpid_to_mac(dpid))
if packet.type == ethernet.LLDP_TYPE:
# Ignore LLDP packets
return
if isinstance(packet.next, ipv4):
log.debug("%i %i IP %s => %s", dpid,inport,
packet.next.srcip,packet.next.dstip)
# Send any waiting packets...
self._send_lost_buffers(dpid, packet.next.srcip, packet.src, inport)
# Learn or update port/MAC info
if packet.next.srcip in self.arpTable[dpid]:
if self.arpTable[dpid][packet.next.srcip] != (inport, packet.src):
log.info("%i %i RE-learned %s", dpid,inport,packet.next.srcip)
else:
log.debug("%i %i learned %s", dpid,inport,str(packet.next.srcip))
self.arpTable[dpid][packet.next.srcip] = Entry(inport, packet.src)
# Try to forward
dstaddr = packet.next.dstip
if dstaddr in self.arpTable[dpid]:
# We have info about what port to send it out on...
prt = self.arpTable[dpid][dstaddr].port
mac = self.arpTable[dpid][dstaddr].mac
if prt == inport:
log.warning("%i %i not sending packet for %s back out of the " +
"input port" % (dpid, inport, str(dstaddr)))
else:
log.debug("%i %i installing flow for %s => %s out port %i"
% (dpid, inport, packet.next.srcip, dstaddr, prt))
actions = []
actions.append(of.ofp_action_dl_addr.set_dst(mac))
actions.append(of.ofp_action_output(port = prt))
match = of.ofp_match.from_packet(packet, inport)
match.dl_src = None # Wildcard source MAC
msg = of.ofp_flow_mod(command=of.OFPFC_ADD,
idle_timeout=FLOW_IDLE_TIMEOUT,
hard_timeout=of.OFP_FLOW_PERMANENT,
buffer_id=event.ofp.buffer_id,
actions=actions,
match=of.ofp_match.from_packet(packet,
inport))
event.connection.send(msg.pack())
elif self.arp_for_unknowns:
# We don't know this destination.
# First, we track this buffer so that we can try to resend it later
# if we learn the destination, second we ARP for the destination,
# which should ultimately result in it responding and us learning
# where it is
# Add to tracked buffers
if (dpid,dstaddr) not in self.lost_buffers:
self.lost_buffers[(dpid,dstaddr)] = []
bucket = self.lost_buffers[(dpid,dstaddr)]
entry = (time.time() + MAX_BUFFER_TIME,event.ofp.buffer_id,inport)
bucket.append(entry)
while len(bucket) > MAX_BUFFERED_PER_IP: del bucket[0]
# Expire things from our outstanding ARP list...
self.outstanding_arps = {k:v for k,v in
self.outstanding_arps.iteritems() if v > time.time()}
# Check if we've already ARPed recently
if (dpid,dstaddr) in self.outstanding_arps:
# Oop, we've already done this one recently.
return
# And ARP...
self.outstanding_arps[(dpid,dstaddr)] = time.time() + 4
r = arp()
r.hwtype = r.HW_TYPE_ETHERNET
r.prototype = r.PROTO_TYPE_IP
r.hwlen = 6
r.protolen = r.protolen
r.opcode = r.REQUEST
r.hwdst = ETHER_BROADCAST
r.protodst = dstaddr
r.hwsrc = packet.src
r.protosrc = packet.next.srcip
e = ethernet(type=ethernet.ARP_TYPE, src=packet.src,
dst=ETHER_BROADCAST)
e.set_payload(r)
log.debug("%i %i ARPing for %s on behalf of %s" % (dpid, inport,
str(r.protodst), str(r.protosrc)))
msg = of.ofp_packet_out()
msg.data = e.pack()
msg.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD))
msg.in_port = inport
event.connection.send(msg)
elif isinstance(packet.next, arp):
a = packet.next
log.debug("%i %i ARP %s %s => %s", dpid, inport,
{arp.REQUEST:"request",arp.REPLY:"reply"}.get(a.opcode,
'op:%i' % (a.opcode,)), str(a.protosrc), str(a.protodst))
if a.prototype == arp.PROTO_TYPE_IP:
if a.hwtype == arp.HW_TYPE_ETHERNET:
if a.protosrc != 0:
# Learn or update port/MAC info
if a.protosrc in self.arpTable[dpid]:
if self.arpTable[dpid][a.protosrc] != (inport, packet.src):
log.info("%i %i RE-learned %s", dpid,inport,str(a.protosrc))
else:
log.debug("%i %i learned %s", dpid,inport,str(a.protosrc))
self.arpTable[dpid][a.protosrc] = Entry(inport, packet.src)
# Send any waiting packets...
self._send_lost_buffers(dpid, a.protosrc, packet.src, inport)
if a.opcode == arp.REQUEST:
# Maybe we can answer
if a.protodst in self.arpTable[dpid]:
# We have an answer...
if not self.arpTable[dpid][a.protodst].isExpired():
# .. and it's relatively current, so we'll reply ourselves
r = arp()
r.hwtype = a.hwtype
r.prototype = a.prototype
r.hwlen = a.hwlen
r.protolen = a.protolen
r.opcode = arp.REPLY
r.hwdst = a.hwsrc
r.protodst = a.protosrc
r.protosrc = a.protodst
r.hwsrc = self.arpTable[dpid][a.protodst].mac
e = ethernet(type=packet.type, src=dpid_to_mac(dpid), dst=a.hwsrc)
e.set_payload(r)
log.debug("%i %i answering ARP for %s" % (dpid, inport,
str(r.protosrc)))
msg = of.ofp_packet_out()
msg.data = e.pack()
msg.actions.append(of.ofp_action_output(port =
of.OFPP_IN_PORT))
msg.in_port = inport
event.connection.send(msg)
return
# Didn't know how to answer or otherwise handle this ARP, so just flood it
log.debug("%i %i flooding ARP %s %s => %s" % (dpid, inport,
{arp.REQUEST:"request",arp.REPLY:"reply"}.get(a.opcode,
'op:%i' % (a.opcode,)), str(a.protosrc), str(a.protodst)))
msg = of.ofp_packet_out(in_port = inport, action = of.ofp_action_output(port = of.OFPP_FLOOD))
if event.ofp.buffer_id == of.NO_BUFFER:
# Try sending the (probably incomplete) raw data
msg.data = event.data
else:
msg.buffer_id = event.ofp.buffer_id
event.connection.send(msg.pack())
def launch (fakeways="", arp_for_unknowns=None):
fakeways = fakeways.replace(","," ").split()
fakeways = [IPAddr(x) for x in fakeways]
if arp_for_unknowns is None:
arp_for_unknowns = len(fakeways) > 0
else:
arp_for_unknowns = str_to_bool(arp_for_unknowns)
core.registerNew(l3_switch, fakeways, arp_for_unknowns)
|
nikitabiradar/student_registration | refs/heads/master | janastu/lib/python2.7/site-packages/setuptools/command/test.py | 20 | from distutils.errors import DistutilsOptionError
from unittest import TestLoader
import sys
from pkg_resources import (resource_listdir, resource_exists, normalize_path,
working_set, _namespace_packages,
add_activation_listener, require, EntryPoint)
from setuptools import Command
from setuptools.compat import PY3
from setuptools.py31compat import unittest_main
class ScanningLoader(TestLoader):
def loadTestsFromModule(self, module, pattern=None):
"""Return a suite of all tests cases contained in the given module
If the module is a package, load tests from all the modules in it.
If the module has an ``additional_tests`` function, call it and add
the return value to the tests.
"""
tests = []
tests.append(TestLoader.loadTestsFromModule(self, module))
if hasattr(module, "additional_tests"):
tests.append(module.additional_tests())
if hasattr(module, '__path__'):
for file in resource_listdir(module.__name__, ''):
if file.endswith('.py') and file != '__init__.py':
submodule = module.__name__ + '.' + file[:-3]
else:
if resource_exists(module.__name__, file + '/__init__.py'):
submodule = module.__name__ + '.' + file
else:
continue
tests.append(self.loadTestsFromName(submodule))
if len(tests) != 1:
return self.suiteClass(tests)
else:
return tests[0] # don't create a nested suite for only one return
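# A hedged sketch of how this loader is selected: it is the default used by the
# `test` command below, and a project can also name it explicitly in setup.py
# (the project and suite names here are hypothetical):
#   setup(
#       name='example',
#       test_suite='example.tests',
#       test_loader='setuptools.command.test:ScanningLoader',
#   )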
class test(Command):
"""Command to run unit tests after in-place build"""
description = "run unit tests after in-place build"
user_options = [
('test-module=', 'm', "Run 'test_suite' in specified module"),
('test-suite=', 's',
"Test suite to run (e.g. 'some_module.test_suite')"),
('test-runner=', 'r', "Test runner to use"),
]
def initialize_options(self):
self.test_suite = None
self.test_module = None
self.test_loader = None
self.test_runner = None
def finalize_options(self):
if self.test_suite and self.test_module:
msg = "You may specify a module or a suite, but not both"
raise DistutilsOptionError(msg)
if self.test_suite is None:
if self.test_module is None:
self.test_suite = self.distribution.test_suite
else:
self.test_suite = self.test_module + ".test_suite"
if self.test_loader is None:
self.test_loader = getattr(self.distribution, 'test_loader', None)
if self.test_loader is None:
self.test_loader = "setuptools.command.test:ScanningLoader"
if self.test_runner is None:
self.test_runner = getattr(self.distribution, 'test_runner', None)
@property
def test_args(self):
return list(self._test_args())
def _test_args(self):
if self.verbose:
yield '--verbose'
if self.test_suite:
yield self.test_suite
def with_project_on_sys_path(self, func):
with_2to3 = PY3 and getattr(self.distribution, 'use_2to3', False)
if with_2to3:
# If we run 2to3 we can not do this inplace:
# Ensure metadata is up-to-date
self.reinitialize_command('build_py', inplace=0)
self.run_command('build_py')
bpy_cmd = self.get_finalized_command("build_py")
build_path = normalize_path(bpy_cmd.build_lib)
# Build extensions
self.reinitialize_command('egg_info', egg_base=build_path)
self.run_command('egg_info')
self.reinitialize_command('build_ext', inplace=0)
self.run_command('build_ext')
else:
# Without 2to3 inplace works fine:
self.run_command('egg_info')
# Build extensions in-place
self.reinitialize_command('build_ext', inplace=1)
self.run_command('build_ext')
ei_cmd = self.get_finalized_command("egg_info")
old_path = sys.path[:]
old_modules = sys.modules.copy()
try:
sys.path.insert(0, normalize_path(ei_cmd.egg_base))
working_set.__init__()
add_activation_listener(lambda dist: dist.activate())
require('%s==%s' % (ei_cmd.egg_name, ei_cmd.egg_version))
func()
finally:
sys.path[:] = old_path
sys.modules.clear()
sys.modules.update(old_modules)
working_set.__init__()
def run(self):
if self.distribution.install_requires:
self.distribution.fetch_build_eggs(
self.distribution.install_requires)
if self.distribution.tests_require:
self.distribution.fetch_build_eggs(self.distribution.tests_require)
cmd = ' '.join(self._argv)
if self.dry_run:
self.announce('skipping "%s" (dry run)' % cmd)
else:
self.announce('running "%s"' % cmd)
self.with_project_on_sys_path(self.run_tests)
def run_tests(self):
# Purge modules under test from sys.modules. The test loader will
# re-import them from the build location. Required when 2to3 is used
# with namespace packages.
if PY3 and getattr(self.distribution, 'use_2to3', False):
module = self.test_suite.split('.')[0]
if module in _namespace_packages:
del_modules = []
if module in sys.modules:
del_modules.append(module)
module += '.'
for name in sys.modules:
if name.startswith(module):
del_modules.append(name)
list(map(sys.modules.__delitem__, del_modules))
unittest_main(
None, None, self._argv,
testLoader=self._resolve_as_ep(self.test_loader),
testRunner=self._resolve_as_ep(self.test_runner),
)
@property
def _argv(self):
return ['unittest'] + self.test_args
@staticmethod
def _resolve_as_ep(val):
"""
Load the indicated attribute value, called, as if it were
specified as an entry point.
"""
if val is None:
return
parsed = EntryPoint.parse("x=" + val)
return parsed.resolve()()
|
apache/bloodhound | refs/heads/trunk | bloodhound_multiproduct/tests/db/cursor.py | 2 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for multiproduct/dbcursor.py"""
import unittest
from multiproduct.dbcursor import BloodhoundProductSQLTranslate, SKIP_TABLES, TRANSLATE_TABLES, PRODUCT_COLUMN
# Test case data, each section consists of list of tuples of original and correctly translated SQL statements
data = {
# non-translated SELECTs
'system_select_nontranslated' : [
(
"""SELECT id,
name,
value
FROM repository
WHERE name IN ('alias',
'description',
'dir',
'hidden',
'name',
'type',
'url')""",
"""SELECT id,
name,
value
FROM repository
WHERE name IN ('alias',
'description',
'dir',
'hidden',
'name',
'type',
'url')"""
),
],
# translated SELECTs
'system_select_translated' : [
(
"""SELECT TYPE, id,
filename,
time,
description,
author
FROM attachment
WHERE time > %s
AND time < %s
AND TYPE = %s""",
"""SELECT TYPE, id,
filename,
time,
description,
author
FROM (SELECT * FROM attachment WHERE product='PRODUCT') AS attachment
WHERE time > %s
AND time < %s
AND TYPE = %s"""
),
(
"""SELECT name,
due,
completed,
description
FROM milestone
WHERE name=%s""",
"""SELECT name,
due,
completed,
description
FROM (SELECT * FROM milestone WHERE product='PRODUCT') AS milestone
WHERE name=%s"""
),
(
"""SELECT COALESCE(component, ''),
count(COALESCE(component, ''))
FROM ticket
GROUP BY COALESCE(component, '')""",
"""SELECT COALESCE(component, ''),
count(COALESCE(component, ''))
FROM (SELECT * FROM ticket WHERE product='PRODUCT') AS ticket
GROUP BY COALESCE(component, '')"""
),
(
"""SELECT id, time, reporter, TYPE, summary,
description
FROM ticket
WHERE time>=%s
AND time<=%s""",
"""SELECT id, time, reporter, TYPE, summary,
description
FROM (SELECT * FROM ticket WHERE product='PRODUCT') AS ticket
WHERE time>=%s
AND time<=%s"""
),
(
"""SELECT t.id,
tc.time,
tc.author,
t.type,
t.summary,
tc.field,
tc.oldvalue,
tc.newvalue
FROM ticket_change tc
INNER JOIN ticket t ON t.id = tc.ticket
AND tc.time>=1351375199999999
AND tc.time<=1354057199999999
ORDER BY tc.time""",
"""SELECT t.id,
tc.time,
tc.author,
t.type,
t.summary,
tc.field,
tc.oldvalue,
tc.newvalue
FROM (SELECT * FROM ticket_change WHERE product='PRODUCT') AS tc
INNER JOIN (SELECT * FROM ticket WHERE product='PRODUCT') AS t ON t.id = tc.ticket
AND tc.time>=1351375199999999
AND tc.time<=1354057199999999
ORDER BY tc.time"""
),
(
"""SELECT COUNT(*)
FROM
(SELECT t.id AS id,
t.summary AS summary,
t.owner AS OWNER,
t.status AS status,
t.priority AS priority,
t.milestone AS milestone,
t.time AS time,
t.changetime AS changetime,
priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.type='priority'
AND priority.name=priority)
LEFT OUTER JOIN milestone ON (milestone.name=milestone)
WHERE ((COALESCE(t.status,'')!=%s)
AND (COALESCE(t.OWNER,'')=%s))
ORDER BY COALESCE(t.milestone,'')='',
COALESCE(milestone.completed,0)=0,
milestone.completed,
COALESCE(milestone.due,0)=0,
milestone.due,
t.milestone,
COALESCE(priority.value,'')='' DESC,CAST(priority.value AS integer) DESC,t.id) AS x""",
"""SELECT COUNT(*)
FROM
(SELECT t.id AS id,
t.summary AS summary,
t.owner AS OWNER,
t.status AS status,
t.priority AS priority,
t.milestone AS milestone,
t.time AS time,
t.changetime AS changetime,
priority.value AS priority_value
FROM (SELECT * FROM ticket WHERE product='PRODUCT') AS t
LEFT OUTER JOIN (SELECT * FROM enum WHERE product='PRODUCT') AS priority ON (priority.type='priority'
AND priority.name=priority)
LEFT OUTER JOIN (SELECT * FROM milestone WHERE product='PRODUCT') AS milestone ON (milestone.name=milestone)
WHERE ((COALESCE(t.status,'')!=%s)
AND (COALESCE(t.OWNER,'')=%s))
ORDER BY COALESCE(t.milestone,'')='',
COALESCE(milestone.completed,0)=0,
milestone.completed,
COALESCE(milestone.due,0)=0,
milestone.due,
t.milestone,
COALESCE(priority.value,'')='' DESC,CAST(priority.value AS integer) DESC,t.id) AS x"""
),
(
"""SELECT t.id AS id,
t.summary AS summary,
t.owner AS OWNER,
t.status AS status,
t.priority AS priority,
t.milestone AS milestone,
t.time AS time,
t.changetime AS changetime,
priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.type='priority'
AND priority.name=priority)
LEFT OUTER JOIN milestone ON (milestone.name=milestone)
WHERE ((COALESCE(t.status,'')!=%s)
AND (COALESCE(t.OWNER,'')=%s))
ORDER BY COALESCE(t.milestone,'')='',
COALESCE(milestone.completed,0)=0,
milestone.completed,
COALESCE(milestone.due,0)=0,
milestone.due,
t.milestone,
COALESCE(priority.value,'')='' DESC,
CAST(priority.value AS integer) DESC,t.id""",
"""SELECT t.id AS id,
t.summary AS summary,
t.owner AS OWNER,
t.status AS status,
t.priority AS priority,
t.milestone AS milestone,
t.time AS time,
t.changetime AS changetime,
priority.value AS priority_value
FROM (SELECT * FROM ticket WHERE product='PRODUCT') AS t
LEFT OUTER JOIN (SELECT * FROM enum WHERE product='PRODUCT') AS priority ON (priority.type='priority'
AND priority.name=priority)
LEFT OUTER JOIN (SELECT * FROM milestone WHERE product='PRODUCT') AS milestone ON (milestone.name=milestone)
WHERE ((COALESCE(t.status,'')!=%s)
AND (COALESCE(t.OWNER,'')=%s))
ORDER BY COALESCE(t.milestone,'')='',
COALESCE(milestone.completed,0)=0,
milestone.completed,
COALESCE(milestone.due,0)=0,
milestone.due,
t.milestone,
COALESCE(priority.value,'')='' DESC,
CAST(priority.value AS integer) DESC,t.id"""
),
(
"""SELECT COUNT(*)
FROM
(SELECT p.value AS __color__, id AS ticket, summary, component, VERSION, milestone, t.type AS TYPE, OWNER, status,
time AS created,
changetime AS _changetime,
description AS _description,
reporter AS _reporter
FROM ticket t
LEFT JOIN enum p ON p.name = t.priority
AND p.TYPE = 'priority'
WHERE status <> 'closed'
ORDER BY CAST(p.value AS integer),
milestone,
t.TYPE, time ) AS tab""",
"""SELECT COUNT(*)
FROM
(SELECT p.value AS __color__, id AS ticket, summary, component, VERSION, milestone, t.type AS TYPE, OWNER, status,
time AS created,
changetime AS _changetime,
description AS _description,
reporter AS _reporter
FROM (SELECT * FROM ticket WHERE product='PRODUCT') AS t
LEFT JOIN (SELECT * FROM enum WHERE product='PRODUCT') AS p ON p.name = t.priority
AND p.TYPE = 'priority'
WHERE status <> 'closed'
ORDER BY CAST(p.value AS integer),
milestone,
t.TYPE, time ) AS tab"""
),
(
"""SELECT COUNT(*)
FROM
(SELECT t.id AS id,
t.summary AS summary,
t.status AS status,
t.type AS TYPE,
t.priority AS priority,
t.product AS product,
t.milestone AS milestone,
t.time AS time,
t.changetime AS changetime,
t.owner AS OWNER,
priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.TYPE='priority'
AND priority.name=priority)
WHERE ((COALESCE(t.status,'')!=%s)
AND (COALESCE(t.OWNER,'')=%s))
ORDER BY COALESCE(priority.value,'')='',
CAST(priority.value AS integer),
t.id) AS x""",
"""SELECT COUNT(*)
FROM
(SELECT t.id AS id,
t.summary AS summary,
t.status AS status,
t.type AS TYPE,
t.priority AS priority,
t.product AS product,
t.milestone AS milestone,
t.time AS time,
t.changetime AS changetime,
t.owner AS OWNER,
priority.value AS priority_value
FROM (SELECT * FROM ticket WHERE product='PRODUCT') AS t
LEFT OUTER JOIN (SELECT * FROM enum WHERE product='PRODUCT') AS priority ON (priority.TYPE='priority'
AND priority.name=priority)
WHERE ((COALESCE(t.status,'')!=%s)
AND (COALESCE(t.OWNER,'')=%s))
ORDER BY COALESCE(priority.value,'')='',
CAST(priority.value AS integer),
t.id) AS x"""
),
(
"""SELECT t.id AS id,
t.summary AS summary,
t.status AS status,
t.type AS TYPE,
t.priority AS priority,
t.product AS product,
t.milestone AS milestone,
t.time AS time,
t.changetime AS changetime,
t.owner AS OWNER,
priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.TYPE='priority'
AND priority.name=priority)
WHERE ((COALESCE(t.status,'')!=%s)
AND (COALESCE(t.OWNER,'')=%s))
ORDER BY COALESCE(priority.value,'')='',
CAST(priority.value AS integer),
t.id""",
"""SELECT t.id AS id,
t.summary AS summary,
t.status AS status,
t.type AS TYPE,
t.priority AS priority,
t.product AS product,
t.milestone AS milestone,
t.time AS time,
t.changetime AS changetime,
t.owner AS OWNER,
priority.value AS priority_value
FROM (SELECT * FROM ticket WHERE product='PRODUCT') AS t
LEFT OUTER JOIN (SELECT * FROM enum WHERE product='PRODUCT') AS priority ON (priority.TYPE='priority'
AND priority.name=priority)
WHERE ((COALESCE(t.status,'')!=%s)
AND (COALESCE(t.OWNER,'')=%s))
ORDER BY COALESCE(priority.value,'')='',
CAST(priority.value AS integer),
t.id"""
),
(
"""SELECT *
FROM
(SELECT p.value AS __color__, id AS ticket, summary, component, VERSION, milestone, t.type AS TYPE, OWNER, status,
time AS created,
changetime AS _changetime,
description AS _description,
reporter AS _reporter
FROM ticket t
LEFT JOIN enum p ON p.name = t.priority
AND p.TYPE = 'priority'
WHERE status <> 'closed'
ORDER BY CAST(p.value AS integer),
milestone,
t.TYPE, time ) AS tab LIMIT 1""",
"""SELECT *
FROM
(SELECT p.value AS __color__, id AS ticket, summary, component, VERSION, milestone, t.type AS TYPE, OWNER, status,
time AS created,
changetime AS _changetime,
description AS _description,
reporter AS _reporter
FROM (SELECT * FROM ticket WHERE product='PRODUCT') AS t
LEFT JOIN (SELECT * FROM enum WHERE product='PRODUCT') AS p ON p.name = t.priority
AND p.TYPE = 'priority'
WHERE status <> 'closed'
ORDER BY CAST(p.value AS integer),
milestone,
t.TYPE, time ) AS tab LIMIT 1"""
),
(
"""SELECT p.value AS __color__, id AS ticket, summary, component, VERSION, milestone, t.type AS TYPE, OWNER, status,
time AS created,
changetime AS _changetime,
description AS _description,
reporter AS _reporter
FROM ticket t
LEFT JOIN enum p ON p.name = t.priority
AND p.TYPE = 'priority'
WHERE status <> 'closed'
ORDER BY CAST(p.value AS integer),
milestone,
t.TYPE, time""",
"""SELECT p.value AS __color__, id AS ticket, summary, component, VERSION, milestone, t.type AS TYPE, OWNER, status,
time AS created,
changetime AS _changetime,
description AS _description,
reporter AS _reporter
FROM (SELECT * FROM ticket WHERE product='PRODUCT') AS t
LEFT JOIN (SELECT * FROM enum WHERE product='PRODUCT') AS p ON p.name = t.priority
AND p.TYPE = 'priority'
WHERE status <> 'closed'
ORDER BY CAST(p.value AS integer),
milestone,
t.TYPE, time"""
),
(
"""SELECT COALESCE(version, '') ,
count(COALESCE(version, ''))
FROM
(SELECT t.id AS id,
t.summary AS summary,
t.owner AS owner,
t.type AS type,
t.status AS status,
t.priority AS priority,
t.milestone AS milestone,
t.version AS version,
t.time AS time,
t.changetime AS changetime,
t.product AS product,
priority.value AS priority_value
FROM
(SELECT *
FROM ticket
WHERE product="default") AS t
LEFT OUTER JOIN
(SELECT *
FROM enum
WHERE product="default") AS priority ON (priority.type='priority'
AND priority.name=priority)
LEFT OUTER JOIN
(SELECT *
FROM version
WHERE product="default") AS version ON (version.name=version)
WHERE ((COALESCE(t.product,'')='default'))
ORDER BY COALESCE(t.version,'')='',
COALESCE(version.time,0)=0,version.time,
t.version,COALESCE(priority.value,'')='',
CAST(priority.value AS integer),
t.id) AS foo
GROUP BY COALESCE(version, '')""",
"""SELECT COALESCE(version, '') ,
count(COALESCE(version, ''))
FROM
(SELECT t.id AS id,
t.summary AS summary,
t.owner AS owner,
t.type AS type,
t.status AS status,
t.priority AS priority,
t.milestone AS milestone,
t.version AS version,
t.time AS time,
t.changetime AS changetime,
t.product AS product,
priority.value AS priority_value
FROM
(SELECT *
FROM (SELECT * FROM ticket WHERE product='PRODUCT') AS ticket
WHERE product="default") AS t
LEFT OUTER JOIN
(SELECT *
FROM (SELECT * FROM enum WHERE product='PRODUCT') AS enum
WHERE product="default") AS priority ON (priority.type='priority'
AND priority.name=priority)
LEFT OUTER JOIN
(SELECT *
FROM (SELECT * FROM version WHERE product='PRODUCT') AS version
WHERE product="default") AS version ON (version.name=version)
WHERE ((COALESCE(t.product,'')='default'))
ORDER BY COALESCE(t.version,'')='',
COALESCE(version.time,0)=0,version.time,
t.version,COALESCE(priority.value,'')='',
CAST(priority.value AS integer),
t.id) AS foo
GROUP BY COALESCE(version, '')"""
),
(
"""SELECT w1.name, w1.time, w1.author, w1.text
FROM wiki w1,(SELECT name, max(version) AS ver
FROM wiki GROUP BY name) w2
WHERE w1.version = w2.ver AND w1.name = w2.name
AND (w1.name LIKE %s ESCAPE '/' OR w1.author LIKE %s ESCAPE '/' OR w1.text LIKE %s ESCAPE '/')""",
"""SELECT w1.name, w1.time, w1.author, w1.text
FROM (SELECT * FROM wiki WHERE product='PRODUCT') AS w1,(SELECT name, max(version) AS ver
FROM (SELECT * FROM wiki WHERE product='PRODUCT') AS wiki GROUP BY name) AS w2
WHERE w1.version = w2.ver AND w1.name = w2.name
AND (w1.name LIKE %s ESCAPE '/' OR w1.author LIKE %s ESCAPE '/' OR w1.text LIKE %s ESCAPE '/')"""
),
(
"""INSERT INTO ticket(id, type, time, changetime, component, severity, priority,
owner, reporter, cc, version, milestone, status, resolution,
summary, description, keywords)
SELECT id, 'defect', time, changetime, component, severity, priority, owner,
reporter, cc, version, milestone, status, resolution, summary,
description, keywords FROM ticket_old
WHERE COALESCE(severity,'') <> 'enhancement'""",
"""INSERT INTO ticket(id, type, time, changetime, component, severity, priority,
owner, reporter, cc, version, milestone, status, resolution,
summary, description, keywords, product)
SELECT id, 'defect', time, changetime, component, severity, priority, owner,
reporter, cc, version, milestone, status, resolution, summary,
description, keywords, 'PRODUCT' FROM (SELECT * FROM "PRODUCT_ticket_old") AS ticket_old
WHERE COALESCE(severity,'') <> 'enhancement'"""
),
(
"""INSERT INTO ticket(id, type, time, changetime, component, severity, priority,
owner, reporter, cc, version, milestone, status, resolution,
summary, description, keywords)
SELECT id, 'enhancement', time, changetime, component, 'normal', priority,
owner, reporter, cc, version, milestone, status, resolution, summary,
description, keywords FROM ticket_old
WHERE severity = 'enhancement'""",
"""INSERT INTO ticket(id, type, time, changetime, component, severity, priority,
owner, reporter, cc, version, milestone, status, resolution,
summary, description, keywords, product)
SELECT id, 'enhancement', time, changetime, component, 'normal', priority,
owner, reporter, cc, version, milestone, status, resolution, summary,
description, keywords, 'PRODUCT' FROM (SELECT * FROM "PRODUCT_ticket_old") AS ticket_old
WHERE severity = 'enhancement'"""
),
(
"""SELECT COUNT(*) FROM (
SELECT __color__, __group,
(CASE
WHEN __group = 1 THEN 'Accepted'
WHEN __group = 2 THEN 'Owned'
WHEN __group = 3 THEN 'Reported'
ELSE 'Commented' END) AS __group__,
ticket, summary, component, version, milestone,
type, priority, created, _changetime, _description,
_reporter
FROM (
SELECT DISTINCT CAST(p.value AS integer) AS __color__,
(CASE
WHEN owner = %s AND status = 'accepted' THEN 1
WHEN owner = %s THEN 2
WHEN reporter = %s THEN 3
ELSE 4 END) AS __group,
t.id AS ticket, summary, component, version, milestone,
t.type AS type, priority, t.time AS created,
t.changetime AS _changetime, description AS _description,
reporter AS _reporter
FROM ticket t
LEFT JOIN enum p ON p.name = t.priority AND p.type = 'priority'
LEFT JOIN ticket_change tc ON tc.ticket = t.id AND tc.author = %s
AND tc.field = 'comment'
WHERE t.status <> 'closed'
AND (owner = %s OR reporter = %s OR author = %s)
) AS sub
ORDER BY __group, __color__, milestone, type, created
) AS tab""",
"""SELECT COUNT(*) FROM (
SELECT __color__, __group,
(CASE
WHEN __group = 1 THEN 'Accepted'
WHEN __group = 2 THEN 'Owned'
WHEN __group = 3 THEN 'Reported'
ELSE 'Commented' END) AS __group__,
ticket, summary, component, version, milestone,
type, priority, created, _changetime, _description,
_reporter
FROM (
SELECT DISTINCT CAST(p.value AS integer) AS __color__,
(CASE
WHEN owner = %s AND status = 'accepted' THEN 1
WHEN owner = %s THEN 2
WHEN reporter = %s THEN 3
ELSE 4 END) AS __group,
t.id AS ticket, summary, component, version, milestone,
t.type AS type, priority, t.time AS created,
t.changetime AS _changetime, description AS _description,
reporter AS _reporter
FROM (SELECT * FROM ticket WHERE product='PRODUCT') AS t
LEFT JOIN (SELECT * FROM enum WHERE product='PRODUCT') AS p ON p.name = t.priority AND p.type = 'priority'
LEFT JOIN (SELECT * FROM ticket_change WHERE product='PRODUCT') AS tc ON tc.ticket = t.id AND tc.author = %s
AND tc.field = 'comment'
WHERE t.status <> 'closed'
AND (owner = %s OR reporter = %s OR author = %s)
) AS sub
ORDER BY __group, __color__, milestone, type, created
) AS tab"""
),
],
# custom table SELECTs
'custom_select' : [
(
"""SELECT bklg_id, count(*) as total
FROM backlog_ticket
WHERE tkt_order IS NULL OR tkt_order > -1
GROUP BY bklg_id
""",
"""SELECT bklg_id, count(*) as total
FROM (SELECT * FROM "PRODUCT_backlog_ticket") AS backlog_ticket
WHERE tkt_order IS NULL OR tkt_order > -1
GROUP BY bklg_id
"""
),
(
"""SELECT bt.bklg_id, t.status, count(*) as total
FROM backlog_ticket bt, ticket t
WHERE t.id = bt.tkt_id
AND (bt.tkt_order IS NULL OR bt.tkt_order > -1)
GROUP BY bklg_id, status""",
"""SELECT bt.bklg_id, t.status, count(*) as total
FROM (SELECT * FROM "PRODUCT_backlog_ticket") AS bt, (SELECT * FROM ticket WHERE product='PRODUCT') AS t
WHERE t.id = bt.tkt_id
AND (bt.tkt_order IS NULL OR bt.tkt_order > -1)
GROUP BY bklg_id, status"""
),
],
# non-translated INSERTs
'system_insert_nontranslated' : [
(
"""INSERT INTO session VALUES (%s,%s,0)""",
"""INSERT INTO session VALUES (%s,%s,0)"""
),
],
# translated INSERTs
'system_insert_translated' : [
(
"""INSERT INTO ticket_custom (ticket, name, value)
SELECT id, 'totalhours', '0' FROM ticket WHERE id NOT IN (
SELECT ticket from ticket_custom WHERE name='totalhours'
)""",
"""INSERT INTO ticket_custom (ticket, name, value, product)
SELECT id, 'totalhours', '0', 'PRODUCT' FROM (SELECT * FROM ticket WHERE product='PRODUCT') AS ticket WHERE id NOT IN (
SELECT ticket from (SELECT * FROM ticket_custom WHERE product='PRODUCT') AS ticket_custom WHERE name='totalhours'
)"""
),
(
"""INSERT INTO ticket_custom (ticket, name, value)
SELECT id, 'totalhours', '0' FROM ticket WHERE id NOT IN (
SELECT ticket from ticket_custom WHERE name='totalhours')""",
"""INSERT INTO ticket_custom (ticket, name, value, product)
SELECT id, 'totalhours', '0', 'PRODUCT' FROM (SELECT * FROM ticket WHERE product='PRODUCT') AS ticket WHERE id NOT IN (
SELECT ticket from (SELECT * FROM ticket_custom WHERE product='PRODUCT') AS ticket_custom WHERE name='totalhours')"""
),
(
"""INSERT INTO session (sid, last_visit, authenticated)
SELECT distinct s.sid,COALESCE(%s,0),s.authenticated
FROM session_old AS s LEFT JOIN session_old AS s2
ON (s.sid=s2.sid AND s2.var_name='last_visit')
WHERE s.sid IS NOT NULL""",
"""INSERT INTO session (sid, last_visit, authenticated)
SELECT distinct s.sid,COALESCE(%s,0),s.authenticated
FROM (SELECT * FROM "PRODUCT_session_old") AS s LEFT JOIN (SELECT * FROM "PRODUCT_session_old") AS s2
ON (s.sid=s2.sid AND s2.var_name='last_visit')
WHERE s.sid IS NOT NULL"""
),
(
"""INSERT INTO session_attribute (sid, authenticated, name, value)
SELECT s.sid, s.authenticated, s.var_name, s.var_value
FROM session_old s
WHERE s.var_name <> 'last_visit' AND s.sid IS NOT NULL""",
"""INSERT INTO session_attribute (sid, authenticated, name, value)
SELECT s.sid, s.authenticated, s.var_name, s.var_value
FROM (SELECT * FROM "PRODUCT_session_old") AS s
WHERE s.var_name <> 'last_visit' AND s.sid IS NOT NULL"""
),
(
"""INSERT INTO wiki(version, name, time, author, ipnr, text)
SELECT 1 + COALESCE(max(version), 0), %s, %s, 'trac',
'127.0.0.1', %s FROM wiki WHERE name=%s""",
"""INSERT INTO wiki(version, name, time, author, ipnr, text, product)
SELECT 1 + COALESCE(max(version), 0), %s, %s, 'trac',
'127.0.0.1', %s, 'PRODUCT' FROM (SELECT * FROM wiki WHERE product='PRODUCT') AS wiki WHERE name=%s"""
),
(
"""INSERT INTO permission VALUES ('dev','WIKI_VIEW')""",
"""INSERT INTO permission VALUES ('dev','WIKI_VIEW','PRODUCT')"""
),
(
"""INSERT INTO permission (username, action) VALUES ('dev','WIKI_VIEW')""",
"""INSERT INTO permission (username, action, product) VALUES ('dev','WIKI_VIEW','PRODUCT')"""
),
],
'custom_insert' : [
(
"""INSERT INTO node_change (rev,path,kind,change,base_path,base_rev)
SELECT rev,path,kind,change,base_path,base_rev FROM node_change_old""",
"""INSERT INTO node_change (rev,path,kind,change,base_path,base_rev)
SELECT rev,path,kind,change,base_path,base_rev FROM (SELECT * FROM "PRODUCT_node_change_old") AS node_change_old"""
),
],
# translated UPDATEs
'system_update_translated' : [
(
"""UPDATE ticket SET changetime=%s WHERE id=%s""",
"""UPDATE ticket SET changetime=%s WHERE product='PRODUCT' AND id=%s"""
),
(
"""UPDATE ticket SET changetime=(
SELECT time FROM ticket_change WHERE ticket=%s
UNION
SELECT time FROM (
SELECT time FROM ticket WHERE id=%s LIMIT 1) AS t
ORDER BY time DESC LIMIT 1)
WHERE id=%s""",
"""UPDATE ticket SET changetime=(
SELECT time FROM (SELECT * FROM ticket_change WHERE product='PRODUCT') AS ticket_change WHERE ticket=%s
UNION
SELECT time FROM (
SELECT time FROM (SELECT * FROM ticket WHERE product='PRODUCT') AS ticket WHERE id=%s LIMIT 1) AS t
ORDER BY time DESC LIMIT 1)
WHERE product='PRODUCT' AND id=%s"""
),
(
"""UPDATE component SET name=%s,owner=%s, description=%s
WHERE name=%s""",
"""UPDATE component SET name=%s,owner=%s, description=%s
WHERE product='PRODUCT' AND name=%s"""
),
(
"""UPDATE milestone
SET name=%s, due=%s, completed=%s, description=%s
WHERE name=%s""",
"""UPDATE milestone
SET name=%s, due=%s, completed=%s, description=%s
WHERE product='PRODUCT' AND name=%s"""
),
(
"""UPDATE wiki
SET text=%s
WHERE name=%s""",
"""UPDATE wiki
SET text=%s
WHERE product='PRODUCT' AND name=%s"""
),
(
"""UPDATE ticket SET product=%s
WHERE product=%s""",
"""UPDATE ticket SET product=%s
WHERE product='PRODUCT' AND product=%s"""
),
(
"""UPDATE ticket set changetime=%s where id=%s""",
"""UPDATE ticket set changetime=%s where product='PRODUCT' AND id=%s"""
),
(
"""UPDATE
milestone
SET
id_project='%s' WHERE milestone='%s'""",
"""UPDATE
milestone
SET
id_project='%s' WHERE product='PRODUCT' AND milestone='%s'"""
),
(
"""UPDATE ticket_change SET newvalue=%s
WHERE ticket=%s and author=%s and time=%s and field=%s""",
"""UPDATE ticket_change SET newvalue=%s
WHERE product='PRODUCT' AND ticket=%s and author=%s and time=%s and field=%s"""
),
(
"""UPDATE ticket_change SET oldvalue=%s, newvalue=%s
WHERE ticket=%s and author=%s and time=%s and field=%s""",
"""UPDATE ticket_change SET oldvalue=%s, newvalue=%s
WHERE product='PRODUCT' AND ticket=%s and author=%s and time=%s and field=%s"""
),
(
"""UPDATE
ticket_custom
SET
value = '%s'
WHERE
name = 'project' AND value = '%s'""",
"""UPDATE
ticket_custom
SET
value = '%s'
WHERE
product='PRODUCT' AND name = 'project' AND value = '%s'"""
),
],
# non-translated UPDATEs
'system_update_nontranslated' : [
(
"""UPDATE session_attribute
SET value='1'
WHERE sid=%s
AND name='password_refreshed'""",
"""UPDATE session_attribute
SET value='1'
WHERE sid=%s
AND name='password_refreshed'"""
),
(
"""UPDATE session_attribute
SET value=%s""",
"""UPDATE session_attribute
SET value=%s"""
),
(
"""UPDATE auth_cookie
SET time=%s
WHERE cookie=%s""",
"""UPDATE auth_cookie
SET time=%s
WHERE cookie=%s"""
),
],
# custom (plugin) table UPDATEs
'custom_update' : [
(
"""UPDATE subscription
SET format=%s
WHERE distributor=%s
AND sid=%s
AND authenticated=%s""",
"""UPDATE "PRODUCT_subscription"
SET format=%s
WHERE distributor=%s
AND sid=%s
AND authenticated=%s"""
),
(
"""UPDATE subscription
SET changetime=CURRENT_TIMESTAMP,
priority=%s
WHERE id=%s""",
"""UPDATE "PRODUCT_subscription"
SET changetime=CURRENT_TIMESTAMP,
priority=%s
WHERE id=%s"""
),
(
"""UPDATE backlog_ticket SET tkt_order = NULL WHERE tkt_id = %s""",
"""UPDATE "PRODUCT_backlog_ticket" SET tkt_order = NULL WHERE tkt_id = %s"""
),
(
"""UPDATE backlog_ticket SET tkt_order = -1
WHERE bklg_id = %s
AND tkt_id IN
(SELECT id FROM ticket
WHERE status = 'closed')""",
"""UPDATE "PRODUCT_backlog_ticket" SET tkt_order = -1
WHERE bklg_id = %s
AND tkt_id IN
(SELECT id FROM (SELECT * FROM ticket WHERE product='PRODUCT') AS ticket
WHERE status = 'closed')"""
),
(
"""UPDATE backlog_ticket SET tkt_order = -1
WHERE bklg_id = %s
AND tkt_id IN (SELECT id FROM ticket
WHERE status = 'closed')""",
"""UPDATE "PRODUCT_backlog_ticket" SET tkt_order = -1
WHERE bklg_id = %s
AND tkt_id IN (SELECT id FROM (SELECT * FROM ticket WHERE product='PRODUCT') AS ticket
WHERE status = 'closed')"""
),
(
"""UPDATE estimate SET rate=%s, variability=%s, communication=%s, tickets=%s, comment=%s
WHERE id=%s""",
"""UPDATE "PRODUCT_estimate" SET rate=%s, variability=%s, communication=%s, tickets=%s, comment=%s
WHERE id=%s"""
),
(
"""UPDATE estimate_line_item SET estimate_id=%s ,
description=%s, low=%s, high=%s
WHERE id=%s""",
"""UPDATE "PRODUCT_estimate_line_item" SET estimate_id=%s ,
description=%s, low=%s, high=%s
WHERE id=%s"""
),
(
"""UPDATE estimate SET rate=%s, variability=%s, communication=%s, tickets=%s, comment=%s,
diffcomment=%s, saveepoch=%s
WHERE id=%s""",
"""UPDATE "PRODUCT_estimate" SET rate=%s, variability=%s, communication=%s, tickets=%s, comment=%s,
diffcomment=%s, saveepoch=%s
WHERE id=%s"""
),
(
"""UPDATE estimate_line_item SET estimate_id=%s ,
description=%s, low=%s, high=%s
WHERE id=%s""",
"""UPDATE "PRODUCT_estimate_line_item" SET estimate_id=%s ,
description=%s, low=%s, high=%s
WHERE id=%s"""
),
(
"""UPDATE estimate SET rate=%s, variability=%s, communication=%s, tickets=%s, comment=%s,
diffcomment=%s, saveepoch=%s
WHERE id=%s""",
"""UPDATE "PRODUCT_estimate" SET rate=%s, variability=%s, communication=%s, tickets=%s, comment=%s,
diffcomment=%s, saveepoch=%s
WHERE id=%s"""
),
],
# custom CREATE TABLE
'custom_create_table' : [
(
"""CREATE TABLE estimate(
id integer PRIMARY KEY,
rate DECIMAL,
variability DECIMAL,
communication DECIMAL,
tickets VARCHAR(512),
comment VARCHAR(8000)
)""",
"""CREATE TABLE "PRODUCT_estimate"(
id integer PRIMARY KEY,
rate DECIMAL,
variability DECIMAL,
communication DECIMAL,
tickets VARCHAR(512),
comment VARCHAR(8000)
)"""
),
(
"""CREATE TABLE estimate_line_item(
id integer PRIMARY KEY,
estimate_id integer,
description VARCHAR(2048),
low DECIMAL,
high DECIMAL
)""",
"""CREATE TABLE "PRODUCT_estimate_line_item"(
id integer PRIMARY KEY,
estimate_id integer,
description VARCHAR(2048),
low DECIMAL,
high DECIMAL
)"""
),
(
"""CREATE TABLE backlog_ticket (bklg_id INTEGER NOT NULL,"
" tkt_id INTEGER NOT NULL,"
" tkt_order REAL,"
" PRIMARY KEY(bklg_id, tkt_id))""",
"""CREATE TABLE "PRODUCT_backlog_ticket" (bklg_id INTEGER NOT NULL,"
" tkt_id INTEGER NOT NULL,"
" tkt_order REAL,"
" PRIMARY KEY(bklg_id, tkt_id))"""
),
(
"""CREATE TEMPORARY TABLE backlog_ticket (bklg_id INTEGER NOT NULL,"
" tkt_id INTEGER NOT NULL,"
" tkt_order REAL,"
" PRIMARY KEY(bklg_id, tkt_id))""",
"""CREATE TEMPORARY TABLE "PRODUCT_backlog_ticket" (bklg_id INTEGER NOT NULL,"
" tkt_id INTEGER NOT NULL,"
" tkt_order REAL,"
" PRIMARY KEY(bklg_id, tkt_id))"""
),
(
"""CREATE TEMPORARY TABLE table_old AS SELECT * FROM table""",
"""CREATE TEMPORARY TABLE "PRODUCT_table_old" AS SELECT * FROM"""
""" (SELECT * FROM "PRODUCT_table") AS table""",
),
],
# custom ALTER TABLE
'custom_alter_table' : [
(
"""ALTER TABLE estimate ADD COLUMN diffcomment text""",
"""ALTER TABLE "PRODUCT_estimate" ADD COLUMN diffcomment text"""
),
(
"""ALTER TABLE estimate ADD COLUMN saveepoch int""",
"""ALTER TABLE "PRODUCT_estimate" ADD COLUMN saveepoch int"""
),
],
#lowercase select (#548)
'lowercase_tokens': [
(
"""select * from ticket""",
"""select * from (SELECT * FROM ticket WHERE product='PRODUCT') AS ticket"""
),
(
"""create temporary table table_old as select * from table""",
"""create temporary table "PRODUCT_table_old" as select * from (SELECT * FROM "PRODUCT_table") AS table""",
)
],
# insert with specified product (#601)
'insert_with_product': [
(
"""INSERT INTO ticket (summary, product) VALUES ('S', 'swlcu')""",
"""INSERT INTO ticket (summary, product, id) VALUES ('S', 'swlcu',COALESCE((SELECT MAX(id) FROM\n(SELECT * FROM ticket WHERE product='PRODUCT')\nAS subquery), 0)+1)"""
),
],
}
class DbCursorTestCase(unittest.TestCase):
"""Unit tests covering the BloodhoundProductSQLTranslate"""
def setUp(self):
self.translator = BloodhoundProductSQLTranslate(SKIP_TABLES, TRANSLATE_TABLES, PRODUCT_COLUMN, 'PRODUCT')
for section in data.keys():
if not getattr(self, 'test_%s' % section, None):
raise Exception("Section '%s' not covered in test case" % section)
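# A quick orientation example (hypothetical call, mirroring the 'lowercase_tokens'
# data above): the translator built in setUp rewrites product-scoped tables, so
#   self.translator.translate("SELECT * FROM ticket")
# is expected to yield
#   "SELECT * FROM (SELECT * FROM ticket WHERE product='PRODUCT') AS ticket"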
def tearDown(self):
pass
def _run_test(self, section):
for (sql, translated_sql_check) in data[section]:
translated_sql = self.translator.translate(sql)
stripped_sql_check = '\n'.join([l.strip() for l in translated_sql_check.splitlines()])
stripped_translated_sql = '\n'.join([l.strip() for l in translated_sql.splitlines()])
self.assertEquals(stripped_sql_check, stripped_translated_sql)
def test_system_select_nontranslated(self):
self._run_test('system_select_nontranslated')
def test_system_select_translated(self):
self._run_test('system_select_translated')
def test_custom_select(self):
self._run_test('custom_select')
def test_system_insert_nontranslated(self):
self._run_test('system_insert_nontranslated')
def test_system_insert_translated(self):
self._run_test('system_insert_translated')
def test_custom_insert(self):
self._run_test('custom_insert')
def test_system_update_translated(self):
self._run_test('system_update_translated')
def test_system_update_nontranslated(self):
self._run_test('system_update_nontranslated')
def test_custom_update(self):
self._run_test('custom_update')
def test_custom_create_table(self):
self._run_test('custom_create_table')
def test_custom_alter_table(self):
self._run_test('custom_alter_table')
def test_lowercase_tokens(self):
self._run_test('lowercase_tokens')
def test_insert_with_product(self):
self._run_test('insert_with_product')
if __name__ == '__main__':
unittest.main()
|
toshywoshy/ansible | refs/heads/devel | lib/ansible/module_utils/json_utils.py | 89 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import json
# NB: a copy of this function exists in ../../modules/core/async_wrapper.py. Ensure any
# changes are propagated there.
def _filter_non_json_lines(data):
'''
Used to filter unrelated output around module JSON output, like messages from
tcagetattr, or where dropbear spews MOTD on every single command (which is nuts).
Filters leading lines before first line-starting occurrence of '{' or '[', and filter all
trailing lines after matching close character (working from the bottom of output).
'''
warnings = []
# Filter initial junk
lines = data.splitlines()
for start, line in enumerate(lines):
line = line.strip()
if line.startswith(u'{'):
endchar = u'}'
break
elif line.startswith(u'['):
endchar = u']'
break
else:
raise ValueError('No start of json char found')
# Filter trailing junk
lines = lines[start:]
for reverse_end_offset, line in enumerate(reversed(lines)):
if line.strip().endswith(endchar):
break
else:
raise ValueError('No end of json char found')
if reverse_end_offset > 0:
# Trailing junk is uncommon and can point to things the user might
# want to change. So print a warning if we find any
trailing_junk = lines[len(lines) - reverse_end_offset:]
for line in trailing_junk:
if line.strip():
warnings.append('Module invocation had junk after the JSON data: %s' % '\n'.join(trailing_junk))
break
lines = lines[:(len(lines) - reverse_end_offset)]
return ('\n'.join(lines), warnings)
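# A minimal, self-contained usage sketch; the sample payload and the __main__ guard
# are illustrative additions, not part of the upstream module.
if __name__ == '__main__':
    _sample = 'spurious MOTD banner\n{"changed": false, "msg": "ok"}\ntrailing noise'
    _clean, _warnings = _filter_non_json_lines(_sample)
    print(json.loads(_clean))  # parsed module result: {'changed': False, 'msg': 'ok'}
    print(_warnings)           # one warning about the trailing junk line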
|
rrmelcer/swissatest-analysis | refs/heads/master | gui/__init__.py | 7 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# -----------------------------------------------
# ----> Computer Aided Optical Analysis <----
# -----------------------------------------------
# (c) 2015 by Swissatest Testmaterialien AG
# http://www.swissatest.ch
# -----------------------------------------------
# Developed 2015 by
__author__ = 'Raoul René Melcer'
# raoul.rene.melcer@webservices-consulting.ch
# http://webservices-consulting.ch
# -----------------------------------------------
# License: Apache 2
# http://www.apache.org/licenses/LICENSE-2.0
# -----------------------------------------------
# File: __init__.py
__date__ = '$03.02.2015 10:43:42$'
# https://github.com/rrmelcer/swissatest-analysis
# -----------------------------------------------
# Description:
# Initializes the modules
# -----------------------------------------------
import logging
def main():
logging.info('single run: {0}'.format(__name__))
if __name__ == '__main__':
main()
|
cloudy064/googletest | refs/heads/master | test/gtest_env_var_test.py | 2408 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly parses environment variables."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
IS_WINDOWS = os.name == 'nt'
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_env_var_test_')
environ = os.environ.copy()
def AssertEq(expected, actual):
if expected != actual:
print 'Expected: %s' % (expected,)
print ' Actual: %s' % (actual,)
raise AssertionError
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
environ[env_var] = value
elif env_var in environ:
del environ[env_var]
def GetFlag(flag):
"""Runs gtest_env_var_test_ and returns its output."""
args = [COMMAND]
if flag is not None:
args += [flag]
return gtest_test_utils.Subprocess(args, env=environ).output
def TestFlag(flag, test_val, default_val):
"""Verifies that the given flag is affected by the corresponding env var."""
env_var = 'GTEST_' + flag.upper()
SetEnvVar(env_var, test_val)
AssertEq(test_val, GetFlag(flag))
SetEnvVar(env_var, None)
AssertEq(default_val, GetFlag(flag))
class GTestEnvVarTest(gtest_test_utils.TestCase):
def testEnvVarAffectsFlag(self):
"""Tests that environment variable should affect the corresponding flag."""
TestFlag('break_on_failure', '1', '0')
TestFlag('color', 'yes', 'auto')
TestFlag('filter', 'FooTest.Bar', '*')
TestFlag('output', 'xml:tmp/foo.xml', '')
TestFlag('print_time', '0', '1')
TestFlag('repeat', '999', '1')
TestFlag('throw_on_failure', '1', '0')
TestFlag('death_test_style', 'threadsafe', 'fast')
TestFlag('catch_exceptions', '0', '1')
if IS_LINUX:
TestFlag('death_test_use_fork', '1', '0')
TestFlag('stack_trace_depth', '0', '100')
if __name__ == '__main__':
gtest_test_utils.Main()
|
x1101/nikola | refs/heads/master | nikola/plugins/command/new_page.py | 4 | # -*- coding: utf-8 -*-
# Copyright © 2012-2016 Roberto Alsina, Chris Warrick and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Create a new page."""
from __future__ import unicode_literals, print_function
from nikola.plugin_categories import Command
class CommandNewPage(Command):
"""Create a new page."""
name = "new_page"
doc_usage = "[options] [path]"
doc_purpose = "create a new page in the site"
cmd_options = [
{
'name': 'title',
'short': 't',
'long': 'title',
'type': str,
'default': '',
'help': 'Title for the page.'
},
{
'name': 'author',
'short': 'a',
'long': 'author',
'type': str,
'default': '',
            'help': 'Author of the page.'
},
{
'name': 'onefile',
'short': '1',
'type': bool,
'default': False,
'help': 'Create the page with embedded metadata (single file format)'
},
{
'name': 'twofile',
'short': '2',
'type': bool,
'default': False,
'help': 'Create the page with separate metadata (two file format)'
},
{
'name': 'edit',
'short': 'e',
'type': bool,
'default': False,
'help': 'Open the page (and meta file, if any) in $EDITOR after creation.'
},
{
'name': 'content_format',
'short': 'f',
'long': 'format',
'type': str,
'default': '',
'help': 'Markup format for the page (use --available-formats for list)',
},
{
'name': 'available-formats',
'short': 'F',
'long': 'available-formats',
'type': bool,
'default': False,
'help': 'List all available input formats'
},
{
'name': 'import',
'short': 'i',
'long': 'import',
'type': str,
'default': '',
'help': 'Import an existing file instead of creating a placeholder'
},
]
def _execute(self, options, args):
"""Create a new page."""
# Defaults for some values that don’t apply to pages and the is_page option (duh!)
options['tags'] = ''
options['schedule'] = False
options['is_page'] = True
        # Even though this command was split out into `new_page`, the actual work
        # is still done by the `new_post` plugin, so the code is not duplicated.
p = self.site.plugin_manager.getPluginByName('new_post', 'Command').plugin_object
return p.execute(options, args)
|
leifos/ifind | refs/heads/master | ifind/search/engines/googlecse.py | 1 | import json
import requests
from ifind.search.engine import Engine
from ifind.search.response import Response
from ifind.search.exceptions import EngineAPIKeyException, QueryParamException, EngineConnectionException
# TODO: Bug fix - utils and utils.encoding do not exist
# from ifind.utils.encoding import encode_symbols
import time
API_ENDPOINT = "https://www.googleapis.com/customsearch/v1/"
# TODO: Temp limits. Just returns the first page. Google currently requires a paid API key.
MAX_PAGE_SIZE = 10
MAX_RESULTS = 10
RESULT_TYPES = ('web', 'image')
DEFAULT_RESULT_TYPE = 'web'
class Googlecse(Engine):
"""
GoogleCustomSearch search engine.
"""
def __init__(self, api_key='', cx='', **kwargs):
"""
Google engine constructor.
Kwargs:
api_key (str): string representation of api key needed to access google custom search api
cx (str): string representation of the cx parameter needed to access google custom search api
See Engine.
Raises:
EngineException
Usage:
engine = EngineFactory('GoogleCSE', api_key='etc123456etc123456etc123456', cx='abc123abc123abc123')
"""
Engine.__init__(self, **kwargs)
self.api_key = api_key
self.cx = cx
if not self.api_key:
raise EngineAPIKeyException(self.name, "'api_key=' keyword argument not specified")
if not self.cx:
raise EngineAPIKeyException(self.name, "'cx=' keyword argument not specified")
self.default_result_type = kwargs.get('default_result_type', DEFAULT_RESULT_TYPE)
# Catch empty strings and such.
if not self.default_result_type:
self.default_result_type = DEFAULT_RESULT_TYPE
def _search(self, query):
# TODO: Fix comments
"""
        Concrete implementation of Engine's interface method 'search'.
Performs a search and retrieves the results as an ifind Response.
Args:
query (ifind Query): Object encapsulating details of a search query.
Query Kwargs:
result_type (str): specifies the type of results to return (see top of class for available types).
            top (int): number of results to return, up to MAX_PAGE_SIZE
Returns:
            ifind Response: object encapsulating a search request's results.
Raises:
EngineException
Usage:
Private method.
Notes:
https://developers.google.com/custom-search/ for full API documentation.
"""
if not query.top:
raise QueryParamException(self.name, "Total result amount (query.top) not specified")
if query.top > MAX_RESULTS:
raise QueryParamException(self.name, 'Requested result amount (query.top) '
'exceeds max of {0}'.format(MAX_PAGE_SIZE))
# TODO: Currently assumes that all results fit on one page (query.top <= MAX_PAGE_SIZE)
return self._request(query)
def _request(self, query):
"""
Issues a single request to the API_ENDPOINT and returns the result as
an ifind Response.
Args:
query (ifind Query): object encapsulating details of a search query.
Returns:
ifind Response: object encapsulating a search request's results.
Raises:
EngineException
Usage:
Private method.
"""
query_string = self._create_query_string(query)
try:
response = requests.get(query_string)
except requests.exceptions.ConnectionError:
raise EngineConnectionException(self.name, "Unable to send request, check connectivity.")
if response.status_code != 200:
raise EngineConnectionException(self.name, "", code=response.status_code)
return self._parse_json_response(query, response)
def _create_query_string(self, query):
"""
Creates and returns Google Custom Search API query string with encoded query parameters.
Args:
query (ifind Query): object encapsulating details of a search query.
Returns:
str: query string for Google Custom Search API request
Raises:
EngineException
Usage:
Private method.
"""
# Check for a result type, if none found, set it to default.
result_type = query.result_type
if not result_type:
result_type = self.default_result_type
        # Check if the result type is valid
if result_type not in RESULT_TYPES:
raise QueryParamException(self.name, "Engine doesn't support query result type '{0}'"
.format(query.result_type))
if query.top and query.top <= MAX_PAGE_SIZE:
top = query.top
else:
top = MAX_PAGE_SIZE
# TODO: Should be all necessary parameters.
        # Dictionary of search parameters
search_params = {
'q': query.terms,
'top':top
}
        # TODO: Replace the .replace() call with a substitute for the 'encode_symbols' function, which no longer exists
# Craft the string to append to the endpoint url
if result_type in ['web', 'image']:
query_append = "?&key={}&cx={}&q={}&num={}".format\
(self.api_key, self.cx,
search_params['q'].replace(' ','+'), search_params['top'])
# return API_ENDPOINT + encode_symbols(query_append)
return API_ENDPOINT + query_append
def _parse_json_response(self, query, results):
"""
Parses Google Custom Search's JSON response and returns as an ifind Response.
Args:
query (ifind Query): object encapsulating details of a search query.
results : requests library response object containing search results.
Returns:
ifind Response: object encapsulating a search request's results.
Usage:
Private method.
"""
response = Response(query.terms, query)
content = json.loads(results.text)
# The query object wasn't mutated earlier and the result type isn't passed to this function.
# Check for a result_type or set it to default.
result_type = query.result_type
if not result_type:
result_type = self.default_result_type
# Check for a next page token.
next_page_token = content.get(u'nextPageToken')
if next_page_token:
# A page token exists, create the URL which will fetch the next page
response.next_page = "{}&pageToken={}".format(self._create_query_string(query), next_page_token)
rank_counter = 1
if result_type == 'web' or not query.result_type:
for result in content[u'items']:
title = result[u'title']
url = result[u'link']
summary = result[u'snippet']
response.add_result(title=title, url=url, summary=summary, rank=rank_counter)
rank_counter+=1
return response
|
weritos666/kernel_L7_II_KK | refs/heads/master | scripts/tracing/draw_functrace.py | 14679 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more
human-readable view of the call stack by drawing a textual but hierarchical
tree of calls. Only the functions' names and the call times are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait for some time, but not too long; the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
        has the name given by func. If this function is not
        found in any parent, then create it as a new child of root.
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
|
a4tech/dvbapp2-gui | refs/heads/master | lib/python/Components/InputDevice.py | 9 | from os import listdir, open as os_open, close as os_close, write as os_write, O_RDWR, O_NONBLOCK
from fcntl import ioctl
from boxbranding import getBoxType, getBrandOEM
import struct
from config import config, ConfigSubsection, ConfigInteger, ConfigYesNo, ConfigText, ConfigSlider
from Tools.Directories import pathExists
# asm-generic/ioctl.h
IOC_NRBITS = 8L
IOC_TYPEBITS = 8L
IOC_SIZEBITS = 13L
IOC_DIRBITS = 3L
IOC_NRSHIFT = 0L
IOC_TYPESHIFT = IOC_NRSHIFT+IOC_NRBITS
IOC_SIZESHIFT = IOC_TYPESHIFT+IOC_TYPEBITS
IOC_DIRSHIFT = IOC_SIZESHIFT+IOC_SIZEBITS
IOC_READ = 2L
def EVIOCGNAME(length):
return (IOC_READ<<IOC_DIRSHIFT)|(length<<IOC_SIZESHIFT)|(0x45<<IOC_TYPESHIFT)|(0x06<<IOC_NRSHIFT)
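# Illustrative check (values follow from the shift constants defined above):
# EVIOCGNAME(256) evaluates to 0x41004506 -- a "read" ioctl of type 'E' (0x45),
# number 0x06, carrying a 256-byte buffer -- which is what getInputDevices()
# below passes to ioctl() to read a device's name.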
class inputDevices:
def __init__(self):
self.Devices = {}
self.currentDevice = ""
self.getInputDevices()
def getInputDevices(self):
devices = listdir("/dev/input/")
for evdev in devices:
try:
buffer = "\0"*512
self.fd = os_open("/dev/input/" + evdev, O_RDWR | O_NONBLOCK)
self.name = ioctl(self.fd, EVIOCGNAME(256), buffer)
self.name = self.name[:self.name.find("\0")]
if str(self.name).find("Keyboard") != -1:
self.name = 'keyboard'
os_close(self.fd)
except (IOError,OSError), err:
print '[iInputDevices] getInputDevices <ERROR: ioctl(EVIOCGNAME): ' + str(err) + ' >'
self.name = None
if self.name:
self.Devices[evdev] = {'name': self.name, 'type': self.getInputDeviceType(self.name),'enabled': False, 'configuredName': None }
def getInputDeviceType(self,name):
if "remote control" in name:
return "remote"
elif "keyboard" in name:
return "keyboard"
elif "mouse" in name:
return "mouse"
else:
print "Unknown device type:",name
return None
def getDeviceName(self, x):
if x in self.Devices.keys():
return self.Devices[x].get("name", x)
else:
return "Unknown device name"
def getDeviceList(self):
return sorted(self.Devices.iterkeys())
def setDeviceAttribute(self, device, attribute, value):
#print "[iInputDevices] setting for device", device, "attribute", attribute, " to value", value
if self.Devices.has_key(device):
self.Devices[device][attribute] = value
def getDeviceAttribute(self, device, attribute):
if self.Devices.has_key(device):
if self.Devices[device].has_key(attribute):
return self.Devices[device][attribute]
return None
def setEnabled(self, device, value):
oldval = self.getDeviceAttribute(device, 'enabled')
#print "[iInputDevices] setEnabled for device %s to %s from %s" % (device,value,oldval)
self.setDeviceAttribute(device, 'enabled', value)
if oldval is True and value is False:
self.setDefaults(device)
def setName(self, device, value):
#print "[iInputDevices] setName for device %s to %s" % (device,value)
self.setDeviceAttribute(device, 'configuredName', value)
#struct input_event {
# struct timeval time; -> ignored
# __u16 type; -> EV_REP (0x14)
# __u16 code; -> REP_DELAY (0x00) or REP_PERIOD (0x01)
# __s32 value; -> DEFAULTS: 700(REP_DELAY) or 100(REP_PERIOD)
#}; -> size = 16
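# Illustrative packing (assumes 32-bit timeval fields on the target, as the
# code below does): struct.pack('iihhi', 0, 0, 0x14, 0x00, 700) produces the
# 16 bytes of one input_event -- tv_sec=0, tv_usec=0, type=EV_REP,
# code=REP_DELAY, value=700 ms -- which setDefaults()/setDelay() write to
# /dev/input/<device>.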
def setDefaults(self, device):
print "[iInputDevices] setDefaults for device %s" % device
self.setDeviceAttribute(device, 'configuredName', None)
event_repeat = struct.pack('iihhi', 0, 0, 0x14, 0x01, 100)
event_delay = struct.pack('iihhi', 0, 0, 0x14, 0x00, 700)
fd = os_open("/dev/input/" + device, O_RDWR)
os_write(fd, event_repeat)
os_write(fd, event_delay)
os_close(fd)
def setRepeat(self, device, value): #REP_PERIOD
if self.getDeviceAttribute(device, 'enabled'):
print "[iInputDevices] setRepeat for device %s to %d ms" % (device,value)
event = struct.pack('iihhi', 0, 0, 0x14, 0x01, int(value))
fd = os_open("/dev/input/" + device, O_RDWR)
os_write(fd, event)
os_close(fd)
def setDelay(self, device, value): #REP_DELAY
if self.getDeviceAttribute(device, 'enabled'):
print "[iInputDevices] setDelay for device %s to %d ms" % (device,value)
event = struct.pack('iihhi', 0, 0, 0x14, 0x00, int(value))
fd = os_open("/dev/input/" + device, O_RDWR)
os_write(fd, event)
os_close(fd)
class InitInputDevices:
def __init__(self):
self.currentDevice = ""
self.createConfig()
def createConfig(self, *args):
config.inputDevices = ConfigSubsection()
for device in sorted(iInputDevices.Devices.iterkeys()):
self.currentDevice = device
#print "[InitInputDevices] -> creating config entry for device: %s -> %s " % (self.currentDevice, iInputDevices.Devices[device]["name"])
self.setupConfigEntries(self.currentDevice)
self.currentDevice = ""
def inputDevicesEnabledChanged(self,configElement):
if self.currentDevice != "" and iInputDevices.currentDevice == "":
iInputDevices.setEnabled(self.currentDevice, configElement.value)
elif iInputDevices.currentDevice != "":
iInputDevices.setEnabled(iInputDevices.currentDevice, configElement.value)
def inputDevicesNameChanged(self,configElement):
if self.currentDevice != "" and iInputDevices.currentDevice == "":
iInputDevices.setName(self.currentDevice, configElement.value)
if configElement.value != "":
devname = iInputDevices.getDeviceAttribute(self.currentDevice, 'name')
if devname != configElement.value:
cmd = "config.inputDevices." + self.currentDevice + ".enabled.value = False"
exec cmd
cmd = "config.inputDevices." + self.currentDevice + ".enabled.save()"
exec cmd
elif iInputDevices.currentDevice != "":
iInputDevices.setName(iInputDevices.currentDevice, configElement.value)
def inputDevicesRepeatChanged(self,configElement):
if self.currentDevice != "" and iInputDevices.currentDevice == "":
iInputDevices.setRepeat(self.currentDevice, configElement.value)
elif iInputDevices.currentDevice != "":
iInputDevices.setRepeat(iInputDevices.currentDevice, configElement.value)
def inputDevicesDelayChanged(self,configElement):
if self.currentDevice != "" and iInputDevices.currentDevice == "":
iInputDevices.setDelay(self.currentDevice, configElement.value)
elif iInputDevices.currentDevice != "":
iInputDevices.setDelay(iInputDevices.currentDevice, configElement.value)
def setupConfigEntries(self,device):
cmd = "config.inputDevices." + device + " = ConfigSubsection()"
exec cmd
cmd = "config.inputDevices." + device + ".enabled = ConfigYesNo(default = False)"
exec cmd
cmd = "config.inputDevices." + device + ".enabled.addNotifier(self.inputDevicesEnabledChanged,config.inputDevices." + device + ".enabled)"
exec cmd
cmd = "config.inputDevices." + device + '.name = ConfigText(default="")'
exec cmd
cmd = "config.inputDevices." + device + ".name.addNotifier(self.inputDevicesNameChanged,config.inputDevices." + device + ".name)"
exec cmd
cmd = "config.inputDevices." + device + ".repeat = ConfigSlider(default=100, increment = 10, limits=(0, 500))"
exec cmd
cmd = "config.inputDevices." + device + ".repeat.addNotifier(self.inputDevicesRepeatChanged,config.inputDevices." + device + ".repeat)"
exec cmd
cmd = "config.inputDevices." + device + ".delay = ConfigSlider(default=700, increment = 100, limits=(0, 5000))"
exec cmd
cmd = "config.inputDevices." + device + ".delay.addNotifier(self.inputDevicesDelayChanged,config.inputDevices." + device + ".delay)"
exec cmd
iInputDevices = inputDevices()
config.plugins.remotecontroltype = ConfigSubsection()
config.plugins.remotecontroltype.rctype = ConfigInteger(default = 0)
class RcTypeControl():
def __init__(self):
if pathExists('/proc/stb/ir/rc/type') and pathExists('/proc/stb/info/boxtype') and getBrandOEM() != 'gigablue':
self.isSupported = True
fd = open('/proc/stb/info/boxtype', 'r')
self.boxType = fd.read()
fd.close()
if config.plugins.remotecontroltype.rctype.value != 0:
self.writeRcType(config.plugins.remotecontroltype.rctype.value)
else:
self.isSupported = False
def multipleRcSupported(self):
return self.isSupported
def getBoxType(self):
return self.boxType
def writeRcType(self, rctype):
fd = open('/proc/stb/ir/rc/type', 'w')
fd.write('%d' % rctype)
fd.close()
iRcTypeControl = RcTypeControl()
|
BackupGGCode/sphinx | refs/heads/master | sphinx/util/smartypants.py | 4 | r"""
This is based on SmartyPants.py by `Chad Miller`_.
Copyright and License
=====================
SmartyPants_ license::
Copyright (c) 2003 John Gruber
(http://daringfireball.net/)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name "SmartyPants" nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
This software is provided by the copyright holders and contributors "as
is" and any express or implied warranties, including, but not limited
to, the implied warranties of merchantability and fitness for a
particular purpose are disclaimed. In no event shall the copyright
owner or contributors be liable for any direct, indirect, incidental,
special, exemplary, or consequential damages (including, but not
limited to, procurement of substitute goods or services; loss of use,
data, or profits; or business interruption) however caused and on any
theory of liability, whether in contract, strict liability, or tort
(including negligence or otherwise) arising in any way out of the use
of this software, even if advised of the possibility of such damage.
smartypants.py license::
smartypants.py is a derivative work of SmartyPants.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
This software is provided by the copyright holders and contributors "as
is" and any express or implied warranties, including, but not limited
to, the implied warranties of merchantability and fitness for a
particular purpose are disclaimed. In no event shall the copyright
owner or contributors be liable for any direct, indirect, incidental,
special, exemplary, or consequential damages (including, but not
limited to, procurement of substitute goods or services; loss of use,
data, or profits; or business interruption) however caused and on any
theory of liability, whether in contract, strict liability, or tort
(including negligence or otherwise) arising in any way out of the use
of this software, even if advised of the possibility of such damage.
.. _Chad Miller: http://web.chad.org/
"""
import re
def sphinx_smarty_pants(t):
t = t.replace('"', '"')
t = educateDashesOldSchool(t)
t = educateQuotes(t)
t = t.replace('"', '"')
return t
# Constants for quote education.
punct_class = r"""[!"#\$\%'()*+,-.\/:;<=>?\@\[\\\]\^_`{|}~]"""
close_class = r"""[^\ \t\r\n\[\{\(\-]"""
dec_dashes = r"""–|—"""
# Special case if the very first character is a quote
# followed by punctuation at a non-word-break. Close the quotes by brute force:
single_quote_start_re = re.compile(r"""^'(?=%s\\B)""" % (punct_class,))
double_quote_start_re = re.compile(r"""^"(?=%s\\B)""" % (punct_class,))
# Special case for double sets of quotes, e.g.:
# <p>He said, "'Quoted' words in a larger quote."</p>
double_quote_sets_re = re.compile(r""""'(?=\w)""")
single_quote_sets_re = re.compile(r"""'"(?=\w)""")
# Special case for decade abbreviations (the '80s):
decade_abbr_re = re.compile(r"""\b'(?=\d{2}s)""")
# Get most opening double quotes:
opening_double_quotes_regex = re.compile(r"""
(
\s | # a whitespace char, or
| # a non-breaking space entity, or
-- | # dashes, or
&[mn]dash; | # named dash entities
%s | # or decimal entities
&\#x201[34]; # or hex
)
" # the quote
(?=\w) # followed by a word character
""" % (dec_dashes,), re.VERBOSE)
# Double closing quotes:
closing_double_quotes_regex = re.compile(r"""
#(%s)? # character that indicates the quote should be closing
"
(?=\s)
""" % (close_class,), re.VERBOSE)
closing_double_quotes_regex_2 = re.compile(r"""
(%s) # character that indicates the quote should be closing
"
""" % (close_class,), re.VERBOSE)
# Get most opening single quotes:
opening_single_quotes_regex = re.compile(r"""
(
\s | # a whitespace char, or
| # a non-breaking space entity, or
-- | # dashes, or
&[mn]dash; | # named dash entities
%s | # or decimal entities
&\#x201[34]; # or hex
)
' # the quote
(?=\w) # followed by a word character
""" % (dec_dashes,), re.VERBOSE)
closing_single_quotes_regex = re.compile(r"""
(%s)
'
(?!\s | s\b | \d)
""" % (close_class,), re.VERBOSE)
closing_single_quotes_regex_2 = re.compile(r"""
(%s)
'
(\s | s\b)
""" % (close_class,), re.VERBOSE)
def educateQuotes(s):
"""
Parameter: String.
Returns: The string, with "educated" curly quote HTML entities.
Example input: "Isn't this fun?"
Example output: “Isn’t this fun?”
"""
# Special case if the very first character is a quote
# followed by punctuation at a non-word-break. Close the quotes by brute force:
s = single_quote_start_re.sub("’", s)
s = double_quote_start_re.sub("”", s)
# Special case for double sets of quotes, e.g.:
# <p>He said, "'Quoted' words in a larger quote."</p>
s = double_quote_sets_re.sub("“‘", s)
s = single_quote_sets_re.sub("‘“", s)
# Special case for decade abbreviations (the '80s):
s = decade_abbr_re.sub("’", s)
s = opening_single_quotes_regex.sub(r"\1‘", s)
s = closing_single_quotes_regex.sub(r"\1’", s)
s = closing_single_quotes_regex_2.sub(r"\1’\2", s)
# Any remaining single quotes should be opening ones:
s = s.replace("'", "‘")
s = opening_double_quotes_regex.sub(r"\1“", s)
s = closing_double_quotes_regex.sub(r"”", s)
s = closing_double_quotes_regex_2.sub(r"\1”", s)
# Any remaining quotes should be opening ones.
return s.replace('"', "“")
def educateQuotesLatex(s, dquotes=("``", "''")):
"""
Parameter: String.
Returns: The string, with double quotes corrected to LaTeX quotes.
Example input: "Isn't this fun?"
Example output: ``Isn't this fun?'';
"""
# Special case if the very first character is a quote
# followed by punctuation at a non-word-break. Close the quotes by brute force:
s = single_quote_start_re.sub("\x04", s)
s = double_quote_start_re.sub("\x02", s)
# Special case for double sets of quotes, e.g.:
# <p>He said, "'Quoted' words in a larger quote."</p>
s = double_quote_sets_re.sub("\x01\x03", s)
s = single_quote_sets_re.sub("\x03\x01", s)
# Special case for decade abbreviations (the '80s):
s = decade_abbr_re.sub("\x04", s)
s = opening_single_quotes_regex.sub("\\1\x03", s)
s = closing_single_quotes_regex.sub("\\1\x04", s)
s = closing_single_quotes_regex_2.sub("\\1\x04\\2", s)
# Any remaining single quotes should be opening ones:
s = s.replace("'", "\x03")
s = opening_double_quotes_regex.sub("\\1\x01", s)
s = closing_double_quotes_regex.sub("\x02", s)
s = closing_double_quotes_regex_2.sub("\\1\x02", s)
# Any remaining quotes should be opening ones.
s = s.replace('"', "\x01")
# Finally, replace all helpers with quotes.
return s.replace("\x01", dquotes[0]).replace("\x02", dquotes[1]).\
replace("\x03", "`").replace("\x04", "'")
def educateBackticks(s):
"""
Parameter: String.
Returns: The string, with ``backticks'' -style double quotes
translated into HTML curly quote entities.
Example input: ``Isn't this fun?''
Example output: “Isn't this fun?”
"""
return s.replace("``", "“").replace("''", "”")
def educateSingleBackticks(s):
"""
Parameter: String.
Returns: The string, with `backticks' -style single quotes
translated into HTML curly quote entities.
Example input: `Isn't this fun?'
Example output: ‘Isn’t this fun?’
"""
return s.replace('`', "‘").replace("'", "’")
def educateDashesOldSchool(s):
"""
Parameter: String.
Returns: The string, with each instance of "--" translated to
an en-dash HTML entity, and each "---" translated to
an em-dash HTML entity.
"""
return s.replace('---', "—").replace('--', "–")
def educateDashesOldSchoolInverted(s):
"""
Parameter: String.
Returns: The string, with each instance of "--" translated to
an em-dash HTML entity, and each "---" translated to
an en-dash HTML entity. Two reasons why: First, unlike the
en- and em-dash syntax supported by
EducateDashesOldSchool(), it's compatible with existing
entries written before SmartyPants 1.1, back when "--" was
only used for em-dashes. Second, em-dashes are more
common than en-dashes, and so it sort of makes sense that
the shortcut should be shorter to type. (Thanks to Aaron
Swartz for the idea.)
"""
return s.replace('---', "–").replace('--', "—")
def educateEllipses(s):
"""
Parameter: String.
Returns: The string, with each instance of "..." translated to
an ellipsis HTML entity.
Example input: Huh...?
Example output: Huh…?
"""
return s.replace('...', "…").replace('. . .', "…")
__author__ = "Chad Miller <smartypantspy@chad.org>"
__version__ = "1.5_1.5: Sat, 13 Aug 2005 15:50:24 -0400"
__url__ = "http://wiki.chad.org/SmartyPantsPy"
__description__ = \
"Smart-quotes, smart-ellipses, and smart-dashes for weblog entries in pyblosxom"
|
donkirkby/django | refs/heads/master | django/core/serializers/json.py | 320 | """
Serialize data to/from JSON
"""
# Avoid shadowing the standard library json module
from __future__ import absolute_import, unicode_literals
import datetime
import decimal
import json
import sys
import uuid
from django.core.serializers.base import DeserializationError
from django.core.serializers.python import (
Deserializer as PythonDeserializer, Serializer as PythonSerializer,
)
from django.utils import six
from django.utils.timezone import is_aware
class Serializer(PythonSerializer):
"""
Convert a queryset to JSON.
"""
internal_use_only = False
def _init_options(self):
if json.__version__.split('.') >= ['2', '1', '3']:
# Use JS strings to represent Python Decimal instances (ticket #16850)
self.options.update({'use_decimal': False})
self._current = None
self.json_kwargs = self.options.copy()
self.json_kwargs.pop('stream', None)
self.json_kwargs.pop('fields', None)
if self.options.get('indent'):
# Prevent trailing spaces
self.json_kwargs['separators'] = (',', ': ')
def start_serialization(self):
self._init_options()
self.stream.write("[")
def end_serialization(self):
if self.options.get("indent"):
self.stream.write("\n")
self.stream.write("]")
if self.options.get("indent"):
self.stream.write("\n")
def end_object(self, obj):
# self._current has the field data
indent = self.options.get("indent")
if not self.first:
self.stream.write(",")
if not indent:
self.stream.write(" ")
if indent:
self.stream.write("\n")
json.dump(self.get_dump_object(obj), self.stream,
cls=DjangoJSONEncoder, **self.json_kwargs)
self._current = None
def getvalue(self):
# Grand-parent super
return super(PythonSerializer, self).getvalue()
def Deserializer(stream_or_string, **options):
"""
Deserialize a stream or string of JSON data.
"""
if not isinstance(stream_or_string, (bytes, six.string_types)):
stream_or_string = stream_or_string.read()
if isinstance(stream_or_string, bytes):
stream_or_string = stream_or_string.decode('utf-8')
try:
objects = json.loads(stream_or_string)
for obj in PythonDeserializer(objects, **options):
yield obj
except GeneratorExit:
raise
except Exception as e:
# Map to deserializer error
six.reraise(DeserializationError, DeserializationError(e), sys.exc_info()[2])
class DjangoJSONEncoder(json.JSONEncoder):
"""
JSONEncoder subclass that knows how to encode date/time, decimal types and UUIDs.
"""
def default(self, o):
# See "Date Time String Format" in the ECMA-262 specification.
if isinstance(o, datetime.datetime):
r = o.isoformat()
if o.microsecond:
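                # ECMA-262 allows at most millisecond precision: an isoformat()
                # value such as "2016-01-01T12:00:00.123456" is trimmed to
                # "2016-01-01T12:00:00.123" by dropping the last three digits.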
r = r[:23] + r[26:]
if r.endswith('+00:00'):
r = r[:-6] + 'Z'
return r
elif isinstance(o, datetime.date):
return o.isoformat()
elif isinstance(o, datetime.time):
if is_aware(o):
raise ValueError("JSON can't represent timezone-aware times.")
r = o.isoformat()
if o.microsecond:
r = r[:12]
return r
elif isinstance(o, decimal.Decimal):
return str(o)
elif isinstance(o, uuid.UUID):
return str(o)
else:
return super(DjangoJSONEncoder, self).default(o)
# Older, deprecated class name (for backwards compatibility purposes).
DateTimeAwareJSONEncoder = DjangoJSONEncoder
|
atheed/servo | refs/heads/master | tests/wpt/web-platform-tests/fetch/api/resources/method.py | 161 | def main(request, response):
headers = []
if "cors" in request.GET:
headers.append(("Access-Control-Allow-Origin", "*"))
headers.append(("Access-Control-Allow-Credentials", "true"))
headers.append(("Access-Control-Allow-Methods", "GET, POST, PUT, FOO"))
headers.append(("Access-Control-Allow-Headers", "x-test, x-foo"))
headers.append(("Access-Control-Expose-Headers", "x-request-method"))
headers.append(("x-request-method", request.method))
return headers, request.body
|
Vishluck/sympy | refs/heads/master | sympy/series/tests/test_lseries.py | 121 | from sympy import sin, cos, exp, tanh, E, S, Order
from sympy.abc import x, y
def test_sin():
e = sin(x).lseries(x)
assert next(e) == x
assert next(e) == -x**3/6
assert next(e) == x**5/120
def test_cos():
e = cos(x).lseries(x)
assert next(e) == 1
assert next(e) == -x**2/2
assert next(e) == x**4/24
def test_exp():
e = exp(x).lseries(x)
assert next(e) == 1
assert next(e) == x
assert next(e) == x**2/2
assert next(e) == x**3/6
def test_exp2():
e = exp(cos(x)).lseries(x)
assert next(e) == E
assert next(e) == -E*x**2/2
assert next(e) == E*x**4/6
assert next(e) == -31*E*x**6/720
def test_simple():
assert [t for t in x.lseries()] == [x]
assert [t for t in S.One.lseries(x)] == [1]
assert not next((x/(x + y)).lseries(y)).has(Order)
def test_issue_5183():
s = (x + 1/x).lseries()
assert [si for si in s] == [1/x, x]
assert next((x + x**2).lseries()) == x
assert next(((1 + x)**7).lseries(x)) == 1
assert next((sin(x + y)).series(x, n=3).lseries(y)) == x
# it would be nice if all terms were grouped, but in the
# following case that would mean that all the terms would have
# to be known since, for example, every term has a constant in it.
s = ((1 + x)**7).series(x, 1, n=None)
assert [next(s) for i in range(2)] == [128, -448 + 448*x]
def test_issue_6999():
s = tanh(x).lseries(x, 1)
assert next(s) == tanh(1)
assert next(s) == x - (x - 1)*tanh(1)**2 - 1
assert next(s) == -(x - 1)**2*tanh(1) + (x - 1)**2*tanh(1)**3
assert next(s) == -(x - 1)**3*tanh(1)**4 - (x - 1)**3/3 + \
4*(x - 1)**3*tanh(1)**2/3
|
yuanagain/seniorthesis | refs/heads/master | venv/lib/python2.7/site-packages/numpy/f2py/tests/util.py | 66 | """
Utility functions for
- building and importing modules at test time, using a temporary location
- detecting if compilers are present
"""
from __future__ import division, absolute_import, print_function
import os
import sys
import subprocess
import tempfile
import shutil
import atexit
import textwrap
import re
import random
from numpy.compat import asbytes, asstr
import numpy.f2py
from numpy.testing import SkipTest, temppath
try:
from hashlib import md5
except ImportError:
from md5 import new as md5
#
# Maintaining a temporary module directory
#
_module_dir = None
def _cleanup():
global _module_dir
if _module_dir is not None:
try:
sys.path.remove(_module_dir)
except ValueError:
pass
try:
shutil.rmtree(_module_dir)
except (IOError, OSError):
pass
_module_dir = None
def get_module_dir():
global _module_dir
if _module_dir is None:
_module_dir = tempfile.mkdtemp()
atexit.register(_cleanup)
if _module_dir not in sys.path:
sys.path.insert(0, _module_dir)
return _module_dir
def get_temp_module_name():
# Assume single-threaded, and the module dir usable only by this thread
d = get_module_dir()
for j in range(5403, 9999999):
name = "_test_ext_module_%d" % j
fn = os.path.join(d, name)
if name not in sys.modules and not os.path.isfile(fn + '.py'):
return name
raise RuntimeError("Failed to create a temporary module name")
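# Memoization helper: results (and any raised exception) are cached keyed on
# the repr() of the call arguments, so identical build requests within one
# test run trigger only a single compilation.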
def _memoize(func):
memo = {}
def wrapper(*a, **kw):
key = repr((a, kw))
if key not in memo:
try:
memo[key] = func(*a, **kw)
except Exception as e:
memo[key] = e
raise
ret = memo[key]
if isinstance(ret, Exception):
raise ret
return ret
wrapper.__name__ = func.__name__
return wrapper
#
# Building modules
#
@_memoize
def build_module(source_files, options=[], skip=[], only=[], module_name=None):
"""
Compile and import a f2py module, built from the given files.
"""
code = ("import sys; sys.path = %s; import numpy.f2py as f2py2e; "
"f2py2e.main()" % repr(sys.path))
d = get_module_dir()
# Copy files
dst_sources = []
for fn in source_files:
if not os.path.isfile(fn):
raise RuntimeError("%s is not a file" % fn)
dst = os.path.join(d, os.path.basename(fn))
shutil.copyfile(fn, dst)
dst_sources.append(dst)
fn = os.path.join(os.path.dirname(fn), '.f2py_f2cmap')
if os.path.isfile(fn):
dst = os.path.join(d, os.path.basename(fn))
if not os.path.isfile(dst):
shutil.copyfile(fn, dst)
# Prepare options
if module_name is None:
module_name = get_temp_module_name()
f2py_opts = ['-c', '-m', module_name] + options + dst_sources
if skip:
f2py_opts += ['skip:'] + skip
if only:
f2py_opts += ['only:'] + only
# Build
cwd = os.getcwd()
try:
os.chdir(d)
cmd = [sys.executable, '-c', code] + f2py_opts
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, err = p.communicate()
if p.returncode != 0:
raise RuntimeError("Running f2py failed: %s\n%s"
% (cmd[4:], asstr(out)))
finally:
os.chdir(cwd)
# Partial cleanup
for fn in dst_sources:
os.unlink(fn)
# Import
__import__(module_name)
return sys.modules[module_name]
@_memoize
def build_code(source_code, options=[], skip=[], only=[], suffix=None,
module_name=None):
"""
Compile and import Fortran code using f2py.
"""
if suffix is None:
suffix = '.f'
with temppath(suffix=suffix) as path:
with open(path, 'w') as f:
f.write(source_code)
return build_module([path], options=options, skip=skip, only=only,
module_name=module_name)
#
# Check if compilers are available at all...
#
_compiler_status = None
def _get_compiler_status():
global _compiler_status
if _compiler_status is not None:
return _compiler_status
_compiler_status = (False, False, False)
# XXX: this is really ugly. But I don't know how to invoke Distutils
# in a safer way...
code = """
import os
import sys
sys.path = %(syspath)s
def configuration(parent_name='',top_path=None):
global config
from numpy.distutils.misc_util import Configuration
config = Configuration('', parent_name, top_path)
return config
from numpy.distutils.core import setup
setup(configuration=configuration)
config_cmd = config.get_config_cmd()
have_c = config_cmd.try_compile('void foo() {}')
print('COMPILERS:%%d,%%d,%%d' %% (have_c,
config.have_f77c(),
config.have_f90c()))
sys.exit(99)
"""
code = code % dict(syspath=repr(sys.path))
with temppath(suffix='.py') as script:
with open(script, 'w') as f:
f.write(code)
cmd = [sys.executable, script, 'config']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, err = p.communicate()
m = re.search(asbytes(r'COMPILERS:(\d+),(\d+),(\d+)'), out)
if m:
_compiler_status = (bool(int(m.group(1))), bool(int(m.group(2))),
bool(int(m.group(3))))
# Finished
return _compiler_status
def has_c_compiler():
return _get_compiler_status()[0]
def has_f77_compiler():
return _get_compiler_status()[1]
def has_f90_compiler():
return _get_compiler_status()[2]
#
# Building with distutils
#
@_memoize
def build_module_distutils(source_files, config_code, module_name, **kw):
"""
Build a module via distutils and import it.
"""
from numpy.distutils.misc_util import Configuration
from numpy.distutils.core import setup
d = get_module_dir()
# Copy files
dst_sources = []
for fn in source_files:
if not os.path.isfile(fn):
raise RuntimeError("%s is not a file" % fn)
dst = os.path.join(d, os.path.basename(fn))
shutil.copyfile(fn, dst)
dst_sources.append(dst)
# Build script
config_code = textwrap.dedent(config_code).replace("\n", "\n ")
code = """\
import os
import sys
sys.path = %(syspath)s
def configuration(parent_name='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('', parent_name, top_path)
%(config_code)s
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(configuration=configuration)
""" % dict(config_code=config_code, syspath=repr(sys.path))
script = os.path.join(d, get_temp_module_name() + '.py')
dst_sources.append(script)
f = open(script, 'wb')
f.write(asbytes(code))
f.close()
# Build
cwd = os.getcwd()
try:
os.chdir(d)
cmd = [sys.executable, script, 'build_ext', '-i']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, err = p.communicate()
if p.returncode != 0:
raise RuntimeError("Running distutils build failed: %s\n%s"
% (cmd[4:], asstr(out)))
finally:
os.chdir(cwd)
# Partial cleanup
for fn in dst_sources:
os.unlink(fn)
# Import
__import__(module_name)
return sys.modules[module_name]
#
# Unittest convenience
#
class F2PyTest(object):
code = None
sources = None
options = []
skip = []
only = []
suffix = '.f'
module = None
module_name = None
def setUp(self):
if self.module is not None:
return
# Check compiler availability first
if not has_c_compiler():
raise SkipTest("No C compiler available")
codes = []
if self.sources:
codes.extend(self.sources)
if self.code is not None:
codes.append(self.suffix)
needs_f77 = False
needs_f90 = False
for fn in codes:
if fn.endswith('.f'):
needs_f77 = True
elif fn.endswith('.f90'):
needs_f90 = True
if needs_f77 and not has_f77_compiler():
raise SkipTest("No Fortran 77 compiler available")
if needs_f90 and not has_f90_compiler():
raise SkipTest("No Fortran 90 compiler available")
# Build the module
if self.code is not None:
self.module = build_code(self.code, options=self.options,
skip=self.skip, only=self.only,
suffix=self.suffix,
module_name=self.module_name)
if self.sources is not None:
self.module = build_module(self.sources, options=self.options,
skip=self.skip, only=self.only,
module_name=self.module_name)
|
dzbarsky/servo | refs/heads/master | tests/wpt/web-platform-tests/websockets/cookies/support/set-cookie.py | 249 | import urllib
def main(request, response):
response.headers.set('Set-Cookie', urllib.unquote(request.url_parts.query))
return [("Content-Type", "text/plain")], ""
|
danieldanciu/schoggi | refs/heads/master | tests/functional/student_answers.py | 6 | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Analytics for extracting facts based on StudentAnswerEntity entries."""
__author__ = 'Mike Gainer (mgainer@google.com)'
import collections
from common import crypto
from common import utils as common_utils
from models import courses
from models import models
from models import transforms
from models.data_sources import utils as data_sources_utils
from tests.functional import actions
from google.appengine.ext import db
COURSE_NAME = 'test_course'
COURSE_TITLE = 'Test Course'
ADMIN_EMAIL = 'test@example.com'
AssessmentDef = collections.namedtuple('AssessmentDef',
['unit_id', 'title', 'html_content'])
EntityDef = collections.namedtuple('EntityDef',
['entity_class', 'entity_id',
'entity_key_name', 'data'])
ASSESSMENTS = [
AssessmentDef(
1, 'One Question',
'<question quid="4785074604081152" weight="1" '
'instanceid="8TvGgbrrbZ49"></question><br>'),
AssessmentDef(
2, 'Groups And Questions',
'<question quid="5066549580791808" weight="1" '
'instanceid="zsgZ8dUMvJjz"></question><br>'
'<question quid="5629499534213120" weight="1" '
'instanceid="YlGaKQ2mnOPG"></question><br>'
'<question-group qgid="5348024557502464" '
'instanceid="YpoECeTunEpj"></question-group><br>'
'<question-group qgid="5910974510923776" '
'instanceid="FcIh3jyWOTbP"></question-group><br>'),
AssessmentDef(
3, 'All Questions',
'<question quid="6192449487634432" weight="1" '
'instanceid="E5P0a0bFB0EH"></question><br>'
'<question quid="5629499534213120" weight="1" '
'instanceid="DlfLRsko2QHb"></question><br>'
'<question quid="5066549580791808" weight="1" '
'instanceid="hGrEjnP13pMA"></question><br>'
'<question quid="4785074604081152" weight="1" '
'instanceid="knWukHJApaQh"></question><br>'),
]
ENTITIES = [
# Questions -----------------------------------------------------------
EntityDef(
models.QuestionEntity, 4785074604081152, None,
'{"question": "To produce maximum generosity, what should be the '
'overall shape of the final protrusion?", "rows": 1, "columns": 1'
'00, "defaultFeedback": "", "graders": [{"matcher": "case_insensi'
'tive", "feedback": "", "score": "0.7", "response": "oblong"}, {"'
'matcher": "case_insensitive", "feedback": "", "score": "0.3", "r'
'esponse": "extended"}], "type": 1, "description": "Maximum gener'
'osity protrusion shape", "version": "1.5", "hint": ""}'),
EntityDef(
models.QuestionEntity, 5066549580791808, None,
'{"question": "Describe the shape of a standard trepanning hammer'
'", "multiple_selections": false, "choices": [{"feedback": "", "s'
'core": 0.0, "text": "Round"}, {"feedback": "", "score": 0.0, "te'
'xt": "Square"}, {"feedback": "", "score": 1.0, "text": "Diamond"'
'}, {"feedback": "", "score": 0.0, "text": "Pyramid"}], "type": 0'
', "description": "Trepanning hammer shape", "version": "1.5"}'),
EntityDef(
models.QuestionEntity, 5629499534213120, None,
'{"question": "Describe an appropriate bedside manner for post-tr'
'eatment patient interaction", "rows": 1, "columns": 100, "defaul'
'tFeedback": "", "graders": [{"matcher": "case_insensitive", "fee'
'dback": "", "score": "1.0", "response": "gentle"}, {"matcher": "'
'case_insensitive", "feedback": "", "score": "0.8", "response": "'
'caring"}], "type": 1, "description": "Post-treatement interactio'
'n", "version": "1.5", "hint": ""}'),
EntityDef(
models.QuestionEntity, 6192449487634432, None,
'{"question": "When making a personality shift, how hard should t'
'he strike be?", "multiple_selections": true, "choices": [{"feedb'
'ack": "", "score": -1.0, "text": "Light"}, {"feedback": "", "sco'
're": 0.7, "text": "Medium"}, {"feedback": "", "score": 0.3, "tex'
't": "Heavy"}, {"feedback": "", "score": -1.0, "text": "Crushing"'
'}], "type": 0, "description": "Personality shift strike strength'
'", "version": "1.5"}'),
# Question Groups -----------------------------------------------------
EntityDef(
models.QuestionGroupEntity, 5348024557502464, None,
'{"description": "One MC, one SA", "introduction": "", "version":'
'"1.5", "items": [{"question": 5066549580791808, "weight": "1"}, '
'{"question": 6192449487634432, "weight": "1"}]}'),
EntityDef(
models.QuestionGroupEntity, 5910974510923776, None,
'{"description": "All Questions", "introduction": "All questions"'
', "version": "1.5", "items": [{"question": 4785074604081152, "we'
'ight": "0.25"}, {"question": 5066549580791808, "weight": "0.25"}'
', {"question": 5629499534213120, "weight": "0.25"}, {"question":'
'6192449487634432, "weight": "0.25"}]}'),
# Student Answers -----------------------------------------------------
EntityDef(
models.StudentAnswersEntity, None, '115715231223232197316',
'{"3": {"version": "1.5", "containedTypes": {"DlfLRsko2QHb": "SaQ'
'uestion", "E5P0a0bFB0EH": "McQuestion", "hGrEjnP13pMA": "McQuest'
'ion", "knWukHJApaQh": "SaQuestion"}, "hGrEjnP13pMA": [true, fals'
'e, false, false], "knWukHJApaQh": {"response": "fronk"}, "DlfLRs'
'ko2QHb": {"response": "phleem"}, "answers": {"DlfLRsko2QHb": "ph'
'leem", "E5P0a0bFB0EH": [1], "hGrEjnP13pMA": [0], "knWukHJApaQh":'
'"fronk"}, "E5P0a0bFB0EH": [false, true, false, false], "individu'
'alScores": {"DlfLRsko2QHb": 0, "E5P0a0bFB0EH": 0.7, "hGrEjnP13pM'
'A": 0, "knWukHJApaQh": 0}}, "2": {"version": "1.5", "containedTy'
'pes": {"zsgZ8dUMvJjz": "McQuestion", "FcIh3jyWOTbP": ["SaQuestio'
'n", "McQuestion", "SaQuestion", "McQuestion"], "YlGaKQ2mnOPG": "'
'SaQuestion", "YpoECeTunEpj": ["McQuestion", "McQuestion"]}, "ans'
'wers": {"zsgZ8dUMvJjz": [1], "FcIh3jyWOTbP": ["round", [1], "col'
'd", [3]], "YlGaKQ2mnOPG": "gentle", "YpoECeTunEpj": [[2], [1]]},'
'"FcIh3jyWOTbP": {"FcIh3jyWOTbP.2.5629499534213120": {"response":'
'"cold"}, "FcIh3jyWOTbP.1.5066549580791808": [false, true, false,'
'false], "FcIh3jyWOTbP.3.6192449487634432": [false, false, false,'
'true], "FcIh3jyWOTbP.0.4785074604081152": {"response": "round"}}'
', "YlGaKQ2mnOPG": {"response": "gentle"}, "zsgZ8dUMvJjz": [false'
',true, false, false], "individualScores": {"zsgZ8dUMvJjz": 0, "F'
'cIh3jyWOTbP": [0, 0, 0, 0], "YlGaKQ2mnOPG": 1, "YpoECeTunEpj": ['
'1, 0.7]}, "YpoECeTunEpj": {"YpoECeTunEpj.0.5066549580791808": [f'
'alse, false, true, false], "YpoECeTunEpj.1.6192449487634432": [f'
'alse, true, false, false]}}, "1": {"containedTypes": {"8TvGgbrrb'
'Z49": "SaQuestion"}, "version": "1.5", "answers": {"8TvGgbrrbZ49'
'": "oblong"}, "individualScores": {"8TvGgbrrbZ49": 0.7}, "8TvGgb'
'rrbZ49": {"response": "oblong"}}}'),
EntityDef(
models.StudentAnswersEntity, None, '187186200184131193542',
'{"3": {"version": "1.5", "containedTypes": {"DlfLRsko2QHb": "SaQ'
'uestion", "E5P0a0bFB0EH": "McQuestion", "hGrEjnP13pMA": "McQuest'
'ion", "knWukHJApaQh": "SaQuestion"}, "hGrEjnP13pMA": [false, tru'
'e, false, false], "knWukHJApaQh": {"response": "square"}, "DlfLR'
'sko2QHb": {"response": "caring"}, "answers": {"DlfLRsko2QHb": "c'
'aring", "E5P0a0bFB0EH": [1], "hGrEjnP13pMA": [1], "knWukHJApaQh"'
': "square"}, "E5P0a0bFB0EH": [false, true, false, false], "indiv'
'idualScores": {"DlfLRsko2QHb": 0.8, "E5P0a0bFB0EH": 0.7, "hGrEjn'
'P13pMA": 0, "knWukHJApaQh": 0}}, "2": {"version": "1.5", "contai'
'nedTypes": {"zsgZ8dUMvJjz": "McQuestion", "FcIh3jyWOTbP": ["SaQu'
'estion", "McQuestion", "SaQuestion", "McQuestion"], "YlGaKQ2mnOP'
'G": "SaQuestion", "YpoECeTunEpj": ["McQuestion", "McQuestion"]},'
' "answers": {"zsgZ8dUMvJjz": [3], "FcIh3jyWOTbP": ["spazzle", [3'
'], "gloonk", [3]], "YlGaKQ2mnOPG": "frink", "YpoECeTunEpj": [[0]'
', [0]]}, "FcIh3jyWOTbP": {"FcIh3jyWOTbP.2.5629499534213120": {"r'
'esponse": "gloonk"}, "FcIh3jyWOTbP.1.5066549580791808": [false, '
'false, false, true], "FcIh3jyWOTbP.3.6192449487634432": [false, '
'false, false, true], "FcIh3jyWOTbP.0.4785074604081152": {"respon'
'se": "spazzle"}}, "YlGaKQ2mnOPG": {"response": "frink"}, "zsgZ8d'
'UMvJjz": [false, false, false, true], "individualScores": {"zsgZ'
'8dUMvJjz": 0, "FcIh3jyWOTbP": [0, 0, 0, 0], "YlGaKQ2mnOPG": 0, "'
'YpoECeTunEpj": [0, 0]}, "YpoECeTunEpj": {"YpoECeTunEpj.0.5066549'
'580791808": [true, false, false, false], "YpoECeTunEpj.1.6192449'
'487634432": [true, false, false, false]}}, "1": {"containedTypes'
'": {"8TvGgbrrbZ49": "SaQuestion"}, "version": "1.5", "answers": '
'{"8TvGgbrrbZ49": "spalpeen"}, "individualScores": {"8TvGgbrrbZ49'
'": 0}, "8TvGgbrrbZ49": {"response": "spalpeen"}}}'),
]
EXPECTED_COURSE_UNITS = [
{
'title': 'One Question',
'unit_id': 1,
'now_available': True,
'type': 'A',
},
{
'title': 'Groups And Questions',
'unit_id': 2,
'now_available': True,
'type': 'A',
},
{
'title': 'All Questions',
'unit_id': 3,
'now_available': True,
'type': 'A',
}
]
EXPECTED_QUESTIONS = [
{
'question_id': 4785074604081152,
'description': 'Maximum generosity protrusion shape',
'choices': []
},
{
'question_id': 5066549580791808,
'description': 'Trepanning hammer shape',
'choices': ['Round', 'Square', 'Diamond', 'Pyramid']
},
{
'question_id': 5629499534213120,
'description': 'Post-treatement interaction',
'choices': []
},
{
'question_id': 6192449487634432,
'description': 'Personality shift strike strength',
'choices': ['Light', 'Medium', 'Heavy', 'Crushing']
}
]
EXPECTED_ANSWERS = [
{'unit_id': 1, 'sequence': 0, 'count': 1, 'is_valid': True,
'answer': 'oblong', 'question_id': 4785074604081152},
{'unit_id': 1, 'sequence': 0, 'count': 1, 'is_valid': False,
'answer': 'spalpeen', 'question_id': 4785074604081152},
{'unit_id': 2, 'sequence': 0, 'count': 1, 'is_valid': True,
'answer': 1, 'question_id': 5066549580791808},
{'unit_id': 2, 'sequence': 0, 'count': 1, 'is_valid': True,
'answer': 3, 'question_id': 5066549580791808},
{'unit_id': 2, 'sequence': 1, 'count': 1, 'is_valid': True,
'answer': 'gentle', 'question_id': 5629499534213120},
{'unit_id': 2, 'sequence': 1, 'count': 1, 'is_valid': False,
'answer': 'frink', 'question_id': 5629499534213120},
{'unit_id': 2, 'sequence': 2, 'count': 1, 'is_valid': True,
'answer': 0, 'question_id': 5066549580791808},
{'unit_id': 2, 'sequence': 2, 'count': 1, 'is_valid': True,
'answer': 2, 'question_id': 5066549580791808},
{'unit_id': 2, 'sequence': 3, 'count': 1, 'is_valid': True,
'answer': 0, 'question_id': 6192449487634432},
{'unit_id': 2, 'sequence': 3, 'count': 1, 'is_valid': True,
'answer': 1, 'question_id': 6192449487634432},
{'unit_id': 2, 'sequence': 4, 'count': 1, 'is_valid': False,
'answer': 'round', 'question_id': 4785074604081152},
{'unit_id': 2, 'sequence': 4, 'count': 1, 'is_valid': False,
'answer': 'spazzle', 'question_id': 4785074604081152},
{'unit_id': 2, 'sequence': 5, 'count': 1, 'is_valid': True,
'answer': 1, 'question_id': 5066549580791808},
{'unit_id': 2, 'sequence': 5, 'count': 1, 'is_valid': True,
'answer': 3, 'question_id': 5066549580791808},
{'unit_id': 2, 'sequence': 6, 'count': 1, 'is_valid': False,
'answer': 'cold', 'question_id': 5629499534213120},
{'unit_id': 2, 'sequence': 6, 'count': 1, 'is_valid': False,
'answer': 'gloonk', 'question_id': 5629499534213120},
{'unit_id': 2, 'sequence': 7, 'count': 2, 'is_valid': True,
'answer': 3, 'question_id': 6192449487634432},
{'unit_id': 3, 'sequence': 0, 'count': 2, 'is_valid': True,
'answer': 1, 'question_id': 6192449487634432},
{'unit_id': 3, 'sequence': 1, 'count': 1, 'is_valid': True,
'answer': 'caring', 'question_id': 5629499534213120},
{'unit_id': 3, 'sequence': 1, 'count': 1, 'is_valid': False,
'answer': 'phleem', 'question_id': 5629499534213120},
{'unit_id': 3, 'sequence': 2, 'count': 1, 'is_valid': True,
'answer': 0, 'question_id': 5066549580791808},
{'unit_id': 3, 'sequence': 2, 'count': 1, 'is_valid': True,
'answer': 1, 'question_id': 5066549580791808},
{'unit_id': 3, 'sequence': 3, 'count': 1, 'is_valid': False,
'answer': 'fronk', 'question_id': 4785074604081152},
{'unit_id': 3, 'sequence': 3, 'count': 1, 'is_valid': False,
'answer': 'square', 'question_id': 4785074604081152},
]
class StudentAnswersAnalyticsTest(actions.TestBase):
def setUp(self):
super(StudentAnswersAnalyticsTest, self).setUp()
self.context = actions.simple_add_course(COURSE_NAME, ADMIN_EMAIL,
COURSE_TITLE)
self.course = courses.Course(None, self.context)
for assessment in ASSESSMENTS:
self._add_assessment(self.course, assessment)
self.course.save()
for entity in ENTITIES:
self._add_entity(self.context, entity)
def _add_assessment(self, course, assessment_def):
assessment = course.add_assessment()
assessment.unit_id = assessment_def.unit_id
assessment.title = assessment_def.title
assessment.now_available = True
assessment.html_content = assessment_def.html_content
def _add_entity(self, context, entity):
with common_utils.Namespace(context.get_namespace_name()):
if entity.entity_id:
key = db.Key.from_path(entity.entity_class.__name__,
entity.entity_id)
to_store = entity.entity_class(data=entity.data, key=key)
else:
to_store = entity.entity_class(key_name=entity.entity_key_name,
data=entity.data)
to_store.put()
def _get_data_source(self, source_name):
xsrf_token = crypto.XsrfTokenManager.create_xsrf_token(
data_sources_utils.DATA_SOURCE_ACCESS_XSRF_ACTION)
url = ('/test_course/rest/data/%s/items?' % source_name +
'data_source_token=%s&page_number=0' % xsrf_token)
response = self.get(url)
return transforms.loads(response.body)['data']
def _verify_content(self, expected, actual):
for expected_item, actual_item in zip(expected, actual):
self.assertDictContainsSubset(expected_item, actual_item)
def test_end_to_end(self):
actions.login(ADMIN_EMAIL, is_admin=True)
# Start map/reduce analysis job.
response = self.get(
'/test_course/dashboard?action=analytics&tab=questions')
form = response.forms['gcb-run-visualization-question_answers']
self.submit(form, response)
# Wait for map/reduce to run to completion.
self.execute_all_deferred_tasks()
# Verify output.
course_units = self._get_data_source('course_units')
self._verify_content(EXPECTED_COURSE_UNITS, course_units)
course_questions = self._get_data_source('course_questions')
self._verify_content(EXPECTED_QUESTIONS, course_questions)
question_answers = self._get_data_source('question_answers')
self._verify_content(EXPECTED_ANSWERS, question_answers)
|
mediatum/mediatum | refs/heads/master | web/admin/views/__init__.py | 1 | # -*- coding: utf-8 -*-
"""
web.admin.views
~~~~~~~~~~~~~~~~~~
this package is part of mediatum - a multimedia content repository
:copyright: (c) 2016 by the mediaTUM authors
:license: GPL3, see COPYING for details
"""
from __future__ import absolute_import
from core import db
from flask_admin.contrib.sqla import ModelView
from flask_login import current_user
class BaseAdminView(ModelView):
"""Basic settings for all our admin views. All views should inherit this.
"""
column_display_pk = True
can_view_details = True
def __init__(self, model, session=None, *args, **kwargs):
super(BaseAdminView, self).__init__(model, session or db.Session, *args, **kwargs)
def is_accessible(self):
# view access only allowed for admins!
return current_user.is_authenticated and current_user.is_admin
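# Hedged usage sketch (not part of this module): how a concrete admin view is
# typically registered with Flask-Admin. `Admin`, `app`, the `User` model and
# the searchable column are illustrative assumptions, not names defined here.
#
#     from flask_admin import Admin
#
#     class UserAdminView(BaseAdminView):
#         column_searchable_list = ("login_name",)
#
#     admin = Admin(app, name="mediatum")
#     admin.add_view(UserAdminView(User, name="Users"))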
|
shechque/python-web-fundation | refs/heads/master | twistedserver2.py | 1 |
from twisted.internet import reactor
from twisted.internet.protocol import Factory
from twisted.protocols.basic import LineReceiver
class SimpleLogger(LineReceiver):
def connectionMade(self):
print 'Got connection from',self.transport.client
def connectionLost(self,reason):
print self.transport.client,'disconnected'
def lineReceived(self,line):
print line
factory = Factory()
factory.protocol = SimpleLogger
reactor.listenTCP(1234,factory)
reactor.run()
|
immenz/pyload | refs/heads/stable | module/lib/thrift/server/TNonblockingServer.py | 83 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""Implementation of non-blocking server.
The main idea of the server is receiving and sending requests
only from the main thread.
It also implements a thread-pool server in terms of tasks, not connections.
"""
import threading
import socket
import Queue
import select
import struct
import logging
from thrift.transport import TTransport
from thrift.protocol.TBinaryProtocol import TBinaryProtocolFactory
__all__ = ['TNonblockingServer']
class Worker(threading.Thread):
"""Worker is a small helper to process incoming connection."""
def __init__(self, queue):
threading.Thread.__init__(self)
self.queue = queue
def run(self):
"""Process queries from task queue, stop if processor is None."""
while True:
try:
processor, iprot, oprot, otrans, callback = self.queue.get()
if processor is None:
break
processor.process(iprot, oprot)
callback(True, otrans.getvalue())
except Exception:
logging.exception("Exception while processing request")
callback(False, '')
WAIT_LEN = 0
WAIT_MESSAGE = 1
WAIT_PROCESS = 2
SEND_ANSWER = 3
CLOSED = 4
def locked(func):
"Decorator which locks self.lock."
def nested(self, *args, **kwargs):
self.lock.acquire()
try:
return func(self, *args, **kwargs)
finally:
self.lock.release()
return nested
def socket_exception(func):
"Decorator close object on socket.error."
def read(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except socket.error:
self.close()
return read
class Connection:
"""Basic class is represented connection.
It can be in state:
WAIT_LEN --- connection is reading request len.
WAIT_MESSAGE --- connection is reading request.
WAIT_PROCESS --- connection has just read whole request and
waits for call ready routine.
SEND_ANSWER --- connection is sending answer string (including length
of answer).
CLOSED --- socket was closed and connection should be deleted.
"""
def __init__(self, new_socket, wake_up):
self.socket = new_socket
self.socket.setblocking(False)
self.status = WAIT_LEN
self.len = 0
self.message = ''
self.lock = threading.Lock()
self.wake_up = wake_up
def _read_len(self):
"""Reads length of request.
        It's a really paranoid routine and it may be replaced by
self.socket.recv(4)."""
read = self.socket.recv(4 - len(self.message))
if len(read) == 0:
            # if we read 0 bytes and self.message is empty, it means the
            # client closed the connection
if len(self.message) != 0:
logging.error("can't read frame size from socket")
self.close()
return
self.message += read
if len(self.message) == 4:
self.len, = struct.unpack('!i', self.message)
if self.len < 0:
logging.error("negative frame size, it seems client"\
" doesn't use FramedTransport")
self.close()
elif self.len == 0:
logging.error("empty frame, it's really strange")
self.close()
else:
self.message = ''
self.status = WAIT_MESSAGE
@socket_exception
def read(self):
"""Reads data from stream and switch state."""
assert self.status in (WAIT_LEN, WAIT_MESSAGE)
if self.status == WAIT_LEN:
self._read_len()
# go back to the main loop here for simplicity instead of
# falling through, even though there is a good chance that
# the message is already available
elif self.status == WAIT_MESSAGE:
read = self.socket.recv(self.len - len(self.message))
if len(read) == 0:
logging.error("can't read frame from socket (get %d of %d bytes)" %
(len(self.message), self.len))
self.close()
return
self.message += read
if len(self.message) == self.len:
self.status = WAIT_PROCESS
@socket_exception
def write(self):
"""Writes data from socket and switch state."""
assert self.status == SEND_ANSWER
sent = self.socket.send(self.message)
if sent == len(self.message):
self.status = WAIT_LEN
self.message = ''
self.len = 0
else:
self.message = self.message[sent:]
@locked
def ready(self, all_ok, message):
"""Callback function for switching state and waking up main thread.
        This function is the only function which can be called asynchronously.
        ready() can switch the Connection to three states:
WAIT_LEN if request was oneway.
SEND_ANSWER if request was processed in normal way.
CLOSED if request throws unexpected exception.
        It also wakes up the main thread.
"""
assert self.status == WAIT_PROCESS
if not all_ok:
self.close()
self.wake_up()
return
        self.len = 0
if len(message) == 0:
# it was a oneway request, do not write answer
self.message = ''
self.status = WAIT_LEN
else:
self.message = struct.pack('!i', len(message)) + message
self.status = SEND_ANSWER
self.wake_up()
@locked
def is_writeable(self):
"Returns True if connection should be added to write list of select."
return self.status == SEND_ANSWER
# it's not necessary, but...
@locked
def is_readable(self):
"Returns True if connection should be added to read list of select."
return self.status in (WAIT_LEN, WAIT_MESSAGE)
@locked
def is_closed(self):
"Returns True if connection is closed."
return self.status == CLOSED
def fileno(self):
"Returns the file descriptor of the associated socket."
return self.socket.fileno()
def close(self):
"Closes connection"
self.status = CLOSED
self.socket.close()
class TNonblockingServer:
"""Non-blocking server."""
def __init__(self, processor, lsocket, inputProtocolFactory=None,
outputProtocolFactory=None, threads=10):
self.processor = processor
self.socket = lsocket
self.in_protocol = inputProtocolFactory or TBinaryProtocolFactory()
self.out_protocol = outputProtocolFactory or self.in_protocol
self.threads = int(threads)
self.clients = {}
self.tasks = Queue.Queue()
self._read, self._write = socket.socketpair()
self.prepared = False
def setNumThreads(self, num):
"""Set the number of worker threads that should be created."""
# implement ThreadPool interface
assert not self.prepared, "You can't change number of threads for working server"
self.threads = num
def prepare(self):
"""Prepares server for serve requests."""
self.socket.listen()
for _ in xrange(self.threads):
thread = Worker(self.tasks)
thread.setDaemon(True)
thread.start()
self.prepared = True
def wake_up(self):
"""Wake up main thread.
        The server usually waits in a select call and we should terminate it.
        The simplest way is using a socketpair.
        Select always waits to read from the first socket of the socketpair.
In this case, we can just write anything to the second socket from
socketpair."""
self._write.send('1')
def _select(self):
"""Does select on open connections."""
readable = [self.socket.handle.fileno(), self._read.fileno()]
writable = []
for i, connection in self.clients.items():
if connection.is_readable():
readable.append(connection.fileno())
if connection.is_writeable():
writable.append(connection.fileno())
if connection.is_closed():
del self.clients[i]
return select.select(readable, writable, readable)
def handle(self):
"""Handle requests.
WARNING! You must call prepare BEFORE calling handle.
"""
assert self.prepared, "You have to call prepare before handle"
rset, wset, xset = self._select()
for readable in rset:
if readable == self._read.fileno():
                # we don't care about the data, we just need to clear the readable flag
self._read.recv(1024)
elif readable == self.socket.handle.fileno():
client = self.socket.accept().handle
self.clients[client.fileno()] = Connection(client, self.wake_up)
else:
connection = self.clients[readable]
connection.read()
if connection.status == WAIT_PROCESS:
itransport = TTransport.TMemoryBuffer(connection.message)
otransport = TTransport.TMemoryBuffer()
iprot = self.in_protocol.getProtocol(itransport)
oprot = self.out_protocol.getProtocol(otransport)
self.tasks.put([self.processor, iprot, oprot,
otransport, connection.ready])
for writeable in wset:
self.clients[writeable].write()
for oob in xset:
self.clients[oob].close()
del self.clients[oob]
def close(self):
"""Closes the server."""
for _ in xrange(self.threads):
self.tasks.put([None, None, None, None, None])
self.socket.close()
self.prepared = False
def serve(self):
"""Serve forever."""
self.prepare()
while True:
self.handle()
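# Hedged usage sketch: wiring this server to a service produced by the Thrift
# compiler. `MyService` and `MyHandler` are assumed names from generated/user
# code; TNonblockingServer and TSocket.TServerSocket are the real names here.
#
#     from thrift.transport import TSocket
#     from myservice import MyService
#
#     processor = MyService.Processor(MyHandler())
#     transport = TSocket.TServerSocket(port=9090)
#     server = TNonblockingServer(processor, transport, threads=4)
#     server.serve()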
|
ntkrnl/yacoin-p2pool | refs/heads/master | p2pool/bitcoin/height_tracker.py | 45 | from twisted.internet import defer, task
from twisted.python import log
import p2pool
from p2pool.bitcoin import data as bitcoin_data
from p2pool.util import deferral, forest, jsonrpc, variable
class HeaderWrapper(object):
__slots__ = 'hash previous_hash'.split(' ')
@classmethod
def from_header(cls, header):
return cls(bitcoin_data.hash256(bitcoin_data.block_header_type.pack(header)), header['previous_block'])
def __init__(self, hash, previous_hash):
self.hash, self.previous_hash = hash, previous_hash
class HeightTracker(object):
'''Point this at a factory and let it take care of getting block heights'''
def __init__(self, best_block_func, factory, backlog_needed):
self._best_block_func = best_block_func
self._factory = factory
self._backlog_needed = backlog_needed
self._tracker = forest.Tracker()
self._watch1 = self._factory.new_headers.watch(self._heard_headers)
self._watch2 = self._factory.new_block.watch(self._request)
self._requested = set()
self._clear_task = task.LoopingCall(self._requested.clear)
self._clear_task.start(60)
self._last_notified_size = 0
self.updated = variable.Event()
self._think_task = task.LoopingCall(self._think)
self._think_task.start(15)
self._think2_task = task.LoopingCall(self._think2)
self._think2_task.start(15)
def _think(self):
try:
highest_head = max(self._tracker.heads, key=lambda h: self._tracker.get_height_and_last(h)[0]) if self._tracker.heads else None
if highest_head is None:
return # wait for think2
height, last = self._tracker.get_height_and_last(highest_head)
if height < self._backlog_needed:
self._request(last)
except:
log.err(None, 'Error in HeightTracker._think:')
def _think2(self):
self._request(self._best_block_func())
def _heard_headers(self, headers):
changed = False
for header in headers:
hw = HeaderWrapper.from_header(header)
if hw.hash in self._tracker.items:
continue
changed = True
self._tracker.add(hw)
if changed:
self.updated.happened()
self._think()
if len(self._tracker.items) >= self._last_notified_size + 100:
print 'Have %i/%i block headers' % (len(self._tracker.items), self._backlog_needed)
self._last_notified_size = len(self._tracker.items)
@defer.inlineCallbacks
def _request(self, last):
if last in self._tracker.items:
return
if last in self._requested:
return
self._requested.add(last)
(yield self._factory.getProtocol()).send_getheaders(version=1, have=[], last=last)
def get_height_rel_highest(self, block_hash):
# callers: highest height can change during yields!
best_height, best_last = self._tracker.get_height_and_last(self._best_block_func())
height, last = self._tracker.get_height_and_last(block_hash)
if last != best_last:
return -1000000000 # XXX hack
return height - best_height
@defer.inlineCallbacks
def get_height_rel_highest_func(bitcoind, factory, best_block_func, net):
if '\ngetblock ' in (yield deferral.retry()(bitcoind.rpc_help)()):
@deferral.DeferredCacher
@defer.inlineCallbacks
def height_cacher(block_hash):
try:
x = yield bitcoind.rpc_getblock('%x' % (block_hash,))
except jsonrpc.Error_for_code(-5): # Block not found
if not p2pool.DEBUG:
raise deferral.RetrySilentlyException()
else:
raise
defer.returnValue(x['blockcount'] if 'blockcount' in x else x['height'])
best_height_cached = variable.Variable((yield deferral.retry()(height_cacher)(best_block_func())))
def get_height_rel_highest(block_hash):
this_height = height_cacher.call_now(block_hash, 0)
best_height = height_cacher.call_now(best_block_func(), 0)
best_height_cached.set(max(best_height_cached.value, this_height, best_height))
return this_height - best_height_cached.value
else:
get_height_rel_highest = HeightTracker(best_block_func, factory, 5*net.SHARE_PERIOD*net.CHAIN_LENGTH/net.PARENT.BLOCK_PERIOD).get_height_rel_highest
defer.returnValue(get_height_rel_highest)
|
tonycao/IntroToHadoopAndMR__Udacity_Course | refs/heads/master | ProblemStatement2/Python/P2Q1_Reducer.py | 12 | #!/usr/bin/python
# Write a MapReduce program which will display the number of hits for each different file on the Web site.
import sys
countTotal = 0
oldKey = None
# Loop around the data
# It will be in the format key\tval
#
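# For example, given sorted mapper output like (paths and counts are
# illustrative only):
#     /index.html\t1
#     /index.html\t1
#     /logo.gif\t1
# this reducer prints one total per key, e.g. "/index.html  2" and
# "/logo.gif  1".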
for line in sys.stdin:
data_mapped = line.strip().split("\t")
if len(data_mapped) != 2:
# Something has gone wrong. Skip this line.
continue
thisKey, thisCount = data_mapped
if oldKey and oldKey != thisKey:
print oldKey, "\t", countTotal
        oldKey = thisKey
countTotal = 0
oldKey = thisKey
countTotal += int(thisCount)
if oldKey != None:
print oldKey, "\t", countTotal
|
doismellburning/django | refs/heads/master | tests/migrations/test_migrations_run_before/0003_third.py | 427 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
"""
This is a wee bit crazy, but it's just to show that run_before works.
"""
dependencies = [
("migrations", "0001_initial"),
]
run_before = [
("migrations", "0002_second"),
]
operations = [
migrations.CreateModel(
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=255)),
("slug", models.SlugField(null=True)),
("age", models.IntegerField(default=0)),
],
)
]
|
cirrusone/phantom2 | refs/heads/master | src/breakpad/src/tools/gyp/test/module/gyptest-default.py | 158 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies simple build of a "Hello, world!" program with loadable modules. The
default for all platforms should be to output the loadable modules to the same
path as the executable.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('module.gyp', chdir='src')
test.build('module.gyp', test.ALL, chdir='src')
expect = """\
Hello from program.c
Hello from lib1.c
Hello from lib2.c
"""
test.run_built_executable('program', chdir='src', stdout=expect)
test.pass_test()
|
aniversarioperu/twython-django | refs/heads/master | twython_django_oauth/models.py | 3 | from django.db import models
from django.contrib.auth.models import User
class TwitterProfile(models.Model):
"""
An example Profile model that handles storing the oauth_token and
    oauth_secret in relation to a user. Adapt this if you have an existing
    setup; there's really nothing special going on here.
"""
user = models.OneToOneField(User)
oauth_token = models.CharField(max_length=200)
oauth_secret = models.CharField(max_length=200)
|
nmercier/linux-cross-gcc | refs/heads/master | win32/bin/Lib/aifc.py | 2 | """Stuff to parse AIFF-C and AIFF files.
Unless explicitly stated otherwise, the description below is true
both for AIFF-C files and AIFF files.
An AIFF-C file has the following structure.
+-----------------+
| FORM |
+-----------------+
| <size> |
+----+------------+
| | AIFC |
| +------------+
| | <chunks> |
| | . |
| | . |
| | . |
+----+------------+
An AIFF file has the string "AIFF" instead of "AIFC".
A chunk consists of an identifier (4 bytes) followed by a size (4 bytes,
big endian order), followed by the data. The size field does not include
the size of the 8 byte header.
The following chunk types are recognized.
FVER
<version number of AIFF-C defining document> (AIFF-C only).
MARK
<# of markers> (2 bytes)
list of markers:
<marker ID> (2 bytes, must be > 0)
<position> (4 bytes)
<marker name> ("pstring")
COMM
<# of channels> (2 bytes)
<# of sound frames> (4 bytes)
<size of the samples> (2 bytes)
<sampling frequency> (10 bytes, IEEE 80-bit extended
floating point)
in AIFF-C files only:
<compression type> (4 bytes)
<human-readable version of compression type> ("pstring")
SSND
<offset> (4 bytes, not used by this program)
<blocksize> (4 bytes, not used by this program)
<sound data>
A pstring consists of 1 byte length, a string of characters, and 0 or 1
byte pad to make the total length even.
Usage.
Reading AIFF files:
f = aifc.open(file, 'r')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods read(), seek(), and close().
In some types of audio files, if the setpos() method is not used,
the seek() method is not necessary.
This returns an instance of a class with the following public methods:
getnchannels() -- returns number of audio channels (1 for
mono, 2 for stereo)
getsampwidth() -- returns sample width in bytes
getframerate() -- returns sampling frequency
getnframes() -- returns number of audio frames
getcomptype() -- returns compression type ('NONE' for AIFF files)
getcompname() -- returns human-readable version of
compression type ('not compressed' for AIFF files)
getparams() -- returns a tuple consisting of all of the
above in the above order
getmarkers() -- get the list of marks in the audio file or None
if there are no marks
getmark(id) -- get mark with the specified id (raises an error
if the mark does not exist)
readframes(n) -- returns at most n frames of audio
rewind() -- rewind to the beginning of the audio stream
setpos(pos) -- seek to the specified position
tell() -- return the current position
close() -- close the instance (make it unusable)
The position returned by tell(), the position given to setpos() and
the position of marks are all compatible and have nothing to do with
the actual position in the file.
The close() method is called automatically when the class instance
is destroyed.
Writing AIFF files:
f = aifc.open(file, 'w')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods write(), tell(), seek(), and
close().
This returns an instance of a class with the following public methods:
aiff() -- create an AIFF file (AIFF-C default)
aifc() -- create an AIFF-C file
setnchannels(n) -- set the number of channels
setsampwidth(n) -- set the sample width
setframerate(n) -- set the frame rate
setnframes(n) -- set the number of frames
setcomptype(type, name)
-- set the compression type and the
human-readable compression type
setparams(tuple)
-- set all parameters at once
setmark(id, pos, name)
-- add specified mark to the list of marks
tell() -- return current position in output file (useful
in combination with setmark())
writeframesraw(data)
                        -- write audio frames without patching up the
file header
writeframes(data)
-- write audio frames and patch up the file header
close() -- patch up the file header and close the
output file
You should set the parameters before the first writeframesraw or
writeframes. The total number of frames does not need to be set,
but when it is set to the correct value, the header does not have to
be patched up.
It is best to first set all parameters, including the compression
type, and then write audio frames using writeframesraw.
When all frames have been written, either call writeframes('') or
close() to patch up the sizes in the header.
Marks can be added anytime. If there are any marks, you must call
close() after all frames have been written.
The close() method is called automatically when the class instance
is destroyed.
When a file is opened with the extension '.aiff', an AIFF file is
written, otherwise an AIFF-C file is written. This default can be
changed by calling aiff() or aifc() before the first writeframes or
writeframesraw.
"""
import struct
import __builtin__
__all__ = ["Error","open","openfp"]
class Error(Exception):
pass
_AIFC_version = 0xA2805140L # Version 1 of AIFF-C
def _read_long(file):
try:
return struct.unpack('>l', file.read(4))[0]
except struct.error:
raise EOFError
def _read_ulong(file):
try:
return struct.unpack('>L', file.read(4))[0]
except struct.error:
raise EOFError
def _read_short(file):
try:
return struct.unpack('>h', file.read(2))[0]
except struct.error:
raise EOFError
def _read_ushort(file):
try:
return struct.unpack('>H', file.read(2))[0]
except struct.error:
raise EOFError
def _read_string(file):
length = ord(file.read(1))
if length == 0:
data = ''
else:
data = file.read(length)
if length & 1 == 0:
dummy = file.read(1)
return data
_HUGE_VAL = 1.79769313486231e+308 # See <limits.h>
def _read_float(f): # 10 bytes
expon = _read_short(f) # 2 bytes
sign = 1
if expon < 0:
sign = -1
expon = expon + 0x8000
himant = _read_ulong(f) # 4 bytes
lomant = _read_ulong(f) # 4 bytes
if expon == himant == lomant == 0:
f = 0.0
elif expon == 0x7FFF:
f = _HUGE_VAL
else:
expon = expon - 16383
f = (himant * 0x100000000L + lomant) * pow(2.0, expon - 63)
return sign * f
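# Worked example of the 80-bit extended format read by _read_float above and
# written by _write_float below: 44100.0 round-trips as expon=0x400E,
# himant=0xAC440000, lomant=0, i.e. 0xAC44 (44100) sits at the top of the
# 64-bit mantissa and the biased exponent 0x400E encodes 2**15.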
def _write_short(f, x):
f.write(struct.pack('>h', x))
def _write_ushort(f, x):
f.write(struct.pack('>H', x))
def _write_long(f, x):
f.write(struct.pack('>l', x))
def _write_ulong(f, x):
f.write(struct.pack('>L', x))
def _write_string(f, s):
if len(s) > 255:
raise ValueError("string exceeds maximum pstring length")
f.write(struct.pack('B', len(s)))
f.write(s)
if len(s) & 1 == 0:
f.write(chr(0))
def _write_float(f, x):
import math
if x < 0:
sign = 0x8000
x = x * -1
else:
sign = 0
if x == 0:
expon = 0
himant = 0
lomant = 0
else:
fmant, expon = math.frexp(x)
if expon > 16384 or fmant >= 1 or fmant != fmant: # Infinity or NaN
expon = sign|0x7FFF
himant = 0
lomant = 0
else: # Finite
expon = expon + 16382
if expon < 0: # denormalized
fmant = math.ldexp(fmant, expon)
expon = 0
expon = expon | sign
fmant = math.ldexp(fmant, 32)
fsmant = math.floor(fmant)
himant = long(fsmant)
fmant = math.ldexp(fmant - fsmant, 32)
fsmant = math.floor(fmant)
lomant = long(fsmant)
_write_ushort(f, expon)
_write_ulong(f, himant)
_write_ulong(f, lomant)
from chunk import Chunk
class Aifc_read:
# Variables used in this class:
#
# These variables are available to the user though appropriate
# methods of this class:
# _file -- the open file with methods read(), close(), and seek()
# set through the __init__() method
# _nchannels -- the number of audio channels
# available through the getnchannels() method
# _nframes -- the number of audio frames
# available through the getnframes() method
# _sampwidth -- the number of bytes per audio sample
# available through the getsampwidth() method
# _framerate -- the sampling frequency
# available through the getframerate() method
# _comptype -- the AIFF-C compression type ('NONE' if AIFF)
# available through the getcomptype() method
# _compname -- the human-readable AIFF-C compression type
# available through the getcomptype() method
# _markers -- the marks in the audio file
# available through the getmarkers() and getmark()
# methods
# _soundpos -- the position in the audio stream
# available through the tell() method, set through the
# setpos() method
#
# These variables are used internally only:
# _version -- the AIFF-C version number
# _decomp -- the decompressor from builtin module cl
# _comm_chunk_read -- 1 iff the COMM chunk has been read
# _aifc -- 1 iff reading an AIFF-C file
# _ssnd_seek_needed -- 1 iff positioned correctly in audio
# file for readframes()
# _ssnd_chunk -- instantiation of a chunk class for the SSND chunk
# _framesize -- size of one frame in the file
def initfp(self, file):
self._version = 0
self._decomp = None
self._convert = None
self._markers = []
self._soundpos = 0
self._file = file
chunk = Chunk(file)
if chunk.getname() != 'FORM':
raise Error, 'file does not start with FORM id'
formdata = chunk.read(4)
if formdata == 'AIFF':
self._aifc = 0
elif formdata == 'AIFC':
self._aifc = 1
else:
raise Error, 'not an AIFF or AIFF-C file'
self._comm_chunk_read = 0
while 1:
self._ssnd_seek_needed = 1
try:
chunk = Chunk(self._file)
except EOFError:
break
chunkname = chunk.getname()
if chunkname == 'COMM':
self._read_comm_chunk(chunk)
self._comm_chunk_read = 1
elif chunkname == 'SSND':
self._ssnd_chunk = chunk
dummy = chunk.read(8)
self._ssnd_seek_needed = 0
elif chunkname == 'FVER':
self._version = _read_ulong(chunk)
elif chunkname == 'MARK':
self._readmark(chunk)
chunk.skip()
if not self._comm_chunk_read or not self._ssnd_chunk:
raise Error, 'COMM chunk and/or SSND chunk missing'
if self._aifc and self._decomp:
import cl
params = [cl.ORIGINAL_FORMAT, 0,
cl.BITS_PER_COMPONENT, self._sampwidth * 8,
cl.FRAME_RATE, self._framerate]
if self._nchannels == 1:
params[1] = cl.MONO
elif self._nchannels == 2:
params[1] = cl.STEREO_INTERLEAVED
else:
raise Error, 'cannot compress more than 2 channels'
self._decomp.SetParams(params)
def __init__(self, f):
if type(f) == type(''):
f = __builtin__.open(f, 'rb')
# else, assume it is an open file object already
self.initfp(f)
#
# User visible methods.
#
def getfp(self):
return self._file
def rewind(self):
self._ssnd_seek_needed = 1
self._soundpos = 0
def close(self):
decomp = self._decomp
try:
if decomp:
self._decomp = None
decomp.CloseDecompressor()
finally:
self._file.close()
def tell(self):
return self._soundpos
def getnchannels(self):
return self._nchannels
def getnframes(self):
return self._nframes
def getsampwidth(self):
return self._sampwidth
def getframerate(self):
return self._framerate
def getcomptype(self):
return self._comptype
def getcompname(self):
return self._compname
## def getversion(self):
## return self._version
def getparams(self):
return self.getnchannels(), self.getsampwidth(), \
self.getframerate(), self.getnframes(), \
self.getcomptype(), self.getcompname()
def getmarkers(self):
if len(self._markers) == 0:
return None
return self._markers
def getmark(self, id):
for marker in self._markers:
if id == marker[0]:
return marker
raise Error, 'marker %r does not exist' % (id,)
def setpos(self, pos):
if pos < 0 or pos > self._nframes:
raise Error, 'position not in range'
self._soundpos = pos
self._ssnd_seek_needed = 1
def readframes(self, nframes):
if self._ssnd_seek_needed:
self._ssnd_chunk.seek(0)
dummy = self._ssnd_chunk.read(8)
pos = self._soundpos * self._framesize
if pos:
self._ssnd_chunk.seek(pos + 8)
self._ssnd_seek_needed = 0
if nframes == 0:
return ''
data = self._ssnd_chunk.read(nframes * self._framesize)
if self._convert and data:
data = self._convert(data)
self._soundpos = self._soundpos + len(data) // (self._nchannels * self._sampwidth)
return data
#
# Internal methods.
#
def _decomp_data(self, data):
import cl
dummy = self._decomp.SetParam(cl.FRAME_BUFFER_SIZE,
len(data) * 2)
return self._decomp.Decompress(len(data) // self._nchannels,
data)
def _ulaw2lin(self, data):
import audioop
return audioop.ulaw2lin(data, 2)
def _adpcm2lin(self, data):
import audioop
if not hasattr(self, '_adpcmstate'):
# first time
self._adpcmstate = None
data, self._adpcmstate = audioop.adpcm2lin(data, 2,
self._adpcmstate)
return data
def _read_comm_chunk(self, chunk):
self._nchannels = _read_short(chunk)
self._nframes = _read_long(chunk)
self._sampwidth = (_read_short(chunk) + 7) // 8
self._framerate = int(_read_float(chunk))
self._framesize = self._nchannels * self._sampwidth
if self._aifc:
#DEBUG: SGI's soundeditor produces a bad size :-(
kludge = 0
if chunk.chunksize == 18:
kludge = 1
print 'Warning: bad COMM chunk size'
chunk.chunksize = 23
#DEBUG end
self._comptype = chunk.read(4)
#DEBUG start
if kludge:
length = ord(chunk.file.read(1))
if length & 1 == 0:
length = length + 1
chunk.chunksize = chunk.chunksize + length
chunk.file.seek(-1, 1)
#DEBUG end
self._compname = _read_string(chunk)
if self._comptype != 'NONE':
if self._comptype == 'G722':
try:
import audioop
except ImportError:
pass
else:
self._convert = self._adpcm2lin
self._sampwidth = 2
return
# for ULAW and ALAW try Compression Library
try:
import cl
except ImportError:
if self._comptype in ('ULAW', 'ulaw'):
try:
import audioop
self._convert = self._ulaw2lin
self._sampwidth = 2
return
except ImportError:
pass
raise Error, 'cannot read compressed AIFF-C files'
if self._comptype in ('ULAW', 'ulaw'):
scheme = cl.G711_ULAW
elif self._comptype in ('ALAW', 'alaw'):
scheme = cl.G711_ALAW
else:
raise Error, 'unsupported compression type'
self._decomp = cl.OpenDecompressor(scheme)
self._convert = self._decomp_data
self._sampwidth = 2
else:
self._comptype = 'NONE'
self._compname = 'not compressed'
def _readmark(self, chunk):
nmarkers = _read_short(chunk)
# Some files appear to contain invalid counts.
# Cope with this by testing for EOF.
try:
for i in range(nmarkers):
id = _read_short(chunk)
pos = _read_long(chunk)
name = _read_string(chunk)
if pos or name:
# some files appear to have
# dummy markers consisting of
# a position 0 and name ''
self._markers.append((id, pos, name))
except EOFError:
print 'Warning: MARK chunk contains only',
print len(self._markers),
if len(self._markers) == 1: print 'marker',
else: print 'markers',
print 'instead of', nmarkers
class Aifc_write:
# Variables used in this class:
#
# These variables are user settable through appropriate methods
# of this class:
# _file -- the open file with methods write(), close(), tell(), seek()
# set through the __init__() method
# _comptype -- the AIFF-C compression type ('NONE' in AIFF)
# set through the setcomptype() or setparams() method
# _compname -- the human-readable AIFF-C compression type
# set through the setcomptype() or setparams() method
# _nchannels -- the number of audio channels
# set through the setnchannels() or setparams() method
# _sampwidth -- the number of bytes per audio sample
# set through the setsampwidth() or setparams() method
# _framerate -- the sampling frequency
# set through the setframerate() or setparams() method
# _nframes -- the number of audio frames written to the header
# set through the setnframes() or setparams() method
# _aifc -- whether we're writing an AIFF-C file or an AIFF file
# set through the aifc() method, reset through the
# aiff() method
#
# These variables are used internally only:
# _version -- the AIFF-C version number
# _comp -- the compressor from builtin module cl
# _nframeswritten -- the number of audio frames actually written
# _datalength -- the size of the audio samples written to the header
# _datawritten -- the size of the audio samples actually written
def __init__(self, f):
if type(f) == type(''):
filename = f
f = __builtin__.open(f, 'wb')
else:
# else, assume it is an open file object already
filename = '???'
self.initfp(f)
if filename[-5:] == '.aiff':
self._aifc = 0
else:
self._aifc = 1
def initfp(self, file):
self._file = file
self._version = _AIFC_version
self._comptype = 'NONE'
self._compname = 'not compressed'
self._comp = None
self._convert = None
self._nchannels = 0
self._sampwidth = 0
self._framerate = 0
self._nframes = 0
self._nframeswritten = 0
self._datawritten = 0
self._datalength = 0
self._markers = []
self._marklength = 0
self._aifc = 1 # AIFF-C is default
def __del__(self):
if self._file:
self.close()
#
# User visible methods.
#
def aiff(self):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
self._aifc = 0
def aifc(self):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
self._aifc = 1
def setnchannels(self, nchannels):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
if nchannels < 1:
raise Error, 'bad # of channels'
self._nchannels = nchannels
def getnchannels(self):
if not self._nchannels:
raise Error, 'number of channels not set'
return self._nchannels
def setsampwidth(self, sampwidth):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
if sampwidth < 1 or sampwidth > 4:
raise Error, 'bad sample width'
self._sampwidth = sampwidth
def getsampwidth(self):
if not self._sampwidth:
raise Error, 'sample width not set'
return self._sampwidth
def setframerate(self, framerate):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
if framerate <= 0:
raise Error, 'bad frame rate'
self._framerate = framerate
def getframerate(self):
if not self._framerate:
raise Error, 'frame rate not set'
return self._framerate
def setnframes(self, nframes):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
self._nframes = nframes
def getnframes(self):
return self._nframeswritten
def setcomptype(self, comptype, compname):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
if comptype not in ('NONE', 'ULAW', 'ulaw', 'ALAW', 'alaw', 'G722'):
raise Error, 'unsupported compression type'
self._comptype = comptype
self._compname = compname
def getcomptype(self):
return self._comptype
def getcompname(self):
return self._compname
## def setversion(self, version):
## if self._nframeswritten:
## raise Error, 'cannot change parameters after starting to write'
## self._version = version
def setparams(self, info):
nchannels, sampwidth, framerate, nframes, comptype, compname = info
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
if comptype not in ('NONE', 'ULAW', 'ulaw', 'ALAW', 'alaw', 'G722'):
raise Error, 'unsupported compression type'
self.setnchannels(nchannels)
self.setsampwidth(sampwidth)
self.setframerate(framerate)
self.setnframes(nframes)
self.setcomptype(comptype, compname)
def getparams(self):
if not self._nchannels or not self._sampwidth or not self._framerate:
raise Error, 'not all parameters set'
return self._nchannels, self._sampwidth, self._framerate, \
self._nframes, self._comptype, self._compname
def setmark(self, id, pos, name):
if id <= 0:
raise Error, 'marker ID must be > 0'
if pos < 0:
raise Error, 'marker position must be >= 0'
if type(name) != type(''):
raise Error, 'marker name must be a string'
for i in range(len(self._markers)):
if id == self._markers[i][0]:
self._markers[i] = id, pos, name
return
self._markers.append((id, pos, name))
def getmark(self, id):
for marker in self._markers:
if id == marker[0]:
return marker
raise Error, 'marker %r does not exist' % (id,)
def getmarkers(self):
if len(self._markers) == 0:
return None
return self._markers
def tell(self):
return self._nframeswritten
def writeframesraw(self, data):
self._ensure_header_written(len(data))
nframes = len(data) // (self._sampwidth * self._nchannels)
if self._convert:
data = self._convert(data)
self._file.write(data)
self._nframeswritten = self._nframeswritten + nframes
self._datawritten = self._datawritten + len(data)
def writeframes(self, data):
self.writeframesraw(data)
if self._nframeswritten != self._nframes or \
self._datalength != self._datawritten:
self._patchheader()
def close(self):
if self._file is None:
return
try:
self._ensure_header_written(0)
if self._datawritten & 1:
# quick pad to even size
self._file.write(chr(0))
self._datawritten = self._datawritten + 1
self._writemarkers()
if self._nframeswritten != self._nframes or \
self._datalength != self._datawritten or \
self._marklength:
self._patchheader()
if self._comp:
self._comp.CloseCompressor()
self._comp = None
finally:
# Prevent ref cycles
self._convert = None
f = self._file
self._file = None
f.close()
#
# Internal methods.
#
def _comp_data(self, data):
import cl
dummy = self._comp.SetParam(cl.FRAME_BUFFER_SIZE, len(data))
dummy = self._comp.SetParam(cl.COMPRESSED_BUFFER_SIZE, len(data))
return self._comp.Compress(self._nframes, data)
def _lin2ulaw(self, data):
import audioop
return audioop.lin2ulaw(data, 2)
def _lin2adpcm(self, data):
import audioop
if not hasattr(self, '_adpcmstate'):
self._adpcmstate = None
data, self._adpcmstate = audioop.lin2adpcm(data, 2,
self._adpcmstate)
return data
def _ensure_header_written(self, datasize):
if not self._nframeswritten:
if self._comptype in ('ULAW', 'ulaw', 'ALAW', 'alaw'):
if not self._sampwidth:
self._sampwidth = 2
if self._sampwidth != 2:
raise Error, 'sample width must be 2 when compressing with ULAW or ALAW'
if self._comptype == 'G722':
if not self._sampwidth:
self._sampwidth = 2
if self._sampwidth != 2:
raise Error, 'sample width must be 2 when compressing with G7.22 (ADPCM)'
if not self._nchannels:
raise Error, '# channels not specified'
if not self._sampwidth:
raise Error, 'sample width not specified'
if not self._framerate:
raise Error, 'sampling rate not specified'
self._write_header(datasize)
def _init_compression(self):
if self._comptype == 'G722':
self._convert = self._lin2adpcm
return
try:
import cl
except ImportError:
if self._comptype in ('ULAW', 'ulaw'):
try:
import audioop
self._convert = self._lin2ulaw
return
except ImportError:
pass
raise Error, 'cannot write compressed AIFF-C files'
if self._comptype in ('ULAW', 'ulaw'):
scheme = cl.G711_ULAW
elif self._comptype in ('ALAW', 'alaw'):
scheme = cl.G711_ALAW
else:
raise Error, 'unsupported compression type'
self._comp = cl.OpenCompressor(scheme)
params = [cl.ORIGINAL_FORMAT, 0,
cl.BITS_PER_COMPONENT, self._sampwidth * 8,
cl.FRAME_RATE, self._framerate,
cl.FRAME_BUFFER_SIZE, 100,
cl.COMPRESSED_BUFFER_SIZE, 100]
if self._nchannels == 1:
params[1] = cl.MONO
elif self._nchannels == 2:
params[1] = cl.STEREO_INTERLEAVED
else:
raise Error, 'cannot compress more than 2 channels'
self._comp.SetParams(params)
# the compressor produces a header which we ignore
dummy = self._comp.Compress(0, '')
self._convert = self._comp_data
def _write_header(self, initlength):
if self._aifc and self._comptype != 'NONE':
self._init_compression()
self._file.write('FORM')
if not self._nframes:
self._nframes = initlength // (self._nchannels * self._sampwidth)
self._datalength = self._nframes * self._nchannels * self._sampwidth
if self._datalength & 1:
self._datalength = self._datalength + 1
if self._aifc:
if self._comptype in ('ULAW', 'ulaw', 'ALAW', 'alaw'):
self._datalength = self._datalength // 2
if self._datalength & 1:
self._datalength = self._datalength + 1
elif self._comptype == 'G722':
self._datalength = (self._datalength + 3) // 4
if self._datalength & 1:
self._datalength = self._datalength + 1
try:
self._form_length_pos = self._file.tell()
except (AttributeError, IOError):
self._form_length_pos = None
commlength = self._write_form_length(self._datalength)
if self._aifc:
self._file.write('AIFC')
self._file.write('FVER')
_write_ulong(self._file, 4)
_write_ulong(self._file, self._version)
else:
self._file.write('AIFF')
self._file.write('COMM')
_write_ulong(self._file, commlength)
_write_short(self._file, self._nchannels)
if self._form_length_pos is not None:
self._nframes_pos = self._file.tell()
_write_ulong(self._file, self._nframes)
if self._comptype in ('ULAW', 'ulaw', 'ALAW', 'alaw', 'G722'):
_write_short(self._file, 8)
else:
_write_short(self._file, self._sampwidth * 8)
_write_float(self._file, self._framerate)
if self._aifc:
self._file.write(self._comptype)
_write_string(self._file, self._compname)
self._file.write('SSND')
if self._form_length_pos is not None:
self._ssnd_length_pos = self._file.tell()
_write_ulong(self._file, self._datalength + 8)
_write_ulong(self._file, 0)
_write_ulong(self._file, 0)
def _write_form_length(self, datalength):
if self._aifc:
commlength = 18 + 5 + len(self._compname)
if commlength & 1:
commlength = commlength + 1
verslength = 12
else:
commlength = 18
verslength = 0
_write_ulong(self._file, 4 + verslength + self._marklength + \
8 + commlength + 16 + datalength)
return commlength
def _patchheader(self):
curpos = self._file.tell()
if self._datawritten & 1:
datalength = self._datawritten + 1
self._file.write(chr(0))
else:
datalength = self._datawritten
if datalength == self._datalength and \
self._nframes == self._nframeswritten and \
self._marklength == 0:
self._file.seek(curpos, 0)
return
self._file.seek(self._form_length_pos, 0)
dummy = self._write_form_length(datalength)
self._file.seek(self._nframes_pos, 0)
_write_ulong(self._file, self._nframeswritten)
self._file.seek(self._ssnd_length_pos, 0)
_write_ulong(self._file, datalength + 8)
self._file.seek(curpos, 0)
self._nframes = self._nframeswritten
self._datalength = datalength
def _writemarkers(self):
if len(self._markers) == 0:
return
self._file.write('MARK')
length = 2
for marker in self._markers:
id, pos, name = marker
length = length + len(name) + 1 + 6
if len(name) & 1 == 0:
length = length + 1
_write_ulong(self._file, length)
self._marklength = length + 8
_write_short(self._file, len(self._markers))
for marker in self._markers:
id, pos, name = marker
_write_short(self._file, id)
_write_ulong(self._file, pos)
_write_string(self._file, name)
def open(f, mode=None):
if mode is None:
if hasattr(f, 'mode'):
mode = f.mode
else:
mode = 'rb'
if mode in ('r', 'rb'):
return Aifc_read(f)
elif mode in ('w', 'wb'):
return Aifc_write(f)
else:
raise Error, "mode must be 'r', 'rb', 'w', or 'wb'"
openfp = open # B/W compatibility
if __name__ == '__main__':
import sys
if not sys.argv[1:]:
sys.argv.append('/usr/demos/data/audio/bach.aiff')
fn = sys.argv[1]
f = open(fn, 'r')
try:
print "Reading", fn
print "nchannels =", f.getnchannels()
print "nframes =", f.getnframes()
print "sampwidth =", f.getsampwidth()
print "framerate =", f.getframerate()
print "comptype =", f.getcomptype()
print "compname =", f.getcompname()
if sys.argv[2:]:
gn = sys.argv[2]
print "Writing", gn
g = open(gn, 'w')
try:
g.setparams(f.getparams())
while 1:
data = f.readframes(1024)
if not data:
break
g.writeframes(data)
finally:
g.close()
print "Done."
finally:
f.close()
|
jk1/intellij-community | refs/heads/master | python/testData/completion/qualifiedAssignment.after.py | 83 | def foo(a):
woo = []
a.words = {}
for x in woo
|
nicobustillos/odoo | refs/heads/8.0 | addons/mail/mail_vote.py | 439 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp.osv import fields, osv
class mail_vote(osv.Model):
    ''' The mail vote feature allows users to like and unlike messages attached
        to a document. This allows, for example, building a ranking-based
        display of messages, such as for an FAQ. '''
_name = 'mail.vote'
_description = 'Mail Vote'
_columns = {
'message_id': fields.many2one('mail.message', 'Message', select=1,
ondelete='cascade', required=True),
'user_id': fields.many2one('res.users', 'User', select=1,
ondelete='cascade', required=True),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
neutrongenious/xrt | refs/heads/master | X-Art Parser/beautifulsoup4-4.3.1/bs4/tests/test_htmlparser.py | 433 | """Tests to ensure that the html.parser tree builder generates good
trees."""
from bs4.testing import SoupTest, HTMLTreeBuilderSmokeTest
from bs4.builder import HTMLParserTreeBuilder
class HTMLParserTreeBuilderSmokeTest(SoupTest, HTMLTreeBuilderSmokeTest):
@property
def default_builder(self):
return HTMLParserTreeBuilder()
def test_namespaced_system_doctype(self):
# html.parser can't handle namespaced doctypes, so skip this one.
pass
def test_namespaced_public_doctype(self):
# html.parser can't handle namespaced doctypes, so skip this one.
pass
|
avneesh91/django | refs/heads/master | django/conf/locale/ka/formats.py | 65 | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'l, j F, Y'
TIME_FORMAT = 'h:i a'
DATETIME_FORMAT = 'j F, Y h:i a'
YEAR_MONTH_FORMAT = 'F, Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'j.M.Y'
SHORT_DATETIME_FORMAT = 'j.M.Y H:i'
FIRST_DAY_OF_WEEK = 1 # (Monday)
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = [
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
# '%d %b %Y', '%d %b, %Y', '%d %b. %Y', # '25 Oct 2006', '25 Oct, 2006', '25 Oct. 2006'
# '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
# '%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
]
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M:%S.%f', # '25.10.06 14:30:59.000200'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y', # '25.10.06'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
]
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = " "
NUMBER_GROUPING = 3
|
m-a-d-n-e-s-s/madness-seprep | refs/heads/master | src/madness/tensor/new_mtxmq/codegen/mtxm.py | 5 | """ Codegen for mtxm
A I
+-------------+
K | a b ....... |
| ... |
+-------------+
B J
+-------------+
K | i j k l ... |
| ... |
+-------------+
C J
+-------------+
I | w x y z ... |
| ... |
+-------------+
# Complex Complex
w += a i - b j
x += a j + b i
y += a k - b l
z += a l + b k
temps: _c += _a _b +- _ai _br
# Real Complex
w += a i
x += a j
y += a k
z += a l
temps: _c += _a _b ; doubled dimj
# Complex Real
w += a i
x += b i
y += a j
z += b j
temps: _c += _az _bz
# Real Real
w += a i
x += a j
y += a k
z += a l
temps: _c += _a _b
"""
from itertools import product
import logging
logger = logging.getLogger(__name__)
class MTXMGen:
def __init__(self, cxa=False, cxb=False):
self.indent = 4
self.__in_main_loop = False
self._mask = False
self._odds = [1]
self.have_bgp = False
self.have_bgq = False
self.have_avx2 = False
self.complex_a = cxa
self.complex_b = cxb
self.complex_dup_cast = ''
@property
def complex_c(self):
return self.complex_a or self.complex_b
@property
def complex_complex(self):
return self.complex_a and self.complex_b
@property
def real_complex(self):
return not self.complex_a and self.complex_b
@property
def complex_real(self):
return self.complex_a and not self.complex_b
@property
def real_real(self):
return not self.complex_a and not self.complex_b
def _temp(self, prefix, x, y):
return prefix + '_' + str(x) + '_' + str(y)
def _temps(self, prefix, x, y, size):
"""
>>> list(MTXMGen()._temps('_x', 'i', 'j', {'i':2, 'j':3}))
['_x_0_0', '_x_0_1', '_x_0_2', '_x_1_0', '_x_1_1', '_x_1_2']
"""
return [self._temp(prefix, i, j) for i, j in product(range(size[x]), range(size[y]))]
def _post_process(self, lines):
return lines
def _header(self, func_name):
f = lambda x: x and "complex" or ""
x2 = lambda x: x and "*2" or ""
if self.have_bgq:
ret = ["void " + func_name + "(long dimi, long dimj, long dimk, long extb, double {} * __restrict__ c_x, const double {} * __restrict__ a_x, const double {} * __restrict__ b_x) {{".format(f(self.complex_c), f(self.complex_a), f(self.complex_b))]
else:
ret = ["void " + func_name + "(long dimi, long dimj, long dimk, double {} * __restrict__ c_x, const double {} * __restrict__ a_x, const double {} * __restrict__ b_x) {{".format(f(self.complex_c), f(self.complex_a), f(self.complex_b))]
ret.append(" int i, j, k;")
ret.append(" double * __restrict__ c = (double*)c_x;")
ret.append(" const double * __restrict__ a = (double*)a_x;")
ret.append(" const double * __restrict__ b = (double*)b_x;")
ret.append(" long effj = dimj;")
if self.have_bgq:
ret.append(" double * c_buf;")
ret.append(" double * b_buf;")
ret.append(" bool free_b = false;")
ret.append(" /* Setup a buffer for c if needed */")
ret.append(" double* c_out = c;")
ret.append(" if (dimj%{}) {{".format(self.complex_c and 2 or 4))
ret.append(" effj = (dimj | {}) + 1;".format(self.complex_c and 1 or 3))
ret.append(" posix_memalign((void **) &c, 32, dimi*effj*sizeof(double){});".format(x2(self.complex_c)))
ret.append(" c_buf = c;")
ret.append(" }")
ret.append(" /* Copy b into a buffer if needed */")
ret.append(" if (extb%{}) {{".format(self.complex_b and 2 or 4))
ret.append(" long t_extb = (dimj | {}) + 1;".format(self.complex_b and 1 or 3))
ret.append(" free_b = true;")
ret.append(" posix_memalign((void **) &b_buf, 32, dimk*t_extb*sizeof(double){});".format(x2(self.complex_b)))
ret.append(" double* bp = b_buf;")
ret.append(" for (k=0; k<dimk; k++, bp += t_extb{0}, b += extb{0})".format(x2(self.complex_b)))
ret.append(" memcpy(bp, b, sizeof(double)*dimj{});".format(x2(self.complex_b)))
ret.append(" b = b_buf;")
ret.append(" extb = t_extb;")
ret.append(" }")
return ret
def _footer(self):
x2 = lambda x: x and "*2" or ""
ret = []
if self.have_bgq:
ret.append(" /* Copy c out if needed */")
ret.append(" if (dimj%{}) {{".format(self.complex_c and 2 or 4))
ret.append(" double* ct = c_buf;")
ret.append(" for (i=0; i<dimi; i++, ct += effj{0}, c_out += dimj{0})".format(x2(self.complex_c)))
ret.append(" memcpy(c_out, ct, sizeof(double)*dimj{});".format(x2(self.complex_c)))
ret.append(" free(c_buf);")
ret.append(" }")
ret.append(" /* Free the buffer for b */")
ret.append(" if (free_b) free(b_buf);")
ret.append("}")
return ret
def _temp_dec(self, size):
ret = []
indent = ' '
x = indent + self.vector_type + ' '
x += ', '.join(self._temps('_c', 'i', 'j', size) +
self._temps('_b', 'k', 'j', size)) + ';'
ret.append(x)
x = indent + self.splat_type + ' ' + ', '.join(self._temps('_a', 'k', 'i', size)) + ';'
ret.append(x)
if self.complex_complex:
if not self.have_bgp and not self.have_bgq:
# BGP does not need seperate reversed registers because a special fma is used
x = "{} {} {};".format(indent, self.vector_type, ', '.join(self._temps('_br', 'k', 'j', size)))
ret.append(x)
if not self.have_bgq:
# Imaginary component of A
x = "{} {} {};".format(indent, self.splat_type, ', '.join(self._temps('_ai', 'k', 'i', size)))
ret.append(x)
elif self.complex_real:
# register from A: a b a b
x = "{} {} {};".format(indent, self.vector_type, ', '.join(self._temps('_az', 'k', 'i', size)))
ret.append(x)
# register from B: i i j j
x = "{} {} {};".format(indent, self.splat_type, ', '.join(self._temps('_bz', 'k', 'j', size)))
ret.append(x)
elif self.real_complex:
pass
return ret
def _extra(self):
return []
def _temps_to_load(self, unrolls, z, x, y, tname=None):
if not tname:
tname = '_'+z
ret = []
ystep = 1
if y == 'j':
ystep = self.vector_length
for i, j in product(range(unrolls[x]), range(0, unrolls[y], ystep)):
ret.append((self._temp(tname, i, j), i, j))
return ret
def _load_a(self, unrolls, indent):
spaces = ' ' * (self.indent*indent)
ret = []
for temp, k, i in self._temps_to_load(unrolls, 'a', 'k', 'i'):
addr = '(pa+' + str((self.complex_a and 2 or 1)*i) + ')'
if self.complex_real:
ret.append(self._load_az(spaces, addr, temp, k, i))
elif self.complex_complex and self.have_bgq:
ret.append(spaces + temp + " = vec_ld2(0, {});".format(addr))
else:
arg0 = ''
if self.have_bgq:
arg0 = '0, '
ret.append(spaces + temp + ' = {}({}{});'.format(self.splat_op, arg0, addr))
if self.complex_complex:
ret.append(spaces + self._temp('_ai', k, i) + ' = {}({}{}+1);'.format(self.splat_op, arg0, addr))
return ret
def _load_b(self, unrolls, indent):
spaces = ' ' * (self.indent*indent)
ret = []
for temp, k, j in self._temps_to_load(unrolls, 'b', 'k', 'j'):
arg0 = ""
if self.have_bgq:
arg0 = "0, "
addr = '({}pb+{})'.format(arg0, j // (self.complex_real and 2 or 1))
if self.complex_real:
ret.append(self._load_bz(spaces, addr, temp, k, j))
else:
ret.append(spaces + temp + ' = ' + self.vector_load + addr + ';')
if self.complex_complex and not self.have_bgp and not self.have_bgq:
ret.append(self._load_br(spaces, addr, temp, k, j))
return ret
def _load_c(self, unrolls, indent):
spaces = ' ' * (self.indent*indent)
ret = []
for temp, i, j in self._temps_to_load(unrolls, 'c', 'i', 'j'):
ret.append(spaces + temp + ' = ' + self.vector_zero + ';')
return ret
def _load_br(self, spaces, addr, temp, k, j):
return spaces + self._temp('_br', k, j) + ' = {}({});'.format(self.complex_reverse_dup, addr)
def _load_az(self, spaces, addr, temp, k, i):
arg0 = ''
if self.have_bgq:
arg0 = '0, '
return spaces + self._temp('_az', k, i) + ' = {}({}{}{});'.format(self.complex_dup, arg0, self.complex_dup_cast, addr)
def _load_bz(self, spaces, addr, temp, k, j):
return spaces + self._temp('_bz', k, j) + ' = {}({});'.format(self.pair_splat, addr)
def _fma(self, at, bt, ct):
raise NotImplementedError()
def _fmaddsub(self, at, bt, ct):
raise NotImplementedError()
def _maths(self, unrolls, indent=0):
spaces = ' ' * (self.indent*indent)
ret = []
for j, i, k in product(range(0, unrolls['j'], self.vector_length), range(unrolls['i']), range(unrolls['k'])):
if self.real_real or self.real_complex:
at = self._temp('_a', k, i)
bt = self._temp('_b', k, j)
ct = self._temp('_c', i, j)
ret.append(spaces + self._fma(at, bt, ct))
elif self.complex_real:
at = self._temp('_az', k, i)
bt = self._temp('_bz', k, j)
ct = self._temp('_c', i, j)
ret.append(spaces + self._fma(at, bt, ct))
elif self.complex_complex:
if self.have_avx2:
at = self._temp('_ai', k, i)
bt = self._temp('_br', k, j)
ct = self._temp('_c', i, j)
ret.append(spaces + self._fmaddsub(at, bt, ct))
at = self._temp('_a', k, i)
bt = self._temp('_b', k, j)
ret.append(spaces + self._fmaddsub(at, bt, ct))
else:
at = self._temp('_a', k, i)
bt = self._temp('_b', k, j)
ct = self._temp('_c', i, j)
ret.append(spaces + self._fma(at, bt, ct))
if not self.have_bgq:
at = self._temp('_ai', k, i)
if not self.have_bgp:
bt = self._temp('_br', k, j)
ret.append(spaces + self._fmaddsub(at, bt, ct))
return ret
def _array(self, z, x, xx, y, yy, cpx):
if y == 'j':
y = "effj"
else:
y = "dim" + y
return z + '+(' + x + '+' + xx + ')*' + y + (cpx and "*2" or "") + '+' + yy
def _store_c(self, unrolls, indent, bc_mod=""):
spaces = ' ' * (self.indent*indent)
ret = []
jstep = self.vector_length
for i, j in product(range(unrolls['i']), range(0, unrolls['j'], jstep)):
if j + jstep < unrolls['j'] or self.__in_main_loop or not self._mask:
arg0 = self._array(bc_mod+'c', 'i', str(i), 'j', str(j), self.complex_c)
arg1 = self._temp('_' + 'c', i, j)
mid = ', '
if self.have_bgq:
arg0, arg1 = arg1, arg0
mid = ', 0, '
ret.append(spaces + self.vector_store + '(' + arg0 + mid + arg1 + ');')
else:
# This is somewhat AVX specific, but no other architectures currently support masking, so this is OK.
ret.append(spaces + '{}('.format(self.mask_store) + self._array(bc_mod+'c', 'i', str(i), 'j', str(j), self.complex_c) + ', mask, ' + self._temp('_' + 'c', i, j) + ');')
return ret
def _loops(self, i, size, bc_mod=""):
if i == 'i':
start = 'i=0'
#FIXME Don't include _odds if i%2==0 and only evens
loops = [size[i]]
if loops[-1] != 1:
loops += self._odds
if self.have_bgp:
loops = range(size[i], 0, -2)
for loop in loops:
yield ('for ({0}; i+{1}<=dimi; i+={1}) {{'.format(start, loop), loop)
start = ''
elif i == 'j':
loop = size[i] // (self.complex_c and 2 or 1)
self.__in_main_loop = True
yield ("for (j=effj; j>{0}; j-={0},{1}c+={0}{2},{1}b+={0}{3}) {{".format(loop, bc_mod, self.complex_c and "*2" or "", self.complex_b and "*2" or ""), size[i])
self.__in_main_loop = False
start = ''
for loop in range(size[i]-self.vector_length, 0, -self.vector_length):
yield (start + "if (j>{}) {{".format(loop//(self.complex_c and 2 or 1)), loop+self.vector_length)
start = 'else '
if size[i] == self.vector_length:
yield ("{", self.vector_length)
else:
yield ("else {", self.vector_length)
elif i == 'k':
assert(size[i] == 1)
pb_inc = 'effj'
if self.have_bgq:
pb_inc = 'extb'
yield ("for (k=0; k<dimk; k+=1,pb+={}{},pa+=dimi{}) {{".format(pb_inc, self.complex_b and "*2" or "", self.complex_a and "*2" or ""), 1)
def _close_braces(self, indent=0):
ret = []
for i in range(indent, -1, -1):
ret += [' '*(self.indent*i) + '}']
return ret
def _inner_loops(self, perm, sizes, indent=0, unrolls=None, bc_mod=""):
indent += 1
if not unrolls:
unrolls = {x:0 for x in perm}
ret = []
spaces = ' '*(self.indent*indent)
if perm == ['k']:
ret.append(spaces + "const double* __restrict__ pb = {}b;".format(bc_mod))
ret.append(spaces + "const double* __restrict__ pa = a+i{};".format(self.complex_a and "*2" or ""))
ret += self._load_c(unrolls, indent)
if perm == ['j', 'k']:
bc_mod = "x"
ret.append(spaces + "const double* __restrict__ {}b = b;".format(bc_mod))
ret.append(spaces + "double* __restrict__ {}c = c;".format(bc_mod))
for loop, unroll in self._loops(perm[0], sizes, bc_mod):
unrolls[perm[0]] = unroll
ret.append(spaces + loop)
if len(perm) > 1:
ret += self._inner_loops(perm[1:], sizes, indent, unrolls, bc_mod)
else:
ret += self._load_a(unrolls, indent+1)
b_loads = self._load_b(unrolls, indent+1)
maths = self._maths(unrolls, indent+1)
b_take = (self.complex_complex and not self.have_bgp and not self.have_bgq) and 2 or 1
m_take = unrolls['i']*(self.complex_complex and 2 or 1)
while b_loads:
ret += b_loads[0:b_take]
ret += maths[0:m_take]
b_loads = b_loads[b_take:]
maths = maths[m_take:]
ret.append(spaces + '}')
if perm == ['k']:
ret += self._store_c(unrolls, indent, bc_mod)
return ret
def gen(self, f, perm, size, func_name='mtxmq'):
"""Output generated code to file f
Input:
perm - an array of 'i', 'j', 'k' in the desired loop order
size - { index : int, }
Output:
None
Code printed to file f
"""
if type(perm) is not list:
perm = list(perm)
if perm[-1] != 'k':
raise Exception("k must be inner loop")
lines = []
# Header
lines += self._header(func_name)
# Temps Declaration
lines += self._temp_dec(size)
# Architecture Specific declarations, e.g. mask prep
lines += self._extra()
# Computation
lines += self._inner_loops(perm, size)
# Footer
lines += self._footer()
lines = self._post_process(lines)
# Output
for line in lines:
print(line, file=f)
class MTXMAVX(MTXMGen):
def __init__(self, *args):
super().__init__(*args)
self.vector_length = 4
self.vector_type = '__m256d'
self.vector_load = '_mm256_loadu_pd'
self.vector_store = '_mm256_storeu_pd'
self.vector_zero = '_mm256_setzero_pd()'
self._mask = True
self.mask_store = '_mm256_maskstore_pd'
self.splat_type = '__m256d'
self.splat_op = '_mm256_broadcast_sd'
#self.complex_reverse_dup = '_mm256_permute_pd' # (_mm256_loadu_pd(addr), 5), could also use shuffle 5
self.complex_dup = '_mm256_broadcast_pd'
self.complex_dup_cast = '(const __m128d*)'
#self.pair_splat = ''
def _load_bz(self, spaces, addr, temp, k, j):
return spaces + self._temp('_bz', k, j) + ' = _mm256_permute_pd(_mm256_broadcast_pd((const __m128d*){}),12);'.format(addr)
def _load_br(self, spaces, addr, temp, k, j):
return spaces + self._temp('_br', k, j) + ' = _mm256_permute_pd({}, 5);'.format(temp)
def _fma(self, at, bt, ct):
return ct + ' = _mm256_add_pd(_mm256_mul_pd(' + bt + ', ' + at + '), ' + ct + ');'
def _fmaddsub(self, at, bt, ct):
return ct + ' = _mm256_addsub_pd(' + ct + ', _mm256_mul_pd(' + at + ', ' + bt + '));'
def _extra(self):
if self.real_real:
return [' ' * self.indent + """
__m256i mask;
j = effj % 4;
switch (j) {
case 0:
mask = _mm256_set_epi32(-1,-1,-1,-1,-1,-1,-1,-1);
break;
case 1:
mask = _mm256_set_epi32( 0, 0, 0, 0, 0, 0,-1,-1);
break;
case 2:
mask = _mm256_set_epi32( 0, 0, 0, 0,-1,-1,-1,-1);
break;
case 3:
mask = _mm256_set_epi32( 0, 0,-1,-1,-1,-1,-1,-1);
break;
default:
return;
}"""]
else:
return [' ' * self.indent + """
__m256i mask;
j = effj % 2;
switch (j) {
case 0:
mask = _mm256_set_epi32(-1,-1,-1,-1,-1,-1,-1,-1);
break;
case 1:
mask = _mm256_set_epi32( 0, 0, 0, 0,-1,-1,-1,-1);
break;
default:
return;
}"""]
class MTXMAVX2(MTXMAVX):
def __init__(self, *args):
super().__init__(*args)
self.have_avx2 = True
def _fma(self, at, bt, ct):
return ct + ' = _mm256_fmadd_pd(' + ','.join([at,bt,ct]) + ');'
def _fmaddsub(self, at, bt, ct):
return ct + ' = _mm256_fmaddsub_pd(' + ','.join([at,bt,ct]) + ');'
class MTXMSSE(MTXMGen):
def __init__(self, *args):
super().__init__(*args)
self.vector_length = 2
self.vector_type = '__m128d'
self.vector_load = '_mm_loadu_pd'
self.vector_store = '_mm_storeu_pd'
self.vector_zero = '_mm_setzero_pd()'
self.splat_type = '__m128d'
self.splat_op = '_mm_load1_pd'
self.complex_reverse_dup = '_mm_loadr_pd' # aligned only!
self.complex_dup = '_mm_loadu_pd'
self.pair_splat = '_mm_load1_pd'
def _fma(self, at, bt, ct):
return ct + ' = _mm_add_pd(_mm_mul_pd(' + bt + ', ' + at + '), ' + ct + ');'
def _fmaddsub(self, at, bt, ct):
return "{2} = _mm_addsub_pd({2}, _mm_mul_pd({0}, {1}));".format(at, bt, ct)
class MTXMBGP(MTXMGen):
def __init__(self, *args):
super().__init__(*args)
self.have_bgp = True
self.vector_length = 2
self.vector_type = '__complex__ double'
self.vector_load = '__lfpd'
self.vector_store = '__stfpd'
self.vector_zero = '__cmplx(0.0,0.0)'
self.splat_type = 'double'
self.splat_op = '*'
self.complex_reverse_dup = '__lfxd'
self.complex_dup = '__lfpd'
self.pair_splat = '*'
def _fma(self, at, bt, ct):
return ct + ' = __fxcpmadd(' + ct + ', ' + bt + ', ' + at + ');'
def _fmaddsub(self, at, bt, ct):
return ct + ' = __fxcxnpma(' + ct + ', ' + bt + ', ' + at + ');'
def _post_process(self, lines):
return [x.replace("__restrict__", "").replace("const", "").replace("double complex", "__complex__ double") for x in lines]
class MTXMBGQ(MTXMGen):
def __init__(self, *args):
super().__init__(*args)
self.have_bgq = True
self.vector_length = 4
self.vector_type = 'vector4double'
self.vector_load = 'vec_ld'
self.vector_store = 'vec_st'
self.vector_zero = '(vector4double)(0.0)'
self.complex_dup = 'vec_ld2'
self.splat_type = 'vector4double'
self.splat_op = 'vec_lds'
def _load_bz(self, spaces, addr, temp, k, j):
t = self._temp('_bz', k, j)
ret = spaces + t + ' = vec_ld2{};\n'.format(addr)
ret += spaces + t + ' = vec_perm({0}, {0}, _cr_perm);'.format(t)
return ret
def _fma(self, at, bt, ct):
if self.complex_complex:
return ct + ' = vec_xmadd(' + at + ', ' + bt + ', ' + ct + ');'
else:
return ct + ' = vec_madd(' + at + ', ' + bt + ', ' + ct + ');'
def _fmaddsub(self, at, bt, ct):
return ct + ' = vec_xxnpmadd(' + bt + ', ' + at + ', ' + ct + ');'
def _post_process(self, lines):
return [x.replace("__restrict__", "").replace("const", "").replace("double complex", "__complex__ double") for x in lines]
def _extra(self):
if self.complex_real:
return [' ' * self.indent + "vector4double _cr_perm = vec_gpci(0x9);"]
else:
return []
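# Usage sketch (not part of the original source). The arguments taken by the
# MTXMGen subclass constructors are defined earlier in this file and are not
# visible in this excerpt, so they are elided; only the gen() call documented
# above is illustrated.
#
#   import sys
#   generator = MTXMAVX(...)                  # constructor args per the base __init__
#   generator.gen(sys.stdout,                 # file the generated kernel is printed to
#                 ['i', 'j', 'k'],            # loop order; 'k' must be the inner loop
#                 {'i': 2, 'j': 8, 'k': 1},   # per-index block sizes (the code asserts k == 1)
#                 func_name='mtxmq_avx')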
|
iulian787/spack | refs/heads/develop | lib/spack/external/py/_builtin.py | 259 | import sys
try:
reversed = reversed
except NameError:
def reversed(sequence):
"""reversed(sequence) -> reverse iterator over values of the sequence
Return a reverse iterator
"""
if hasattr(sequence, '__reversed__'):
return sequence.__reversed__()
if not hasattr(sequence, '__getitem__'):
raise TypeError("argument to reversed() must be a sequence")
return reversed_iterator(sequence)
class reversed_iterator(object):
def __init__(self, seq):
self.seq = seq
self.remaining = len(seq)
def __iter__(self):
return self
def next(self):
i = self.remaining
if i > 0:
i -= 1
item = self.seq[i]
self.remaining = i
return item
raise StopIteration
def __length_hint__(self):
return self.remaining
try:
any = any
except NameError:
def any(iterable):
for x in iterable:
if x:
return True
return False
try:
all = all
except NameError:
def all(iterable):
for x in iterable:
if not x:
return False
return True
try:
sorted = sorted
except NameError:
builtin_cmp = cmp # need to use cmp as keyword arg
def sorted(iterable, cmp=None, key=None, reverse=0):
use_cmp = None
if key is not None:
if cmp is None:
def use_cmp(x, y):
return builtin_cmp(x[0], y[0])
else:
def use_cmp(x, y):
return cmp(x[0], y[0])
l = [(key(element), element) for element in iterable]
else:
if cmp is not None:
use_cmp = cmp
l = list(iterable)
if use_cmp is not None:
l.sort(use_cmp)
else:
l.sort()
if reverse:
l.reverse()
if key is not None:
return [element for (_, element) in l]
return l
try:
set, frozenset = set, frozenset
except NameError:
from sets import set, frozenset
# pass through
enumerate = enumerate
try:
BaseException = BaseException
except NameError:
BaseException = Exception
try:
GeneratorExit = GeneratorExit
except NameError:
class GeneratorExit(Exception):
""" This exception is never raised, it is there to make it possible to
write code compatible with CPython 2.5 even in lower CPython
versions."""
pass
GeneratorExit.__module__ = 'exceptions'
_sysex = (KeyboardInterrupt, SystemExit, MemoryError, GeneratorExit)
try:
callable = callable
except NameError:
def callable(obj):
return hasattr(obj, "__call__")
if sys.version_info >= (3, 0):
exec ("print_ = print ; exec_=exec")
import builtins
# some backward compatibility helpers
_basestring = str
def _totext(obj, encoding=None, errors=None):
if isinstance(obj, bytes):
if errors is None:
obj = obj.decode(encoding)
else:
obj = obj.decode(encoding, errors)
elif not isinstance(obj, str):
obj = str(obj)
return obj
def _isbytes(x):
return isinstance(x, bytes)
def _istext(x):
return isinstance(x, str)
text = str
bytes = bytes
def _getimself(function):
return getattr(function, '__self__', None)
def _getfuncdict(function):
return getattr(function, "__dict__", None)
def _getcode(function):
return getattr(function, "__code__", None)
def execfile(fn, globs=None, locs=None):
if globs is None:
back = sys._getframe(1)
globs = back.f_globals
locs = back.f_locals
del back
elif locs is None:
locs = globs
fp = open(fn, "r")
try:
source = fp.read()
finally:
fp.close()
co = compile(source, fn, "exec", dont_inherit=True)
exec_(co, globs, locs)
else:
import __builtin__ as builtins
_totext = unicode
_basestring = basestring
text = unicode
bytes = str
execfile = execfile
callable = callable
def _isbytes(x):
return isinstance(x, str)
def _istext(x):
return isinstance(x, unicode)
def _getimself(function):
return getattr(function, 'im_self', None)
def _getfuncdict(function):
return getattr(function, "__dict__", None)
def _getcode(function):
try:
return getattr(function, "__code__")
except AttributeError:
return getattr(function, "func_code", None)
def print_(*args, **kwargs):
""" minimal backport of py3k print statement. """
sep = ' '
if 'sep' in kwargs:
sep = kwargs.pop('sep')
end = '\n'
if 'end' in kwargs:
end = kwargs.pop('end')
file = 'file' in kwargs and kwargs.pop('file') or sys.stdout
if kwargs:
args = ", ".join([str(x) for x in kwargs])
raise TypeError("invalid keyword arguments: %s" % args)
at_start = True
for x in args:
if not at_start:
file.write(sep)
file.write(str(x))
at_start = False
file.write(end)
def exec_(obj, globals=None, locals=None):
""" minimal backport of py3k exec statement. """
__tracebackhide__ = True
if globals is None:
frame = sys._getframe(1)
globals = frame.f_globals
if locals is None:
locals = frame.f_locals
elif locals is None:
locals = globals
exec2(obj, globals, locals)
if sys.version_info >= (3, 0):
def _reraise(cls, val, tb):
__tracebackhide__ = True
assert hasattr(val, '__traceback__')
raise cls.with_traceback(val, tb)
else:
exec ("""
def _reraise(cls, val, tb):
__tracebackhide__ = True
raise cls, val, tb
def exec2(obj, globals, locals):
__tracebackhide__ = True
exec obj in globals, locals
""")
def _tryimport(*names):
""" return the first successfully imported module. """
assert names
for name in names:
try:
__import__(name)
except ImportError:
excinfo = sys.exc_info()
else:
return sys.modules[name]
_reraise(*excinfo)
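# Illustrative sketch (not part of the original module): these shims give a single
# spelling that works on both Python 2 and Python 3, e.g.
#
#   print_("answer:", 42, sep=" ", end="\n")   # print as a function on either interpreter
#   exec_("x = 1 + 1", {}, {})                 # exec as a callable on either interpreter
#   json = _tryimport("simplejson", "json")    # first importable module wins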
|
MichaelAquilina/restructured-preview | refs/heads/master | lib/docutils/utils/code_analyzer.py | 86 | #!/usr/bin/python
# coding: utf-8
"""Lexical analysis of formal languages (i.e. code) using Pygments."""
# :Author: Georg Brandl; Felix Wiemann; Günter Milde
# :Date: $Date: 2011-12-20 15:14:21 +0100 (Die, 20. Dez 2011) $
# :Copyright: This module has been placed in the public domain.
from docutils import ApplicationError
try:
import pygments
from pygments.lexers import get_lexer_by_name
from pygments.formatters.html import _get_ttype_class
with_pygments = True
except ImportError:
with_pygments = False
# Filter the following token types from the list of class arguments:
unstyled_tokens = ['token', # Token (base token type)
'text', # Token.Text
''] # short name for Token and Text
# (Add, e.g., Token.Punctuation with ``unstyled_tokens += 'punctuation'``.)
class LexerError(ApplicationError):
pass
class Lexer(object):
"""Parse `code` lines and yield "classified" tokens.
Arguments
code -- string of source code to parse,
language -- formal language the code is written in,
tokennames -- either 'long', 'short', or '' (see below).
Merge subsequent tokens of the same token-type.
Iterating over an instance yields the tokens as ``(tokentype, value)``
tuples. The value of `tokennames` configures the naming of the tokentype:
'long': downcased full token type name,
'short': short name defined by pygments.token.STANDARD_TYPES
(= class argument used in pygments html output),
'none': skip lexical analysis.
"""
def __init__(self, code, language, tokennames='short'):
"""
Set up a lexical analyzer for `code` in `language`.
"""
self.code = code
self.language = language
self.tokennames = tokennames
self.lexer = None
# get lexical analyzer for `language`:
if language in ('', 'text') or tokennames == 'none':
return
if not with_pygments:
raise LexerError('Cannot analyze code. '
'Pygments package not found.')
try:
self.lexer = get_lexer_by_name(self.language)
except pygments.util.ClassNotFound:
raise LexerError('Cannot analyze code. '
'No Pygments lexer found for "%s".' % language)
# Since version 1.2. (released Jan 01, 2010) Pygments has a
# TokenMergeFilter. However, this requires Python >= 2.4. When Docutils
# requires same minimal version, ``self.merge(tokens)`` in __iter__ can
# be replaced by ``self.lexer.add_filter('tokenmerge')`` in __init__.
def merge(self, tokens):
"""Merge subsequent tokens of same token-type.
Also strip the final newline (added by pygments).
"""
tokens = iter(tokens)
(lasttype, lastval) = tokens.next()
for ttype, value in tokens:
if ttype is lasttype:
lastval += value
else:
yield(lasttype, lastval)
(lasttype, lastval) = (ttype, value)
if lastval.endswith('\n'):
lastval = lastval[:-1]
if lastval:
yield(lasttype, lastval)
def __iter__(self):
"""Parse self.code and yield "classified" tokens.
"""
if self.lexer is None:
yield ([], self.code)
return
tokens = pygments.lex(self.code, self.lexer)
for tokentype, value in self.merge(tokens):
if self.tokennames == 'long': # long CSS class args
classes = str(tokentype).lower().split('.')
else: # short CSS class args
classes = [_get_ttype_class(tokentype)]
classes = [cls for cls in classes if cls not in unstyled_tokens]
yield (classes, value)
class NumberLines(object):
"""Insert linenumber-tokens at the start of every code line.
Arguments
tokens -- iterable of ``(classes, value)`` tuples
startline -- first line number
endline -- last line number
Iterating over an instance yields the tokens with a
``(['ln'], '<the line number>')`` token added for every code line.
Multi-line tokens are split."""
def __init__(self, tokens, startline, endline):
self.tokens = tokens
self.startline = startline
# pad linenumbers, e.g. endline == 100 -> fmt_str = '%3d '
self.fmt_str = '%%%dd ' % len(str(endline))
def __iter__(self):
lineno = self.startline
yield (['ln'], self.fmt_str % lineno)
for ttype, value in self.tokens:
lines = value.split('\n')
for line in lines[:-1]:
yield (ttype, line + '\n')
lineno += 1
yield (['ln'], self.fmt_str % lineno)
yield (ttype, lines[-1])
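# Example sketch (not part of the original module; assumes Pygments is installed):
#
#   lexer = Lexer(u'x = 1\ny = 2\n', 'python', tokennames='short')
#   for css_classes, value in NumberLines(lexer, startline=1, endline=2):
#       pass  # each item is (list of CSS class args, token text); lines are prefixed by ['ln'] tokens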
|
wimnat/ansible | refs/heads/devel | test/integration/targets/module_utils_urls/library/test_peercert.py | 29 | #!/usr/bin/python
# Copyright: (c) 2020, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
---
module: test_peercert
short_description: Test getting the peer certificate of an HTTP response
description: Test getting the peer certificate of an HTTP response.
options:
url:
description: The endpoint to get the peer cert for
required: true
type: str
author:
- Ansible Project
'''
EXAMPLES = r'''
#
'''
RETURN = r'''
#
'''
import base64
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.urls import getpeercert, Request
def get_x509_shorthand(name, value):
prefix = {
'countryName': 'C',
'stateOrProvinceName': 'ST',
'localityName': 'L',
'organizationName': 'O',
'commonName': 'CN',
'organizationalUnitName': 'OU',
}[name]
return '%s=%s' % (prefix, value)
def main():
module_args = dict(
url=dict(type='str', required=True),
)
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=True,
)
result = {
'changed': False,
'cert': None,
'raw_cert': None,
}
req = Request().get(module.params['url'])
try:
cert = getpeercert(req)
b_cert = getpeercert(req, binary_form=True)
finally:
req.close()
if cert:
processed_cert = {
'issuer': '',
'not_after': cert.get('notAfter', None),
'not_before': cert.get('notBefore', None),
'serial_number': cert.get('serialNumber', None),
'subject': '',
'version': cert.get('version', None),
}
for field in ['issuer', 'subject']:
field_values = []
for x509_part in cert.get(field, []):
field_values.append(get_x509_shorthand(x509_part[0][0], x509_part[0][1]))
processed_cert[field] = ",".join(field_values)
result['cert'] = processed_cert
if b_cert:
result['raw_cert'] = to_text(base64.b64encode(b_cert))
module.exit_json(**result)
if __name__ == '__main__':
main()
|
moble/scri | refs/heads/main | scri/asymptotic_bondi_data/bms_charges.py | 1 | # Copyright (c) 2020, Michael Boyle
# See LICENSE file for details: <https://github.com/moble/scri/blob/master/LICENSE>
### NOTE: The functions in this file are intended purely for inclusion in the AsymptoticBondData
### class. In particular, they assume that the first argument, `self` is an instance of
### AsymptoticBondData. They should probably not be used outside of that class.
import numpy as np
from math import sqrt, pi
def mass_aspect(self, truncate_ell=max):
"""Compute the Bondi mass aspect of the AsymptoticBondiData.
The Bondi mass aspect is given by
M = -ℜ{ψ₂ + σ ∂ₜσ̄}
Note that the last term is a product between two fields. If, for example, these both have
ell_max=8, then their full product would have ell_max=16, meaning that we would go from
tracking 81 modes to 289. This shows that deciding how to truncate the output ell is
important, which is why this function has the extra argument that it does.
Parameters
==========
truncate_ell: int, or callable [defaults to `max`]
Determines how the ell_max value of the output is determined. If an integer is passed,
each term in the output is truncated to have at most that ell_max. (In particular,
terms that will not be used in the output are simply not computed, without incurring any
errors due to aliasing.) If a callable is passed, it is passed on to the
spherical_functions.Modes.multiply method. See that function's docstring for details.
The default behavior will result in the output having ell_max equal to the largest of
any of the individual Modes objects in the equation for M above -- but not the
product.
"""
if callable(truncate_ell):
return -(self.psi2 + self.sigma.multiply(self.sigma.bar.dot, truncator=truncate_ell)).real
elif truncate_ell:
return -(
self.psi2.truncate_ell(truncate_ell)
+ self.sigma.multiply(self.sigma.bar.dot, truncator=lambda tup: truncate_ell)
).real
else:
return -(self.psi2 + self.sigma * self.sigma.bar.dot).real
def charge_vector_from_aspect(charge):
"""Output the ell<=1 modes of a BMS charge aspect as the charge four-vector.
Considering the aspect as a function a(θ, ϕ), we express the corresponding
four-vector in terms of Cartesian components as
vᵝ = (1/4π) ∫ ℜ{a} (tᵝ + rᵝ) dΩ
where the integral is taken over the 2-sphere, and tᵝ + rᵝ has components
(1, sinθ cosϕ, sinθ sinϕ, cosθ).
"""
four_vector = np.empty(charge.shape, dtype=float)
four_vector[..., 0] = charge[..., 0].real
four_vector[..., 1] = (charge[..., 1] - charge[..., 3]).real / sqrt(6)
four_vector[..., 2] = (charge[..., 1] + charge[..., 3]).imag / sqrt(6)
four_vector[..., 3] = charge[..., 2].real / sqrt(3)
return four_vector / np.sqrt(4 * np.pi)
def bondi_rest_mass(self):
"""Compute the rest mass from the Bondi four-momentum"""
four_momentum = self.bondi_four_momentum()
rest_mass = np.sqrt(four_momentum[:, 0] ** 2 - np.sum(four_momentum[:, 1:] ** 2, axis=1))
return rest_mass
def bondi_four_momentum(self):
"""Compute the Bondi four-momentum
This is just the ell<2 component of the mass aspect, expressed as a four-vector.
"""
ell_max = 1 # Compute only the parts we need, ell<=1
charge_aspect = self.mass_aspect(ell_max).view(np.ndarray)
return charge_vector_from_aspect(charge_aspect)
def bondi_angular_momentum(self, output_dimensionless=False):
"""Compute the total Bondi angular momentum vector
i (ψ₁ + σ ðσ̄)
See Eq. (8) of Dray (1985) iopscience.iop.org/article/10.1088/0264-9381/2/1/002
"""
ell_max = 1 # Compute only the parts we need, ell<=1
charge_aspect = (
1j
* (self.psi1.truncate_ell(ell_max) + self.sigma.multiply(self.sigma.bar.eth_GHP, truncator=lambda tup: ell_max))
).view(np.ndarray)
return charge_vector_from_aspect(charge_aspect)[:, 1:]
def bondi_dimensionless_spin(self):
"""Compute the dimensionless Bondi spin vector"""
N = self.bondi_boost_charge()
J = self.bondi_angular_momentum()
P = self.bondi_four_momentum()
M_sqr = (P[:, 0] ** 2 - np.sum(P[:, 1:] ** 2, axis=1))[:, np.newaxis]
v = P[:, 1:] / (P[:, 0])[:, np.newaxis]
v_norm = np.linalg.norm(v, axis=1)
# To prevent dividing by zero, we compute the normalized velocity vhat ONLY at
# timesteps with a non-zero velocity.
vhat = v.copy()
t_idx = v_norm != 0 # Get the indices for timesteps with non-zero velocity
vhat[t_idx] = v[t_idx] / v_norm[t_idx, np.newaxis]
gamma = (1 / np.sqrt(1 - v_norm ** 2))[:, np.newaxis]
J_dot_vhat = np.einsum("ij,ij->i", J, vhat)[:, np.newaxis]
spin_charge = (gamma * (J + np.cross(v, N)) - (gamma - 1) * J_dot_vhat * vhat) / M_sqr
return spin_charge
def bondi_boost_charge(self):
"""Compute the Bondi boost charge vector
- [ψ₁ + σ ðσ̄ + ½ð(σ σ̄) - t ð ℜ{ψ₂ + σ ∂ₜσ̄}]
See Eq. (8) of Dray (1985) iopscience.iop.org/article/10.1088/0264-9381/2/1/002
"""
ell_max = 1 # Compute only the parts we need, ell<=1
charge_aspect = -(
self.psi1.truncate_ell(ell_max)
+ self.sigma.multiply(self.sigma.bar.eth_GHP, truncator=lambda tup: ell_max)
+ 0.5 * (self.sigma.multiply(self.sigma.bar, truncator=lambda tup: ell_max)).eth_GHP
- self.t
* (
self.psi2.truncate_ell(ell_max) + self.sigma.multiply(self.sigma.bar.dot, truncator=lambda tup: ell_max)
).real.eth_GHP
).view(np.ndarray)
return charge_vector_from_aspect(charge_aspect)[:, 1:]
def bondi_CoM_charge(self):
"""Compute the center-of-mass charge vector
Gⁱ = Nⁱ + t Pⁱ = - [ψ₁ + σ ðσ̄ + ½ð(σ σ̄)]
where Nⁱ is the boost charge and Pⁱ is the momentum. See Eq. (3.4) of arXiv:1912.03164.
"""
ell_max = 1 # Compute only the parts we need, ell<=1
charge_aspect = -(
self.psi1.truncate_ell(ell_max)
+ self.sigma.multiply(self.sigma.bar.eth_GHP, truncator=lambda tup: ell_max)
+ 0.5 * (self.sigma.multiply(self.sigma.bar, truncator=lambda tup: ell_max)).eth_GHP
).view(np.ndarray)
return charge_vector_from_aspect(charge_aspect)[:, 1:]
def supermomentum(self, supermomentum_def, **kwargs):
"""Compute the supermomentum
This function allows for several different definitions of the
supermomentum. These differences only apply to ell > 1 modes,
so they do not affect the Bondi four-momentum. See
Eqs. (7-9) in arXiv:1404.2475 for the different supermomentum
definitions and links to further references.
In the literature, there is an ambiguity of vocabulary. When
it comes to other BMS charges, we clearly distinguish between
the "charge" and the "aspect". However, the
term "supermomentum" is used for both. Accordingly, this
function provides two ways to compute the supermomentum.
1) By default, the supermomentum will be computed as
Ψ = ψ₂ + σ ∂ₜσ̄ + f(θ, ϕ)
(See below for the definitions of `f`.)
2) By passing the option `integrated=True`, the supermomentum
will instead be computed as
Pₗₘ = - (1/4π) ∫ Ψ(θ, ϕ) Yₗₘ(θ, ϕ) dΩ
Parameters
----------
supermomentum_def : str
The definition of the supermomentum to be computed. One of the
following (case-insensitive) options can be specified:
* 'Bondi-Sachs' or 'BS' for f = 0
* 'Moreschi' or 'M' for f = ð²σ̄
* 'Geroch' or 'G' for f = ½ (ð²σ̄ - ð̄²σ)
* 'Geroch-Winicour' or 'GW' for f = - ð̄²σ
integrated : bool, optional
If True, then return the integrated form of the supermomentum — see
Eq. (6) in arXiv:1404.2475. Default is False
working_ell_max: int, optional
The value of ell_max to be used to define the computation grid. The
number of theta points and the number of phi points are set to
2*working_ell_max+1. Defaults to 2*self.ell_max.
Returns
-------
ModesTimeSeries
"""
return_integrated = kwargs.pop("integrated", False)
if supermomentum_def.lower() in ["bondi-sachs", "bs"]:
supermomentum = self.psi2 + self.sigma.grid_multiply(self.sigma.bar.dot, **kwargs)
elif supermomentum_def.lower() in ["moreschi", "m"]:
supermomentum = self.psi2 + self.sigma.grid_multiply(self.sigma.bar.dot, **kwargs) + self.sigma.bar.eth_GHP.eth_GHP
elif supermomentum_def.lower() in ["geroch", "g"]:
supermomentum = (
self.psi2
+ self.sigma.grid_multiply(self.sigma.bar.dot, **kwargs)
+ 0.5 * (self.sigma.bar.eth_GHP.eth_GHP - self.sigma.ethbar_GHP.ethbar_GHP)
)
elif supermomentum_def.lower() in ["geroch-winicour", "gw"]:
supermomentum = self.psi2 + self.sigma.grid_multiply(self.sigma.bar.dot, **kwargs) - self.sigma.ethbar_GHP.ethbar_GHP
else:
raise ValueError(
f"Supermomentum defintion '{supermomentum_def}' not recognized. Please choose one of "
"the following options:\n"
" * 'Bondi-Sachs' or 'BS'\n"
" * 'Moreschi' or 'M'\n"
" * 'Geroch' or 'G'\n"
" * 'Geroch-Winicour' or 'GW'"
)
if return_integrated:
return -0.5 * supermomentum.bar / np.sqrt(np.pi)
else:
return supermomentum
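# Minimal usage sketch (illustrative only): these functions are injected into
# AsymptoticBondiData, so given an instance `abd` one would write, e.g.
#
#   p = abd.bondi_four_momentum()           # (n_times, 4) array of (E, px, py, pz)
#   m = abd.bondi_rest_mass()               # sqrt(E**2 - |p|**2) at each time step
#   chi = abd.bondi_dimensionless_spin()    # dimensionless spin vector at each time step
#   psi = abd.supermomentum('Moreschi', integrated=True)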
|
Coelhon/MasterRepo.repository | refs/heads/master | plugin.video.motorreplays/resources/libs/net.py | 15 | '''
common XBMC Module
Copyright (C) 2011 t0mm0
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import cookielib
import gzip
import re
import StringIO
import urllib
import urllib2
import socket
#Set Global timeout - Useful for slow connections and Putlocker.
socket.setdefaulttimeout(60)
class HeadRequest(urllib2.Request):
'''A Request class that sends HEAD requests'''
def get_method(self):
return 'HEAD'
class Net:
'''
This class wraps :mod:`urllib2` and provides an easy way to make http
requests while taking care of cookies, proxies, gzip compression and
character encoding.
Example::
from addon.common.net import Net
net = Net()
response = net.http_GET('http://xbmc.org')
print response.content
'''
_cj = cookielib.LWPCookieJar()
_proxy = None
_user_agent = 'Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.1 ' + \
'(KHTML, like Gecko) Chrome/13.0.782.99 Safari/535.1'
_http_debug = False
def __init__(self, cookie_file='', proxy='', user_agent='',
http_debug=False):
'''
Kwargs:
cookie_file (str): Full path to a file to be used to load and save
cookies to.
proxy (str): Proxy setting (eg.
``'http://user:pass@example.com:1234'``)
user_agent (str): String to use as the User Agent header. If not
supplied the class will use a default user agent (chrome)
http_debug (bool): Set ``True`` to have HTTP header info written to
the XBMC log for all requests.
'''
if cookie_file:
self.set_cookies(cookie_file)
if proxy:
self.set_proxy(proxy)
if user_agent:
self.set_user_agent(user_agent)
self._http_debug = http_debug
self._update_opener()
def set_cookies(self, cookie_file):
'''
Set the cookie file and try to load cookies from it if it exists.
Args:
cookie_file (str): Full path to a file to be used to load and save
cookies to.
'''
try:
self._cj.load(cookie_file, ignore_discard=True)
self._update_opener()
return True
except:
return False
def get_cookies(self):
'''Returns a dictionary containing all cookie information by domain.'''
return self._cj._cookies
def save_cookies(self, cookie_file):
'''
Saves cookies to a file.
Args:
cookie_file (str): Full path to a file to save cookies to.
'''
self._cj.save(cookie_file, ignore_discard=True)
def set_proxy(self, proxy):
'''
Args:
proxy (str): Proxy setting (eg.
``'http://user:pass@example.com:1234'``)
'''
self._proxy = proxy
self._update_opener()
def get_proxy(self):
'''Returns string containing proxy details.'''
return self._proxy
def set_user_agent(self, user_agent):
'''
Args:
user_agent (str): String to use as the User Agent header.
'''
self._user_agent = user_agent
def get_user_agent(self):
'''Returns user agent string.'''
return self._user_agent
def _update_opener(self):
'''
Builds and installs a new opener to be used by all future calls to
:func:`urllib2.urlopen`.
'''
if self._http_debug:
http = urllib2.HTTPHandler(debuglevel=1)
else:
http = urllib2.HTTPHandler()
if self._proxy:
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self._cj),
urllib2.ProxyHandler({'http':
self._proxy}),
urllib2.HTTPBasicAuthHandler(),
http)
else:
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self._cj),
urllib2.HTTPBasicAuthHandler(),
http)
urllib2.install_opener(opener)
def http_GET(self, url, headers={}, compression=True):
'''
Perform an HTTP GET request.
Args:
url (str): The URL to GET.
Kwargs:
headers (dict): A dictionary describing any headers you would like
to add to the request. (eg. ``{'X-Test': 'testing'}``)
compression (bool): If ``True`` (default), try to use gzip
compression.
Returns:
An :class:`HttpResponse` object containing headers and other
meta-information about the page and the page content.
'''
return self._fetch(url, headers=headers, compression=compression)
def http_POST(self, url, form_data, headers={}, compression=True):
'''
Perform an HTTP POST request.
Args:
url (str): The URL to POST.
form_data (dict): A dictionary of form data to POST.
Kwargs:
headers (dict): A dictionary describing any headers you would like
to add to the request. (eg. ``{'X-Test': 'testing'}``)
compression (bool): If ``True`` (default), try to use gzip
compression.
Returns:
An :class:`HttpResponse` object containing headers and other
meta-information about the page and the page content.
'''
return self._fetch(url, form_data, headers=headers,
compression=compression)
def http_HEAD(self, url, headers={}):
'''
Perform an HTTP HEAD request.
Args:
url (str): The URL to GET.
Kwargs:
headers (dict): A dictionary describing any headers you would like
to add to the request. (eg. ``{'X-Test': 'testing'}``)
Returns:
An :class:`HttpResponse` object containing headers and other
meta-information about the page.
'''
req = HeadRequest(url)
req.add_header('User-Agent', self._user_agent)
for k, v in headers.items():
req.add_header(k, v)
response = urllib2.urlopen(req)
return HttpResponse(response)
def _fetch(self, url, form_data={}, headers={}, compression=True):
'''
Perform an HTTP GET or POST request.
Args:
url (str): The URL to GET or POST.
form_data (dict): A dictionary of form data to POST. If empty, the
request will be a GET, if it contains form data it will be a POST.
Kwargs:
headers (dict): A dictionary describing any headers you would like
to add to the request. (eg. ``{'X-Test': 'testing'}``)
compression (bool): If ``True`` (default), try to use gzip
compression.
Returns:
An :class:`HttpResponse` object containing headers and other
meta-information about the page and the page content.
'''
encoding = ''
req = urllib2.Request(url)
if form_data:
form_data = urllib.urlencode(form_data)
req = urllib2.Request(url, form_data)
req.add_header('User-Agent', self._user_agent)
for k, v in headers.items():
req.add_header(k, v)
if compression:
req.add_header('Accept-Encoding', 'gzip')
response = urllib2.urlopen(req)
return HttpResponse(response)
class HttpResponse:
'''
This class represents a response from an HTTP request.
The content is examined and every attempt is made to properly encode it to
Unicode.
.. seealso::
:meth:`Net.http_GET`, :meth:`Net.http_HEAD` and :meth:`Net.http_POST`
'''
content = ''
'''Unicode encoded string containing the body of the response.'''
def __init__(self, response):
'''
Args:
response (:class:`mimetools.Message`): The object returned by a call
to :func:`urllib2.urlopen`.
'''
self._response = response
html = response.read()
try:
if response.headers['content-encoding'].lower() == 'gzip':
html = gzip.GzipFile(fileobj=StringIO.StringIO(html)).read()
except:
pass
try:
content_type = response.headers['content-type']
if 'charset=' in content_type:
encoding = content_type.split('charset=')[-1]
except:
pass
r = re.search('<meta\s+http-equiv="Content-Type"\s+content="(?:.+?);' +
'\s+charset=(.+?)"', html, re.IGNORECASE)
if r:
encoding = r.group(1)
try:
html = unicode(html, encoding)
except:
pass
self.content = html
def get_headers(self):
'''Returns a List of headers returned by the server.'''
return self._response.info().headers
def get_url(self):
'''
Return the URL of the resource retrieved, commonly used to determine if
a redirect was followed.
'''
return self._response.geturl()
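# Additional usage sketch (not part of the original module):
#
#   net = Net(cookie_file='/tmp/cookies.lwp')
#   resp = net.http_POST('http://example.com/login',
#                        form_data={'user': 'me', 'pass': 'secret'})
#   net.save_cookies('/tmp/cookies.lwp')
#   html = resp.content  # unicode body; gzip and charset handling already applied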
|
openplans/shareabouts-clatsop | refs/heads/master | src/sa_web/config.py | 60 | import yaml
import os.path
import urllib2
from contextlib import closing
from django.conf import settings
from django.utils.translation import ugettext as _
def get_shareabouts_config(path_or_url):
if path_or_url.startswith('http://') or path_or_url.startswith('https://'):
return ShareaboutsRemoteConfig(path_or_url)
else:
return ShareaboutsLocalConfig(path_or_url)
def translate(data):
i18n_data = {}
# If it's an object, recurse
if isinstance(data, dict):
return dict([(k, translate(v))
for k, v in data.items()])
# If it's a list, recurse on each item
elif isinstance(data, list):
return [translate(item)
for item in data]
# If it's a string, output it, unless it should be excluded
elif isinstance(data, basestring):
msg = parse_msg(data)
if msg is not None:
return _(msg)
else:
return data
else:
return data
def parse_msg(s):
s = s.strip()
if s.startswith('_(') and s.endswith(')'):
return s[2:-1]
class _ShareaboutsConfig (object):
"""
Base class representing Shareabouts configuration options
"""
raw = False
@property
def data(self):
if not hasattr(self, '_yml'):
with closing(self.config_file()) as config_yml:
self._yml = yaml.load(config_yml)
if not self.raw:
self._yml = translate(self._yml)
return self._yml
def __getitem__(self, key):
return self.data[key]
def get(self, key, default=None):
return self.data.get(key, default)
def items(self):
return self.data.items()
def update(self, other):
self.data.update(other)
class ShareaboutsRemoteConfig (_ShareaboutsConfig):
def __init__(self, url):
self.url = url
def static_url(self):
return os.path.join(self.url, 'static/')
def config_file(self):
config_fileurl = os.path.join(self.url, 'config.yml')
return urllib2.urlopen(config_fileurl)
class ShareaboutsLocalConfig (_ShareaboutsConfig):
def __init__(self, path):
self.path = path
def static_url(self):
return settings.STATIC_URL
def config_file(self):
config_filename = os.path.join(self.path, 'config.yml')
return open(config_filename)
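# Illustrative sketch (not part of the original module): values wrapped as '_( ... )'
# in the YAML config are routed through Django's ugettext by translate(); everything
# else passes through unchanged, e.g.
#
#   translate({'title': '_(Add a place)', 'zoom': 14})
#   # -> {'title': _('Add a place'), 'zoom': 14}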
|
anksp21/Community-Zenpacks | refs/heads/master | ZenPacks.community.Gentoo/ZenPacks/community/Gentoo/tests/plugindata/linux/server1/uname.py | 3 | {"gentoo_uname_a":
dict(
snmpDescr = 'Linux gentoovmware 2.6.29-gentoo-r5 #1 SMP Sun Jun 7 01:18:09 EDT 2009 i686 Intel(R) Core(TM)2 Duo CPU P8600 @ 2.40GHz GenuineIntel GNU/Linux',
setHWProductKey = 'Linux',
snmpSysName = 'gentoovmware',
setOSProductKey = ('Linux 2.6.29-gentoo-r5', 'Gentoo')
),
}
|
JingJunYin/tensorflow | refs/heads/master | tensorflow/python/grappler/tf_optimizer.py | 43 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Provides a proper python API for the symbols exported through swig."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import graph_pb2
from tensorflow.python import pywrap_tensorflow as tf_opt
from tensorflow.python.framework import errors
from tensorflow.python.grappler import cluster as gcluster
def OptimizeGraph(rewriter_config,
metagraph,
verbose=True,
graph_id=b'graph_to_optimize',
cluster=None):
"""Optimize the provided metagraph."""
with errors.raise_exception_on_not_ok_status() as status:
if cluster is None:
cluster = gcluster.Cluster()
ret_from_swig = tf_opt.TF_OptimizeGraph(cluster.tf_cluster,
rewriter_config.SerializeToString(),
metagraph.SerializeToString(),
verbose, graph_id, status)
if ret_from_swig is None:
return None
out_graph = graph_pb2.GraphDef().FromString(ret_from_swig)
return out_graph
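# Usage sketch (illustrative; assumes a MetaGraphDef has already been exported, e.g.
# with tf.train.export_meta_graph):
#
#   from tensorflow.core.protobuf import rewriter_config_pb2
#   rewriter_config = rewriter_config_pb2.RewriterConfig()
#   optimized_graph_def = OptimizeGraph(rewriter_config, metagraph)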
|
linjoahow/W16_test1 | refs/heads/master | static/Brython3.1.0-20150301-090019/Lib/logging/config.py | 739 | # Copyright 2001-2013 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Configuration functions for the logging package for Python. The core package
is based on PEP 282 and comments thereto in comp.lang.python, and influenced
by Apache's log4j system.
Copyright (C) 2001-2013 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import sys, logging, logging.handlers, socket, struct, traceback, re
import io
try:
import _thread as thread
import threading
except ImportError: #pragma: no cover
thread = None
from socketserver import ThreadingTCPServer, StreamRequestHandler
DEFAULT_LOGGING_CONFIG_PORT = 9030
if sys.platform == "win32":
RESET_ERROR = 10054 #WSAECONNRESET
else:
RESET_ERROR = 104 #ECONNRESET
#
# The following code implements a socket listener for on-the-fly
# reconfiguration of logging.
#
# _listener holds the server object doing the listening
_listener = None
def fileConfig(fname, defaults=None, disable_existing_loggers=True):
"""
Read the logging configuration from a ConfigParser-format file.
This can be called several times from an application, allowing an end user
the ability to select from various pre-canned configurations (if the
developer provides a mechanism to present the choices and load the chosen
configuration).
"""
import configparser
cp = configparser.ConfigParser(defaults)
if hasattr(fname, 'readline'):
cp.read_file(fname)
else:
cp.read(fname)
formatters = _create_formatters(cp)
# critical section
logging._acquireLock()
try:
logging._handlers.clear()
del logging._handlerList[:]
# Handlers add themselves to logging._handlers
handlers = _install_handlers(cp, formatters)
_install_loggers(cp, handlers, disable_existing_loggers)
finally:
logging._releaseLock()
def _resolve(name):
"""Resolve a dotted name to a global object."""
name = name.split('.')
used = name.pop(0)
found = __import__(used)
for n in name:
used = used + '.' + n
try:
found = getattr(found, n)
except AttributeError:
__import__(used)
found = getattr(found, n)
return found
def _strip_spaces(alist):
return map(lambda x: x.strip(), alist)
def _create_formatters(cp):
"""Create and return formatters"""
flist = cp["formatters"]["keys"]
if not len(flist):
return {}
flist = flist.split(",")
flist = _strip_spaces(flist)
formatters = {}
for form in flist:
sectname = "formatter_%s" % form
fs = cp.get(sectname, "format", raw=True, fallback=None)
dfs = cp.get(sectname, "datefmt", raw=True, fallback=None)
c = logging.Formatter
class_name = cp[sectname].get("class")
if class_name:
c = _resolve(class_name)
f = c(fs, dfs)
formatters[form] = f
return formatters
def _install_handlers(cp, formatters):
"""Install and return handlers"""
hlist = cp["handlers"]["keys"]
if not len(hlist):
return {}
hlist = hlist.split(",")
hlist = _strip_spaces(hlist)
handlers = {}
fixups = [] #for inter-handler references
for hand in hlist:
section = cp["handler_%s" % hand]
klass = section["class"]
fmt = section.get("formatter", "")
try:
klass = eval(klass, vars(logging))
except (AttributeError, NameError):
klass = _resolve(klass)
args = section["args"]
args = eval(args, vars(logging))
h = klass(*args)
if "level" in section:
level = section["level"]
h.setLevel(logging._levelNames[level])
if len(fmt):
h.setFormatter(formatters[fmt])
if issubclass(klass, logging.handlers.MemoryHandler):
target = section.get("target", "")
if len(target): #the target handler may not be loaded yet, so keep for later...
fixups.append((h, target))
handlers[hand] = h
#now all handlers are loaded, fixup inter-handler references...
for h, t in fixups:
h.setTarget(handlers[t])
return handlers
def _handle_existing_loggers(existing, child_loggers, disable_existing):
"""
When (re)configuring logging, handle loggers which were in the previous
configuration but are not in the new configuration. There's no point
deleting them as other threads may continue to hold references to them;
and by disabling them, you stop them doing any logging.
However, don't disable children of named loggers, as that's probably not
what was intended by the user. Also, allow existing loggers to NOT be
disabled if disable_existing is false.
"""
root = logging.root
for log in existing:
logger = root.manager.loggerDict[log]
if log in child_loggers:
logger.level = logging.NOTSET
logger.handlers = []
logger.propagate = True
else:
logger.disabled = disable_existing
def _install_loggers(cp, handlers, disable_existing):
"""Create and install loggers"""
# configure the root first
llist = cp["loggers"]["keys"]
llist = llist.split(",")
llist = list(map(lambda x: x.strip(), llist))
llist.remove("root")
section = cp["logger_root"]
root = logging.root
log = root
if "level" in section:
level = section["level"]
log.setLevel(logging._levelNames[level])
for h in root.handlers[:]:
root.removeHandler(h)
hlist = section["handlers"]
if len(hlist):
hlist = hlist.split(",")
hlist = _strip_spaces(hlist)
for hand in hlist:
log.addHandler(handlers[hand])
#and now the others...
#we don't want to lose the existing loggers,
#since other threads may have pointers to them.
#existing is set to contain all existing loggers,
#and as we go through the new configuration we
#remove any which are configured. At the end,
#what's left in existing is the set of loggers
#which were in the previous configuration but
#which are not in the new configuration.
existing = list(root.manager.loggerDict.keys())
#The list needs to be sorted so that we can
#avoid disabling child loggers of explicitly
#named loggers. With a sorted list it is easier
#to find the child loggers.
existing.sort()
#We'll keep the list of existing loggers
#which are children of named loggers here...
child_loggers = []
#now set up the new ones...
for log in llist:
section = cp["logger_%s" % log]
qn = section["qualname"]
propagate = section.getint("propagate", fallback=1)
logger = logging.getLogger(qn)
if qn in existing:
i = existing.index(qn) + 1 # start with the entry after qn
prefixed = qn + "."
pflen = len(prefixed)
num_existing = len(existing)
while i < num_existing:
if existing[i][:pflen] == prefixed:
child_loggers.append(existing[i])
i += 1
existing.remove(qn)
if "level" in section:
level = section["level"]
logger.setLevel(logging._levelNames[level])
for h in logger.handlers[:]:
logger.removeHandler(h)
logger.propagate = propagate
logger.disabled = 0
hlist = section["handlers"]
if len(hlist):
hlist = hlist.split(",")
hlist = _strip_spaces(hlist)
for hand in hlist:
logger.addHandler(handlers[hand])
#Disable any old loggers. There's no point deleting
#them as other threads may continue to hold references
#and by disabling them, you stop them doing any logging.
#However, don't disable children of named loggers, as that's
#probably not what was intended by the user.
#for log in existing:
# logger = root.manager.loggerDict[log]
# if log in child_loggers:
# logger.level = logging.NOTSET
# logger.handlers = []
# logger.propagate = 1
# elif disable_existing_loggers:
# logger.disabled = 1
_handle_existing_loggers(existing, child_loggers, disable_existing)
IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)
def valid_ident(s):
m = IDENTIFIER.match(s)
if not m:
raise ValueError('Not a valid Python identifier: %r' % s)
return True
# The ConvertingXXX classes are wrappers around standard Python containers,
# and they serve to convert any suitable values in the container. The
# conversion converts base dicts, lists and tuples to their wrapped
# equivalents, whereas strings which match a conversion format are converted
# appropriately.
#
# Each wrapper should have a configurator attribute holding the actual
# configurator to use for conversion.
class ConvertingDict(dict):
"""A converting dictionary wrapper."""
def __getitem__(self, key):
value = dict.__getitem__(self, key)
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def get(self, key, default=None):
value = dict.get(self, key, default)
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def pop(self, key, default=None):
value = dict.pop(self, key, default)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
class ConvertingList(list):
"""A converting list wrapper."""
def __getitem__(self, key):
value = list.__getitem__(self, key)
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def pop(self, idx=-1):
value = list.pop(self, idx)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
return result
class ConvertingTuple(tuple):
"""A converting tuple wrapper."""
def __getitem__(self, key):
value = tuple.__getitem__(self, key)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
class BaseConfigurator(object):
"""
The configurator base class which defines some useful defaults.
"""
CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')
WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
DIGIT_PATTERN = re.compile(r'^\d+$')
value_converters = {
'ext' : 'ext_convert',
'cfg' : 'cfg_convert',
}
# We might want to use a different one, e.g. importlib
importer = staticmethod(__import__)
def __init__(self, config):
self.config = ConvertingDict(config)
self.config.configurator = self
def resolve(self, s):
"""
Resolve strings to objects using standard import and attribute
syntax.
"""
name = s.split('.')
used = name.pop(0)
try:
found = self.importer(used)
for frag in name:
used += '.' + frag
try:
found = getattr(found, frag)
except AttributeError:
self.importer(used)
found = getattr(found, frag)
return found
except ImportError:
e, tb = sys.exc_info()[1:]
v = ValueError('Cannot resolve %r: %s' % (s, e))
v.__cause__, v.__traceback__ = e, tb
raise v
def ext_convert(self, value):
"""Default converter for the ext:// protocol."""
return self.resolve(value)
def cfg_convert(self, value):
"""Default converter for the cfg:// protocol."""
rest = value
m = self.WORD_PATTERN.match(rest)
if m is None:
raise ValueError("Unable to convert %r" % value)
else:
rest = rest[m.end():]
d = self.config[m.groups()[0]]
#print d, rest
while rest:
m = self.DOT_PATTERN.match(rest)
if m:
d = d[m.groups()[0]]
else:
m = self.INDEX_PATTERN.match(rest)
if m:
idx = m.groups()[0]
if not self.DIGIT_PATTERN.match(idx):
d = d[idx]
else:
try:
n = int(idx) # try as number first (most likely)
d = d[n]
except TypeError:
d = d[idx]
if m:
rest = rest[m.end():]
else:
raise ValueError('Unable to convert '
'%r at %r' % (value, rest))
#rest should be empty
return d
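# Illustrative note (not part of the module): the two value-conversion protocols
# resolve as
#   'ext://sys.stderr'       -> the sys.stderr object (import + attribute traversal)
#   'cfg://handlers.console' -> self.config['handlers']['console']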
def convert(self, value):
"""
Convert values to an appropriate type. dicts, lists and tuples are
replaced by their converting alternatives. Strings are checked to
see if they have a conversion format and are converted if they do.
"""
if not isinstance(value, ConvertingDict) and isinstance(value, dict):
value = ConvertingDict(value)
value.configurator = self
elif not isinstance(value, ConvertingList) and isinstance(value, list):
value = ConvertingList(value)
value.configurator = self
elif not isinstance(value, ConvertingTuple) and\
isinstance(value, tuple):
value = ConvertingTuple(value)
value.configurator = self
elif isinstance(value, str): # str for py3k
m = self.CONVERT_PATTERN.match(value)
if m:
d = m.groupdict()
prefix = d['prefix']
converter = self.value_converters.get(prefix, None)
if converter:
suffix = d['suffix']
converter = getattr(self, converter)
value = converter(suffix)
return value
def configure_custom(self, config):
"""Configure an object with a user-supplied factory."""
c = config.pop('()')
if not callable(c):
c = self.resolve(c)
props = config.pop('.', None)
# Check for valid identifiers
kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
result = c(**kwargs)
if props:
for name, value in props.items():
setattr(result, name, value)
return result
def as_tuple(self, value):
"""Utility function which converts lists to tuples."""
if isinstance(value, list):
value = tuple(value)
return value
class DictConfigurator(BaseConfigurator):
"""
Configure logging using a dictionary-like object to describe the
configuration.
"""
def configure(self):
"""Do the configuration."""
config = self.config
if 'version' not in config:
raise ValueError("dictionary doesn't specify a version")
if config['version'] != 1:
raise ValueError("Unsupported version: %s" % config['version'])
incremental = config.pop('incremental', False)
EMPTY_DICT = {}
logging._acquireLock()
try:
if incremental:
handlers = config.get('handlers', EMPTY_DICT)
for name in handlers:
if name not in logging._handlers:
raise ValueError('No handler found with '
'name %r' % name)
else:
try:
handler = logging._handlers[name]
handler_config = handlers[name]
level = handler_config.get('level', None)
if level:
handler.setLevel(logging._checkLevel(level))
except Exception as e:
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
loggers = config.get('loggers', EMPTY_DICT)
for name in loggers:
try:
self.configure_logger(name, loggers[name], True)
except Exception as e:
raise ValueError('Unable to configure logger '
'%r: %s' % (name, e))
root = config.get('root', None)
if root:
try:
self.configure_root(root, True)
except Exception as e:
raise ValueError('Unable to configure root '
'logger: %s' % e)
else:
disable_existing = config.pop('disable_existing_loggers', True)
logging._handlers.clear()
del logging._handlerList[:]
# Do formatters first - they don't refer to anything else
formatters = config.get('formatters', EMPTY_DICT)
for name in formatters:
try:
formatters[name] = self.configure_formatter(
formatters[name])
except Exception as e:
raise ValueError('Unable to configure '
'formatter %r: %s' % (name, e))
# Next, do filters - they don't refer to anything else, either
filters = config.get('filters', EMPTY_DICT)
for name in filters:
try:
filters[name] = self.configure_filter(filters[name])
except Exception as e:
raise ValueError('Unable to configure '
'filter %r: %s' % (name, e))
# Next, do handlers - they refer to formatters and filters
# As handlers can refer to other handlers, sort the keys
# to allow a deterministic order of configuration
handlers = config.get('handlers', EMPTY_DICT)
deferred = []
for name in sorted(handlers):
try:
handler = self.configure_handler(handlers[name])
handler.name = name
handlers[name] = handler
except Exception as e:
if 'target not configured yet' in str(e):
deferred.append(name)
else:
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
# Now do any that were deferred
for name in deferred:
try:
handler = self.configure_handler(handlers[name])
handler.name = name
handlers[name] = handler
except Exception as e:
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
# Next, do loggers - they refer to handlers and filters
#we don't want to lose the existing loggers,
#since other threads may have pointers to them.
#existing is set to contain all existing loggers,
#and as we go through the new configuration we
#remove any which are configured. At the end,
#what's left in existing is the set of loggers
#which were in the previous configuration but
#which are not in the new configuration.
root = logging.root
existing = list(root.manager.loggerDict.keys())
#The list needs to be sorted so that we can
#avoid disabling child loggers of explicitly
#named loggers. With a sorted list it is easier
#to find the child loggers.
existing.sort()
#We'll keep the list of existing loggers
#which are children of named loggers here...
child_loggers = []
#now set up the new ones...
loggers = config.get('loggers', EMPTY_DICT)
for name in loggers:
if name in existing:
i = existing.index(name) + 1 # look after name
prefixed = name + "."
pflen = len(prefixed)
num_existing = len(existing)
while i < num_existing:
if existing[i][:pflen] == prefixed:
child_loggers.append(existing[i])
i += 1
existing.remove(name)
try:
self.configure_logger(name, loggers[name])
except Exception as e:
raise ValueError('Unable to configure logger '
'%r: %s' % (name, e))
#Disable any old loggers. There's no point deleting
#them as other threads may continue to hold references
#and by disabling them, you stop them doing any logging.
#However, don't disable children of named loggers, as that's
#probably not what was intended by the user.
#for log in existing:
# logger = root.manager.loggerDict[log]
# if log in child_loggers:
# logger.level = logging.NOTSET
# logger.handlers = []
# logger.propagate = True
# elif disable_existing:
# logger.disabled = True
_handle_existing_loggers(existing, child_loggers,
disable_existing)
# And finally, do the root logger
root = config.get('root', None)
if root:
try:
self.configure_root(root)
except Exception as e:
raise ValueError('Unable to configure root '
'logger: %s' % e)
finally:
logging._releaseLock()
def configure_formatter(self, config):
"""Configure a formatter from a dictionary."""
if '()' in config:
factory = config['()'] # for use in exception handler
try:
result = self.configure_custom(config)
except TypeError as te:
if "'format'" not in str(te):
raise
#Name of parameter changed from fmt to format.
#Retry with old name.
#This is so that code can be used with older Python versions
#(e.g. by Django)
config['fmt'] = config.pop('format')
config['()'] = factory
result = self.configure_custom(config)
else:
fmt = config.get('format', None)
dfmt = config.get('datefmt', None)
style = config.get('style', '%')
result = logging.Formatter(fmt, dfmt, style)
return result
def configure_filter(self, config):
"""Configure a filter from a dictionary."""
if '()' in config:
result = self.configure_custom(config)
else:
name = config.get('name', '')
result = logging.Filter(name)
return result
def add_filters(self, filterer, filters):
"""Add filters to a filterer from a list of names."""
for f in filters:
try:
filterer.addFilter(self.config['filters'][f])
except Exception as e:
raise ValueError('Unable to add filter %r: %s' % (f, e))
def configure_handler(self, config):
"""Configure a handler from a dictionary."""
config_copy = dict(config) # for restoring in case of error
formatter = config.pop('formatter', None)
if formatter:
try:
formatter = self.config['formatters'][formatter]
except Exception as e:
raise ValueError('Unable to set formatter '
'%r: %s' % (formatter, e))
level = config.pop('level', None)
filters = config.pop('filters', None)
if '()' in config:
c = config.pop('()')
if not callable(c):
c = self.resolve(c)
factory = c
else:
cname = config.pop('class')
klass = self.resolve(cname)
#Special case for handler which refers to another handler
if issubclass(klass, logging.handlers.MemoryHandler) and\
'target' in config:
try:
th = self.config['handlers'][config['target']]
if not isinstance(th, logging.Handler):
config.update(config_copy) # restore for deferred cfg
raise TypeError('target not configured yet')
config['target'] = th
except Exception as e:
raise ValueError('Unable to set target handler '
'%r: %s' % (config['target'], e))
elif issubclass(klass, logging.handlers.SMTPHandler) and\
'mailhost' in config:
config['mailhost'] = self.as_tuple(config['mailhost'])
elif issubclass(klass, logging.handlers.SysLogHandler) and\
'address' in config:
config['address'] = self.as_tuple(config['address'])
factory = klass
kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
try:
result = factory(**kwargs)
except TypeError as te:
if "'stream'" not in str(te):
raise
#The argument name changed from strm to stream
#Retry with old name.
#This is so that code can be used with older Python versions
#(e.g. by Django)
kwargs['strm'] = kwargs.pop('stream')
result = factory(**kwargs)
if formatter:
result.setFormatter(formatter)
if level is not None:
result.setLevel(logging._checkLevel(level))
if filters:
self.add_filters(result, filters)
return result
def add_handlers(self, logger, handlers):
"""Add handlers to a logger from a list of names."""
for h in handlers:
try:
logger.addHandler(self.config['handlers'][h])
except Exception as e:
raise ValueError('Unable to add handler %r: %s' % (h, e))
def common_logger_config(self, logger, config, incremental=False):
"""
Perform configuration which is common to root and non-root loggers.
"""
level = config.get('level', None)
if level is not None:
logger.setLevel(logging._checkLevel(level))
if not incremental:
#Remove any existing handlers
for h in logger.handlers[:]:
logger.removeHandler(h)
handlers = config.get('handlers', None)
if handlers:
self.add_handlers(logger, handlers)
filters = config.get('filters', None)
if filters:
self.add_filters(logger, filters)
def configure_logger(self, name, config, incremental=False):
"""Configure a non-root logger from a dictionary."""
logger = logging.getLogger(name)
self.common_logger_config(logger, config, incremental)
propagate = config.get('propagate', None)
if propagate is not None:
logger.propagate = propagate
def configure_root(self, config, incremental=False):
"""Configure a root logger from a dictionary."""
root = logging.getLogger()
self.common_logger_config(root, config, incremental)
dictConfigClass = DictConfigurator
def dictConfig(config):
"""Configure logging using a dictionary."""
dictConfigClass(config).configure()
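# A minimal sketch of a dictConfig() call (illustrative only; the formatter,
# handler and logger names below are made-up examples, not part of this
# module):
#
#   dictConfig({
#       'version': 1,
#       'formatters': {
#           'simple': {'format': '%(asctime)s %(levelname)s %(message)s'},
#       },
#       'handlers': {
#           'console': {
#               'class': 'logging.StreamHandler',
#               'formatter': 'simple',
#               'level': 'DEBUG',
#           },
#       },
#       'root': {'handlers': ['console'], 'level': 'INFO'},
#   })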
def listen(port=DEFAULT_LOGGING_CONFIG_PORT):
"""
Start up a socket server on the specified port, and listen for new
configurations.
These will be sent as a file suitable for processing by fileConfig().
Returns a Thread object on which you can call start() to start the server,
and which you can join() when appropriate. To stop the server, call
stopListening().
"""
if not thread: #pragma: no cover
raise NotImplementedError("listen() needs threading to work")
class ConfigStreamHandler(StreamRequestHandler):
"""
Handler for a logging configuration request.
It expects a completely new logging configuration and uses fileConfig
to install it.
"""
def handle(self):
"""
Handle a request.
Each request is expected to be a 4-byte length, packed using
struct.pack(">L", n), followed by the config file.
Uses fileConfig() to do the grunt work.
"""
try:
conn = self.connection
chunk = conn.recv(4)
if len(chunk) == 4:
slen = struct.unpack(">L", chunk)[0]
chunk = self.connection.recv(slen)
while len(chunk) < slen:
chunk = chunk + conn.recv(slen - len(chunk))
chunk = chunk.decode("utf-8")
try:
import json
                        d = json.loads(chunk)
assert isinstance(d, dict)
dictConfig(d)
except:
                        # Not valid JSON - fall back to applying the payload
                        # as an INI-format file via fileConfig().
file = io.StringIO(chunk)
try:
fileConfig(file)
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
traceback.print_exc()
if self.server.ready:
self.server.ready.set()
except socket.error as e:
if not isinstance(e.args, tuple):
raise
else:
errcode = e.args[0]
if errcode != RESET_ERROR:
raise
class ConfigSocketReceiver(ThreadingTCPServer):
"""
A simple TCP socket-based logging config receiver.
"""
allow_reuse_address = 1
def __init__(self, host='localhost', port=DEFAULT_LOGGING_CONFIG_PORT,
handler=None, ready=None):
ThreadingTCPServer.__init__(self, (host, port), handler)
logging._acquireLock()
self.abort = 0
logging._releaseLock()
self.timeout = 1
self.ready = ready
def serve_until_stopped(self):
import select
abort = 0
while not abort:
rd, wr, ex = select.select([self.socket.fileno()],
[], [],
self.timeout)
if rd:
self.handle_request()
logging._acquireLock()
abort = self.abort
logging._releaseLock()
self.socket.close()
class Server(threading.Thread):
def __init__(self, rcvr, hdlr, port):
super(Server, self).__init__()
self.rcvr = rcvr
self.hdlr = hdlr
self.port = port
self.ready = threading.Event()
def run(self):
server = self.rcvr(port=self.port, handler=self.hdlr,
ready=self.ready)
if self.port == 0:
self.port = server.server_address[1]
self.ready.set()
global _listener
logging._acquireLock()
_listener = server
logging._releaseLock()
server.serve_until_stopped()
return Server(ConfigSocketReceiver, ConfigStreamHandler, port)
def stopListening():
"""
Stop the listening server which was created with a call to listen().
"""
global _listener
logging._acquireLock()
try:
if _listener:
_listener.abort = 1
_listener = None
finally:
logging._releaseLock()
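# A minimal sketch of the listen()/stopListening() lifecycle (illustrative
# only; 9999 is an arbitrary example port):
#
#   t = listen(9999)   # returns a threading.Thread wrapping the socket server
#   t.start()          # begin accepting length-prefixed config payloads
#   ...                # clients send JSON (dictConfig) or INI (fileConfig) data
#   stopListening()    # ask the server loop to stop
#   t.join()           # wait for the listener thread to exit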
|
dragondjf/musicplayer | refs/heads/master | config/constants.py | 1 | #!/usr/bin/python
# -*- coding: utf-8 -*-
MainWindow_Width = 905
MainWindow_Height = 600
SimpleWindow_Width = 300
SimpleWindow_Height = 600
TitleBar_Height = 25
LeftBar_Width = 60
Bottom_Height = 100
Simple_Bottom_Height = 200
|
xiaoyaozi5566/DynamicCache | refs/heads/master | src/python/m5/util/region.py | 64 | # Copyright (c) 2006 Nathan Binkert <nate@binkert.org>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class _neg_inf(object):
'''This object always compares less than any other object'''
def __repr__(self): return '<neg_inf>'
def __lt__(self, other): return type(self) != type(other)
def __le__(self, other): return True
def __gt__(self, other): return False
def __ge__(self, other): return type(self) == type(other)
def __eq__(self, other): return type(self) == type(other)
def __ne__(self, other): return type(self) != type(other)
neg_inf = _neg_inf()
class _pos_inf(object):
'''This object always compares greater than any other object'''
def __repr__(self): return '<pos_inf>'
def __lt__(self, other): return False
def __le__(self, other): return type(self) == type(other)
def __gt__(self, other): return type(self) != type(other)
def __ge__(self, other): return True
def __eq__(self, other): return type(self) == type(other)
def __ne__(self, other): return type(self) != type(other)
pos_inf = _pos_inf()
class Region(tuple):
'''A region (range) of [start, end).
This includes utility functions to compare overlap of regions.'''
def __new__(cls, *args):
if len(args) == 1:
arg = args[0]
if isinstance(arg, Region):
return arg
args = tuple(arg)
if len(args) != 2:
raise AttributeError, \
"Only one or two arguments allowed, %d provided" % (alen, )
return tuple.__new__(cls, args)
def __repr__(self):
return 'Region(%s, %s)' % (self[0], self[1])
@property
def start(self):
return self[0]
@property
def end(self):
return self[1]
def __contains__(self, other):
'''other is
        region: True if other is fully contained within self.
pos: True if other is within the region'''
if isinstance(other, tuple):
return self[0] <= other[0] and self[1] >= other[1]
return self[0] <= other and other < self[1]
def __eq__(self, other):
'''other is
region: True if self and other are identical.
pos: True if other is within the region'''
if isinstance(other, tuple):
return self[0] == other[0] and self[1] == other[1]
return self[0] <= other and other < self[1]
# @param self is a region.
# @param other is a region.
# @return if self and other are not identical.
def __ne__(self, other):
'''other is
region: true if they are not identical
pos: True if other is not in the region'''
if isinstance(other, tuple):
return self[0] != other[0] or self[1] != other[1]
return other < self[0] or self[1] <= other
# @param self is a region.
# @param other is a region.
    # @return if self is less than other and does not overlap other.
def __lt__(self, other):
"self completely left of other (cannot overlap)"
if isinstance(other, tuple):
return self[1] <= other[0]
return self[1] <= other
# @param self is a region.
# @param other is a region.
# @return if self is less than other. self may overlap other,
    # but not extend beyond the end of other.
def __le__(self, other):
"self extends to the left of other (can overlap)"
if isinstance(other, tuple):
return self[0] <= other[0]
return self[0] <= other
# @param self is a region.
# @param other is a region.
# @return if self is greater than other and does not overlap other.
def __gt__(self, other):
"self is completely right of other (cannot overlap)"
if isinstance(other, tuple):
return self[0] >= other[1]
return self[0] > other
# @param self is a region.
# @param other is a region.
# @return if self is greater than other. self may overlap other,
# but not extend beyond the beginning of other.
def __ge__(self, other):
"self ex_ends beyond other to the right (can overlap)"
if isinstance(other, tuple):
return self[1] >= other[1]
return self[1] > other
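# A small sketch of Region comparison semantics (illustrative values only):
#
#   a = Region(0, 10)
#   b = Region(5, 15)
#   7 in a              # True: membership uses the half-open range [start, end)
#   10 in a             # False
#   Region(2, 4) in a   # True: fully contained regions are "in" the outer one
#   a < b               # False: a overlaps b, so it is not completely left of b
#   a <= b              # True: a starts at or before b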
class Regions(object):
'''A set of regions (ranges). Basically a region with holes.
Includes utility functions to merge regions and figure out if
something is in one of the regions.'''
def __init__(self, *args):
self.regions = []
self.extend(*args)
def copy(self):
copy = Regions()
copy.regions.extend(self.regions)
return copy
def append(self, *args):
self.regions.append(Region(*args))
def extend(self, *args):
self.regions.extend(Region(a) for a in args)
def __contains__(self, position):
for region in self.regions:
if position in region:
return True
return False
def __len__(self):
return len(self.regions)
def __iand__(self, other):
A = self.regions
B = other.regions
R = []
i = 0
j = 0
while i < len(self) and j < len(other):
a = A[i]
b = B[j]
if a[1] <= b[0]:
# A is completely before B. Skip A
i += 1
elif a[0] <= b[0]:
if a[1] <= b[1]:
# A and B overlap with B not left of A and A not right of B
R.append(Region(b[0], a[1]))
# Advance A because nothing is left
i += 1
if a[1] == b[1]:
# Advance B too
j += 1
else:
# A and B overlap with B completely within the bounds of A
R.append(Region(b[0], b[1]))
# Advance only B because some of A may still be useful
j += 1
elif b[1] <= a[0]:
# B is completely before A. Skip B.
j += 1
else:
assert b[0] < a[0]
if b[1] <= a[1]:
# A and B overlap with A not left of B and B not right of A
R.append(Region(a[0], b[1]))
# Advance B because nothing is left
j += 1
if a[1] == b[1]:
# Advance A too
i += 1
else:
# A and B overlap with A completely within the bounds of B
R.append(Region(a[0], a[1]))
# Advance only A because some of B may still be useful
i += 1
self.regions = R
return self
def __and__(self, other):
result = self.copy()
result &= other
return result
def __repr__(self):
return 'Regions(%s)' % ([(r[0], r[1]) for r in self.regions], )
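# A small sketch of intersecting two Regions sets (illustrative values only):
#
#   left = Regions((0, 10), (20, 30))
#   right = Regions((5, 25))
#   left & right        # -> Regions([(5, 10), (20, 25)])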
if __name__ == '__main__':
x = Regions(*((i, i + 1) for i in xrange(0,30,2)))
y = Regions(*((i, i + 4) for i in xrange(0,30,5)))
z = Region(6,7)
n = Region(9,10)
def test(left, right):
print "%s == %s: %s" % (left, right, left == right)
print "%s != %s: %s" % (left, right, left != right)
print "%s < %s: %s" % (left, right, left < right)
print "%s <= %s: %s" % (left, right, left <= right)
print "%s > %s: %s" % (left, right, left > right)
print "%s >= %s: %s" % (left, right, left >= right)
print
test(neg_inf, neg_inf)
test(neg_inf, pos_inf)
test(pos_inf, neg_inf)
test(pos_inf, pos_inf)
test(neg_inf, 0)
test(neg_inf, -11111)
test(neg_inf, 11111)
test(0, neg_inf)
test(-11111, neg_inf)
test(11111, neg_inf)
test(pos_inf, 0)
test(pos_inf, -11111)
test(pos_inf, 11111)
test(0, pos_inf)
test(-11111, pos_inf)
test(11111, pos_inf)
print x
print y
print x & y
print z
print 4 in x
print 4 in z
print 5 not in x
print 6 not in z
print z in y
print n in y, n not in y
|
ytoyama/yans_chainer_hackathon | refs/heads/master | cupy/binary/packing.py | 17 | # flake8: NOQA
# "flake8: NOQA" to suppress warning "H104 File contains nothing but comments"
# TODO(okuta): Implement packbits
# TODO(okuta): Implement unpackbits
|
Hybrid-Cloud/cinder | refs/heads/master | cinder/message/resource_types.py | 7 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Resource type constants."""
VOLUME = 'VOLUME'
|