code | repo_name | path | language | license | size
stringlengths 3-1.05M | stringlengths 5-104 | stringlengths 4-251 | stringclasses 1 value | stringclasses 15 values | int64 3-1.05M
---|---|---|---|---|---|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2007, 2008 Guilherme Mesquita Gondim and contributors
#
# This file is part of Django Diário.
#
# Django Diário is free software under terms of the GNU Lesser
# General Public License version 3 (LGPLv3) as published by the Free
# Software Foundation. See the file README for copying conditions.
#
"""
Django Diário setup.
"""
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
from diario import get_version
setup(
    name = 'django-diario',
    version = get_version(),
    description = 'Blog application for Django projects',
    long_description = ('Django Diário is a pluggable weblog application for '
                        'Django Web Framework.'),
    keywords = 'django apps weblog blog',
    author = 'Guilherme Gondim',
    author_email = 'semente@taurinus.org',
    url = 'http://django-diario.googlecode.com',
    download_url = 'http://code.google.com/p/django-diario/downloads/list',
    license = 'GNU Lesser General Public License (LGPL), Version 3',
    classifiers = [
        'Development Status :: 3 - Alpha',
        'Environment :: Plugins',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    packages = find_packages(),
    include_package_data = True,
    zip_safe = False,
)
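# The setup() call above pulls its version string from diario.get_version().
# Below is a hedged sketch of what such a helper commonly looks like; it is an
# assumption for illustration only, not the actual django-diario code (the
# VERSION tuple and formatting may differ in the real package).
#
# VERSION = (0, 3, 'pre')
#
# def get_version():
#     """Return the package version as a dotted string, e.g. '0.3.pre'."""
#     return '.'.join(str(part) for part in VERSION)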
| marinho/django-diario | setup.py | Python | lgpl-3.0 | 1,591 |
from tululbot.utils.kbbi import lookup_kbbi_definition


def test_lookup_kbbi(mocker):
    class FakeResponse:
        def json(self):
            return {
                'kateglo': {
                    'definition': [
                        {
                            'lex_class_ref': 'nomina',
                            'def_text': 'foo bar',
                            'sample': 'foo bar foo bar'
                        },
                        {
                            'lex_class_ref': 'adjektiva',
                            'def_text': 'baz quux',
                            'sample': 'baz baz quux quux'
                        }
                    ]
                }
            }

        def raise_for_status(self):
            pass

    fake_term = 'asdf asdf'
    mock_get = mocker.patch('tululbot.utils.kbbi.requests.get', return_value=FakeResponse(),
                            autospec=True)

    rv = lookup_kbbi_definition(fake_term)

    assert len(rv) == 2
    assert {
        'class': 'nomina',
        'def_text': 'foo bar',
        'sample': 'foo bar foo bar'
    } in rv
    assert {
        'class': 'adjektiva',
        'def_text': 'baz quux',
        'sample': 'baz baz quux quux'
    } in rv
    mock_get.assert_called_once_with('http://kateglo.com/api.php',
                                     params={'format': 'json', 'phrase': fake_term})


def test_lookup_kbbi_term_not_found(mocker):
    class FakeResponse:
        def json(self):
            raise ValueError

        def raise_for_status(self):
            pass

    mocker.patch('tululbot.utils.kbbi.requests.get', return_value=FakeResponse(),
                 autospec=True)

    rv = lookup_kbbi_definition('asdf asdf')

    assert rv == []
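# A minimal sketch of the function under test, inferred purely from the
# assertions above. This is NOT the actual tululbot implementation; the error
# handling and helper names are assumptions based on what these tests exercise.
#
# import requests
#
# def lookup_kbbi_definition(term):
#     response = requests.get('http://kateglo.com/api.php',
#                             params={'format': 'json', 'phrase': term})
#     response.raise_for_status()
#     try:
#         body = response.json()
#     except ValueError:  # term not found: the API body is not valid JSON
#         return []
#     return [
#         {
#             'class': d['lex_class_ref'],
#             'def_text': d['def_text'],
#             'sample': d['sample'],
#         }
#         for d in body['kateglo']['definition']
#     ]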
| tulul/tululbot | tests/test_kbbi.py | Python | apache-2.0 | 1,754 |
import sys


def setup(core, object):
    object.setAttachment('radial_filename', 'deeds/vehicleDeed')
    return


def use(core, actor, object):
    core.mountService.generateVehicle(actor, object, 'object/mobile/vehicle/shared_landspeeder_organa.iff', 'object/intangible/vehicle/shared_landspeeder_organa_pcd.iff')
    return
| ProjectSWGCore/NGECore2 | scripts/object/tangible/tcg/series1/vehicle_deed_organa_speeder.py | Python | lgpl-3.0 | 313 |
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2016-2017 Ryan Roden-Corrent (rcorre) <ryan@rcorre.net>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Test the keyhint widget."""
from collections import OrderedDict
import pytest
from qutebrowser.misc.keyhintwidget import KeyHintView
def expected_text(*args):
"""Helper to format text we expect the KeyHintView to generate.
Args:
args: One tuple for each row in the expected output.
Tuples are of the form: (prefix, color, suffix, command).
"""
text = '<table>'
for group in args:
text += ("<tr>"
"<td>{}</td>"
"<td style='color: {}'>{}</td>"
"<td style='padding-left: 2ex'>{}</td>"
"</tr>").format(*group)
return text + '</table>'
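# Example, derived directly from the formatting above: a single row
#     expected_text(('a', '#ABCDEF', 'a', 'cmd-aa'))
# produces
#     "<table><tr><td>a</td><td style='color: #ABCDEF'>a</td>"
#     "<td style='padding-left: 2ex'>cmd-aa</td></tr></table>"
# i.e. one <tr> per argument tuple, with the typed prefix, the highlighted
# suffix and the bound command in separate cells.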
@pytest.fixture
def keyhint_config(config_stub):
"""Fixture providing the necessary config settings for the KeyHintView."""
config_stub.data = {
'colors': {
'keyhint.fg': 'white',
'keyhint.fg.suffix': 'yellow',
'keyhint.bg': 'black',
},
'fonts': {'keyhint': 'Comic Sans'},
'ui': {
'keyhint-blacklist': '',
'keyhint-delay': 500,
'status-position': 'bottom',
},
}
return config_stub
@pytest.fixture
def keyhint(qtbot, keyhint_config, key_config_stub):
"""Fixture to initialize a KeyHintView."""
keyhint = KeyHintView(0, None)
qtbot.add_widget(keyhint)
assert keyhint.text() == ''
return keyhint
def test_show_and_hide(qtbot, keyhint):
with qtbot.waitSignal(keyhint.update_geometry):
with qtbot.waitExposed(keyhint):
keyhint.show()
keyhint.update_keyhint('normal', '')
assert not keyhint.isVisible()
def test_position_change(keyhint, config_stub):
config_stub.set('ui', 'status-position', 'top')
stylesheet = keyhint.styleSheet()
assert 'border-bottom-right-radius' in stylesheet
assert 'border-top-right-radius' not in stylesheet
def test_suggestions(keyhint, key_config_stub):
"""Test that keyhints are shown based on a prefix."""
# we want the dict to return sorted items() for reliable testing
key_config_stub.set_bindings_for('normal', OrderedDict([
('aa', 'cmd-aa'),
('ab', 'cmd-ab'),
('aba', 'cmd-aba'),
('abb', 'cmd-abb'),
('xd', 'cmd-xd'),
('xe', 'cmd-xe')]))
keyhint.update_keyhint('normal', 'a')
assert keyhint.text() == expected_text(
('a', 'yellow', 'a', 'cmd-aa'),
('a', 'yellow', 'b', 'cmd-ab'),
('a', 'yellow', 'ba', 'cmd-aba'),
('a', 'yellow', 'bb', 'cmd-abb'))
def test_special_bindings(keyhint, key_config_stub):
"""Ensure a prefix of '<' doesn't suggest special keys."""
# we want the dict to return sorted items() for reliable testing
key_config_stub.set_bindings_for('normal', OrderedDict([
('<a', 'cmd-<a'),
('<b', 'cmd-<b'),
('<ctrl-a>', 'cmd-ctrla')]))
keyhint.update_keyhint('normal', '<')
assert keyhint.text() == expected_text(
('<', 'yellow', 'a', 'cmd-<a'),
('<', 'yellow', 'b', 'cmd-<b'))
def test_color_switch(keyhint, config_stub, key_config_stub):
"""Ensure the keyhint suffix color can be updated at runtime."""
config_stub.set('colors', 'keyhint.fg.suffix', '#ABCDEF')
key_config_stub.set_bindings_for('normal', OrderedDict([
('aa', 'cmd-aa')]))
keyhint.update_keyhint('normal', 'a')
assert keyhint.text() == expected_text(('a', '#ABCDEF', 'a', 'cmd-aa'))
def test_no_matches(keyhint, key_config_stub):
"""Ensure the widget isn't visible if there are no keystrings to show."""
key_config_stub.set_bindings_for('normal', OrderedDict([
('aa', 'cmd-aa'),
('ab', 'cmd-ab')]))
keyhint.update_keyhint('normal', 'z')
assert not keyhint.text()
assert not keyhint.isVisible()
def test_blacklist(keyhint, config_stub, key_config_stub):
"""Test that blacklisted keychains aren't hinted."""
config_stub.set('ui', 'keyhint-blacklist', ['ab*'])
# we want the dict to return sorted items() for reliable testing
key_config_stub.set_bindings_for('normal', OrderedDict([
('aa', 'cmd-aa'),
('ab', 'cmd-ab'),
('aba', 'cmd-aba'),
('abb', 'cmd-abb'),
('xd', 'cmd-xd'),
('xe', 'cmd-xe')]))
keyhint.update_keyhint('normal', 'a')
assert keyhint.text() == expected_text(('a', 'yellow', 'a', 'cmd-aa'))
def test_blacklist_all(keyhint, config_stub, key_config_stub):
"""Test that setting the blacklist to * disables keyhints."""
config_stub.set('ui', 'keyhint-blacklist', ['*'])
# we want the dict to return sorted items() for reliable testing
key_config_stub.set_bindings_for('normal', OrderedDict([
('aa', 'cmd-aa'),
('ab', 'cmd-ab'),
('aba', 'cmd-aba'),
('abb', 'cmd-abb'),
('xd', 'cmd-xd'),
('xe', 'cmd-xe')]))
keyhint.update_keyhint('normal', 'a')
assert not keyhint.text()
def test_delay(qtbot, stubs, monkeypatch, keyhint_config, key_config_stub):
timer = stubs.FakeTimer()
monkeypatch.setattr(
'qutebrowser.misc.keyhintwidget.usertypes.Timer',
lambda *_: timer)
interval = 200
keyhint_config.set('ui', 'keyhint-delay', interval)
key_config_stub.set_bindings_for('normal', OrderedDict([('aa', 'cmd-aa')]))
keyhint = KeyHintView(0, None)
keyhint.update_keyhint('normal', 'a')
assert timer.interval() == interval
| pkill-nine/qutebrowser | tests/unit/misc/test_keyhints.py | Python | gpl-3.0 | 6,227 |
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils import timeutils
from oslo_versionedobjects import base as ovo_base
from oslo_versionedobjects import exception as ovo_exc
from nova.compute import manager as compute_manager
from nova import context
from nova import db
from nova import exception
from nova import objects
from nova.objects import aggregate
from nova.objects import service
from nova import test
from nova.tests.unit.objects import test_compute_node
from nova.tests.unit.objects import test_objects
from nova.tests import uuidsentinel
NOW = timeutils.utcnow().replace(microsecond=0)
def _fake_service(**kwargs):
fake_service = {
'created_at': NOW,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'id': 123,
'uuid': uuidsentinel.service,
'host': 'fake-host',
'binary': 'nova-fake',
'topic': 'fake-service-topic',
'report_count': 1,
'forced_down': False,
'disabled': False,
'disabled_reason': None,
'last_seen_up': None,
'version': service.SERVICE_VERSION,
}
fake_service.update(kwargs)
return fake_service
fake_service = _fake_service()
OPTIONAL = ['availability_zone', 'compute_node']
class _TestServiceObject(object):
def supported_hv_specs_comparator(self, expected, obj_val):
obj_val = [inst.to_list() for inst in obj_val]
self.assertJsonEqual(expected, obj_val)
def pci_device_pools_comparator(self, expected, obj_val):
obj_val = obj_val.obj_to_primitive()
self.assertJsonEqual(expected, obj_val)
def comparators(self):
return {'stats': self.assertJsonEqual,
'host_ip': self.assertJsonEqual,
'supported_hv_specs': self.supported_hv_specs_comparator,
'pci_device_pools': self.pci_device_pools_comparator}
def subs(self):
return {'supported_hv_specs': 'supported_instances',
'pci_device_pools': 'pci_stats'}
def _test_query(self, db_method, obj_method, *args, **kwargs):
db_exception = kwargs.pop('db_exception', None)
if db_exception:
with mock.patch.object(db, db_method, side_effect=db_exception) \
as mock_db_method:
obj = getattr(service.Service, obj_method)(self.context, *args,
**kwargs)
self.assertIsNone(obj)
mock_db_method.assert_called_once_with(self.context, *args,
**kwargs)
else:
with mock.patch.object(db, db_method, return_value=fake_service) \
as mock_db_method:
obj = getattr(service.Service, obj_method)(self.context, *args,
**kwargs)
self.compare_obj(obj, fake_service, allow_missing=OPTIONAL)
mock_db_method.assert_called_once_with(self.context, *args,
**kwargs)
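# _test_query drives both paths of each lookup test: with db_exception the DB
# layer is patched to raise and the object method must return None; without it
# the DB call returns fake_service and the resulting object is compared
# field-by-field (OPTIONAL fields may be absent). For example, test_get_by_id
# below simply calls:
#     self._test_query('service_get', 'get_by_id', 123)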
def test_get_by_id(self):
self._test_query('service_get', 'get_by_id', 123)
def test_get_by_uuid(self):
self._test_query('service_get_by_uuid', 'get_by_uuid',
uuidsentinel.service_uuid)
def test_get_by_host_and_topic(self):
self._test_query('service_get_by_host_and_topic',
'get_by_host_and_topic', 'fake-host', 'fake-topic')
def test_get_by_host_and_binary(self):
self._test_query('service_get_by_host_and_binary',
'get_by_host_and_binary', 'fake-host', 'fake-binary')
def test_get_by_host_and_binary_raises(self):
self._test_query('service_get_by_host_and_binary',
'get_by_host_and_binary', 'fake-host', 'fake-binary',
db_exception=exception.HostBinaryNotFound(
host='fake-host', binary='fake-binary'))
def test_get_by_compute_host(self):
self._test_query('service_get_by_compute_host', 'get_by_compute_host',
'fake-host')
def test_get_by_args(self):
self._test_query('service_get_by_host_and_binary', 'get_by_args',
'fake-host', 'fake-binary')
@mock.patch.object(db, 'service_create', return_value=fake_service)
def test_create(self, mock_service_create):
service_obj = service.Service(context=self.context)
service_obj.host = 'fake-host'
service_obj.uuid = uuidsentinel.service2
service_obj.create()
self.assertEqual(fake_service['id'], service_obj.id)
self.assertEqual(service.SERVICE_VERSION, service_obj.version)
mock_service_create.assert_called_once_with(
self.context, {'host': 'fake-host',
'uuid': uuidsentinel.service2,
'version': fake_service['version']})
@mock.patch('nova.objects.service.uuidutils.generate_uuid',
return_value=uuidsentinel.service3)
@mock.patch.object(db, 'service_create', return_value=fake_service)
def test_create_without_uuid_generates_one(
self, mock_service_create, generate_uuid):
service_obj = service.Service(context=self.context)
service_obj.create()
create_args = mock_service_create.call_args[0][1]
self.assertEqual(generate_uuid.return_value, create_args['uuid'])
@mock.patch.object(db, 'service_create', return_value=fake_service)
def test_recreate_fails(self, mock_service_create):
service_obj = service.Service(context=self.context)
service_obj.host = 'fake-host'
service_obj.create()
self.assertRaises(exception.ObjectActionError, service_obj.create)
mock_service_create(self.context, {'host': 'fake-host',
'version': fake_service['version']})
@mock.patch.object(db, 'service_update', return_value=fake_service)
def test_save(self, mock_service_update):
service_obj = service.Service(context=self.context)
service_obj.id = 123
service_obj.host = 'fake-host'
service_obj.save()
self.assertEqual(service.SERVICE_VERSION, service_obj.version)
mock_service_update.assert_called_once_with(
self.context, 123, {'host': 'fake-host',
'version': fake_service['version']})
@mock.patch.object(db, 'service_create',
return_value=fake_service)
def test_set_id_failure(self, db_mock):
service_obj = service.Service(context=self.context,
binary='nova-compute')
service_obj.create()
self.assertRaises(ovo_exc.ReadOnlyFieldError, setattr,
service_obj, 'id', 124)
@mock.patch.object(db, 'service_destroy')
def _test_destroy(self, mock_service_destroy):
service_obj = service.Service(context=self.context)
service_obj.id = 123
service_obj.destroy()
mock_service_destroy.assert_called_once_with(self.context, 123)
def test_destroy(self):
# The test harness needs db.service_destroy to work,
# so avoid leaving it broken here after we're done
orig_service_destroy = db.service_destroy
try:
self._test_destroy()
finally:
db.service_destroy = orig_service_destroy
@mock.patch.object(db, 'service_get_all_by_topic',
return_value=[fake_service])
def test_get_by_topic(self, mock_service_get):
services = service.ServiceList.get_by_topic(self.context, 'fake-topic')
self.assertEqual(1, len(services))
self.compare_obj(services[0], fake_service, allow_missing=OPTIONAL)
mock_service_get.assert_called_once_with(self.context, 'fake-topic')
@mock.patch('nova.db.service_get_all_by_binary')
def test_get_by_binary(self, mock_get):
mock_get.return_value = [fake_service]
services = service.ServiceList.get_by_binary(self.context,
'fake-binary')
self.assertEqual(1, len(services))
mock_get.assert_called_once_with(self.context,
'fake-binary',
include_disabled=False)
@mock.patch('nova.db.service_get_all_by_binary')
def test_get_by_binary_disabled(self, mock_get):
mock_get.return_value = [_fake_service(disabled=True)]
services = service.ServiceList.get_by_binary(self.context,
'fake-binary',
include_disabled=True)
self.assertEqual(1, len(services))
mock_get.assert_called_once_with(self.context,
'fake-binary',
include_disabled=True)
@mock.patch('nova.db.service_get_all_by_binary')
def test_get_by_binary_both(self, mock_get):
mock_get.return_value = [_fake_service(),
_fake_service(disabled=True)]
services = service.ServiceList.get_by_binary(self.context,
'fake-binary',
include_disabled=True)
self.assertEqual(2, len(services))
mock_get.assert_called_once_with(self.context,
'fake-binary',
include_disabled=True)
@mock.patch.object(db, 'service_get_all_by_host',
return_value=[fake_service])
def test_get_by_host(self, mock_service_get):
services = service.ServiceList.get_by_host(self.context, 'fake-host')
self.assertEqual(1, len(services))
self.compare_obj(services[0], fake_service, allow_missing=OPTIONAL)
mock_service_get.assert_called_once_with(self.context, 'fake-host')
@mock.patch.object(db, 'service_get_all', return_value=[fake_service])
def test_get_all(self, mock_get_all):
services = service.ServiceList.get_all(self.context, disabled=False)
self.assertEqual(1, len(services))
self.compare_obj(services[0], fake_service, allow_missing=OPTIONAL)
mock_get_all.assert_called_once_with(self.context, disabled=False)
@mock.patch.object(db, 'service_get_all')
@mock.patch.object(aggregate.AggregateList, 'get_by_metadata_key')
def test_get_all_with_az(self, mock_get_by_key, mock_get_all):
agg = aggregate.Aggregate(context=self.context)
agg.name = 'foo'
agg.metadata = {'availability_zone': 'test-az'}
agg.create()
agg.hosts = [fake_service['host']]
mock_get_by_key.return_value = [agg]
mock_get_all.return_value = [dict(fake_service, topic='compute')]
services = service.ServiceList.get_all(self.context, set_zones=True)
self.assertEqual(1, len(services))
self.assertEqual('test-az', services[0].availability_zone)
mock_get_all.assert_called_once_with(self.context, disabled=None)
mock_get_by_key.assert_called_once_with(self.context,
'availability_zone', hosts=set(agg.hosts))
@mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
def test_compute_node(self, mock_get):
fake_compute_node = objects.ComputeNode._from_db_object(
self.context, objects.ComputeNode(),
test_compute_node.fake_compute_node)
mock_get.return_value = [fake_compute_node]
service_obj = service.Service(id=123, host="fake-host",
binary="nova-compute")
service_obj._context = self.context
self.assertEqual(service_obj.compute_node,
fake_compute_node)
# Make sure it doesn't re-fetch this
service_obj.compute_node
mock_get.assert_called_once_with(self.context, 'fake-host')
@mock.patch.object(db, 'service_get_all_computes_by_hv_type')
def test_get_all_computes_by_hv_type(self, mock_get_all):
mock_get_all.return_value = [fake_service]
services = service.ServiceList.get_all_computes_by_hv_type(
self.context, 'hv-type')
self.assertEqual(1, len(services))
self.compare_obj(services[0], fake_service, allow_missing=OPTIONAL)
mock_get_all.assert_called_once_with(self.context, 'hv-type',
include_disabled=False)
def test_load_when_orphaned(self):
service_obj = service.Service()
service_obj.id = 123
self.assertRaises(exception.OrphanedObjectError,
getattr, service_obj, 'compute_node')
@mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
def test_obj_make_compatible_for_compute_node(self, get_all_by_host):
service_obj = objects.Service(context=self.context)
fake_service_dict = fake_service.copy()
fake_compute_obj = objects.ComputeNode(host=fake_service['host'],
service_id=fake_service['id'])
get_all_by_host.return_value = [fake_compute_obj]
versions = ovo_base.obj_tree_get_versions('Service')
versions['ComputeNode'] = '1.10'
service_obj.obj_make_compatible_from_manifest(fake_service_dict, '1.9',
versions)
self.assertEqual(
fake_compute_obj.obj_to_primitive(target_version='1.10',
version_manifest=versions),
fake_service_dict['compute_node'])
@mock.patch('nova.db.service_get_minimum_version')
def test_get_minimum_version_none(self, mock_get):
mock_get.return_value = None
self.assertEqual(0,
objects.Service.get_minimum_version(self.context,
'nova-compute'))
mock_get.assert_called_once_with(self.context, ['nova-compute'])
@mock.patch('nova.db.service_get_minimum_version')
def test_get_minimum_version(self, mock_get):
mock_get.return_value = {'nova-compute': 123}
self.assertEqual(123,
objects.Service.get_minimum_version(self.context,
'nova-compute'))
mock_get.assert_called_once_with(self.context, ['nova-compute'])
@mock.patch('nova.db.service_get_minimum_version')
@mock.patch('nova.objects.service.LOG')
def test_get_minimum_version_checks_binary(self, mock_log, mock_get):
mock_get.return_value = None
self.assertEqual(0,
objects.Service.get_minimum_version(self.context,
'nova-compute'))
self.assertFalse(mock_log.warning.called)
self.assertRaises(exception.ObjectActionError,
objects.Service.get_minimum_version,
self.context,
'compute')
self.assertTrue(mock_log.warning.called)
@mock.patch('nova.db.service_get_minimum_version')
def test_get_minimum_version_with_caching(self, mock_get):
objects.Service.enable_min_version_cache()
mock_get.return_value = {'nova-compute': 123}
self.assertEqual(123,
objects.Service.get_minimum_version(self.context,
'nova-compute'))
self.assertEqual({"nova-compute": 123},
objects.Service._MIN_VERSION_CACHE)
self.assertEqual(123,
objects.Service.get_minimum_version(self.context,
'nova-compute'))
mock_get.assert_called_once_with(self.context, ['nova-compute'])
objects.Service._SERVICE_VERSION_CACHING = False
objects.Service.clear_min_version_cache()
@mock.patch('nova.db.service_get_minimum_version')
def test_get_min_version_multiple_with_old(self, mock_gmv):
mock_gmv.return_value = {'nova-api': None,
'nova-scheduler': 2,
'nova-conductor': 3}
binaries = ['nova-api', 'nova-api', 'nova-conductor',
'nova-conductor', 'nova-api']
minimum = objects.Service.get_minimum_version_multi(self.context,
binaries)
self.assertEqual(0, minimum)
@mock.patch('nova.db.service_get_minimum_version')
def test_get_min_version_multiple(self, mock_gmv):
mock_gmv.return_value = {'nova-api': 1,
'nova-scheduler': 2,
'nova-conductor': 3}
binaries = ['nova-api', 'nova-api', 'nova-conductor',
'nova-conductor', 'nova-api']
minimum = objects.Service.get_minimum_version_multi(self.context,
binaries)
self.assertEqual(1, minimum)
@mock.patch('nova.db.service_get_minimum_version',
return_value={'nova-compute': 2})
def test_create_above_minimum(self, mock_get):
with mock.patch('nova.objects.service.SERVICE_VERSION',
new=3):
objects.Service(context=self.context,
binary='nova-compute').create()
@mock.patch('nova.db.service_get_minimum_version',
return_value={'nova-compute': 2})
def test_create_equal_to_minimum(self, mock_get):
with mock.patch('nova.objects.service.SERVICE_VERSION',
new=2):
objects.Service(context=self.context,
binary='nova-compute').create()
@mock.patch('nova.db.service_get_minimum_version',
return_value={'nova-compute': 2})
def test_create_below_minimum(self, mock_get):
with mock.patch('nova.objects.service.SERVICE_VERSION',
new=1):
self.assertRaises(exception.ServiceTooOld,
objects.Service(context=self.context,
binary='nova-compute',
).create)
@mock.patch('nova.objects.base.NovaObject'
'.obj_make_compatible_from_manifest', new=mock.Mock())
def test_obj_make_compatible_from_manifest_strips_uuid(self):
s = service.Service()
primitive = {'uuid': uuidsentinel.service}
s.obj_make_compatible_from_manifest(primitive, '1.20', mock.Mock())
self.assertNotIn('uuid', primitive)
@mock.patch('nova.objects.service.uuidutils.generate_uuid',
return_value=uuidsentinel.service4)
def test_from_db_object_without_uuid_generates_one(self, generate_uuid):
values = _fake_service(uuid=None, id=None)
db_service = db.api.service_create(self.context, values)
s = service.Service()
service.Service._from_db_object(self.context, s, db_service)
self.assertEqual(uuidsentinel.service4, s.uuid)
# Check the DB too
db_service2 = db.api.service_get(self.context, s.id)
self.assertEqual(s.uuid, db_service2['uuid'])
class TestServiceObject(test_objects._LocalTest,
_TestServiceObject):
pass
class TestRemoteServiceObject(test_objects._RemoteTest,
_TestServiceObject):
pass
class TestServiceVersion(test.TestCase):
def setUp(self):
self.ctxt = context.get_admin_context()
super(TestServiceVersion, self).setUp()
def _collect_things(self):
data = {
'compute_rpc': compute_manager.ComputeManager.target.version,
}
return data
def test_version(self):
calculated = self._collect_things()
self.assertEqual(
len(service.SERVICE_VERSION_HISTORY), service.SERVICE_VERSION + 1,
'Service version %i has no history. Please update '
'nova.objects.service.SERVICE_VERSION_HISTORY '
'and add %s to it' % (service.SERVICE_VERSION, repr(calculated)))
current = service.SERVICE_VERSION_HISTORY[service.SERVICE_VERSION]
self.assertEqual(
current, calculated,
'Changes detected that require a SERVICE_VERSION change. Please '
'increment nova.objects.service.SERVICE_VERSION, and make sure it '
'is equal to nova.compute.manager.ComputeManager.target.version.')
def test_version_in_init(self):
self.assertRaises(exception.ObjectActionError,
objects.Service,
version=123)
def test_version_set_on_init(self):
self.assertEqual(service.SERVICE_VERSION,
objects.Service().version)
def test_version_loaded_from_db(self):
fake_version = fake_service['version'] + 1
fake_different_service = dict(fake_service)
fake_different_service['version'] = fake_version
obj = objects.Service()
obj._from_db_object(self.ctxt, obj, fake_different_service)
self.assertEqual(fake_version, obj.version)
| rajalokan/nova | nova/tests/unit/objects/test_service.py | Python | apache-2.0 | 22,159 |
from nose.tools import assert_raises

from werkzeug.datastructures import ResponseCacheControl
from werkzeug.http import parse_cache_control_header
from werkzeug.test import Client, create_environ
from werkzeug.wrappers import Request, Response
from werkzeug.contrib import fixers


@Request.application
def path_check_app(request):
    return Response('PATH_INFO: %s\nSCRIPT_NAME: %s' % (
        request.environ.get('PATH_INFO', ''),
        request.environ.get('SCRIPT_NAME', '')
    ))


def test_lighttpd_cgi_root_fix():
    """Test the LighttpdCGIRootFix fixer"""
    app = fixers.LighttpdCGIRootFix(path_check_app)
    response = Response.from_app(app, dict(create_environ(),
        SCRIPT_NAME='/foo',
        PATH_INFO='/bar',
        SERVER_SOFTWARE='lighttpd/1.4.27'
    ))
    assert response.data == 'PATH_INFO: /foo/bar\nSCRIPT_NAME: '

def test_path_info_from_request_uri_fix():
    """Test the PathInfoFromRequestUriFix fixer"""
    app = fixers.PathInfoFromRequestUriFix(path_check_app)
    for key in 'REQUEST_URI', 'REQUEST_URL', 'UNENCODED_URL':
        env = dict(create_environ(), SCRIPT_NAME='/test', PATH_INFO='/?????')
        env[key] = '/test/foo%25bar?drop=this'
        response = Response.from_app(app, env)
        assert response.data == 'PATH_INFO: /foo%bar\nSCRIPT_NAME: /test'

def test_proxy_fix():
    """Test the ProxyFix fixer"""
    @fixers.ProxyFix
    @Request.application
    def app(request):
        return Response('%s|%s' % (
            request.remote_addr,
            # do not use request.host as this fixes too :)
            request.environ['HTTP_HOST']
        ))
    response = Response.from_app(app, dict(create_environ(),
        HTTP_X_FORWARDED_HOST='example.com',
        HTTP_X_FORWARDED_FOR='1.2.3.4, 5.6.7.8',
        REMOTE_ADDR='127.0.0.1',
        HTTP_HOST='fake'
    ))
    assert response.data == '1.2.3.4|example.com'
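# Typical deployment sketch for ProxyFix (hedged; not part of this test file):
# the fixer wraps a WSGI callable, as the decorator usage above shows, so behind
# a reverse proxy an application is usually wired up roughly like
#     app.wsgi_app = fixers.ProxyFix(app.wsgi_app)
# after which remote_addr and the host header are taken from
# X-Forwarded-For / X-Forwarded-Host instead of the proxy's own address.
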
def test_header_rewriter_fix():
    """Test the HeaderRewriterFix fixer"""
    @Request.application
    def application(request):
        return Response("", headers=[
            ('X-Foo', 'bar')
        ])
    application = fixers.HeaderRewriterFix(application, ('X-Foo',), (('X-Bar', '42'),))
    response = Response.from_app(application, create_environ())
    assert response.headers['Content-Type'] == 'text/plain; charset=utf-8'
    assert 'X-Foo' not in response.headers
    assert response.headers['X-Bar'] == '42'

def test_ie_fixes():
    """Test IE fixes."""
    @fixers.InternetExplorerFix
    @Request.application
    def application(request):
        response = Response('binary data here', mimetype='application/vnd.ms-excel')
        response.headers['Vary'] = 'Cookie'
        response.headers['Content-Disposition'] = 'attachment; filename=foo.xls'
        return response

    c = Client(application, Response)
    response = c.get('/', headers=[
        ('User-Agent', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)')
    ])

    # IE gets no vary
    assert response.data == 'binary data here'
    assert 'vary' not in response.headers
    assert response.headers['content-disposition'] == 'attachment; filename=foo.xls'
    assert response.headers['content-type'] == 'application/vnd.ms-excel'

    # other browsers do
    c = Client(application, Response)
    response = c.get('/')
    assert response.data == 'binary data here'
    assert 'vary' in response.headers

    cc = ResponseCacheControl()
    cc.no_cache = True

    @fixers.InternetExplorerFix
    @Request.application
    def application(request):
        response = Response('binary data here', mimetype='application/vnd.ms-excel')
        response.headers['Pragma'] = ', '.join(pragma)
        response.headers['Cache-Control'] = cc.to_header()
        response.headers['Content-Disposition'] = 'attachment; filename=foo.xls'
        return response

    # IE has no pragma or cache control
    pragma = ('no-cache',)
    c = Client(application, Response)
    response = c.get('/', headers=[
        ('User-Agent', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)')
    ])
    assert response.data == 'binary data here'
    assert 'pragma' not in response.headers
    assert 'cache-control' not in response.headers
    assert response.headers['content-disposition'] == 'attachment; filename=foo.xls'

    # IE has simplified pragma
    pragma = ('no-cache', 'x-foo')
    cc.proxy_revalidate = True
    response = c.get('/', headers=[
        ('User-Agent', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)')
    ])
    assert response.data == 'binary data here'
    assert response.headers['pragma'] == 'x-foo'
    assert response.headers['cache-control'] == 'proxy-revalidate'
    assert response.headers['content-disposition'] == 'attachment; filename=foo.xls'

    # regular browsers get everything
    response = c.get('/')
    assert response.data == 'binary data here'
    assert response.headers['pragma'] == 'no-cache, x-foo'
    cc = parse_cache_control_header(response.headers['cache-control'],
                                    cls=ResponseCacheControl)
    assert cc.no_cache
    assert cc.proxy_revalidate
    assert response.headers['content-disposition'] == 'attachment; filename=foo.xls'
| r-kitaev/lucid-python-werkzeug | tests/contrib/test_fixers.py | Python | bsd-3-clause | 5,198 |
# -*- coding: utf-8 -*-
from django.db import models
from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
from django.core import serializers
from homedisplay.utils import publish_ws
import json

__all__ = ["get_serialized_labels", "PrintLabel"]


def get_serialized_labels():
    return serializers.serialize("json", PrintLabel.objects.all())


class PrintLabel(models.Model):
    name = models.CharField(
        max_length=15, verbose_name="Nimi", help_text="Käyttöliittymässä näkyvä nimi")
    content = models.CharField(
        max_length=15, verbose_name="Sisältö", help_text="Tarralle tuleva sisältö")
    include_time = models.BooleanField(
        default=False, blank=True, verbose_name="Tulosta aika")
    include_date = models.BooleanField(
        default=False, blank=True, verbose_name="Tulosta päiväys")

    def __unicode__(self):
        return u"%s (%s), time: %s, date: %s" % (self.name, self.content, self.include_time, self.include_date)

    class Meta:
        ordering = ("name",)
        verbose_name = "Tarra"
        verbose_name_plural = "Tarrat"


def publish_items():
    publish_ws("printer-labels", json.loads(get_serialized_labels()))


@receiver(post_delete, sender=PrintLabel, dispatch_uid="printlabel_delete_signal")
def publish_printlabel_deleted(sender, instance, using, **kwargs):
    publish_items()


@receiver(post_save, sender=PrintLabel, dispatch_uid="printlabel_saved_signal")
def publish_printlabel_saved(sender, instance, created, *args, **kwargs):
    publish_items()
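# Rough sketch of what the "printer-labels" WebSocket message carries, based on
# the serializer call above (Django's JSON serializer output re-parsed with
# json.loads). The field values below are illustrative assumptions only:
#
# [
#     {
#         "model": "control_printer.printlabel",
#         "pk": 1,
#         "fields": {
#             "name": "Example",
#             "content": "Example label",
#             "include_time": True,
#             "include_date": False,
#         },
#     },
# ]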
| ojarva/home-info-display | homedisplay/control_printer/models.py | Python | bsd-3-clause | 1,575 |
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()
setup(
name='podxm',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.1.0',
description='Listen to podcasts on command line.',
long_description=long_description,
# The project's main homepage.
url='https://github.com/jupito/podxm',
# Author details
author='Jussi Toivonen',
author_email='jupito@iki.fi',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
#'Topic :: Software Development :: Build Tools',
#'Topic :: Software Development :: Medical Imaging',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
],
# What does your project relate to?
#keywords='sample setuptools development',
keywords='audio podcast rss',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
#install_requires=['peppercorn'],
#install_requires=['pathlib2'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
#extras_require={
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
#},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
#'sample': ['package_data.dat'],
#'examples': ['doit.cfg'],
},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
#data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
#entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
#},
)
| jupito/podxm | setup.py | Python | mit | 3,970 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at http://mozilla.org/MPL/2.0/.
from django import forms


class CachedFileField(forms.FileField):
    """
    A custom FileField class for use in conjunction with CachedFileModelFormMixin
    that allows storing uploaded file in a cache for re-submission.

    That requires moving the "required" validation into the form's clean
    method instead of handling it on field level.
    """

    def __init__(self, *args, **kwargs):
        self.real_required = kwargs.pop("required", True)
        kwargs["required"] = False
        super().__init__(*args, **kwargs)
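# A hedged sketch of how a form might consume CachedFileField: the docstring
# above says the "required" check moves into the form's clean() method, so a
# CachedFileModelFormMixin-style form could look roughly like this. The form
# name, field name and clean() body below are assumptions for illustration,
# not the actual atmo code.
#
# class ExampleUploadForm(forms.Form):
#     keys_file = CachedFileField(required=True)  # remembered on real_required
#
#     def clean(self):
#         cleaned_data = super().clean()
#         field = self.fields["keys_file"]
#         if field.real_required and not cleaned_data.get("keys_file"):
#             # nothing uploaded now and nothing restored from the cache
#             self.add_error("keys_file", "This field is required.")
#         return cleaned_data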
| mozilla/telemetry-analysis-service | atmo/forms/fields.py | Python | mpl-2.0 | 733 |
# -*- coding: utf-8 -*-
#
# Project Calico documentation build configuration file, created by
# sphinx-quickstart on Tue Mar 3 11:36:20 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Project Calico'
copyright = u'2015, Metaswitch Networks'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.1.0'
# The full version, including alpha/beta/rc tags.
release = '1.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ProjectCalicodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'ProjectCalico.tex', u'Project Calico Documentation',
u'Metaswitch Networks', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'projectcalico', u'Project Calico Documentation',
[u'Metaswitch Networks'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ProjectCalico', u'Project Calico Documentation',
u'Metaswitch Networks', 'ProjectCalico', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| kasisnu/calico | docs/source/conf.py | Python | apache-2.0 | 8,272 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from bambou import NURESTObject
class NUDemarcationService(NURESTObject):
""" Represents a DemarcationService in the VSD
Notes:
Demarcation services provide the interconnection between the datacenter networks and the Wide Area Network.
"""
__rest_name__ = "demarcationservice"
__resource_name__ = "demarcationservices"
## Constants
CONST_TYPE_GATEWAY = "GATEWAY"
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_TYPE_BR_PORT = "BR_PORT"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
def __init__(self, **kwargs):
""" Initializes a DemarcationService instance
Notes:
You can specify all parameters while calling this methods.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> demarcationservice = NUDemarcationService(id=u'xxxx-xxx-xxx-xxx', name=u'DemarcationService')
>>> demarcationservice = NUDemarcationService(data=my_dict)
"""
super(NUDemarcationService, self).__init__()
# Read/Write Attributes
self._last_updated_by = None
self._entity_scope = None
self._route_distinguisher = None
self._priority = None
self._associated_gateway_id = None
self._associated_vlanid = None
self._external_id = None
self._type = None
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="route_distinguisher", remote_name="routeDistinguisher", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="priority", remote_name="priority", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_gateway_id", remote_name="associatedGatewayID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_vlanid", remote_name="associatedVLANID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
self.expose_attribute(local_name="type", remote_name="type", attribute_type=str, is_required=False, is_unique=False, choices=[u'BR_PORT', u'GATEWAY'])
# Fetchers
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
# Properties
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def route_distinguisher(self):
""" Get route_distinguisher value.
Notes:
The route distinguisher associated with the next hop. This is a read only property automatically created by VSD.
This attribute is named `routeDistinguisher` in VSD API.
"""
return self._route_distinguisher
@route_distinguisher.setter
def route_distinguisher(self, value):
""" Set route_distinguisher value.
Notes:
The route distinguisher associated with the next hop. This is a read only property automatically created by VSD.
This attribute is named `routeDistinguisher` in VSD API.
"""
self._route_distinguisher = value
@property
def priority(self):
""" Get priority value.
Notes:
Next hop priority assigned by the user.
"""
return self._priority
@priority.setter
def priority(self, value):
""" Set priority value.
Notes:
Next hop priority assigned by the user.
"""
self._priority = value
@property
def associated_gateway_id(self):
""" Get associated_gateway_id value.
Notes:
The ID of the NSGBR Gateway used as next hop in the untrusted domain.
This attribute is named `associatedGatewayID` in VSD API.
"""
return self._associated_gateway_id
@associated_gateway_id.setter
def associated_gateway_id(self, value):
""" Set associated_gateway_id value.
Notes:
The ID of the NSGBR Gateway used as next hop in the untrusted domain.
This attribute is named `associatedGatewayID` in VSD API.
"""
self._associated_gateway_id = value
@property
def associated_vlanid(self):
""" Get associated_vlanid value.
Notes:
The VLAN ID of the BR VLAN used as next hop in the trusted domain.
This attribute is named `associatedVLANID` in VSD API.
"""
return self._associated_vlanid
@associated_vlanid.setter
def associated_vlanid(self, value):
""" Set associated_vlanid value.
Notes:
The VLAN ID of the BR VLAN used as next hop in the trusted domain.
This attribute is named `associatedVLANID` in VSD API.
"""
self._associated_vlanid = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
@property
def type(self):
""" Get type value.
Notes:
The type of next hop determines linking direction for a demarcation service, possible values: BR_PORT, GATEWAY
"""
return self._type
@type.setter
def type(self, value):
""" Set type value.
Notes:
The type of next hop determines linking direction for a demarcation service, possible values: BR_PORT, GATEWAY
"""
self._type = value
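# Hedged usage sketch (not part of the generated file): with the
# expose_attribute() declarations in __init__ above, a caller can typically
# construct and populate the object through the local attribute names, e.g.
#     svc = NUDemarcationService(priority=10, type='GATEWAY')
#     svc.associated_gateway_id = gateway_id
# and bambou maps them to the VSD API names (e.g. associatedGatewayID) when the
# object is serialized. The exact create/save calls depend on the parent object
# and are not shown here.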
| nuagenetworks/vspk-python | vspk/v5_0/nudemarcationservice.py | Python | bsd-3-clause | 10,092 |
from django.test import TestCase
from django.test.client import Client
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User

from .forms import LoginForm


class LoginViewTest(TestCase):

    def setUp(self):
        self.client = Client()
        self.response = self.client.get(reverse('login:login'))

    def tearDown(self):
        self.client.logout()

    def test_get(self):
        self.assertEqual(self.response.status_code, 200)

    def test_template(self):
        self.assertTemplateUsed(self.response, 'login.html')

    def test_html(self):
        'HTML must contain 3 inputs (user, pass and csrf token) and a submit'
        self.assertContains(self.response, '<input', 3)
        self.assertContains(self.response, 'submit')


class LoginPostTest(TestCase):

    def setUp(self):
        user = User.objects.create_user(
            'admin', 'admin@admin.com', '123'
        )
        self.client = Client()

    def tearDown(self):
        self.client.logout()
        User.objects.all().delete()

    def test_already_logged(self):
        'If already logged, will have a redirect, so, must return code 302'
        self.response = self.client.post(
            reverse('login:login'), self.make_validated_data()
        )
        self.response = self.client.get(reverse('login:login'))
        self.assertEqual(self.response.status_code, 302)

    def test_valid_login(self):
        'With valid login, will have a redirect, so, must return code 302'
        self.response = self.client.post(
            reverse('login:login'), self.make_validated_data()
        )
        self.assertEqual(self.response.status_code, 302)

    def test_invalid_login(self):
        'With invalid login, will not have a redirect, so, must return code 200'
        self.response = self.client.post(
            reverse('login:login'), self.make_validated_data(password='1')
        )
        self.assertEqual(self.response.status_code, 200)

    def make_validated_data(self, **kwargs):
        data = {
            'username': 'admin',
            'password': '123'
        }
        data.update(kwargs)
        return data

# TODO - FIX THESE TESTS.
# I DON'T KNOW WHY IT IS NOT RETURNING ERRORS
# WHEN USERNAME OR PASSWORD IS EMPTY.
class LoginFormTest(TestCase):

    def setUp(self):
        user = User.objects.create_user('admin', 'admin@admin.com', '123')

    def test_if_has_fields(self):
        form = LoginForm()
        existing_fields = list(form.fields.keys())
        expected_field = ['username', 'password']
        self.assertEqual(existing_fields, expected_field)

    # def test_username_is_not_optional(self):
    #     form = self.make_validated_form(username='')
    #     self.assertTrue(form.errors)

    # def test_password_is_not_optional(self):
    #     form = self.make_validated_form(password='')
    #     self.assertTrue(form.errors)
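    # A hedged debugging sketch for the TODO above (not project code): one way
    # to see why the commented-out tests never report errors is to bind an
    # empty value directly and inspect the bound form, e.g.
    #     form = LoginForm({'username': '', 'password': '123'})
    #     form.is_valid()
    #     print(form.errors)  # expect a 'username' key if the field is required
    # If no error appears, the field is probably declared with required=False
    # in LoginForm; that is an assumption to verify against forms.py.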
    def test_form(self):
        form = self.make_validated_form()
        self.assertFalse(form.errors)

    def make_validated_form(self, **kwargs):
        data = {
            'username': 'admin',
            'password': '123',
        }
        data.update(kwargs)
        form = LoginForm(data)
        form.is_valid()
        return form
| delete/estofadora | estofadora/login/tests.py | Python | mit | 3,269 |
'''Pull all the default settings and then all overrides.
Created on Aug 19, 2010
@author: jnaous
'''
import sys, traceback
from expedient.clearinghouse.defaultsettings.django import *
from expedient.clearinghouse.defaultsettings.database import *
from expedient.clearinghouse.defaultsettings.admins import *
from expedient.clearinghouse.defaultsettings.email import *
from expedient.clearinghouse.defaultsettings.expedient import *
from expedient.clearinghouse.defaultsettings.logging import *
from expedient.clearinghouse.defaultsettings.gcf import *
from expedient.clearinghouse.defaultsettings.messaging import *
from expedient.clearinghouse.defaultsettings.openflow import *
from expedient.clearinghouse.defaultsettings.site import *
from expedient.clearinghouse.defaultsettings.xmlrpc import *
from expedient.clearinghouse.defaultsettings.openflowtests import *
from expedient.clearinghouse.defaultsettings.tests import *
from expedient.clearinghouse.defaultsettings.ldapSettings import *
from expedient.clearinghouse.defaultsettings.plugin import *
# Import the list of required variables
from expedient.clearinghouse.defaultsettings.required import REQUIRED_SETTINGS
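# REQUIRED_SETTINGS is consumed below as item[0] (a defaultsettings module name)
# and item[1] (the variable names that module must define), so its assumed shape
# is roughly the following; the actual entries live in
# expedient.clearinghouse.defaultsettings.required and may differ:
#
# REQUIRED_SETTINGS = [
#     ("database", ["DATABASE_NAME", "DATABASE_ENGINE"]),
#     ("site", ["SITE_DOMAIN"]),
# ]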
# Try getting importing the secret key from a secret_key module
try:
from secret_key import SECRET_KEY
except ImportError:
print(
"Error importing secret_key module. Using default insecure key."
"Please run the 'create_secret_key' manage.py command to create "
"a new secret key. Do this only after setting up your local settings."
" If you are not yet running the production server, you can ignore "
"this error."
)
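# A minimal sketch (an assumption, not a file shipped with this project) of the
# secret_key module that the 'create_secret_key' command generates:
#
#     # secret_key.py
#     SECRET_KEY = 'a-random-50-character-string'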
# Now import the local settings
try:
# do the import here to check that the path exists before doing anything
from localsettings import *
# Delete all the default required settings
_modname = globals()['__name__']
_this_mod = sys.modules[_modname]
for item in REQUIRED_SETTINGS:
for var in item[1]:
delattr(_this_mod, var)
# now import again to re-insert the deleted settings
from localsettings import *
# check that all the required settings are set
for item in REQUIRED_SETTINGS:
for var in item[1]:
if not hasattr(_this_mod, var):
raise Exception(
"Missing required setting %s. See the "
"documentation for this setting at "
"expedient.clearinghouse.defaultsettings.%s"
% (var, item[0])
)
except ImportError as e:
if "No module named localsettings" in "%s" % e:
print(
"ERROR: No localsettings module defined. Please run the "
" 'bootstrap_local_settings' command if you have not yet "
"created a localsettings module and add the parent "
"directory to your PYTHONPATH. Proceeding with missing "
"required settings."
)
else:
raise
# Logging
from expedient.common import loggingconf
import logging
if DEBUG:
loggingconf.set_up(logging.DEBUG, LOGGING_LEVELS)
else:
loggingconf.set_up(logging.INFO, LOGGING_LEVELS)
#GENI CONTROL FRAMEWORK SETTINGS (NOT NEEDED AT THIS MOMENT)
GCF_BASE_NAME = "expedient//your_affiliation"
GCF_URN_PREFIX = "expedient:your_affiliation"
#OFREG URL
OFREG_URL = "https://register.fp7-ofelia.eu"
OFREG_RESET_PATH = '/password_reset/forgotten'
OPENFLOW_GAPI_RSC_URN_PREFIX = "urn:publicid:IDN+expedient:your_affiliation:openflow"
OPENFLOW_GAPI_AM_URN = OPENFLOW_GAPI_RSC_URN_PREFIX+"+am"
#Openflow Test (NOT NEEDED, BUT KEPT HERE JUST IN CASE)
MININET_VMS = [
("84.88.41.12", 22),
]
#Monitoring
MONITORING_INTERVAL = 38
| avlach/univbris-ocf | expedient/src/python/expedient/clearinghouse/settings.py | Python | bsd-3-clause | 3,709 |
import os
import sys
import unittest
import importlib
from unittest import mock
from pykickstart.options import KSOptionParser
from pykickstart.base import KickstartCommand, BaseData
class ClassDefinitionTestCase(unittest.TestCase):
"""
Search for command and data classes defined like so:
RHEL7_AutoPart = F21_AutoPart.
    Definitions like this make it possible for other tests
    to miss errors, and we don't want that. Either use the
    existing class name if nothing has been redefined, or
    provide a boilerplate definition:
class RHEL7_Autopart(F21_Autopart):
pass
"""
def runTest(self):
errors = 0
commands_dir = os.path.join(os.path.dirname(__file__), "..", "..", "pykickstart", "commands")
commands_dir = os.path.abspath(commands_dir)
self.assertTrue(os.path.exists(commands_dir))
if commands_dir not in sys.path:
sys.path.append(commands_dir)
for _dirpath, _dirnames, paths in os.walk(commands_dir):
for path in paths:
if path == '__init__.py' or not path.endswith('.py'):
continue
# load the module defining all possible command implementations
command_module = importlib.import_module(path.replace(".py", ""))
module_commands = [] # a list of already checked commands
for impl_name, impl_class in command_module.__dict__.items():
# skip everything which isn't a class
if type(impl_class) is not type:
continue
# skip everything which doesn't inherit
# from KickstartCommand or BaseData
if not (issubclass(impl_class, KickstartCommand) or issubclass(impl_class, BaseData)):
continue
# skip base classes as well
if impl_class.__name__ in ['KickstartCommand', 'DeprecatedCommand']:
continue
if impl_class not in module_commands and \
impl_class.__name__ == impl_name:
module_commands.append(impl_class)
else:
errors += 1
message = "ERROR: In `commands/%s` %s = %s" % (path, impl_name, impl_class.__name__)
print(message)
# assert for errors presence
self.assertEqual(0, errors)
class TestKSOptionParser(KSOptionParser):
"""
    Wrapper class that records an error if any of the
    help attributes are empty.
"""
def __init__(self, *args, **kwargs):
self._test_errors = []
for arg_name in ['prog', 'version', 'description']:
if not kwargs.get(arg_name) and not kwargs.get("deprecated"):
self._test_errors.append("%s: %s can't be blank" % (args[0], arg_name))
super(TestKSOptionParser, self).__init__(*args, **kwargs)
def add_argument(self, *args, **kwargs):
for arg_name in ['help', 'version']:
if not kwargs.get(arg_name) and not kwargs.get("deprecated"):
self._test_errors.append("%s: %s can't be blank" % (args[0], arg_name))
return super(TestKSOptionParser, self).add_argument(*args, **kwargs)
class HelpAndDescription_TestCase(unittest.TestCase):
"""
Check that all commands and their options have some description text.
"""
def runTest(self):
errors = 0
commands_dir = os.path.join(os.path.dirname(__file__), "..", "..", "pykickstart", "commands")
commands_dir = os.path.abspath(commands_dir)
self.assertTrue(os.path.exists(commands_dir))
if commands_dir not in sys.path:
sys.path.append(commands_dir)
for _dirpath, _dirnames, paths in os.walk(commands_dir):
for path in paths:
if path == '__init__.py' or not path.endswith('.py'):
continue
# load the module defining all possible command implementations
command_module = importlib.import_module(path.replace(".py", ""))
for _, impl_class in command_module.__dict__.items():
# skip everything which isn't a class
if type(impl_class) is not type:
continue
# skip everything which doesn't inherit from KickstartCommand
if not issubclass(impl_class, KickstartCommand):
continue
# skip base classes as well
if impl_class.__name__ in ['KickstartCommand', 'DeprecatedCommand']:
continue
# In order for patch to locate the function to be patched, it must be
# specified using its fully qualified name, which may not be what you expect.
# For example, if a class is imported in the module my_module.py as follows:
# from module import ClassA
                    # It must be patched as patch('my_module.ClassA'), rather than patch('module.ClassA'),
# due to the semantics of the from ... import ... statement, which imports
# classes and functions into the current namespace.
command_module_name = command_module.__name__
# the install.py command inherits from upgrade.py and doesn't import
# KSOptionParser on its own
if command_module_name == 'install':
command_module_name = 'upgrade'
with mock.patch('%s.KSOptionParser' % command_module_name, new=TestKSOptionParser):
# just construct the option parser
                        # the wrapper class will record an error in case
                        # there are empty help strings
klass = impl_class()
op = klass._getParser()
if hasattr(op, "_test_errors") and len(op._test_errors) > 0:
errors += len(op._test_errors)
print("ERROR: In `%s`" % impl_class)
for err in op._test_errors:
print(err)
# assert for errors presence
self.assertEqual(0, errors)
if __name__ == "__main__":
unittest.main()
| bcl/pykickstart | tests/commands/__init__.py | Python | gpl-2.0 | 6,556 |
import mimetypes
import sys
import typing as t
import warnings
from collections import defaultdict
from datetime import datetime
from datetime import timedelta
from http.cookiejar import CookieJar
from io import BytesIO
from itertools import chain
from random import random
from tempfile import TemporaryFile
from time import time
from urllib.request import Request as _UrllibRequest
from ._internal import _get_environ
from ._internal import _make_encode_wrapper
from ._internal import _wsgi_decoding_dance
from ._internal import _wsgi_encoding_dance
from .datastructures import Authorization
from .datastructures import CallbackDict
from .datastructures import CombinedMultiDict
from .datastructures import EnvironHeaders
from .datastructures import FileMultiDict
from .datastructures import Headers
from .datastructures import MultiDict
from .http import dump_cookie
from .http import dump_options_header
from .http import parse_options_header
from .sansio.multipart import Data
from .sansio.multipart import Epilogue
from .sansio.multipart import Field
from .sansio.multipart import File
from .sansio.multipart import MultipartEncoder
from .sansio.multipart import Preamble
from .urls import iri_to_uri
from .urls import url_encode
from .urls import url_fix
from .urls import url_parse
from .urls import url_unparse
from .urls import url_unquote
from .utils import get_content_type
from .wrappers.request import Request
from .wrappers.response import Response
from .wsgi import ClosingIterator
from .wsgi import get_current_url
if t.TYPE_CHECKING:
from _typeshed.wsgi import WSGIApplication
from _typeshed.wsgi import WSGIEnvironment
def stream_encode_multipart(
data: t.Mapping[str, t.Any],
use_tempfile: bool = True,
threshold: int = 1024 * 500,
boundary: t.Optional[str] = None,
charset: str = "utf-8",
) -> t.Tuple[t.IO[bytes], int, str]:
"""Encode a dict of values (either strings or file descriptors or
:class:`FileStorage` objects.) into a multipart encoded string stored
in a file descriptor.
"""
if boundary is None:
boundary = f"---------------WerkzeugFormPart_{time()}{random()}"
stream: t.IO[bytes] = BytesIO()
total_length = 0
on_disk = False
if use_tempfile:
def write_binary(s: bytes) -> int:
nonlocal stream, total_length, on_disk
if on_disk:
return stream.write(s)
else:
length = len(s)
if length + total_length <= threshold:
stream.write(s)
else:
new_stream = t.cast(t.IO[bytes], TemporaryFile("wb+"))
new_stream.write(stream.getvalue()) # type: ignore
new_stream.write(s)
stream = new_stream
on_disk = True
total_length += length
return length
else:
write_binary = stream.write
encoder = MultipartEncoder(boundary.encode())
write_binary(encoder.send_event(Preamble(data=b"")))
for key, value in _iter_data(data):
reader = getattr(value, "read", None)
if reader is not None:
filename = getattr(value, "filename", getattr(value, "name", None))
content_type = getattr(value, "content_type", None)
if content_type is None:
content_type = (
filename
and mimetypes.guess_type(filename)[0]
or "application/octet-stream"
)
headers = Headers([("Content-Type", content_type)])
if filename is None:
write_binary(encoder.send_event(Field(name=key, headers=headers)))
else:
write_binary(
encoder.send_event(
File(name=key, filename=filename, headers=headers)
)
)
while True:
chunk = reader(16384)
if not chunk:
break
write_binary(encoder.send_event(Data(data=chunk, more_data=True)))
else:
if not isinstance(value, str):
value = str(value)
write_binary(encoder.send_event(Field(name=key, headers=Headers())))
write_binary(
encoder.send_event(Data(data=value.encode(charset), more_data=False))
)
write_binary(encoder.send_event(Epilogue(data=b"")))
length = stream.tell()
stream.seek(0)
return stream, length, boundary
def encode_multipart(
values: t.Mapping[str, t.Any],
boundary: t.Optional[str] = None,
charset: str = "utf-8",
) -> t.Tuple[str, bytes]:
"""Like `stream_encode_multipart` but returns a tuple in the form
(``boundary``, ``data``) where data is bytes.
"""
stream, length, boundary = stream_encode_multipart(
values, use_tempfile=False, boundary=boundary, charset=charset
)
return boundary, stream.read()
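# A hedged usage sketch for the two helpers above (values are illustrative):
#
#     boundary, body = encode_multipart(
#         {"field": "value", "file": (BytesIO(b"abc"), "test.txt")}
#     )
#     # 'body' is bytes suitable as a request body with content type
#     # 'multipart/form-data; boundary="<boundary>"'.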
class _TestCookieHeaders:
"""A headers adapter for cookielib"""
def __init__(self, headers: t.Union[Headers, t.List[t.Tuple[str, str]]]) -> None:
self.headers = headers
def getheaders(self, name: str) -> t.Iterable[str]:
headers = []
name = name.lower()
for k, v in self.headers:
if k.lower() == name:
headers.append(v)
return headers
def get_all(
self, name: str, default: t.Optional[t.Iterable[str]] = None
) -> t.Iterable[str]:
headers = self.getheaders(name)
if not headers:
return default # type: ignore
return headers
class _TestCookieResponse:
"""Something that looks like a httplib.HTTPResponse, but is actually just an
adapter for our test responses to make them available for cookielib.
"""
def __init__(self, headers: t.Union[Headers, t.List[t.Tuple[str, str]]]) -> None:
self.headers = _TestCookieHeaders(headers)
def info(self) -> _TestCookieHeaders:
return self.headers
class _TestCookieJar(CookieJar):
"""A cookielib.CookieJar modified to inject and read cookie headers from
and to wsgi environments, and wsgi application responses.
"""
def inject_wsgi(self, environ: "WSGIEnvironment") -> None:
"""Inject the cookies as client headers into the server's wsgi
environment.
"""
cvals = [f"{c.name}={c.value}" for c in self]
if cvals:
environ["HTTP_COOKIE"] = "; ".join(cvals)
else:
environ.pop("HTTP_COOKIE", None)
def extract_wsgi(
self,
environ: "WSGIEnvironment",
headers: t.Union[Headers, t.List[t.Tuple[str, str]]],
) -> None:
"""Extract the server's set-cookie headers as cookies into the
cookie jar.
"""
self.extract_cookies(
_TestCookieResponse(headers), # type: ignore
_UrllibRequest(get_current_url(environ)),
)
def _iter_data(data: t.Mapping[str, t.Any]) -> t.Iterator[t.Tuple[str, t.Any]]:
"""Iterate over a mapping that might have a list of values, yielding
all key, value pairs. Almost like iter_multi_items but only allows
lists, not tuples, of values so tuples can be used for files.
"""
if isinstance(data, MultiDict):
yield from data.items(multi=True)
else:
for key, value in data.items():
if isinstance(value, list):
for v in value:
yield key, v
else:
yield key, value
_TAnyMultiDict = t.TypeVar("_TAnyMultiDict", bound=MultiDict)
class EnvironBuilder:
"""This class can be used to conveniently create a WSGI environment
for testing purposes. It can be used to quickly create WSGI environments
or request objects from arbitrary data.
The signature of this class is also used in some other places as of
Werkzeug 0.5 (:func:`create_environ`, :meth:`Response.from_values`,
:meth:`Client.open`). Because of this most of the functionality is
available through the constructor alone.
Files and regular form data can be manipulated independently of each
other with the :attr:`form` and :attr:`files` attributes, but are
passed with the same argument to the constructor: `data`.
`data` can be any of these values:
- a `str` or `bytes` object: The object is converted into an
:attr:`input_stream`, the :attr:`content_length` is set and you have to
provide a :attr:`content_type`.
- a `dict` or :class:`MultiDict`: The keys have to be strings. The values
have to be either any of the following objects, or a list of any of the
following objects:
- a :class:`file`-like object: These are converted into
:class:`FileStorage` objects automatically.
- a `tuple`: The :meth:`~FileMultiDict.add_file` method is called
with the key and the unpacked `tuple` items as positional
arguments.
- a `str`: The string is set as form data for the associated key.
- a file-like object: The object content is loaded in memory and then
handled like a regular `str` or a `bytes`.
:param path: the path of the request. In the WSGI environment this will
end up as `PATH_INFO`. If the `query_string` is not defined
and there is a question mark in the `path` everything after
it is used as query string.
:param base_url: the base URL is a URL that is used to extract the WSGI
URL scheme, host (server name + server port) and the
script root (`SCRIPT_NAME`).
:param query_string: an optional string or dict with URL parameters.
:param method: the HTTP method to use, defaults to `GET`.
:param input_stream: an optional input stream. Do not specify this and
`data`. As soon as an input stream is set you can't
modify :attr:`args` and :attr:`files` unless you
set the :attr:`input_stream` to `None` again.
:param content_type: The content type for the request. As of 0.5 you
don't have to provide this when specifying files
and form data via `data`.
:param content_length: The content length for the request. You don't
have to specify this when providing data via
`data`.
:param errors_stream: an optional error stream that is used for
`wsgi.errors`. Defaults to :data:`stderr`.
:param multithread: controls `wsgi.multithread`. Defaults to `False`.
:param multiprocess: controls `wsgi.multiprocess`. Defaults to `False`.
:param run_once: controls `wsgi.run_once`. Defaults to `False`.
:param headers: an optional list or :class:`Headers` object of headers.
:param data: a string or dict of form data or a file-object.
See explanation above.
:param json: An object to be serialized and assigned to ``data``.
Defaults the content type to ``"application/json"``.
Serialized with the function assigned to :attr:`json_dumps`.
:param environ_base: an optional dict of environment defaults.
:param environ_overrides: an optional dict of environment overrides.
:param charset: the charset used to encode string data.
:param auth: An authorization object to use for the
``Authorization`` header value. A ``(username, password)`` tuple
is a shortcut for ``Basic`` authorization.
.. versionchanged:: 2.0
``REQUEST_URI`` and ``RAW_URI`` is the full raw URI including
the query string, not only the path.
.. versionchanged:: 2.0
The default :attr:`request_class` is ``Request`` instead of
``BaseRequest``.
.. versionadded:: 2.0
Added the ``auth`` parameter.
.. versionadded:: 0.15
The ``json`` param and :meth:`json_dumps` method.
.. versionadded:: 0.15
The environ has keys ``REQUEST_URI`` and ``RAW_URI`` containing
        the path before percent-decoding. This is not part of the WSGI
PEP, but many WSGI servers include it.
.. versionchanged:: 0.6
``path`` and ``base_url`` can now be unicode strings that are
encoded with :func:`iri_to_uri`.
"""
#: the server protocol to use. defaults to HTTP/1.1
server_protocol = "HTTP/1.1"
#: the wsgi version to use. defaults to (1, 0)
wsgi_version = (1, 0)
#: The default request class used by :meth:`get_request`.
request_class = Request
import json
#: The serialization function used when ``json`` is passed.
json_dumps = staticmethod(json.dumps)
del json
_args: t.Optional[MultiDict]
_query_string: t.Optional[str]
_input_stream: t.Optional[t.IO[bytes]]
_form: t.Optional[MultiDict]
_files: t.Optional[FileMultiDict]
def __init__(
self,
path: str = "/",
base_url: t.Optional[str] = None,
query_string: t.Optional[t.Union[t.Mapping[str, str], str]] = None,
method: str = "GET",
input_stream: t.Optional[t.IO[bytes]] = None,
content_type: t.Optional[str] = None,
content_length: t.Optional[int] = None,
errors_stream: t.Optional[t.IO[str]] = None,
multithread: bool = False,
multiprocess: bool = False,
run_once: bool = False,
headers: t.Optional[t.Union[Headers, t.Iterable[t.Tuple[str, str]]]] = None,
data: t.Optional[
t.Union[t.IO[bytes], str, bytes, t.Mapping[str, t.Any]]
] = None,
environ_base: t.Optional[t.Mapping[str, t.Any]] = None,
environ_overrides: t.Optional[t.Mapping[str, t.Any]] = None,
charset: str = "utf-8",
mimetype: t.Optional[str] = None,
json: t.Optional[t.Mapping[str, t.Any]] = None,
auth: t.Optional[t.Union[Authorization, t.Tuple[str, str]]] = None,
) -> None:
path_s = _make_encode_wrapper(path)
if query_string is not None and path_s("?") in path:
raise ValueError("Query string is defined in the path and as an argument")
request_uri = url_parse(path)
if query_string is None and path_s("?") in path:
query_string = request_uri.query
self.charset = charset
self.path = iri_to_uri(request_uri.path)
self.request_uri = path
if base_url is not None:
base_url = url_fix(iri_to_uri(base_url, charset), charset)
self.base_url = base_url # type: ignore
if isinstance(query_string, (bytes, str)):
self.query_string = query_string
else:
if query_string is None:
query_string = MultiDict()
elif not isinstance(query_string, MultiDict):
query_string = MultiDict(query_string)
self.args = query_string
self.method = method
if headers is None:
headers = Headers()
elif not isinstance(headers, Headers):
headers = Headers(headers)
self.headers = headers
if content_type is not None:
self.content_type = content_type
if errors_stream is None:
errors_stream = sys.stderr
self.errors_stream = errors_stream
self.multithread = multithread
self.multiprocess = multiprocess
self.run_once = run_once
self.environ_base = environ_base
self.environ_overrides = environ_overrides
self.input_stream = input_stream
self.content_length = content_length
self.closed = False
if auth is not None:
if isinstance(auth, tuple):
auth = Authorization(
"basic", {"username": auth[0], "password": auth[1]}
)
self.headers.set("Authorization", auth.to_header())
if json is not None:
if data is not None:
raise TypeError("can't provide both json and data")
data = self.json_dumps(json)
if self.content_type is None:
self.content_type = "application/json"
if data:
if input_stream is not None:
raise TypeError("can't provide input stream and data")
if hasattr(data, "read"):
data = data.read() # type: ignore
if isinstance(data, str):
data = data.encode(self.charset)
if isinstance(data, bytes):
self.input_stream = BytesIO(data)
if self.content_length is None:
self.content_length = len(data)
else:
for key, value in _iter_data(data): # type: ignore
if isinstance(value, (tuple, dict)) or hasattr(value, "read"):
self._add_file_from_data(key, value)
else:
self.form.setlistdefault(key).append(value)
if mimetype is not None:
self.mimetype = mimetype
@classmethod
def from_environ(
cls, environ: "WSGIEnvironment", **kwargs: t.Any
) -> "EnvironBuilder":
"""Turn an environ dict back into a builder. Any extra kwargs
override the args extracted from the environ.
.. versionchanged:: 2.0
Path and query values are passed through the WSGI decoding
dance to avoid double encoding.
.. versionadded:: 0.15
"""
headers = Headers(EnvironHeaders(environ))
out = {
"path": _wsgi_decoding_dance(environ["PATH_INFO"]),
"base_url": cls._make_base_url(
environ["wsgi.url_scheme"],
headers.pop("Host"),
_wsgi_decoding_dance(environ["SCRIPT_NAME"]),
),
"query_string": _wsgi_decoding_dance(environ["QUERY_STRING"]),
"method": environ["REQUEST_METHOD"],
"input_stream": environ["wsgi.input"],
"content_type": headers.pop("Content-Type", None),
"content_length": headers.pop("Content-Length", None),
"errors_stream": environ["wsgi.errors"],
"multithread": environ["wsgi.multithread"],
"multiprocess": environ["wsgi.multiprocess"],
"run_once": environ["wsgi.run_once"],
"headers": headers,
}
out.update(kwargs)
return cls(**out)
def _add_file_from_data(
self,
key: str,
value: t.Union[
t.IO[bytes], t.Tuple[t.IO[bytes], str], t.Tuple[t.IO[bytes], str, str]
],
) -> None:
"""Called in the EnvironBuilder to add files from the data dict."""
if isinstance(value, tuple):
self.files.add_file(key, *value)
else:
self.files.add_file(key, value)
@staticmethod
def _make_base_url(scheme: str, host: str, script_root: str) -> str:
return url_unparse((scheme, host, script_root, "", "")).rstrip("/") + "/"
@property
def base_url(self) -> str:
"""The base URL is used to extract the URL scheme, host name,
port, and root path.
"""
return self._make_base_url(self.url_scheme, self.host, self.script_root)
@base_url.setter
def base_url(self, value: t.Optional[str]) -> None:
if value is None:
scheme = "http"
netloc = "localhost"
script_root = ""
else:
scheme, netloc, script_root, qs, anchor = url_parse(value)
if qs or anchor:
raise ValueError("base url must not contain a query string or fragment")
self.script_root = script_root.rstrip("/")
self.host = netloc
self.url_scheme = scheme
@property
def content_type(self) -> t.Optional[str]:
"""The content type for the request. Reflected from and to
the :attr:`headers`. Do not set if you set :attr:`files` or
:attr:`form` for auto detection.
"""
ct = self.headers.get("Content-Type")
if ct is None and not self._input_stream:
if self._files:
return "multipart/form-data"
if self._form:
return "application/x-www-form-urlencoded"
return None
return ct
@content_type.setter
def content_type(self, value: t.Optional[str]) -> None:
if value is None:
self.headers.pop("Content-Type", None)
else:
self.headers["Content-Type"] = value
@property
def mimetype(self) -> t.Optional[str]:
"""The mimetype (content type without charset etc.)
.. versionadded:: 0.14
"""
ct = self.content_type
return ct.split(";")[0].strip() if ct else None
@mimetype.setter
def mimetype(self, value: str) -> None:
self.content_type = get_content_type(value, self.charset)
@property
def mimetype_params(self) -> t.Mapping[str, str]:
"""The mimetype parameters as dict. For example if the
content type is ``text/html; charset=utf-8`` the params would be
``{'charset': 'utf-8'}``.
.. versionadded:: 0.14
"""
def on_update(d: t.Mapping[str, str]) -> None:
self.headers["Content-Type"] = dump_options_header(self.mimetype, d)
d = parse_options_header(self.headers.get("content-type", ""))[1]
return CallbackDict(d, on_update)
@property
def content_length(self) -> t.Optional[int]:
"""The content length as integer. Reflected from and to the
:attr:`headers`. Do not set if you set :attr:`files` or
:attr:`form` for auto detection.
"""
return self.headers.get("Content-Length", type=int)
@content_length.setter
def content_length(self, value: t.Optional[int]) -> None:
if value is None:
self.headers.pop("Content-Length", None)
else:
self.headers["Content-Length"] = str(value)
def _get_form(self, name: str, storage: t.Type[_TAnyMultiDict]) -> _TAnyMultiDict:
"""Common behavior for getting the :attr:`form` and
:attr:`files` properties.
:param name: Name of the internal cached attribute.
:param storage: Storage class used for the data.
"""
if self.input_stream is not None:
raise AttributeError("an input stream is defined")
rv = getattr(self, name)
if rv is None:
rv = storage()
setattr(self, name, rv)
return rv # type: ignore
def _set_form(self, name: str, value: MultiDict) -> None:
"""Common behavior for setting the :attr:`form` and
:attr:`files` properties.
:param name: Name of the internal cached attribute.
:param value: Value to assign to the attribute.
"""
self._input_stream = None
setattr(self, name, value)
@property
def form(self) -> MultiDict:
"""A :class:`MultiDict` of form values."""
return self._get_form("_form", MultiDict)
@form.setter
def form(self, value: MultiDict) -> None:
self._set_form("_form", value)
@property
def files(self) -> FileMultiDict:
"""A :class:`FileMultiDict` of uploaded files. Use
:meth:`~FileMultiDict.add_file` to add new files.
"""
return self._get_form("_files", FileMultiDict)
@files.setter
def files(self, value: FileMultiDict) -> None:
self._set_form("_files", value)
@property
def input_stream(self) -> t.Optional[t.IO[bytes]]:
"""An optional input stream. This is mutually exclusive with
setting :attr:`form` and :attr:`files`, setting it will clear
those. Do not provide this if the method is not ``POST`` or
another method that has a body.
"""
return self._input_stream
@input_stream.setter
def input_stream(self, value: t.Optional[t.IO[bytes]]) -> None:
self._input_stream = value
self._form = None
self._files = None
@property
def query_string(self) -> str:
"""The query string. If you set this to a string
:attr:`args` will no longer be available.
"""
if self._query_string is None:
if self._args is not None:
return url_encode(self._args, charset=self.charset)
return ""
return self._query_string
@query_string.setter
def query_string(self, value: t.Optional[str]) -> None:
self._query_string = value
self._args = None
@property
def args(self) -> MultiDict:
"""The URL arguments as :class:`MultiDict`."""
if self._query_string is not None:
raise AttributeError("a query string is defined")
if self._args is None:
self._args = MultiDict()
return self._args
@args.setter
def args(self, value: t.Optional[MultiDict]) -> None:
self._query_string = None
self._args = value
@property
def server_name(self) -> str:
"""The server name (read-only, use :attr:`host` to set)"""
return self.host.split(":", 1)[0]
@property
def server_port(self) -> int:
"""The server port as integer (read-only, use :attr:`host` to set)"""
pieces = self.host.split(":", 1)
if len(pieces) == 2 and pieces[1].isdigit():
return int(pieces[1])
if self.url_scheme == "https":
return 443
return 80
def __del__(self) -> None:
try:
self.close()
except Exception:
pass
def close(self) -> None:
"""Closes all files. If you put real :class:`file` objects into the
:attr:`files` dict you can call this method to automatically close
them all in one go.
"""
if self.closed:
return
try:
files = self.files.values()
except AttributeError:
files = () # type: ignore
for f in files:
try:
f.close()
except Exception:
pass
self.closed = True
def get_environ(self) -> "WSGIEnvironment":
"""Return the built environ.
.. versionchanged:: 0.15
The content type and length headers are set based on
input stream detection. Previously this only set the WSGI
keys.
"""
input_stream = self.input_stream
content_length = self.content_length
mimetype = self.mimetype
content_type = self.content_type
if input_stream is not None:
start_pos = input_stream.tell()
input_stream.seek(0, 2)
end_pos = input_stream.tell()
input_stream.seek(start_pos)
content_length = end_pos - start_pos
elif mimetype == "multipart/form-data":
input_stream, content_length, boundary = stream_encode_multipart(
CombinedMultiDict([self.form, self.files]), charset=self.charset
)
content_type = f'{mimetype}; boundary="{boundary}"'
elif mimetype == "application/x-www-form-urlencoded":
form_encoded = url_encode(self.form, charset=self.charset).encode("ascii")
content_length = len(form_encoded)
input_stream = BytesIO(form_encoded)
else:
input_stream = BytesIO()
result: "WSGIEnvironment" = {}
if self.environ_base:
result.update(self.environ_base)
def _path_encode(x: str) -> str:
return _wsgi_encoding_dance(url_unquote(x, self.charset), self.charset)
raw_uri = _wsgi_encoding_dance(self.request_uri, self.charset)
result.update(
{
"REQUEST_METHOD": self.method,
"SCRIPT_NAME": _path_encode(self.script_root),
"PATH_INFO": _path_encode(self.path),
"QUERY_STRING": _wsgi_encoding_dance(self.query_string, self.charset),
# Non-standard, added by mod_wsgi, uWSGI
"REQUEST_URI": raw_uri,
# Non-standard, added by gunicorn
"RAW_URI": raw_uri,
"SERVER_NAME": self.server_name,
"SERVER_PORT": str(self.server_port),
"HTTP_HOST": self.host,
"SERVER_PROTOCOL": self.server_protocol,
"wsgi.version": self.wsgi_version,
"wsgi.url_scheme": self.url_scheme,
"wsgi.input": input_stream,
"wsgi.errors": self.errors_stream,
"wsgi.multithread": self.multithread,
"wsgi.multiprocess": self.multiprocess,
"wsgi.run_once": self.run_once,
}
)
headers = self.headers.copy()
if content_type is not None:
result["CONTENT_TYPE"] = content_type
headers.set("Content-Type", content_type)
if content_length is not None:
result["CONTENT_LENGTH"] = str(content_length)
headers.set("Content-Length", content_length)
combined_headers = defaultdict(list)
for key, value in headers.to_wsgi_list():
combined_headers[f"HTTP_{key.upper().replace('-', '_')}"].append(value)
for key, values in combined_headers.items():
result[key] = ", ".join(values)
if self.environ_overrides:
result.update(self.environ_overrides)
return result
def get_request(self, cls: t.Optional[t.Type[Request]] = None) -> Request:
"""Returns a request with the data. If the request class is not
specified :attr:`request_class` is used.
:param cls: The request wrapper to use.
"""
if cls is None:
cls = self.request_class
return cls(self.get_environ())
class ClientRedirectError(Exception):
"""If a redirect loop is detected when using follow_redirects=True with
    the :class:`Client`, then this exception is raised.
"""
class Client:
"""This class allows you to send requests to a wrapped application.
The use_cookies parameter indicates whether cookies should be stored and
sent for subsequent requests. This is True by default, but passing False
will disable this behaviour.
If you want to request some subdomain of your application you may set
    `allow_subdomain_redirects` to `True`; otherwise no external redirects
    are allowed.
.. versionchanged:: 2.0
``response_wrapper`` is always a subclass of
        :class:`TestResponse`.
.. versionchanged:: 0.5
Added the ``use_cookies`` parameter.
"""
def __init__(
self,
application: "WSGIApplication",
response_wrapper: t.Optional[t.Type["Response"]] = None,
use_cookies: bool = True,
allow_subdomain_redirects: bool = False,
) -> None:
self.application = application
if response_wrapper in {None, Response}:
response_wrapper = TestResponse
elif not isinstance(response_wrapper, TestResponse):
response_wrapper = type(
"WrapperTestResponse",
(TestResponse, response_wrapper), # type: ignore
{},
)
self.response_wrapper = t.cast(t.Type["TestResponse"], response_wrapper)
if use_cookies:
self.cookie_jar: t.Optional[_TestCookieJar] = _TestCookieJar()
else:
self.cookie_jar = None
self.allow_subdomain_redirects = allow_subdomain_redirects
def set_cookie(
self,
server_name: str,
key: str,
value: str = "",
max_age: t.Optional[t.Union[timedelta, int]] = None,
expires: t.Optional[t.Union[str, datetime, int, float]] = None,
path: str = "/",
domain: t.Optional[str] = None,
secure: bool = False,
httponly: bool = False,
samesite: t.Optional[str] = None,
charset: str = "utf-8",
) -> None:
"""Sets a cookie in the client's cookie jar. The server name
is required and has to match the one that is also passed to
the open call.
"""
assert self.cookie_jar is not None, "cookies disabled"
header = dump_cookie(
key,
value,
max_age,
expires,
path,
domain,
secure,
httponly,
charset,
samesite=samesite,
)
environ = create_environ(path, base_url=f"http://{server_name}")
headers = [("Set-Cookie", header)]
self.cookie_jar.extract_wsgi(environ, headers)
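    # Hedged example: the server name passed to set_cookie must match the host
    # used for later requests (values below are illustrative):
    #
    #     client.set_cookie("localhost", "session", "abc123")
    #     client.get("/", base_url="http://localhost")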
def delete_cookie(
self,
server_name: str,
key: str,
path: str = "/",
domain: t.Optional[str] = None,
secure: bool = False,
httponly: bool = False,
samesite: t.Optional[str] = None,
) -> None:
"""Deletes a cookie in the test client."""
self.set_cookie(
server_name,
key,
expires=0,
max_age=0,
path=path,
domain=domain,
secure=secure,
httponly=httponly,
samesite=samesite,
)
def run_wsgi_app(
self, environ: "WSGIEnvironment", buffered: bool = False
) -> t.Tuple[t.Iterable[bytes], str, Headers]:
"""Runs the wrapped WSGI app with the given environment.
:meta private:
"""
if self.cookie_jar is not None:
self.cookie_jar.inject_wsgi(environ)
rv = run_wsgi_app(self.application, environ, buffered=buffered)
if self.cookie_jar is not None:
self.cookie_jar.extract_wsgi(environ, rv[2])
return rv
def resolve_redirect(
self, response: "TestResponse", buffered: bool = False
) -> "TestResponse":
"""Perform a new request to the location given by the redirect
response to the previous request.
:meta private:
"""
scheme, netloc, path, qs, anchor = url_parse(response.location)
builder = EnvironBuilder.from_environ(response.request.environ, query_string=qs)
to_name_parts = netloc.split(":", 1)[0].split(".")
from_name_parts = builder.server_name.split(".")
if to_name_parts != [""]:
# The new location has a host, use it for the base URL.
builder.url_scheme = scheme
builder.host = netloc
else:
# A local redirect with autocorrect_location_header=False
# doesn't have a host, so use the request's host.
to_name_parts = from_name_parts
# Explain why a redirect to a different server name won't be followed.
if to_name_parts != from_name_parts:
if to_name_parts[-len(from_name_parts) :] == from_name_parts:
if not self.allow_subdomain_redirects:
raise RuntimeError("Following subdomain redirects is not enabled.")
else:
raise RuntimeError("Following external redirects is not supported.")
path_parts = path.split("/")
root_parts = builder.script_root.split("/")
if path_parts[: len(root_parts)] == root_parts:
# Strip the script root from the path.
builder.path = path[len(builder.script_root) :]
else:
# The new location is not under the script root, so use the
# whole path and clear the previous root.
builder.path = path
builder.script_root = ""
# Only 307 and 308 preserve all of the original request.
if response.status_code not in {307, 308}:
# HEAD is preserved, everything else becomes GET.
if builder.method != "HEAD":
builder.method = "GET"
# Clear the body and the headers that describe it.
if builder.input_stream is not None:
builder.input_stream.close()
builder.input_stream = None
builder.content_type = None
builder.content_length = None
builder.headers.pop("Transfer-Encoding", None)
return self.open(builder, buffered=buffered)
def open(
self,
*args: t.Any,
as_tuple: bool = False,
buffered: bool = False,
follow_redirects: bool = False,
**kwargs: t.Any,
) -> "TestResponse":
"""Generate an environ dict from the given arguments, make a
request to the application using it, and return the response.
:param args: Passed to :class:`EnvironBuilder` to create the
environ for the request. If a single arg is passed, it can
be an existing :class:`EnvironBuilder` or an environ dict.
:param buffered: Convert the iterator returned by the app into
a list. If the iterator has a ``close()`` method, it is
called automatically.
:param follow_redirects: Make additional requests to follow HTTP
redirects until a non-redirect status is returned.
:attr:`TestResponse.history` lists the intermediate
responses.
.. versionchanged:: 2.0
``as_tuple`` is deprecated and will be removed in Werkzeug
2.1. Use :attr:`TestResponse.request` and
``request.environ`` instead.
.. versionchanged:: 2.0
The request input stream is closed when calling
``response.close()``. Input streams for redirects are
automatically closed.
.. versionchanged:: 0.5
If a dict is provided as file in the dict for the ``data``
parameter the content type has to be called ``content_type``
instead of ``mimetype``. This change was made for
consistency with :class:`werkzeug.FileWrapper`.
.. versionchanged:: 0.5
Added the ``follow_redirects`` parameter.
"""
request: t.Optional["Request"] = None
if not kwargs and len(args) == 1:
arg = args[0]
if isinstance(arg, EnvironBuilder):
request = arg.get_request()
elif isinstance(arg, dict):
request = EnvironBuilder.from_environ(arg).get_request()
elif isinstance(arg, Request):
request = arg
if request is None:
builder = EnvironBuilder(*args, **kwargs)
try:
request = builder.get_request()
finally:
builder.close()
response = self.run_wsgi_app(request.environ, buffered=buffered)
response = self.response_wrapper(*response, request=request)
redirects = set()
history: t.List["TestResponse"] = []
while follow_redirects and response.status_code in {
301,
302,
303,
305,
307,
308,
}:
# Exhaust intermediate response bodies to ensure middleware
# that returns an iterator runs any cleanup code.
if not buffered:
response.make_sequence()
response.close()
new_redirect_entry = (response.location, response.status_code)
if new_redirect_entry in redirects:
raise ClientRedirectError(
f"Loop detected: A {response.status_code} redirect"
f" to {response.location} was already made."
)
redirects.add(new_redirect_entry)
response.history = tuple(history)
history.append(response)
response = self.resolve_redirect(response, buffered=buffered)
else:
# This is the final request after redirects, or not
# following redirects.
response.history = tuple(history)
# Close the input stream when closing the response, in case
# the input is an open temporary file.
response.call_on_close(request.input_stream.close)
if as_tuple:
warnings.warn(
"'as_tuple' is deprecated and will be removed in"
" Werkzeug 2.1. Access 'response.request.environ'"
" instead.",
DeprecationWarning,
stacklevel=2,
)
return request.environ, response # type: ignore
return response
def get(self, *args: t.Any, **kw: t.Any) -> "TestResponse":
"""Call :meth:`open` with ``method`` set to ``GET``."""
kw["method"] = "GET"
return self.open(*args, **kw)
def post(self, *args: t.Any, **kw: t.Any) -> "TestResponse":
"""Call :meth:`open` with ``method`` set to ``POST``."""
kw["method"] = "POST"
return self.open(*args, **kw)
def put(self, *args: t.Any, **kw: t.Any) -> "TestResponse":
"""Call :meth:`open` with ``method`` set to ``PUT``."""
kw["method"] = "PUT"
return self.open(*args, **kw)
def delete(self, *args: t.Any, **kw: t.Any) -> "TestResponse":
"""Call :meth:`open` with ``method`` set to ``DELETE``."""
kw["method"] = "DELETE"
return self.open(*args, **kw)
def patch(self, *args: t.Any, **kw: t.Any) -> "TestResponse":
"""Call :meth:`open` with ``method`` set to ``PATCH``."""
kw["method"] = "PATCH"
return self.open(*args, **kw)
def options(self, *args: t.Any, **kw: t.Any) -> "TestResponse":
"""Call :meth:`open` with ``method`` set to ``OPTIONS``."""
kw["method"] = "OPTIONS"
return self.open(*args, **kw)
def head(self, *args: t.Any, **kw: t.Any) -> "TestResponse":
"""Call :meth:`open` with ``method`` set to ``HEAD``."""
kw["method"] = "HEAD"
return self.open(*args, **kw)
def trace(self, *args: t.Any, **kw: t.Any) -> "TestResponse":
"""Call :meth:`open` with ``method`` set to ``TRACE``."""
kw["method"] = "TRACE"
return self.open(*args, **kw)
def __repr__(self) -> str:
return f"<{type(self).__name__} {self.application!r}>"
def create_environ(*args: t.Any, **kwargs: t.Any) -> "WSGIEnvironment":
"""Create a new WSGI environ dict based on the values passed. The first
parameter should be the path of the request which defaults to '/'. The
second one can either be an absolute path (in that case the host is
    localhost:80) or a full URL for the request with scheme, netloc, port and
the path to the script.
This accepts the same arguments as the :class:`EnvironBuilder`
constructor.
.. versionchanged:: 0.5
This function is now a thin wrapper over :class:`EnvironBuilder` which
was added in 0.5. The `headers`, `environ_base`, `environ_overrides`
and `charset` parameters were added.
"""
builder = EnvironBuilder(*args, **kwargs)
try:
return builder.get_environ()
finally:
builder.close()
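# A hedged usage sketch for create_environ (values are illustrative):
#
#     environ = create_environ("/foo", "http://localhost:8080/")
#     # environ["PATH_INFO"] == "/foo", environ["SERVER_PORT"] == "8080"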
def run_wsgi_app(
app: "WSGIApplication", environ: "WSGIEnvironment", buffered: bool = False
) -> t.Tuple[t.Iterable[bytes], str, Headers]:
"""Return a tuple in the form (app_iter, status, headers) of the
application output. This works best if you pass it an application that
returns an iterator all the time.
Sometimes applications may use the `write()` callable returned
by the `start_response` function. This tries to resolve such edge
cases automatically. But if you don't get the expected output you
should set `buffered` to `True` which enforces buffering.
If passed an invalid WSGI application the behavior of this function is
undefined. Never pass non-conforming WSGI applications to this function.
:param app: the application to execute.
:param buffered: set to `True` to enforce buffering.
:return: tuple in the form ``(app_iter, status, headers)``
"""
# Copy environ to ensure any mutations by the app (ProxyFix, for
# example) don't affect subsequent requests (such as redirects).
environ = _get_environ(environ).copy()
status: str
response: t.Optional[t.Tuple[str, t.List[t.Tuple[str, str]]]] = None
buffer: t.List[bytes] = []
def start_response(status, headers, exc_info=None): # type: ignore
nonlocal response
if exc_info:
try:
raise exc_info[1].with_traceback(exc_info[2])
finally:
exc_info = None
response = (status, headers)
return buffer.append
app_rv = app(environ, start_response)
close_func = getattr(app_rv, "close", None)
app_iter: t.Iterable[bytes] = iter(app_rv)
# when buffering we emit the close call early and convert the
# application iterator into a regular list
if buffered:
try:
app_iter = list(app_iter)
finally:
if close_func is not None:
close_func()
# otherwise we iterate the application iter until we have a response, chain
# the already received data with the already collected data and wrap it in
# a new `ClosingIterator` if we need to restore a `close` callable from the
# original return value.
else:
for item in app_iter:
buffer.append(item)
if response is not None:
break
if buffer:
app_iter = chain(buffer, app_iter)
if close_func is not None and app_iter is not app_rv:
app_iter = ClosingIterator(app_iter, close_func)
status, headers = response # type: ignore
return app_iter, status, Headers(headers)
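# A hedged usage sketch for run_wsgi_app with a trivial WSGI app (the app
# below is illustrative, not part of this module):
#
#     def demo_app(environ, start_response):
#         start_response("200 OK", [("Content-Type", "text/plain")])
#         return [b"hello"]
#
#     app_iter, status, headers = run_wsgi_app(demo_app, create_environ("/"))
#     assert status == "200 OK"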
class TestResponse(Response):
""":class:`~werkzeug.wrappers.Response` subclass that provides extra
information about requests made with the test :class:`Client`.
Test client requests will always return an instance of this class.
If a custom response class is passed to the client, it is
subclassed along with this to support test information.
If the test request included large files, or if the application is
serving a file, call :meth:`close` to close any open files and
prevent Python showing a ``ResourceWarning``.
"""
request: Request
"""A request object with the environ used to make the request that
resulted in this response.
"""
history: t.Tuple["TestResponse", ...]
"""A list of intermediate responses. Populated when the test request
is made with ``follow_redirects`` enabled.
"""
def __init__(
self,
response: t.Iterable[bytes],
status: str,
headers: Headers,
request: Request,
history: t.Tuple["TestResponse"] = (), # type: ignore
**kwargs: t.Any,
) -> None:
super().__init__(response, status, headers, **kwargs)
self.request = request
self.history = history
self._compat_tuple = response, status, headers
def __iter__(self) -> t.Iterator:
warnings.warn(
(
"The test client no longer returns a tuple, it returns"
" a 'TestResponse'. Tuple unpacking is deprecated and"
" will be removed in Werkzeug 2.1. Access the"
" attributes 'data', 'status', and 'headers' instead."
),
DeprecationWarning,
stacklevel=2,
)
return iter(self._compat_tuple)
def __getitem__(self, item: int) -> t.Any:
warnings.warn(
(
"The test client no longer returns a tuple, it returns"
" a 'TestResponse'. Item indexing is deprecated and"
" will be removed in Werkzeug 2.1. Access the"
" attributes 'data', 'status', and 'headers' instead."
),
DeprecationWarning,
stacklevel=2,
)
return self._compat_tuple[item]
| fkazimierczak/werkzeug | src/werkzeug/test.py | Python | bsd-3-clause | 48,130 |
import bpy
bl_info = {
"name" : "Vertex Tricker",
"description" : "You Can Trick Vertex Slection in Edit Mode",
"author" : "Kozo Oeda",
"version" : (1, 0),
"location" : "",
"warning" : "",
"support" : "COMMUNITY",
"wiki_url" : "",
"tracker_url" : "",
"category" : "3D View"
}
def get_current_object_name():
return bpy.context.object.name
def get_selected_vertices_index():
import bmesh
vertices_index = []
edit_data = bpy.context.edit_object.data
bm = bmesh.from_edit_mesh(edit_data)
selected_vertices = [v for v in bm.verts if v.select]
for v in selected_vertices:
vertices_index.append(v.index)
return vertices_index
def get_v_groups(self, context):
items = []
items_ref = []
v_groups = []
ob_name = get_current_object_name()
ob = bpy.data.objects[ob_name]
vertices = bpy.data.objects[ob_name].data.vertices
v_groups = ob.vertex_groups
enum_num = 1
try:
for v in vertices:
for g in v.groups:
if v_groups[g.group].name not in items_ref:
items.append((v_groups[g.group].name, v_groups[g.group].name, "", enum_num))
items_ref.append(v_groups[g.group].name)
enum_num += 1
except IndexError:
pass
return items
def get_v_groups_from_v(self, context):
items = []
items_ref = []
selected_vertices_indexes = []
ob_name = get_current_object_name()
ob = bpy.data.objects[ob_name]
selected_vertices_indexes = get_selected_vertices_index()
vertices = bpy.data.objects[ob_name].data.vertices
v_groups = ob.vertex_groups
enum_num = 1
try:
for v_i in selected_vertices_indexes:
for g in vertices[v_i].groups:
if v_groups[g.group].name not in items_ref:
items.append((v_groups[g.group].name, v_groups[g.group].name, '', enum_num))
items_ref.append(v_groups[g.group].name)
enum_num += 1
except IndexError:
pass
return items
def update_groups():
ob_name = get_current_object_name()
bpy.data.objects[ob_name].vertex_groups.update()
def toggle():
for i in range(2):
bpy.ops.object.editmode_toggle()
counter_1_ = 0
prev_sum_1_ = 0
even_checker_1_ = 0
counter_2_ = 0
prev_sum_2_ = 0
even_checker_2_ = 0
class VTXUpdater(bpy.types.Operator):
bl_idname = "wm.vtx_group_updater"
bl_label = "Updater"
def execute(self, context):
toggle()
bpy.ops.wm.call_menu_pie(name = 'VTXPie')
return {'FINISHED'}
class VTXUpdaterFv(bpy.types.Operator):
bl_idname = "wm.vtx_group_updater_from_v"
bl_label = "Upadater fv"
def execute(self, context):
toggle()
bpy.ops.wm.call_menu_pie(name = 'VTXPieFv')
return {'FINISHED'}
class VTXOps(bpy.types.Operator):
bl_idname = "wm.select_v_groups"
bl_label = "Select Vertices Group"
group = bpy.props.EnumProperty(items = get_v_groups, name = "VerticesGroup")
def execute(self, context):
global counter_1_
global prev_sum_1_
global even_checker_1_
counter_1_ = 0
prev_sum_1_ = 0
even_checker_1_ = 0
bpy.context.object.vertex_groups.active_index = bpy.context.object.vertex_groups[self.group].index
bpy.ops.mesh.select_all(action = 'DESELECT')
bpy.ops.object.vertex_group_select()
return {'FINISHED'}
class VTXOpsFv(bpy.types.Operator):
bl_idname = "wm.select_v_groups_from_v"
bl_label = "Select Vertices Group"
group = bpy.props.EnumProperty(items = get_v_groups_from_v, name = "VerticesGroupFromV")
def execute(self, context):
global counter_2_
global prev_sum_2_
global even_checker_2_
counter_2_ = 0
prev_sum_2_ = 0
even_checker_2_ = 0
bpy.context.object.vertex_groups.active_index = bpy.context.object.vertex_groups[self.group].index
bpy.ops.mesh.select_all(action = 'DESELECT')
bpy.ops.object.vertex_group_select()
return {'FINISHED'}
class VTXPie(bpy.types.Menu):
bl_idname = "VTXPie"
bl_label = "Vertices Groups"
def draw(self, context):
global counter_1_
global prev_sum_1_
global even_checker_1_
layout = self.layout
pie = layout.menu_pie()
ops = "wm.select_v_groups"
ops2 = "wm.call_menu_pie"
items = get_v_groups(self, context)
if len(items) - prev_sum_1_ < 9:
for i in range(7 * counter_1_, len(items)):
pie.operator(ops, text = items[i][0], icon = 'MESH_DATA').group = items[i][0]
counter_1_ = 0
prev_sum_1_ = 0
even_checker_1_ = 0
else:
if even_checker_1_ % 2 == 0:
for i in range(7 * counter_1_, 7 * counter_1_ + 7):
pie.operator(ops, text = items[i][0], icon = 'MESH_DATA').group = items[i][0]
pie.operator(ops2, text = "Next", icon = 'ANIM_DATA').name = 'VTXPie'
even_checker_1_ += 1
else:
for i in range(7 * counter_1_, 7 * counter_1_ + 4):
pie.operator(ops, text = items[i][0], icon = 'MESH_DATA').group = items[i][0]
pie.operator(ops2, text = "Next", icon = 'ANIM_DATA').name = 'VTXPie'
for i in range(7 * counter_1_ + 4, 7 * counter_1_ + 7):
pie.operator(ops, text = items[i][0], icon = 'MESH_DATA').group = items[i][0]
even_checker_1_ += 1
counter_1_ += 1
prev_sum_1_ += 7
class VTXPieFv(bpy.types.Menu):
#bl_idname = "VTXPieFV"
bl_label = "Vertices Groups From Vertices"
def draw(self, context):
global counter_2_
global prev_sum_2_
global even_checker_2_
layout = self.layout
pie = layout.menu_pie()
ops = "wm.select_v_groups_from_v"
ops2 = "wm.call_menu_pie"
items = get_v_groups_from_v(self, context)
if len(items) - prev_sum_2_ < 9:
for i in range(7 * counter_2_, len(items)):
pie.operator(ops, text = items[i][0], icon = 'MESH_DATA').group = items[i][0]
counter_2_ = 0
prev_sum_2_ = 0
even_checker_2_ = 0
else:
if even_checker_2_ % 2 == 0:
for i in range(7 * counter_2_, 7 * counter_2_ + 7):
pie.operator(ops, text = items[i][0], icon = 'MESH_DATA').group = items[i][0]
pie.operator(ops2, text = "Next", icon = 'ANIM_DATA').name = 'VTXPieFv'
even_checker_2_ += 1
else:
for i in range(7 * counter_2_, 7 * counter_2_ + 4):
pie.operator(ops, text = items[i][0], icon = 'MESH_DATA').group = items[i][0]
pie.operator(ops2, text = "Next", icon = 'ANIM_DATA').name = 'VTXPieFv'
for i in range(7 * counter_2_ + 4, 7 * counter_2_ + 7):
pie.operator(ops, text = items[i][0], icon = 'MESH_DATA').group = items[i][0]
even_checker_2_ += 1
counter_2_ += 1
prev_sum_2_ += 7
class VTXInformer(bpy.types.Operator):
bl_idname = 'object.assign_new'
bl_label = 'Assign New Group'
def execute(self, context):
bpy.ops.object.vertex_group_assign_new()
self.report({'INFO'}, "Assigned Vertices!")
return{'FINISHED'}
addon_keymaps = []
def register():
bpy.utils.register_class(VTXUpdater)
bpy.utils.register_class(VTXOps)
bpy.utils.register_class(VTXPie)
bpy.utils.register_class(VTXUpdaterFv)
bpy.utils.register_class(VTXOpsFv)
bpy.utils.register_class(VTXPieFv)
bpy.utils.register_class(VTXInformer)
wm = bpy.context.window_manager
km = wm.keyconfigs.addon.keymaps.new(name = 'Mesh')
kmi = km.keymap_items.new('wm.vtx_group_updater', 'F' ,'PRESS', ctrl = False, shift = True, alt = False)
kmi = km.keymap_items.new('wm.vtx_group_updater_from_v', 'V', 'PRESS', ctrl = False, shift = True, alt = False)
kmi = km.keymap_items.new('object.assign_new', 'L', 'PRESS', ctrl = False, shift = True, alt = False)
addon_keymaps.append((km, kmi))
def unregister():
bpy.utils.unregister_class(VTXUpdater)
bpy.utils.unregister_class(VTXOps)
bpy.utils.unregister_class(VTXPie)
bpy.utils.unregister_class(VTXUpdaterFv)
bpy.utils.unregister_class(VTXOpsFv)
bpy.utils.unregister_class(VTXPieFv)
bpy.utils.unregister_class(VTXInformer)
wm = bpy.context.window_manager
for km, kmi in addon_keymaps:
km.keymap_items.remove(kmi)
if __name__ == "__main__":
register()
| kozmof/vertex_tricker | vertex_tricker.py | Python | mit | 8,816 |
# -*- coding: utf-8 -*-
"""
/***************************************************************************
PostTelemac
A QGIS plugin
Post Traitment or Telemac
-------------------
begin : 2015-07-07
git sha : $Format:%H$
copyright : (C) 2015 by Artelia
email : patrice.Verchere@arteliagroup.com
***************************************************************************/
***************************************************************************/
get Image class
Generate a Qimage from selafin file to be displayed in map canvas
with the draw method of posttelemacpluginlayer
Versions :
0.0 : initial version
***************************************************************************/
"""
import sys
import os
#from PyQt4 import uic, QtCore, QtGui
from PostTelemac.meshlayer.post_telemac_pluginlayer import SelafinPluginLayer
def testCoreParserValue():
print('begin')
path = os.path.normpath('C://00_Bureau//data2//SMEAG_REF_Q100.res')
slf = SelafinPluginLayer()
print('slf created')
slf.load_selafin(path,'TELEMAC')
print('slf loaded')
print('params',slf.hydrauparser.getVarNames())
print('done')
slf.propertiesdialog.show()
testCoreParserValue()
| ArteliaTelemac/PostTelemac | tests/test_core_parservalues.py | Python | gpl-3.0 | 1,425 |
# -*- coding: utf-8 -*-
#
# src documentation build configuration file, created by
# sphinx-quickstart on Wed Jan 1 22:23:19 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
ROOT = os.path.dirname(os.path.dirname(__file__))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(os.path.join(ROOT, 'src')))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'numpydoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'maidenhair-jasco-parser'
copyright = u'2014, Author'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'srcdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'src.tex', u'src Documentation',
u'Author', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'src', u'src Documentation',
[u'Author'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'src', u'src Documentation',
u'Author', 'src', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'src'
epub_author = u'Author'
epub_publisher = u'Author'
epub_copyright = u'2014, Author'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
| lambdalisue/maidenhair-jasco-parser | docs/conf.py | Python | mit | 9,017 |
# Copyright 2012 Twitter
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
#('Eric Florenzano', 'ericflo@boilerplateinc.com'),
#('Eric Maguire', 'etmaguire@boilerplateinc.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'clutch',
'USER': 'clutch',
'PASSWORD': '',
'HOST': '127.0.0.1',
'PORT': '',
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media')
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'media', 'static')
STATIC_URL = '/media/static/'
ADMIN_MEDIA_PREFIX = '/media/static/admin/'
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'static'),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
SECRET_KEY = ''
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.request',
'django.contrib.messages.context_processors.messages',
)
MIDDLEWARE_CLASSES = (
'django_ext.middleware.log.LoggingMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'pagination.middleware.PaginationMiddleware',
)
ROOT_URLCONF = 'clutch.urls'
TEMPLATE_DIRS = (
os.path.join(PROJECT_ROOT, 'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.humanize',
'pagination',
'gunicorn',
'django_ext',
'admin_ext',
'dashboard',
'accounts',
'stats',
'ab',
)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# If you add another one, things will break, leave it at one backend plz, KTHX!
AUTHENTICATION_BACKENDS = (
'django_ext.authentication_backends.ModelBackend',
)
INTERNAL_IPS = (
'127.0.0.1',
)
LOGIN_REDIRECT_URL = '/'
LOGIN_URL = '/login/'
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
#EMAIL_HOST = 'smtp.mailgun.org'
#EMAIL_HOST_USER = 'postmaster@mailgun.org'
#EMAIL_HOST_PASSWORD = '8ccj4pzu$fh5'
#EMAIL_PORT = 25
#EMAIL_USE_TLS = True
EMAIL_SUBJECT_PREFIX = '[Clutch] '
SERVER_EMAIL = 'noreply@clutch.io'
DEFAULT_FROM_EMAIL = 'noreply@clutch.io'
#SESSION_COOKIE_DOMAIN = '.clutch.io'
SESSION_COOKIE_AGE = 2419200 # 4 weeks (28 days) in seconds
SESSION_COOKIE_NAME = 'clutchid'
SESSION_COOKIE_SECURE = False
SESSION_ENGINE = 'django.contrib.sessions.backends.signed_cookies'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake'
}
}
MESSAGE_STORAGE = 'django.contrib.messages.storage.cookie.CookieStorage'
AWS_ACCESS_KEY = 'AKIAIYWXGPY7LXHM2GBA'
AWS_ACCESS_SECRET = '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'
AWS_BUCKET_NAME = 'clutchio'
AWS_DOCS_BUCKET_NAME = 'docs.clutch.io'
AWS_QUERYSTRING_AUTH = False
CLUTCH_CONF = os.environ.get('CLUTCH_CONF')
if CLUTCH_CONF:
execfile(CLUTCH_CONF)
| mowenGithub/Clutch | configure/settings.py | Python | apache-2.0 | 5,789 |
##############################################################################
#
# Copyright (C) 2018 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __manifest__.py
#
##############################################################################
from odoo import api, models
class CompassionChild(models.Model):
_inherit = "compassion.child"
@api.multi
def get_sms_sponsor_child_data(self):
"""
Returns JSON data of the child for the mobile sponsor page
:return: JSON data
"""
result = super().get_sms_sponsor_child_data()
if self.env.lang == "fr_CH":
result["description"] = self.desc_fr
elif self.env.lang == "de_DE":
result["description"] = self.desc_de
elif self.env.lang == "it_IT":
result["description"] = self.desc_it
return result
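    # Minimal illustration (assumption, not part of the original module): with the
    # active language set to "fr_CH", the dict returned by the parent implementation
    # has its "description" entry replaced by the child's desc_fr field, e.g.
    #   data = child.with_context(lang="fr_CH").get_sms_sponsor_child_data()
    #   data["description"]  # -> child.desc_fr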
| CompassionCH/compassion-switzerland | sms_939/models/compassion_child.py | Python | agpl-3.0 | 994 |
from typing import List

class Solution:
def validTree(self, n: int, edges: List[List[int]]) -> bool:
if len(edges) != n - 1:
return False
parent = [i for i in range(n)]
def findParent(x):
while parent[x] != x:
parent[x] = parent[parent[x]]
x = parent[x]
return x
for u, v in edges:
pu = findParent(u)
pv = findParent(v)
if pu != pv:
parent[pv] = pu
else:
return False
return True
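# Quick sanity check (assumption, not part of the original submission): the
# union-find above accepts the graph as a valid tree exactly when it has
# n - 1 edges and merging every edge never joins two already-connected nodes, e.g.
#   Solution().validTree(5, [[0, 1], [0, 2], [0, 3], [1, 4]])  # -> True
#   Solution().validTree(5, [[0, 1], [1, 2], [2, 3], [1, 3], [1, 4]])  # -> False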
| jiadaizhao/LeetCode | 0201-0300/0261-Graph Valid Tree/0261-Graph Valid Tree.py | Python | mit | 548 |
#!/usr/bin/env python2
# Copyright (C) 2014, Cameron Brandon White
# -*- coding: utf-8 -*-
import setuptools
import textwrap
if __name__ == "__main__":
setuptools.setup(
name="Flask-CAS",
version="1.0.1",
description="Flask extension for CAS",
author="Cameron Brandon White",
author_email="cameronbwhite90@gmail.com",
url="https://github.com/cameronbwhite/Flask-CAS",
long_description=textwrap.dedent("""\
Flask-CAS
=========
Flask-CAS is a Flask extension which makes it easy to
authenticate with a CAS.
CAS
===
The Central Authentication Service (CAS) is a single sign-on
protocol for the web. Its purpose is to permit a user to access
multiple applications while providing their credentials (such as
userid and password) only once. It also allows web applications
to authenticate users without gaining access to a user's security
credentials, such as a password. The name CAS also refers to a
software package that implements this protocol.
(Very short) Setup Tutorial
===========================
First create a Flask instance:
.. code:: python
from flask import Flask
app = Flask(__name__)
Apply CAS on your Flask instance:
.. code:: python
from flask.ext.cas import CAS
CAS(app)
Do needed configuration:
.. code:: python
app.config['CAS_SERVER'] = 'https://sso.pdx.edu'
app.config['CAS_AFTER_LOGIN'] = 'route_root'
Using
=====
After you setup you will get two new routes `/login/`
and `/logout/`.
Reference documentation
=======================
See https://github.com/cameronbwhite/Flask-CAS"""),
packages=[
"flask_cas",
],
install_requires = [
"Flask",
"xmltodict",
],
        tests_require = [
"Nose",
"Mock",
],
test_suite = "nose.collector",
include_package_data=True,
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
],
zip_safe=False,
)
| UMDIEEE/ieee-web | third-party/Flask-CAS-1.0.1/setup.py | Python | gpl-3.0 | 2,760 |
"""
WSGI config for XXX_PROJECT_NAME_XXX.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'XXX_PROJECT_NAME_XXX.settings')
application = get_wsgi_application()
| mmcardle/django_builder | src/django/python/wsgi.py | Python | mit | 380 |
# -*- encoding: utf-8 -*-
class RouteFactory(object):
def __init__(self, request):
self.request = request
def getTitle(self):
return "Portfólio Pyramid"
| Bleno/portfolio | portfolio/views/routes/factory.py | Python | gpl-3.0 | 180 |
from django import template
import socket
import struct
from iptocountry.models import IpToCountry
from django.conf import settings
from django.utils.translation import ugettext as _
import os
register = template.Library()
def ip2long(ip):
ip_array = ip.split('.')
ip_long = int(ip_array[0]) * 16777216 + int(ip_array[1]) * 65536 + int(ip_array[2]) * 256 + int(ip_array[3])
return ip_long
def long2ip(long):
return socket.inet_ntoa(struct.pack("!I", long))
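# Round-trip illustration (not part of the original tag library): the two helpers
# convert between dotted-quad strings and 32-bit integers, e.g.
#   ip2long('192.168.1.1')  # -> 3232235777
#   long2ip(3232235777)     # -> '192.168.1.1'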
class FlagObject(template.Node):
def __init__(self, ip, varname):
self.ip = ip
self.varname = varname
def render(self, context):
ip = ip2long(template.resolve_variable(self.ip, context))
try:
iptc = IpToCountry.objects.get(IP_FROM__lte=ip, IP_TO__gte=ip)
iptc.flag_url = os.path.join(os.path.join(settings.MEDIA_URL, 'iptocountry/flags'), iptc.COUNTRY_CODE2.lower()+'.gif')
context.update({self.varname: iptc,})
except IpToCountry.DoesNotExist:
pass
return ''
def get_flag(parser, token):
"""
Retrieves a IpToCountry object given ip.
Usage::
{% get_flag [ip] as [varname] %}
Example::
{% get_flag object.ip_address as flag %}
"""
bits = token.contents.split()
if len(bits) != 4:
raise template.TemplateSyntaxError(_('%s tag requires exactly three arguments') % bits[0])
if bits[2] != 'as':
raise template.TemplateSyntaxError(_("second argument to %s tag must be 'as'") % bits[0])
return FlagObject(bits[1], bits[3])
register.tag('get_flag', get_flag) | maweis1981/hey001 | mayversion/mayversion/iptocountry/templatetags/iptocountry_flag.py | Python | lgpl-3.0 | 1,569 |
#!/usr/bin/env python3
from setuptools import setup, find_packages
DESCRIPTION = open("README.rst", encoding="utf-8").read()
CLASSIFIERS = '''\
Intended Audience :: Developers
Intended Audience :: Science/Research
License :: OSI Approved
Operating System :: POSIX
Operating System :: Unix
Programming Language :: Python
Programming Language :: Python :: 3
Programming Language :: Python :: 3 :: Only
Topic :: Scientific/Engineering'''
setup(
name="pyfdas",
version="0.1.dev1",
packages=find_packages(),
install_requires=[],
tests_require=["pytest"],
# metadata for upload to PyPI
author="Dimas Abreu Dutra",
author_email="dimasadutra@gmail.com",
    description='CEA Flight Data Acquisition System.',
    long_description=DESCRIPTION,
classifiers=CLASSIFIERS.split('\n'),
platforms=["Linux", "Unix"],
license="MIT",
url="http://github.com/cea-ufmg/pyfdas",
)
| cea-ufmg/pyfdas | setup.py | Python | mit | 882 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for UpdateFeed
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-asset
# [START cloudasset_v1p2beta1_generated_AssetService_UpdateFeed_async]
from google.cloud import asset_v1p2beta1
async def sample_update_feed():
# Create a client
client = asset_v1p2beta1.AssetServiceAsyncClient()
# Initialize request argument(s)
feed = asset_v1p2beta1.Feed()
feed.name = "name_value"
request = asset_v1p2beta1.UpdateFeedRequest(
feed=feed,
)
# Make the request
response = await client.update_feed(request=request)
# Handle the response
print(response)
# [END cloudasset_v1p2beta1_generated_AssetService_UpdateFeed_async]
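# To actually execute the coroutine above outside of the generated snippet
# (assumption, not part of the generated sample), one could do:
#   import asyncio
#   asyncio.run(sample_update_feed())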
| googleapis/python-asset | samples/generated_samples/cloudasset_v1p2beta1_generated_asset_service_update_feed_async.py | Python | apache-2.0 | 1,527 |
import six
from databasin.exceptions import LoginRequiredError, ForbiddenError
urlparse = six.moves.urllib_parse.urlparse # IDE inspection trips over this as an import
class ResourcePaginator(object):
def __init__(self, resource):
self.resource = resource
self.loaded_urls = set()
def __iter__(self):
while True:
for obj in self.resource.objects:
yield obj
if not self.resource.meta.next:
break
else:
o = urlparse(self.resource._url)
url = '{0}://{1}{2}'.format(o.scheme, o.netloc, self.resource.meta.next)
if url.lower() in self.loaded_urls:
break
self.loaded_urls.add(url.lower())
self.resource = self.resource.get(url, session=self.resource._session)
def __len__(self):
return self.count()
def count(self):
return self.resource.meta.total_count
def raise_for_authorization(response, is_logged_in):
"""Raises `LoginRequiredError` or `ForbiddenError` when appropriate"""
if response.status_code == 401:
# Data Basin will return 401 in cases where it should return 403. So if we get this HTTP code and are already
# logged in, then `ForbiddenError` is the correct response.
if is_logged_in:
response.status_code = 403
else:
raise LoginRequiredError(response=response)
if response.status_code == 403:
raise ForbiddenError(response=response)
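# Usage sketch (assumption; the concrete resource class comes from the databasin
# client, not from this module): given a paginated list resource that exposes
# `objects`, `meta.next` and `meta.total_count`, the paginator walks every page
# while guarding against next-URL loops, e.g.
#   paginator = ResourcePaginator(dataset_list_resource)
#   print(len(paginator))   # total number of objects reported by the API
#   for obj in paginator:   # transparently fetches subsequent pages
#       ...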
| consbio/python-databasin | databasin/utils.py | Python | bsd-3-clause | 1,554 |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.do.modeling.show_analysis_runs Show the analysis runs.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import the relevant PTS classes and modules
from pts.core.basics.configuration import ConfigurationDefinition, parse_arguments
from pts.modeling.core.environment import load_modeling_environment_cwd
from pts.core.tools import formatting as fmt
from pts.core.tools.stringify import tostr
from pts.modeling.config.parameters import parameter_descriptions
from pts.modeling.analysis.heating.component import contributions
# -----------------------------------------------------------------
# Determine the modeling path
environment = load_modeling_environment_cwd()
context = environment.analysis_context
# -----------------------------------------------------------------
# Create the configuration definition
definition = ConfigurationDefinition()
# Add positional optional
definition.add_positional_optional("runs", "string_list", "names of analysis runs")
# Add flags
definition.add_flag("info", "show the analysis run info", False)
definition.add_flag("cached", "include cached analysis runs", True)
definition.add_flag("all_info", "show all info", False)
# Different sections
definition.add_flag("basic", "show basic info", True)
definition.add_flag("model", "show model info", False)
definition.add_flag("wavelength", "show wavelength grid info", True)
definition.add_flag("grid", "show dust grid info", True)
definition.add_flag("launch", "show launch info", False)
definition.add_flag("parallelization", "show parallelization", False)
definition.add_flag("timing", "show timing info", False)
definition.add_flag("memory", "show memory usage info", False)
definition.add_flag("directory", "show directory info", False)
definition.add_flag("heating", "show heating launch info", False)
definition.add_flag("residuals", "show residuals analysis info", False)
definition.add_flag("colours", "show colours analysis info", False)
definition.add_flag("attenuation", "show attenuation analysis info", False)
definition.add_flag("maps", "show maps analysis info", False)
# Add optional
definition.add_optional("parameters", "string_list", "show the values of these parameters", choices=parameter_descriptions)
# Get configuration
config = parse_arguments("show_model", definition)
# -----------------------------------------------------------------
# Cannot define parameters and enable info
if config.info and config.parameters is not None: raise ValueError("Cannot specify parameters and enable info")
# -----------------------------------------------------------------
ignore_properties = ["name", "path"]
# -----------------------------------------------------------------
# All info
if config.all_info:
if not config.info: raise ValueError("All info is enabled but showing info is disabled")
config.basic = True
config.model = True
config.wavelength = True
config.grid = True
config.launch = True
config.parallelization = True
config.timing = True
config.memory = True
config.directory = True
config.heating = True
config.residuals = True
config.colours = True
config.attenuation = True
config.maps = True
# -----------------------------------------------------------------
def show_basic_info(run):
"""
This function ...
:param run:
:return:
"""
# Show the contents of the info file
print(run.info.to_string(line_prefix=" ", ignore_none=True, ignore=ignore_properties))
# From config
print(" - " + fmt.bold + "old scale heights: " + fmt.reset + tostr(run.config.old_scale_heights))
# -----------------------------------------------------------------
def show_model_info(run):
"""
This function ...
:param run:
:return:
"""
print(" - " + fmt.bold + "model:" + fmt.reset)
print("")
print(" - old stars:")
print(" - map name: " + run.model_old_map_name)
print(" - methods: " + tostr(run.old_map_methods))
print(" - origins: " + tostr(run.old_map_origins))
print(" - young stars:")
print(" - map name: " + run.model_young_map_name)
print(" - methods: " + tostr(run.young_map_methods))
print(" - origins: " + tostr(run.young_map_origins))
print(" - ionizing stars:")
print(" - map name: " + run.model_ionizing_map_name)
print(" - methods: " + tostr(run.ionizing_map_methods))
print(" - origins: " + tostr(run.ionizing_map_origins))
print(" - dust:")
print(" - map name: " + run.model_dust_map_name)
print(" - methods: " + tostr(run.dust_map_methods))
print(" - origins: " + tostr(run.dust_map_origins))
# -----------------------------------------------------------------
def show_wavelength_grid_info(run):
"""
This function ...
:param run:
:return:
"""
print(" - " + fmt.bold + "wavelength grid:" + fmt.reset)
print(" - " + fmt.bold + "number of points: " + fmt.reset + tostr(run.nwavelengths))
print(" - " + fmt.bold + "emission lines: " + fmt.reset + tostr(run.config.wg.add_emission_lines))
# -----------------------------------------------------------------
def show_dust_grid_info(run):
"""
This function ...
:param run:
:return:
"""
# Title
print(" - " + fmt.bold + "dust grid:" + fmt.reset)
# From log file or tree file
print(" - " + fmt.bold + "number of cells: " + fmt.reset + tostr(run.ncells))
# From config
print(" - " + fmt.bold + "type: " + fmt.reset + run.config.dg.grid_type)
print(" - " + fmt.bold + "relative scale: " + fmt.reset + tostr(run.config.dg.scale))
print(" - " + fmt.bold + "scale heights: " + fmt.reset + tostr(run.config.dg.scale_heights))
if run.config.dg.grid_type == "bintree": print(" - " + fmt.bold + "min level: " + fmt.reset + tostr(run.config.dg.bintree_min_level))
elif run.config.dg.grid_type == "octtree": print(" - " + fmt.bold + "min level: " + fmt.reset + tostr(run.config.dg.octtree_min_level))
else: raise ValueError("Invalid grid type: " + run.config.dg.grid_type)
print(" - " + fmt.bold + "maximum mass fraction: " + fmt.reset + tostr(run.config.dg.max_mass_fraction))
# From dust grid object
print(" - " + fmt.bold + "sample count: " + fmt.reset + tostr(run.dust_grid.sample_count))
print(" - " + fmt.bold + "min x: " + fmt.reset + tostr(run.dust_grid.min_x))
print(" - " + fmt.bold + "max x: " + fmt.reset + tostr(run.dust_grid.max_x))
print(" - " + fmt.bold + "min y: " + fmt.reset + tostr(run.dust_grid.min_y))
print(" - " + fmt.bold + "max y: " + fmt.reset + tostr(run.dust_grid.max_y))
print(" - " + fmt.bold + "min z: " + fmt.reset + tostr(run.dust_grid.min_z))
print(" - " + fmt.bold + "max z: " + fmt.reset + tostr(run.dust_grid.max_z))
print(" - " + fmt.bold + "direction method: " + fmt.reset + tostr(run.dust_grid.direction_method))
print(" - " + fmt.bold + "maximum optical depth: " + fmt.reset + tostr(run.dust_grid.max_optical_depth))
print(" - " + fmt.bold + "maximum density dispersion fraction: " + fmt.reset + tostr(run.dust_grid.max_dens_disp_fraction))
print(" - " + fmt.bold + "search method: " + fmt.reset + tostr(run.dust_grid.search_method))
# -----------------------------------------------------------------
def show_launch_info(run):
"""
This function ...
:param run:
:return:
"""
# Get the ski file
ski = run.ski_file
# Show the launch options
print(" - " + fmt.bold + "launch info:" + fmt.reset)
print(" - " + fmt.bold + "number of photon packages: " + fmt.reset + tostr(ski.packages()))
print(" - " + fmt.bold + "dust self-absorption: " + fmt.reset + tostr(ski.dustselfabsorption()))
print(" - " + fmt.bold + "transient heating: " + fmt.reset + tostr(ski.transient_dust_emissivity))
print(" - " + fmt.bold + "has output: " + fmt.reset + tostr(run.has_output))
print(" - " + fmt.bold + "has extracted data: " + fmt.reset + tostr(run.has_extracted))
print(" - " + fmt.bold + "has plots: " + fmt.reset + tostr(run.has_plots))
print(" - " + fmt.bold + "has misc output: " + fmt.reset + tostr(run.has_misc))
# -----------------------------------------------------------------
def show_parallelization_info(run):
"""
This function ...
:param run:
:return:
"""
# Get the log file
logfile = run.logfile
# Show the parallelization info
print(" - " + fmt.bold + "parallelization options:" + fmt.reset)
print(" - " + fmt.bold + "number of processes: " + fmt.reset + tostr(logfile.nprocesses))
print(" - " + fmt.bold + "number of threads: " + fmt.reset + tostr(logfile.nthreads))
print(" - " + fmt.bold + "data parallelization: " + fmt.reset + tostr(logfile.data_parallel))
# -----------------------------------------------------------------
def show_timing_info(run):
"""
    This function ...
:param run:
:return:
"""
# Show the timing info
print(" - " + fmt.bold + "timing:" + fmt.reset)
print(" - " + fmt.bold + "total: " + fmt.reset + tostr(run.timeline.total))
print(" - " + fmt.bold + "setup: " + fmt.reset + tostr(run.timeline.setup))
print(" - " + fmt.bold + "stellar: " + fmt.reset + tostr(run.timeline.stellar))
print(" - " + fmt.bold + "spectra: " + fmt.reset + tostr(run.timeline.spectra))
print(" - " + fmt.bold + "dust: " + fmt.reset + tostr(run.timeline.dust))
print(" - " + fmt.bold + "writing: " + fmt.reset + tostr(run.timeline.writing))
print(" - " + fmt.bold + "communication: " + fmt.reset + tostr(run.timeline.communication))
print(" - " + fmt.bold + "waiting: " + fmt.reset + tostr(run.timeline.waiting))
# -----------------------------------------------------------------
def show_memory_info(run):
"""
This function ...
:param run:
:return:
"""
# Show the memory info
print(" - " + fmt.bold + "memory:" + fmt.reset)
print(" - " + fmt.bold + "peak: " + fmt.reset + tostr(run.memory.peak))
print(" - " + fmt.bold + "peak per process: " + fmt.reset + tostr(run.memory.peak_per_process))
# -----------------------------------------------------------------
def show_directory_info(run):
"""
This function ...
:param run:
:return:
"""
# Show the launch options
print(" - " + fmt.bold + "directory info:" + fmt.reset)
print(" - " + fmt.bold + "number of files: " + fmt.reset + tostr(run.nfiles))
print(" - " + fmt.bold + "directory size: " + fmt.reset + tostr(run.disk_space))
# -----------------------------------------------------------------
def show_heating_info(run):
"""
    This function ...
:param run:
:return:
"""
print(" - " + fmt.bold + "heating simulations:" + fmt.reset)
print("")
# BASIC
print(" - " + fmt.bold + "basic:" + fmt.reset)
print(" - " + fmt.bold + "old scale heights: " + fmt.reset + tostr(run.heating_config.old_scale_heights))
print("")
# Loop over the contributions
npackages = None
selfabsorption = None
transient_heating = None
for contribution in contributions:
# Get the ski path
#ski_path = run.heating_ski_path_for_contribution(contribution)
#ski = SkiFile(ski_path)
ski = run.get_ski_for_contribution(contribution)
        if npackages is None: npackages = ski.packages()
        elif ski.packages() != npackages: raise RuntimeError("Number of photon packages differs between contribution simulations")
        if selfabsorption is None: selfabsorption = ski.dustselfabsorption()
        elif ski.dustselfabsorption() != selfabsorption: raise RuntimeError("Dust self-absorption setting differs between contribution simulations")
        if transient_heating is None: transient_heating = ski.transient_dust_emissivity
        elif ski.transient_dust_emissivity != transient_heating: raise RuntimeError("Transient heating setting differs between contribution simulations")
# Get the output path
#output_path = run.heating_output_path_for_contribution(contribution)
# LAUNCH INFO
print(" - " + fmt.bold + "launch info:" + fmt.reset)
print(" - " + fmt.bold + "number of photon packages: " + fmt.reset + tostr(npackages))
print(" - " + fmt.bold + "dust self-absorption: " + fmt.reset + tostr(selfabsorption))
print(" - " + fmt.bold + "transient heating: " + fmt.reset + tostr(transient_heating))
print("")
# Loop over the contributions
print(" - " + fmt.bold + "finished:" + fmt.reset)
for contribution in contributions:
output_path = run.output_path_for_contribution(contribution)
print(" - " + fmt.bold + contribution + ": " + fmt.reset + tostr(output_path))
# -----------------------------------------------------------------
def show_residuals_info(run):
"""
This function ...
:param run:
:return:
"""
print(" - " + fmt.bold + "residuals:" + fmt.reset)
print("")
print(" - images: " + tostr(run.residual_image_names, delimiter=", "))
# -----------------------------------------------------------------
def show_colours_info(run):
"""
This function ...
:param run:
:return:
"""
print(" - " + fmt.bold + "colours:" + fmt.reset)
print("")
print(" - colours: " + tostr(run.colour_names, delimiter=", "))
# -----------------------------------------------------------------
def show_attenuation_info(run):
"""
This function ...
:param run:
:return:
"""
print(" - " + fmt.bold + "attenuation:" + fmt.reset)
print("")
# -----------------------------------------------------------------
def show_maps_info(run):
"""
    This function ...
:param run:
:return:
"""
print(" - " + fmt.bold + "maps:" + fmt.reset)
print("")
if run.has_maps_colours: print(" - colours: " + str(run.ncolour_maps) + " maps")
if run.has_maps_ssfr: print(" - ssfr: " + str(run.nssfr_maps) + " maps")
if run.has_maps_tir: print(" - tir: " + str(run.ntir_maps) + " maps")
if run.has_maps_attenuation: print(" - attenuation: " + str(run.nattenuation_maps) + " maps")
if run.has_maps_old: print(" - old: " + str(run.nold_maps) + " maps")
    if run.has_maps_dust: print("   - dust: " + str(run.ndust_maps) + " maps")
    if run.has_maps_young: print("   - young: " + str(run.nyoung_maps) + " maps")
    if run.has_maps_ionizing: print("   - ionizing: " + str(run.nionizing_maps) + " maps")
# -----------------------------------------------------------------
def show_run_info(run):
"""
This function ...
:param run:
:return:
"""
# Show basic info
if config.basic:
show_basic_info(run)
print("")
# Show model info
if config.model:
show_model_info(run)
print("")
# Show wavelength grid info
if config.wavelength:
show_wavelength_grid_info(run)
print("")
# Show dust grid info
if config.grid:
show_dust_grid_info(run)
print("")
# Show launch info
if config.launch:
show_launch_info(run)
print("")
# Show parallelization
if config.parallelization and run.has_logfile:
show_parallelization_info(run)
print("")
# Show timing info
if config.timing and run.has_timeline:
show_timing_info(run)
print("")
# Show memory info
if config.memory and run.has_memory:
show_memory_info(run)
print("")
# Show directory info
if config.directory:
show_directory_info(run)
print("")
# Show heating launch info
if config.heating and run.has_heating:
show_heating_info(run)
print("")
# Show residuals analysis info
if config.residuals and run.has_residuals:
show_residuals_info(run)
print("")
# Show colours analysis info
if config.colours and run.has_colours:
show_colours_info(run)
print("")
# Show attenuation analysis info
if config.attenuation and run.has_attenuation:
show_attenuation_info(run)
print("")
# Show maps analysis info
if config.maps and run.has_maps:
show_maps_info(run)
print("")
# -----------------------------------------------------------------
if config.cached:
# Loop over the cache host IDS
for host_id in context.cache_host_ids:
print("")
print(fmt.yellow + host_id.upper() + ":" + fmt.reset)
print("")
# Get the run names
run_names = context.get_run_names_for_host_id(host_id)
# Loop over the runs
for name in run_names:
# Check in runs
if config.runs is not None and name not in config.runs: continue
# Show the name
print(" - " + fmt.underlined + fmt.blue + name + fmt.reset)
# Show the info
if config.info:
# Load the run
run = context.get_cached_run(name)
# Show the info
print("")
show_run_info(run)
#print("")
# Show the parameters
elif config.parameters is not None:
# Load the run
run = context.get_cached_run(name)
print("")
# Show the parameter values
for name in config.parameters:
print(" - " + fmt.bold + name + ": " + fmt.reset + tostr(run.info.parameter_values[name]))
print("")
# -----------------------------------------------------------------
# Empty line to separate
if not (config.info or config.parameters is not None): print("")
# Show the local analysis runs
print(fmt.yellow + "LOCAL:" + fmt.reset)
print("")
# Loop over the names
for name in context.analysis_run_names:
# Check in runs
if config.runs is not None and name not in config.runs: continue
# Show the name
print(" - " + fmt.underlined + fmt.blue + name + fmt.reset)
# Show the info
if config.info:
# Load the run
run = context.get_run(name)
# Show the info
print("")
show_run_info(run)
#print("")
# Show the parameters
elif config.parameters is not None:
# Load the run
run = context.get_run(name)
print("")
# Show the parameter values
for name in config.parameters:
print(" - " + fmt.bold + name + ": " + fmt.reset + tostr(run.info.parameter_values[name]))
print("")
# End with empty line
if not (config.info or config.parameters is not None): print("")
# -----------------------------------------------------------------
| SKIRT/PTS | do/modeling/show_analysis_runs.py | Python | agpl-3.0 | 19,526 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""
Versioneer 2
============
* Like a rocketeer, but for versions!
* Based on https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, and pypy
* Edited by Ryan Dwyer
This is a tool for managing a recorded version number in python projects.
The goal is to remove the tedious and error-prone "update the embedded version
string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control system,
and maybe making new tarballs.
## Cookiecutter
* If you got this file using cookiecutter, the manual steps listed below should
all be done.
* Run `git tag 1.0` (for example), and register and upload to PyPI as usual.
## Manual Install
* Copy this file to beside your setup.py
* Add the following to your setup.py:
import imp
fp, pathname, description = imp.find_module('versioneer')
try:
versioneer = imp.load_module('versioneer', fp, pathname, description)
finally:
if fp: fp.close()
versioneer.VCS = 'git'
versioneer.versionfile_source = 'src/myproject/_version.py'
versioneer.versionfile_build = 'myproject/_version.py'
versioneer.tag_prefix = '' # tags are like 1.2.0
versioneer.parentdir_prefix = 'myproject-' # dirname like 'myproject-1.2.0'
* Add the following arguments to the setup() call in your setup.py:
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
* Now run `python setup.py versioneer`, which will create `_version.py`, and will
modify your `__init__.py` (if one exists next to `_version.py`) to define
`__version__` (by calling a function from `_version.py`). It will also
modify your `MANIFEST.in` to include both `versioneer.py` and the generated
`_version.py` in sdist tarballs.
* Commit these changes to your VCS. To make sure you won't forget,
`setup.py versioneer` will mark everything it touched for addition.
If you distribute your project through PyPI, then the release process should
boil down to two steps:
* 1: git tag 1.0
* 2: python setup.py register sdist upload
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example 'git describe --tags --dirty --always' reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes).
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time. However,
when you use "setup.py build" or "setup.py sdist", `_version.py` in the new
copy is replaced by a small static file that contains just the generated
version data.
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the "git archive" command. As a result, generated tarballs will
contain enough information to get the proper version.
## Detailed Installation Instructions
First, decide on values for the following configuration variables:
* `VCS`: the version control system you use. Currently accepts "git".
* `versionfile_source`:
A project-relative pathname into which the generated version strings should
be written. This is usually a `_version.py` next to your project's main
`__init__.py` file, so it can be imported at runtime. If your project uses
`src/myproject/__init__.py`, this should be `src/myproject/_version.py`.
This file should be checked in to your VCS as usual: the copy created below
by `setup.py versioneer` will include code that parses expanded VCS
keywords in generated tarballs. The 'build' and 'sdist' commands will
replace it with a copy that has just the calculated version string.
This must be set even if your project does not have any modules (and will
therefore never import `_version.py`), since "setup.py sdist" -based trees
still need somewhere to record the pre-calculated version strings. Anywhere
in the source tree should do. If there is a `__init__.py` next to your
`_version.py`, the `setup.py versioneer` command (described below) will
append some `__version__`-setting assignments, if they aren't already
present.
* `versionfile_build`:
Like `versionfile_source`, but relative to the build directory instead of
the source directory. These will differ when your setup.py uses
'package_dir='. If you have `package_dir={'myproject': 'src/myproject'}`,
then you will probably have `versionfile_build='myproject/_version.py'` and
`versionfile_source='src/myproject/_version.py'`.
If this is set to None, then `setup.py build` will not attempt to rewrite
any `_version.py` in the built tree. If your project does not have any
libraries (e.g. if it only builds a script), then you should use
`versionfile_build = None` and override `distutils.command.build_scripts`
to explicitly insert a copy of `versioneer.get_version()` into your
generated script.
* `tag_prefix`:
a string, like 'PROJECTNAME-', which appears at the start of all VCS tags.
If your tags look like 'myproject-1.2.0', then you should use
tag_prefix='myproject-'. If you use unprefixed tags like '1.2.0', this
should be an empty string.
* `parentdir_prefix`:
a string, frequently the same as tag_prefix, which appears at the start of
all unpacked tarball filenames. If your tarball unpacks into
'myproject-1.2.0', this should be 'myproject-'.
This tool provides one script, named `versioneer-installer`. That script does
one thing: write a copy of `versioneer.py` into the current directory.
To versioneer-enable your project:
* 1: Run `versioneer-installer` to copy `versioneer.py` into the top of your
source tree.
* 2: add the following lines to the top of your `setup.py`, with the
configuration values you decided earlier:
import imp
fp, pathname, description = imp.find_module('versioneer')
try:
versioneer = imp.load_module('versioneer', fp, pathname, description)
finally:
if fp: fp.close()
versioneer.VCS = 'git'
versioneer.versionfile_source = 'src/myproject/_version.py'
versioneer.versionfile_build = 'myproject/_version.py'
versioneer.tag_prefix = '' # tags are like 1.2.0
versioneer.parentdir_prefix = 'myproject-' # dirname like 'myproject-1.2.0'
* 3: add the following arguments to the setup() call in your setup.py:
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
* 4: now run `setup.py versioneer`, which will create `_version.py`, and will
modify your `__init__.py` (if one exists next to `_version.py`) to define
`__version__` (by calling a function from `_version.py`). It will also
modify your `MANIFEST.in` to include both `versioneer.py` and the generated
`_version.py` in sdist tarballs.
* 5: commit these changes to your VCS. To make sure you won't forget,
`setup.py versioneer` will mark everything it touched for addition.
## Post-Installation Usage
Once established, all uses of your tree from a VCS checkout should get the
current version string. All generated tarballs should include an embedded
version string (so users who unpack them will not need a VCS tool installed).
If you distribute your project through PyPI, then the release process should
boil down to two steps:
* 1: git tag 1.0
* 2: python setup.py register sdist upload
If you distribute it through github (i.e. users use github to generate
tarballs with `git archive`), the process is:
* 1: git tag 1.0
* 2: git push; git push --tags
Currently, all version strings must be based upon a tag. Versioneer will
report "unknown" until your tree has at least one tag in its history. This
restriction will be fixed eventually (see issue #12).
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different keys for different flavors
of the version string:
* `['version']`: condensed tag+distance+shortid+dirty identifier.
This is based on the output of `git describe --tags --dirty --always` but
strips the tag_prefix. For example "0.11.post0.dev2+g1076c97-dirty" indicates
that the tree is like the "1076c97" commit but has uncommitted changes
("-dirty"), and that this commit is two revisions (".dev2") beyond the "0.11"
tag. For released software (exactly equal to a known tag),
the identifier will only contain the stripped tag, e.g. "0.11".
This version string is always fully PEP440 compliant.
* `['full']`: detailed revision identifier. For Git, this is the full SHA1
commit id, followed by "-dirty" if the tree contains uncommitted changes,
e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac-dirty".
Some variants are more useful than others. Including `full` in a bug report
should allow developers to reconstruct the exact code being tested (or
indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
The `setup.py versioneer` command adds the following text to your
`__init__.py` to place a basic version in `YOURPROJECT.__version__`:
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
## Updating Versioneer2
To upgrade your project to a new release of Versioneer, do the following:
* install the new versioneer2 (`pip install -U versioneer2` or equivalent)
* re-run `versioneer2installer` in your source tree to replace your copy of
`versioneer.py`
* edit `setup.py`, if necessary, to include any new configuration settings
indicated by the release notes
* re-run `setup.py versioneer` to replace `SRC/_version.py`
* commit any changed files
"""
import os
import sys
import re
import subprocess
import errno
import string
from distutils.core import Command
from distutils.command.sdist import sdist as _sdist
from distutils.command.build import build as _build
__version__ = '0.1.7'
# these configuration settings will be overridden by setup.py after it
# imports us
versionfile_source = None
versionfile_build = None
tag_prefix = None
parentdir_prefix = None
VCS = None
# This is hard-coded for now.
release_type_string = "post0.dev"
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % args[0])
print(e)
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % args[0])
return None
return stdout
LONG_VERSION_PY['git'] = '''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer2-(%(__version__)s) (https://github.com/ryanpdwyer/versioneer2)
# these strings will be replaced by git during git-archive
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
# these strings are filled in when 'setup.py versioneer' creates _version.py
tag_prefix = "%(TAG_PREFIX)s"
parentdir_prefix = "%(PARENTDIR_PREFIX)s"
versionfile_source = "%(VERSIONFILE_SOURCE)s"
import os, sys, re, subprocess, errno
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% args[0])
print(e)
return None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% args[0])
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose=False):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%%s', but '%%s' doesn't start with prefix '%%s'" %%
(root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):].replace("_2", "+").strip(".egg"), "full": ""}
def git_get_keywords(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs,"r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
def git_versions_from_keywords(keywords, tag_prefix, verbose=False):
if not keywords:
return {} # keyword-finding function failed to find keywords
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs-tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return { "version": r,
"full": keywords["full"].strip() }
# no suitable tags, so we use the full revision id
if verbose:
print("no suitable tags, using full revision id")
return { "version": keywords["full"].strip(),
"full": keywords["full"].strip() }
def git_versions_from_vcs(tag_prefix, root, verbose=False):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' keywords were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %%s" %% root)
return {}
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
stdout = run_command(GITS, ["describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print("tag '%%s' doesn't start with prefix '%%s'" %% (stdout, tag_prefix))
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full}
def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
keywords = { "refnames": git_refnames, "full": git_full }
ver = git_versions_from_keywords(keywords, tag_prefix, verbose)
if ver:
return ver
try:
root = os.path.abspath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in range(len(versionfile_source.split(os.sep))):
root = os.path.dirname(root)
except NameError:
return default
return (git_versions_from_vcs(tag_prefix, root, verbose)
or versions_from_parentdir(parentdir_prefix, root, verbose)
or default)
'''
def git_get_keywords(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs,"r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
def git_versions_from_keywords(keywords, tag_prefix, verbose=False):
if not keywords:
return {} # keyword-finding function failed to find keywords
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs-tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return { "version": r,
"full": keywords["full"].strip() }
# no suitable tags, so we use the full revision id
if verbose:
print("no suitable tags, using full revision id")
return { "version": keywords["full"].strip(),
"full": keywords["full"].strip() }
def git_versions_from_vcs(tag_prefix, root, verbose=False):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' keywords were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
return {}
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
stdout = run_command(GITS, ["describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix))
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full, '__version__': __version__}
def do_vcs_install(manifest_in, versionfile_source, ipy):
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
files = [manifest_in, versionfile_source]
if ipy:
files.append(ipy)
try:
me = __file__
if me.endswith(".pyc") or me.endswith(".pyo"):
me = os.path.splitext(me)[0] + ".py"
versioneer_file = os.path.relpath(me)
except NameError:
versioneer_file = "versioneer.py"
files.append(versioneer_file)
present = False
try:
f = open(".gitattributes", "r")
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
f.close()
except EnvironmentError:
pass
if not present:
f = open(".gitattributes", "a+")
f.write("%s export-subst\n" % versionfile_source)
f.close()
files.append(".gitattributes")
run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose=False):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
(root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):].replace("_", "+").strip(".egg"), "full": ""}
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (%(__version__)s) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
version_version = '%(version)s'
version_full = '%(full)s'
def get_versions(default={}, verbose=False):
return {'version': version_version, 'full': version_full}
"""
DEFAULT = {"version": "unknown", "full": "unknown"}
def versions_from_file(filename):
versions = {}
try:
with open(filename) as f:
for line in f.readlines():
mo = re.match("version_version = '([^']+)'", line)
if mo:
versions["version"] = mo.group(1)
mo = re.match("version_full = '([^']+)'", line)
if mo:
versions["full"] = mo.group(1)
except EnvironmentError:
return {}
return versions
def write_to_version_file(filename, versions):
with open(filename, "w") as f:
f.write(SHORT_VERSION_PY % versions)
print("set %s to '%s'" % (filename, versions["version"]))
def get_root():
try:
return os.path.dirname(os.path.abspath(__file__))
except NameError:
return os.path.dirname(os.path.abspath(sys.argv[0]))
def vcs_function(vcs, suffix):
return getattr(sys.modules[__name__], '%s_%s' % (vcs, suffix), None)
def get_versions(default=DEFAULT, verbose=False):
# returns dict with two keys: 'version' and 'full'
assert versionfile_source is not None, "please set versioneer.versionfile_source"
assert tag_prefix is not None, "please set versioneer.tag_prefix"
assert parentdir_prefix is not None, "please set versioneer.parentdir_prefix"
assert VCS is not None, "please set versioneer.VCS"
# I am in versioneer.py, which must live at the top of the source tree,
# which we use to compute the root directory. py2exe/bbfreeze/non-CPython
# don't have __file__, in which case we fall back to sys.argv[0] (which
# ought to be the setup.py script). We prefer __file__ since that's more
# robust in cases where setup.py was invoked in some weird way (e.g. pip)
root = get_root()
versionfile_abs = os.path.join(root, versionfile_source)
# extract version first from _version.py, VCS command (e.g. 'git
# describe'), parentdir. This is meant to work for developers using a
# source checkout, for users of a tarball created by 'setup.py sdist',
# and for users of a tarball/zipball created by 'git archive' or github's
# download-from-tag feature or the equivalent in other VCSes.
get_keywords_f = vcs_function(VCS, "get_keywords")
versions_from_keywords_f = vcs_function(VCS, "versions_from_keywords")
if get_keywords_f and versions_from_keywords_f:
vcs_keywords = get_keywords_f(versionfile_abs)
ver = versions_from_keywords_f(vcs_keywords, tag_prefix)
if ver:
if verbose: print("got version from expanded keyword %s" % ver)
return rep_by_pep440(ver)
ver = versions_from_file(versionfile_abs)
if ver:
if verbose: print("got version from file %s %s" % (versionfile_abs,ver))
return rep_by_pep440(ver)
versions_from_vcs_f = vcs_function(VCS, "versions_from_vcs")
if versions_from_vcs_f:
ver = versions_from_vcs_f(tag_prefix, root, verbose)
if ver:
if verbose: print("got version from VCS %s" % ver)
return rep_by_pep440(ver)
ver = versions_from_parentdir(parentdir_prefix, root, verbose)
if ver:
if verbose: print("got version from parentdir %s" % ver)
return rep_by_pep440(ver)
if verbose: print("got version from default %s" % default)
return default
def get_version(verbose=False):
return get_versions(verbose=verbose)["version"]
def git2pep440(ver_str):
ver_parts = ver_str.split('-')
tag = ver_parts[0]
if len(ver_parts) == 1:
return tag
elif len(ver_parts) == 2:
commits = 0
git_hash = ''
dirty = 'dirty'
elif len(ver_parts) == 3:
commits = ver_parts[1]
git_hash = ver_parts[2]
dirty=''
elif len(ver_parts) == 4:
commits = ver_parts[1]
git_hash = ver_parts[2]
dirty = '.dirty'
else:
raise Warning("git version string could not be parsed.")
    # NOTE: the PEP 440 re-formatting that used to follow here was unreachable
    # (it referenced an undefined release_type_string), so the describe string
    # is returned unchanged.
    return ver_str
def rep_by_pep440(ver):
ver["version"] = git2pep440(ver["version"])
return ver
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
ver = get_version(verbose=True)
print("Version is currently: %s" % ver)
class cmd_build(_build):
def run(self):
versions = get_versions(verbose=True)
_build.run(self)
# now locate _version.py in the new build/ directory and replace it
# with an updated value
if versionfile_build:
target_versionfile = os.path.join(self.build_lib, versionfile_build)
print("UPDATING %s" % target_versionfile)
os.unlink(target_versionfile)
with open(target_versionfile, "w") as f:
f.write(SHORT_VERSION_PY % versions)
if 'cx_Freeze' in sys.modules: # cx_freeze enabled?
from cx_Freeze.dist import build_exe as _build_exe
class cmd_build_exe(_build_exe):
def run(self):
versions = get_versions(verbose=True)
target_versionfile = versionfile_source
print("UPDATING %s" % target_versionfile)
os.unlink(target_versionfile)
with open(target_versionfile, "w") as f:
                f.write(SHORT_VERSION_PY % versions)
_build_exe.run(self)
os.unlink(target_versionfile)
with open(versionfile_source, "w") as f:
assert VCS is not None, "please set versioneer.VCS"
LONG = LONG_VERSION_PY[VCS]
f.write(LONG % {"DOLLAR": "$",
"TAG_PREFIX": tag_prefix,
"PARENTDIR_PREFIX": parentdir_prefix,
"VERSIONFILE_SOURCE": versionfile_source,
"__version__": __version__
})
class cmd_sdist(_sdist):
def run(self):
versions = get_versions(verbose=True)
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory (remembering
# that it may be a hardlink) and replace it with an updated value
target_versionfile = os.path.join(base_dir, versionfile_source)
print("UPDATING %s" % target_versionfile)
os.unlink(target_versionfile)
with open(target_versionfile, "w") as f:
f.write(SHORT_VERSION_PY % self._versioneer_generated_versions)
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
class cmd_update_files(Command):
description = "install/upgrade Versioneer files: __init__.py SRC/_version.py"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
print(" creating %s" % versionfile_source)
with open(versionfile_source, "w") as f:
assert VCS is not None, "please set versioneer.VCS"
LONG = LONG_VERSION_PY[VCS]
f.write(LONG % {"DOLLAR": "$",
"TAG_PREFIX": tag_prefix,
"PARENTDIR_PREFIX": parentdir_prefix,
"VERSIONFILE_SOURCE": versionfile_source,
"__version__": __version__
})
ipy = os.path.join(os.path.dirname(versionfile_source), "__init__.py")
if os.path.exists(ipy):
try:
with open(ipy, "r") as f:
old = f.read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
with open(ipy, "a") as f:
f.write(INIT_PY_SNIPPET)
else:
print(" %s unmodified" % ipy)
else:
print(" %s doesn't exist, ok" % ipy)
ipy = None
# Make sure both the top-level "versioneer.py" and versionfile_source
# (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
# they'll be copied into source distributions. Pip won't be able to
# install the package without this.
manifest_in = os.path.join(get_root(), "MANIFEST.in")
simple_includes = set()
try:
with open(manifest_in, "r") as f:
for line in f:
if line.startswith("include "):
for include in line.split()[1:]:
simple_includes.add(include)
except EnvironmentError:
pass
# That doesn't cover everything MANIFEST.in can do
# (http://docs.python.org/2/distutils/sourcedist.html#commands), so
# it might give some false negatives. Appending redundant 'include'
# lines is safe, though.
if "versioneer.py" not in simple_includes:
print(" appending 'versioneer.py' to MANIFEST.in")
with open(manifest_in, "a") as f:
f.write("include versioneer.py\n")
else:
print(" 'versioneer.py' already in MANIFEST.in")
if versionfile_source not in simple_includes:
print(" appending versionfile_source ('%s') to MANIFEST.in" %
versionfile_source)
with open(manifest_in, "a") as f:
f.write("include %s\n" % versionfile_source)
else:
print(" versionfile_source already in MANIFEST.in")
# Make VCS-specific changes. For git, this means creating/changing
# .gitattributes to mark _version.py for export-time keyword
# substitution.
do_vcs_install(manifest_in, versionfile_source, ipy)
def get_cmdclass():
cmds = {'version': cmd_version,
'versioneer': cmd_update_files,
'build': cmd_build,
'sdist': cmd_sdist,
}
if 'cx_Freeze' in sys.modules: # cx_freeze enabled?
cmds['build_exe'] = cmd_build_exe
del cmds['build']
return cmds
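# --- Illustrative usage sketch (not part of this file) ----------------------
# A typical setup.py wires versioneer in by setting the module-level knobs
# asserted above (VCS, versionfile_source, tag_prefix, parentdir_prefix) and
# handing get_version()/get_cmdclass() to setuptools. The package name and
# paths below are assumptions for illustration only.
#
#   import versioneer
#   from setuptools import setup
#
#   versioneer.VCS = 'git'
#   versioneer.versionfile_source = 'mypkg/_version.py'   # assumed layout
#   versioneer.versionfile_build = 'mypkg/_version.py'
#   versioneer.tag_prefix = ''                             # tags look like "1.2.0"
#   versioneer.parentdir_prefix = 'mypkg-'                 # sdist unpack dir prefix
#
#   setup(name='mypkg',
#         version=versioneer.get_version(),
#         cmdclass=versioneer.get_cmdclass())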
| ryanpdwyer/teensyio | versioneer.py | Python | mit | 37,855 |
# Local modules
from .beam import * # noqa
from .segmentation import * # noqa
| MartinThoma/hwrt | hwrt/segmentation/__init__.py | Python | mit | 80 |
import base64
import matplotlib
import os
import sys
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import FigureManagerBase, ShowBase
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.figure import Figure
from datalore.display import debug, display, SHOW_DEBUG_INFO
PY3 = sys.version_info[0] >= 3
index = int(os.getenv("PYCHARM_MATPLOTLIB_INDEX", 0))
rcParams = matplotlib.rcParams
class Show(ShowBase):
def __call__(self, **kwargs):
debug("show() called with args %s" % kwargs)
managers = Gcf.get_all_fig_managers()
if not managers:
debug("Error: Managers list in `Gcf.get_all_fig_managers()` is empty")
return
for manager in managers:
manager.show(**kwargs)
def mainloop(self):
pass
show = Show()
# from pyplot API
def draw_if_interactive():
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.show()
else:
debug("Error: Figure manager `Gcf.get_active()` is None")
# from pyplot API
def new_figure_manager(num, *args, **kwargs):
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, figure)
# from pyplot API
def new_figure_manager_given_figure(num, figure):
canvas = FigureCanvasInterAgg(figure)
manager = FigureManagerInterAgg(canvas, num)
return manager
# from pyplot API
class FigureCanvasInterAgg(FigureCanvasAgg):
def __init__(self, figure):
FigureCanvasAgg.__init__(self, figure)
def show(self):
FigureCanvasAgg.draw(self)
if matplotlib.__version__ < '1.2':
buffer = self.tostring_rgb(0, 0)
else:
buffer = self.tostring_rgb()
if len(set(buffer)) <= 1:
# do not plot empty
debug("Error: Buffer FigureCanvasAgg.tostring_rgb() is empty")
return
render = self.get_renderer()
width = int(render.width)
debug("Image width: %d" % width)
is_interactive = os.getenv("PYCHARM_MATPLOTLIB_INTERACTIVE", False)
if is_interactive:
debug("Using interactive mode (Run with Python Console)")
debug("Plot index = %d" % index)
else:
debug("Using non-interactive mode (Run without Python Console)")
plot_index = index if is_interactive else -1
display(DisplayDataObject(plot_index, width, buffer))
def draw(self):
FigureCanvasAgg.draw(self)
is_interactive = os.getenv("PYCHARM_MATPLOTLIB_INTERACTIVE", False)
if is_interactive and matplotlib.is_interactive():
self.show()
else:
debug("Error: calling draw() in non-interactive mode won't show a plot. Try to 'Run with Python Console'")
class FigureManagerInterAgg(FigureManagerBase):
def __init__(self, canvas, num):
FigureManagerBase.__init__(self, canvas, num)
global index
index += 1
self.canvas = canvas
self._num = num
self._shown = False
def show(self, **kwargs):
self.canvas.show()
Gcf.destroy(self._num)
class DisplayDataObject:
def __init__(self, plot_index, width, image_bytes):
self.plot_index = plot_index
self.image_width = width
self.image_bytes = image_bytes
def _repr_display_(self):
image_bytes_base64 = base64.b64encode(self.image_bytes)
if PY3:
image_bytes_base64 = image_bytes_base64.decode()
body = {
'plot_index': self.plot_index,
'image_width': self.image_width,
'image_base64': image_bytes_base64
}
return ('pycharm-plot-image', body)
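# Illustrative note (an assumption, not part of this backend): matplotlib can
# be pointed at a third-party backend module like this one via the "module://"
# prefix, provided the module is importable on sys.path as backend_interagg.
#
#   import matplotlib
#   matplotlib.use('module://backend_interagg')
#   import matplotlib.pyplot as plt
#   plt.plot([1, 2, 3])
#   plt.show()   # routed through Show()/FigureCanvasInterAgg above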
| jwren/intellij-community | python/helpers/pycharm_matplotlib_backend/backend_interagg.py | Python | apache-2.0 | 3,831 |
from fnmatch import fnmatch
import os
_EXCLUDE_PATTERNS = []
def set_exclude_patters(excludes):
global _EXCLUDE_PATTERNS
_EXCLUDE_PATTERNS = excludes
def get_exclude_patterns():
return _EXCLUDE_PATTERNS
def walk_excl(path, **kwargs):
"""
Do os.walk dropping our excluded directories on the way.
"""
for (dirpath, dirnames, filenames) in os.walk(path, **kwargs):
dirnames[:] = [dn for dn in dirnames if not is_excluded_filename(dn)]
yield (dirpath, dirnames, filenames)
def is_excluded_filename(filename):
return any(fnmatch(filename, pat) for pat in _EXCLUDE_PATTERNS)
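# Illustrative usage sketch (the patterns below are example values only):
#
#   set_exclude_patters(["*.pyc", ".git", "node_modules"])
#   for dirpath, dirnames, filenames in walk_excl("."):
#       print(dirpath, filenames)   # excluded directories are never descended into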
| arth-co/shoop | shoop_setup_utils/excludes.py | Python | agpl-3.0 | 627 |
#!/usr/bin/env python2
# Terminator by Chris Jones <cmsj@tenshu.net>
# GPL v2 only
"""terminal_popup_menu.py - classes necessary to provide a terminal context
menu"""
import string
from gi.repository import Gtk
from version import APP_NAME
from translation import _
from encoding import TerminatorEncoding
from terminator import Terminator
from util import err, dbg
from config import Config
from prefseditor import PrefsEditor
import plugin
class TerminalPopupMenu(object):
"""Class implementing the Terminal context menu"""
terminal = None
terminator = None
config = None
def __init__(self, terminal):
"""Class initialiser"""
self.terminal = terminal
self.terminator = Terminator()
self.config = Config()
def show(self, widget, event=None):
"""Display the context menu"""
terminal = self.terminal
menu = Gtk.Menu()
self.popup_menu = menu
url = None
button = None
time = None
self.config.set_profile(terminal.get_profile())
if event:
url = terminal.vte.match_check_event(event)
button = event.button
time = event.time
else:
time = 0
button = 3
if url and url[0]:
dbg("URL matches id: %d" % url[1])
if not url[1] in terminal.matches.values():
err("Unknown URL match id: %d" % url[1])
dbg("Available matches: %s" % terminal.matches)
nameopen = None
namecopy = None
if url[1] == terminal.matches['email']:
nameopen = _('_Send email to...')
namecopy = _('_Copy email address')
elif url[1] == terminal.matches['voip']:
nameopen = _('Ca_ll VoIP address')
namecopy = _('_Copy VoIP address')
elif url[1] in terminal.matches.values():
# This is a plugin match
for pluginname in terminal.matches:
if terminal.matches[pluginname] == url[1]:
break
dbg("Found match ID (%d) in terminal.matches plugin %s" %
(url[1], pluginname))
registry = plugin.PluginRegistry()
registry.load_plugins()
plugins = registry.get_plugins_by_capability('url_handler')
for urlplugin in plugins:
if urlplugin.handler_name == pluginname:
dbg("Identified matching plugin: %s" %
urlplugin.handler_name)
nameopen = _(urlplugin.nameopen)
namecopy = _(urlplugin.namecopy)
break
if not nameopen:
nameopen = _('_Open link')
if not namecopy:
namecopy = _('_Copy address')
icon = Gtk.Image.new_from_stock(Gtk.STOCK_JUMP_TO,
Gtk.IconSize.MENU)
item = Gtk.ImageMenuItem.new_with_mnemonic(nameopen)
item.set_property('image', icon)
item.connect('activate', lambda x: terminal.open_url(url, True))
menu.append(item)
item = Gtk.MenuItem.new_with_mnemonic(namecopy)
item.connect('activate',
lambda x: terminal.clipboard.set_text(terminal.prepare_url(url), len(terminal.prepare_url(url))))
menu.append(item)
menu.append(Gtk.SeparatorMenuItem())
item = Gtk.ImageMenuItem.new_with_mnemonic(_('_Copy'))
item.connect('activate', lambda x: terminal.vte.copy_clipboard())
item.set_sensitive(terminal.vte.get_has_selection())
menu.append(item)
item = Gtk.ImageMenuItem.new_with_mnemonic(_('_Paste'))
item.connect('activate', lambda x: terminal.paste_clipboard())
menu.append(item)
menu.append(Gtk.SeparatorMenuItem())
if not terminal.is_zoomed():
item = Gtk.ImageMenuItem.new_with_mnemonic(_('Split H_orizontally'))
image = Gtk.Image()
image.set_from_icon_name(APP_NAME + '_horiz', Gtk.IconSize.MENU)
item.set_image(image)
if hasattr(item, 'set_always_show_image'):
item.set_always_show_image(True)
item.connect('activate', lambda x: terminal.emit('split-horiz',
self.terminal.get_cwd()))
menu.append(item)
item = Gtk.ImageMenuItem.new_with_mnemonic(_('Split V_ertically'))
image = Gtk.Image()
image.set_from_icon_name(APP_NAME + '_vert', Gtk.IconSize.MENU)
item.set_image(image)
if hasattr(item, 'set_always_show_image'):
item.set_always_show_image(True)
item.connect('activate', lambda x: terminal.emit('split-vert',
self.terminal.get_cwd()))
menu.append(item)
item = Gtk.MenuItem.new_with_mnemonic(_('Open _Tab'))
item.connect('activate', lambda x: terminal.emit('tab-new', False,
terminal))
menu.append(item)
if self.terminator.debug_address is not None:
item = Gtk.MenuItem.new_with_mnemonic(_('Open _Debug Tab'))
item.connect('activate', lambda x:
terminal.emit('tab-new', True, terminal))
menu.append(item)
menu.append(Gtk.SeparatorMenuItem())
item = Gtk.ImageMenuItem.new_with_mnemonic(_('_Close'))
item.connect('activate', lambda x: terminal.close())
menu.append(item)
menu.append(Gtk.SeparatorMenuItem())
if not terminal.is_zoomed():
sensitive = not terminal.get_toplevel() == terminal.get_parent()
item = Gtk.MenuItem.new_with_mnemonic(_('_Zoom terminal'))
item.connect('activate', terminal.zoom)
item.set_sensitive(sensitive)
menu.append(item)
item = Gtk.MenuItem.new_with_mnemonic(_('Ma_ximize terminal'))
item.connect('activate', terminal.maximise)
item.set_sensitive(sensitive)
menu.append(item)
menu.append(Gtk.SeparatorMenuItem())
else:
item = Gtk.MenuItem.new_with_mnemonic(_('_Restore all terminals'))
item.connect('activate', terminal.unzoom)
menu.append(item)
menu.append(Gtk.SeparatorMenuItem())
if self.config['show_titlebar'] == False:
item = Gtk.MenuItem.new_with_mnemonic(_('Grouping'))
submenu = self.terminal.populate_group_menu()
submenu.show_all()
item.set_submenu(submenu)
menu.append(item)
menu.append(Gtk.SeparatorMenuItem())
item = Gtk.CheckMenuItem.new_with_mnemonic(_('Show _scrollbar'))
item.set_active(terminal.scrollbar.get_property('visible'))
item.connect('toggled', lambda x: terminal.do_scrollbar_toggle())
menu.append(item)
        item = Gtk.CheckMenuItem.new_with_mnemonic(_('Toggle tab visibility'))
item.set_active(terminal.scrollbar.get_property('visible'))
item.connect('toggled', self.toggle_tab_visibility)
menu.append(item)
if hasattr(Gtk, 'Builder'): # VERIFY FOR GTK3: is this ever false?
item = Gtk.MenuItem.new_with_mnemonic(_('_Preferences'))
item.connect('activate', lambda x: PrefsEditor(self.terminal))
menu.append(item)
profilelist = sorted(self.config.list_profiles(), key=string.lower)
if len(profilelist) > 1:
item = Gtk.MenuItem.new_with_mnemonic(_('Profiles'))
submenu = Gtk.Menu()
item.set_submenu(submenu)
menu.append(item)
current = terminal.get_profile()
group = None
for profile in profilelist:
profile_label = profile
if profile_label == 'default':
profile_label = profile.capitalize()
item = Gtk.RadioMenuItem(profile_label, group)
if profile == current:
item.set_active(True)
item.connect('activate', terminal.force_set_profile, profile)
submenu.append(item)
self.add_encoding_items(menu)
try:
menuitems = []
registry = plugin.PluginRegistry()
registry.load_plugins()
plugins = registry.get_plugins_by_capability('terminal_menu')
for menuplugin in plugins:
menuplugin.callback(menuitems, menu, terminal)
if len(menuitems) > 0:
menu.append(Gtk.SeparatorMenuItem())
for menuitem in menuitems:
menu.append(menuitem)
except Exception, ex:
err('TerminalPopupMenu::show: %s' % ex)
menu.show_all()
menu.popup(None, None, None, None, button, time)
return(True)
def toggle_tab_visibility(self, widget):
"""tab visibility"""
status = self.config['tab_position']
old_tab_position = self.config['old_tab_position']
if status == 'hidden':
if old_tab_position:
#if there's no oldstatus, hidden is default option
self.config['tab_position'] = old_tab_position
self.config.save()
else:
self.config['old_tab_position'] = status
self.config['tab_position'] = 'hidden'
self.config.save()
terminator = Terminator()
terminator.reconfigure()
def add_encoding_items(self, menu):
"""Add the encoding list to the menu"""
terminal = self.terminal
active_encodings = terminal.config['active_encodings']
item = Gtk.MenuItem.new_with_mnemonic(_("Encodings"))
menu.append (item)
submenu = Gtk.Menu ()
item.set_submenu (submenu)
encodings = TerminatorEncoding ().get_list ()
encodings.sort (lambda x, y: cmp (x[2].lower (), y[2].lower ()))
current_encoding = terminal.vte.get_encoding ()
group = None
if current_encoding not in active_encodings:
active_encodings.insert (0, _(current_encoding))
for encoding in active_encodings:
if encoding == terminal.default_encoding:
extratext = " (%s)" % _("Default")
elif encoding == current_encoding and \
terminal.custom_encoding == True:
extratext = " (%s)" % _("User defined")
else:
extratext = ""
radioitem = Gtk.RadioMenuItem (_(encoding) + extratext, group)
if encoding == current_encoding:
radioitem.set_active (True)
if group is None:
group = radioitem
radioitem.connect ('activate', terminal.on_encoding_change,
encoding)
submenu.append (radioitem)
item = Gtk.MenuItem.new_with_mnemonic(_("Other Encodings"))
submenu.append (item)
#second level
submenu = Gtk.Menu ()
item.set_submenu (submenu)
group = None
for encoding in encodings:
if encoding[1] in active_encodings:
continue
if encoding[1] is None:
label = "%s %s" % (encoding[2], terminal.vte.get_encoding ())
else:
label = "%s %s" % (encoding[2], encoding[1])
radioitem = Gtk.RadioMenuItem (label, group)
if group is None:
group = radioitem
if encoding[1] == current_encoding:
radioitem.set_active (True)
radioitem.connect ('activate', terminal.on_encoding_change,
encoding[1])
submenu.append (radioitem)
| wpjunior/terminator | terminatorlib/terminal_popup_menu.py | Python | gpl-2.0 | 11,983 |
# -*- coding: utf-8 -*-
# (c) 2018 Matt Martz <matt@sivel.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import datetime
import os
from ansible.module_utils.urls import (Request, open_url, urllib_request, HAS_SSLCONTEXT, cookiejar, RequestWithMethod,
UnixHTTPHandler, UnixHTTPSConnection, httplib)
from ansible.module_utils.urls import SSLValidationHandler, HTTPSClientAuthHandler, RedirectHandlerFactory
import pytest
from mock import call
if HAS_SSLCONTEXT:
import ssl
@pytest.fixture
def urlopen_mock(mocker):
return mocker.patch('ansible.module_utils.urls.urllib_request.urlopen')
@pytest.fixture
def install_opener_mock(mocker):
return mocker.patch('ansible.module_utils.urls.urllib_request.install_opener')
def test_Request_fallback(urlopen_mock, install_opener_mock, mocker):
cookies = cookiejar.CookieJar()
request = Request(
headers={'foo': 'bar'},
use_proxy=False,
force=True,
timeout=100,
validate_certs=False,
url_username='user',
url_password='passwd',
http_agent='ansible-tests',
force_basic_auth=True,
follow_redirects='all',
client_cert='/tmp/client.pem',
client_key='/tmp/client.key',
cookies=cookies,
unix_socket='/foo/bar/baz.sock',
ca_path='/foo/bar/baz.pem',
)
fallback_mock = mocker.spy(request, '_fallback')
r = request.open('GET', 'https://ansible.com')
calls = [
call(None, False), # use_proxy
call(None, True), # force
call(None, 100), # timeout
call(None, False), # validate_certs
call(None, 'user'), # url_username
call(None, 'passwd'), # url_password
call(None, 'ansible-tests'), # http_agent
call(None, True), # force_basic_auth
call(None, 'all'), # follow_redirects
call(None, '/tmp/client.pem'), # client_cert
call(None, '/tmp/client.key'), # client_key
call(None, cookies), # cookies
call(None, '/foo/bar/baz.sock'), # unix_socket
call(None, '/foo/bar/baz.pem'), # ca_path
]
fallback_mock.assert_has_calls(calls)
assert fallback_mock.call_count == 14 # All but headers use fallback
args = urlopen_mock.call_args[0]
assert args[1] is None # data, this is handled in the Request not urlopen
assert args[2] == 100 # timeout
req = args[0]
assert req.headers == {
'Authorization': b'Basic dXNlcjpwYXNzd2Q=',
'Cache-control': 'no-cache',
'Foo': 'bar',
'User-agent': 'ansible-tests'
}
assert req.data is None
assert req.get_method() == 'GET'
def test_Request_open(urlopen_mock, install_opener_mock):
r = Request().open('GET', 'https://ansible.com/')
args = urlopen_mock.call_args[0]
assert args[1] is None # data, this is handled in the Request not urlopen
assert args[2] == 10 # timeout
req = args[0]
assert req.headers == {}
assert req.data is None
assert req.get_method() == 'GET'
opener = install_opener_mock.call_args[0][0]
handlers = opener.handlers
if not HAS_SSLCONTEXT:
expected_handlers = (
SSLValidationHandler,
RedirectHandlerFactory(), # factory, get handler
)
else:
expected_handlers = (
RedirectHandlerFactory(), # factory, get handler
)
found_handlers = []
for handler in handlers:
if isinstance(handler, SSLValidationHandler) or handler.__class__.__name__ == 'RedirectHandler':
found_handlers.append(handler)
assert len(found_handlers) == len(expected_handlers)
def test_Request_open_http(urlopen_mock, install_opener_mock):
r = Request().open('GET', 'http://ansible.com/')
args = urlopen_mock.call_args[0]
opener = install_opener_mock.call_args[0][0]
handlers = opener.handlers
found_handlers = []
for handler in handlers:
if isinstance(handler, SSLValidationHandler):
found_handlers.append(handler)
assert len(found_handlers) == 0
def test_Request_open_unix_socket(urlopen_mock, install_opener_mock):
r = Request().open('GET', 'http://ansible.com/', unix_socket='/foo/bar/baz.sock')
args = urlopen_mock.call_args[0]
opener = install_opener_mock.call_args[0][0]
handlers = opener.handlers
found_handlers = []
for handler in handlers:
if isinstance(handler, UnixHTTPHandler):
found_handlers.append(handler)
assert len(found_handlers) == 1
def test_Request_open_https_unix_socket(urlopen_mock, install_opener_mock):
r = Request().open('GET', 'https://ansible.com/', unix_socket='/foo/bar/baz.sock')
args = urlopen_mock.call_args[0]
opener = install_opener_mock.call_args[0][0]
handlers = opener.handlers
found_handlers = []
for handler in handlers:
if isinstance(handler, HTTPSClientAuthHandler):
found_handlers.append(handler)
assert len(found_handlers) == 1
inst = found_handlers[0]._build_https_connection('foo')
assert isinstance(inst, UnixHTTPSConnection)
def test_Request_open_ftp(urlopen_mock, install_opener_mock, mocker):
mocker.patch('ansible.module_utils.urls.ParseResultDottedDict.as_list', side_effect=AssertionError)
# Using ftp scheme should prevent the AssertionError side effect to fire
r = Request().open('GET', 'ftp://foo@ansible.com/')
def test_Request_open_headers(urlopen_mock, install_opener_mock):
r = Request().open('GET', 'http://ansible.com/', headers={'Foo': 'bar'})
args = urlopen_mock.call_args[0]
req = args[0]
assert req.headers == {'Foo': 'bar'}
def test_Request_open_username(urlopen_mock, install_opener_mock):
r = Request().open('GET', 'http://ansible.com/', url_username='user')
opener = install_opener_mock.call_args[0][0]
handlers = opener.handlers
expected_handlers = (
urllib_request.HTTPBasicAuthHandler,
urllib_request.HTTPDigestAuthHandler,
)
found_handlers = []
for handler in handlers:
if isinstance(handler, expected_handlers):
found_handlers.append(handler)
assert len(found_handlers) == 2
assert found_handlers[0].passwd.passwd[None] == {(('ansible.com', '/'),): ('user', None)}
def test_Request_open_username_in_url(urlopen_mock, install_opener_mock):
r = Request().open('GET', 'http://user2@ansible.com/')
opener = install_opener_mock.call_args[0][0]
handlers = opener.handlers
expected_handlers = (
urllib_request.HTTPBasicAuthHandler,
urllib_request.HTTPDigestAuthHandler,
)
found_handlers = []
for handler in handlers:
if isinstance(handler, expected_handlers):
found_handlers.append(handler)
assert found_handlers[0].passwd.passwd[None] == {(('ansible.com', '/'),): ('user2', '')}
def test_Request_open_username_force_basic(urlopen_mock, install_opener_mock):
r = Request().open('GET', 'http://ansible.com/', url_username='user', url_password='passwd', force_basic_auth=True)
opener = install_opener_mock.call_args[0][0]
handlers = opener.handlers
expected_handlers = (
urllib_request.HTTPBasicAuthHandler,
urllib_request.HTTPDigestAuthHandler,
)
found_handlers = []
for handler in handlers:
if isinstance(handler, expected_handlers):
found_handlers.append(handler)
assert len(found_handlers) == 0
args = urlopen_mock.call_args[0]
req = args[0]
assert req.headers.get('Authorization') == b'Basic dXNlcjpwYXNzd2Q='
def test_Request_open_auth_in_netloc(urlopen_mock, install_opener_mock):
r = Request().open('GET', 'http://user:passwd@ansible.com/')
args = urlopen_mock.call_args[0]
req = args[0]
assert req.get_full_url() == 'http://ansible.com/'
opener = install_opener_mock.call_args[0][0]
handlers = opener.handlers
expected_handlers = (
urllib_request.HTTPBasicAuthHandler,
urllib_request.HTTPDigestAuthHandler,
)
found_handlers = []
for handler in handlers:
if isinstance(handler, expected_handlers):
found_handlers.append(handler)
assert len(found_handlers) == 2
def test_Request_open_netrc(urlopen_mock, install_opener_mock, monkeypatch):
here = os.path.dirname(__file__)
monkeypatch.setenv('NETRC', os.path.join(here, 'fixtures/netrc'))
r = Request().open('GET', 'http://ansible.com/')
args = urlopen_mock.call_args[0]
req = args[0]
assert req.headers.get('Authorization') == b'Basic dXNlcjpwYXNzd2Q='
r = Request().open('GET', 'http://foo.ansible.com/')
args = urlopen_mock.call_args[0]
req = args[0]
assert 'Authorization' not in req.headers
monkeypatch.setenv('NETRC', os.path.join(here, 'fixtures/netrc.nonexistant'))
r = Request().open('GET', 'http://ansible.com/')
args = urlopen_mock.call_args[0]
req = args[0]
assert 'Authorization' not in req.headers
def test_Request_open_no_proxy(urlopen_mock, install_opener_mock, mocker):
build_opener_mock = mocker.patch('ansible.module_utils.urls.urllib_request.build_opener')
r = Request().open('GET', 'http://ansible.com/', use_proxy=False)
handlers = build_opener_mock.call_args[0]
found_handlers = []
for handler in handlers:
if isinstance(handler, urllib_request.ProxyHandler):
found_handlers.append(handler)
assert len(found_handlers) == 1
@pytest.mark.skipif(not HAS_SSLCONTEXT, reason="requires SSLContext")
def test_Request_open_no_validate_certs(urlopen_mock, install_opener_mock):
r = Request().open('GET', 'https://ansible.com/', validate_certs=False)
opener = install_opener_mock.call_args[0][0]
handlers = opener.handlers
ssl_handler = None
for handler in handlers:
if isinstance(handler, HTTPSClientAuthHandler):
ssl_handler = handler
break
assert ssl_handler is not None
inst = ssl_handler._build_https_connection('foo')
assert isinstance(inst, httplib.HTTPSConnection)
context = ssl_handler._context
assert context.protocol == ssl.PROTOCOL_SSLv23
if ssl.OP_NO_SSLv2:
assert context.options & ssl.OP_NO_SSLv2
assert context.options & ssl.OP_NO_SSLv3
assert context.verify_mode == ssl.CERT_NONE
assert context.check_hostname is False
def test_Request_open_client_cert(urlopen_mock, install_opener_mock):
here = os.path.dirname(__file__)
client_cert = os.path.join(here, 'fixtures/client.pem')
client_key = os.path.join(here, 'fixtures/client.key')
r = Request().open('GET', 'https://ansible.com/', client_cert=client_cert, client_key=client_key)
opener = install_opener_mock.call_args[0][0]
handlers = opener.handlers
ssl_handler = None
for handler in handlers:
if isinstance(handler, HTTPSClientAuthHandler):
ssl_handler = handler
break
assert ssl_handler is not None
assert ssl_handler.client_cert == client_cert
assert ssl_handler.client_key == client_key
https_connection = ssl_handler._build_https_connection('ansible.com')
assert https_connection.key_file == client_key
assert https_connection.cert_file == client_cert
def test_Request_open_cookies(urlopen_mock, install_opener_mock):
r = Request().open('GET', 'https://ansible.com/', cookies=cookiejar.CookieJar())
opener = install_opener_mock.call_args[0][0]
handlers = opener.handlers
cookies_handler = None
for handler in handlers:
if isinstance(handler, urllib_request.HTTPCookieProcessor):
cookies_handler = handler
break
assert cookies_handler is not None
def test_Request_open_invalid_method(urlopen_mock, install_opener_mock):
r = Request().open('UNKNOWN', 'https://ansible.com/')
args = urlopen_mock.call_args[0]
req = args[0]
assert req.data is None
assert req.get_method() == 'UNKNOWN'
# assert r.status == 504
def test_Request_open_custom_method(urlopen_mock, install_opener_mock):
r = Request().open('DELETE', 'https://ansible.com/')
args = urlopen_mock.call_args[0]
req = args[0]
assert isinstance(req, RequestWithMethod)
def test_Request_open_user_agent(urlopen_mock, install_opener_mock):
r = Request().open('GET', 'https://ansible.com/', http_agent='ansible-tests')
args = urlopen_mock.call_args[0]
req = args[0]
assert req.headers.get('User-agent') == 'ansible-tests'
def test_Request_open_force(urlopen_mock, install_opener_mock):
r = Request().open('GET', 'https://ansible.com/', force=True, last_mod_time=datetime.datetime.now())
args = urlopen_mock.call_args[0]
req = args[0]
assert req.headers.get('Cache-control') == 'no-cache'
assert 'If-modified-since' not in req.headers
def test_Request_open_last_mod(urlopen_mock, install_opener_mock):
now = datetime.datetime.now()
r = Request().open('GET', 'https://ansible.com/', last_mod_time=now)
args = urlopen_mock.call_args[0]
req = args[0]
assert req.headers.get('If-modified-since') == now.strftime('%a, %d %b %Y %H:%M:%S -0000')
def test_Request_open_headers_not_dict(urlopen_mock, install_opener_mock):
with pytest.raises(ValueError):
Request().open('GET', 'https://ansible.com/', headers=['bob'])
def test_Request_init_headers_not_dict(urlopen_mock, install_opener_mock):
with pytest.raises(ValueError):
Request(headers=['bob'])
@pytest.mark.parametrize('method,kwargs', [
('get', {}),
('options', {}),
('head', {}),
('post', {'data': None}),
('put', {'data': None}),
('patch', {'data': None}),
('delete', {}),
])
def test_methods(method, kwargs, mocker):
expected = method.upper()
open_mock = mocker.patch('ansible.module_utils.urls.Request.open')
request = Request()
getattr(request, method)('https://ansible.com')
open_mock.assert_called_once_with(expected, 'https://ansible.com', **kwargs)
def test_open_url(urlopen_mock, install_opener_mock, mocker):
req_mock = mocker.patch('ansible.module_utils.urls.Request.open')
open_url('https://ansible.com/')
req_mock.assert_called_once_with('GET', 'https://ansible.com/', data=None, headers=None, use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None,
force_basic_auth=False, follow_redirects='urllib2',
client_cert=None, client_key=None, cookies=None, use_gssapi=False,
unix_socket=None, ca_path=None)
| cchurch/ansible | test/units/module_utils/urls/test_Request.py | Python | gpl-3.0 | 14,937 |
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http:# mozilla.org/MPL/2.0/.
#
# Contact: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import absolute_import, division, unicode_literals
from jx_base.expressions import EqOp as EqOp_
from jx_python.expressions._utils import Python
class EqOp(EqOp_):
def to_python(self, not_null=False, boolean=False, many=False):
return (
"("
+ (self.rhs).to_python()
+ ") in listwrap("
+ (self.lhs).to_python()
+ ")"
)
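# Illustrative result (the operand renderings below are hypothetical): if the
# lhs renders to "row['color']" and the rhs renders to "'red'", to_python()
# yields the source string "('red') in listwrap(row['color'])", i.e. equality
# is evaluated as membership over the (possibly multi-valued) left-hand side.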
| klahnakoski/jx-sqlite | vendor/jx_python/expressions/eq_op.py | Python | mpl-2.0 | 691 |
from oioioi.contests.models import Submission
from oioioi.dashboard.registry import dashboard_registry
from oioioi.disqualification.controllers import \
DisqualificationContestControllerMixin
@dashboard_registry.register_decorator(order=10)
def disqualification_fragment(request):
if not request.user.is_authenticated():
return None
cc = request.contest.controller
if not isinstance(cc, DisqualificationContestControllerMixin):
return None
submissions = Submission.objects \
.filter(problem_instance__contest=request.contest) \
.order_by('-date').select_related()
submissions = cc.filter_my_visible_submissions(request, submissions)
return cc.render_disqualifications(request, submissions)
| papedaniel/oioioi | oioioi/disqualification/views.py | Python | gpl-3.0 | 755 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# modesetting.py
#
# Copyright © 2013-2015 Antergos
#
# This file is part of Cnchi.
#
# Cnchi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Cnchi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cnchi; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
""" Modesetting driver installation """
""" Hardware-agnostic DDX driver that (theoretically) works with
any hardware having a DRM/KMS graphics driver """
from hardware.hardware import Hardware
CLASS_NAME = "ModeSetting"
CLASS_ID = "0x0300"
DEVICES = []
class ModeSetting(Hardware):
def __init__(self):
Hardware.__init__(self)
def get_packages(self):
        # following upstream, xf86-video-modesetting is now provided with the xorg-server package.
# return ["xf86-video-modesetting"]
return []
def post_install(self, dest_dir):
pass
def check_device(self, class_id, vendor_id, product_id):
""" Checks if the driver supports this device """
if class_id == CLASS_ID:
# Should return true only for KMS able devices (all open drivers)
return True
else:
return False
def get_name(self):
return CLASS_NAME
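# Illustrative check (the vendor/product IDs below are arbitrary example values):
#
#   ModeSetting().check_device("0x0300", "0x8086", "0x0166")   # True: display-class device
#   ModeSetting().check_device("0x0200", "0x8086", "0x1533")   # False: not a display class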
| yucefsourani/Cnchi | cnchi/hardware/modesetting.py | Python | gpl-3.0 | 1,768 |
from __future__ import annotations
import xia2.Modules.CctbxFrenchWilson
from xia2.Driver.DefaultDriver import DefaultDriver
def FrenchWilson(DriverType=None):
"""A factory for FrenchWilsonWrapper classes."""
class FrenchWilsonWrapper(DefaultDriver):
"""A wrapper for cctbx French and Wilson analysis."""
def __init__(self):
super().__init__()
self._executable = "cctbx_FrenchWilson"
self._outbuffer = []
self._anomalous = False
self._nres = 0
# should we do wilson scaling?
self._wilson = True
self._b_factor = 0.0
self._moments = None
self._wilson_fit_grad = 0.0
self._wilson_fit_grad_sd = 0.0
self._wilson_fit_m = 0.0
self._wilson_fit_m_sd = 0.0
self._wilson_fit_range = None
# numbers of reflections in and out, and number of absences
# counted
self._nref_in = 0
self._nref_out = 0
self._nabsent = 0
self._xmlout = None
def set_anomalous(self, anomalous):
self._anomalous = anomalous
def set_wilson(self, wilson):
"""Set the use of Wilson scaling - if you set this to False
Wilson scaling will be switched off..."""
self._wilson = wilson
def set_hklin(self, hklin):
self._hklin = hklin
def get_hklin(self):
return self._hklin
def set_hklout(self, hklout):
self._hklout = hklout
def get_hklout(self):
return self._hklout
def check_hklout(self):
return self.checkHklout()
def get_xmlout(self):
return self._xmlout
def truncate(self):
"""Actually perform the truncation procedure."""
self.add_command_line(self._hklin)
self.add_command_line("hklout=%s" % self._hklout)
if self._anomalous:
self.add_command_line("anomalous=true")
else:
self.add_command_line("anomalous=false")
output = xia2.Modules.CctbxFrenchWilson.do_french_wilson(
self._hklin, self._hklout, self._anomalous
)
self._outbuffer = output.splitlines(True)
self.close_wait()
lines = self.get_all_output()
for i, line in enumerate(lines):
if "ML estimate of overall B value:" in line:
self._b_factor = float(lines[i + 1].strip().split()[0])
def get_b_factor(self):
return self._b_factor
def get_wilson_fit(self):
return (
self._wilson_fit_grad,
self._wilson_fit_grad_sd,
self._wilson_fit_m,
self._wilson_fit_m_sd,
)
def get_wilson_fit_range(self):
return self._wilson_fit_range
def get_moments(self):
return self._moments
def get_nref_in(self):
return self._nref_in
def get_nref_out(self):
return self._nref_out
def get_nabsent(self):
return self._nabsent
def start(self):
pass
def close(self):
pass
def _output(self):
try:
return self._outbuffer.pop(0)
except IndexError:
return ""
return FrenchWilsonWrapper()
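# Illustrative usage sketch (file names are assumptions, not xia2 defaults):
#
#   fw = FrenchWilson()
#   fw.set_hklin("scaled.mtz")         # intensities in (assumed file name)
#   fw.set_hklout("truncated.mtz")     # amplitudes out (assumed file name)
#   fw.set_anomalous(True)
#   fw.truncate()
#   print(fw.get_b_factor())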
| xia2/xia2 | src/xia2/Wrappers/XIA/FrenchWilson.py | Python | bsd-3-clause | 3,508 |
from django.views.generic.list import ListView
from django.utils import timezone
from django.db.models import Sum, F, Q
from entries.models import Entry, Category
import json
class ReportingView(ListView):
model = Entry
template_name = "admin/reporting.html"
entry_admin = None # the Entry ModelAdmin
def get_queryset(self):
qs = super().get_queryset()
filter_names = list(self.request.GET)
for filter_name in filter_names:
if filter_name == 'q':
continue
filter = {}
value = self.request.GET.get(filter_name)
if value:
filter[filter_name] = value
qs = qs.filter(**filter)
search = self.request.GET.get('q')
if search:
qs = qs.filter(title__contains=search)
return qs
def get_categories(self):
filters = []
filter_names = list(self.request.GET)
for filter_name in filter_names:
if filter_name == 'q':
continue
filter = {}
value = self.request.GET.get(filter_name)
if value:
filter['entry__'+filter_name] = value
filters.append(Q(**filter))
return Category.objects \
.annotate(paid_amount=Sum(F('entry__amount')/F('entry___num_people'))) \
.filter(*filters).order_by('-paid_amount').all()
def get_summary(self):
"""
* sum de paid_amount
* sum des paid_amount des entries paid_by: "nico" beneficiary: "nath"
* sum des paid_amount des entries paid_by: "nath" beneficiary : "nico"
"""
self.balance = round(sum(i.paid_amount for i in self.object_list.all()), 2) # XXX
qs = self.object_list.annotate(paid_amount=F('amount')/F('_num_people'))
total = round(qs.aggregate(total=Sum('paid_amount'))['total'], 2)
nath_owe_nico = qs.filter(paid_by__username="nico", for_people__username="nath") \
.aggregate(nath_owe_nico=Sum('paid_amount'))['nath_owe_nico'] or 0
if nath_owe_nico:
nath_owe_nico = round(nath_owe_nico, 2)
nico_owe_nath = qs.filter(paid_by__username="nath", for_people__username="nico") \
.aggregate(nico_owe_nath=Sum('paid_amount'))['nico_owe_nath'] or 0
if nico_owe_nath:
nico_owe_nath = round(nico_owe_nath, 2)
final_owe_person = "nobody"
final_owe_other_person = ""
final_owe_amount = 0
if nath_owe_nico > nico_owe_nath:
final_owe_person = "nath"
final_owe_other_person = "nico"
final_owe_amount = nath_owe_nico - nico_owe_nath
elif nico_owe_nath > nath_owe_nico:
final_owe_person = "nico"
final_owe_other_person = "nath"
final_owe_amount = nico_owe_nath - nath_owe_nico
return {
'total': total,
'nath_owe_nico': nath_owe_nico,
'nico_owe_nath': nico_owe_nath,
'final_owe_person': final_owe_person,
'final_owe_other_person': final_owe_other_person,
'final_owe_amount': final_owe_amount
}
def categories_chart_options(self):
data = [{'name': cat.title, 'y': round(cat.paid_amount, 2)} for cat in self.get_categories()]
return json.dumps({
"chart": {
"plotBackgroundColor": None,
"plotBorderWidth": None,
"plotShadow": False,
"type": 'pie'
},
"title": {
"text": 'Categories'
},
"tooltip": {
"pointFormat": '{series.name}: <b>{point.percentage:.1f}%</b>'
},
"series": [{
"name": 'Categories',
"colorByPoint": True,
"data": data
}]
})
def get_context_data(self, **kwargs):
context = super(ReportingView, self).get_context_data(**kwargs)
context.update(self.entry_admin.changelist_view(self.request).context_data)
context['now'] = timezone.now()
context['categories_chart_options'] = self.categories_chart_options()
context.update(self.get_summary())
return context
| namlook/moneyland | entries/admin_views.py | Python | mit | 4,295 |
# -*- coding: utf-8 -*-
# Copyright (c) 2019 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
class ModuleDocFragment(object):
    # Standard template documentation fragment, used by template and win_template.
DOCUMENTATION = r'''
short_description: Template a file out to a remote server
description:
- Templates are processed by the L(Jinja2 templating language,http://jinja.pocoo.org/docs/).
- Documentation on the template formatting can be found in the
L(Template Designer Documentation,http://jinja.pocoo.org/docs/templates/).
- Additional variables listed below can be used in templates.
- C(ansible_managed) (configurable via the C(defaults) section of C(ansible.cfg)) contains a string which can be used to
describe the template name, host, modification time of the template file and the owner uid.
- C(template_host) contains the node name of the template's machine.
- C(template_uid) is the numeric user id of the owner.
- C(template_path) is the path of the template.
- C(template_fullpath) is the absolute path of the template.
- C(template_destpath) is the path of the template on the remote system (added in 2.8).
- C(template_run_date) is the date that the template was rendered.
options:
src:
description:
- Path of a Jinja2 formatted template on the Ansible controller.
- This can be a relative or an absolute path.
- The file must be encoded with C(utf-8) but I(output_encoding) can be used to control the encoding of the output
template.
type: path
required: yes
dest:
description:
- Location to render the template to on the remote machine.
type: path
required: yes
backup:
description:
- Determine whether a backup should be created.
- When set to C(yes), create a backup file including the timestamp information
so you can get the original file back if you somehow clobbered it incorrectly.
type: bool
default: no
newline_sequence:
description:
- Specify the newline sequence to use for templating files.
type: str
choices: [ '\n', '\r', '\r\n' ]
default: '\n'
version_added: '2.4'
block_start_string:
description:
- The string marking the beginning of a block.
type: str
default: '{%'
version_added: '2.4'
block_end_string:
description:
- The string marking the end of a block.
type: str
default: '%}'
version_added: '2.4'
variable_start_string:
description:
- The string marking the beginning of a print statement.
type: str
default: '{{'
version_added: '2.4'
variable_end_string:
description:
- The string marking the end of a print statement.
type: str
default: '}}'
version_added: '2.4'
trim_blocks:
description:
- Determine when newlines should be removed from blocks.
- When set to C(yes) the first newline after a block is removed (block, not variable tag!).
type: bool
default: yes
version_added: '2.4'
lstrip_blocks:
description:
- Determine when leading spaces and tabs should be stripped.
- When set to C(yes) leading spaces and tabs are stripped from the start of a line to a block.
- This functionality requires Jinja 2.7 or newer.
type: bool
default: no
version_added: '2.6'
force:
description:
- Determine when the file is being transferred if the destination already exists.
- When set to C(yes), replace the remote file when contents are different than the source.
- When set to C(no), the file will only be transferred if the destination does not exist.
type: bool
default: yes
output_encoding:
description:
- Overrides the encoding used to write the template file defined by C(dest).
- It defaults to C(utf-8), but any encoding supported by python can be used.
- The source template file must always be encoded using C(utf-8), for homogeneity.
type: str
default: utf-8
version_added: '2.7'
notes:
- Including a string that uses a date in the template will result in the template being marked 'changed' each time.
- Since Ansible 0.9, templates are loaded with C(trim_blocks=True).
- >
Also, you can override jinja2 settings by adding a special header to template file.
i.e. C(#jinja2:variable_start_string:'[%', variable_end_string:'%]', trim_blocks: False)
which changes the variable interpolation markers to C([% var %]) instead of C({{ var }}).
This is the best way to prevent evaluation of things that look like, but should not be Jinja2.
- Using raw/endraw in Jinja2 will not work as you expect because templates in Ansible are recursively
evaluated.
- To find Byte Order Marks in files, use C(Format-Hex <file> -Count 16) on Windows, and use C(od -a -t x1 -N 16 <file>)
on Linux.
'''
| aperigault/ansible | lib/ansible/plugins/doc_fragments/template_common.py | Python | gpl-3.0 | 4,909 |
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='TinyContent',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=100)),
('content', models.TextField()),
],
options={
'verbose_name': 'Content block',
},
),
]
| dominicrodger/django-tinycontent | tinycontent/migrations/0001_initial.py | Python | bsd-3-clause | 570 |
import synapse.tests.utils as s_t_utils
import synapse.lookup.iso3166 as s_l_country
class CountryLookTest(s_t_utils.SynTest):
def test_lookup_countries(self):
self.eq(s_l_country.country2iso.get('united states of america'), 'us')
self.eq(s_l_country.country2iso.get('mexico'), 'mx')
self.eq(s_l_country.country2iso.get('vertexLandia'), None)
| vertexproject/synapse | synapse/tests/test_lookup_iso3166.py | Python | apache-2.0 | 375 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-08 18:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('silk', '0002_auto_update_uuid4_id_field'),
]
operations = [
migrations.AddField(
model_name='request',
name='prof_file',
field=models.FileField(null=True, upload_to=b''),
),
]
| crunchr/silk | silk/migrations/0003_request_prof_file.py | Python | mit | 468 |
from __future__ import absolute_import, division, print_function, unicode_literals
import inspect
# All possible class names must be inserted into the globals collection.
# If there is a better way of doing this, please suggest!
from amaascore.parties.broker import Broker
from amaascore.parties.company import Company
from amaascore.parties.asset_manager import AssetManager
from amaascore.parties.exchange import Exchange
from amaascore.parties.fund import Fund
from amaascore.parties.government_agency import GovernmentAgency
from amaascore.parties.individual import Individual
from amaascore.parties.organisation import Organisation
from amaascore.parties.party import Party
from amaascore.parties.sub_fund import SubFund
def json_to_party(json_to_convert):
# Iterate through the party children, converting the various JSON attributes into the relevant class type
for (collection_name, clazz) in Party.children().items():
children = json_to_convert.pop(collection_name, {})
collection = {}
for (child_type, child_json) in children.items():
# Handle the case where there are multiple children for a given type - e.g. links
if isinstance(child_json, list):
child = set()
for child_json_in_list in child_json:
child.add(clazz(**child_json_in_list))
else:
child = clazz(**child_json)
collection[child_type] = child
json_to_convert[collection_name] = collection
clazz = globals().get(json_to_convert.get('party_type'))
if not clazz:
raise ValueError('Missing Party Type: %s' % json_to_convert.get('party_type'))
args = inspect.getargspec(clazz.__init__)
# Some fields are always added in, even though they're not explicitly part of the constructor
clazz_args = args.args + clazz.amaas_model_attributes()
# is not None is important so it includes zeros and False
constructor_dict = {arg: json_to_convert.get(arg) for arg in clazz_args
if json_to_convert.get(arg) is not None and arg != 'self'}
party = clazz(**constructor_dict)
return party
| amaas-fintech/amaas-core-sdk-python | amaascore/parties/utils.py | Python | apache-2.0 | 2,169 |
from gwpy.plotter import TimeSeriesPlot
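# Note: 'data' and 'b' are assumed to be the unfiltered and band-passed
# TimeSeries objects prepared in the earlier steps of this example.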
plot = TimeSeriesPlot(
data.crop(*data.span.contract(1)),
b.crop(*b.span.contract(1)),
figsize=[12, 8], sep=True, sharex=True)
plot.axes[0].set_title('LIGO-Hanford strain data around GW150914')
plot.axes[0].text(
1.0, 1.0, 'Unfiltered data',
transform=plot.axes[0].transAxes, ha='right')
plot.axes[0].set_ylabel('Amplitude [strain]', y=-0.2)
plot.axes[1].text(
1.0, 1.0, '50-250\,Hz bandpass, notches at 60, 120, 180 Hz',
transform=plot.axes[1].transAxes, ha='right')
plot.show() | gwpy/gwpy.github.io | docs/v0.5/examples/signal/gw150914-6.py | Python | gpl-3.0 | 547 |
#!/usr/bin/python
# Print the square of a number using a lambda expression
g = lambda x: x**2
print g(8)
| ramesharpu/python | basic-coding/comprehensions-not-complete/lambda/power.py | Python | gpl-2.0 | 83 |
# -*- coding: utf-8 -*-
#Copyright (C) Fiz Vazquez vud1@sindominio.net
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 2
#of the License, or (at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os, sys
import logging
from .lib.xmlUtils import XMLParser
from .gui.windowextensions import WindowExtensions
class Extension:
def __init__(self, data_path = None, parent = None):
self.data_path=data_path
self.parent = parent
self.pytrainer_main = parent
def getActiveExtensions(self):
retorno = []
for extension in self.getExtensionList():
if self.getExtensionInfo(extension[0])[2] == "1":
retorno.append(extension[0])
return retorno
def manageExtensions(self):
ExtensionList = self.getExtensionList()
windowextension = WindowExtensions(self.data_path, self)
windowextension.setList(ExtensionList)
windowextension.run()
def getExtensionList(self):
extensiondir = self.data_path+"/extensions"
extensionList = []
for extension in os.listdir(extensiondir):
extensionxmlfile = extensiondir+"/"+extension+"/conf.xml"
if os.path.isfile(extensionxmlfile):
extensioninfo = XMLParser(extensionxmlfile)
name = extensioninfo.getValue("pytrainer-extension","name")
description = extensioninfo.getValue("pytrainer-extension","description")
extensionList.append((extensiondir+"/"+extension,name,description))
return extensionList
def getExtensionInfo(self,pathExtension):
info = XMLParser(pathExtension+"/conf.xml")
name = info.getValue("pytrainer-extension","name")
description = info.getValue("pytrainer-extension","description")
code = info.getValue("pytrainer-extension","extensioncode")
extensiondir = self.pytrainer_main.profile.extensiondir
helpfile = pathExtension+"/"+info.getValue("pytrainer-extension","helpfile")
type = info.getValue("pytrainer-extension","type")
if not os.path.isfile(extensiondir+"/"+code+"/conf.xml"):
status = 0
else:
info = XMLParser(extensiondir+"/"+code+"/conf.xml")
status = info.getValue("pytrainer-extension","status")
#print name,description,status,helpfile,type
return name,description,status,helpfile,type
def getExtensionConfParams(self,pathExtension):
info = XMLParser(pathExtension+"/conf.xml")
code = info.getValue("pytrainer-extension","extensioncode")
extensiondir = self.pytrainer_main.profile.extensiondir
params = {}
if not os.path.isfile(extensiondir+"/"+code+"/conf.xml"):
prefs = info.getAllValues("conf-values")
prefs.append(("status","0"))
for pref in prefs:
params[pref[0]] = info.getValue("pytrainer-extension",pref[0])
else:
prefs = info.getAllValues("conf-values")
prefs.append(("status","0"))
info = XMLParser(extensiondir+"/"+code+"/conf.xml")
for pref in prefs:
params[pref[0]] = info.getValue("pytrainer-extension",pref[0])
#params.append((pref[0],info.getValue("pytrainer-extension",pref[0])))
return params
def setExtensionConfParams(self,pathExtension,savedOptions):
info = XMLParser(pathExtension+"/conf.xml")
code = info.getValue("pytrainer-extension","extensioncode")
extensiondir = self.pytrainer_main.profile.extensiondir+"/"+code
if not os.path.isdir(extensiondir):
os.mkdir(extensiondir)
if not os.path.isfile(extensiondir+"/conf.xml"):
savedOptions.append(("status","0"))
info = XMLParser(extensiondir+"/conf.xml")
info.createXMLFile("pytrainer-extension",savedOptions)
def loadExtension(self,pathExtension):
info = XMLParser(pathExtension+"/conf.xml")
txtbutton = info.getValue("pytrainer-extension","extensionbutton")
name = info.getValue("pytrainer-extension","name")
type = info.getValue("pytrainer-extension","type")
#print "Loading Extension %s" %name
return txtbutton,pathExtension,type
def getCodeConfValue(self,code,value):
extensiondir = self.pytrainer_main.profile.extensiondir
info = XMLParser(extensiondir+"/"+code+"/conf.xml")
return info.getValue("pytrainer-extension",value)
def importClass(self, pathExtension):
logging.debug('>>')
info = XMLParser(pathExtension+"/conf.xml")
#import extension
extension_dir = os.path.realpath(pathExtension)
extension_filename = info.getValue("pytrainer-extension","executable")
extension_classname = info.getValue("pytrainer-extension","extensioncode")
extension_type = info.getValue("pytrainer-extension","type")
options = self.getExtensionConfParams(pathExtension)
logging.debug("Extension Filename: %s", extension_filename )
logging.debug("Extension Classname: %s", extension_classname)
logging.debug("Extension Type: %s", extension_type)
logging.debug("Extension options: %s", options)
sys.path.insert(0, extension_dir)
module = __import__(extension_filename)
extensionMain = getattr(module, extension_classname)
logging.debug('<<')
return extensionMain(parent=self, pytrainer_main=self.parent, conf_dir=self.pytrainer_main.profile.confdir, options=options)
| pytrainer/pytrainer | pytrainer/extension.py | Python | gpl-2.0 | 5,489 |
# Topydo - A todo.txt client written in Python.
# Copyright (C) 2014 - 2015 Bram Schoenmakers <bram@topydo.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
from topydo.lib.MultiCommand import MultiCommand
from topydo.lib.prettyprinters.Numbers import PrettyPrinterNumbers
from topydo.lib.printers.PrettyPrinter import PrettyPrinter
class DCommand(MultiCommand):
"""
A common class for the 'do' and 'del' operations, because they're quite
alike.
"""
def __init__(self, p_args, p_todolist, #pragma: no branch
p_out=lambda a: None,
p_err=lambda a: None,
p_prompt=lambda a: None):
super().__init__(
p_args, p_todolist, p_out, p_err, p_prompt)
self.force = False
self._delta = []
self.condition = lambda _: True
self.condition_failed_text = ""
def get_flags(self):
return ("f", ["force"])
def process_flag(self, p_option, p_value):
if p_option == "-f" or p_option == "--force":
self.force = True
def _uncompleted_children(self, p_todo):
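        # Direct children of p_todo that are still open, sorted by their
        # number in the todo list.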
return sorted(
[t for t in self.todolist.children(p_todo) if not t.is_completed()],
key=self.todolist.number
)
def _print_list(self, p_todos):
printer = PrettyPrinter()
printer.add_filter(PrettyPrinterNumbers(self.todolist))
self.out(printer.print_list(p_todos))
def prompt_text(self):
raise NotImplementedError
def prefix(self):
raise NotImplementedError
def _process_subtasks(self, p_todo):
children = self._uncompleted_children(p_todo)
if children:
self._print_list(children)
if not self.force:
confirmation = self.prompt(self.prompt_text())
if not self.force and re.match('^y(es)?$', confirmation, re.I):
for child in children:
self.execute_specific_core(child)
self.out(self.prefix() + self.printer.print_todo(child))
def _print_unlocked_todos(self):
if self._delta:
self.out("The following todo item(s) became active:")
self._print_list(self._delta)
def _active_todos(self):
"""
Returns a list of active todos, taking uncompleted subtodos into
account.
        The stored length of the todolist is taken into account, to prevent new
        todos created by recurrence from popping up as newly activated tasks.
Since these todos pop up at the end of the list, we cut off the list
just before that point.
"""
return [todo for todo in self.todolist.todos()
if not self._uncompleted_children(todo) and todo.is_active()]
def execute_specific(self, _):
raise NotImplementedError
def execute_specific_core(self, p_todo):
"""
The core operation on the todo itself. Also used to operate on
child/parent tasks.
"""
raise NotImplementedError
def _execute_multi_specific(self):
old_active = self._active_todos()
for todo in self.todos:
if todo and self.condition(todo):
self._process_subtasks(todo)
self.execute_specific(todo)
else:
self.error(self.condition_failed_text)
current_active = self._active_todos()
self._delta = [todo for todo in current_active
if todo not in old_active]
def execute_post_archive_actions(self):
self._print_unlocked_todos()
| bram85/topydo | topydo/lib/DCommand.py | Python | gpl-3.0 | 4,194 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import Queue
import threading
import time
import sys
import socket
import optparse
from time import sleep
def ip2num(ip):
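    # Pack the four octets into a single 32-bit integer (big-endian).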
ip = [int(x) for x in ip.split('.')]
return ip[0] << 24 | ip[1] << 16 | ip[2] << 8 | ip[3]
def num2ip(num):
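    # Unpack a 32-bit integer back into dotted-quad notation.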
return '%s.%s.%s.%s' % ((num & 0xff000000) >> 24,(num & 0x00ff0000) >> 16,(num & 0x0000ff00) >> 8,num & 0x000000ff)
def ip_range(start, end):
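    # Expand the inclusive start..end range, skipping addresses ending in .0.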
return [num2ip(num) for num in range(ip2num(start), ip2num(end) + 1) if num & 0xff]
def scan_open_port_server(done, queue, ports, lock):
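    # Worker thread: keep pulling (host, port) pairs off the queue and report
    # any port that accepts a TCP connection within the 2 second timeout.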
while True:
host,port = queue.get()
connect = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
connect.settimeout(2)
try:
connect.connect((host, port))
lock.acquire()
print "%s open port %s %s" % (host, port, ports[port])
lock.release()
connect.close()
except Exception, error:
pass
done.put(None)
def start_scan(number, ips, ports):
lock = threading.Lock()
queue = Queue.Queue()
done_queue = Queue.Queue()
for host in ips:
for port in ports.keys():
queue.put((host,port))
while number:
number -= 1
create_thread = threading.Thread(target=scan_open_port_server, args=(done_queue, queue, ports, lock, ))
create_thread.setDaemon(True)
create_thread.start()
    # Wait until every (host, port) pair has been processed by the workers.
    while done_queue.qsize() < len(ips) * len(ports):
sleep(10)
if __name__ == '__main__':
usage="usage: l_scan.py -s 192.168.1.1 -e 192.168.1.254 -t 20"
parser = optparse.OptionParser(usage=usage)
parser.add_option("-t", "--threads", dest="threads",help="Maximum threads, default 20")
parser.add_option("-s", "--start-ip", dest="start_ip",help="start_ip")
parser.add_option("-e", "--end-ip", dest="end_ip",help="end_ip")
(options, args) = parser.parse_args()
if not options.start_ip and not options.end_ip:
parser.print_help()
sys.exit()
if options.threads is not None and int(options.threads) > 0:
thread_number= int(options.threads)
else:
thread_number= 20
start_ip =str(options.start_ip)
end_ip = str(options.end_ip)
# port_list = {80:"web",8080:"web",3311:"kangle",3312:"kangle",3389:"rdp",4440:"rundeck",5672:"rabbitMQ",5900:"vnc",6082:"varnish",7001:"weblogic",8161:"activeMQ",8649:"ganglia",9000:"fastcgi",9090:"ibm",9200:"elasticsearch",9300:"elasticsearch",9999:"amg",10050:"zabbix",11211:"memcache",27017:"mongodb",28017:"mondodb",3777:"",50000:"sap netweaver",50060:"hadoop",50070:"hadoop",21:"ftp",22:"ssh",23:"telnet",25:"smtp",53:"dns",123:"ntp",161:"snmp",8161:"snmp",162:"snmp",389:"ldap",443:"ssl",512:"rlogin",513:"rlogin",873:"rsync",1433:"mssql",1080:"socks",1521:"oracle",1900:"bes",2049:"nfs",2601:"zebra",2604:"zebra",2082:"cpanle",2083:"cpanle",3128:"squid",3312:"squid",3306:"mysql",4899:"radmin",8834:'nessus',4848:'glashfish'}
port_list = {
80: "web"
}
start_time = time.time()
ip_list = ip_range(start_ip, end_ip)
print "Start %s ip..." % str(len(ip_list))
start_scan(thread_number, ip_list, port_list)
print "End %.2f" % float(time.time() - start_time)
| LxiaoGirl/hack | 扫描脚本/端口扫描/lscan_port.py | Python | gpl-2.0 | 3,234 |
class Token:
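    # Minimal token container: 'type' labels the kind of token, 'data' holds its value.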
def __init__(self, type, data):
self.type = type
self.data = data
| fredmorcos/attic | snippets/python/lex-parse/token.py | Python | isc | 84 |
# (c) 2012, Jeroen Hoekx <jeroen@hoekx.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import crypt
import glob
import hashlib
import itertools
import json
import ntpath
import os.path
import re
import string
import sys
import uuid
from collections import MutableMapping, MutableSequence
from datetime import datetime
from functools import partial
from random import Random, SystemRandom, shuffle
import yaml
from jinja2.filters import environmentfilter, do_groupby as _do_groupby
try:
import passlib.hash
HAS_PASSLIB = True
except:
HAS_PASSLIB = False
from ansible import errors
from ansible.module_utils.six import iteritems, string_types, integer_types
from ansible.module_utils.six.moves import reduce, shlex_quote
from ansible.module_utils._text import to_bytes, to_text
from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.utils.hashing import md5s, checksum_s
from ansible.utils.unicode import unicode_wrap
from ansible.utils.vars import merge_hash
from ansible.vars.hostvars import HostVars
UUID_NAMESPACE_ANSIBLE = uuid.UUID('361E6D51-FAEC-444A-9079-341386DA8E2E')
class AnsibleJSONEncoder(json.JSONEncoder):
'''
Simple encoder class to deal with JSON encoding of internal
types like HostVars
'''
def default(self, o):
if isinstance(o, HostVars):
return dict(o)
else:
return super(AnsibleJSONEncoder, self).default(o)
def to_yaml(a, *args, **kw):
'''Make verbose, human readable yaml'''
transformed = yaml.dump(a, Dumper=AnsibleDumper, allow_unicode=True, **kw)
return to_text(transformed)
def to_nice_yaml(a, indent=4, *args, **kw):
'''Make verbose, human readable yaml'''
transformed = yaml.dump(a, Dumper=AnsibleDumper, indent=indent, allow_unicode=True, default_flow_style=False, **kw)
return to_text(transformed)
def to_json(a, *args, **kw):
''' Convert the value to JSON '''
return json.dumps(a, cls=AnsibleJSONEncoder, *args, **kw)
def to_nice_json(a, indent=4, *args, **kw):
'''Make verbose, human readable JSON'''
# python-2.6's json encoder is buggy (can't encode hostvars)
if sys.version_info < (2, 7):
try:
import simplejson
except ImportError:
pass
else:
try:
major = int(simplejson.__version__.split('.')[0])
except:
pass
else:
if major >= 2:
return simplejson.dumps(a, indent=indent, sort_keys=True, *args, **kw)
try:
return json.dumps(a, indent=indent, sort_keys=True, cls=AnsibleJSONEncoder, *args, **kw)
except:
# Fallback to the to_json filter
return to_json(a, *args, **kw)
def to_bool(a):
''' return a bool for the arg '''
if a is None or isinstance(a, bool):
return a
if isinstance(a, string_types):
a = a.lower()
if a in ('yes', 'on', '1', 'true', 1):
return True
return False
def to_datetime(string, format="%Y-%d-%m %H:%M:%S"):
return datetime.strptime(string, format)
def quote(a):
''' return its argument quoted for shell usage '''
return shlex_quote(a)
def fileglob(pathname):
''' return list of matched regular files for glob '''
return [ g for g in glob.glob(pathname) if os.path.isfile(g) ]
def regex_replace(value='', pattern='', replacement='', ignorecase=False):
''' Perform a `re.sub` returning a string '''
value = to_text(value, errors='surrogate_or_strict', nonstring='simplerepr')
if ignorecase:
flags = re.I
else:
flags = 0
_re = re.compile(pattern, flags=flags)
return _re.sub(replacement, value)
def regex_findall(value, regex, multiline=False, ignorecase=False):
''' Perform re.findall and return the list of matches '''
flags = 0
if ignorecase:
flags |= re.I
if multiline:
flags |= re.M
return re.findall(regex, value, flags)
def regex_search(value, regex, *args, **kwargs):
''' Perform re.search and return the list of matches or a backref '''
groups = list()
for arg in args:
if arg.startswith('\\g'):
match = re.match(r'\\g<(\S+)>', arg).group(1)
groups.append(match)
elif arg.startswith('\\'):
match = int(re.match(r'\\(\d+)', arg).group(1))
groups.append(match)
else:
raise errors.AnsibleFilterError('Unknown argument')
flags = 0
if kwargs.get('ignorecase'):
flags |= re.I
if kwargs.get('multiline'):
flags |= re.M
match = re.search(regex, value, flags)
if match:
if not groups:
return match.group()
else:
items = list()
for item in groups:
items.append(match.group(item))
return items
def ternary(value, true_val, false_val):
''' value ? true_val : false_val '''
if value:
return true_val
else:
return false_val
def regex_escape(string):
'''Escape all regular expressions special characters from STRING.'''
return re.escape(string)
def from_yaml(data):
if isinstance(data, string_types):
return yaml.safe_load(data)
return data
@environmentfilter
def rand(environment, end, start=None, step=None, seed=None):
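    # With an integer 'end' this behaves like randrange(start, end, step);
    # with a sequence it picks a random element instead.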
if seed is None:
r = SystemRandom()
else:
r = Random(seed)
if isinstance(end, integer_types):
if not start:
start = 0
if not step:
step = 1
return r.randrange(start, end, step)
elif hasattr(end, '__iter__'):
if start or step:
raise errors.AnsibleFilterError('start and step can only be used with integer values')
return r.choice(end)
else:
raise errors.AnsibleFilterError('random can only be used on sequences and integers')
def randomize_list(mylist, seed=None):
try:
mylist = list(mylist)
if seed:
r = Random(seed)
r.shuffle(mylist)
else:
shuffle(mylist)
except:
pass
return mylist
def get_hash(data, hashtype='sha1'):
try: # see if hash is supported
h = hashlib.new(hashtype)
except:
return None
h.update(to_bytes(data, errors='surrogate_then_strict'))
return h.hexdigest()
def get_encrypted_password(password, hashtype='sha512', salt=None):
# TODO: find a way to construct dynamically from system
cryptmethod= {
'md5': '1',
'blowfish': '2a',
'sha256': '5',
'sha512': '6',
}
if hashtype in cryptmethod:
if salt is None:
r = SystemRandom()
if hashtype in ['md5']:
saltsize = 8
else:
saltsize = 16
saltcharset = string.ascii_letters + string.digits + '/.'
salt = ''.join([r.choice(saltcharset) for _ in range(saltsize)])
if not HAS_PASSLIB:
if sys.platform.startswith('darwin'):
raise errors.AnsibleFilterError('|password_hash requires the passlib python module to generate password hashes on Mac OS X/Darwin')
saltstring = "$%s$%s" % (cryptmethod[hashtype],salt)
encrypted = crypt.crypt(password, saltstring)
else:
if hashtype == 'blowfish':
cls = passlib.hash.bcrypt
else:
cls = getattr(passlib.hash, '%s_crypt' % hashtype)
encrypted = cls.encrypt(password, salt=salt)
return encrypted
return None
def to_uuid(string):
return str(uuid.uuid5(UUID_NAMESPACE_ANSIBLE, str(string)))
def mandatory(a):
from jinja2.runtime import Undefined
''' Make a variable mandatory '''
if isinstance(a, Undefined):
raise errors.AnsibleFilterError('Mandatory variable not defined.')
return a
def combine(*terms, **kwargs):
recursive = kwargs.get('recursive', False)
if len(kwargs) > 1 or (len(kwargs) == 1 and 'recursive' not in kwargs):
raise errors.AnsibleFilterError("'recursive' is the only valid keyword argument")
for t in terms:
if not isinstance(t, dict):
raise errors.AnsibleFilterError("|combine expects dictionaries, got " + repr(t))
if recursive:
return reduce(merge_hash, terms)
else:
return dict(itertools.chain(*map(iteritems, terms)))
def comment(text, style='plain', **kw):
# Predefined comment types
comment_styles = {
'plain': {
'decoration': '# '
},
'erlang': {
'decoration': '% '
},
'c': {
'decoration': '// '
},
'cblock': {
'beginning': '/*',
'decoration': ' * ',
'end': ' */'
},
'xml': {
'beginning': '<!--',
'decoration': ' - ',
'end': '-->'
}
}
# Pointer to the right comment type
style_params = comment_styles[style]
if 'decoration' in kw:
prepostfix = kw['decoration']
else:
prepostfix = style_params['decoration']
# Default params
p = {
'newline': '\n',
'beginning': '',
'prefix': (prepostfix).rstrip(),
'prefix_count': 1,
'decoration': '',
'postfix': (prepostfix).rstrip(),
'postfix_count': 1,
'end': ''
}
# Update default params
p.update(style_params)
p.update(kw)
# Compose substrings for the final string
str_beginning = ''
if p['beginning']:
str_beginning = "%s%s" % (p['beginning'], p['newline'])
str_prefix = ''
if p['prefix']:
if p['prefix'] != p['newline']:
str_prefix = str(
"%s%s" % (p['prefix'], p['newline'])) * int(p['prefix_count'])
else:
str_prefix = str(
"%s" % (p['newline'])) * int(p['prefix_count'])
str_text = ("%s%s" % (
p['decoration'],
# Prepend each line of the text with the decorator
text.replace(
p['newline'], "%s%s" % (p['newline'], p['decoration'])))).replace(
# Remove trailing spaces when only decorator is on the line
"%s%s" % (p['decoration'], p['newline']),
"%s%s" % (p['decoration'].rstrip(), p['newline']))
str_postfix = p['newline'].join(
[''] + [p['postfix'] for x in range(p['postfix_count'])])
str_end = ''
if p['end']:
str_end = "%s%s" % (p['newline'], p['end'])
# Return the final string
return "%s%s%s%s%s" % (
str_beginning,
str_prefix,
str_text,
str_postfix,
str_end)
def extract(item, container, morekeys=None):
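    # Fetch container[item]; optional 'morekeys' drills further into nested
    # mappings and yields Undefined when a key is missing.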
from jinja2.runtime import Undefined
value = container[item]
if value is not Undefined and morekeys is not None:
if not isinstance(morekeys, list):
morekeys = [morekeys]
try:
value = reduce(lambda d, k: d[k], morekeys, value)
except KeyError:
value = Undefined()
return value
def failed(*a, **kw):
''' Test if task result yields failed '''
item = a[0]
if not isinstance(item, MutableMapping):
raise errors.AnsibleFilterError("|failed expects a dictionary")
rc = item.get('rc', 0)
failed = item.get('failed', False)
if rc != 0 or failed:
return True
else:
return False
def success(*a, **kw):
''' Test if task result yields success '''
return not failed(*a, **kw)
def changed(*a, **kw):
''' Test if task result yields changed '''
item = a[0]
if not isinstance(item, MutableMapping):
raise errors.AnsibleFilterError("|changed expects a dictionary")
if not 'changed' in item:
changed = False
if ('results' in item # some modules return a 'results' key
and isinstance(item['results'], MutableSequence)
and isinstance(item['results'][0], MutableMapping)):
for result in item['results']:
changed = changed or result.get('changed', False)
else:
changed = item.get('changed', False)
return changed
def skipped(*a, **kw):
''' Test if task result yields skipped '''
item = a[0]
if not isinstance(item, MutableMapping):
raise errors.AnsibleFilterError("|skipped expects a dictionary")
skipped = item.get('skipped', False)
return skipped
@environmentfilter
def do_groupby(environment, value, attribute):
"""Overridden groupby filter for jinja2, to address an issue with
    jinja2>=2.9.0,<2.9.5 where a namedtuple was returned whose
    repr prevents ansible.template.safe_eval.safe_eval from being
able to parse and eval the data.
jinja2<2.9.0,>=2.9.5 is not affected, as <2.9.0 uses a tuple, and
>=2.9.5 uses a standard tuple repr on the namedtuple.
    The adaptation here is to run the jinja2 `do_groupby` function and
    cast all of the namedtuples to regular tuples.
See https://github.com/ansible/ansible/issues/20098
We may be able to remove this in the future.
"""
return [tuple(t) for t in _do_groupby(environment, value, attribute)]
def b64encode(string):
return to_text(base64.b64encode(to_bytes(string, errors='surrogate_then_strict')))
def b64decode(string):
return to_text(base64.b64decode(to_bytes(string, errors='surrogate_then_strict')))
class FilterModule(object):
''' Ansible core jinja2 filters '''
def filters(self):
return {
# jinja2 overrides
'groupby': do_groupby,
# base 64
'b64decode': b64decode,
'b64encode': b64encode,
# uuid
'to_uuid': to_uuid,
# json
'to_json': to_json,
'to_nice_json': to_nice_json,
'from_json': json.loads,
# yaml
'to_yaml': to_yaml,
'to_nice_yaml': to_nice_yaml,
'from_yaml': from_yaml,
#date
'to_datetime': to_datetime,
# path
'basename': partial(unicode_wrap, os.path.basename),
'dirname': partial(unicode_wrap, os.path.dirname),
'expanduser': partial(unicode_wrap, os.path.expanduser),
'realpath': partial(unicode_wrap, os.path.realpath),
'relpath': partial(unicode_wrap, os.path.relpath),
'splitext': partial(unicode_wrap, os.path.splitext),
'win_basename': partial(unicode_wrap, ntpath.basename),
'win_dirname': partial(unicode_wrap, ntpath.dirname),
'win_splitdrive': partial(unicode_wrap, ntpath.splitdrive),
# value as boolean
'bool': to_bool,
# quote string for shell usage
'quote': quote,
# hash filters
# md5 hex digest of string
'md5': md5s,
# sha1 hex digeset of string
'sha1': checksum_s,
# checksum of string as used by ansible for checksuming files
'checksum': checksum_s,
# generic hashing
'password_hash': get_encrypted_password,
'hash': get_hash,
# file glob
'fileglob': fileglob,
# regex
'regex_replace': regex_replace,
'regex_escape': regex_escape,
'regex_search': regex_search,
'regex_findall': regex_findall,
# ? : ;
'ternary': ternary,
# list
# random stuff
'random': rand,
'shuffle': randomize_list,
# undefined
'mandatory': mandatory,
# merge dicts
'combine': combine,
# comment-style decoration
'comment': comment,
# array and dict lookups
'extract': extract,
# failure testing
'failed' : failed,
'failure' : failed,
'success' : success,
'succeeded' : success,
# changed testing
'changed' : changed,
'change' : changed,
# skip testing
'skipped' : skipped,
'skip' : skipped,
# debug
'type_debug': lambda o: o.__class__.__name__,
}
| Sodki/ansible | lib/ansible/plugins/filter/core.py | Python | gpl-3.0 | 17,081 |
from redash.query_runner import BaseSQLQueryRunner, BaseQueryRunner
from tests import BaseTestCase
from redash.models import db
from redash.utils import json_dumps
from redash.handlers.query_results import error_messages
class TestQueryResultsCacheHeaders(BaseTestCase):
def test_uses_cache_headers_for_specific_result(self):
query_result = self.factory.create_query_result()
query = self.factory.create_query(latest_query_data=query_result)
rv = self.make_request(
"get", "/api/queries/{}/results/{}.json".format(query.id, query_result.id)
)
self.assertIn("Cache-Control", rv.headers)
def test_doesnt_use_cache_headers_for_non_specific_result(self):
query_result = self.factory.create_query_result()
query = self.factory.create_query(latest_query_data=query_result)
rv = self.make_request("get", "/api/queries/{}/results.json".format(query.id))
self.assertNotIn("Cache-Control", rv.headers)
def test_returns_404_if_no_cached_result_found(self):
query = self.factory.create_query(latest_query_data=None)
rv = self.make_request("get", "/api/queries/{}/results.json".format(query.id))
self.assertEqual(404, rv.status_code)
class TestQueryResultsContentDispositionHeaders(BaseTestCase):
def test_supports_unicode(self):
query_result = self.factory.create_query_result()
query = self.factory.create_query(name="עברית", latest_query_data=query_result)
rv = self.make_request("get", "/api/queries/{}/results.json".format(query.id))
# This is what gunicorn will do with it
try:
rv.headers['Content-Disposition'].encode('ascii')
except Exception as e:
self.fail(repr(e))
class TestQueryResultListAPI(BaseTestCase):
def test_get_existing_result(self):
query_result = self.factory.create_query_result()
query = self.factory.create_query()
rv = self.make_request(
"post",
"/api/query_results",
data={
"data_source_id": self.factory.data_source.id,
"query": query.query_text,
},
)
self.assertEqual(rv.status_code, 200)
self.assertEqual(query_result.id, rv.json["query_result"]["id"])
def test_execute_new_query(self):
query_result = self.factory.create_query_result()
query = self.factory.create_query()
rv = self.make_request(
"post",
"/api/query_results",
data={
"data_source_id": self.factory.data_source.id,
"query": query.query_text,
"max_age": 0,
},
)
self.assertEqual(rv.status_code, 200)
self.assertNotIn("query_result", rv.json)
self.assertIn("job", rv.json)
def test_add_limit_change_query_sql(self):
ds = self.factory.create_data_source(
group=self.factory.org.default_group, type="pg"
)
query = self.factory.create_query(query_text="SELECT 2", data_source=ds)
query_result = self.factory.create_query_result(data_source=ds, query_hash=query.query_hash)
rv = self.make_request(
"post",
"/api/query_results",
data={
"data_source_id": ds.id,
"query": query.query_text,
"apply_auto_limit": True
},
)
self.assertEqual(rv.status_code, 200)
self.assertNotIn("query_result", rv.json)
self.assertIn("job", rv.json)
def test_add_limit_no_change_for_nonsql(self):
ds = self.factory.create_data_source(
group=self.factory.org.default_group, type="prometheus"
)
query = self.factory.create_query(query_text="SELECT 5", data_source=ds)
query_result = self.factory.create_query_result(data_source=ds, query_hash=query.query_hash)
rv = self.make_request(
"post",
"/api/query_results",
data={
"data_source_id": ds.id,
"query": query.query_text,
"apply_auto_limit": True
},
)
self.assertEqual(rv.status_code, 200)
self.assertEqual(query_result.id, rv.json["query_result"]["id"])
def test_execute_query_without_access(self):
group = self.factory.create_group()
db.session.commit()
user = self.factory.create_user(group_ids=[group.id])
query = self.factory.create_query()
rv = self.make_request(
"post",
"/api/query_results",
data={
"data_source_id": self.factory.data_source.id,
"query": query.query_text,
"max_age": 0,
},
user=user,
)
self.assertEqual(rv.status_code, 403)
self.assertIn("job", rv.json)
def test_execute_query_with_params(self):
query = "SELECT {{param}}"
rv = self.make_request(
"post",
"/api/query_results",
data={
"data_source_id": self.factory.data_source.id,
"query": query,
"max_age": 0,
},
)
self.assertEqual(rv.status_code, 400)
self.assertIn("job", rv.json)
rv = self.make_request(
"post",
"/api/query_results",
data={
"data_source_id": self.factory.data_source.id,
"query": query,
"parameters": {"param": 1},
"max_age": 0,
},
)
self.assertEqual(rv.status_code, 200)
self.assertIn("job", rv.json)
rv = self.make_request(
"post",
"/api/query_results?p_param=1",
data={
"data_source_id": self.factory.data_source.id,
"query": query,
"max_age": 0,
},
)
self.assertEqual(rv.status_code, 200)
self.assertIn("job", rv.json)
def test_execute_on_paused_data_source(self):
self.factory.data_source.pause()
rv = self.make_request(
"post",
"/api/query_results",
data={
"data_source_id": self.factory.data_source.id,
"query": "SELECT 1",
"max_age": 0,
},
)
self.assertEqual(rv.status_code, 400)
self.assertNotIn("query_result", rv.json)
self.assertIn("job", rv.json)
def test_execute_without_data_source(self):
rv = self.make_request(
"post", "/api/query_results", data={"query": "SELECT 1", "max_age": 0}
)
self.assertEqual(rv.status_code, 401)
self.assertDictEqual(rv.json, error_messages["select_data_source"][0])
class TestQueryResultAPI(BaseTestCase):
def test_has_no_access_to_data_source(self):
ds = self.factory.create_data_source(group=self.factory.create_group())
query_result = self.factory.create_query_result(data_source=ds)
rv = self.make_request("get", "/api/query_results/{}".format(query_result.id))
self.assertEqual(rv.status_code, 403)
def test_has_view_only_access_to_data_source(self):
ds = self.factory.create_data_source(
group=self.factory.org.default_group, view_only=True
)
query_result = self.factory.create_query_result(data_source=ds)
rv = self.make_request("get", "/api/query_results/{}".format(query_result.id))
self.assertEqual(rv.status_code, 200)
def test_has_full_access_to_data_source(self):
ds = self.factory.create_data_source(
group=self.factory.org.default_group, view_only=False
)
query_result = self.factory.create_query_result(data_source=ds)
rv = self.make_request("get", "/api/query_results/{}".format(query_result.id))
self.assertEqual(rv.status_code, 200)
def test_execute_new_query(self):
query = self.factory.create_query()
rv = self.make_request(
"post", "/api/queries/{}/results".format(query.id), data={"parameters": {}}
)
self.assertEqual(rv.status_code, 200)
self.assertIn("job", rv.json)
def test_execute_but_has_no_access_to_data_source(self):
ds = self.factory.create_data_source(group=self.factory.create_group())
query = self.factory.create_query(data_source=ds)
rv = self.make_request("post", "/api/queries/{}/results".format(query.id))
self.assertEqual(rv.status_code, 403)
self.assertDictEqual(rv.json, error_messages["no_permission"][0])
def test_execute_with_no_parameter_values(self):
query = self.factory.create_query()
rv = self.make_request("post", "/api/queries/{}/results".format(query.id))
self.assertEqual(rv.status_code, 200)
self.assertIn("job", rv.json)
def test_prevents_execution_of_unsafe_queries_on_view_only_data_sources(self):
ds = self.factory.create_data_source(
group=self.factory.org.default_group, view_only=True
)
query = self.factory.create_query(
data_source=ds, options={"parameters": [{"name": "foo", "type": "text"}]}
)
rv = self.make_request(
"post", "/api/queries/{}/results".format(query.id), data={"parameters": {}}
)
self.assertEqual(rv.status_code, 403)
self.assertDictEqual(rv.json, error_messages["unsafe_on_view_only"][0])
def test_allows_execution_of_safe_queries_on_view_only_data_sources(self):
ds = self.factory.create_data_source(
group=self.factory.org.default_group, view_only=True
)
query = self.factory.create_query(
data_source=ds, options={"parameters": [{"name": "foo", "type": "number"}]}
)
rv = self.make_request(
"post", "/api/queries/{}/results".format(query.id), data={"parameters": {}}
)
self.assertEqual(rv.status_code, 200)
def test_prevents_execution_of_unsafe_queries_using_api_key(self):
ds = self.factory.create_data_source(
group=self.factory.org.default_group, view_only=True
)
query = self.factory.create_query(
data_source=ds, options={"parameters": [{"name": "foo", "type": "text"}]}
)
data = {"parameters": {"foo": "bar"}}
rv = self.make_request(
"post",
"/api/queries/{}/results?api_key={}".format(query.id, query.api_key),
data=data,
)
self.assertEqual(rv.status_code, 403)
self.assertDictEqual(rv.json, error_messages["unsafe_when_shared"][0])
def test_access_with_query_api_key(self):
ds = self.factory.create_data_source(
group=self.factory.org.default_group, view_only=False
)
query = self.factory.create_query()
query_result = self.factory.create_query_result(
data_source=ds, query_text=query.query_text
)
rv = self.make_request(
"get",
"/api/queries/{}/results/{}.json?api_key={}".format(
query.id, query_result.id, query.api_key
),
user=False,
)
self.assertEqual(rv.status_code, 200)
def test_access_with_query_api_key_without_query_result_id(self):
ds = self.factory.create_data_source(
group=self.factory.org.default_group, view_only=False
)
query = self.factory.create_query()
query_result = self.factory.create_query_result(
data_source=ds, query_text=query.query_text, query_hash=query.query_hash
)
query.latest_query_data = query_result
rv = self.make_request(
"get",
"/api/queries/{}/results.json?api_key={}".format(query.id, query.api_key),
user=False,
)
self.assertEqual(rv.status_code, 200)
def test_query_api_key_and_different_query_result(self):
ds = self.factory.create_data_source(
group=self.factory.org.default_group, view_only=False
)
query = self.factory.create_query(query_text="SELECT 8")
query_result2 = self.factory.create_query_result(
data_source=ds, query_hash="something-different"
)
rv = self.make_request(
"get",
"/api/queries/{}/results/{}.json?api_key={}".format(
query.id, query_result2.id, query.api_key
),
user=False,
)
self.assertEqual(rv.status_code, 404)
def test_signed_in_user_and_different_query_result(self):
ds2 = self.factory.create_data_source(
group=self.factory.org.admin_group, view_only=False
)
query = self.factory.create_query(query_text="SELECT 8")
query_result2 = self.factory.create_query_result(
data_source=ds2, query_hash="something-different"
)
rv = self.make_request(
"get", "/api/queries/{}/results/{}.json".format(query.id, query_result2.id)
)
self.assertEqual(rv.status_code, 403)
class TestQueryResultDropdownResource(BaseTestCase):
def test_checks_for_access_to_the_query(self):
ds2 = self.factory.create_data_source(
group=self.factory.org.admin_group, view_only=False
)
query = self.factory.create_query(data_source=ds2)
rv = self.make_request("get", "/api/queries/{}/dropdown".format(query.id))
self.assertEqual(rv.status_code, 403)
class TestQueryDropdownsResource(BaseTestCase):
def test_prevents_access_if_unassociated_and_doesnt_have_access(self):
query = self.factory.create_query()
ds2 = self.factory.create_data_source(
group=self.factory.org.admin_group, view_only=False
)
unrelated_dropdown_query = self.factory.create_query(data_source=ds2)
# unrelated_dropdown_query has not been associated with query
# user does not have direct access to unrelated_dropdown_query
rv = self.make_request(
"get",
"/api/queries/{}/dropdowns/{}".format(
query.id, unrelated_dropdown_query.id
),
)
self.assertEqual(rv.status_code, 403)
def test_allows_access_if_unassociated_but_user_has_access(self):
query = self.factory.create_query()
query_result = self.factory.create_query_result()
data = {"rows": [], "columns": [{"name": "whatever"}]}
query_result = self.factory.create_query_result(data=json_dumps(data))
unrelated_dropdown_query = self.factory.create_query(
latest_query_data=query_result
)
# unrelated_dropdown_query has not been associated with query
# user has direct access to unrelated_dropdown_query
rv = self.make_request(
"get",
"/api/queries/{}/dropdowns/{}".format(
query.id, unrelated_dropdown_query.id
),
)
self.assertEqual(rv.status_code, 200)
def test_allows_access_if_associated_and_has_access_to_parent(self):
query_result = self.factory.create_query_result()
data = {"rows": [], "columns": [{"name": "whatever"}]}
query_result = self.factory.create_query_result(data=json_dumps(data))
dropdown_query = self.factory.create_query(latest_query_data=query_result)
options = {"parameters": [{"type": "query", "queryId": dropdown_query.id}]}
query = self.factory.create_query(options=options)
# dropdown_query has been associated with query
# user has access to query
rv = self.make_request(
"get", "/api/queries/{}/dropdowns/{}".format(query.id, dropdown_query.id)
)
self.assertEqual(rv.status_code, 200)
def test_prevents_access_if_associated_and_doesnt_have_access_to_parent(self):
ds2 = self.factory.create_data_source(
group=self.factory.org.admin_group, view_only=False
)
dropdown_query = self.factory.create_query(data_source=ds2)
options = {"parameters": [{"type": "query", "queryId": dropdown_query.id}]}
query = self.factory.create_query(data_source=ds2, options=options)
# dropdown_query has been associated with query
# user doesnt have access to either query
rv = self.make_request(
"get", "/api/queries/{}/dropdowns/{}".format(query.id, dropdown_query.id)
)
self.assertEqual(rv.status_code, 403)
class TestQueryResultExcelResponse(BaseTestCase):
def test_renders_excel_file(self):
query = self.factory.create_query()
query_result = self.factory.create_query_result()
rv = self.make_request(
"get",
"/api/queries/{}/results/{}.xlsx".format(query.id, query_result.id),
is_json=False,
)
self.assertEqual(rv.status_code, 200)
def test_renders_excel_file_when_rows_have_missing_columns(self):
query = self.factory.create_query()
data = {
"rows": [{"test": 1}, {"test": 2, "test2": 3}],
"columns": [{"name": "test"}, {"name": "test2"}],
}
query_result = self.factory.create_query_result(data=json_dumps(data))
rv = self.make_request(
"get",
"/api/queries/{}/results/{}.xlsx".format(query.id, query_result.id),
is_json=False,
)
self.assertEqual(rv.status_code, 200)
class TestJobResource(BaseTestCase):
def test_cancels_queued_queries(self):
QUEUED = 1
FAILED = 4
query = self.factory.create_query()
job_id = self.make_request(
"post", f"/api/queries/{query.id}/results", data={"parameters": {}},
).json["job"]["id"]
status = self.make_request("get", f"/api/jobs/{job_id}").json["job"]["status"]
self.assertEqual(status, QUEUED)
self.make_request("delete", f"/api/jobs/{job_id}")
job = self.make_request("get", f"/api/jobs/{job_id}").json["job"]
self.assertEqual(job["status"], FAILED)
self.assertTrue("cancelled" in job["error"]) | getredash/redash | tests/handlers/test_query_results.py | Python | bsd-2-clause | 18,324 |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class server_args :
ur""" Provides additional arguments required for fetching the server resource.
"""
def __init__(self) :
self._Internal = False
@property
def Internal(self) :
ur"""Display names of the servers that have been created for internal use.
"""
try :
return self._Internal
except Exception as e:
raise e
@Internal.setter
def Internal(self, Internal) :
ur"""Display names of the servers that have been created for internal use.
"""
try :
self._Internal = Internal
except Exception as e:
raise e
| benfinke/ns_python | nssrc/com/citrix/netscaler/nitro/resource/config/basic/server_args.py | Python | apache-2.0 | 1,161 |
# -*- coding: utf-8 -*-
#
# Copyright (C)2005-2010 Edgewall Software
# Copyright (C) 2005 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
import os
import re
import weakref
from trac.config import ListOption
from trac.core import *
from trac.db.api import IDatabaseConnector
from trac.db.util import ConnectionWrapper, IterableCursor
from trac.util import get_pkginfo, getuser
from trac.util.translation import _
_like_escape_re = re.compile(r'([/_%])')
try:
import pysqlite2.dbapi2 as sqlite
have_pysqlite = 2
except ImportError:
try:
import sqlite3 as sqlite
have_pysqlite = 2
except ImportError:
have_pysqlite = 0
if have_pysqlite == 2:
# Force values to integers because PySQLite 2.2.0 had (2, 2, '0')
sqlite_version = tuple([int(x) for x in sqlite.sqlite_version_info])
sqlite_version_string = sqlite.sqlite_version
class PyFormatCursor(sqlite.Cursor):
def _rollback_on_error(self, function, *args, **kwargs):
try:
return function(self, *args, **kwargs)
except sqlite.DatabaseError:
self.cnx.rollback()
raise
def execute(self, sql, args=None):
if args:
sql = sql % (('?',) * len(args))
return self._rollback_on_error(sqlite.Cursor.execute, sql,
args or [])
def executemany(self, sql, args):
if not args:
return
sql = sql % (('?',) * len(args[0]))
return self._rollback_on_error(sqlite.Cursor.executemany, sql,
args)
# EagerCursor taken from the example in pysqlite's repository:
#
# http://code.google.com/p/pysqlite/source/browse/misc/eager.py
#
# Only change is to subclass it from PyFormatCursor instead of
# sqlite.Cursor.
class EagerCursor(PyFormatCursor):
def __init__(self, con):
PyFormatCursor.__init__(self, con)
self.rows = []
self.pos = 0
def execute(self, *args):
result = PyFormatCursor.execute(self, *args)
self.rows = PyFormatCursor.fetchall(self)
self.pos = 0
return result
def fetchone(self):
try:
row = self.rows[self.pos]
self.pos += 1
return row
except IndexError:
return None
def fetchmany(self, num=None):
if num is None:
num = self.arraysize
result = self.rows[self.pos:self.pos+num]
self.pos += num
return result
def fetchall(self):
result = self.rows[self.pos:]
self.pos = len(self.rows)
return result
# Mapping from "abstract" SQL types to DB-specific types
_type_map = {
'int': 'integer',
'int64': 'integer',
}
def _to_sql(table):
sql = ["CREATE TABLE %s (" % table.name]
coldefs = []
for column in table.columns:
ctype = column.type.lower()
ctype = _type_map.get(ctype, ctype)
if column.auto_increment:
ctype = "integer PRIMARY KEY"
elif len(table.key) == 1 and column.name in table.key:
ctype += " PRIMARY KEY"
coldefs.append(" %s %s" % (column.name, ctype))
if len(table.key) > 1:
coldefs.append(" UNIQUE (%s)" % ','.join(table.key))
sql.append(',\n'.join(coldefs) + '\n);')
yield '\n'.join(sql)
for index in table.indices:
unique = index.unique and 'UNIQUE' or ''
yield "CREATE %s INDEX %s_%s_idx ON %s (%s);" % (unique, table.name,
'_'.join(index.columns), table.name, ','.join(index.columns))
class SQLiteConnector(Component):
"""Database connector for SQLite.
Database URLs should be of the form:
{{{
sqlite:path/to/trac.db
}}}
"""
implements(IDatabaseConnector)
extensions = ListOption('sqlite', 'extensions',
doc="""Paths to sqlite extensions, relative to Trac environment's
directory or absolute. (''since 0.12'')""")
def __init__(self):
self._version = None
self.error = None
self._extensions = None
def get_supported_schemes(self):
if not have_pysqlite:
self.error = _("Cannot load Python bindings for SQLite")
elif sqlite_version >= (3, 3, 3) and sqlite.version_info[0] == 2 and \
sqlite.version_info < (2, 0, 7):
self.error = _("Need at least PySqlite %(version)s or higher",
version='2.0.7')
elif (2, 5, 2) <= sqlite.version_info < (2, 5, 5):
self.error = _("PySqlite 2.5.2 - 2.5.4 break Trac, please use "
"2.5.5 or higher")
yield ('sqlite', self.error and -1 or 1)
def get_connection(self, path, log=None, params={}):
if not self._version:
self._version = get_pkginfo(sqlite).get(
'version', '%d.%d.%s' % sqlite.version_info)
self.env.systeminfo.extend([('SQLite', sqlite_version_string),
('pysqlite', self._version)])
self.required = True
# construct list of sqlite extension libraries
if self._extensions is None:
self._extensions = []
for extpath in self.extensions:
if not os.path.isabs(extpath):
extpath = os.path.join(self.env.path, extpath)
self._extensions.append(extpath)
params['extensions'] = self._extensions
return SQLiteConnection(path, log, params)
def init_db(self, path, log=None, params={}):
if path != ':memory:':
# make the directory to hold the database
if os.path.exists(path):
raise TracError(_('Database already exists at %(path)s',
path=path))
dir = os.path.dirname(path)
if not os.path.exists(dir):
os.makedirs(dir)
if isinstance(path, unicode): # needed with 2.4.0
path = path.encode('utf-8')
cnx = sqlite.connect(path, timeout=int(params.get('timeout', 10000)))
cursor = cnx.cursor()
from trac.db_default import schema
for table in schema:
for stmt in self.to_sql(table):
cursor.execute(stmt)
cnx.commit()
def to_sql(self, table):
return _to_sql(table)
def alter_column_types(self, table, columns):
"""Yield SQL statements altering the type of one or more columns of
a table.
Type changes are specified as a `columns` dict mapping column names
to `(from, to)` SQL type tuples.
"""
for name, (from_, to) in sorted(columns.iteritems()):
if _type_map.get(to, to) != _type_map.get(from_, from_):
raise NotImplementedError('Conversion from %s to %s is not '
'implemented' % (from_, to))
return ()
def backup(self, dest_file):
"""Simple SQLite-specific backup of the database.
@param dest_file: Destination file basename
"""
import shutil
db_str = self.config.get('trac', 'database')
try:
db_str = db_str[:db_str.index('?')]
except ValueError:
pass
db_name = os.path.join(self.env.path, db_str[7:])
shutil.copy(db_name, dest_file)
if not os.path.exists(dest_file):
raise TracError(_("No destination file created"))
return dest_file
class SQLiteConnection(ConnectionWrapper):
"""Connection wrapper for SQLite."""
__slots__ = ['_active_cursors', '_eager']
poolable = have_pysqlite and sqlite_version >= (3, 3, 8) \
and sqlite.version_info >= (2, 5, 0)
def __init__(self, path, log=None, params={}):
assert have_pysqlite > 0
self.cnx = None
if path != ':memory:':
if not os.access(path, os.F_OK):
raise TracError(_('Database "%(path)s" not found.', path=path))
dbdir = os.path.dirname(path)
if not os.access(path, os.R_OK + os.W_OK) or \
not os.access(dbdir, os.R_OK + os.W_OK):
raise TracError(
_('The user %(user)s requires read _and_ write '
'permissions to the database file %(path)s '
'and the directory it is located in.',
user=getuser(), path=path))
self._active_cursors = weakref.WeakKeyDictionary()
timeout = int(params.get('timeout', 10.0))
self._eager = params.get('cursor', 'eager') == 'eager'
# eager is default, can be turned off by specifying ?cursor=
if isinstance(path, unicode): # needed with 2.4.0
path = path.encode('utf-8')
cnx = sqlite.connect(path, detect_types=sqlite.PARSE_DECLTYPES,
check_same_thread=sqlite_version < (3, 3, 1),
timeout=timeout)
# load extensions
extensions = params.get('extensions', [])
if len(extensions) > 0:
cnx.enable_load_extension(True)
for ext in extensions:
cnx.load_extension(ext)
cnx.enable_load_extension(False)
ConnectionWrapper.__init__(self, cnx, log)
def cursor(self):
cursor = self.cnx.cursor((PyFormatCursor, EagerCursor)[self._eager])
self._active_cursors[cursor] = True
cursor.cnx = self
return IterableCursor(cursor, self.log)
def rollback(self):
for cursor in self._active_cursors.keys():
cursor.close()
self.cnx.rollback()
def cast(self, column, type):
if sqlite_version >= (3, 2, 3):
return 'CAST(%s AS %s)' % (column, _type_map.get(type, type))
elif type == 'int':
# hack to force older SQLite versions to convert column to an int
return '1*' + column
else:
return column
def concat(self, *args):
return '||'.join(args)
def like(self):
"""Return a case-insensitive LIKE clause."""
if sqlite_version >= (3, 1, 0):
return "LIKE %s ESCAPE '/'"
else:
return 'LIKE %s'
def like_escape(self, text):
if sqlite_version >= (3, 1, 0):
return _like_escape_re.sub(r'/\1', text)
else:
return text
def quote(self, identifier):
"""Return the quoted identifier."""
return "`%s`" % identifier
def get_last_id(self, cursor, table, column='id'):
return cursor.lastrowid
def update_sequence(self, cursor, table, column='id'):
# SQLite handles sequence updates automagically
# http://www.sqlite.org/autoinc.html
pass
| zjj/trac_hack | trac/db/sqlite_backend.py | Python | bsd-3-clause | 11,446 |
"""Deepstream RPC handling."""
from __future__ import absolute_import, division, print_function, with_statement
from __future__ import unicode_literals
from deepstreampy.constants import topic as topic_constants
from deepstreampy.constants import actions
from deepstreampy.constants import event as event_constants
from deepstreampy.message import message_builder
from deepstreampy.message import message_parser
from deepstreampy import utils
from tornado import concurrent
from tornado import gen
from functools import partial
class RPCResponse(object):
"""Allows a RPC provider to respond to a request.
Attributes:
auto_ack (bool): Specifies whether requests should be auto acknowledged
"""
def __init__(self, connection, name, correlation_id):
"""
Args:
connection (deepstreampy.client._Connection): The current connection
name (str): The name of the RPC
correlation_id (str): Correlation ID of the RPC
"""
self._connection = connection
self._name = name
self._correletaion_id = correlation_id
self._is_acknowledged = False
self._is_complete = False
self.auto_ack = True
self._connection._io_loop.add_callback(self._perform_auto_ack)
    def ack(self):
        """Acknowledge receipt of the request.
Will happen implicitly unless the request callback sets ``auto_ack``
to False.
"""
if not self._is_acknowledged:
future = self._connection.send_message(
topic_constants.RPC,
actions.ACK,
[actions.REQUEST, self._name, self._correletaion_id])
self._is_acknowledged = True
else:
future = concurrent.Future()
future.set_result(None)
return future
def reject(self):
"""Reject the request.
This might be necessary if the client is already processing a large
number of requests. If deepstream receives a rejection message it will
try to route the request to another provider - or return a
NO_RPC_PROVIDER error if there are no providers left.
If autoAck is disabled and the response is sent before the ack message
the request will still be completed and the ack message ignored.
"""
self.auto_ack = False
self._is_complete = True
self._is_acknowledged = True
return self._connection.send_message(
topic_constants.RPC,
actions.REJECTION,
[self._name, self._correletaion_id])
def send(self, data):
"""Complete the request by sending the response data to the server.
Args:
data: JSON serializable data to send to the server.
"""
if self._is_complete:
raise ValueError('RPC {0} already completed'.format(self._name))
self.ack()
typed_data = message_builder.typed(data)
self._is_complete = True
return self._connection.send_message(
topic_constants.RPC,
actions.RESPONSE,
[self._name, self._correletaion_id, typed_data])
    def error(self, error_str):
        """Notify the server that an error has occurred.
This will also complete the RPC.
"""
self.auto_ack = False
self._is_complete = True
self._is_acknowledged = True
return self._connection.send_message(
topic_constants.RPC,
actions.ERROR,
[error_str, self._name, self._correletaion_id])
def _perform_auto_ack(self):
if self.auto_ack:
self.ack()
class RPCException(Exception):
def __init__(self, message):
super(RPCException, self).__init__(message)
class RPC(object):
"""Represents a single RPC made from the client to the server.
Encapsulates logic around timeouts and converts the incoming response data.
"""
def __init__(self, future, client, **options):
self._options = options
self._future = future
self._client = client
self._connection = client._connection
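        # Fail the call early if the server neither acks nor responds in time;
        # both timers are cleared once the reply (or an error) arrives.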
self._ack_timeout = self._connection._io_loop.call_later(
options.get('rpcAckTimeout', 6),
partial(self.error, event_constants.ACK_TIMEOUT))
self._response_timeout = self._connection._io_loop.call_later(
options.get('rpcResponseTimeout', 6),
partial(self.error, event_constants.RESPONSE_TIMEOUT))
def ack(self):
self._connection._io_loop.remove_timeout(self._ack_timeout)
def respond(self, data):
converted_data = message_parser.convert_typed(data, self._client)
self._future.set_result(converted_data)
self._complete()
def error(self, error_msg):
self._future.set_exception(RPCException(error_msg))
self._complete()
def _complete(self):
self._connection._io_loop.remove_timeout(self._ack_timeout)
self._connection._io_loop.remove_timeout(self._response_timeout)
class RPCHandler(object):
def __init__(self, connection, client, **options):
self._options = options
self._connection = connection
self._client = client
self._rpcs = {}
self._providers = {}
self._provide_ack_timeouts = {}
subscription_timeout = options.get("subscriptionTimeout", 15)
self._ack_timeout_registry = utils.AckTimeoutRegistry(
client, topic_constants.RPC, subscription_timeout)
self._resubscribe_notifier = utils.ResubscribeNotifier(
client, self._reprovide)
def provide(self, name, callback):
if not name:
raise ValueError("invalid argument: name")
if not callback:
raise ValueError("invalid argument: callback")
if not callable(callback):
raise TypeError("expected callback to be a callable")
if name in self._providers:
raise ValueError("RPC {0} already registered".format(name))
self._ack_timeout_registry.add(name, actions.SUBSCRIBE)
self._providers[name] = callback
return self._connection.send_message(topic_constants.RPC,
actions.SUBSCRIBE,
[name])
def unprovide(self, name):
if not name:
raise ValueError("invalid argument name")
if name in self._providers:
del self._providers[name]
self._ack_timeout_registry.add(name, actions.UNSUBSCRIBE)
future = self._connection.send_message(topic_constants.RPC,
actions.UNSUBSCRIBE,
[name])
else:
future = concurrent.Future()
future.set_result(None)
return future
@gen.coroutine
def make(self, name, data):
f = concurrent.Future()
uid = utils.get_uid()
typed_data = message_builder.typed(data)
self._rpcs[uid] = RPC(f, self._client, **self._options)
self._connection.send_message(
topic_constants.RPC, actions.REQUEST, [name, uid, typed_data])
result = yield f
raise gen.Return(result)
def _get_rpc(self, correlation_id, rpc_name, raw_message):
if correlation_id not in self._rpcs:
self._client._on_error(topic_constants.RPC,
event_constants.UNSOLICITED_MESSAGE,
raw_message)
return
rpc = self._rpcs[correlation_id]
return rpc
def _respond_to_rpc(self, message):
name = message['data'][0]
correlation_id = message['data'][1]
        data = None
        if message['data'][2]:
            data = message_parser.convert_typed(message['data'][2],
                                                self._client)
if name in self._providers:
response = RPCResponse(self._connection, name, correlation_id)
self._providers[name](data, response)
else:
self._connection.send_message(topic_constants.RPC,
actions.REJECTION,
[name, correlation_id])
def handle(self, message):
action = message['action']
data = message['data']
if action == actions.REQUEST:
self._respond_to_rpc(message)
return
if (action == actions.ACK and
(data[0] in (actions.SUBSCRIBE, actions.UNSUBSCRIBE))):
self._ack_timeout_registry.clear(message)
return
if action == actions.ERROR:
if data[0] in (event_constants.MESSAGE_PERMISSION_ERROR,
event_constants.INVALID_RPC_CORRELATION_ID):
return
if (data[0] == event_constants.MESSAGE_DENIED and
data[2] == actions.SUBSCRIBE):
self._ack_timeout_registry.remove(data[1], actions.SUBSCRIBE)
return
if action in (actions.ERROR, actions.ACK):
if (data[0] == event_constants.MESSAGE_DENIED and
data[2] == actions.REQUEST):
correlation_id = data[3]
else:
correlation_id = data[2]
rpc_name = data[1]
else:
rpc_name = data[0]
correlation_id = data[1]
rpc = self._get_rpc(correlation_id, rpc_name, message.get('raw', ''))
if rpc is None:
return
if action == actions.ACK:
rpc.ack()
elif action == actions.RESPONSE:
rpc.respond(data[2])
del self._rpcs[correlation_id]
elif action == actions.ERROR:
message['processedError'] = True
rpc.error(data[0])
del self._rpcs[correlation_id]
def _reprovide(self):
for rpc_name in self._providers:
self._connection.send_message(topic_constants.RPC,
actions.SUBSCRIBE,
[rpc_name])
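# Illustrative sketch (assumed wiring, not part of the original module): calling
# a remote procedure through RPCHandler.make from a Tornado coroutine.
#
#     @gen.coroutine
#     def call_multiply(handler):
#         result = yield handler.make('multiply', {'a': 6, 'b': 7})
#         raise gen.Return(result)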
| YavorPaunov/deepstreampy | deepstreampy/rpc.py | Python | mit | 10,203 |
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from mock import MagicMock, patch
from polyaxon.cli.artifacts import artifacts
from polyaxon_sdk import V1ProjectVersionKind
from tests.test_cli.utils import BaseCommandTestCase
@pytest.mark.cli_mark
class TestCliArtifacts(BaseCommandTestCase):
@patch("polyaxon_sdk.ProjectsV1Api.create_version")
@patch("polyaxon_sdk.ProjectsV1Api.patch_version")
@patch("polyaxon_sdk.ProjectsV1Api.get_version")
def test_create_artifact(self, get_version, patch_version, create_version):
self.runner.invoke(artifacts, ["register"])
assert create_version.call_count == 0
assert patch_version.call_count == 0
assert get_version.call_count == 0
get_version.return_value = None
self.runner.invoke(artifacts, ["register", "--project=owner/foo"])
assert get_version.call_count == 1
assert patch_version.call_count == 0
assert create_version.call_count == 1
get_version.return_value = MagicMock(
kind=V1ProjectVersionKind.ARTIFACT,
)
self.runner.invoke(artifacts, ["register", "--project=owner/foo"])
assert get_version.call_count == 2
assert patch_version.call_count == 0
assert create_version.call_count == 1
self.runner.invoke(artifacts, ["register", "--project=owner/foo", "--force"])
assert get_version.call_count == 3
assert patch_version.call_count == 1
assert create_version.call_count == 1
@patch("polyaxon_sdk.ProjectsV1Api.list_versions")
def test_list_artifacts(self, list_artifacts):
self.runner.invoke(artifacts, ["ls", "--project=owner/foo"])
assert list_artifacts.call_count == 1
@patch("polyaxon_sdk.ProjectsV1Api.get_version")
def test_get_artifact(self, get_artifact):
self.runner.invoke(artifacts, ["get", "-p", "admin/foo"])
assert get_artifact.call_count == 1
@patch("polyaxon_sdk.ProjectsV1Api.patch_version")
def test_update_artifact(self, update_artifact):
self.runner.invoke(
artifacts, ["update", "-p", "admin/foo", "--description=foo"]
)
assert update_artifact.call_count == 1
| polyaxon/polyaxon | core/tests/test_cli/test_artifacts.py | Python | apache-2.0 | 2,783 |
from django.conf.urls.defaults import *
urlpatterns = patterns('',
url(r'^$', 'blog.views.entry_list', name="entry-list"),
url(r'^archive/(?P<year>\d{4})/$', 'blog.views.entry_archive_year', name="year-archive"),
url(r'^archive/(?P<year>\d{4})/(?P<month>\d{1,2})/$', 'blog.views.entry_archive_month', name="month-archive"),
url(r'^(?P<slug>[-\w]+)/$', 'blog.views.entry_detail', name="entry-detail"),
)
| tsoporan/tehorng | blog/urls.py | Python | agpl-3.0 | 421 |
import socket, threading
from wendy import Worker
from logger import logger
import time
config_file = [['A', '192.168.1.1', 5],
['B', '192.168.1.2', 8]]
INF = 99
US = 'C'
class ForwardClass(Worker):
    def __init__(self, sckt, address, dvector=None):
        # init things; dvector is assumed to be the distance-vector routing
        # table used for forwarding lookups and is passed in by the caller
        self.dvector = dvector
        self.close = False
        self.data = ''
        self.address = address
        super().__init__(sckt, address)
def start(self):
        thrd = threading.Thread(name='ForwardWorker-' + str(self.address), target=self.recv)
thrd.start()
        while not self.close:
if(self.data!=''):
msg = self.data
splitted = self.data.split("~")
#Get TO:
toAddr = splitted[1].split(":")[1]
ipaddress = self.dvector.forwarding(toAddr)
                if ipaddress is not None:
logger.info('Forwarding message to: %s' % ipaddress)
s = socket.socket()
s.connect((ipaddress,1981))
                    s.send(msg.encode('ascii'))
self.data = ''
s.close()
else:
logger.info('Receiving message from: %s' % splitted[0].split(":")[1])
s = socket.socket()
s.connect(("127.0.0.1",1992))
                    s.send(msg.encode('ascii'))
self.data = ''
s.close()
self.close = True
def recv(self):
chunks = []
bytes_rcvd = 0
while True:
chunk = self.socket.recv(1024).decode('ascii')
if chunk == '':
raise RuntimeError("Socket connection broken")
chunks.append(chunk)
bytes_rcvd += len(chunk)
msg = self.iseof(chunks)
if len(msg):
self.data = msg
msg = ''
def close(self):
return self.close
| wichovw/rhonda | forwarding.py | Python | gpl-2.0 | 1,891 |
import mysql.connector
from fixture.db_data import DbFixture
db = DbFixture(host="localhost", name="addressbook", user="root", password="")
try:
contacts = db.get_contact_in_group()
for contact in contacts:
print("Contact id: " + str(contact.id) + ", group id: " + str(contact.group_id) )
print(len(contacts))
finally:
db.destroy() | ksemish/KseniyaRepository | check_db_connection.py | Python | apache-2.0 | 361 |
# -*- coding: utf-8 -*-
#################################################################################
### Jesus Garcia Manday
### apply-Descriptor-LipVireo.py
### @Descripcion: script para obtener los ficheros descriptores de los puntos
### de interés de cada imagen
###
### @Params:
### - detector: el tipo de detector de puntos de interés a aplicar
### - descriptor: el tipo de descriptor de puntos de interés a aplicar
### - pathSource: path origen donde se almacenan las imágenes
### - pathDestiny: path destino donde se van a almacenar los ficheros ".pkeys"
###
### @Execute: ./lip-vireo -dir ../databases/images-grouped/reflexion-especular/ -d hesslap -p SURF
### -dsdir ../outputs/lip-vireo-kp-images/reflexion-especular/SURF/hesslap/ -c lip-vireo.conf
###################################################################################
import os
import subprocess
from subprocess import Popen, PIPE
import sys
def applyDescriptor(detector, descriptor, pathSource, pathDestiny):
    cmd = ("./lip-vireo -dir " + pathSource + " -d " + detector + " -p " + descriptor +
           " -dsdir " + pathDestiny + " -c lip-vireo.conf")
    os.system(cmd)
if __name__ == "__main__":
detector = sys.argv[1]
descriptor = sys.argv[2]
pathSource = sys.argv[3]
pathDestiny = sys.argv[4]
applyDescriptor(detector, descriptor, pathSource, pathDestiny)
| jmanday/Master | TFM/scripts/apply-Descriptor-LipVireo.py | Python | apache-2.0 | 1,416 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
To run these tests against a live database:
1. Modify the file `tests/backend_sql.conf` to use the connection for your
live database
2. Set up a blank, live database.
3. run the tests using
./run_tests.sh -N test_sql_upgrade
WARNING::
Your database will be wiped.
Do not do this against a Database with valuable data as
all data will be lost.
"""
import copy
import json
import uuid
from migrate.versioning import api as versioning_api
import sqlalchemy
from keystone.common import sql
from keystone.common.sql import migration
from keystone import config
from keystone import test
import default_fixtures
CONF = config.CONF
DEFAULT_DOMAIN_ID = CONF.identity.default_domain_id
class SqlUpgradeTests(test.TestCase):
def initialize_sql(self):
self.metadata = sqlalchemy.MetaData()
self.metadata.bind = self.engine
_config_file_list = [test.etcdir('keystone.conf.sample'),
test.testsdir('test_overrides.conf'),
test.testsdir('backend_sql.conf')]
    # Override this to specify the complete list of configuration files.
def config_files(self):
return self._config_file_list
def setUp(self):
super(SqlUpgradeTests, self).setUp()
self.config(self.config_files())
self.base = sql.Base()
# create and share a single sqlalchemy engine for testing
self.engine = self.base.get_engine(allow_global_engine=False)
self.Session = self.base.get_sessionmaker(engine=self.engine,
autocommit=False)
self.initialize_sql()
self.repo_path = migration._find_migrate_repo()
self.schema = versioning_api.ControlledSchema.create(
self.engine,
self.repo_path, 0)
# auto-detect the highest available schema version in the migrate_repo
self.max_version = self.schema.repository.version().version
def tearDown(self):
sqlalchemy.orm.session.Session.close_all()
table = sqlalchemy.Table("migrate_version", self.metadata,
autoload=True)
self.downgrade(0)
table.drop(self.engine, checkfirst=True)
super(SqlUpgradeTests, self).tearDown()
def test_blank_db_to_start(self):
self.assertTableDoesNotExist('user')
def test_start_version_0(self):
version = migration.db_version()
self.assertEqual(version, 0, "DB is at version 0")
def test_two_steps_forward_one_step_back(self):
"""You should be able to cleanly undo and re-apply all upgrades.
Upgrades are run in the following order::
0 -> 1 -> 0 -> 1 -> 2 -> 1 -> 2 -> 3 -> 2 -> 3 ...
^---------^ ^---------^ ^---------^
"""
for x in range(1, self.max_version + 1):
self.upgrade(x)
self.downgrade(x - 1)
self.upgrade(x)
def assertTableColumns(self, table_name, expected_cols):
"""Asserts that the table contains the expected set of columns."""
self.initialize_sql()
table = self.select_table(table_name)
actual_cols = [col.name for col in table.columns]
self.assertEqual(expected_cols, actual_cols, '%s table' % table_name)
def test_upgrade_add_initial_tables(self):
self.upgrade(1)
self.assertTableColumns("user", ["id", "name", "extra"])
self.assertTableColumns("tenant", ["id", "name", "extra"])
self.assertTableColumns("role", ["id", "name"])
self.assertTableColumns("user_tenant_membership",
["user_id", "tenant_id"])
self.assertTableColumns("metadata", ["user_id", "tenant_id", "data"])
self.populate_user_table()
def test_upgrade_add_policy(self):
self.upgrade(5)
self.assertTableDoesNotExist('policy')
self.upgrade(6)
self.assertTableExists('policy')
self.assertTableColumns('policy', ['id', 'type', 'blob', 'extra'])
def test_upgrade_normalize_identity(self):
self.upgrade(8)
self.populate_user_table()
self.populate_tenant_table()
self.upgrade(10)
self.assertTableColumns("user",
["id", "name", "extra",
"password", "enabled"])
self.assertTableColumns("tenant",
["id", "name", "extra", "description",
"enabled"])
self.assertTableColumns("role", ["id", "name", "extra"])
self.assertTableColumns("user_tenant_membership",
["user_id", "tenant_id"])
self.assertTableColumns("metadata", ["user_id", "tenant_id", "data"])
session = self.Session()
user_table = sqlalchemy.Table("user",
self.metadata,
autoload=True)
a_user = session.query(user_table).filter("id='foo'").one()
self.assertTrue(a_user.enabled)
a_user = session.query(user_table).filter("id='badguy'").one()
self.assertFalse(a_user.enabled)
tenant_table = sqlalchemy.Table("tenant",
self.metadata,
autoload=True)
a_tenant = session.query(tenant_table).filter("id='baz'").one()
self.assertEqual(a_tenant.description, 'description')
session.commit()
session.close()
def test_normalized_enabled_states(self):
self.upgrade(8)
users = {
'bool_enabled_user': {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'password': uuid.uuid4().hex,
'extra': json.dumps({'enabled': True})},
'bool_disabled_user': {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'password': uuid.uuid4().hex,
'extra': json.dumps({'enabled': False})},
'str_enabled_user': {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'password': uuid.uuid4().hex,
'extra': json.dumps({'enabled': 'True'})},
'str_disabled_user': {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'password': uuid.uuid4().hex,
'extra': json.dumps({'enabled': 'False'})},
'int_enabled_user': {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'password': uuid.uuid4().hex,
'extra': json.dumps({'enabled': 1})},
'int_disabled_user': {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'password': uuid.uuid4().hex,
'extra': json.dumps({'enabled': 0})},
'null_enabled_user': {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'password': uuid.uuid4().hex,
'extra': json.dumps({'enabled': None})},
'unset_enabled_user': {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'password': uuid.uuid4().hex,
'extra': json.dumps({})}}
session = self.Session()
for user in users.values():
self.insert_dict(session, 'user', user)
session.commit()
self.upgrade(10)
user_table = sqlalchemy.Table('user', self.metadata, autoload=True)
q = session.query(user_table, 'enabled')
user = q.filter_by(id=users['bool_enabled_user']['id']).one()
self.assertTrue(user.enabled)
user = q.filter_by(id=users['bool_disabled_user']['id']).one()
self.assertFalse(user.enabled)
user = q.filter_by(id=users['str_enabled_user']['id']).one()
self.assertTrue(user.enabled)
user = q.filter_by(id=users['str_disabled_user']['id']).one()
self.assertFalse(user.enabled)
user = q.filter_by(id=users['int_enabled_user']['id']).one()
self.assertTrue(user.enabled)
user = q.filter_by(id=users['int_disabled_user']['id']).one()
self.assertFalse(user.enabled)
user = q.filter_by(id=users['null_enabled_user']['id']).one()
self.assertTrue(user.enabled)
user = q.filter_by(id=users['unset_enabled_user']['id']).one()
self.assertTrue(user.enabled)
def test_downgrade_10_to_8(self):
self.upgrade(10)
self.populate_user_table(with_pass_enab=True)
self.populate_tenant_table(with_desc_enab=True)
self.downgrade(8)
self.assertTableColumns('user',
['id', 'name', 'extra'])
self.assertTableColumns('tenant',
['id', 'name', 'extra'])
session = self.Session()
user_table = sqlalchemy.Table("user",
self.metadata,
autoload=True)
a_user = session.query(user_table).filter("id='badguy'").one()
self.assertEqual(a_user.name, default_fixtures.USERS[2]['name'])
tenant_table = sqlalchemy.Table("tenant",
self.metadata,
autoload=True)
a_tenant = session.query(tenant_table).filter("id='baz'").one()
self.assertEqual(a_tenant.name, default_fixtures.TENANTS[1]['name'])
session.commit()
session.close()
def test_upgrade_endpoints(self):
self.upgrade(10)
service_extra = {
'name': uuid.uuid4().hex,
}
service = {
'id': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
'extra': json.dumps(service_extra),
}
endpoint_extra = {
'publicurl': uuid.uuid4().hex,
'internalurl': uuid.uuid4().hex,
'adminurl': uuid.uuid4().hex,
}
endpoint = {
'id': uuid.uuid4().hex,
'region': uuid.uuid4().hex,
'service_id': service['id'],
'extra': json.dumps(endpoint_extra),
}
session = self.Session()
self.insert_dict(session, 'service', service)
self.insert_dict(session, 'endpoint', endpoint)
session.commit()
session.close()
self.upgrade(13)
self.assertTableColumns(
'service',
['id', 'type', 'extra'])
self.assertTableColumns(
'endpoint',
['id', 'legacy_endpoint_id', 'interface', 'region', 'service_id',
'url', 'extra'])
endpoint_table = sqlalchemy.Table(
'endpoint', self.metadata, autoload=True)
session = self.Session()
self.assertEqual(session.query(endpoint_table).count(), 3)
for interface in ['public', 'internal', 'admin']:
q = session.query(endpoint_table)
q = q.filter_by(legacy_endpoint_id=endpoint['id'])
q = q.filter_by(interface=interface)
ref = q.one()
self.assertNotEqual(ref.id, endpoint['id'])
self.assertEqual(ref.legacy_endpoint_id, endpoint['id'])
self.assertEqual(ref.interface, interface)
self.assertEqual(ref.region, endpoint['region'])
self.assertEqual(ref.service_id, endpoint['service_id'])
self.assertEqual(ref.url, endpoint_extra['%surl' % interface])
self.assertEqual(ref.extra, '{}')
session.commit()
session.close()
def assertTenantTables(self):
self.assertTableExists('tenant')
self.assertTableExists('user_tenant_membership')
self.assertTableDoesNotExist('project')
self.assertTableDoesNotExist('user_project_membership')
def assertProjectTables(self):
self.assertTableExists('project')
self.assertTableExists('user_project_membership')
self.assertTableDoesNotExist('tenant')
self.assertTableDoesNotExist('user_tenant_membership')
def test_upgrade_tenant_to_project(self):
self.upgrade(14)
self.assertTenantTables()
self.upgrade(15)
self.assertProjectTables()
def test_downgrade_project_to_tenant(self):
# TODO(henry-nash): Debug why we need to re-load the tenant
# or user_tenant_membership ahead of upgrading to project
# in order for the assertProjectTables to work on sqlite
# (MySQL is fine without it)
self.upgrade(14)
self.assertTenantTables()
self.upgrade(15)
self.assertProjectTables()
self.downgrade(14)
self.assertTenantTables()
def test_upgrade_add_group_tables(self):
self.upgrade(13)
self.upgrade(14)
self.assertTableExists('group')
self.assertTableExists('group_project_metadata')
self.assertTableExists('group_domain_metadata')
self.assertTableExists('user_group_membership')
def test_upgrade_14_to_16(self):
self.upgrade(14)
self.populate_user_table(with_pass_enab=True)
self.populate_tenant_table(with_desc_enab=True)
self.upgrade(16)
self.assertTableColumns("user",
["id", "name", "extra",
"password", "enabled", "domain_id"])
session = self.Session()
user_table = sqlalchemy.Table("user",
self.metadata,
autoload=True)
a_user = session.query(user_table).filter("id='foo'").one()
self.assertTrue(a_user.enabled)
self.assertEqual(a_user.domain_id, DEFAULT_DOMAIN_ID)
a_user = session.query(user_table).filter("id='badguy'").one()
self.assertEqual(a_user.name, default_fixtures.USERS[2]['name'])
self.assertEqual(a_user.domain_id, DEFAULT_DOMAIN_ID)
project_table = sqlalchemy.Table("project",
self.metadata,
autoload=True)
a_project = session.query(project_table).filter("id='baz'").one()
self.assertEqual(a_project.description,
default_fixtures.TENANTS[1]['description'])
self.assertEqual(a_project.domain_id, DEFAULT_DOMAIN_ID)
session.commit()
session.close()
self.check_uniqueness_constraints()
def test_downgrade_16_to_14(self):
self.upgrade(16)
self.populate_user_table(with_pass_enab_domain=True)
self.populate_tenant_table(with_desc_enab_domain=True)
self.downgrade(14)
self.assertTableColumns("user",
["id", "name", "extra",
"password", "enabled"])
session = self.Session()
user_table = sqlalchemy.Table("user",
self.metadata,
autoload=True)
a_user = session.query(user_table).filter("id='foo'").one()
self.assertTrue(a_user.enabled)
a_user = session.query(user_table).filter("id='badguy'").one()
self.assertEqual(a_user.name, default_fixtures.USERS[2]['name'])
tenant_table = sqlalchemy.Table("tenant",
self.metadata,
autoload=True)
a_tenant = session.query(tenant_table).filter("id='baz'").one()
self.assertEqual(a_tenant.description,
default_fixtures.TENANTS[1]['description'])
session.commit()
session.close()
def test_downgrade_remove_group_tables(self):
self.upgrade(14)
self.downgrade(13)
self.assertTableDoesNotExist('group')
self.assertTableDoesNotExist('group_project_metadata')
self.assertTableDoesNotExist('group_domain_metadata')
self.assertTableDoesNotExist('user_group_membership')
def test_downgrade_endpoints(self):
self.upgrade(13)
service_extra = {
'name': uuid.uuid4().hex,
}
service = {
'id': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
'extra': json.dumps(service_extra),
}
common_endpoint_attrs = {
'legacy_endpoint_id': uuid.uuid4().hex,
'region': uuid.uuid4().hex,
'service_id': service['id'],
'extra': json.dumps({}),
}
endpoints = {
'public': {
'id': uuid.uuid4().hex,
'interface': 'public',
'url': uuid.uuid4().hex,
},
'internal': {
'id': uuid.uuid4().hex,
'interface': 'internal',
'url': uuid.uuid4().hex,
},
'admin': {
'id': uuid.uuid4().hex,
'interface': 'admin',
'url': uuid.uuid4().hex,
},
}
session = self.Session()
self.insert_dict(session, 'service', service)
for endpoint in endpoints.values():
endpoint.update(common_endpoint_attrs)
self.insert_dict(session, 'endpoint', endpoint)
session.commit()
session.close()
self.downgrade(9)
self.assertTableColumns(
'service',
['id', 'type', 'extra'])
self.assertTableColumns(
'endpoint',
['id', 'region', 'service_id', 'extra'])
endpoint_table = sqlalchemy.Table(
'endpoint', self.metadata, autoload=True)
session = self.Session()
self.assertEqual(session.query(endpoint_table).count(), 1)
q = session.query(endpoint_table)
q = q.filter_by(id=common_endpoint_attrs['legacy_endpoint_id'])
ref = q.one()
self.assertEqual(ref.id, common_endpoint_attrs['legacy_endpoint_id'])
self.assertEqual(ref.region, endpoint['region'])
self.assertEqual(ref.service_id, endpoint['service_id'])
extra = json.loads(ref.extra)
for interface in ['public', 'internal', 'admin']:
expected_url = endpoints[interface]['url']
self.assertEqual(extra['%surl' % interface], expected_url)
session.commit()
session.close()
def insert_dict(self, session, table_name, d):
"""Naively inserts key-value pairs into a table, given a dictionary."""
this_table = sqlalchemy.Table(table_name, self.metadata, autoload=True)
insert = this_table.insert()
insert.execute(d)
session.commit()
def test_downgrade_to_0(self):
self.upgrade(self.max_version)
self.downgrade(0)
for table_name in ["user", "token", "role", "user_tenant_membership",
"metadata"]:
self.assertTableDoesNotExist(table_name)
def test_upgrade_add_domain_tables(self):
self.upgrade(6)
self.assertTableDoesNotExist('credential')
self.assertTableDoesNotExist('domain')
self.assertTableDoesNotExist('user_domain_metadata')
self.upgrade(7)
self.assertTableExists('credential')
self.assertTableColumns('credential', ['id', 'user_id', 'project_id',
'blob', 'type', 'extra'])
self.assertTableExists('domain')
self.assertTableColumns('domain', ['id', 'name', 'enabled', 'extra'])
self.assertTableExists('user_domain_metadata')
self.assertTableColumns('user_domain_metadata',
['user_id', 'domain_id', 'data'])
def test_metadata_table_migration(self):
# Scaffolding
session = self.Session()
self.upgrade(16)
domain_table = sqlalchemy.Table('domain', self.metadata, autoload=True)
user_table = sqlalchemy.Table('user', self.metadata, autoload=True)
role_table = sqlalchemy.Table('role', self.metadata, autoload=True)
project_table = sqlalchemy.Table(
'project', self.metadata, autoload=True)
metadata_table = sqlalchemy.Table(
'metadata', self.metadata, autoload=True)
# Create a Domain
domain = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'enabled': True}
session.execute(domain_table.insert().values(domain))
# Create a Project
project = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'domain_id': domain['id'],
'extra': "{}"}
session.execute(project_table.insert().values(project))
# Create another Project
project2 = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'domain_id': domain['id'],
'extra': "{}"}
session.execute(project_table.insert().values(project2))
# Create a User
user = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'domain_id': domain['id'],
'password': uuid.uuid4().hex,
'enabled': True,
'extra': json.dumps({})}
session.execute(user_table.insert().values(user))
# Create a Role
role = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex}
session.execute(role_table.insert().values(role))
# And another role
role2 = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex}
session.execute(role_table.insert().values(role2))
# Grant Role to User
role_grant = {'user_id': user['id'],
'tenant_id': project['id'],
'data': json.dumps({"roles": [role['id']]})}
session.execute(metadata_table.insert().values(role_grant))
role_grant = {'user_id': user['id'],
'tenant_id': project2['id'],
'data': json.dumps({"roles": [role2['id']]})}
session.execute(metadata_table.insert().values(role_grant))
session.commit()
self.upgrade(17)
user_project_metadata_table = sqlalchemy.Table(
'user_project_metadata', self.metadata, autoload=True)
r = session.execute('select data from metadata where '
'user_id=:user and tenant_id=:tenant',
{'user': user['id'], 'tenant': project['id']})
test_project1 = json.loads(r.fetchone()['data'])
self.assertEqual(len(test_project1['roles']), 1)
self.assertIn(role['id'], test_project1['roles'])
# Test user in project2 has role2
r = session.execute('select data from metadata where '
'user_id=:user and tenant_id=:tenant',
{'user': user['id'], 'tenant': project2['id']})
test_project2 = json.loads(r.fetchone()['data'])
self.assertEqual(len(test_project2['roles']), 1)
self.assertIn(role2['id'], test_project2['roles'])
# Test for user in project has role in user_project_metadata
# Migration 17 does not properly migrate this data, so this should
# be None.
r = session.execute('select data from user_project_metadata where '
'user_id=:user and project_id=:project',
{'user': user['id'], 'project': project['id']})
self.assertIsNone(r.fetchone())
# Create a conflicting user-project in user_project_metadata with
# a different role
data = json.dumps({"roles": [role2['id']]})
role_grant = {'user_id': user['id'],
'project_id': project['id'],
'data': data}
cmd = user_project_metadata_table.insert().values(role_grant)
self.engine.execute(cmd)
# End Scaffolding
session.commit()
# Migrate to 20
self.upgrade(20)
# The user-project pairs should have all roles from the previous
# metadata table in addition to any roles currently in
# user_project_metadata
r = session.execute('select data from user_project_metadata where '
'user_id=:user and project_id=:project',
{'user': user['id'], 'project': project['id']})
role_ids = json.loads(r.fetchone()['data'])['roles']
self.assertEqual(len(role_ids), 3)
self.assertIn(CONF.member_role_id, role_ids)
self.assertIn(role['id'], role_ids)
self.assertIn(role2['id'], role_ids)
# pairs that only existed in old metadata table should be in
# user_project_metadata
r = session.execute('select data from user_project_metadata where '
'user_id=:user and project_id=:project',
{'user': user['id'], 'project': project2['id']})
role_ids = json.loads(r.fetchone()['data'])['roles']
self.assertEqual(len(role_ids), 2)
self.assertIn(CONF.member_role_id, role_ids)
self.assertIn(role2['id'], role_ids)
self.assertTableDoesNotExist('metadata')
def test_upgrade_default_roles(self):
def count_member_roles():
session = self.Session()
query_string = ("select count(*) as c from role "
"where name='%s'" % config.CONF.member_role_name)
role_count = session.execute(query_string).fetchone()['c']
session.close()
return role_count
self.upgrade(16)
self.assertEquals(0, count_member_roles())
self.upgrade(17)
self.assertEquals(1, count_member_roles())
self.downgrade(16)
self.assertEquals(0, count_member_roles())
def check_uniqueness_constraints(self):
# Check uniqueness constraints for User & Project tables are
# correct following schema modification. The Group table's
# schema is never modified, so we don't bother to check that.
domain_table = sqlalchemy.Table('domain',
self.metadata,
autoload=True)
domain1 = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'enabled': True}
domain2 = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'enabled': True}
cmd = domain_table.insert().values(domain1)
self.engine.execute(cmd)
cmd = domain_table.insert().values(domain2)
self.engine.execute(cmd)
# First, the User table.
this_table = sqlalchemy.Table('user',
self.metadata,
autoload=True)
user = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'domain_id': domain1['id'],
'password': uuid.uuid4().hex,
'enabled': True,
'extra': json.dumps({})}
cmd = this_table.insert().values(user)
self.engine.execute(cmd)
# now insert a user with the same name into a different
# domain - which should work.
user['id'] = uuid.uuid4().hex
user['domain_id'] = domain2['id']
cmd = this_table.insert().values(user)
self.engine.execute(cmd)
# TODO(henry-nash): For now, as part of clean-up we delete one of these
# users. Although not part of this test, unless we do so the
        # downgrade(16->15) that is part of teardown will fail due to having
        # two users with clashing names as we try to revert to a single global
# name space. This limitation is raised as Bug #1125046 and the delete
# could be removed depending on how that bug is resolved.
cmd = this_table.delete(id=user['id'])
self.engine.execute(cmd)
# Now, the Project table.
this_table = sqlalchemy.Table('project',
self.metadata,
autoload=True)
project = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'domain_id': domain1['id'],
'description': uuid.uuid4().hex,
'enabled': True,
'extra': json.dumps({})}
cmd = this_table.insert().values(project)
self.engine.execute(cmd)
# now insert a project with the same name into a different
# domain - which should work.
project['id'] = uuid.uuid4().hex
project['domain_id'] = domain2['id']
cmd = this_table.insert().values(project)
self.engine.execute(cmd)
# TODO(henry-nash): For now, we delete one of the projects for the same
# reason as we delete one of the users (Bug #1125046). This delete
# could be removed depending on that bug resolution.
cmd = this_table.delete(id=project['id'])
self.engine.execute(cmd)
def test_upgrade_trusts(self):
self.assertEqual(self.schema.version, 0, "DB is at version 0")
self.upgrade(20)
self.assertTableColumns("token",
["id", "expires", "extra", "valid"])
self.upgrade(21)
self.assertTableColumns("trust",
["id", "trustor_user_id",
"trustee_user_id",
"project_id", "impersonation",
"deleted_at",
"expires_at", "extra"])
self.assertTableColumns("trust_role",
["trust_id", "role_id"])
self.assertTableColumns("token",
["id", "expires", "extra", "valid",
"trust_id", "user_id"])
def test_fixup_role(self):
session = self.Session()
self.assertEqual(self.schema.version, 0, "DB is at version 0")
self.upgrade(1)
self.insert_dict(session, "role", {"id": "test", "name": "test"})
self.upgrade(18)
self.insert_dict(session, "role", {"id": "test2",
"name": "test2",
"extra": None})
r = session.execute('select count(*) as c from role '
'where extra is null')
self.assertEqual(r.fetchone()['c'], 2)
session.commit()
self.upgrade(19)
r = session.execute('select count(*) as c from role '
'where extra is null')
self.assertEqual(r.fetchone()['c'], 0)
def test_legacy_endpoint_id(self):
session = self.Session()
self.upgrade(21)
service = {
'id': uuid.uuid4().hex,
'name': 'keystone',
'type': 'identity'}
self.insert_dict(session, 'service', service)
legacy_endpoint_id = uuid.uuid4().hex
endpoint = {
'id': uuid.uuid4().hex,
'service_id': service['id'],
'interface': uuid.uuid4().hex[:8],
'url': uuid.uuid4().hex,
'extra': json.dumps({
'legacy_endpoint_id': legacy_endpoint_id})}
self.insert_dict(session, 'endpoint', endpoint)
session.commit()
self.upgrade(22)
endpoint_table = sqlalchemy.Table(
'endpoint', self.metadata, autoload=True)
self.assertEqual(session.query(endpoint_table).count(), 1)
ref = session.query(endpoint_table).one()
self.assertEqual(ref.id, endpoint['id'], ref)
self.assertEqual(ref.service_id, endpoint['service_id'])
self.assertEqual(ref.interface, endpoint['interface'])
self.assertEqual(ref.url, endpoint['url'])
self.assertEqual(ref.legacy_endpoint_id, legacy_endpoint_id)
self.assertEqual(ref.extra, '{}')
def populate_user_table(self, with_pass_enab=False,
with_pass_enab_domain=False):
# Populate the appropriate fields in the user
# table, depending on the parameters:
#
# Default: id, name, extra
# pass_enab: Add password, enabled as well
# pass_enab_domain: Add password, enabled and domain as well
#
this_table = sqlalchemy.Table("user",
self.metadata,
autoload=True)
for user in default_fixtures.USERS:
extra = copy.deepcopy(user)
extra.pop('id')
extra.pop('name')
if with_pass_enab:
password = extra.pop('password', None)
enabled = extra.pop('enabled', True)
ins = this_table.insert().values(
{'id': user['id'],
'name': user['name'],
'password': password,
'enabled': bool(enabled),
'extra': json.dumps(extra)})
else:
if with_pass_enab_domain:
password = extra.pop('password', None)
enabled = extra.pop('enabled', True)
extra.pop('domain_id')
ins = this_table.insert().values(
{'id': user['id'],
'name': user['name'],
'domain_id': user['domain_id'],
'password': password,
'enabled': bool(enabled),
'extra': json.dumps(extra)})
else:
ins = this_table.insert().values(
{'id': user['id'],
'name': user['name'],
'extra': json.dumps(extra)})
self.engine.execute(ins)
def populate_tenant_table(self, with_desc_enab=False,
with_desc_enab_domain=False):
# Populate the appropriate fields in the tenant or
# project table, depending on the parameters
#
# Default: id, name, extra
# desc_enab: Add description, enabled as well
# desc_enab_domain: Add description, enabled and domain as well,
# plus use project instead of tenant
#
if with_desc_enab_domain:
# By this time tenants are now projects
this_table = sqlalchemy.Table("project",
self.metadata,
autoload=True)
else:
this_table = sqlalchemy.Table("tenant",
self.metadata,
autoload=True)
for tenant in default_fixtures.TENANTS:
extra = copy.deepcopy(tenant)
extra.pop('id')
extra.pop('name')
if with_desc_enab:
desc = extra.pop('description', None)
enabled = extra.pop('enabled', True)
ins = this_table.insert().values(
{'id': tenant['id'],
'name': tenant['name'],
'description': desc,
'enabled': bool(enabled),
'extra': json.dumps(extra)})
else:
if with_desc_enab_domain:
desc = extra.pop('description', None)
enabled = extra.pop('enabled', True)
extra.pop('domain_id')
ins = this_table.insert().values(
{'id': tenant['id'],
'name': tenant['name'],
'domain_id': tenant['domain_id'],
'description': desc,
'enabled': bool(enabled),
'extra': json.dumps(extra)})
else:
ins = this_table.insert().values(
{'id': tenant['id'],
'name': tenant['name'],
'extra': json.dumps(extra)})
self.engine.execute(ins)
def select_table(self, name):
table = sqlalchemy.Table(name,
self.metadata,
autoload=True)
s = sqlalchemy.select([table])
return s
def assertTableExists(self, table_name):
try:
self.select_table(table_name)
except sqlalchemy.exc.NoSuchTableError:
raise AssertionError('Table "%s" does not exist' % table_name)
def assertTableDoesNotExist(self, table_name):
"""Asserts that a given table exists cannot be selected by name."""
# Switch to a different metadata otherwise you might still
# detect renamed or dropped tables
try:
temp_metadata = sqlalchemy.MetaData()
temp_metadata.bind = self.engine
sqlalchemy.Table(table_name, temp_metadata, autoload=True)
except sqlalchemy.exc.NoSuchTableError:
pass
else:
raise AssertionError('Table "%s" already exists' % table_name)
def upgrade(self, *args, **kwargs):
self._migrate(*args, **kwargs)
def downgrade(self, *args, **kwargs):
self._migrate(*args, downgrade=True, **kwargs)
def _migrate(self, version, repository=None, downgrade=False):
repository = repository or self.repo_path
err = ''
version = versioning_api._migrate_version(self.schema,
version,
not downgrade,
err)
changeset = self.schema.changeset(version)
for ver, change in changeset:
self.schema.runchange(ver, change, changeset.step)
self.assertEqual(self.schema.version, version)
| kwss/keystone | tests/test_sql_upgrade.py | Python | apache-2.0 | 38,724 |
import time
from math import fabs
import putil.timer
from putil.testing import UtilTest
class TestTimer(UtilTest):
def setUp(self):
self.op1_times = iter([ .01, .02 ])
self.a1 = putil.timer.Accumulator()
self.op2_step1_times = iter([ .005, .015, .005, .005])
self.op2_step2_times = iter([ .01, .02, .01, .01])
self.a2 = putil.timer.Accumulator()
def test_found_caller(self):
import importable.create_timer
t = importable.create_timer.t
self.assertEquals('timing.putil.test.importable.create_timer', t.logger.name)
def test_time_event(self):
t = putil.timer.Timer()
time.sleep(0.01)
t.complete_step('pause')
time.sleep(0.02)
t.complete_step()
self.assertEquals(3, len(t.times))
def one_step_operation(self):
t = putil.timer.Timer()
time.sleep(self.op1_times.next())
t.complete_step()
self.a1.add(t)
def test_stats_one_step(self):
try:
while True:
self.one_step_operation()
except StopIteration:
pass
self.assertEquals(2, self.a1.get_count())
self.assertAlmostEqual(self.a1.get_average(), 0.015, places=2)
self.assertTrue( fabs(self.a1.get_average()-0.015) < .002 )
self.assertAlmostEqual(self.a1.get_standard_deviation(), 0.005, places=2)
def two_step_operation(self):
t = putil.timer.Timer()
time.sleep(self.op2_step1_times.next())
t.complete_step('one')
time.sleep(self.op2_step2_times.next())
t.complete_step('two')
self.a2.add(t)
def test_stats_two_steps(self):
try:
while True:
self.two_step_operation()
except StopIteration:
pass
self.assertEquals(8, self.a2.get_count())
self.assertEquals(4, self.a2.get_count("one"))
self.assertEquals(4, self.a2.get_count("two"))
self.assertAlmostEqual(self.a2.get_average(), 0.01, places=2)
self.assertAlmostEqual(self.a2.get_average("one"), 0.008, places=2)
self.assertAlmostEqual(self.a2.get_average("two"), 0.013, places=2)
self.assertNotEquals(0, self.a2.get_standard_deviation())
| crchemist/scioncc | src/putil/test/test_timer.py | Python | bsd-2-clause | 2,264 |
# Copyright 2017 AT&T Corporation.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.lib.services.network import subnetpools_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestSubnetsClient(base.BaseServiceTest):
FAKE_SUBNETPOOLS = {
"subnetpools": [
{
"min_prefixlen": "64",
"address_scope_id": None,
"default_prefixlen": "64",
"id": "03f761e6-eee0-43fc-a921-8acf64c14988",
"max_prefixlen": "64",
"name": "my-subnet-pool-ipv6",
"default_quota": None,
"is_default": False,
"project_id": "9fadcee8aa7c40cdb2114fff7d569c08",
"tenant_id": "9fadcee8aa7c40cdb2114fff7d569c08",
"prefixes": [
"2001:db8:0:2::/64",
"2001:db8::/63"
],
"ip_version": 6,
"shared": False,
"description": "",
"revision_number": 2
},
{
"min_prefixlen": "24",
"address_scope_id": None,
"default_prefixlen": "25",
"id": "f49a1319-423a-4ee6-ba54-1d95a4f6cc68",
"max_prefixlen": "30",
"name": "my-subnet-pool-ipv4",
"default_quota": None,
"is_default": False,
"project_id": "9fadcee8aa7c40cdb2114fff7d569c08",
"tenant_id": "9fadcee8aa7c40cdb2114fff7d569c08",
"prefixes": [
"10.10.0.0/21",
"192.168.0.0/16"
],
"ip_version": 4,
"shared": False,
"description": "",
"revision_number": 2
}
]
}
FAKE_SUBNETPOOL_ID = "03f761e6-eee0-43fc-a921-8acf64c14988"
def setUp(self):
super(TestSubnetsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.subnetpools_client = subnetpools_client.SubnetpoolsClient(
fake_auth, 'compute', 'regionOne')
def _test_list_subnetpools(self, bytes_body=False):
self.check_service_client_function(
self.subnetpools_client.list_subnetpools,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_SUBNETPOOLS,
bytes_body,
200)
def _test_create_subnetpool(self, bytes_body=False):
self.check_service_client_function(
self.subnetpools_client.create_subnetpool,
'tempest.lib.common.rest_client.RestClient.post',
{'subnetpool': self.FAKE_SUBNETPOOLS['subnetpools'][1]},
bytes_body,
201,
name="my-subnet-pool-ipv4",
prefixes=["192.168.0.0/16", "10.10.0.0/21"])
def _test_show_subnetpool(self, bytes_body=False):
self.check_service_client_function(
self.subnetpools_client.show_subnetpool,
'tempest.lib.common.rest_client.RestClient.get',
{'subnetpool': self.FAKE_SUBNETPOOLS['subnetpools'][0]},
bytes_body,
200,
subnetpool_id=self.FAKE_SUBNETPOOL_ID)
def _test_update_subnetpool(self, bytes_body=False):
update_kwargs = {
"name": "my-new-subnetpool-name",
"prefixes": [
"2001:db8::/64",
"2001:db8:0:1::/64",
"2001:db8:0:2::/64"
],
"min_prefixlen": 64,
"default_prefixlen": 64,
"max_prefixlen": 64
}
resp_body = {
'subnetpool': copy.deepcopy(
self.FAKE_SUBNETPOOLS['subnetpools'][0])
}
resp_body['subnetpool'].update(update_kwargs)
self.check_service_client_function(
self.subnetpools_client.update_subnetpool,
'tempest.lib.common.rest_client.RestClient.put',
resp_body,
bytes_body,
200,
subnetpool_id=self.FAKE_SUBNETPOOL_ID,
**update_kwargs)
def test_list_subnetpools_with_str_body(self):
self._test_list_subnetpools()
def test_list_subnetpools_with_bytes_body(self):
self._test_list_subnetpools(bytes_body=True)
def test_create_subnetpool_with_str_body(self):
self._test_create_subnetpool()
def test_create_subnetpool_with_bytes_body(self):
self._test_create_subnetpool(bytes_body=True)
def test_show_subnetpool_with_str_body(self):
self._test_show_subnetpool()
def test_show_subnetpool_with_bytes_body(self):
self._test_show_subnetpool(bytes_body=True)
def test_update_subnet_with_str_body(self):
self._test_update_subnetpool()
def test_update_subnet_with_bytes_body(self):
self._test_update_subnetpool(bytes_body=True)
def test_delete_subnetpool(self):
self.check_service_client_function(
self.subnetpools_client.delete_subnetpool,
'tempest.lib.common.rest_client.RestClient.delete',
{},
status=204,
subnetpool_id=self.FAKE_SUBNETPOOL_ID)
| vedujoshi/tempest | tempest/tests/lib/services/network/test_subnetpools_client.py | Python | apache-2.0 | 5,814 |
# -*- coding: utf-8 -*-
# from twisted.words.xish import domish
from base import *
import random
from twisted.internet import defer
answers = ('Pong, чо.',
'Pong, хуле.',
'Pong, блин, pong.',
'Pong. А что я по-твоему должен был ответить?',
'Pong です!',
'Pong. А ты с какова раёна будешь?',
'P\xcc\x83\xcc\x8c\xcc\x8c\xcc\x94\xcc\x82\xcc\xa1\xcc\xa2\xcd\x9c\xcd\x93\xcc\x96\xcd\x8e\xcc\xa4\xcc\xb3\xcc\xa3\xcd\x96\xcc\xb9\xcd\x99o\xcc\x8e\xcd\xa5\xcd\x8a\xcc\x90\xcc\xbe\xcd\xaf\xcd\x8d\xcc\xb0\xcc\x9c\xcc\xad\xcc\xa6\xcd\x96\xcc\xaf\xcc\x9f\xcc\x97n\xcd\x8b\xcc\xa2\xcc\x98\xcc\xb0\xcd\x8e\xcc\xb0\xcc\x9c\xcc\xaa\xcc\xa0\xcd\x99g\xcc\x8c\xcd\x8b\xcc\x86\xcd\xa5\xcd\x82\xcc\xb6\xcc\xb4\xcc\xa3\xcd\x96\xcc\xb9\xcd\x87\xcc\xb3\xcd\x95\xcc\x98\xcc\xa0',
'Pong. А ты знаешь об опции -s / --safe?')
@require_auth
def cmd_ping(request, safe=None):
""" Пинг """
return dict(ok=True, desc='Pong.' if safe else random.choice(answers))
def cmd_fuckoff(request, *args, **kwargs):
return dict(ok=False, desc='Fuck off. Not implemented')
| ojab/bnw | bnw/handlers/command_ping.py | Python | bsd-2-clause | 1,209 |
# The Nexus software is licensed under the BSD 2-Clause license.
#
# You should have recieved a copy of this license with the software.
# If you did not, you can find one at the following link.
#
# http://opensource.org/licenses/bsd-license.php
import logging
from core.plugins import ProtocolPlugin
from core.decorators import *
from core.constants import *
from core.server import *
class KickBanPlugin(ProtocolPlugin):
commands = {
"ban": "commandBan",
"banb": "commandBanBoth",
"ipban": "commandIpban",
"ipreason": "commandIpreason",
"kick": "commandKick",
"mkick": "commandMassKick",
"masskick": "commandMassKick",
"banreason": "commandReason",
"unban": "commandUnban",
"unipban": "commandUnipban",
"banned": "commandBanned",
"freeze": "commandFreeze",
"stop": "commandFreeze",
"unfreeze": "commandUnFreeze",
"defreeze": "commandUnFreeze",
"unstop": "commandUnFreeze",
#"ipshun": "commandIpshun",
#"unipshun": "commandUnipshun",
#"ipspec": "commandIpshun",
#"unipspec": "commandUnipshun",
}
@player_list
@admin_only
def commandBanned(self, parts, fromloc, overriderank):
"/banned [page] - Admin\nShows who is Banned."
if len(parts)==2:
try:
page = int(parts[1])
except ValueError:
self.client.sendServerMessage("Page must be a Number.")
return
else:
page = 1
bannedNames = []
for element in self.client.factory.banned.keys():
bannedNames.append(element)
if len(bannedNames) > 0:
bannedNames.sort()
self.client.sendServerPagedList("Banned:", bannedNames, page)
else:
self.client.sendServerList(["Banned: No one."])
@player_list
@mod_only
@username_command
def commandKick(self, user, fromloc, overriderank, params=[]):
"/kick username [reason] - Mod\nKicks the user off the server."
reason = " ".join(params)
user.sendErrorAction(ACTION_KICK, self.client, reason)
self.client.announceGlobal(ACTION_KICK, user.username, reason)
self.client.sendServerMessage("User %s kicked." % user.username)
@player_list
@director_only
def commandMassKick(self, parts, fromloc, overriderank):
"/mkick - Director\nKicks all users off the server."
for user in self.client.factory.usernames:
if user.lower() != self.client.username.lower():
self.client.factory.usernames[user].sendError("%s kicked everyone!" % self.client.username)
self.client.factory.queue.put((self.client, TASK_SERVERURGENTMESSAGE, "[MASSKICK] %s kicked everyone." % self.client.username))
@player_list
@director_only
@only_username_command
def commandBanBoth(self, username, fromloc, overriderank, params=[]):
"/banb username reason - Director\nName and IP ban a user from this server."
if not params:
self.client.sendServerMessage("Please give a reason.")
else:
if username in self.client.factory.usernames:
self.commandIpban(["/banb", username] + params, fromloc, overriderank)
self.commandBan(["/banb", username] + params, fromloc, overriderank)
@player_list
@admin_only
def commandBan(self, parts, fromloc, overriderank):
"/ban username reason - Admin\nBans the Player from this server."
        if len(parts) <= 1:
            self.client.sendServerMessage("Please specify a reason.")
            return
        username = parts[1].lower()
if self.client.factory.isBanned(username):
self.client.sendServerMessage("%s is already banned." % username)
else:
reason = " ".join(parts[2:])
self.client.factory.addBan(username, reason)
if username in self.client.factory.usernames:
self.client.factory.usernames[username].sendErrorAction(ACTION_BAN, self.client, reason)
self.client.announceGlobal(ACTION_BAN, username, reason)
self.client.sendServerMessage("%s has been banned for %s." % (username, reason))
@director_only
def commandIpban(self, parts, fromloc, overriderank):
"/ipban username reason - Director\nBan a user's IP from this server."
if len(parts) >= 2:
username = parts[1].lower()
if username in self.client.factory.usernames:
ip = self.client.factory.usernames[username].transport.getPeer().host
if self.client.factory.isIpBanned(ip):
self.client.sendServerMessage("%s is already IPBanned." % ip)
else:
reason = " ".join(parts[2:])
self.client.factory.addIpBan(ip, reason)
self.client.factory.usernames[username].sendErrorAction(ACTION_IPBAN, self.client, reason)
self.client.announceGlobal(ACTION_IPBAN, username, reason)
self.client.sendServerMessage("%s has been IPBanned." % ip)
else:
self.client.sendServerMessage("%s is not online." % username)
else:
self.client.sendServerMessage("Please include a user to IPBan.")
@player_list
@admin_only
@only_username_command
def commandUnban(self, username, fromloc, overriderank):
"/unban username - Admin\nRemoves the Ban on the user."
if not self.client.factory.isBanned(username):
self.client.sendServerMessage("%s is not banned." % username)
else:
self.client.factory.removeBan(username)
self.client.announceGlobal(ACTION_UNBAN, username)
self.client.sendServerMessage("%s has been unbanned." % username)
@player_list
@director_only
@only_string_command("IP")
def commandUnipban(self, ip, fromloc, overriderank):
"/unipban ip - Director\nRemoves the Ban on the IP."
if not self.client.factory.isIpBanned(ip):
self.client.sendServerMessage("%s is not Banned." % ip)
else:
self.client.factory.removeIpBan(ip)
self.client.sendServerMessage("%s UnBanned." % ip)
@player_list
@admin_only
@only_username_command
def commandReason(self, username, fromloc, overriderank):
"/banreason username - Admin\nGives the reason a user was Banned."
if not self.client.factory.isBanned(username):
self.client.sendServerMessage("%s is not Banned." % username)
else:
self.client.sendServerMessage("Reason: %s" % self.client.factory.banReason(username))
@player_list
@director_only
@only_string_command("IP")
def commandIpreason(self, ip, fromloc, overriderank):
"/ipreason username - Director\nGives the reason an IP was Banned."
if not self.client.factory.isIpBanned(ip):
self.client.sendServerMessage("%s is not Banned." % ip)
else:
self.client.sendServerMessage("Reason: %s" % self.client.factory.ipBanReason(ip))
@player_list
@mod_only
def commandUnFreeze(self, parts, fromloc, overriderank):
"/unfreeze username - Mod\nAliases: defreeze, unstop\nUnfreezes the user, allowing them to move again."
try:
username = parts[1]
except:
self.client.sendServerMessage("No username given.")
return
try:
user = self.client.factory.usernames[username]
except:
self.client.sendServerMessage("User is not online.")
return
user.frozen = False
self.client.sendServerMessage("%s has been unfrozen." % username)
user.sendNormalMessage("&4You have been unfrozen by %s!" % self.client.username)
@player_list
@mod_only
def commandFreeze(self, parts, fromloc, overriderank):
"/freeze username - Mod\nAliases: stop\nFreezes the user, preventing them from moving."
try:
username = parts[1]
except:
self.client.sendServerMessage("No username given.")
return
try:
user = self.client.factory.usernames[username]
except:
self.client.sendErrorMessage("User is not online.")
return
user.frozen = True
if self.client.isOnlyHiddenNotVisibleStaff():
user.sendNormalMessage("&4You have been frozen!")
else:
user.sendNormalMessage("&4You have been frozen by %s!" % self.client.username)
self.client.sendServerMessage("%s has been frozen." % username)
#@player_list
#@mod_only
#@only_username_command
#def commandIpshun(self, username, fromloc, overriderank):
# "/ipspec username - Mod\nAliases: ipshun\nIPSpec a user's IP in this server."
# ip = self.client.factory.usernames[username].transport.getPeer().host
# if self.client.factory.isIpShunned(ip):
# self.client.sendServerMessage("%s is already IPSpecced." % ip)
# else:
# self.client.factory.addIpShun(ip)
# if username in self.client.factory.usernames:
# self.client.factory.usernames[username].sendServerMessage("You got IPSpecced!")
# self.client.sendServerMessage("%s has been IPSpecced." % ip)
# logging.log(logging.INFO,self.client.username + ' IPSpecced ' + username + ip)
#@player_list
#@mod_only
#@only_string_command("IP")
#def commandUnipshun(self, ip, fromloc, overriderank):
# "/unipspec ip - Mod\nAliases: unipshun\nRemoves the IPSpec on the IP."
# if not self.client.factory.isIpShunned(ip):
# self.client.sendServerMessage("%s is not IPSpecced." % ip)
# else:
# self.client.factory.removeIpShun(ip)
# self.client.sendServerMessage("%s UnIPSpecced." % ip)
# logging.log(logging.INFO,self.client.username + ' UnIPSpecced ' + ip)
| TheArchives/Nexus | core/plugins/kickban.py | Python | bsd-2-clause | 10,108 |
import os.path
import string
import subprocess
import sys
try:
"""
If used in a package, package logging functions are used instead of stderr.
"""
from . import debug, info, warning, error, fatal
except:
def error(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
debug = info = warning = fatal = error
from . import ConverterBase, SplitterException
class AviDemuxException(SplitterException):
pass
# The first entry here becomes the default
containers = [
('AVI', 'odmlType=1'),
('MKV', 'forceDisplayWidth=False', 'displayWidth=1280'),
('MP4V2', 'optimize=0', 'add_itunes_metadata=0'),
('MP4', 'muxerType=0', 'useAlternateMp3Tag=True'),
	('OGM',) ]
debug( "Default container is {}".format(containers[0]) )
dirname, _ = os.path.split(__file__)
with open(os.path.join(dirname,'AviDemux.template')) as fi:
script_template = fi.read()
def probe(*args):
'''
AviDemux doesn't have an info command (right?), so this is a wrapper for the 'file' utility
'''
	def parse_line(b, prefix='', encoding='latin-1'):
line = b.decode(encoding).rstrip()
return any(w in line for w in [ 'AVI', 'MPEG' ])
for filename in args:
proc = subprocess.Popen([ 'file', filename ],
stdin=subprocess.DEVNULL,
stdout=subprocess.PIPE)
		stdout_contents, _ = proc.communicate()
		assert not proc.returncode
if not stdout_contents:
return False
lines = stdout_contents.split(b'\n')
if not parse_line(lines[0]):
return False
return True
class AviDemuxConverter(ConverterBase):
@staticmethod
def check_filenames(*args):
for filename in args:
if 255 < len(os.path.abspath(filename)):
raise TinyPyException("Filename too long")
@staticmethod
def match_filenames(*args):
r = []
y = r.append
for arg in args:
_, ext = os.path.splitext(arg)
if ext.upper() in ( '.AVI', '.DIVX', '.FLV', '.MKV', '.OGM', '.WEBM', '.XVID' ):
y(arg)
return r
def __init__(self, **kwargs):
self.dry_run = kwargs.pop('dry_run', None)
if sys.platform.startswith('win'):
self.executable = 'AVIDEMUX.EXE'
else:
self.executable = 'avidemux3_cli'
self.extra_options = kwargs
def get_commands(self, input_filename,
output_filename='',
script_filename='',
container=containers[0],
video_filters=[],
**kwargs):
options = kwargs
self.check_filenames(input_filename)
dirname, basename = os.path.split(input_filename)
filepart, ext = os.path.splitext(basename)
if not script_filename:
script_filename = basename+'.AviDemux.py'
if output_filename:
output_filepart, output_ext = os.path.splitext(output_filename) # inelegant
else:
output_filepart, output_ext = filepart, ext
output_ext = options.pop('output_ext', output_ext.upper())
if video_filters:
container = containers[1]
parts, frames = [], []
if 'splits' in options:
# expects decimal seconds
parts = options.pop('splits')
if 'frames' in options:
frames = [ (b or None, e or None) for (b, e) in options.pop('frames') ]
if parts and frames:
warning("Refusing to split on both second and frame number, using {}".format(parts))
t = string.Template(options.pop('template', script_template))
# prepare local variables for TinyPy:
if parts:
parts = '\n'.join(wrap(parts))
if frames:
frames = '\n'.join(wrap(frames))
loc = locals()
#
for k, v in options.items():
debug("Extra parameter unused: {}={}".format(k, v))
for k, v in loc.items():
if not k.startswith('_'):
debug("AviDemux variable: {}={}".format(k, v))
with open(script_filename, 'w') as ofo:
ofo.write(t.substitute(loc))
return [ [ self.executable, '--run', script_filename ] ]
def parse_output(self, streams, **kwargs):
'''
Encoding is Latin-1 because AviDemux emits control characters
'''
stdout_contents, stderr_contents = streams
debug( "{:,}B of stdout".format(len(stdout_contents)) )
debug( "{:,}B of stderr".format(len(stderr_contents)) )
for b in avoid_duplicates(stderr_contents.split(b'\n'), encoding='latin-1'):
parse_line(b, prefix='STDERR')
for b in avoid_duplicates(stdout_contents.split(b'\n'), encoding='latin-1'):
parse_line(b)
return kwargs.pop('returncode')==0, []
def parse_line(b, prefix='STDOUT', encoding='latin-1'):
line = b.decode(encoding).rstrip()
if not line or 'PerfectAudio' in line: # TONS of output
return
if line.startswith('[Script]'):
line = line[len('[Script]')+1:]
engine, line = line.split(' ', 1)
if engine=='Tinypy' and line.startswith('INFO'): # INFO -
info(line[len('INFO - '):])
else:
debug(engine+' '+line)
else:
debug(prefix+' '+line)
###
if sys.platform.startswith('win'):
executable = 'AVIDEMUX.EXE'
else:
executable = 'avidemux3_cli'
debug("AviDemux is "+executable)
def AviDemux_command(input_filename, output_filename='', script_filename='', container=containers[0], **kwargs):
if 255 < len(os.path.abspath(input_filename)):
raise TinyPyException("Filename {} too long".format(input_filename))
dirname, basename = os.path.split(input_filename)
filepart, ext = os.path.splitext(basename)
if not script_filename:
script_filename = basename+'.AviDemux.py'
if output_filename:
output_filepart, output_ext = os.path.splitext(output_filename) # inelegant
else:
output_filepart, output_ext = filepart, ext
output_ext = kwargs.pop('output_ext', output_ext.upper())
video_filters = kwargs.pop('video_filters', [])
if video_filters:
container = containers[1]
parts, frames = [], []
if 'splits' in kwargs:
# expects decimal seconds
parts = kwargs.pop('splits')
if 'frames' in kwargs:
frames = [ (b or None, e or None) for (b, e) in kwargs.pop('frames') ]
if parts and frames:
warning("Refusing to split on both second and frame number, using {}".format(parts))
t = string.Template(kwargs.pop('template', None) or script_template)
# prepare local variables for TinyPy:
if parts:
parts = '\n'.join(wrap(parts))
if frames:
frames = '\n'.join(wrap(frames))
loc = locals()
#
for k, v in kwargs.items():
debug("Extra parameter unused: {}={}".format(k, v))
for k, v in loc.items():
if not k.startswith('_'):
debug("AviDemux variable: {}={}".format(k, v))
with open(script_filename, 'w') as ofo:
ofo.write(t.substitute(loc))
return [ executable, '--run', script_filename ]
def parse_output(outs, errs='', returncode=None, stream_encoding='latin-1'):
'''
Encoding is Latin-1 because AviDemux emits control characters
'''
def parse_line(b, prefix='STDOUT', encoding=stream_encoding):
line = b.decode(encoding).rstrip()
if not line:
pass
elif line.startswith('[Script]'):
line = line[9:]
engine, line = line.split(' ', 1)
if engine=='Tinypy' and line.startswith('INFO'): # INFO -
info(line[7:])
else:
debug(engine+' '+line)
elif 'PerfectAudio' in line: # silently drop TONS of output
pass
else:
debug(prefix+' '+line)
for b in avoid_duplicates(errs.splitlines(), encoding=stream_encoding):
parse_line(b, prefix='STDERR')
for b in avoid_duplicates(outs.splitlines(), encoding=stream_encoding):
parse_line(b)
return returncode
### EOF
| matt-hayden/video-clip-splitter | videoclipsplitter/AviDemux.py | Python | unlicense | 7,071 |
import time
from torba.server import util
def sessions_lines(data):
"""A generator returning lines for a list of sessions.
data is the return value of rpc_sessions()."""
fmt = ('{:<6} {:<5} {:>17} {:>5} {:>5} {:>5} '
'{:>7} {:>7} {:>7} {:>7} {:>7} {:>9} {:>21}')
yield fmt.format('ID', 'Flags', 'Client', 'Proto',
'Reqs', 'Txs', 'Subs',
'Recv', 'Recv KB', 'Sent', 'Sent KB', 'Time', 'Peer')
for (id_, flags, peer, client, proto, reqs, txs_sent, subs,
recv_count, recv_size, send_count, send_size, time) in data:
yield fmt.format(id_, flags, client, proto,
'{:,d}'.format(reqs),
'{:,d}'.format(txs_sent),
'{:,d}'.format(subs),
'{:,d}'.format(recv_count),
'{:,d}'.format(recv_size // 1024),
'{:,d}'.format(send_count),
'{:,d}'.format(send_size // 1024),
util.formatted_time(time, sep=''), peer)
def groups_lines(data):
"""A generator returning lines for a list of groups.
data is the return value of rpc_groups()."""
fmt = ('{:<6} {:>9} {:>9} {:>6} {:>6} {:>8}'
'{:>7} {:>9} {:>7} {:>9}')
yield fmt.format('ID', 'Sessions', 'Bwidth KB', 'Reqs', 'Txs', 'Subs',
'Recv', 'Recv KB', 'Sent', 'Sent KB')
for (id_, session_count, bandwidth, reqs, txs_sent, subs,
recv_count, recv_size, send_count, send_size) in data:
yield fmt.format(id_,
'{:,d}'.format(session_count),
'{:,d}'.format(bandwidth // 1024),
'{:,d}'.format(reqs),
'{:,d}'.format(txs_sent),
'{:,d}'.format(subs),
'{:,d}'.format(recv_count),
'{:,d}'.format(recv_size // 1024),
'{:,d}'.format(send_count),
'{:,d}'.format(send_size // 1024))
def peers_lines(data):
"""A generator returning lines for a list of peers.
data is the return value of rpc_peers()."""
def time_fmt(t):
if not t:
return 'Never'
return util.formatted_time(now - t)
now = time.time()
fmt = ('{:<30} {:<6} {:>5} {:>5} {:<17} {:>4} '
'{:>4} {:>8} {:>11} {:>11} {:>5} {:>20} {:<15}')
yield fmt.format('Host', 'Status', 'TCP', 'SSL', 'Server', 'Min',
'Max', 'Pruning', 'Last Good', 'Last Try',
'Tries', 'Source', 'IP Address')
for item in data:
features = item['features']
hostname = item['host']
host = features['hosts'][hostname]
yield fmt.format(hostname[:30],
item['status'],
host.get('tcp_port') or '',
host.get('ssl_port') or '',
features['server_version'] or 'unknown',
features['protocol_min'],
features['protocol_max'],
features['pruning'] or '',
time_fmt(item['last_good']),
time_fmt(item['last_try']),
item['try_count'],
item['source'][:20],
item['ip_addr'] or '')
| lbryio/lbry | torba/torba/server/text.py | Python | mit | 3,433 |
import pygame
from constants import SCREEN, MAX_FPS, SHOW_FPS
from misc_functions import show_fps, set_joysticks
def Demo_TitleScreen():
title = pygame.image.load('images/demo/titlescreen.png').convert()
'''
if MUSIC:
music = pygame.mixer.music.load('sound/music/cheetah.mp3')
pygame.mixer.music.play(-1)
'''
clock = pygame.time.Clock()
over = False
while not over:
for event in pygame.event.get():
if event.type == pygame.QUIT:
return True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
return True
elif event.key == pygame.K_RETURN:
return False
SCREEN.blit(title, (0,0))
pygame.display.flip()
clock.tick(10)
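# Demo_TitleScreen returns True when the player quits (window close or Escape) and
# False when Return is pressed to continue past the demo title screen.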
def Main_TitleScreen(MUTE_MUSIC): # boolean: pause the background music at startup when True
title = pygame.image.load('images/backgrounds/TitleScreen.png').convert()
music = pygame.mixer.music.load('sound/music/RHFgameselect.mp3')
pygame.mixer.music.play(-1)
if MUTE_MUSIC:
pygame.mixer.music.pause()
joysticks = set_joysticks()
for joy in joysticks:
joy.init()
clock = pygame.time.Clock()
over = False
while not over:
for event in pygame.event.get():
if event.type == pygame.QUIT:
return True, True
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
return True, True
elif event.key == pygame.K_RETURN:
return False, False
elif event.type == pygame.JOYBUTTONDOWN:
if event.button == 3:
return True, True
return False, False
SCREEN.blit(title, (0,0))
FPS = clock.get_fps()
if SHOW_FPS:
show_fps(FPS)
pygame.display.flip()
clock.tick(MAX_FPS)
| AsparagusEdu/GraviSwitch | code/titlescreen.py | Python | apache-2.0 | 1,582 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import threading
import time
import tensorflow as tf
from tensorboard.plugins.debugger_v2 import debug_data_multiplexer
mock = tf.compat.v1.test.mock
class RunInBackgroundRepeatedlyTest(tf.test.TestCase):
def testRunInBackgroundRepeatedlyThreeTimes(self):
state = {"counter": 0}
def run_three_times():
state["counter"] += 1
if state["counter"] == 3:
raise StopIteration()
OriginalThread = threading.Thread
with mock.patch.object(
threading,
"Thread",
# Use a non-daemon thread for testing. A non-daemon thread
# will block the test process from exiting if not terminated
# properly. Here the thread is expected to be terminated by the
# `StopIteration` raised by `run_three_times()`.
lambda target, daemon: OriginalThread(target=target, daemon=False),
):
(
interrupt_event,
thread,
) = debug_data_multiplexer.run_repeatedly_in_background(
run_three_times,
None, # `interval_sec is None` means indefinite wait()
)
interrupt_event.set()
time.sleep(0.05)
interrupt_event.set()
time.sleep(0.05)
interrupt_event.set()
thread.join()
self.assertEqual(state["counter"], 3)
class ParseTensorNameTest(tf.test.TestCase):
def testParseTensorNameWithNoOutputSlot(self):
op_name, slot = debug_data_multiplexer.parse_tensor_name("MatMul_1")
self.assertEqual(op_name, "MatMul_1")
self.assertEqual(slot, 0)
def testParseTensorNameWithZeroOutputSlot(self):
op_name, slot = debug_data_multiplexer.parse_tensor_name("MatMul_1:0")
self.assertEqual(op_name, "MatMul_1")
self.assertEqual(slot, 0)
def testParseTensorNameWithNonZeroOutputSlot(self):
op_name, slot = debug_data_multiplexer.parse_tensor_name("Unpack:10")
self.assertEqual(op_name, "Unpack")
self.assertEqual(slot, 10)
def testParseTensorNameWithInvalidSlotRaisesValueError(self):
with self.assertRaises(ValueError):
debug_data_multiplexer.parse_tensor_name("Unpack:10:10")
if __name__ == "__main__":
tf.test.main()
| tensorflow/tensorboard | tensorboard/plugins/debugger_v2/debug_data_multiplexer_test.py | Python | apache-2.0 | 3,017 |
# -*- coding: utf8 -*-
from __future__ import print_function, division, absolute_import
import collections
import functools
import os
import struct
import sys
import uuid
import weakref
from copy import deepcopy
from numba import _dispatcher, compiler, utils, types, config, errors
from numba.typeconv.rules import default_type_manager
from numba import sigutils, serialize, typing
from numba.typing.templates import fold_arguments
from numba.typing.typeof import Purpose, typeof
from numba.bytecode import get_code_object
from numba.six import create_bound_method, reraise
from .caching import NullCache, FunctionCache
class OmittedArg(object):
"""
A placeholder for omitted arguments with a default value.
"""
def __init__(self, value):
self.value = value
def __repr__(self):
return "omitted arg(%r)" % (self.value,)
@property
def _numba_type_(self):
return types.Omitted(self.value)
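# Illustrative (based on the handling in _DispatcherBase below): for a jitted function
# defined as "def f(x, y=3)", calling f(1) supplies OmittedArg(3) for the missing y, so
# the typing step sees types.Omitted(3) rather than a concrete value type.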
class _FunctionCompiler(object):
def __init__(self, py_func, targetdescr, targetoptions, locals,
pipeline_class):
self.py_func = py_func
self.targetdescr = targetdescr
self.targetoptions = targetoptions
self.locals = locals
self.pysig = utils.pysignature(self.py_func)
self.pipeline_class = pipeline_class
# Remember key=(args, return_type) combinations that will fail
# compilation to avoid compilation attempt on them. The values are
# the exceptions.
self._failed_cache = {}
def fold_argument_types(self, args, kws):
"""
Given positional and named argument types, fold keyword arguments
and resolve defaults by inserting types.Omitted() instances.
A (pysig, argument types) tuple is returned.
"""
def normal_handler(index, param, value):
return value
def default_handler(index, param, default):
return types.Omitted(default)
def stararg_handler(index, param, values):
return types.Tuple(values)
# For now, we take argument values from the @jit function, even
# in the case of generated jit.
args = fold_arguments(self.pysig, args, kws,
normal_handler,
default_handler,
stararg_handler)
return self.pysig, args
def compile(self, args, return_type):
status, retval = self._compile_cached(args, return_type)
if status:
return retval
else:
raise retval
def _compile_cached(self, args, return_type):
key = tuple(args), return_type
try:
return False, self._failed_cache[key]
except KeyError:
pass
try:
retval = self._compile_core(args, return_type)
except errors.TypingError as e:
self._failed_cache[key] = e
return False, e
else:
return True, retval
def _compile_core(self, args, return_type):
flags = compiler.Flags()
self.targetdescr.options.parse_as_flags(flags, self.targetoptions)
flags = self._customize_flags(flags)
impl = self._get_implementation(args, {})
cres = compiler.compile_extra(self.targetdescr.typing_context,
self.targetdescr.target_context,
impl,
args=args, return_type=return_type,
flags=flags, locals=self.locals,
pipeline_class=self.pipeline_class)
# Check typing error if object mode is used
if cres.typing_error is not None and not flags.enable_pyobject:
raise cres.typing_error
return cres
def get_globals_for_reduction(self):
return serialize._get_function_globals_for_reduction(self.py_func)
def _get_implementation(self, args, kws):
return self.py_func
def _customize_flags(self, flags):
return flags
class _GeneratedFunctionCompiler(_FunctionCompiler):
def __init__(self, py_func, targetdescr, targetoptions, locals,
pipeline_class):
super(_GeneratedFunctionCompiler, self).__init__(
py_func, targetdescr, targetoptions, locals, pipeline_class)
self.impls = set()
def get_globals_for_reduction(self):
# This will recursively get the globals used by any nested
# implementation function.
return serialize._get_function_globals_for_reduction(self.py_func)
def _get_implementation(self, args, kws):
impl = self.py_func(*args, **kws)
# Check the generating function and implementation signatures are
# compatible, otherwise compiling would fail later.
pysig = utils.pysignature(self.py_func)
implsig = utils.pysignature(impl)
ok = len(pysig.parameters) == len(implsig.parameters)
if ok:
for pyparam, implparam in zip(pysig.parameters.values(),
implsig.parameters.values()):
# We allow the implementation to omit default values, but
# if it mentions them, they should have the same value...
if (pyparam.name != implparam.name or
pyparam.kind != implparam.kind or
(implparam.default is not implparam.empty and
implparam.default != pyparam.default)):
ok = False
if not ok:
raise TypeError("generated implementation %s should be compatible "
"with signature '%s', but has signature '%s'"
% (impl, pysig, implsig))
self.impls.add(impl)
return impl
_CompileStats = collections.namedtuple(
'_CompileStats', ('cache_path', 'cache_hits', 'cache_misses'))
class _CompilingCounter(object):
"""
A simple counter that increment in __enter__ and decrement in __exit__.
"""
def __init__(self):
self.counter = 0
def __enter__(self):
assert self.counter >= 0
self.counter += 1
def __exit__(self, *args, **kwargs):
self.counter -= 1
assert self.counter >= 0
def __bool__(self):
return self.counter > 0
__nonzero__ = __bool__
class _DispatcherBase(_dispatcher.Dispatcher):
"""
Common base class for dispatcher Implementations.
"""
__numba__ = "py_func"
def __init__(self, arg_count, py_func, pysig, can_fallback,
exact_match_required):
self._tm = default_type_manager
# A mapping of signatures to compile results
self.overloads = collections.OrderedDict()
self.py_func = py_func
# other parts of Numba assume the old Python 2 name for code object
self.func_code = get_code_object(py_func)
# but newer python uses a different name
self.__code__ = self.func_code
argnames = tuple(pysig.parameters)
default_values = self.py_func.__defaults__ or ()
defargs = tuple(OmittedArg(val) for val in default_values)
try:
lastarg = list(pysig.parameters.values())[-1]
except IndexError:
has_stararg = False
else:
has_stararg = lastarg.kind == lastarg.VAR_POSITIONAL
_dispatcher.Dispatcher.__init__(self, self._tm.get_pointer(),
arg_count, self._fold_args,
argnames, defargs,
can_fallback,
has_stararg,
exact_match_required)
self.doc = py_func.__doc__
self._compiling_counter = _CompilingCounter()
utils.finalize(self, self._make_finalizer())
def _reset_overloads(self):
self._clear()
self.overloads.clear()
def _make_finalizer(self):
"""
Return a finalizer function that will release references to
related compiled functions.
"""
overloads = self.overloads
targetctx = self.targetctx
# Early-bind utils.shutting_down() into the function's local namespace
# (see issue #689)
def finalizer(shutting_down=utils.shutting_down):
# The finalizer may crash at shutdown, skip it (resources
# will be cleared by the process exiting, anyway).
if shutting_down():
return
# This function must *not* hold any reference to self:
# we take care to bind the necessary objects in the closure.
for cres in overloads.values():
try:
targetctx.remove_user_function(cres.entry_point)
except KeyError:
pass
return finalizer
@property
def signatures(self):
"""
Returns a list of compiled function signatures.
"""
return list(self.overloads)
@property
def nopython_signatures(self):
return [cres.signature for cres in self.overloads.values()
if not cres.objectmode and not cres.interpmode]
def disable_compile(self, val=True):
"""Disable the compilation of new signatures at call time.
"""
# If disabling compilation then there must be at least one signature
assert (not val) or len(self.signatures) > 0
self._can_compile = not val
def add_overload(self, cres):
args = tuple(cres.signature.args)
sig = [a._code for a in args]
self._insert(sig, cres.entry_point, cres.objectmode, cres.interpmode)
self.overloads[args] = cres
def fold_argument_types(self, args, kws):
return self._compiler.fold_argument_types(args, kws)
def get_call_template(self, args, kws):
"""
Get a typing.ConcreteTemplate for this dispatcher and the given
        *args* and *kws* types. This allows the return type to be resolved.
A (template, pysig, args, kws) tuple is returned.
"""
# XXX how about a dispatcher template class automating the
# following?
# Fold keyword arguments and resolve default values
pysig, args = self._compiler.fold_argument_types(args, kws)
kws = {}
# Ensure an overload is available
if self._can_compile:
self.compile(tuple(args))
# Create function type for typing
func_name = self.py_func.__name__
name = "CallTemplate({0})".format(func_name)
# The `key` isn't really used except for diagnosis here,
# so avoid keeping a reference to `cfunc`.
call_template = typing.make_concrete_template(
name, key=func_name, signatures=self.nopython_signatures)
return call_template, pysig, args, kws
def get_overload(self, sig):
"""
Return the compiled function for the given signature.
"""
args, return_type = sigutils.normalize_signature(sig)
return self.overloads[tuple(args)].entry_point
@property
def is_compiling(self):
"""
Whether a specialization is currently being compiled.
"""
return self._compiling_counter
def _compile_for_args(self, *args, **kws):
"""
For internal use. Compile a specialized version of the function
for the given *args* and *kws*, and return the resulting callable.
"""
assert not kws
def error_rewrite(e, issue_type):
"""
Rewrite and raise Exception `e` with help supplied based on the
specified issue_type.
"""
if config.SHOW_HELP:
help_msg = errors.error_extras[issue_type]
e.patch_message(''.join(e.args) + help_msg)
if config.FULL_TRACEBACKS:
raise e
else:
reraise(type(e), e, None)
argtypes = []
for a in args:
if isinstance(a, OmittedArg):
argtypes.append(types.Omitted(a.value))
else:
argtypes.append(self.typeof_pyval(a))
try:
return self.compile(tuple(argtypes))
except errors.TypingError as e:
# Intercept typing error that may be due to an argument
# that failed inferencing as a Numba type
failed_args = []
for i, arg in enumerate(args):
val = arg.value if isinstance(arg, OmittedArg) else arg
try:
tp = typeof(val, Purpose.argument)
except ValueError as typeof_exc:
failed_args.append((i, str(typeof_exc)))
else:
if tp is None:
failed_args.append(
(i,
"cannot determine Numba type of value %r" % (val,)))
if failed_args:
# Patch error message to ease debugging
msg = str(e).rstrip() + (
"\n\nThis error may have been caused by the following argument(s):\n%s\n"
% "\n".join("- argument %d: %s" % (i, err)
for i, err in failed_args))
e.patch_message(msg)
error_rewrite(e, 'typing')
except errors.UnsupportedError as e:
# Something unsupported is present in the user code, add help info
error_rewrite(e, 'unsupported_error')
except (errors.NotDefinedError, errors.RedefinedError,
errors.VerificationError) as e:
# These errors are probably from an issue with either the code supplied
# being syntactically or otherwise invalid
error_rewrite(e, 'interpreter')
except errors.ConstantInferenceError as e:
# this is from trying to infer something as constant when it isn't
# or isn't supported as a constant
error_rewrite(e, 'constant_inference')
except Exception as e:
if config.SHOW_HELP:
if hasattr(e, 'patch_message'):
help_msg = errors.error_extras['reportable']
e.patch_message(''.join(e.args) + help_msg)
# ignore the FULL_TRACEBACKS config, this needs reporting!
raise e
def inspect_llvm(self, signature=None):
if signature is not None:
lib = self.overloads[signature].library
return lib.get_llvm_str()
return dict((sig, self.inspect_llvm(sig)) for sig in self.signatures)
def inspect_asm(self, signature=None):
if signature is not None:
lib = self.overloads[signature].library
return lib.get_asm_str()
return dict((sig, self.inspect_asm(sig)) for sig in self.signatures)
def inspect_types(self, file=None, signature=None, **kwargs):
"""
print or return annotated source with Numba intermediate IR
Pass `pretty=True` to attempt color highlighting, and HTML rendering in
Jupyter and IPython by returning an Annotate Object. `file` must be
None if used in conjunction with `pretty=True`.
"""
pretty = kwargs.get('pretty', False)
style = kwargs.get('style', 'default')
overloads = self.overloads
if signature is not None:
overloads = {signature: self.overloads[signature]}
if not pretty:
if file is None:
file = sys.stdout
for ver, res in utils.iteritems(overloads):
print("%s %s" % (self.py_func.__name__, ver), file=file)
print('-' * 80, file=file)
print(res.type_annotation, file=file)
print('=' * 80, file=file)
else:
if file is not None:
raise ValueError("`file` must be None if `pretty=True`")
from .pretty_annotate import Annotate
return Annotate(self, signature=signature, style=style)
def inspect_cfg(self, signature=None, show_wrapper=None):
"""
For inspecting the CFG of the function.
By default the CFG of the user function is showed. The *show_wrapper*
option can be set to "python" or "cfunc" to show the python wrapper
function or the *cfunc* wrapper function, respectively.
"""
if signature is not None:
cres = self.overloads[signature]
lib = cres.library
if show_wrapper == 'python':
fname = cres.fndesc.llvm_cpython_wrapper_name
elif show_wrapper == 'cfunc':
fname = cres.fndesc.llvm_cfunc_wrapper_name
else:
fname = cres.fndesc.mangled_name
return lib.get_function_cfg(fname)
return dict((sig, self.inspect_cfg(sig, show_wrapper=show_wrapper))
for sig in self.signatures)
def get_annotation_info(self, signature=None):
"""
Gets the annotation information for the function specified by
signature. If no signature is supplied a dictionary of signature to
annotation information is returned.
"""
signatures = self.signatures if signature is None else [signature]
out = collections.OrderedDict()
for sig in signatures:
cres = self.overloads[sig]
ta = cres.type_annotation
key = (ta.func_id.filename + ':' + str(ta.func_id.firstlineno + 1),
ta.signature)
out[key] = ta.annotate_raw()[key]
return out
def _explain_ambiguous(self, *args, **kws):
"""
Callback for the C _Dispatcher object.
"""
assert not kws, "kwargs not handled"
args = tuple([self.typeof_pyval(a) for a in args])
# The order here must be deterministic for testing purposes, which
# is ensured by the OrderedDict.
sigs = self.nopython_signatures
# This will raise
self.typingctx.resolve_overload(self.py_func, sigs, args, kws,
allow_ambiguous=False)
def _explain_matching_error(self, *args, **kws):
"""
Callback for the C _Dispatcher object.
"""
assert not kws, "kwargs not handled"
args = [self.typeof_pyval(a) for a in args]
msg = ("No matching definition for argument type(s) %s"
% ', '.join(map(str, args)))
raise TypeError(msg)
def _search_new_conversions(self, *args, **kws):
"""
Callback for the C _Dispatcher object.
Search for approximately matching signatures for the given arguments,
and ensure the corresponding conversions are registered in the C++
type manager.
"""
assert not kws, "kwargs not handled"
args = [self.typeof_pyval(a) for a in args]
found = False
for sig in self.nopython_signatures:
conv = self.typingctx.install_possible_conversions(args, sig.args)
if conv:
found = True
return found
def __repr__(self):
return "%s(%s)" % (type(self).__name__, self.py_func)
def typeof_pyval(self, val):
"""
Resolve the Numba type of Python value *val*.
This is called from numba._dispatcher as a fallback if the native code
cannot decide the type.
"""
# Not going through the resolve_argument_type() indirection
# can save a couple µs.
try:
tp = typeof(val, Purpose.argument)
except ValueError:
tp = types.pyobject
else:
if tp is None:
tp = types.pyobject
return tp
class Dispatcher(_DispatcherBase):
"""
Implementation of user-facing dispatcher objects (i.e. created using
the @jit decorator).
This is an abstract base class. Subclasses should define the targetdescr
class attribute.
"""
_fold_args = True
_impl_kinds = {
'direct': _FunctionCompiler,
'generated': _GeneratedFunctionCompiler,
}
# A {uuid -> instance} mapping, for deserialization
_memo = weakref.WeakValueDictionary()
# hold refs to last N functions deserialized, retaining them in _memo
# regardless of whether there is another reference
_recent = collections.deque(maxlen=config.FUNCTION_CACHE_SIZE)
__uuid = None
__numba__ = 'py_func'
def __init__(self, py_func, locals={}, targetoptions={},
impl_kind='direct', pipeline_class=compiler.Pipeline):
"""
Parameters
----------
py_func: function object to be compiled
locals: dict, optional
Mapping of local variable names to Numba types. Used to override
the types deduced by the type inference engine.
targetoptions: dict, optional
Target-specific config options.
impl_kind: str
Select the compiler mode for `@jit` and `@generated_jit`
pipeline_class: type numba.compiler.BasePipeline
The compiler pipeline type.
"""
self.typingctx = self.targetdescr.typing_context
self.targetctx = self.targetdescr.target_context
pysig = utils.pysignature(py_func)
arg_count = len(pysig.parameters)
can_fallback = not targetoptions.get('nopython', False)
_DispatcherBase.__init__(self, arg_count, py_func, pysig, can_fallback,
exact_match_required=False)
functools.update_wrapper(self, py_func)
self.targetoptions = targetoptions
self.locals = locals
self._cache = NullCache()
compiler_class = self._impl_kinds[impl_kind]
self._impl_kind = impl_kind
self._compiler = compiler_class(py_func, self.targetdescr,
targetoptions, locals, pipeline_class)
self._cache_hits = collections.Counter()
self._cache_misses = collections.Counter()
self._type = types.Dispatcher(self)
self.typingctx.insert_global(self, self._type)
@property
def _numba_type_(self):
return types.Dispatcher(self)
def enable_caching(self):
self._cache = FunctionCache(self.py_func)
def __get__(self, obj, objtype=None):
'''Allow a JIT function to be bound as a method to an object'''
if obj is None: # Unbound method
return self
else: # Bound method
return create_bound_method(self, obj)
def __reduce__(self):
"""
Reduce the instance for pickling. This will serialize
the original function as well the compilation options and
compiled signatures, but not the compiled code itself.
"""
if self._can_compile:
sigs = []
else:
sigs = [cr.signature for cr in self.overloads.values()]
globs = self._compiler.get_globals_for_reduction()
return (serialize._rebuild_reduction,
(self.__class__, str(self._uuid),
serialize._reduce_function(self.py_func, globs),
self.locals, self.targetoptions, self._impl_kind,
self._can_compile, sigs))
@classmethod
def _rebuild(cls, uuid, func_reduced, locals, targetoptions, impl_kind,
can_compile, sigs):
"""
        Rebuild a Dispatcher instance after it was __reduce__'d.
"""
try:
return cls._memo[uuid]
except KeyError:
pass
py_func = serialize._rebuild_function(*func_reduced)
self = cls(py_func, locals, targetoptions, impl_kind)
# Make sure this deserialization will be merged with subsequent ones
self._set_uuid(uuid)
for sig in sigs:
self.compile(sig)
self._can_compile = can_compile
return self
@property
def _uuid(self):
"""
An instance-specific UUID, to avoid multiple deserializations of
a given instance.
Note this is lazily-generated, for performance reasons.
"""
u = self.__uuid
if u is None:
u = str(uuid.uuid1())
self._set_uuid(u)
return u
def _set_uuid(self, u):
assert self.__uuid is None
self.__uuid = u
self._memo[u] = self
self._recent.append(self)
@compiler.global_compiler_lock
def compile(self, sig):
if not self._can_compile:
raise RuntimeError("compilation disabled")
# Use counter to track recursion compilation depth
with self._compiling_counter:
args, return_type = sigutils.normalize_signature(sig)
# Don't recompile if signature already exists
existing = self.overloads.get(tuple(args))
if existing is not None:
return existing.entry_point
# Try to load from disk cache
cres = self._cache.load_overload(sig, self.targetctx)
if cres is not None:
self._cache_hits[sig] += 1
# XXX fold this in add_overload()? (also see compiler.py)
if not cres.objectmode and not cres.interpmode:
self.targetctx.insert_user_function(cres.entry_point,
cres.fndesc, [cres.library])
self.add_overload(cres)
return cres.entry_point
self._cache_misses[sig] += 1
cres = self._compiler.compile(args, return_type)
self.add_overload(cres)
self._cache.save_overload(sig, cres)
return cres.entry_point
def recompile(self):
"""
Recompile all signatures afresh.
"""
sigs = list(self.overloads)
old_can_compile = self._can_compile
# Ensure the old overloads are disposed of, including compiled functions.
self._make_finalizer()()
self._reset_overloads()
self._cache.flush()
self._can_compile = True
try:
for sig in sigs:
self.compile(sig)
finally:
self._can_compile = old_can_compile
@property
def stats(self):
return _CompileStats(
cache_path=self._cache.cache_path,
cache_hits=self._cache_hits,
cache_misses=self._cache_misses,
)
def parallel_diagnostics(self, signature=None, level=1):
"""
Print parallel diagnostic information for the given signature. If no
signature is present it is printed for all known signatures. level is
used to adjust the verbosity, level=1 (default) is minimal verbosity,
and 2, 3, and 4 provide increasing levels of verbosity.
"""
def dump(sig):
ol = self.overloads[sig]
pfdiag = ol.metadata.get('parfor_diagnostics', None)
if pfdiag is None:
msg = "No parfors diagnostic available, is 'parallel=True' set?"
raise ValueError(msg)
pfdiag.dump(level)
if signature is not None:
dump(signature)
else:
[dump(sig) for sig in self.signatures]
def get_metadata(self, signature=None):
"""
Obtain the compilation metadata for a given signature.
"""
if signature is not None:
return self.overloads[signature].metadata
else:
return dict((sig, self.overloads[sig].metadata) for sig in self.signatures)
class LiftedCode(_DispatcherBase):
"""
Implementation of the hidden dispatcher objects used for lifted code
(a lifted loop is really compiled as a separate function).
"""
_fold_args = False
def __init__(self, func_ir, typingctx, targetctx, flags, locals):
self.func_ir = func_ir
self.lifted_from = None
self.typingctx = typingctx
self.targetctx = targetctx
self.flags = flags
self.locals = locals
_DispatcherBase.__init__(self, self.func_ir.arg_count,
self.func_ir.func_id.func,
self.func_ir.func_id.pysig,
can_fallback=True,
exact_match_required=False)
def get_source_location(self):
"""Return the starting line number of the loop.
"""
return self.func_ir.loc.line
def _pre_compile(self, args, return_type, flags):
"""Pre-compile actions
"""
pass
@compiler.global_compiler_lock
def compile(self, sig):
# Use counter to track recursion compilation depth
with self._compiling_counter:
# XXX this is mostly duplicated from Dispatcher.
flags = self.flags
args, return_type = sigutils.normalize_signature(sig)
# Don't recompile if signature already exists
# (e.g. if another thread compiled it before we got the lock)
existing = self.overloads.get(tuple(args))
if existing is not None:
return existing.entry_point
self._pre_compile(args, return_type, flags)
# Clone IR to avoid (some of the) mutation in the rewrite pass
cloned_func_ir = self.func_ir.copy()
cres = compiler.compile_ir(typingctx=self.typingctx,
targetctx=self.targetctx,
func_ir=cloned_func_ir,
args=args, return_type=return_type,
flags=flags, locals=self.locals,
lifted=(),
lifted_from=self.lifted_from,
is_lifted_loop=True,)
# Check typing error if object mode is used
if cres.typing_error is not None and not flags.enable_pyobject:
raise cres.typing_error
self.add_overload(cres)
return cres.entry_point
class LiftedLoop(LiftedCode):
def _pre_compile(self, args, return_type, flags):
        assert not flags.enable_looplift, "Enable looplift flag is on"
class LiftedWith(LiftedCode):
@property
def _numba_type_(self):
return types.Dispatcher(self)
def get_call_template(self, args, kws):
"""
Get a typing.ConcreteTemplate for this dispatcher and the given
        *args* and *kws* types. This allows the return type to be resolved.
A (template, pysig, args, kws) tuple is returned.
"""
# Ensure an overload is available
if self._can_compile:
self.compile(tuple(args))
pysig = None
# Create function type for typing
func_name = self.py_func.__name__
name = "CallTemplate({0})".format(func_name)
# The `key` isn't really used except for diagnosis here,
# so avoid keeping a reference to `cfunc`.
call_template = typing.make_concrete_template(
name, key=func_name, signatures=self.nopython_signatures)
return call_template, pysig, args, kws
class ObjModeLiftedWith(LiftedWith):
def __init__(self, *args, **kwargs):
self.output_types = kwargs.pop('output_types', None)
super(LiftedWith, self).__init__(*args, **kwargs)
if not self.flags.force_pyobject:
raise ValueError("expecting `flags.force_pyobject`")
if self.output_types is None:
raise TypeError('`output_types` must be provided')
@property
def _numba_type_(self):
return types.ObjModeDispatcher(self)
def get_call_template(self, args, kws):
"""
Get a typing.ConcreteTemplate for this dispatcher and the given
        *args* and *kws* types. This allows the return type to be resolved.
A (template, pysig, args, kws) tuple is returned.
"""
assert not kws
self._legalize_arg_types(args)
# Coerce to object mode
args = [types.ffi_forced_object] * len(args)
if self._can_compile:
self.compile(tuple(args))
signatures = [typing.signature(self.output_types, *args)]
pysig = None
func_name = self.py_func.__name__
name = "CallTemplate({0})".format(func_name)
call_template = typing.make_concrete_template(
name, key=func_name, signatures=signatures)
return call_template, pysig, args, kws
def _legalize_arg_types(self, args):
for i, a in enumerate(args, start=1):
if isinstance(a, types.List):
msg = (
'Does not support list type inputs into '
'with-context for arg {}'
)
raise errors.TypingError(msg.format(i))
elif isinstance(a, types.Dispatcher):
msg = (
'Does not support function type inputs into '
'with-context for arg {}'
)
raise errors.TypingError(msg.format(i))
# Initialize typeof machinery
_dispatcher.typeof_init(
OmittedArg,
dict((str(t), t._code) for t in types.number_domain))
| jriehl/numba | numba/dispatcher.py | Python | bsd-2-clause | 33,487 |
import xml.etree.ElementTree as ElementTree
import re
pVersion = re.compile(r"(\d+)\.(\d+)\.(\d+)")
class AdminData:
def __init__(self):
self.specialDataGroups = []
def asdict(self):
retval={'type': self.__class__.__name__, 'specialDataGroups':[]}
for elem in self.specialDataGroups:
retval['specialDataGroups'].append(elem.asdict())
return retval
def __eq__(self, other):
if isinstance(other, self.__class__) and len(self.specialDataGroups) == len(other.specialDataGroups):
for i,elem in enumerate(self.specialDataGroups):
if elem != other.specialDataGroups[i]:
return False
return True
return False
def __ne__(self, other): return not (self == other)
class SpecialDataGroup(object):
def __init__(self,SDG_GID,SD=None,SD_GID=None):
self.SDG_GID=SDG_GID
self.SD = []
if SD is not None or SD_GID is not None:
self.SD.append(SpecialData(SD, SD_GID))
# def asdict(self):
# data = {'type': self.__class__.__name__}
# if self.SDG_GID is not None: data['SDG_GID']=self.SDG_GID
# if self.SD is not None: data['SD']=self.SD
# if self.SD_GID is not None: data['SD_GID']=self.SD_GID
# return data
def __eq__(self, other):
if isinstance(other, self.__class__):
if self.SDG_GID == other.SDG_GID:
for i,SD in enumerate(self.SD):
other_SD = other.SD[i]
if SD.TEXT != other_SD.TEXT or SD.GID != other_SD.GID:
return False
return True
return False
def __ne__(self, other): return not (self == other)
class SpecialData:
def __init__(self, TEXT, GID):
self.TEXT = TEXT
self.GID = GID
def removeNamespace(doc, namespace):
"""Removes XML namespace in place."""
ns = u'{%s}' % namespace
nsl = len(ns)
for elem in doc.iter():
if elem.tag.startswith(ns):
elem.tag = elem.tag[nsl:]
def parseXMLFile(filename,namespace=None):
arxml_tree = ElementTree.ElementTree()
arxml_tree.parse(filename)
arxml_root = arxml_tree.getroot()
if namespace is not None:
removeNamespace(arxml_root,namespace)
return arxml_root
def getXMLNamespace(element):
m = re.match(r'\{(.*)\}', element.tag)
return m.group(1) if m else None
def splitRef(ref):
"""splits an autosar url string into an array"""
if isinstance(ref,str):
if ref[0]=='/': return ref[1:].split('/')
else: return ref.split('/')
return None
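# Illustrative example for splitRef (the reference string is made up):
# splitRef('/DataTypes/uint8') -> ['DataTypes', 'uint8']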
def hasAdminData(xmlRoot):
return True if xmlRoot.find('ADMIN-DATA') is not None else False
def parseAdminDataNode(xmlRoot):
if xmlRoot is None: return None
assert(xmlRoot.tag=='ADMIN-DATA')
adminData=AdminData()
xmlSDGS = xmlRoot.find('./SDGS')
if xmlSDGS is not None:
for xmlElem in xmlSDGS.findall('./SDG'):
GID=xmlElem.attrib['GID']
SD=None
SD_GID=None
xmlSD = xmlElem.find('SD')
if xmlSD is not None:
SD=xmlSD.text
try:
SD_GID=xmlSD.attrib['GID']
except KeyError: pass
adminData.specialDataGroups.append(SpecialDataGroup(GID,SD,SD_GID))
return adminData
def parseTextNode(xmlElem):
return None if xmlElem is None else xmlElem.text
def parseIntNode(xmlElem):
return None if xmlElem is None else int(xmlElem.text)
def parseFloatNode(xmlElem):
return None if xmlElem is None else float(xmlElem.text)
def parseBooleanNode(xmlElem):
return None if xmlElem is None else parseBoolean(xmlElem.text)
def parseBoolean(value):
if value is None:
return None
if isinstance(value,str):
if value == 'true': return True
elif value =='false': return False
raise ValueError(value)
def indexByName(lst,name):
assert(isinstance(lst,list))
assert(isinstance(name,str))
for i,item in enumerate(lst):
if item.name == name: return i
raise ValueError('%s not in list'%name)
def createAdminData(data):
adminData = AdminData()
SDG_GID = data.get('SDG_GID',None)
if 'SDG' in data:
group = SpecialDataGroup(SDG_GID)
for item in data['SDG']:
SD_GID = item.get('SD_GID',None)
SD = item.get('SD',None)
group.SD.append(SpecialData(SD, SD_GID))
adminData.specialDataGroups.append(group)
else:
SD_GID = data.get('SD_GID',None)
SD = data.get('SD',None)
adminData.specialDataGroups.append(SpecialDataGroup(SDG_GID,SD,SD_GID))
return adminData
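# Illustrative example for createAdminData (values are made up):
# createAdminData({'SDG_GID': 'X', 'SD': 'Y'}) returns an AdminData with one
# SpecialDataGroup(GID='X') holding SpecialData('Y', None).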
def parseAutosarVersionAndSchema(xmlRoot):
"""
Parses AUTOSAR version from the schemaLocation attribute in the root AUTOSAR tag
For AUTOSAR versions 4.3 and below (e.g. "http://autosar.org/schema/r4.0 AUTOSAR_4-3-0.xsd")
Returns a tuple with major, minor, patch, None, schemaFile. Types are (int, int, int, NoneType, str)
For AUTOSAR versions 4.4 and above (e.g. "http://autosar.org/schema/r4.0 AUTOSAR_00044.xsd")
Returns a tuple with major, minor, None, release, schemaFile
    In that case (major, minor) is reported as (4, 0), since it is now extracted from the "r4.0" part of the attribute.
"""
schemaLocation = None
for key in xmlRoot.attrib.keys():
if key.endswith('schemaLocation'):
value = xmlRoot.attrib[key]
#Retreive the schema file
result = re.search(r'(^[ ]+\.xsd)', value)
tmp = value.partition(' ')
if len(tmp[2])>0:
schemaFile = tmp[2]
else:
schemaFile = None
#Is this AUTOSAR 3?
result = re.search(r'(\d)\.(\d)\.(\d)', value)
if result is not None:
return (int(result.group(1)), int(result.group(2)), int(result.group(3)), None, schemaFile)
else:
#Is this AUTOSAR 4.0 to 4.3?
result = re.search(r'(\d)-(\d)-(\d).*\.xsd', value)
if result is not None:
return (int(result.group(1)),int(result.group(2)),int(result.group(3)), None, schemaFile)
else:
#Is this AUTOSAR 4.4 or above?
result = re.search(r'r(\d+)\.(\d+)\s+AUTOSAR_(\d+).xsd', value)
if result is not None:
return (int(result.group(1)),int(result.group(2)),None, int(result.group(3)), schemaFile)
return (None, None, None, None, None)
def applyFilter(ref, filters):
if filters is None:
return True
if ref[0] == '/': ref=ref[1:]
tmp = ref.split('/')
for f in filters:
match = True
for i,filter_elem in enumerate(f):
if i >=len(tmp): return True
ref_elem = tmp[i]
if (filter_elem != '*') and (ref_elem != filter_elem):
match = False
break
if match: return True
return False
def prepareFilter(fstr):
if fstr[0] == '/': fstr=fstr[1:]
if fstr[-1] == '/': fstr+='*'
return fstr.split('/')
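# Illustrative examples for prepareFilter/applyFilter (paths are made up):
# prepareFilter('/Pkg/Sub/') -> ['Pkg', 'Sub', '*']
# applyFilter('/Pkg/Sub/Elem', [['Pkg', '*']]) -> True (the '*' matches any element name).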
def parseVersionString(versionString):
"""
takes a string of the format <major>.<minor>.<patch> (e.g. "3.2.2") and returns a tuple with three integers (major, minor, patch)
"""
result = pVersion.match(versionString)
if result is None:
raise ValueError("VersionString argument did not match the pattern '<major>.<minor>.<patch>'")
else:
return (int(result.group(1)),int(result.group(2)),int(result.group(3)))
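# Illustrative example for parseVersionString: parseVersionString("3.2.2") returns (3, 2, 2);
# a string that does not match <major>.<minor>.<patch> raises ValueError.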
def findUniqueNameInList(elementList, baseName):
"""
Attempts to find a unique name in the list of objects based on baseName.
    This function can modify names in the given list.
Returns a new name which is guaranteed to be unique
"""
foundElem = None
highestIndex = 0
hasIndex = False
p0 = re.compile(baseName+r'_(\d+)')
for elem in elementList:
result = p0.match(elem.name)
if result is not None:
hasIndex = True
index = int(result.group(1))
if index > highestIndex:
highestIndex = index
elif elem.name == baseName:
foundElem = elem
if foundElem is not None:
foundElem.name = '_'.join([foundElem.name, '0'])
if hasIndex or foundElem is not None:
return '_'.join([baseName, str(highestIndex+1)])
else:
return baseName
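# Illustrative example for findUniqueNameInList (names are made up): if elementList
# already holds an element named "Elem", that element is renamed to "Elem_0" and
# "Elem_1" is returned; if no element uses the base name, "Elem" is returned unchanged.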
class SwDataDefPropsConditional:
def tag(self,version=None): return 'SW-DATA-DEF-PROPS-CONDITIONAL'
def __init__(self, baseTypeRef = None, implementationTypeRef = None, swAddressMethodRef = None, swCalibrationAccess = None, swImplPolicy = None, swPointerTargetProps = None, compuMethodRef = None, dataConstraintRef = None, unitRef = None, parent = None):
self.baseTypeRef = baseTypeRef
self.swCalibrationAccess = swCalibrationAccess
self.swAddressMethodRef = swAddressMethodRef
self.compuMethodRef = compuMethodRef
self.dataConstraintRef = dataConstraintRef
self.implementationTypeRef = implementationTypeRef
self.swPointerTargetProps = swPointerTargetProps
self.unitRef = unitRef
self.swImplPolicy = swImplPolicy
self.parent = parent
@property
def swImplPolicy(self):
return self._swImplPolicy
@swImplPolicy.setter
def swImplPolicy(self, value):
if value is None:
self._swImplPolicy=None
else:
ucvalue=str(value).upper()
enum_values = ["CONST", "FIXED", "MEASUREMENT-POINT", "QUEUED", "STANDARD"]
if ucvalue in enum_values:
self._swImplPolicy = ucvalue
else:
raise ValueError('invalid swImplPolicy value: ' + value)
def hasAnyProp(self):
"""
Returns True if any internal attribute is not None, else False.
The check excludes the parent attribute.
"""
retval = False
attr_names = ['baseTypeRef',
'swCalibrationAccess',
'swAddressMethodRef',
'compuMethodRef',
'dataConstraintRef',
'implementationTypeRef',
'swPointerTargetProps',
'unitRef',
'swImplPolicy'
]
for name in attr_names:
if getattr(self, name) is not None:
retval = True
break
return retval
class SwPointerTargetProps:
"""
(AUTOSAR 4)
Implements <SW-POINTER-TARGET-PROPS>
"""
def tag(self, version=None): return 'SW-POINTER-TARGET-PROPS'
def __init__(self, targetCategory=None, variants = None):
self.targetCategory = targetCategory
if variants is None:
self.variants = []
else:
if isinstance(variants, SwDataDefPropsConditional):
self.variants = [variants]
else:
self.variants = list(variants)
class SymbolProps:
"""
(AUTOSAR 4)
Implements <SYMBOL-PROPS>
"""
def tag(self, version=None): return 'SYMBOL-PROPS'
def __init__(self, name = None, symbol = None):
self.name = name
self.symbol = symbol
#Exceptions
class InvalidUnitRef(ValueError):
pass
class InvalidPortInterfaceRef(ValueError):
pass
class InvalidComponentTypeRef(ValueError):
pass
class InvalidDataTypeRef(ValueError):
pass
class InvalidDataElementRef(ValueError):
pass
class InvalidPortRef(ValueError):
pass
class InvalidInitValueRef(ValueError):
pass
class InvalidDataConstraintRef(ValueError):
pass
class InvalidCompuMethodRef(ValueError):
pass
class DataConstraintError(ValueError):
pass
class InvalidMappingRef(ValueError):
pass
class InvalidModeGroupRef(ValueError):
pass
class InvalidModeDeclarationGroupRef(ValueError):
pass
class InvalidModeDeclarationRef(ValueError):
pass
class InvalidEventSourceRef(ValueError):
pass
class InvalidRunnableRef(ValueError):
pass
class InvalidBehaviorRef(ValueError):
pass
class InvalidSwAddrmethodRef(ValueError):
pass
| cogu/autosar | autosar/base.py | Python | mit | 12,669 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for GetContext
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflow
# [START dialogflow_v2_generated_Contexts_GetContext_async]
from google.cloud import dialogflow_v2
async def sample_get_context():
# Create a client
client = dialogflow_v2.ContextsAsyncClient()
# Initialize request argument(s)
request = dialogflow_v2.GetContextRequest(
name="name_value",
)
# Make the request
response = await client.get_context(request=request)
# Handle the response
print(response)
# [END dialogflow_v2_generated_Contexts_GetContext_async]
| googleapis/python-dialogflow | samples/generated_samples/dialogflow_v2_generated_contexts_get_context_async.py | Python | apache-2.0 | 1,444 |
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo.config import cfg
from oslo.db import exception as db_exc
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron.common import exceptions as n_exc
from neutron.common import rpc as n_rpc
from neutron.common import utils
from neutron.extensions import portbindings
from neutron import manager
from neutron.openstack.common import excutils
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class DhcpRpcCallback(n_rpc.RpcCallback):
"""DHCP agent RPC callback in plugin implementations."""
# API version history:
# 1.0 - Initial version.
# 1.1 - Added get_active_networks_info, create_dhcp_port,
# and update_dhcp_port methods.
RPC_API_VERSION = '1.1'
def _get_active_networks(self, context, **kwargs):
"""Retrieve and return a list of the active networks."""
host = kwargs.get('host')
plugin = manager.NeutronManager.get_plugin()
if utils.is_extension_supported(
plugin, constants.DHCP_AGENT_SCHEDULER_EXT_ALIAS):
if cfg.CONF.network_auto_schedule:
plugin.auto_schedule_networks(context, host)
nets = plugin.list_active_networks_on_active_dhcp_agent(
context, host)
else:
filters = dict(admin_state_up=[True])
nets = plugin.get_networks(context, filters=filters)
return nets
def _port_action(self, plugin, context, port, action):
"""Perform port operations taking care of concurrency issues."""
try:
if action == 'create_port':
return plugin.create_port(context, port)
elif action == 'update_port':
return plugin.update_port(context, port['id'], port['port'])
else:
msg = _('Unrecognized action')
raise n_exc.Invalid(message=msg)
except (db_exc.DBError, n_exc.NetworkNotFound,
n_exc.SubnetNotFound, n_exc.IpAddressGenerationFailure) as e:
with excutils.save_and_reraise_exception(reraise=False) as ctxt:
if isinstance(e, n_exc.IpAddressGenerationFailure):
# Check if the subnet still exists and if it does not,
# this is the reason why the ip address generation failed.
# In any other unlikely event re-raise
try:
subnet_id = port['port']['fixed_ips'][0]['subnet_id']
plugin.get_subnet(context, subnet_id)
except n_exc.SubnetNotFound:
pass
else:
ctxt.reraise = True
net_id = port['port']['network_id']
LOG.warn(_("Action %(action)s for network %(net_id)s "
"could not complete successfully: %(reason)s")
% {"action": action, "net_id": net_id, 'reason': e})
def get_active_networks(self, context, **kwargs):
"""Retrieve and return a list of the active network ids."""
# NOTE(arosen): This method is no longer used by the DHCP agent but is
# left so that neutron-dhcp-agents will still continue to work if
# neutron-server is upgraded and not the agent.
host = kwargs.get('host')
LOG.debug(_('get_active_networks requested from %s'), host)
nets = self._get_active_networks(context, **kwargs)
return [net['id'] for net in nets]
def get_active_networks_info(self, context, **kwargs):
"""Returns all the networks/subnets/ports in system."""
host = kwargs.get('host')
LOG.debug(_('get_active_networks_info from %s'), host)
networks = self._get_active_networks(context, **kwargs)
plugin = manager.NeutronManager.get_plugin()
filters = {'network_id': [network['id'] for network in networks]}
ports = plugin.get_ports(context, filters=filters)
filters['enable_dhcp'] = [True]
subnets = plugin.get_subnets(context, filters=filters)
for network in networks:
network['subnets'] = [subnet for subnet in subnets
if subnet['network_id'] == network['id']]
network['ports'] = [port for port in ports
if port['network_id'] == network['id']]
return networks
def get_network_info(self, context, **kwargs):
"""Retrieve and return a extended information about a network."""
network_id = kwargs.get('network_id')
host = kwargs.get('host')
LOG.debug(_('Network %(network_id)s requested from '
'%(host)s'), {'network_id': network_id,
'host': host})
plugin = manager.NeutronManager.get_plugin()
try:
network = plugin.get_network(context, network_id)
except n_exc.NetworkNotFound:
LOG.warn(_("Network %s could not be found, it might have "
"been deleted concurrently."), network_id)
return
filters = dict(network_id=[network_id])
network['subnets'] = plugin.get_subnets(context, filters=filters)
network['ports'] = plugin.get_ports(context, filters=filters)
return network
def get_dhcp_port(self, context, **kwargs):
"""Allocate a DHCP port for the host and return port information.
This method will re-use an existing port if one already exists. When a
port is re-used, the fixed_ip allocation will be updated to the current
network state. If an expected failure occurs, a None port is returned.
"""
host = kwargs.get('host')
network_id = kwargs.get('network_id')
device_id = kwargs.get('device_id')
# There could be more than one dhcp server per network, so create
# a device id that combines host and network ids
LOG.debug(_('Port %(device_id)s for %(network_id)s requested from '
'%(host)s'), {'device_id': device_id,
'network_id': network_id,
'host': host})
plugin = manager.NeutronManager.get_plugin()
retval = None
filters = dict(network_id=[network_id])
subnets = dict([(s['id'], s) for s in
plugin.get_subnets(context, filters=filters)])
dhcp_enabled_subnet_ids = [s['id'] for s in
subnets.values() if s['enable_dhcp']]
try:
filters = dict(network_id=[network_id], device_id=[device_id])
ports = plugin.get_ports(context, filters=filters)
if ports:
# Ensure that fixed_ips cover all dhcp_enabled subnets.
port = ports[0]
for fixed_ip in port['fixed_ips']:
if fixed_ip['subnet_id'] in dhcp_enabled_subnet_ids:
dhcp_enabled_subnet_ids.remove(fixed_ip['subnet_id'])
port['fixed_ips'].extend(
[dict(subnet_id=s) for s in dhcp_enabled_subnet_ids])
retval = plugin.update_port(context, port['id'],
dict(port=port))
except n_exc.NotFound as e:
LOG.warning(e)
if retval is None:
# No previous port exists, so create a new one.
LOG.debug(_('DHCP port %(device_id)s on network %(network_id)s '
'does not exist on %(host)s'),
{'device_id': device_id,
'network_id': network_id,
'host': host})
try:
network = plugin.get_network(context, network_id)
except n_exc.NetworkNotFound:
LOG.warn(_("Network %s could not be found, it might have "
"been deleted concurrently."), network_id)
return
port_dict = dict(
admin_state_up=True,
device_id=device_id,
network_id=network_id,
tenant_id=network['tenant_id'],
mac_address=attributes.ATTR_NOT_SPECIFIED,
name='',
device_owner=constants.DEVICE_OWNER_DHCP,
fixed_ips=[dict(subnet_id=s) for s in dhcp_enabled_subnet_ids])
retval = self._port_action(plugin, context, {'port': port_dict},
'create_port')
if not retval:
return
# Convert subnet_id to subnet dict
for fixed_ip in retval['fixed_ips']:
subnet_id = fixed_ip.pop('subnet_id')
fixed_ip['subnet'] = subnets[subnet_id]
return retval
def release_dhcp_port(self, context, **kwargs):
"""Release the port currently being used by a DHCP agent."""
host = kwargs.get('host')
network_id = kwargs.get('network_id')
device_id = kwargs.get('device_id')
LOG.debug(_('DHCP port deletion for %(network_id)s request from '
'%(host)s'),
{'network_id': network_id, 'host': host})
plugin = manager.NeutronManager.get_plugin()
plugin.delete_ports_by_device_id(context, device_id, network_id)
def release_port_fixed_ip(self, context, **kwargs):
"""Release the fixed_ip associated the subnet on a port."""
host = kwargs.get('host')
network_id = kwargs.get('network_id')
device_id = kwargs.get('device_id')
subnet_id = kwargs.get('subnet_id')
LOG.debug(_('DHCP port remove fixed_ip for %(subnet_id)s request '
'from %(host)s'),
{'subnet_id': subnet_id, 'host': host})
plugin = manager.NeutronManager.get_plugin()
filters = dict(network_id=[network_id], device_id=[device_id])
ports = plugin.get_ports(context, filters=filters)
if ports:
port = ports[0]
fixed_ips = port.get('fixed_ips', [])
for i in range(len(fixed_ips)):
if fixed_ips[i]['subnet_id'] == subnet_id:
del fixed_ips[i]
break
plugin.update_port(context, port['id'], dict(port=port))
def update_lease_expiration(self, context, **kwargs):
"""Release the fixed_ip associated the subnet on a port."""
# NOTE(arosen): This method is no longer used by the DHCP agent but is
# left so that neutron-dhcp-agents will still continue to work if
# neutron-server is upgraded and not the agent.
host = kwargs.get('host')
LOG.warning(_('Updating lease expiration is now deprecated. Issued '
'from host %s.'), host)
def create_dhcp_port(self, context, **kwargs):
"""Create and return dhcp port information.
If an expected failure occurs, a None port is returned.
"""
host = kwargs.get('host')
port = kwargs.get('port')
LOG.debug(_('Create dhcp port %(port)s '
'from %(host)s.'),
{'port': port,
'host': host})
port['port']['device_owner'] = constants.DEVICE_OWNER_DHCP
port['port'][portbindings.HOST_ID] = host
if 'mac_address' not in port['port']:
port['port']['mac_address'] = attributes.ATTR_NOT_SPECIFIED
plugin = manager.NeutronManager.get_plugin()
return self._port_action(plugin, context, port, 'create_port')
def update_dhcp_port(self, context, **kwargs):
"""Update the dhcp port."""
host = kwargs.get('host')
port_id = kwargs.get('port_id')
port = kwargs.get('port')
LOG.debug(_('Update dhcp port %(port)s '
'from %(host)s.'),
{'port': port,
'host': host})
plugin = manager.NeutronManager.get_plugin()
return self._port_action(plugin, context,
{'id': port_id, 'port': port},
'update_port')
| CingHu/neutron-ustack | neutron/api/rpc/handlers/dhcp_rpc.py | Python | apache-2.0 | 12,773 |
from rdrf.helpers.utils import report_function
from registry.patients.models import PatientAddress
@report_function
def professionals(patient_model):
return patient_model.clinician
@report_function
def country(patient_model):
try:
patient_address = PatientAddress.objects.get(patient=patient_model)
return patient_address.country
    except BaseException:
        # No address record found (or the lookup failed); implicitly return None.
        pass
@report_function
def last_login(patient_model):
    # If there is a user associated with this patient,
    # return its last login time.
    # This only makes sense for FKRP-like registries.
user = patient_model.user
if user is not None:
return user.last_login
| muccg/rdrf | rdrf/rdrf/services/io/reporting/report_field_functions.py | Python | agpl-3.0 | 674 |
import abc
from Component import Component
class ComponentCavity(Component):
"""
Abstract base class for all cavity component adapter classes.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, obj, system, material, position):
Component.__init__(self, obj, system, material)
obj.addProperty("App::PropertyVector", "Position", "Cavity", "Position within the cavity.")
obj.setEditorMode("Position", 1)
obj.addProperty("App::PropertyLink", "Structure", "Structure", "Fused structure.")
obj.setEditorMode("Structure", 2)
obj.addProperty("App::PropertyFloatList", "Pressure", "Subsystem", "Mean pressure.")
obj.setEditorMode('Pressure', 1)
obj.addProperty("App::PropertyFloatList", "PressureLevel", "Subsystem", "Pressure level.")
obj.setEditorMode('PressureLevel', 1)
obj.Structure = system.Structure
obj.Position = position
self.execute(obj)
def onChanged(self, obj, prop):
Component.onChanged(self, obj, prop)
def execute(self, obj):
self.updateCavity(obj)
Component.execute(self, obj)
obj.Pressure = obj.Proxy.pressure.tolist()
obj.PressureLevel = obj.Proxy.pressure_level.tolist()
def updateCavity(self, obj):
"""
Update the Shape of the Cavity.
"""
obj.Shape = self.getCavityShape(obj.Structure, obj.Position)
obj.Volume = obj.Shape.Volume
@staticmethod
def getCavityShape(structure, position):
"""
Return shape of cavity in structure for a certain position.
:param structure: a :class:`Part.MultiFuse`
:param position: a :class:`FreeCAD.Vector`
:rtype: :class:`FreeCAD.TopoShape`
"""
#structure = obj.Structure
tolerance = 0.01
allowface = False
for shape in structure.Shape.Shells:
if shape.isInside(position, tolerance, allowface) and shape.Volume < 0.0:
shape.complement() # Reverse the shape to obtain positive volume
return shape
#else:
#App.Console.PrintWarning("No cavity at this position.\n")
| python-acoustics/Sea | Sea/adapter/components/ComponentCavity.py | Python | bsd-3-clause | 2,308 |
#!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from mininode import *
from blockstore import BlockStore, TxStore
from util import p2p_port
'''
This is a tool for comparing two or more bitcoinds to each other
using a script provided.
To use, create a class that implements get_tests(), and pass it in
as the test generator to TestManager. get_tests() should be a python
generator that returns TestInstance objects. See below for definition.
'''
# TestNode behaves as follows:
# Configure with a BlockStore and TxStore
# on_inv: log the message but don't request
# on_headers: log the chain tip
# on_pong: update ping response map (for synchronization)
# on_getheaders: provide headers via BlockStore
# on_getdata: provide blocks via BlockStore
global mininode_lock
class TestNode(NodeConnCB):
def __init__(self, block_store, tx_store):
NodeConnCB.__init__(self)
self.create_callback_map()
self.conn = None
self.bestblockhash = None
self.block_store = block_store
self.block_request_map = {}
self.tx_store = tx_store
self.tx_request_map = {}
# When the pingmap is non-empty we're waiting for
# a response
self.pingMap = {}
self.lastInv = []
def add_connection(self, conn):
self.conn = conn
def on_headers(self, conn, message):
if len(message.headers) > 0:
best_header = message.headers[-1]
best_header.calc_sha256()
self.bestblockhash = best_header.sha256
def on_getheaders(self, conn, message):
response = self.block_store.headers_for(message.locator, message.hashstop)
if response is not None:
conn.send_message(response)
def on_getdata(self, conn, message):
[conn.send_message(r) for r in self.block_store.get_blocks(message.inv)]
[conn.send_message(r) for r in self.tx_store.get_transactions(message.inv)]
for i in message.inv:
if i.type == 1:
self.tx_request_map[i.hash] = True
elif i.type == 2:
self.block_request_map[i.hash] = True
def on_inv(self, conn, message):
self.lastInv = [x.hash for x in message.inv]
def on_pong(self, conn, message):
try:
del self.pingMap[message.nonce]
except KeyError:
raise AssertionError("Got pong for unknown ping [%s]" % repr(message))
def send_inv(self, obj):
mtype = 2 if isinstance(obj, CBlock) else 1
self.conn.send_message(msg_inv([CInv(mtype, obj.sha256)]))
def send_getheaders(self):
# We ask for headers from their last tip.
m = msg_getheaders()
m.locator = self.block_store.get_locator(self.bestblockhash)
self.conn.send_message(m)
# This assumes BIP31
def send_ping(self, nonce):
self.pingMap[nonce] = True
self.conn.send_message(msg_ping(nonce))
def received_ping_response(self, nonce):
return nonce not in self.pingMap
def send_mempool(self):
self.lastInv = []
self.conn.send_message(msg_mempool())
# TestInstance:
#
# Instances of these are generated by the test generator, and fed into the
# comptool.
#
# "blocks_and_transactions" should be an array of [obj, True/False/None]:
# - obj is either a CBlock or a CTransaction, and
# - the second value indicates whether the object should be accepted
# into the blockchain or mempool (for tests where we expect a certain
# answer), or "None" if we don't expect a certain answer and are just
# comparing the behavior of the nodes being tested.
# sync_every_block: if True, then each block will be inv'ed, synced, and
# nodes will be tested based on the outcome for the block. If False,
# then inv's accumulate until all blocks are processed (or max inv size
# is reached) and then sent out in one inv message. Then the final block
# will be synced across all connections, and the outcome of the final
# block will be tested.
# sync_every_tx: analogous to behavior for sync_every_block, except if outcome
# on the final tx is None, then contents of entire mempool are compared
# across all connections. (If outcome of final tx is specified as true
# or false, then only the last tx is tested against outcome.)
class TestInstance(object):
def __init__(self, objects=[], sync_every_block=True, sync_every_tx=False):
self.blocks_and_transactions = objects
self.sync_every_block = sync_every_block
self.sync_every_tx = sync_every_tx
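# Illustrative sketch of a test generator with the shape TestManager expects.
# The helpers make_block() and make_tx() are hypothetical placeholders for
# test-specific construction code; only the TestInstance usage is the point here.
#
#   class ExampleTestGen(object):
#       def get_tests(self):
#           block = make_block()                 # a CBlock built by the test
#           yield TestInstance([[block, True]])  # expect all nodes to accept it
#           tx = make_tx()                       # a CTransaction built by the test
#           # No expected outcome: just compare mempools across all connections
#           yield TestInstance([[tx, None]], sync_every_tx=True)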
class TestManager(object):
def __init__(self, testgen, datadir):
self.test_generator = testgen
self.connections = []
self.block_store = BlockStore(datadir)
self.tx_store = TxStore(datadir)
self.ping_counter = 1
def add_all_connections(self, nodes):
for i in range(len(nodes)):
# Create a p2p connection to each node
self.connections.append(NodeConn('127.0.0.1', p2p_port(i),
nodes[i], TestNode(self.block_store, self.tx_store)))
# Make sure the TestNode (callback class) has a reference to its
# associated NodeConn
self.connections[-1].cb.add_connection(self.connections[-1])
def wait_for_verack(self):
sleep_time = 0.05
max_tries = 10 / sleep_time # Wait at most 10 seconds
while max_tries > 0:
done = True
with mininode_lock:
for c in self.connections:
if c.cb.verack_received is False:
done = False
break
if done:
break
            time.sleep(sleep_time)
            max_tries -= 1
def wait_for_pings(self, counter):
received_pongs = False
while received_pongs is not True:
time.sleep(0.05)
received_pongs = True
with mininode_lock:
for c in self.connections:
if c.cb.received_ping_response(counter) is not True:
received_pongs = False
break
# sync_blocks: Wait for all connections to request the blockhash given
# then send get_headers to find out the tip of each node, and synchronize
# the response by using a ping (and waiting for pong with same nonce).
def sync_blocks(self, blockhash, num_blocks):
# Wait for nodes to request block (50ms sleep * 20 tries * num_blocks)
max_tries = 20*num_blocks
while max_tries > 0:
with mininode_lock:
results = [ blockhash in c.cb.block_request_map and
c.cb.block_request_map[blockhash] for c in self.connections ]
if False not in results:
break
time.sleep(0.05)
max_tries -= 1
# --> error if not requested
if max_tries == 0:
# print [ c.cb.block_request_map for c in self.connections ]
raise AssertionError("Not all nodes requested block")
# --> Answer request (we did this inline!)
# Send getheaders message
[ c.cb.send_getheaders() for c in self.connections ]
# Send ping and wait for response -- synchronization hack
[ c.cb.send_ping(self.ping_counter) for c in self.connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
# Analogous to sync_block (see above)
def sync_transaction(self, txhash, num_events):
# Wait for nodes to request transaction (50ms sleep * 20 tries * num_events)
max_tries = 20*num_events
while max_tries > 0:
with mininode_lock:
results = [ txhash in c.cb.tx_request_map and
c.cb.tx_request_map[txhash] for c in self.connections ]
if False not in results:
break
time.sleep(0.05)
max_tries -= 1
# --> error if not requested
if max_tries == 0:
# print [ c.cb.tx_request_map for c in self.connections ]
raise AssertionError("Not all nodes requested transaction")
# --> Answer request (we did this inline!)
# Get the mempool
[ c.cb.send_mempool() for c in self.connections ]
# Send ping and wait for response -- synchronization hack
[ c.cb.send_ping(self.ping_counter) for c in self.connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
# Sort inv responses from each node
with mininode_lock:
[ c.cb.lastInv.sort() for c in self.connections ]
    # Verify that the tips of all connections agree with each other, and
# with the expected outcome (if given)
def check_results(self, blockhash, outcome):
with mininode_lock:
for c in self.connections:
if outcome is None:
if c.cb.bestblockhash != self.connections[0].cb.bestblockhash:
return False
elif ((c.cb.bestblockhash == blockhash) != outcome):
# print c.cb.bestblockhash, blockhash, outcome
return False
return True
# Either check that the mempools all agree with each other, or that
# txhash's presence in the mempool matches the outcome specified.
# This is somewhat of a strange comparison, in that we're either comparing
# a particular tx to an outcome, or the entire mempools altogether;
# perhaps it would be useful to add the ability to check explicitly that
# a particular tx's existence in the mempool is the same across all nodes.
def check_mempool(self, txhash, outcome):
with mininode_lock:
for c in self.connections:
if outcome is None:
# Make sure the mempools agree with each other
if c.cb.lastInv != self.connections[0].cb.lastInv:
# print c.rpc.getrawmempool()
return False
elif ((txhash in c.cb.lastInv) != outcome):
# print c.rpc.getrawmempool(), c.cb.lastInv
return False
return True
def run(self):
# Wait until verack is received
self.wait_for_verack()
test_number = 1
for test_instance in self.test_generator.get_tests():
# We use these variables to keep track of the last block
# and last transaction in the tests, which are used
# if we're not syncing on every block or every tx.
[ block, block_outcome ] = [ None, None ]
[ tx, tx_outcome ] = [ None, None ]
invqueue = []
for b_or_t, outcome in test_instance.blocks_and_transactions:
# Determine if we're dealing with a block or tx
if isinstance(b_or_t, CBlock): # Block test runner
block = b_or_t
block_outcome = outcome
# Add to shared block_store, set as current block
with mininode_lock:
self.block_store.add_block(block)
for c in self.connections:
c.cb.block_request_map[block.sha256] = False
# Either send inv's to each node and sync, or add
# to invqueue for later inv'ing.
if (test_instance.sync_every_block):
[ c.cb.send_inv(block) for c in self.connections ]
self.sync_blocks(block.sha256, 1)
if (not self.check_results(block.sha256, outcome)):
raise AssertionError("Test failed at test %d" % test_number)
else:
invqueue.append(CInv(2, block.sha256))
else: # Tx test runner
assert(isinstance(b_or_t, CTransaction))
tx = b_or_t
tx_outcome = outcome
# Add to shared tx store and clear map entry
with mininode_lock:
self.tx_store.add_transaction(tx)
for c in self.connections:
c.cb.tx_request_map[tx.sha256] = False
# Again, either inv to all nodes or save for later
if (test_instance.sync_every_tx):
[ c.cb.send_inv(tx) for c in self.connections ]
self.sync_transaction(tx.sha256, 1)
if (not self.check_mempool(tx.sha256, outcome)):
raise AssertionError("Test failed at test %d" % test_number)
else:
invqueue.append(CInv(1, tx.sha256))
# Ensure we're not overflowing the inv queue
if len(invqueue) == MAX_INV_SZ:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
# Do final sync if we weren't syncing on every block or every tx.
if (not test_instance.sync_every_block and block is not None):
if len(invqueue) > 0:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
self.sync_blocks(block.sha256,
len(test_instance.blocks_and_transactions))
if (not self.check_results(block.sha256, block_outcome)):
raise AssertionError("Block test failed at test %d" % test_number)
if (not test_instance.sync_every_tx and tx is not None):
if len(invqueue) > 0:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
self.sync_transaction(tx.sha256, len(test_instance.blocks_and_transactions))
if (not self.check_mempool(tx.sha256, tx_outcome)):
raise AssertionError("Mempool test failed at test %d" % test_number)
print "Test %d: PASS" % test_number, [ c.rpc.getblockcount() for c in self.connections ]
test_number += 1
self.block_store.close()
self.tx_store.close()
[ c.disconnect_node() for c in self.connections ]
| coinkeeper/2015-06-22_18-31_bitcoin | qa/rpc-tests/test_framework/comptool.py | Python | mit | 14,544 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-12-09 21:26
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0004_task_due_date'),
]
operations = [
migrations.AddField(
model_name='task',
name='scheduled_date',
field=models.DateField(null=True),
),
]
| chiubaka/serenity | server/api/migrations/0005_task_scheduled_date.py | Python | mit | 442 |
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.process.subprocess import Subprocess
from pants.testutil.subsystem.util import global_subsystem_instance
from pants.testutil.test_base import TestBase
class SubprocessTest(TestBase):
def subprocess(self):
return global_subsystem_instance(Subprocess.Factory).create()
def test_get_subprocess_dir(self):
self.assertTrue(self.subprocess().get_subprocess_dir().endswith("/.pids"))
| tdyas/pants | tests/python/pants_test/process/test_subprocess.py | Python | apache-2.0 | 547 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
VERSION = "2.0"
# Official repo: https://github.com/pdehaye/edx-presenter
# (a fork of mokaspar's release)
# Originally authored by mokaspar (edx-presenter.py),
# tailored for mat101 by Paul-Olivier Dehaye (mat101-presenter.py)
# To see the full help, type: ./mat101-presenter.py --help
"""
README
This command line tool requires the PyYAML library. This library can be installed by the following two commands in your shell:
$ easy_install pip
$ pip install pyyaml
"""
usage = """usage: %prog [options] <source1>, [<source2>,...]
Introduction:
This command line tool converts groups into an edx course. It also individually compresses groups into single files.
A group is a local directory or remote GIT repository containing a 'group.yaml' file (for more information on YAML, see http://en.wikipedia.org/wiki/YAML). This file describes the content of the group and must contain the following properties:
project: Project A
group: Group 1
authors:
- name: mokaspar
email: mokaspar@gmail.com
edx: NotAvailable
content:
- pdf: docs/introduction.pdf
- html: docs/html1.html
- text: docs/simple.txt
- html: docs/html2.html
- file: docs/sample-data.zip
- video: http://www.youtube.com/watch?v=04ZOMuAg2bA
- source: src/
The order of the content is preserved and the same content type can be added multiple times.
The paths are expected to be relative to the directory containing the 'group.yaml' file.
'pdf', 'html' and 'text' just process one file but 'source' processes all files in the directory and its subdirectories.
Note that you can use LaTeX in 'html' and 'text' files inside the usual \[ ... \] block.
'video' currently supports just YouTube. Remember to make your video public, or at least unlisted.
Remark:
- All files are expected to be UTF-8 encoded.
"""
"""
# Source
"""
Workflow
1. A temporary directory is created
2. All GIT sources are cloned inside this temporary directory
3. The 'group.yaml' of each group is interpreted to build the data tree.
4. An edx directory is created inside the temporary directory, into which all the data is processed
5. The edx directory gets compressed
The source is in 3 parts (in reverse order of the workflow)
Part 1:
For each content type there is a class ContentXXX which takes care about generating the files for the edx directory
Part 2:
Each group and project is represented by a class.
Part 3:
Contains the __main__ method and does the GIT handling
"""
DISPLAY_NAME = "MAT101 projects"
ORG_NAME = "UZH"
PROFILE_BASE = "https://mat101hs15.math.uzh.ch/courses/course-v1:UZH+MAT101+HS2015/wiki/UZH.MAT101.HS2015/profiles/"
# Projects that do not require the comment field for every author. This is meant to
# give credit to individual authors for their contributions.
PROJECTS_NO_CREDITS_REQUIRED = ["Example Projects",
"Project A",
"Project B",
"Project C"]
THINLINC_PATH = "/scratch/student/upload/mat101/hs13"
# Handles command line arguments
from optparse import OptionParser
# Logging functionality of python.
import logging
# XML and yaml
from xml.etree.cElementTree import Element, SubElement, ElementTree
# Regular expressions
import re
# Packages for file and process operatons
import sys
import os
import tarfile
import subprocess
import shutil
import tempfile
# utf-8 support
import codecs
# misc
import uuid
import operator
import cgi
import urlparse
# PyYAML is not a core module.
try:
import yaml
except ImportError:
print """ERROR: The module PyYAML is required but missing. You can install it with the following commands:
$ easy_install pip
$ pip install pyyaml
"""
sys.exit(1)
import_file = []
upload_files = []
def escape(string):
'''Escapes the string for HTML.'''
return cgi.escape(string).encode('ascii', 'xmlcharrefreplace')
# --- Part 1 -----------------------------------------------------------------
class ContentDiscussion:
def __init__(self, parent):
self.parent = parent
def url_name(self):
"Using the fact, that there is exactly one discussion for each group."
return re.sub(r'\W+', '', self.parent.url_name() + '_discussion')
def edx(self, out_dir):
discussion_dir = os.path.join(out_dir, 'discussion')
if not os.path.exists(discussion_dir):
os.makedirs(discussion_dir)
discussion = Element('discussion', {'discussion_id':self.url_name(),
'discussion_category': self.parent.project(),
'discussion_target': self.parent.group() })
tree = ElementTree(discussion)
tree.write(os.path.join(discussion_dir, "{0}.xml".format(self.url_name())) )
def parent_tag(self, xml):
"Adds the XML element pointing to this resoure to the vertical."
e = SubElement(xml, 'discussion', {'url_name':self.url_name()})
class ContentIntro:
def __init__(self, parent):
self.parent = parent
def url_name(self):
"Using the fact, that there is exactly one intro for each group."
return re.sub(r'\W+', '', self.parent.url_name() + '_intro')
def edx(self, out_dir):
# Create the HTML-page with the details
html_dir = os.path.join(out_dir, 'html')
if not os.path.exists(html_dir):
os.makedirs(html_dir)
html = Element('html', {'filename':self.url_name(), 'display_name':"Intro"});
tree = ElementTree(html)
tree.write(os.path.join(html_dir, "{0}.xml".format(self.url_name())) )
#Create the corresponding html-file
html = '''<h2>%(project)s: %(group)s</h2>
''' % {'project':escape(self.parent.project()), 'group':escape(self.parent.group()) }
html += '<div class="authors">Author(s):<ul>'
for author in self.parent.authors():
profile_URL = PROFILE_BASE + escape(author['edx']) + "/"
try:
comment = ": " + escape(author['comment'])
except:
if not self.parent.project() in PROJECTS_NO_CREDITS_REQUIRED:
raise KeyError("You need to include a comment about your code contribution for %s %s"%(author["name"], self.parent.project()))
comment = ""
print "Please check that the following profile exists:\n %s\n" % profile_URL
identity = dict(email = escape(author.get('email',"")),
name = escape(author['name']),
edx = escape(author['edx']),
comment = comment,
profile_URL = profile_URL)
#identity = {_: "anonymized" for _ in ["email", "name", "edx", "profile_URL"]}
html += '<li><a href="mailto:%(email)s">%(name)s</a> AKA <a href="%(profile_URL)s">%(edx)s</a>%(comment)s</li>' % identity
html += '</ul></div>'
with codecs.open(os.path.join(html_dir, "{0}.html".format(self.url_name())), mode='w', encoding='utf-8') as f:
f.write(html)
def parent_tag(self, xml):
"Adds the XML element pointing to this resoure to the vertical."
e = SubElement(xml, 'html', {'url_name':self.url_name()})
class ContentHTML:
def __init__(self, parent, path):
logging.debug("ContentHTML:__init__ %s", path)
self.parent = parent
self.path = path
def url_name(self):
return re.sub(r'\W+', '', self.parent.url_name() + '_html_' + self.path)
def edx(self, out_dir):
html_dir = os.path.join(out_dir, 'html')
if not os.path.exists(html_dir):
os.makedirs(html_dir)
html = Element('html', {'filename':self.url_name(), 'display_name':"HTML"});
tree = ElementTree(html)
tree.write(os.path.join(html_dir, "{0}.xml".format(self.url_name())) )
#Copy the corresponding html-file
shutil.copyfile(os.path.join(self.parent.path, self.path), os.path.join(html_dir, "{0}.html".format(self.url_name())) );
def parent_tag(self, xml):
"Adds the XML element pointing to this resoure to the vertical."
e = SubElement(xml, 'html', {'url_name':self.url_name()})
class ContentFile:
def __init__(self, parent, path):
logging.debug("ContentFile:__init__ %s", path)
self.parent = parent
self.path = path
def url_name(self):
fileName, fileExtension = os.path.splitext(self.path)
return re.sub(r'[^(\w|.)]+', '', self.parent.url_name() + '_' + fileName)
def edx(self, out_dir):
# Copy the Pdf to the static directory
static_dir = os.path.join(out_dir, 'static')
if not os.path.exists(static_dir):
os.makedirs(static_dir)
        # In order to get a unique filename inside edx, we have to prefix the project and group name
_, fileExtension = os.path.splitext(self.path)
target_filename = self.url_name()+fileExtension
target_path = os.path.join(static_dir,target_filename)
shutil.copyfile(os.path.join(self.parent.path, self.path), target_path);
html_dir = os.path.join(out_dir, 'html')
if not os.path.exists(html_dir):
os.makedirs(html_dir)
html = Element('html', {'filename':self.url_name(), 'display_name':"File"});
tree = ElementTree(html)
tree.write(os.path.join(html_dir, "{0}.xml".format(self.url_name())) )
(_ , filename) = os.path.split(self.path)
html = '''
<a href="/static/%(file)s">Download %(filename)s</a>
''' % {'file':target_filename, 'filename':filename}
with codecs.open(os.path.join(html_dir, "{0}.html".format(self.url_name())), mode='w', encoding='utf-8') as f:
f.write(html)
def parent_tag(self, xml):
"Adds the XML element pointing to this resoure to the vertical."
e = SubElement(xml, 'html', {'url_name':self.url_name()})
class ContentSource:
def __init__(self, parent, path):
logging.debug("ContentSource:__init__ %s", path)
self.parent = parent
self.path = path
def url_name(self):
return re.sub(r'\W+', '', self.parent.url_name() + '_source_' + self.path)
def edx(self, out_dir):
# Path of the source directory relative to our working directory
path_complete = os.path.join(self.parent.path, self.path)
# Create a archive with the source inside the static directory
static_dir = os.path.join(out_dir, 'static')
if not os.path.exists(static_dir):
os.makedirs(static_dir)
        # In order to get a unique filename inside edx, we have to prefix the project and group name
target_filename = self.url_name()
target_path = os.path.join(static_dir, target_filename)
tar = tarfile.open(target_path, "w:gz")
tar.add(path_complete, arcname=os.path.basename(path_complete))
tar.close()
html_dir = os.path.join(out_dir, 'html')
if not os.path.exists(html_dir):
os.makedirs(html_dir)
html = Element('html', {'filename':self.url_name(), 'display_name':"Source"});
tree = ElementTree(html)
tree.write(os.path.join(html_dir, "{0}.xml".format(self.url_name())) )
html = '''<h3>Source of %(path)s</h3>
''' % {'path':escape(self.path) }
html += '''
<a href="/static/%(file)s">Download source as archive</a>
''' % {'file':target_filename}
if self.path[-3:] == ".py":
# Link to a single file
# We simulate the output of os.walk:
tmp_path = os.path.split(os.path.join(self.parent.path, self.path))
cleaned_path = [(os.path.join(tmp_path[:-1][0]), None, [tmp_path[-1]])]
else:
cleaned_path = os.walk(os.path.join(self.parent.path, self.path))
for dirname, dirnames, filenames in cleaned_path:
# Process each file
for filename in filenames:
#ignore any non-python file
if not filename.endswith('.py') or filename.startswith('.'):
continue
path_full = os.path.join(dirname, filename)
# This path is relative to the group definition
path_relative = path_full[len(self.parent.path):]
html += '<h3>%(path)s</h3>\n' % {'path':escape(path_relative)}
# It would be better to control the font-size by the theme
html += '<script src="https://google-code-prettify.googlecode.com/svn/loader/run_prettify.js?skin=tomorrow"></script>'
html += '<pre class="prettyprint python">'
with codecs.open(path_full, mode='r', encoding='utf-8') as f:
html += escape(f.read())
html += '</pre>'
with codecs.open(os.path.join(html_dir, "{0}.html".format(self.url_name())), mode='w', encoding='utf-8') as f:
f.write(html)
def parent_tag(self, xml):
"Adds the XML element pointing to this resoure to the vertical."
e = SubElement(xml, 'html', {'url_name':self.url_name()})
class ContentText:
def __init__(self, parent, path):
logging.debug("ContentText:__init__ %s", path)
self.parent = parent
self.path = path
def url_name(self):
return re.sub(r'\W+', '', self.parent.url_name() + '_text_' + self.path)
def edx(self, out_dir):
html_dir = os.path.join(out_dir, 'html')
if not os.path.exists(html_dir):
os.makedirs(html_dir)
html = Element('html', {'filename':self.url_name(), 'display_name':"Text"});
tree = ElementTree(html)
tree.write(os.path.join(html_dir, "{0}.xml".format(self.url_name())) )
html = ''
html += '<div>'
with codecs.open (os.path.join(self.parent.path, self.path), mode="r", encoding='utf-8') as f:
html += re.sub('\n', '<br/>',escape(f.read()) )
html += '</div>'
with codecs.open(os.path.join(html_dir, "{0}.html".format(self.url_name())), mode='w', encoding='utf-8') as f:
f.write(html)
def parent_tag(self, xml):
"Adds the XML element pointing to this resoure to the vertical."
e = SubElement(xml, 'html', {'url_name':self.url_name()})
class ContentVideoYouTube:
def __init__(self, parent, youtube_id):
logging.debug("ContentVideoYouTube:__init__ %s", youtube_id)
self.parent = parent
self.youtube_id = youtube_id
def url_name(self):
return re.sub(r'\W+', '', self.parent.url_name() + '_youtube_' + self.youtube_id)
def edx(self, out_dir):
video_dir = os.path.join(out_dir, 'video')
if not os.path.exists(video_dir):
os.makedirs(video_dir)
video = Element('video', {'youtube':'1.00:'+self.youtube_id, 'youtube_id_1_0':self.youtube_id});
tree = ElementTree(video)
tree.write(os.path.join(video_dir, "{0}.xml".format(self.url_name())) )
def parent_tag(self, xml):
"Adds the XML element pointing to this resoure to the vertical."
e = SubElement(xml, 'video', {'url_name':self.url_name()})
class ContentPdf:
def __init__(self, parent, path):
logging.debug("ContentPdf:__init__ %s", path)
self.parent = parent
self.path = path
def url_name(self):
return re.sub(r'\W+', '', self.parent.url_name() + '_' + self.path)
def edx(self, out_dir):
# Copy the Pdf to the static directory
static_dir = os.path.join(out_dir, 'static')
if not os.path.exists(static_dir):
os.makedirs(static_dir)
        # In order to get a unique filename inside edx, we have to prefix the project and group name
target_filename = self.url_name()+'.pdf'
target_path = os.path.join(static_dir,target_filename)
shutil.copyfile(os.path.join(self.parent.path, self.path), target_path);
html_dir = os.path.join(out_dir, 'html')
if not os.path.exists(html_dir):
os.makedirs(html_dir)
html = Element('html', {'filename':self.url_name(), 'display_name':"Pdf"});
tree = ElementTree(html)
tree.write(os.path.join(html_dir, "{0}.xml".format(self.url_name())) )
# We have to double %% because % is a placeholder for the argument
html = ''
if courseURL == None:
logging.warning("courseURL is not specified. Therefore the inline pdf-viewer will be disabled.")
else:
html += '''
<object data="%(courseURL)s/asset/%(file)s" type="application/pdf" width="100%%" height="600pt">
''' %{'courseURL':courseURL , 'file':target_filename}
html += '''
<a href="/static/%(file)s">Download Pdf %(name)s</a>
''' % {'file':target_filename, 'name':os.path.basename(self.path)}
html += "</object>"
with codecs.open(os.path.join(html_dir, "{0}.html".format(self.url_name())), mode='w', encoding='utf-8') as f:
f.write(html)
def parent_tag(self, xml):
"Adds the XML element pointing to this resoure to the vertical."
e = SubElement(xml, 'html', {'url_name':self.url_name()})
class ContentImg:
def __init__(self, parent, path):
logging.debug("ContentImg:__init__ %s", path)
self.parent = parent
self.path = path
def url_name(self):
return re.sub(r'\W+', '', self.parent.url_name() + '_' + self.path)
def edx(self, out_dir):
# Copy the image to the static directory
static_dir = os.path.join(out_dir, 'static')
if not os.path.exists(static_dir):
os.makedirs(static_dir)
        # In order to get a unique filename inside edx, we have to prefix the project and group name
        # We cannot use the filename, because it may contain characters that have to be escaped.
# Therefore we just add the extension, which is expected to contain [a-z][A-Z][0-9].
_, fileExtension = os.path.splitext(self.path)
target_filename = self.url_name()+fileExtension
target_path = os.path.join(static_dir,target_filename)
shutil.copyfile(os.path.join(self.parent.path, self.path), target_path);
html_dir = os.path.join(out_dir, 'html')
if not os.path.exists(html_dir):
os.makedirs(html_dir)
html = Element('html', {'filename':self.url_name(), 'display_name':"Img"});
tree = ElementTree(html)
tree.write(os.path.join(html_dir, "{0}.xml".format(self.url_name())) )
# We have to double %% because % is a placeholder for the argument
html = '<img src="/static/%(file)s">' % {'file':target_filename}
html += '''<br>
<a href="/static/%(file)s">Download Image %(name)s</a>
''' % {'file':target_filename, 'name':os.path.basename(self.path)}
with codecs.open(os.path.join(html_dir, "{0}.html".format(self.url_name())), mode='w', encoding='utf-8') as f:
f.write(html)
def parent_tag(self, xml):
"Adds the XML element pointing to this resoure to the vertical."
e = SubElement(xml, 'html', {'url_name':self.url_name()})
# --- Part 2 -----------------------------------------------------------------
class Project:
def __init__(self,project):
self.project = project
self.groups = []
def append(self, group):
"""Appends a group to a project and keeps the groups list sorted"""
self.groups.append(group)
self.groups = sorted(self.groups, key=operator.methodcaller('group'))
def __len__(self):
return len(self.groups)
def __getitem__(self, key):
return self.groups[key]
def url_name(self):
"""Just keeps the basic ASCII characters: It removes any whitespaces and umlauts."""
return re.sub(r'\W+', '', self.project)
def __repr__(self):
return "<Project '{0}' {1}>".format(escape(self.project), repr(self.groups))
def edx(self, out_dir):
chapter_dir = os.path.join(out_dir, 'chapter')
if not os.path.exists(chapter_dir):
os.makedirs(chapter_dir)
chapter = Element('chapter', {'display_name':escape(self.project)});
for group in self.groups:
e = SubElement(chapter, 'sequential')
e.set('url_name', group.url_name())
tree = ElementTree(chapter)
tree.write(os.path.join(chapter_dir, "{0}.xml".format(self.url_name())) )
for group in self.groups:
group.edx(out_dir)
class Group:
"""Represents a submitted group."""
def __init__(self, path):
self.path = path
self.content = []
def load(self):
"""Reads the definition file of this group."""
        # We don't catch any exception here, because the calling function should decide what to do.
f = codecs.open(os.path.join(self.path, 'group.yaml'), mode='r', encoding='utf-8')
self.properties = yaml.safe_load(f)
self.content = []
self.content.append(ContentIntro(self))
for c in self.properties['content']:
co = None
if 'html' in c:
co = ContentHTML(self, c['html'])
if 'source' in c:
co = ContentSource(self, c['source'])
if 'text' in c:
co = ContentText(self, c['text'])
if 'video' in c:
# We currently only support youtube
o = urlparse.urlparse(c['video'])
if o.netloc == 'www.youtube.com':
co = ContentVideoYouTube(self, o.query[2:])
else:
raise SubprocessError('Undefined video source {0}'.format(c['video']))
if 'pdf' in c:
co = ContentPdf(self, c['pdf'])
if 'file' in c:
co = ContentFile(self, c['file'])
if 'img' in c:
co = ContentImg(self, c['img'])
if co is not None:
self.content.append(co)
else:
logging.info('Undefined source %s', c)
self.content.append(ContentDiscussion(self))
def project(self):
return self.properties['project']
def group(self):
return self.properties['group']
def authors(self):
return self.properties['authors']
def url_name(self):
"""Just keeps the basic ASCII characters: It removes any whitespaces and umlauts."""
return re.sub(r'\W+', '', (self.project() + '__' + self.group()).replace(" ","_") ).lstrip("_")
def __repr__(self):
return "<Group '{0}/{1}'>".format(escape(self.project()), escape(self.group()))
def edx(self, out_dir):
sequential_dir = os.path.join(out_dir, 'sequential')
if not os.path.exists(sequential_dir):
os.makedirs(sequential_dir)
sequential = Element('sequential', {'display_name':escape(self.group())});
e = SubElement(sequential, 'vertical')
e.set('url_name', self.url_name()+'_vertical')
tree = ElementTree(sequential)
tree.write(os.path.join(sequential_dir, "{0}.xml".format(self.url_name())) )
vertical_dir = os.path.join(out_dir, 'vertical')
if not os.path.exists(vertical_dir):
os.makedirs(vertical_dir)
vertical = Element('vertical', {'display_name':'MainUnit'});
for c in self.content:
c.parent_tag(vertical)
c.edx(out_dir)
tree = ElementTree(vertical)
tree.write(os.path.join(vertical_dir, "{0}.xml".format(self.url_name()+'_vertical')) )
# --- Part 3 -----------------------------------------------------------------
# This customized LoggingFormatter formats all the output of this tool.
class LoggingFormatter(logging.Formatter):
FORMATS = {logging.DEBUG : "DEBUG: %(module)s: %(lineno)d: %(message)s",
logging.ERROR : "ERROR: %(message)s",
logging.WARNING : "WARNING: %(message)s",
logging.INFO : "%(message)s",
'DEFAULT' : "%(levelname)s: %(message)s"
}
def format(self, record):
self._fmt = self.FORMATS.get(record.levelno, self.FORMATS['DEFAULT'])
return logging.Formatter.format(self, record)
class SubprocessError(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
def main():
# The following block parses the arguments supplied.
parser = OptionParser(usage=usage)
parser.add_option("-u", "--course-url", default=None,
dest="course_url",
help="Specifies the public URL of the course. It is used for the inline Pdf viewer using Google-Docs. [default: %default]")
parser.add_option("-v", "--verbose",
action="count", dest="verbose", default=False,
help="Increase verbosity (specify multiple times for more)")
parser.add_option("-o", "--output", default="to_import.tar.gz",
metavar="FILE", dest="output",
help="Specifies the filename of the generated edx-file relative to the working directory. [default: %default]")
parser.add_option("--tmp",
metavar="DIR", dest="tmp",
help="""Configures the directory to use for the intermediate files.
                      If set, this directory will not be deleted. If not specified,
a temporary directory is created by the operating system and deleted.
"""
)
(options, sources) = parser.parse_args()
global courseURL
courseURL = options.course_url
# Setting up the logging facility.
log_level = logging.WARNING
if options.verbose == 1:
log_level = logging.INFO
elif options.verbose >= 2:
log_level = logging.DEBUG
fmt = LoggingFormatter()
hdlr = logging.StreamHandler(sys.stdout)
hdlr.setFormatter(fmt)
logging.root.addHandler(hdlr)
logging.root.setLevel(log_level)
# When debugging, it's always good to know the values of the following variables:
logging.debug("Options %s", options)
logging.debug("Sources %s", sources)
if len(sources) == 0:
logging.error("Expects least one source.")
parser.print_help()
sys.exit(1)
try:
        # Setup of our temporary directory, where we do all the file processing.
if options.tmp is None:
tmp_dir = tempfile.mkdtemp();
else:
tmp_dir = options.tmp
if not os.path.exists(tmp_dir):
os.makedirs(tmp_dir)
logging.debug("Temporary directory: %s", tmp_dir)
# The strategy is as follows:
# projects is a dictionary with the project name as the key and a list of groups as values.
        # When all groups are loaded, we transform the dict into a list, sort it and sort all the groups inside each project.
projects = {}
# If we are verbose, we print stdout/stderr of subprocesses (GIT).
subprocess_setting = {'stderr':subprocess.PIPE, 'stdout':subprocess.PIPE}
if logging.getLogger().getEffectiveLevel() <= logging.DEBUG:
subprocess_setting = {'stderr':None, 'stdout':None}
# We now load each source.
for source in sources:
if not os.path.exists(source):
if source.endswith(('zip', 'tar.gz')):
logging.info("Assuming that %s is a remote archive.", source)
# requests is not a core module.
try:
import requests
except ImportError:
print """ERROR: The module requests is required but missing for remote archives. You can install it with the following commands:
$ easy_install pip
$ pip install requests
"""
sys.exit(1)
# We need a 'unique' directory for the local checkout
dest = str(uuid.uuid4())
local_path = os.path.join(tmp_dir, dest)
os.makedirs(local_path)
                    # We need the file extension of the remote file
o = urlparse.urlparse(source)
(_ , filename_compressed) = os.path.split(o.path)
archive_path = os.path.join(local_path, filename_compressed)
logging.info("Downloading remote archive to %s.", archive_path);
with open(archive_path, 'wb') as handle:
request = requests.get(source, stream=True)
# Raise in case of server/network problems: (4xx, 5xx, ...)
request.raise_for_status()
for block in request.iter_content(1024):
if not block:
break
handle.write(block)
if archive_path.endswith('.zip'):
# We use the external unzip utility.
logging.info("Unzip %s", source)
try:
p = subprocess.Popen(['unzip', archive_path], cwd=local_path, **subprocess_setting)
p.wait()
if p.returncode != 0:
logging.error("Failed to unzip %s", source)
raise SubprocessError("Failed to unzip %s".format(source))
except OSError:
logging.error("unzip not found. Do you have unzip installed?")
raise
if archive_path.endswith('.tar.gz'):
# We use the external untar utility.
logging.info("untar %s", source)
try:
p = subprocess.Popen(['tar', '-zxvf', archive_path], cwd=local_path, **subprocess_setting)
p.wait()
if p.returncode != 0:
logging.error("Failed to untar %s", source)
raise SubprocessError("Failed to untar %s".format(source))
except OSError:
logging.error("tar not found. Do you have tar installed?")
raise
# We search for a file called group.yaml, which gives us the directory to process
path = None
for dirname, dirnames, filenames in os.walk(local_path):
# Process each file
for filename in filenames:
if filename == 'group.yaml':
logging.debug("Found a group.yaml file inside %s.", dirname)
path = dirname
if path==None:
logging.error("No group.yaml file found in %s", source)
raise SubprocessError("No group.yaml file found in %s".format(source))
else:
logging.info("There is no directory %s. Assuming that it's a remote GIT repository.", source)
# We need a 'unique' directory for the local checkout
dest = str(uuid.uuid4())
path = os.path.join(tmp_dir, dest)
logging.warning("Cloning %s", source)
try:
p = subprocess.Popen(['git', 'clone', source, dest], cwd=tmp_dir, **subprocess_setting)
p.wait()
if p.returncode != 0:
logging.error("Failed to clone GIT repository %s", source)
raise SubprocessError("Failed to clone GIT repository %s".format(source))
except OSError:
logging.error("GIT not found. Do you have GIT installed?")
raise
else:
path = source
logging.info("Processing %s", path)
# We load the group definition and add it to the corresponding group.
g = Group(path)
g.load()
if g.project() not in projects:
projects[g.project()] = Project(g.project())
projects[g.project()].append(g)
print "Archiving: %(path)s\n as: %(url_name)s" % { 'path':path, 'url_name':g.url_name()} + ".tar.gz\n"
tar = tarfile.open(g.url_name()+".tar.gz", "w:gz")
tar.add(path, arcname = g.url_name())
tar.close()
upload_files.append(g.url_name()+".tar.gz")
# Sort the projects alphabetically
projects = projects.values()
list.sort(projects, key=operator.attrgetter('project'))
# We now have successfully read all groups and we proceed to create the edx course.
# Setup the edx directory structure
# All the other files and directories inside are uuid named. We don't have to fear a name clash.
out_dir = os.path.join(tmp_dir, DISPLAY_NAME)
# Delete the output directory, if it already exists
if os.path.exists(out_dir):
shutil.rmtree(out_dir)
os.makedirs(out_dir)
# Create course.xml
course = Element('course');
course.set('url_name', 'url_name')
course.set('org', ORG_NAME)
course.set('course', 'course')
tree = ElementTree(course)
tree.write(os.path.join(out_dir, "course.xml"))
# Create course/course.xml
course_dir = os.path.join(out_dir, 'course')
os.makedirs(course_dir)
course = Element('course');
course.set('display_name', DISPLAY_NAME)
for project in projects:
e = SubElement(course, 'chapter')
e.set('url_name', project.url_name())
tree = ElementTree(course)
tree.write(os.path.join(course_dir, "{0}.xml".format('url_name')) )
        # Let each project and implicitly each group create its files
for project in projects:
project.edx(out_dir)
# Archive the directory to the output file
print "Creating the archive: %(path)s\n" % { 'path':options.output}
tar = tarfile.open(options.output, "w:gz")
tar.add(out_dir, arcname=os.path.basename(out_dir))
tar.close()
import_file.append(options.output)
    # If any exception occurs, we still want to delete the temporary directory.
finally:
try:
            # We don't delete the temp_dir if its path was specified by the user.
if options.tmp is None:
shutil.rmtree(tmp_dir)
else:
logging.warning("The manually set temporary directory won't be deleted (%s).", tmp_dir)
except OSError as exc:
            if exc.errno != 2: # Unless the error says that the tmp directory doesn't exist anymore.
raise
def submit_imath(target_path = THINLINC_PATH):
""" A submission routine for students using thinlincs.
Puts one of the .tar.gz files generated by this script in the target_path folder.
Presumably students have write permission there but not overwrite permission.
Submissions are timestamped so it is easy to spot the most recent one.
"""
for upload_file in upload_files:
upload = raw_input("\nDo you want me to automatically upload %s? (y/N)"%upload_file).lower() == "y"
if not upload: return False
print "Uploading file %s"%upload_file
import time
import os
import shutil
timestr = time.strftime("-%Y-%m-%d-%H-%M-%S")
target_file = os.path.join(target_path, "mat101-"+upload_file[:-7]+timestr+".tar.gz")
shutil.copyfile(upload_file, target_file)
print target_file
print "Your file was uploaded"
return True # It was "uploaded"
if __name__ == '__main__':
print "This is mat101-presenter, version %s \n\n" % VERSION
main()
print "\n\n"
print "="*80
print "\n\n"
print "I have successfully created the file ", import_file[0]
print "\n"
print "I have also created the file:"
for upload_file in upload_files:
print " "+upload_file
print "\n"
print "You should the file %s on a sandbox, to test it:\n"%import_file[0]
print " - http://edx-sandbox.math.uzh.ch:18010"
print " - https://sandbox.edx.org/"
tested = raw_input("Have you tested %s on a sandbox? (y/N)"%import_file[0]).lower() == 'y'
if not tested:
print "Do it then..."
else:
submitted = False
from socket import gethostname
host = gethostname()
if host[:3] == "ssh" or host[:2] == "tl":
if raw_input("\nAs far as I can tell you are running this from an IMATH machine, is this correct? (Y/n)").lower() == "y":
submitted = submit_imath()
else:
print "Sorry, wrong guesses can happen!"
if not submitted:
print "\nYour file was not automatically uploaded"
print "\nYou should put your file on the web and add your URL to the appropriate wiki page:"
| pdehaye/edx-presenter | mat101-presenter.py | Python | agpl-3.0 | 37,588 |
from django.db import models
from buildings.models import Building
class Room(models.Model):
roomid = models.CharField(max_length=10)
name = models.CharField(max_length=128)
description = models.TextField(null=True, blank=True, default=None)
capacity = models.IntegerField(null=True)
buildingid = models.ForeignKey('buildings.Building')
def __unicode__(self):
return '{} ({})'.format(self.roomid, self.name)
class Meta:
unique_together = (('roomid', 'buildingid',),)
| ritashugisha/ASUEvents | ASUEvents/rooms/models.py | Python | mit | 516 |
import os, sys
import numpy as np
import time
import json
import cPickle as pickle
from sklearn.feature_extraction.text import CountVectorizer
count_vect = CountVectorizer(min_df=6, stop_words='english')
file_list_lite = 'clickture-lite.lst'
data_dir_lite = '../keyword/keywords_lite'
file_list_dev = 'clickture-dev.lst'
data_dir_dev = '../keyword/keywords_dev'
def gen_count_vector(file_list, data_dir):
keywords_all = []
with open(file_list) as fin:
for fid, file_name in enumerate(fin.readlines()):
json_file = '%s/%s.json'%(data_dir, file_name.strip())
with open(json_file) as fin2:
data = json.load(fin2)
keywords = data['tagging']['keywords']
keywords = ' '.join(keywords)
keywords_all.append(keywords)
X_train_counts = count_vect.fit_transform(keywords_all)
    print('vocabulary size: %d'%(len(count_vect.vocabulary_)))
pickle.dump(count_vect, open('count_vectorizer_clickture_dev_sk_6.pkl', 'wb'), protocol=pickle.HIGHEST_PROTOCOL)
def gen_compact_range_by_id(sparse_matrix, num_item, output_file):
cur_id = 0
cur_start = 0
sparse_range = (-1) * np.ones((num_item, 2))
for line_id, item_id in enumerate(sparse_matrix[:, 0]):
if not item_id == cur_id:
sparse_range[cur_id, :] = [cur_start, line_id-1]
cur_start = line_id
cur_id = item_id
sparse_range[cur_id, :] = [cur_start, len(sparse_matrix[:, 0])-1]
np.save(output_file, sparse_range.astype(int))
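# Worked example of the ranges produced above (hypothetical input): for a matrix
# whose first column reads [0, 0, 2, 2, 2] (sorted by item id) and num_item = 3,
# the saved array is
# row 0 -> [0, 1] (item 0 occupies lines 0-1)
# row 1 -> [-1, -1] (item 1 never appears)
# row 2 -> [2, 4] (item 2 occupies lines 2-4)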
if not os.path.exists('count_vectorizer_clickture_dev_sk_6.pkl'):
print 'Generating count vector'
# We focus on evaluating test set keywords, discard those only appearing in training (lite) set
gen_count_vector(file_list_dev, data_dir_dev)
count_vect = np.load('count_vectorizer_clickture_dev_sk_6.pkl')
analyzer = count_vect.build_analyzer()
word_dict = count_vect.vocabulary_
print 'Encoding Lite Set'
img_keyword_rec = []
with open(file_list_lite) as fin:
for fid, file_name in enumerate(fin.readlines()):
img_id = int(file_name.strip().split('/')[-1])
json_file = '%s/%s.json'%(data_dir_lite, file_name.strip())
with open(json_file) as fin2:
data = json.load(fin2)
keywords = data['tagging']['keywords']
keywords = ' '.join(keywords)
keywords = analyzer(keywords)
if len(keywords) > 0:
keyword_ids = [word_dict.get(word) for word in keywords if word in word_dict]
for word_id in keyword_ids:
img_keyword_rec.append([img_id, word_id])
img_keyword_rec = np.array(img_keyword_rec).astype('int')
# np.save('img_keyword_rec_clickture_lite_sk_6.npy', img_keyword_rec)
gen_compact_range_by_id(img_keyword_rec, np.max(img_keyword_rec[:, 0])+1, 'img_keyword_ind_clickture_lite_sk_6_sparse.npy')
print 'Encoding Dev Set'
img_keyword_rec = []
with open(file_list_dev) as fin:
for fid, file_name in enumerate(fin.readlines()):
img_id = int(file_name.strip().split('/')[-1])
json_file = '%s/%s.json'%(data_dir_dev, file_name.strip())
with open(json_file) as fin2:
data = json.load(fin2)
keywords = data['tagging']['keywords']
keywords = ' '.join(keywords)
keywords = analyzer(keywords)
if len(keywords) > 0:
keyword_ids = [word_dict.get(word) for word in keywords if word in word_dict]
for word_id in keyword_ids:
img_keyword_rec.append([img_id, word_id])
img_keyword_rec = np.array(img_keyword_rec).astype('int')
# np.save('img_keyword_rec_clickture_dev_sk_6.npy', img_keyword_rec)
gen_compact_range_by_id(img_keyword_rec, np.max(img_keyword_rec[:, 0])+1, 'img_keyword_ind_clickture_dev_sk_6_sparse.npy')
| kanchen-usc/amc_att | clickture/pro_keyword_clickture.py | Python | mit | 3,533 |
#!/usr/bin/env python
"""
Kover: Learn interpretable computational phenotyping models from k-merized genomic data
Copyright (C) 2018 Alexandre Drouin & Gael Letarte
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
from math import ceil
from .popcount import inplace_popcount_32, inplace_popcount_64
from ...utils import _minimum_uint_size, _unpack_binary_bytes_from_ints
class KmerRule(object):
def __init__(self, kmer_index, kmer_sequence, type):
"""
A k-mer rule
Parameters:
-----------
kmer_index: uint
The index of the k-mer
kmer_sequence: string
The nucleotide sequence of the k-mer
type: string
            The type of rule: "presence" or "absence"
"""
self.kmer_index = kmer_index
self.kmer_sequence = kmer_sequence
self.type = type
def classify(self, X):
if self.type == "absence":
return (X[:, self.kmer_index] == 0).astype(np.uint8)
else:
return (X[:, self.kmer_index] == 1).astype(np.uint8)
def inverse(self):
return KmerRule(kmer_index=self.kmer_index, kmer_sequence=self.kmer_sequence, type="absence" if self.type == "presence" else "presence")
def __str__(self):
return ("Absence(" if self.type == "absence" else "Presence(") + self.kmer_sequence + ")"
class LazyKmerRuleList(object):
"""
By convention, the first half of the list contains presence rules and the second half contains the absence rules in
the same order.
"""
def __init__(self, kmer_sequences, kmer_by_rule):
self.n_rules = kmer_by_rule.shape[0] * 2
self.kmer_sequences = kmer_sequences
self.kmer_by_rule = kmer_by_rule
def __getitem__(self, idx):
if idx >= self.n_rules:
raise ValueError("Index %d is out of range for list of size %d" % (idx, self.n_rules))
if idx >= len(self.kmer_sequences):
type = "absence"
kmer_idx = self.kmer_by_rule[idx % len(self.kmer_sequences)]
else:
type = "presence"
kmer_idx = self.kmer_by_rule[idx]
return KmerRule(idx % len(self.kmer_sequences), self.kmer_sequences[kmer_idx], type)
def __len__(self):
return self.n_rules
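# Indexing sketch (hypothetical sizes): with len(kmer_sequences) == 4,
#   rules = LazyKmerRuleList(kmer_sequences, kmer_by_rule)
#   rules[1]   # presence rule for the k-mer referenced by kmer_by_rule[1]
#   rules[5]   # absence rule for the same k-mer (5 == 1 + len(kmer_sequences))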
class BaseRuleClassifications(object):
def __init__(self):
pass
def get_columns(self, columns):
raise NotImplementedError()
def remove_rows(self, rows):
raise NotImplementedError()
@property
def shape(self):
raise NotImplementedError()
def sum_rows(self, rows):
raise NotImplementedError()
class KmerRuleClassifications(BaseRuleClassifications):
"""
Methods involving columns account for presence and absence rules
"""
# TODO: Clean up. Get rid of the code to handle deleted rows. We don't need this.
def __init__(self, dataset, n_rows, block_size=None):
self.dataset = dataset
self.dataset_initial_n_rows = n_rows
self.dataset_n_rows = n_rows
self.dataset_removed_rows = []
self.dataset_removed_rows_mask = np.zeros(self.dataset_initial_n_rows, dtype=np.bool)
self.block_size = (None, None)
if block_size is None:
if self.dataset.chunks is None:
self.block_size = (1, self.dataset.shape[1])
else:
self.block_size = self.dataset.chunks
else:
if len(block_size) != 2 or not isinstance(block_size[0], int) or not isinstance(block_size[1], int):
raise ValueError("The block size must be a tuple of 2 integers.")
self.block_size = block_size
# Get the size of the ints used to store the data
if self.dataset.dtype == np.uint32:
self.dataset_pack_size = 32
self.inplace_popcount = inplace_popcount_32
elif self.dataset.dtype == np.uint64:
self.dataset_pack_size = 64
self.inplace_popcount = inplace_popcount_64
else:
raise ValueError("Unsupported data type for packed attribute classifications array. The supported data" +
" types are np.uint32 and np.uint64.")
super(BaseRuleClassifications, self).__init__()
def get_columns(self, columns):
"""
Columns can be an integer (or any object that implements __index__) or a sorted list/ndarray.
"""
#TODO: Support slicing, make this more efficient than getting the columns individually.
columns_is_int = False
if hasattr(columns, "__index__"): # All int types implement the __index__ method (PEP 357)
columns = [columns.__index__()]
columns_is_int = True
elif isinstance(columns, np.ndarray):
columns = columns.tolist()
elif isinstance(columns, list):
pass
else:
columns = list(columns)
# Detect where an inversion is needed (columns corresponding to absence rules)
        columns, invert_result = zip(* (((column if column < self.dataset.shape[1] else column % self.dataset.shape[1]),
                                         (True if column >= self.dataset.shape[1] else False)) for column in columns))
columns = list(columns)
invert_result = np.array(invert_result)
# Don't return rows that have been deleted
row_mask = np.ones(self.dataset.shape[0] * self.dataset_pack_size, dtype=np.bool)
row_mask[self.dataset_initial_n_rows:] = False
row_mask[self.dataset_removed_rows] = False
# h5py requires that the column indices are sorted
unique, inverse = np.unique(columns, return_inverse=True)
result = _unpack_binary_bytes_from_ints(self.dataset[:, unique.tolist()])[row_mask]
result = result[:, inverse]
result[:, invert_result] = 1 - result[:, invert_result]
if columns_is_int:
return result.reshape(-1)
else:
return result
def remove_rows(self, rows):
# Find in which dataset the rows must be removed
dataset_removed_rows = []
# TODO: This is inefficient!
for row_idx in rows:
current_idx = -1
n_active_elements_seen = 0
while n_active_elements_seen <= row_idx:
current_idx += 1
if not self.dataset_removed_rows_mask[current_idx]:
n_active_elements_seen += 1
dataset_removed_rows.append(current_idx)
# Update the dataset removed row lists
# Update the start and stop indexes
# Adjust the shape
# Adjust the number of rows in each dataset
# Store the sorted relative removed row indexes by dataset
if len(dataset_removed_rows) > 0:
self.dataset_removed_rows = sorted(set(self.dataset_removed_rows + dataset_removed_rows))
self.dataset_removed_rows_mask = np.zeros(self.dataset_initial_n_rows, dtype=np.bool)
self.dataset_removed_rows_mask[self.dataset_removed_rows] = True
self.dataset_n_rows = self.dataset_initial_n_rows - len(self.dataset_removed_rows)
@property
def shape(self):
return self.dataset_n_rows, self.dataset.shape[1] * 2
# TODO: allow summing over multiple lists of rows at a time (saves i/o operations)
def sum_rows(self, rows):
"""
Note: Assumes that the rows argument does not contain duplicate elements. Rows will not be considered more than once.
"""
rows = np.asarray(rows)
result_dtype = _minimum_uint_size(rows.shape[0])
result = np.zeros(self.dataset.shape[1] * 2, dtype=result_dtype)
# Builds a mask to turn off the bits of the rows we do not want to count in the sum.
def build_row_mask(example_idx, n_examples, mask_n_bits):
if mask_n_bits not in [8, 16, 32, 64, 128]:
raise ValueError("Unsupported mask format. Use 8, 16, 32, 64 or 128 bits.")
n_masks = int(ceil(float(n_examples) / mask_n_bits))
masks = [0] * n_masks
for idx in example_idx:
example_mask = idx / mask_n_bits
example_mask_idx = mask_n_bits - (idx - mask_n_bits * example_mask) - 1
masks[example_mask] |= 1 << example_mask_idx
return np.array(masks, dtype="u" + str(mask_n_bits / 8))
# Find the rows that occur in each dataset and their relative index
rows = np.sort(rows)
dataset_relative_rows = []
for row_idx in rows:
# Find which row in the dataset corresponds to the requested row
# TODO: This is inefficient! Could exploit the fact that rows is sorted to reuse previous iterations.
current_idx = -1
n_active_elements_seen = 0
while n_active_elements_seen <= row_idx:
current_idx += 1
if not self.dataset_removed_rows_mask[current_idx]:
n_active_elements_seen += 1
dataset_relative_rows.append(current_idx)
# Create a row mask for each dataset
row_mask = build_row_mask(dataset_relative_rows, self.dataset_initial_n_rows, self.dataset_pack_size)
del dataset_relative_rows
        # Load the rows for which the mask is not 0, block by block, supporting column slicing as well
n_col_blocks = int(ceil(1.0 * self.dataset.shape[1] / self.block_size[1]))
rows_to_load = np.where(row_mask != 0)[0]
n_row_blocks = int(ceil(1.0 * len(rows_to_load) / self.block_size[0]))
for row_block in xrange(n_row_blocks):
block_row_mask = row_mask[rows_to_load[row_block * self.block_size[0]:(row_block + 1) * self.block_size[0]]]
for col_block in xrange(n_col_blocks):
# Load the appropriate rows/columns based on the block sizes
block = self.dataset[rows_to_load[row_block * self.block_size[0]:(row_block + 1) * self.block_size[0]],
col_block * self.block_size[1]:(col_block + 1) * self.block_size[1]]
# Popcount
if len(block.shape) == 1:
block = block.reshape(1, -1)
self.inplace_popcount(block, block_row_mask)
# Increment the sum
result[col_block * self.block_size[1]:min((col_block + 1) * self.block_size[1], self.dataset.shape[1])] += np.sum(block, axis=0)
# Compute the sum for absence rules
result[self.dataset.shape[1] : ] = len(rows) - result[: self.dataset.shape[1]]
return result
| aldro61/kover | core/kover/learning/common/rules.py | Python | gpl-3.0 | 11,255 |
__author__ = 'pferland'
from PrinterStats import *
__all__ = ['PrinterStats']
#__all__ = ['create_models_functions', 'check_printers_table', 'daemon_get_host_stats', 'hostcheck'] | RIEI/printerStats | daemon/PrinterStats/__init__.py | Python | gpl-2.0 | 179 |
"""
:mod:`zsl.utils.import_helper`
------------------------------
.. moduleauthor:: Martin Babka
"""
from __future__ import unicode_literals
import importlib
def fetch_class(full_class_name):
"""Fetches the given class.
:param string full_class_name: Name of the class to be fetched.
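
    Example (illustrative)::

        cls = fetch_class('collections.OrderedDict')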
"""
(module_name, class_name) = full_class_name.rsplit('.', 1)
module = importlib.import_module(module_name)
return getattr(module, class_name)
| AtteqCom/zsl | src/zsl/utils/import_helper.py | Python | mit | 457 |
from unittest.mock import patch, ANY
from django.test import RequestFactory, SimpleTestCase
from django.core.exceptions import ValidationError
from corehq.apps.users.models import WebUser
from ..forms import HQAuthenticationTokenForm, HQBackupTokenForm
class HQAuthenticationTokenFormTests(SimpleTestCase):
def setUp(self):
self.factory = RequestFactory()
self.setUp_mocks()
def setUp_mocks(self):
clean_patcher = patch('corehq.apps.hqwebapp.forms.AuthenticationTokenForm.clean')
self.mocked_clean = clean_patcher.start()
self.mocked_clean.side_effect = ValidationError('Bad Token')
self.addCleanup(clean_patcher.stop)
def begin_login_attempt(self):
user = WebUser(username='test_user')
request = self.factory.post('/login')
user_patcher = patch('corehq.apps.hqwebapp.forms.CouchUser.get_by_username', return_value=user)
user_patcher.start()
self.addCleanup(user_patcher.stop)
return (user, request)
def create_form_with_invalid_token(self, user, request):
return HQAuthenticationTokenForm(user, 'device', request)
@patch('corehq.apps.hqwebapp.forms.user_login_failed')
def test_failed_authentication_sends_fully_formed_signal(self, mock_signal):
user, request = self.begin_login_attempt()
form = self.create_form_with_invalid_token(user, request)
with self.assertRaises(ValidationError):
form.clean()
expected_credentials = {'username': user.username}
mock_signal.send.assert_called_once_with(credentials=expected_credentials, request=request,
token_failure=True, sender=ANY)
class HQBackupTokenFormTests(SimpleTestCase):
def setUp(self):
self.factory = RequestFactory()
self.setUp_mocks()
def setUp_mocks(self):
clean_patcher = patch('corehq.apps.hqwebapp.forms.BackupTokenForm.clean')
self.mocked_clean = clean_patcher.start()
self.mocked_clean.side_effect = ValidationError('Bad Token')
self.addCleanup(clean_patcher.stop)
def begin_login_attempt(self):
user = WebUser(username='test_user')
request = self.factory.post('/login')
user_patcher = patch('corehq.apps.hqwebapp.forms.CouchUser.get_by_username', return_value=user)
user_patcher.start()
self.addCleanup(user_patcher.stop)
return (user, request)
def create_form_with_invalid_token(self, user, request):
return HQBackupTokenForm(user, 'device', request)
@patch('corehq.apps.hqwebapp.forms.user_login_failed')
def test_failed_clean_sends_fully_formed_signal(self, mock_signal):
user, request = self.begin_login_attempt()
form = self.create_form_with_invalid_token(user, request)
with self.assertRaises(ValidationError):
form.clean()
expected_credentials = {'username': user.username}
mock_signal.send.assert_called_once_with(credentials=expected_credentials, request=request,
token_failure=True, sender=ANY)
| dimagi/commcare-hq | corehq/apps/hqwebapp/tests/test_forms.py | Python | bsd-3-clause | 3,073 |
###################
## EXAMPLE: strings
###################
#hi = "hello there"
#name = "ana"
#greet = hi + name
#print(greet)
#greeting = hi + " " + name
#print(greeting)
#silly = hi + (" " + name)*3
#print(silly)
####################
## EXAMPLE: output
####################
#x = 1
#print(x)
#x_str = str(x)
#print("my fav number is", x, ".", "x=", x)
#print("my fav number is", x_str + "." + "x=" + x_str)
#print("my fav number is" + x_str + "." + "x=" + x_str)
####################
## EXAMPLE: input
####################
#text = input("Type anything... ")
#print(5*text)
#num = int(input("Type a number... "))
#print(5*num)
####################
## EXAMPLE: conditionals/branching
####################
#x = float(input("Enter a number for x: "))
#y = float(input("Enter a number for y: "))
#if x == y:
# print("x and y are equal")
# if y != 0:
# print("therefore, x / y is", x/y)
#elif x < y:
# print("x is smaller")
#elif x > y:
# print("y is smaller")
#print("thanks!")
####################
## EXAMPLE: remainder
####################
#num = int(input("Enter a number: "))
#if num % 2 == 0:
# print("number is even")
#else:
# print("number is odd")
####################
## EXAMPLE: while loops
## Try expanding this code to show a sad face if you go right
## twice and flip the table any more times than that.
## Hint: use a counter
####################
#n = input("You are in the Lost Forest\n****************\n****************\n :)\n****************\n****************\nGo left or right? ")
#while n == "right" or n == "Right":
# n = input("You are in the Lost Forest\n****************\n****** ***\n (╯°□°)╯︵ ┻━┻\n****************\n****************\nGo left or right? ")
#print("\nYou got out of the Lost Forest!\n\o/")
#n = 0
#while n < 5:
# print(n)
# n = n+1
####################
## EXAMPLE: for loops
####################
#for n in range(5):
# print(n)
#
#mysum = 0
#for i in range(10):
# mysum += i
#print(mysum)
#
#mysum = 0
#for i in range(7, 10):
# mysum += i
#print(mysum)
#
#mysum = 0
#for i in range(5, 11, 2):
# mysum += i
# if mysum == 5:
# break
# mysum += 1
#print(mysum)
####################
## EXAMPLE: perfect squares
####################
#ans = 0
#neg_flag = False
#x = int(input("Enter an integer: "))
#if x < 0:
# neg_flag = True
#while ans**2 < x:
# ans = ans + 1
#if ans**2 == x:
# print("Square root of", x, "is", ans)
#else:
# print(x, "is not a perfect square")
# if neg_flag:
# print("Just checking... did you mean", -x, "?")
####################
## TEST YOURSELF!
## Modify the perfect squares example to print
## imaginary perfect sqrts if given a negative num.
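## One possible sketch (my own, untested): report an imaginary root when the
## input is a negative perfect square, e.g. -16 -> 4i
#ans = 0
#x = int(input("Enter an integer: "))
#while ans**2 < abs(x):
#    ans = ans + 1
#if ans**2 != abs(x):
#    print(x, "is not a perfect square")
#elif x < 0:
#    print("Square root of", x, "is", str(ans) + "i")
#else:
#    print("Square root of", x, "is", ans)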
#################### | indefinitelee/Learning | MIT_60001_Introduction_to_Computer_Science_and_Programming_in_Python/week_2/lec2_branch_loops.py | Python | mit | 2,880 |
"""
Copyright (C) 2011 Maximiliano Pin
Redmodel is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Redmodel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with Redmodel. If not, see <http://www.gnu.org/licenses/>.
"""
import unittest
import sys
from datetime import datetime
from test import example_data
from test.example_models import City, Weapon, Fighter, Gang, Skill, SkillInstance, FighterSkillList
from redmodel.models import SetField, ModelWriter, ListFieldWriter, SetFieldWriter, SortedSetFieldWriter, NotFoundError, UniqueError, BadArgsError
from redmodel.containers import List, Set, SortedSet, ListHandle, SetHandle, SortedSetHandle, ListWriter, SetWriter, SortedSetWriter
import redmodel
from redmodel import connection as ds
class ModelTestCase(unittest.TestCase):
pass
class ContainersTestCase(ModelTestCase):
def setUp(self):
ds.flushdb()
def tearDown(self):
pass
def test_strlist(self):
writer = ListWriter(str)
hlist = ListHandle('mylist', str)
writer.append(hlist, 'spam')
writer.append(hlist, 'eggs')
writer.append(hlist, 'hello world')
mylist = List(hlist)
self.assertEqual(mylist, ('spam', 'eggs', 'hello world'))
def test_intset(self):
writer = SetWriter(int)
hset = SetHandle('myset', int)
writer.append(hset, 11)
writer.append(hset, 13)
writer.append(hset, 17)
myset = Set(hset)
self.assertEqual(myset, set([11, 13, 17]))
def test_model_list(self):
writer = ListWriter(Fighter)
hlist = ListHandle('mylist', Fighter)
f1, f2, f3 = map(Fighter.by_id, [21, 33, 47])
for f in f1, f2, f3:
writer.append(hlist, f)
mylist = List(hlist)
self.assertEqual(mylist, (f1, f2, f3))
def test_sorted_set(self):
writer = SortedSetWriter(str)
hzset = SortedSetHandle('myzset', str)
writer.append(hzset, 'spam', 3.25)
writer.append(hzset, 'eggs', 3.24)
self.assertEqual(SortedSet(hzset), ('eggs', 'spam'))
self.assertEqual(SortedSet(hzset, lte = 3.24), ('eggs',))
self.assertEqual(SortedSet.zrange(hzset, 0, 0), ('eggs',))
self.assertEqual(SortedSet.zfind(hzset, gt = 3.24), ('spam',))
def test_model_sorted_set(self):
writer = SortedSetWriter(Fighter)
hzset = SortedSetHandle('myzset', Fighter)
f1, f2, f3 = map(Fighter.by_id, [21, 33, 47])
writer.append(hzset, f1, 3.25)
writer.append(hzset, f2, 3.24)
writer.append(hzset, f3, 4)
self.assertEqual(SortedSet.zrange(hzset), (f2, f1, f3))
self.assertEqual(SortedSet.zrevrange(hzset, 0, 0), (f3,))
self.assertEqual(SortedSet(hzset, gt = 3.24), (f1, f3))
def test_indexed_set(self):
writer = SetWriter(int, index_key = 'myindex')
hset1 = SetHandle('myset:1', int)
hset2 = SetHandle('myset:2', int)
for i in 1, 2, 3:
writer.append(hset1, i)
for i in 2, 3, 4, 5:
writer.append(hset2, i)
self.assertEqual(Set(hset1), set([1, 2, 3]))
self.assertEqual(Set(hset2), set([2, 3, 4, 5]))
self.assertEqual(ds.smembers('myindex:1'), set(['1']))
self.assertEqual(ds.smembers('myindex:2'), set(['1', '2']))
self.assertEqual(ds.smembers('myindex:3'), set(['1', '2']))
self.assertEqual(ds.smembers('myindex:4'), set(['2']))
self.assertEqual(ds.smembers('myindex:5'), set(['2']))
self.assertEqual(ds.smembers('myindex:6'), set())
def test_unique_indexed_set(self):
writer = SetWriter(int, index_key = 'myindex', unique_index = True)
hset1 = SetHandle('myset:1', int)
hset2 = SetHandle('myset:2', int)
for i in 1, 2, 3:
writer.append(hset1, i)
writer.append(hset2, i + 3)
self.assertRaises(UniqueError, writer.append, hset1, 4)
self.assertRaises(UniqueError, writer.append, hset2, 2)
self.assertEqual(Set(hset1), set([1, 2, 3]))
self.assertEqual(Set(hset2), set([4, 5, 6]))
self.assertEqual(ds.hgetall('myindex'),
{'1': '1', '2': '1', '3': '1', '4': '2', '5': '2', '6': '2'})
class ModelWriteTestCase(ModelTestCase):
def setUp(self):
ds.flushdb()
def tearDown(self):
pass
def test_write(self):
# basic model
city_writer = ModelWriter(City)
c1 = City(name = 'Reixte', coast = True)
c2 = City(name = 'Damtoo', coast = True)
c3 = City(name = 'Toynbe', coast = False)
map(city_writer.create, [c1, c2, c3])
self.assertEqual((c1.oid, c2.oid, c3.oid), ('1', '2', '3'))
self.assertEqual(City(City.by_id(1)).oid, '1')
self.assertEqual(ds.hgetall('City:1'), {'name': 'Reixte', 'coast': '1'})
self.assertEqual(ds.hgetall('City:2'), {'name': 'Damtoo', 'coast': '1'})
self.assertEqual(ds.hgetall('City:3'), {'name': 'Toynbe', 'coast': '0'})
# list field referencing model
city_connections_writer = ListFieldWriter(City.connections)
city_connections_writer.append(c1.connections, c2)
city_connections_writer.append(c2.connections, c1)
city_connections_writer.append(c1.connections, c3)
city_connections_writer.append(c3.connections, c1)
self.assertEqual(ds.lrange('City:1:connections', 0, -1), ['2', '3'])
self.assertEqual(ds.lrange('City:2:connections', 0, -1), ['1'])
self.assertEqual(ds.lrange('City:3:connections', 0, -1), ['1'])
# unique indexed field
fighter_writer = ModelWriter(Fighter)
dtime1 = datetime.utcfromtimestamp(1400000002)
dtime2 = datetime.utcfromtimestamp(1400000001)
f1 = Fighter(name = 'Alice', age = 29, weight = 73.2, joined = dtime1, city = City.by_id(1))
f2 = Fighter(name = 'Bob', age = 23, weight = 98, joined = dtime2, city = City.by_id(1))
map(fighter_writer.create, [f1, f2])
self.assertEqual(ds.hgetall('Fighter:1'), {'name': 'Alice', 'age': '29', 'weight': '73.2', 'joined': '1400000002', 'city': '1'})
self.assertEqual(ds.hgetall('Fighter:2'), {'name': 'Bob', 'age': '23', 'weight': '98', 'joined': '1400000001', 'city': '1'})
self.assertEqual(ds.hgetall('u:Fighter:name'), {'Alice': '1', 'Bob': '2'})
# indexed reference field
self.assertEqual(ds.smembers('i:Fighter:city:1'), set(['1', '2']))
# zindexed fields
self.assertEqual(ds.zrange('z:Fighter:age', 0, -1), ['2', '1'])
self.assertEqual(ds.zrange('z:Fighter:weight', 0, -1), ['1', '2'])
self.assertEqual(ds.zrange('z:Fighter:joined', 0, -1), ['2', '1'])
# missing argument
self.assertRaises(BadArgsError, Fighter, name = 'MissingWeight', age = 30, city = 1)
# unique attribute
f3 = Fighter(name = 'Bob', age = 30, weight = 80, joined = dtime1, city = 1)
self.assertRaises(UniqueError, fighter_writer.create, f3)
# basic model
gang_writer = ModelWriter(Gang)
g1 = Gang(name = 'Ghetto Warriors', leader = f1, hqcity = c3)
g2 = Gang(name = 'Midnight Club', leader = f2, hqcity = c1)
map(gang_writer.create, [g1, g2])
# unique indexed reference field
self.assertEqual(ds.hgetall('u:Gang:leader'), {'1': '1', '2': '2' })
# unique indexed set field
gang_members_writer = SetFieldWriter(Gang.members)
gang_members_writer.append(g1.members, f1)
gang_members_writer.append(g1.members, f2)
self.assertEqual(ds.smembers('Gang:1:members'), set(['1', '2']))
self.assertEqual(ds.hgetall('u:Gang:members'), {'1': '1', '2': '1'})
self.assertRaises(UniqueError, gang_members_writer.append, g2.members, f1)
self.assertRaises(UniqueError, gang_members_writer.append, g1.members, f1)
# non-unique indexed set field
gang_cities_writer = SetFieldWriter(Gang.cities)
gang_cities_writer.append(g1.cities, c1)
gang_cities_writer.append(g1.cities, c3)
self.assertEqual(ds.smembers('Gang:1:cities'), set(['1', '3']))
self.assertEqual(ds.smembers('i:Gang:cities:1'), set(['1']))
self.assertEqual(ds.smembers('i:Gang:cities:3'), set(['1']))
# listed reference field
self.assertEqual(ds.lrange('l:Gang:hqcity:1', 0, -1), ['2'])
self.assertEqual(ds.lrange('l:Gang:hqcity:3', 0, -1), ['1'])
# basic model
skill_writer = ModelWriter(Skill)
sk1 = Skill(category = 1, name = 'Strength', description = 'Strength...')
sk2 = Skill(category = 3, name = 'Karate', description = 'Karate...')
map(skill_writer.create, [sk1, sk2])
# owned model
fighter_skill_list_writer = ModelWriter(FighterSkillList)
f1skills = FighterSkillList()
f2skills = FighterSkillList()
fighter_skill_list_writer.create(f1skills, f1)
fighter_skill_list_writer.create(f2skills, f2)
self.assertEqual(f1skills.oid, '1')
self.assertEqual(f2skills.oid, '2')
if __debug__:
f3skills = FighterSkillList()
self.assertRaises(AssertionError, fighter_skill_list_writer.create, f3skills)
f3 = Fighter(name = 'Unsaved', age = 0, weight = 0, joined = None, city = 1)
self.assertRaises(AssertionError, fighter_skill_list_writer.create, f3skills, f3)
# owned model list field
skill_instance_writer = ModelWriter(SkillInstance)
fighter_skills_writer = ListFieldWriter(FighterSkillList.skills, element_writer = skill_instance_writer)
ski1 = SkillInstance(skill = sk1.handle(), value = 21)
ski2 = SkillInstance(skill = sk2, value = 15)
fighter_skills_writer.append(f1skills.skills, ski1)
fighter_skills_writer.append(f1skills.skills, ski2)
self.assertEqual(ds.lrange('FighterSkillList:1:skills', 0, -1), ['1', '2'])
self.assertEqual(ds.hgetall('SkillInstance:1'), {'skill': '1', 'value': '21'})
self.assertEqual(ds.hgetall('SkillInstance:2'), {'skill': '2', 'value': '15'})
ski1 = SkillInstance(skill = sk1, value = 27)
ski2 = SkillInstance(skill = sk2, value = 91)
fighter_skills_writer.append(f2skills.skills, ski1)
fighter_skills_writer.append(f2skills.skills, ski2)
self.assertEqual(ds.lrange('FighterSkillList:2:skills', 0, -1), ['3', '4'])
self.assertEqual(ds.hgetall('SkillInstance:3'), {'skill': '1', 'value': '27'})
self.assertEqual(ds.hgetall('SkillInstance:4'), {'skill': '2', 'value': '91'})
# owned model sorted set field
weapon_writer = ModelWriter(Weapon)
fighter_weapons_writer = SortedSetFieldWriter(Fighter.weapons, weapon_writer)
w1 = Weapon(description = 'second', power = 50.5)
w2 = Weapon(description = 'third', power = 34.2)
w3 = Weapon(description = 'first', power = 50.7)
fighter_weapons_writer.append(f1.weapons, w1)
fighter_weapons_writer.append(f1.weapons, w2)
fighter_weapons_writer.append(f1.weapons, w3)
self.assertEqual(ds.zrange('Fighter:1:weapons', 0, -1), ['2', '1', '3'])
self.assertEqual(ds.hgetall('Weapon:1'), {'description': 'second', 'power': '50.5'})
self.assertEqual(ds.hgetall('Weapon:2'), {'description': 'third', 'power': '34.2'})
self.assertEqual(ds.hgetall('Weapon:3'), {'description': 'first', 'power': '50.7'})
def test_update(self):
example_data.load()
fighter_writer = ModelWriter(Fighter)
# update unique attribute
fighter = Fighter(Fighter.by_id(2))
fighter.name = 'Bobby'
fighter.age = 41
fighter_writer.update_all(fighter)
self.assertEqual(ds.hgetall('u:Fighter:name'), {'Alice': '1', 'Bobby': '2'})
fighter2 = Fighter(Fighter.by_id(2))
self.assertEqual(fighter2.name, 'Bobby')
self.assertEqual(fighter2.age, 41)
fighter_writer.update(fighter2, name = 'Robert', weight = 99.9)
self.assertEqual(ds.hgetall('u:Fighter:name'), {'Alice': '1', 'Robert': '2'})
self.assertEqual(fighter2.name, 'Robert')
self.assertEqual(fighter2.weight, 99.9)
fighter3 = Fighter(Fighter.by_id(2))
self.assertEqual(fighter3.name, 'Robert')
self.assertEqual(fighter3.weight, 99.9)
# update indexed attribute
self.assertEqual(ds.smembers('i:Fighter:city:1'), set(['1', '2']))
self.assertEqual(ds.smembers('i:Fighter:city:2'), set())
fighter1 = Fighter(Fighter.by_id(1))
fighter2 = Fighter(Fighter.by_id(2))
fighter2.city = City.by_id(2)
fighter_writer.update_all(fighter2)
self.assertEqual(ds.smembers('i:Fighter:city:1'), set(['1']))
self.assertEqual(ds.smembers('i:Fighter:city:2'), set(['2']))
fighter_writer.update(fighter1, city = City.by_id(2))
self.assertEqual(ds.smembers('i:Fighter:city:1'), set())
self.assertEqual(ds.smembers('i:Fighter:city:2'), set(['1', '2']))
city1 = City(City.by_id(1))
fighter_writer.update(fighter1, city = city1)
self.assertEqual(ds.smembers('i:Fighter:city:1'), set(['1']))
self.assertEqual(ds.smembers('i:Fighter:city:2'), set(['2']))
# update zindexed attribute
self.assertEqual(ds.zrange('z:Fighter:weight', 0, -1), ['2', '1'])
fighter_writer.update(fighter1, weight = 99.91)
self.assertEqual(ds.zrange('z:Fighter:weight', 0, -1), ['2', '1'])
fighter_writer.update(fighter1, weight = 99.89)
self.assertEqual(ds.zrange('z:Fighter:weight', 0, -1), ['1', '2'])
# update listed attribute
self.assertEqual(ds.lrange('l:Gang:hqcity:1', 0, -1), [])
self.assertEqual(ds.lrange('l:Gang:hqcity:3', 0, -1), ['1', '2'])
gang2 = Gang(Gang.by_id(2))
gang_writer = ModelWriter(Gang)
gang_writer.update(gang2, hqcity = city1)
self.assertEqual(ds.lrange('l:Gang:hqcity:1', 0, -1), ['2'])
self.assertEqual(ds.lrange('l:Gang:hqcity:3', 0, -1), ['1'])
# update object and sorted set atomically
self.assertEqual(ds.zrange('Fighter:1:weapons', 0, -1), ['2', '1', '3'])
self.assertEqual(ds.hgetall('Weapon:2'), {'description': 'third', 'power': '34.2'})
weapon_writer = ModelWriter(Weapon)
fighter_weapons_writer = SortedSetFieldWriter(Fighter.weapons, weapon_writer)
w2 = Weapon(Weapon.by_id(2))
fighter_weapons_writer.update(fighter1.weapons, w2,
power = 70, description = 'improved')
self.assertEqual(ds.zrange('Fighter:1:weapons', 0, -1), ['1', '3', '2'])
self.assertEqual(ds.hgetall('Weapon:2'), {'description': 'improved', 'power': '70'})
self.assertEqual(w2.power, 70)
self.assertEqual(w2.description, 'improved')
w2.power -= 60
w2.description = 'degraded'
fighter_weapons_writer.update_all(fighter1.weapons, w2)
self.assertEqual(ds.zrange('Fighter:1:weapons', 0, -1), ['2', '1', '3'])
self.assertEqual(ds.hgetall('Weapon:2'), {'description': 'degraded', 'power': '10'})
def test_delete(self):
example_data.load()
# delete object updates indexes
fighter_writer = ModelWriter(Fighter)
self.assertEqual(ds.hgetall('u:Fighter:name'), {'Alice': '1', 'Bob': '2'})
self.assertEqual(ds.smembers('i:Fighter:city:1'), set(['1', '2']))
self.assertEqual(ds.zrange('z:Fighter:weight', 0, -1), ['2', '1'])
fighter1 = Fighter(Fighter.by_id(1))
fighter_writer.delete(fighter1)
self.assertRaises(NotFoundError, Fighter, Fighter.by_id(1))
self.assertTrue(fighter1.oid is None)
self.assertEqual(ds.hgetall('u:Fighter:name'), {'Bob': '2'})
self.assertEqual(ds.smembers('i:Fighter:city:1'), set(['2']))
self.assertEqual(ds.zrange('z:Fighter:weight', 0, -1), ['2'])
# delete container item updates indexes
gang_members_writer = SetFieldWriter(Gang.members)
self.assertEqual(ds.smembers('Gang:1:members'), set(['1', '2']))
self.assertEqual(ds.hgetall('u:Gang:members'), {'1': '1', '2': '1'})
gang1 = Gang(Gang.by_id(1))
gang_members_writer.remove(gang1.members, Fighter.by_id(1))
self.assertEqual(ds.smembers('Gang:1:members'), set(['2']))
self.assertEqual(ds.hgetall('u:Gang:members'), {'2': '1'})
gang_cities_writer = SetFieldWriter(Gang.cities)
self.assertEqual(ds.smembers('Gang:1:cities'), set(['1', '3']))
self.assertEqual(ds.smembers('i:Gang:cities:1'), set(['1']))
self.assertEqual(ds.smembers('i:Gang:cities:3'), set(['1']))
gang1 = Gang(Gang.by_id(1))
gang_cities_writer.remove(gang1.cities, City.by_id(1))
self.assertEqual(ds.smembers('Gang:1:cities'), set(['3']))
self.assertFalse(ds.exists('i:Gang:cities:1'))
self.assertEqual(ds.smembers('i:Gang:cities:3'), set(['1']))
# delete object updates lists of listed attributes
self.assertEqual(ds.lrange('l:Gang:hqcity:3', 0, -1), ['1', '2'])
gang_writer = ModelWriter(Gang)
gang_writer.delete(gang1)
self.assertEqual(ds.lrange('l:Gang:hqcity:3', 0, -1), ['2'])
# autodelete owned item
fighter_skill_list_writer = ModelWriter(FighterSkillList)
skill_instance_writer = ModelWriter(SkillInstance)
fighter_skills_writer = ListFieldWriter(FighterSkillList.skills, element_writer = skill_instance_writer)
self.assertEqual(ds.lrange('FighterSkillList:1:skills', 0, -1), ['1', '2'])
self.assertEqual(ds.lrange('FighterSkillList:2:skills', 0, -1), ['3', '4'])
for i in range(1, 5):
self.assertTrue(ds.exists('SkillInstance:{0}'.format(i)))
handle = FighterSkillList.by_owner(Fighter.by_id(2))
fsl = FighterSkillList(handle)
ski = SkillInstance(SkillInstance.by_id(3))
fighter_skills_writer.remove(fsl.skills, ski)
self.assertTrue(ski.oid is None)
self.assertEqual(ds.lrange('FighterSkillList:2:skills', 0, -1), ['4'])
self.assertFalse(ds.exists('SkillInstance:3'))
self.assertTrue(ds.exists('SkillInstance:4'))
# don't allow removing not owned object
ski = SkillInstance(SkillInstance.by_id(1))
self.assertRaises(NotFoundError, fighter_skills_writer.remove, fsl.skills, ski)
self.assertTrue(ds.exists('SkillInstance:1'))
class ModelReadTestCase(ModelTestCase):
def setUp(self):
example_data.load()
def tearDown(self):
pass
def test_read(self):
handle = Gang.by_id(1)
gang = Gang(handle)
self.assertEqual(gang.name, 'Ghetto Warriors')
members = Set(gang.members)
self.assertEqual(members, set([Fighter.by_id(1), Fighter.by_id(2)]))
hfighter1 = Fighter.by_id(1)
hfighter2 = Fighter.by_id(2)
fighter1 = Fighter(hfighter1)
fighter2 = Fighter(hfighter2)
self.assertEqual(fighter1.name, 'Alice')
self.assertEqual(fighter2.name, 'Bob')
dtime = datetime.utcfromtimestamp(1400000001)
self.assertEqual(fighter2.joined, dtime)
handle1 = Fighter.by_id(1)
handle2 = Fighter.by_id(2)
handle3 = Fighter.by_id(1)
self.assertEqual(handle1, handle3)
self.assertFalse(handle1 != handle3)
self.assertFalse(handle1 == handle2)
self.assertTrue(handle1 != handle2)
city = City(fighter1.city)
self.assertEqual(city.name, 'Reixte')
self.assertEqual(city.coast, True)
conns = List(city.connections)
self.assertEqual(len(conns), 2)
city2 = City(conns[1])
self.assertEqual(city2.name, 'Toynbe')
self.assertEqual(city2.coast, False)
handle = Gang.by_id(999)
self.assertRaises(NotFoundError, Gang, handle)
handle = Fighter.find(name = 'Bob')
self.assertEqual(handle, fighter2.handle())
handle = Fighter.find(name = 'NoPlayer')
self.assertFalse(handle)
self.assertRaises(NotFoundError, Fighter, handle)
city_fighters = Fighter.multifind(city = City.by_id(1))
self.assertEqual(city_fighters, set([hfighter1, hfighter2]))
handle = Gang.find(leader = hfighter1)
self.assertEqual(handle, Gang.by_id(1))
self.assertTrue(not Gang.find(members__contains = Fighter.by_id(3)))
handle = Gang.find(members__contains = Fighter(hfighter2))
self.assertEqual(handle, Gang.by_id(1))
city3 = City(City.by_id(3))
city_gangs = Gang.multifind(cities__contains = city3)
self.assertEqual(city_gangs, set([Gang.by_id(1)]))
handle = FighterSkillList.by_owner(fighter1)
fsl = FighterSkillList(handle)
fsl_skills = List(fsl.skills)
skills = map(SkillInstance, fsl_skills)
self.assertEqual(skills[0].skill, Skill.by_id(1))
self.assertEqual(skills[0].value, 21)
self.assertEqual(skills[1].skill, Skill.by_id(2))
self.assertEqual(skills[1].value, 15)
handle = FighterSkillList.by_owner(hfighter2)
fsl = FighterSkillList(handle)
fsl_skills = List(fsl.skills)
skills = map(SkillInstance, fsl_skills)
self.assertEqual(skills[0].skill, Skill.by_id(1))
self.assertEqual(skills[0].value, 27)
self.assertEqual(skills[1].skill, Skill.by_id(2))
self.assertEqual(skills[1].value, 91)
sorted_by_weight_1 = Fighter.zrange('weight')
self.assertEqual(sorted_by_weight_1, [hfighter2, hfighter1])
sorted_by_weight_2 = Fighter.zrange('weight', 0, 0)
self.assertEqual(sorted_by_weight_2, [hfighter2])
sorted_by_weight_3 = Fighter.zrange('weight', 1, -1)
self.assertEqual(sorted_by_weight_3, [hfighter1])
sorted_by_weight_4 = Fighter.zrevrange('weight')
self.assertEqual(sorted_by_weight_4, [hfighter1, hfighter2])
sorted_by_weight_5 = Fighter.zrevrange('weight', 0, 0)
self.assertEqual(sorted_by_weight_5, [hfighter1])
le24a = Fighter.zfind(age__lte = 24)
le24b = Fighter.zrangebyscore('age', '-inf', 24)
self.assertEqual(le24a, [hfighter1, hfighter2])
self.assertEqual(le24a, le24b)
le23a = Fighter.zfind(age__lte = 23)
le23b = Fighter.zrangebyscore('age', '-inf', 23)
self.assertEqual(le23a, [hfighter1, hfighter2])
self.assertEqual(le23a, le23b)
lt23a = Fighter.zfind(age__lt = 23)
lt23b = Fighter.zrangebyscore('age', '-inf', '(23')
self.assertEqual(lt23a, [hfighter1])
self.assertEqual(lt23a, lt23b)
ge24a = Fighter.zfind(age__gte = 24)
ge24b = Fighter.zrangebyscore('age', 24, '+inf')
self.assertEqual(ge24a, [])
self.assertEqual(ge24a, ge24b)
ge23a = Fighter.zfind(age__gte = 23)
ge23b = Fighter.zrangebyscore('age', 23, '+inf')
self.assertEqual(ge23a, [hfighter2])
self.assertEqual(ge23a, ge23b)
gt23a = Fighter.zfind(age__gt = 20)
gt23b = Fighter.zrangebyscore('age', '(20', '+inf')
self.assertEqual(gt23a, [hfighter2])
self.assertEqual(gt23a, gt23b)
age_in_a = Fighter.zfind(age__in = (20, 23))
age_in_b = Fighter.zrangebyscore('age', 20, 23)
age_in_c = Fighter.zrangebyscore('age', 20, 23, 0, 2)
self.assertEqual(age_in_a, [hfighter1, hfighter2])
self.assertEqual(age_in_a, age_in_b)
self.assertEqual(age_in_a, age_in_c)
age_eq_a = Fighter.zfind(age = 20)
age_eq_b = Fighter.zrangebyscore('age', 20, 20)
self.assertEqual(age_eq_a, [hfighter1])
self.assertEqual(age_eq_a, age_eq_b)
joined_before_2014 = Fighter.zfind(joined__lt = datetime(2014, 1, 1))
self.assertEqual(joined_before_2014, [])
joined_before_2020 = Fighter.zfind(joined__lt = datetime(2020, 1, 1))
self.assertEqual(joined_before_2020, [hfighter2, hfighter1])
joined_in_201x = Fighter.zfind(joined__in = (datetime(2010, 1, 1), datetime(2020, 1, 1)))
self.assertEqual(joined_in_201x, [hfighter2, hfighter1])
rev_age_in_a = Fighter.zrevrangebyscore('age', 23, 20)
rev_age_in_b = Fighter.zrevrangebyscore('age', 23, 20, 0, 2)
self.assertEqual(rev_age_in_a, [hfighter2, hfighter1])
self.assertEqual(rev_age_in_a, rev_age_in_b)
self.assertEqual(Fighter.zcount('age', 20, 23), 2)
self.assertEqual(Fighter.zrank('weight', fighter1), 1)
self.assertEqual(Fighter.zrank('weight', hfighter2), 0)
self.assertEqual(Fighter.zrevrank('weight', hfighter1), 0)
self.assertEqual(Fighter.zrevrank('weight', hfighter2), 1)
hweapon1 = Weapon.by_id(1)
hweapon2 = Weapon.by_id(2)
hweapon3 = Weapon.by_id(3)
sorted_weapons = SortedSet.zrange(fighter1.weapons)
self.assertEqual(sorted_weapons, (hweapon2, hweapon1, hweapon3))
top_weapons = SortedSet.zrevrange(fighter1.weapons, 0, 1)
self.assertEqual(top_weapons, (hweapon3, hweapon1))
powerful_weapons1 = SortedSet.zfind(fighter1.weapons, gt = 50)
powerful_weapons2 = SortedSet(fighter1.weapons, gt = 50)
self.assertEqual(powerful_weapons1, (hweapon1, hweapon3))
self.assertEqual(powerful_weapons1, powerful_weapons2)
gangs_by_hqcity = Gang.getlist(hqcity = city3)
self.assertEqual(gangs_by_hqcity, [Gang.by_id(1), Gang.by_id(2)])
first_gang_by_hqcity = Gang.getlist(0, 0, hqcity = city3)
self.assertEqual(first_gang_by_hqcity, [Gang.by_id(1)])
def all_tests():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(ContainersTestCase))
suite.addTest(unittest.makeSuite(ModelWriteTestCase))
suite.addTest(unittest.makeSuite(ModelReadTestCase))
return suite
if __name__ == "__main__":
#redmodel.connection_setup(unix_socket_path='/tmp/redis.sock')
if len(sys.argv) > 1:
unittest.main()
else:
suite = all_tests()
unittest.TextTestRunner(verbosity=2).run(suite)
| beatmax/redmodel | test/models.py | Python | gpl-3.0 | 26,707 |
import py
from rpython.rtyper.lltypesystem import lltype, rffi, llmemory
from rpython.rtyper.lltypesystem.lloperation import llop
from rpython.jit.backend.llsupport import symbolic, support
from rpython.jit.metainterp.history import AbstractDescr, getkind, FLOAT, INT
from rpython.jit.metainterp import history
from rpython.jit.codewriter import heaptracker, longlong
from rpython.jit.codewriter.longlong import is_longlong
from rpython.jit.metainterp.optimizeopt import intbounds
from rpython.rtyper import rclass
class GcCache(object):
def __init__(self, translate_support_code, rtyper=None):
self.translate_support_code = translate_support_code
self.rtyper = rtyper
self._cache_size = {}
self._cache_field = {}
self._cache_array = {}
self._cache_arraylen = {}
self._cache_call = {}
self._cache_interiorfield = {}
def init_size_descr(self, STRUCT, sizedescr):
pass
def init_array_descr(self, ARRAY, arraydescr):
assert (isinstance(ARRAY, lltype.GcArray) or
isinstance(ARRAY, lltype.GcStruct) and ARRAY._arrayfld)
# ____________________________________________________________
# SizeDescrs
class SizeDescr(AbstractDescr):
size = 0 # help translation
tid = llop.combine_ushort(lltype.Signed, 0, 0)
vtable = lltype.nullptr(rclass.OBJECT_VTABLE)
immutable_flag = False
def __init__(self, size, gc_fielddescrs=None, all_fielddescrs=None,
vtable=lltype.nullptr(rclass.OBJECT_VTABLE),
immutable_flag=False):
self.size = size
self.gc_fielddescrs = gc_fielddescrs
self.all_fielddescrs = all_fielddescrs
self.vtable = vtable
assert vtable is not None
self.immutable_flag = immutable_flag
def get_all_fielddescrs(self):
return self.all_fielddescrs
def repr_of_descr(self):
return '<SizeDescr %s>' % self.size
def is_object(self):
return bool(self.vtable)
def is_immutable(self):
return self.immutable_flag
def get_vtable(self):
return heaptracker.adr2int(llmemory.cast_ptr_to_adr(self.vtable))
def get_type_id(self):
assert self.tid
return self.tid
def get_size_descr(gccache, STRUCT, vtable=lltype.nullptr(rclass.OBJECT_VTABLE)):
cache = gccache._cache_size
assert not isinstance(vtable, bool)
try:
return cache[STRUCT]
except KeyError:
size = symbolic.get_size(STRUCT, gccache.translate_support_code)
immutable_flag = heaptracker.is_immutable_struct(STRUCT)
if vtable:
assert heaptracker.has_gcstruct_a_vtable(STRUCT)
else:
assert not heaptracker.has_gcstruct_a_vtable(STRUCT)
sizedescr = SizeDescr(size, vtable=vtable,
immutable_flag=immutable_flag)
gccache.init_size_descr(STRUCT, sizedescr)
cache[STRUCT] = sizedescr
# XXX do we really need gc_fielddescrs if we also have
# all_fielddescrs and can ask is_pointer_field() on them?
gc_fielddescrs = heaptracker.gc_fielddescrs(gccache, STRUCT)
sizedescr.gc_fielddescrs = gc_fielddescrs
all_fielddescrs = heaptracker.all_fielddescrs(gccache, STRUCT)
sizedescr.all_fielddescrs = all_fielddescrs
return sizedescr
# ____________________________________________________________
# FieldDescrs
FLAG_POINTER = 'P'
FLAG_FLOAT = 'F'
FLAG_UNSIGNED = 'U'
FLAG_SIGNED = 'S'
FLAG_STRUCT = 'X'
FLAG_VOID = 'V'
class ArrayOrFieldDescr(AbstractDescr):
vinfo = None
def get_vinfo(self):
return self.vinfo
class FieldDescr(ArrayOrFieldDescr):
name = ''
offset = 0 # help translation
field_size = 0
flag = '\x00'
def __init__(self, name, offset, field_size, flag, index_in_parent=0,
is_pure=False):
self.name = name
self.offset = offset
self.field_size = field_size
self.flag = flag
self.index = index_in_parent
self._is_pure = is_pure
def is_always_pure(self):
return self._is_pure
def __repr__(self):
return 'FieldDescr<%s>' % (self.name,)
def check_correct_type(self, struct):
if self.parent_descr.is_object():
cls = llmemory.cast_adr_to_ptr(
heaptracker.int2adr(self.parent_descr.get_vtable()),
lltype.Ptr(rclass.OBJECT_VTABLE))
tpptr = lltype.cast_opaque_ptr(rclass.OBJECTPTR, struct).typeptr
# this comparison is necessary, since we want to make sure
# that vtable for JitVirtualRef is the same without actually reading
# fields
if tpptr != cls:
assert rclass.ll_isinstance(lltype.cast_opaque_ptr(
rclass.OBJECTPTR, struct), cls)
else:
pass
def is_pointer_field(self):
return self.flag == FLAG_POINTER
def is_float_field(self):
return self.flag == FLAG_FLOAT
def is_field_signed(self):
return self.flag == FLAG_SIGNED
def is_integer_bounded(self):
return self.flag in (FLAG_SIGNED, FLAG_UNSIGNED) \
and self.field_size < symbolic.WORD
def get_integer_min(self):
if self.flag == FLAG_UNSIGNED:
return intbounds.get_integer_min(True, self.field_size)
elif self.flag == FLAG_SIGNED:
return intbounds.get_integer_min(False, self.field_size)
assert False
def get_integer_max(self):
if self.flag == FLAG_UNSIGNED:
return intbounds.get_integer_max(True, self.field_size)
elif self.flag == FLAG_SIGNED:
return intbounds.get_integer_max(False, self.field_size)
assert False
def sort_key(self):
return self.offset
def repr_of_descr(self):
return '<Field%s %s %s>' % (self.flag, self.name, self.offset)
def get_parent_descr(self):
return self.parent_descr
def get_index(self):
return self.index
def get_field_descr(gccache, STRUCT, fieldname):
cache = gccache._cache_field
try:
return cache[STRUCT][fieldname]
except KeyError:
offset, size = symbolic.get_field_token(STRUCT, fieldname,
gccache.translate_support_code)
FIELDTYPE = getattr(STRUCT, fieldname)
flag = get_type_flag(FIELDTYPE)
name = '%s.%s' % (STRUCT._name, fieldname)
index_in_parent = heaptracker.get_fielddescr_index_in(STRUCT, fieldname)
is_pure = bool(STRUCT._immutable_field(fieldname))
fielddescr = FieldDescr(name, offset, size, flag, index_in_parent,
is_pure)
cachedict = cache.setdefault(STRUCT, {})
cachedict[fieldname] = fielddescr
if STRUCT is rclass.OBJECT:
vtable = lltype.nullptr(rclass.OBJECT_VTABLE)
else:
vtable = heaptracker.get_vtable_for_gcstruct(gccache, STRUCT)
fielddescr.parent_descr = get_size_descr(gccache, STRUCT, vtable)
return fielddescr
def get_type_flag(TYPE):
if isinstance(TYPE, lltype.Ptr):
if TYPE.TO._gckind == 'gc':
return FLAG_POINTER
else:
return FLAG_UNSIGNED
if isinstance(TYPE, lltype.Struct):
return FLAG_STRUCT
if TYPE is lltype.Float or is_longlong(TYPE):
return FLAG_FLOAT
if (TYPE is not lltype.Bool and isinstance(TYPE, lltype.Number) and
rffi.cast(TYPE, -1) == -1):
return FLAG_SIGNED
return FLAG_UNSIGNED
def get_field_arraylen_descr(gccache, ARRAY_OR_STRUCT):
cache = gccache._cache_arraylen
try:
return cache[ARRAY_OR_STRUCT]
except KeyError:
tsc = gccache.translate_support_code
(_, _, ofs) = symbolic.get_array_token(ARRAY_OR_STRUCT, tsc)
size = symbolic.get_size(lltype.Signed, tsc)
result = FieldDescr("len", ofs, size, get_type_flag(lltype.Signed))
result.parent_descr = None
cache[ARRAY_OR_STRUCT] = result
return result
# ____________________________________________________________
# ArrayDescrs
class ArrayDescr(ArrayOrFieldDescr):
tid = 0
basesize = 0 # workaround for the annotator
itemsize = 0
lendescr = None
flag = '\x00'
vinfo = None
all_interiorfielddescrs = None
concrete_type = '\x00'
def __init__(self, basesize, itemsize, lendescr, flag, is_pure=False, concrete_type='\x00'):
self.basesize = basesize
self.itemsize = itemsize
self.lendescr = lendescr # or None, if no length
self.flag = flag
self._is_pure = is_pure
self.concrete_type = concrete_type
def get_all_fielddescrs(self):
return self.all_interiorfielddescrs
def is_always_pure(self):
return self._is_pure
def getconcrete_type(self):
return self.concrete_type
def is_array_of_primitives(self):
return self.flag == FLAG_FLOAT or \
self.flag == FLAG_SIGNED or \
self.flag == FLAG_UNSIGNED
def is_array_of_pointers(self):
return self.flag == FLAG_POINTER
def is_array_of_floats(self):
return self.flag == FLAG_FLOAT
def is_item_signed(self):
return self.flag == FLAG_SIGNED
def get_item_size_in_bytes(self):
return self.itemsize
def is_array_of_structs(self):
return self.flag == FLAG_STRUCT
def is_item_integer_bounded(self):
return self.flag in (FLAG_SIGNED, FLAG_UNSIGNED) \
and self.itemsize < symbolic.WORD
def get_item_integer_min(self):
if self.flag == FLAG_UNSIGNED:
return intbounds.get_integer_min(True, self.itemsize)
elif self.flag == FLAG_SIGNED:
return intbounds.get_integer_min(False, self.itemsize)
assert False
def get_item_integer_max(self):
if self.flag == FLAG_UNSIGNED:
return intbounds.get_integer_max(True, self.itemsize)
elif self.flag == FLAG_SIGNED:
return intbounds.get_integer_max(False, self.itemsize)
assert False
def get_type_id(self):
assert self.tid
return self.tid
def repr_of_descr(self):
return '<Array%s %s>' % (self.flag, self.itemsize)
def get_array_descr(gccache, ARRAY_OR_STRUCT):
cache = gccache._cache_array
try:
return cache[ARRAY_OR_STRUCT]
except KeyError:
tsc = gccache.translate_support_code
basesize, itemsize, _ = symbolic.get_array_token(ARRAY_OR_STRUCT, tsc)
if isinstance(ARRAY_OR_STRUCT, lltype.Array):
ARRAY_INSIDE = ARRAY_OR_STRUCT
else:
ARRAY_INSIDE = ARRAY_OR_STRUCT._flds[ARRAY_OR_STRUCT._arrayfld]
if ARRAY_INSIDE._hints.get('nolength', False):
lendescr = None
else:
lendescr = get_field_arraylen_descr(gccache, ARRAY_OR_STRUCT)
flag = get_type_flag(ARRAY_INSIDE.OF)
is_pure = bool(ARRAY_INSIDE._immutable_field(None))
arraydescr = ArrayDescr(basesize, itemsize, lendescr, flag, is_pure)
if ARRAY_INSIDE.OF is lltype.SingleFloat or \
ARRAY_INSIDE.OF is lltype.Float:
            # It would be better to also use the float flag for single floats,
            # but that leads to problems, so only the concrete type is set to 'f'.
arraydescr = ArrayDescr(basesize, itemsize, lendescr, flag, is_pure, concrete_type='f')
cache[ARRAY_OR_STRUCT] = arraydescr
if isinstance(ARRAY_INSIDE.OF, lltype.Struct):
descrs = heaptracker.all_interiorfielddescrs(gccache,
ARRAY_INSIDE, get_field_descr=get_interiorfield_descr)
arraydescr.all_interiorfielddescrs = descrs
if ARRAY_OR_STRUCT._gckind == 'gc':
gccache.init_array_descr(ARRAY_OR_STRUCT, arraydescr)
return arraydescr
# ____________________________________________________________
# InteriorFieldDescr
class InteriorFieldDescr(AbstractDescr):
arraydescr = ArrayDescr(0, 0, None, '\x00') # workaround for the annotator
fielddescr = FieldDescr('', 0, 0, '\x00')
def __init__(self, arraydescr, fielddescr):
assert arraydescr.flag == FLAG_STRUCT
self.arraydescr = arraydescr
self.fielddescr = fielddescr
def get_index(self):
return self.fielddescr.get_index()
def get_arraydescr(self):
return self.arraydescr
def get_field_descr(self):
return self.fielddescr
def sort_key(self):
return self.fielddescr.sort_key()
def is_pointer_field(self):
return self.fielddescr.is_pointer_field()
def is_float_field(self):
return self.fielddescr.is_float_field()
def is_integer_bounded(self):
return self.fielddescr.is_integer_bounded()
def get_integer_min(self):
return self.fielddescr.get_integer_min()
def get_integer_max(self):
return self.fielddescr.get_integer_max()
def repr_of_descr(self):
return '<InteriorFieldDescr %s>' % self.fielddescr.repr_of_descr()
def get_interiorfield_descr(gc_ll_descr, ARRAY, name, arrayfieldname=None):
# can be used either with a GcArray of Structs, or with a GcStruct
# containing an inlined GcArray of Structs (then arrayfieldname != None).
cache = gc_ll_descr._cache_interiorfield
try:
return cache[(ARRAY, name, arrayfieldname)]
except KeyError:
arraydescr = get_array_descr(gc_ll_descr, ARRAY)
if arrayfieldname is None:
REALARRAY = ARRAY
else:
REALARRAY = getattr(ARRAY, arrayfieldname)
fielddescr = get_field_descr(gc_ll_descr, REALARRAY.OF, name)
descr = InteriorFieldDescr(arraydescr, fielddescr)
cache[(ARRAY, name, arrayfieldname)] = descr
return descr
# ____________________________________________________________
# CallDescrs
def _missing_call_stub_i(func, args_i, args_r, args_f):
return 0
def _missing_call_stub_r(func, args_i, args_r, args_f):
return lltype.nullptr(llmemory.GCREF.TO)
def _missing_call_stub_f(func, args_i, args_r, args_f):
return longlong.ZEROF
class CallDescr(AbstractDescr):
arg_classes = '' # <-- annotation hack
result_type = '\x00'
result_flag = '\x00'
ffi_flags = 1
def __init__(self, arg_classes, result_type, result_signed, result_size,
extrainfo=None, ffi_flags=1):
"""
'arg_classes' is a string of characters, one per argument:
'i', 'r', 'f', 'L', 'S'
'result_type' is one character from the same list or 'v'
'result_signed' is a boolean True/False
"""
self.arg_classes = arg_classes
self.result_type = result_type
self.result_size = result_size
self.extrainfo = extrainfo
self.ffi_flags = ffi_flags
self.call_stub_i = _missing_call_stub_i
self.call_stub_r = _missing_call_stub_r
self.call_stub_f = _missing_call_stub_f
# NB. the default ffi_flags is 1, meaning FUNCFLAG_CDECL, which
# makes sense on Windows as it's the one for all the C functions
# we are compiling together with the JIT. On non-Windows platforms
# it is just ignored anyway.
if result_type == 'v':
result_flag = FLAG_VOID
elif result_type == 'i':
if result_signed:
result_flag = FLAG_SIGNED
else:
result_flag = FLAG_UNSIGNED
elif result_type == history.REF:
result_flag = FLAG_POINTER
elif result_type == history.FLOAT or result_type == 'L':
result_flag = FLAG_FLOAT
elif result_type == 'S':
result_flag = FLAG_UNSIGNED
else:
raise NotImplementedError("result_type = '%s'" % (result_type,))
self.result_flag = result_flag
def __repr__(self):
res = 'CallDescr(%s)' % (self.arg_classes,)
extraeffect = getattr(self.extrainfo, 'extraeffect', None)
if extraeffect is not None:
res += ' EF=%r' % extraeffect
oopspecindex = getattr(self.extrainfo, 'oopspecindex', 0)
if oopspecindex:
from rpython.jit.codewriter.effectinfo import EffectInfo
for key, value in EffectInfo.__dict__.items():
if key.startswith('OS_') and value == oopspecindex:
break
else:
key = 'oopspecindex=%r' % oopspecindex
res += ' ' + key
return '<%s>' % res
def get_extra_info(self):
return self.extrainfo
def get_ffi_flags(self):
return self.ffi_flags
def get_call_conv(self):
from rpython.rlib.clibffi import get_call_conv
return get_call_conv(self.ffi_flags, True)
def get_arg_types(self):
return self.arg_classes
def get_result_type(self):
return self.result_type
def get_normalized_result_type(self):
if self.result_type == 'S':
return 'i'
if self.result_type == 'L':
return 'f'
return self.result_type
def get_result_size(self):
return self.result_size
def is_result_signed(self):
return self.result_flag == FLAG_SIGNED
def create_call_stub(self, rtyper, RESULT):
from rpython.rlib.clibffi import FFI_DEFAULT_ABI
assert self.get_call_conv() == FFI_DEFAULT_ABI, (
"%r: create_call_stub() with a non-default call ABI" % (self,))
def process(c):
if c == 'L':
assert longlong.supports_longlong
c = 'f'
elif c == 'f' and longlong.supports_longlong:
return 'longlong.getrealfloat(%s)' % (process('L'),)
elif c == 'S':
return 'longlong.int2singlefloat(%s)' % (process('i'),)
arg = 'args_%s[%d]' % (c, seen[c])
seen[c] += 1
return arg
def TYPE(arg):
if arg == 'i':
return lltype.Signed
elif arg == 'f':
return lltype.Float
elif arg == 'r':
return llmemory.GCREF
elif arg == 'v':
return lltype.Void
elif arg == 'L':
return lltype.SignedLongLong
elif arg == 'S':
return lltype.SingleFloat
else:
raise AssertionError(arg)
seen = {'i': 0, 'r': 0, 'f': 0}
args = ", ".join([process(c) for c in self.arg_classes])
result_type = self.get_result_type()
if result_type == history.INT:
result = 'rffi.cast(lltype.Signed, res)'
category = 'i'
elif result_type == history.REF:
assert RESULT == llmemory.GCREF # should be ensured by the caller
result = 'lltype.cast_opaque_ptr(llmemory.GCREF, res)'
category = 'r'
elif result_type == history.FLOAT:
result = 'longlong.getfloatstorage(res)'
category = 'f'
elif result_type == 'L':
result = 'rffi.cast(lltype.SignedLongLong, res)'
category = 'f'
elif result_type == history.VOID:
result = '0'
category = 'i'
elif result_type == 'S':
result = 'longlong.singlefloat2int(res)'
category = 'i'
else:
assert 0
source = py.code.Source("""
def call_stub(func, args_i, args_r, args_f):
fnptr = rffi.cast(lltype.Ptr(FUNC), func)
res = support.maybe_on_top_of_llinterp(rtyper, fnptr)(%(args)s)
return %(result)s
""" % locals())
ARGS = [TYPE(arg) for arg in self.arg_classes]
FUNC = lltype.FuncType(ARGS, RESULT)
d = globals().copy()
d.update(locals())
exec source.compile() in d
call_stub = d['call_stub']
# store the function into one of three attributes, to preserve
# type-correctness of the return value
setattr(self, 'call_stub_%s' % category, call_stub)
def verify_types(self, args_i, args_r, args_f, return_type):
assert self.result_type in return_type
assert (self.arg_classes.count('i') +
self.arg_classes.count('S')) == len(args_i or ())
assert self.arg_classes.count('r') == len(args_r or ())
assert (self.arg_classes.count('f') +
self.arg_classes.count('L')) == len(args_f or ())
def repr_of_descr(self):
res = 'Call%s %d' % (self.result_type, self.result_size)
if self.arg_classes:
res += ' ' + self.arg_classes
if self.extrainfo:
res += ' EF=%d' % self.extrainfo.extraeffect
oopspecindex = self.extrainfo.oopspecindex
if oopspecindex:
res += ' OS=%d' % oopspecindex
return '<%s>' % res
def map_type_to_argclass(ARG, accept_void=False):
kind = getkind(ARG)
if kind == 'int':
if ARG is lltype.SingleFloat: return 'S'
else: return 'i'
elif kind == 'ref': return 'r'
elif kind == 'float':
if is_longlong(ARG): return 'L'
else: return 'f'
elif kind == 'void':
if accept_void: return 'v'
raise NotImplementedError('ARG = %r' % (ARG,))
def get_call_descr(gccache, ARGS, RESULT, extrainfo=None):
arg_classes = map(map_type_to_argclass, ARGS)
arg_classes = ''.join(arg_classes)
result_type = map_type_to_argclass(RESULT, accept_void=True)
RESULT_ERASED = RESULT
if RESULT is lltype.Void:
result_size = 0
result_signed = False
else:
if isinstance(RESULT, lltype.Ptr):
# avoid too many CallDescrs
if result_type == 'r':
RESULT_ERASED = llmemory.GCREF
else:
RESULT_ERASED = llmemory.Address
result_size = symbolic.get_size(RESULT_ERASED,
gccache.translate_support_code)
result_signed = get_type_flag(RESULT) == FLAG_SIGNED
key = (arg_classes, result_type, result_signed, RESULT_ERASED, extrainfo)
cache = gccache._cache_call
try:
calldescr = cache[key]
except KeyError:
calldescr = CallDescr(arg_classes, result_type, result_signed,
result_size, extrainfo)
calldescr.create_call_stub(gccache.rtyper, RESULT_ERASED)
cache[key] = calldescr
assert repr(calldescr.result_size) == repr(result_size)
return calldescr
def unpack_arraydescr(arraydescr):
assert isinstance(arraydescr, ArrayDescr)
ofs = arraydescr.basesize
size = arraydescr.itemsize
sign = arraydescr.is_item_signed()
return size, ofs, sign
def unpack_fielddescr(fielddescr):
assert isinstance(fielddescr, FieldDescr)
ofs = fielddescr.offset
size = fielddescr.field_size
sign = fielddescr.is_field_signed()
return ofs, size, sign
unpack_fielddescr._always_inline_ = True
def unpack_interiorfielddescr(descr):
assert isinstance(descr, InteriorFieldDescr)
arraydescr = descr.arraydescr
ofs = arraydescr.basesize
itemsize = arraydescr.itemsize
fieldsize = descr.fielddescr.field_size
sign = descr.fielddescr.is_field_signed()
ofs += descr.fielddescr.offset
return ofs, itemsize, fieldsize, sign
| jptomo/rpython-lang-scheme | rpython/jit/backend/llsupport/descr.py | Python | mit | 23,481 |
import sys
sys.path.insert(0,"../control/")
import motorControl
import plot
sys.path.insert(0, "../settings/")
import settings
import time
import cv2
import select
def moveAndPlot(motorControler, plotter, values):
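    """Move the arm motors to the given [name, position] pairs, then read all
    motors back and add the readings to the plot against the time elapsed
    since initTime."""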
motorControler.setMotorsByName(values)
out = motorControler.readAllMotors()
plotter.addNewVal(out,time.time()-initTime)
motorSettings = settings.MotorSettings()
motorControler = motorControl.MotorControl(motorSettings.get())
motorControler.setAllSpeed(100)
plotter = plot.Ploting()
initTime = time.time()
exit = False
while not exit:
if select.select([sys.stdin],[],[],0) == ([sys.stdin], [], []):
cmd = sys.stdin.read(1)
if cmd == 'q':
exit = True
elif cmd == 'p':
plotter.plot()
moveAndPlot(motorControler, plotter, [["bottom",150],["mid",220],["head",125],["top",75],["bowl",45]])
time.sleep(1)
moveAndPlot(motorControler, plotter, [["bottom",150],["mid",180],["head",175],["top",75],["bowl",135]])
time.sleep(1)
moveAndPlot(motorControler, plotter, [["bottom",150],["mid",180],["head",125],["top",190],["bowl",45]])
time.sleep(1)
| Fdepraetre/PinokioProject | src/test/testArm.py | Python | mit | 1,100 |
import redis
def do_commands(conn, GW=True):
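    """Run one smoke test per Redis command against `conn` using raw requests.

    With GW=True the expectations match gateway behaviour (commands the
    gateway does not support must return 'ERR Unsupported'); with GW=False
    they match a plain Redis server.
    """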
r = conn.do_generic_request
assert_equal = redis.rr_assert_equal
assert_subs = redis.rr_assert_substring
key = 'commands_genericxxxxxxxkey'
key2 = 'commands_genericxxxxxxkey2'
dest = 'commands_generic_xxxxxdest'
try:
# | APPEND | O | |
r('del', key)
r('APPEND', key, '1')
r('APPEND', key, '2')
assert_equal('12', r('get', key))
# | ASKING | X | |
resp = r('ASKING')
assert(resp.startswith('ERR Unsupported'))
# | AUTH | X | |
resp = r('AUTH', 'passwd')
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp.startswith('ERR Client sent'))
# | BGREWRITEAOF | X | |
resp = r('BGREWRITEAOF')
assert(resp.startswith('ERR Unsupported'))
# | BGSAVE | X | |
resp = r('BGSAVE')
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp.startswith('Background'))
# | BITCOUNT | O | |
r('set', key, 'foobar')
resp = r('BITCOUNT', key)
assert(resp == 26)
# | BITFIELD | O | Available since 1.4 |
r('del', key)
resp = r('BITFIELD', key, 'incrby', 'i5', 100, 1, 'get', 'u4', 0)
assert(resp == [1,0])
# | BITOP | X | |
r('set', key, 'foobar')
r('set', key2, 'abcdef')
resp = r('BITOP', 'AND', dest, key, key2)
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp == 6)
# | BITPOS | O | |
r('set', key, '\xff\xf0\x00')
resp = r('BITPOS', key, 0)
assert(resp == 12)
# | BLPOP | X | |
resp = r('BLPOP', key, 0)
assert(resp.startswith('ERR Unsupported'))
# | BRPOP | X | |
resp = r('BRPOP', key, 0)
assert(resp.startswith('ERR Unsupported'))
# | BRPOPLPUSH | X | |
resp = r('BRPOPLPUSH', key, dest, 0)
assert(resp.startswith('ERR Unsupported'))
# | CLIENT | X | |
resp = r('CLIENT', 'LIST')
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp.startswith('id='))
# | CLUSTER | X | |
resp = r('CLUSTER', 'info')
assert(resp.startswith('ERR Unsupported'))
# | COMMAND | X | |
resp = r('COMMAND')
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(len(resp) > 0)
# | CONFIG | X | |
resp = r('CONFIG', 'get', 'save')
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(len(resp) == 2) # ['save', '']
# | CRC16 | O | |
r('del', key)
resp = r('CRC16', key, 'abcd')
assert(resp == 43062)
# | DBSIZE | O | returns aggregated dbsize |
r('set', key, '1')
resp = r('DBSIZE')
assert(resp >= 1)
# | DEBUG | X | |
if GW:
resp = r('DEBUG', 'object', key)
assert(resp.startswith('ERR Unsupported'))
else:
pass # integration test use 'debug'
# | DECR | O | |
r('set', key, 10)
resp = r('DECR', key)
assert(resp == 9)
# | DECRBY | O | |
r('set', key, 10)
resp = r('DECRBY', key, 2)
assert(resp == 8)
# | DEL | O | |
r('set', key, 'babe')
r('DEL', key)
resp = r('exists', key)
assert(resp == 0)
# | DISCARD | X | |
resp = r('DISCARD')
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp.startswith('ERR DISCARD'))
# | DUMP | O | |
r('set', key, 1)
dump = r('DUMP', key)
resp = r('del', key)
assert(resp == 1)
r('restore', key, 0, dump)
resp = r('get', key)
assert(resp == '1')
# | ECHO | X | |
resp = r('ECHO', 'hihi')
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp == 'hihi')
# | EVAL | X | |
resp = r('EVAL', 'return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}', 2, key, key2, 'first', 'second')
assert(resp.startswith('ERR Unsupported'))
# | EVALSHA | X | |
resp = r('EVALSHA', 'fd758d1589d044dd850a6f05d52f2eefd27f033f', 1, key)
assert(resp.startswith('ERR Unsupported'))
# | EXEC | X | |
resp = r('EXEC')
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp.startswith('ERR EXEC'))
# | EXISTS | O | |
r('del', key)
r('set', key, 1)
resp = r('EXISTS', key)
assert(resp == 1)
# | EXPIRE | O | |
r('set', key, 1)
resp = r('EXPIRE', key, 1)
assert(resp == 1)
# | EXPIREAT | O | |
r('set', key, 1)
resp = r('EXPIREAT', key, 1293840000)
assert(resp == 1)
# | FLUSHALL | X | |
resp = r('FLUSHALL')
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp.startswith('OK'))
# | FLUSHDB | X | |
resp = r('FLUSHDB')
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp.startswith('OK'))
# | GEOADD | O | Available since 1.4 |
# | GEODIST | O | Available since 1.4 |
# | GEOHASH | O | Available since 1.4 |
# | GEOPOS | O | Available since 1.4 |
# | GEORADIUS | O | Available since 1.4 (no store option) |
# | GEORADIUSBYMEMBER | O | Available since 1.4 (no store option) |
r('del', key)
resp = r('GEOADD', key, 13.361389, 38.115556, 'Palermo', 15.087269, 37.502669, 'Catania')
assert(resp == 2)
resp = r('GEODIST', key, 'Palermo', 'Catania')
        assert(float(resp) > 166274 and float(resp) < 166275) # 166274.1516
resp = r('GEOHASH', key, 'Palermo', 'Catania')
assert(len(resp) == 2)
resp = r('GEOPOS', key, 'Palermo', 'Catania', 'NonExisting')
assert(len(resp) == 3)
resp = r('GEORADIUS', key, 15, 37, 200, 'km', 'WITHDIST')
assert(len(resp) == 2)
resp = r('GEORADIUS', key, 15, 37, 200, 'km', 'WITHDIST', 'STORE', key2)
assert(resp.startswith('ERR STORE'))
resp = r('GEORADIUSBYMEMBER', key, 'Palermo', 1000, 'km')
assert(len(resp) == 2)
resp = r('GEORADIUSBYMEMBER', key, 'Palermo', 200, 'km', 'STORE', key2)
assert(resp.startswith('ERR STORE'))
# | GET | O | |
r('set', key, 'gg')
resp = r('GET', key)
assert(resp == 'gg')
# | GETBIT | O | |
r('setbit', key, 7, 1)
resp = r('GETBIT', key, 7)
assert(resp == 1)
# | GETRANGE | O | |
r('set', key, "This is a string")
resp = r('GETRANGE', key, 0, 3)
assert(resp == "This")
# | GETSET | O | |
r('set', key, 'oldval')
resp = r('GETSET', key, 'newval')
assert(resp == 'oldval')
# | HDEL | O | |
# | HEXISTS | O | |
# | HGET | O | |
# | HGETALL | O | |
# | HINCRBY | O | |
# | HINCRBYFLOAT | O | |
# | HKEYS | O | |
# | HLEN | O | |
# | HMGET | O | |
# | HMSET | O | |
r('del', key)
resp = r('HSET', key, 'k1', 'v1')
assert(resp == 1)
resp = r('HGET', key, 'k1')
assert(resp == 'v1')
resp = r('HGETALL', key)
assert(len(resp) == 2)
resp = r('HEXISTS', key, 'kkk')
assert(resp == 0)
r('hset', key, 'count', 100)
resp = r('HINCRBY', key, 'count', 2)
assert(resp == 102)
resp = r('HINCRBYFLOAT', key, 'count', 2.0)
assert(float(resp) == 104.0)
resp = r('HKEYS', key)
assert(len(resp) == 2)
resp = r('HLEN', key)
assert(resp == 2)
resp = r('HMGET', key, 'k1', 'k2')
assert(len(resp) == 2)
resp = r('HMSET', key, 'kk1', 'vv1', 'kk2', 'vv2')
assert(resp == 'OK')
# | HOST: | X | |
# skip
# | HSCAN | O | Available since 1.4 |
# | HSET | O | |
# | HSETNX | O | |
# | HSTRLEN | O | Available since 1.4 |
# | HVALS | O | |
r('del', key)
resp = r('HSET', key, 'k1', 'v11')
assert(resp == 1)
resp = r('HSCAN', key, 0)
assert(len(resp) == 2)
resp = r('HSETNX', key, 'k1', 'v2')
assert(resp == 0)
resp = r('HSTRLEN', key, 'k1')
assert(resp == 3)
resp = r('HVALS', key)
assert(len(resp) == 1)
# | INCR | O | |
# | INCRBY | O | |
# | INCRBYFLOAT | O | |
r('set', key, 100)
resp = r('INCR', key)
assert(resp == 101)
resp = r('INCRBY', key, 1000)
assert(resp == 1101)
resp = r('INCRBYFLOAT', key, 0.12)
assert(float(resp) == 1101.12)
# | INFO | O | returns cluster info |
resp = r('INFO')
assert(len(resp) > 500)
# | KEYS | X | |
resp = r('KEYS', 'nosuchkey_may_be.really.ok???')
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(len(resp) == 0)
# | LASTSAVE | X | |
resp = r('LASTSAVE')
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp > 1500000000)
# | LATENCY | X | |
resp = r('LATENCY')
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp.startswith('ERR wrong number'))
# | LINDEX | O | |
# | LINSERT | O | |
# | LLEN | O | |
# | LPOP | O | |
# | LPUSH | O | |
# | LPUSHX | O | |
# | LRANGE | O | |
# | LREM | O | |
# | LSET | O | |
# | LTRIM | O | |
r('del', key)
resp = r('LPUSH', key, 'v2')
assert(resp == 1)
resp = r('LPUSHX', key, 'v1')
assert(resp == 2)
resp = r('LINDEX', key, 1)
assert(resp == 'v2')
resp = r('LINSERT', key, 'BEFORE', 'v2', 'mid')
assert(resp == 3)
resp = r('LLEN', key)
assert(resp == 3)
resp = r('LRANGE', key, 0, 0)
assert(len(resp) == 1 and resp[0] == 'v1')
resp = r('LREM', key, 0, 'v1')
assert(resp == 1)
resp = r('LSET', key, 1, 'MID')
assert(resp == 'OK')
resp = r('LTRIM', key, 1, -1)
assert(resp == 'OK')
resp = r('LPOP', key)
assert(resp == 'MID')
# | MGET | O | |
r('set', key, 1)
r('set', key2, 2)
resp = r('MGET', key, key2)
assert(len(resp) == 2)
# | MIGRATE | X | |
resp = r('MIGRATE', 'localhost', '7009', key)
assert(resp.startswith('ERR Unsupported'))
# | MONITOR | X | |
# skip
# | MOVE | X | |
resp = r('MOVE', key, 1)
assert(resp.startswith('ERR Unsupported'))
# | MSET | O | |
resp = r('MSET', key, 1, key2, 2)
assert(resp == 'OK')
# | MSETNX | X | |
r('del', key)
r('del', key2)
resp = r('MSETNX', key, 1, key2, 2)
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp == 1) # all keys are set
# | MULTI | X | |
resp = r('MULTI')
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp == 'OK')
resp = r('discard')
assert(resp == 'OK')
# | OBJECT | O | |
        r('set', key, 'this is a test value expected to be unique')
resp = r('OBJECT', 'REFCOUNT', key)
assert(resp == 1)
# | PERSIST | O | |
r('set', key, 100)
resp = r('PERSIST', key)
assert(resp == 0) # has no associated ttl
# | PEXPIRE | O | |
r('set', key, 100)
resp = r('PEXPIRE', key, 10000)
assert(resp == 1)
# | PEXPIREAT | O | |
r('set', key, 200)
resp = r('PEXPIREAT', key, 1000000)
assert(resp == 1)
# | PFADD | O | Available since 1.4 |
# | PFCOUNT | O | Available since 1.4 (single key only) |
r('del', key)
resp = r('PFADD', key, 1, 2, 3, 4, 5, 6)
assert (resp == 1)
resp = r('PFCOUNT', key)
assert (resp == 6)
# | PFDEBUG | X | |
resp = r('PFDEBUG', key)
assert(resp.startswith('ERR Unsupported'))
# | PFMERGE | X | |
r('del', key)
r('del', key2)
        r('pfadd', key, 1, 2, 3, 4, 5)
resp = r('PFMERGE', key2, key)
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp == 'OK')
# | PFSELFTEST | X | |
        resp = r('PFSELFTEST', 'xbac') # bad arg on purpose
assert(resp.startswith('ERR Unsupported'))
# | PING | O | gateway ping |
resp = r('PING')
assert(resp == 'PONG')
# | POST | X | |
# skip
# | PSETEX | O | |
r('del', key)
resp = r('PSETEX', key, 10000, 'val')
assert(resp == 'OK')
# | PSUBSCRIBE | X | |
resp = r('PSUBSCRIBE', 'h?llo')
assert(resp.startswith('ERR Unsupported'))
# | PSYNC | X | |
resp = r('PSYNC', 'runid', 1000)
assert(resp.startswith('ERR Unsupported'))
# | PTTL | O | |
r('set', key, 1)
resp = r('PTTL', key)
assert(resp == -1)
# | PUBLISH | X | |
resp = r('PUBLISH', 'chan', 'message')
assert(resp.startswith('ERR Unsupported'))
# | PUBSUB | X | |
resp = r('PUBSUB', 'CHANNELS')
assert(resp.startswith('ERR Unsupported'))
# | PUNSUBSCRIBE | X | |
resp = r('PUNSUBSCRIBE')
assert(resp.startswith('ERR Unsupported'))
# | QUIT | O | |
# skip
# | RANDOMKEY | X | |
r('set', key, 100)
resp = r('RANDOMKEY')
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(len(resp) > 0)
# | READONLY | X | |
resp = r('READONLY')
assert(resp.startswith('ERR Unsupported'))
# | READWRITE | X | |
resp = r('READWRITE')
assert(resp.startswith('ERR Unsupported'))
# | RENAME | X | |
r('set', key, 1)
r('del', key2)
resp = r('RENAME', key, key2)
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp == 'OK')
# | RENAMENX | X | |
r('set', key, 1)
r('del', key2)
resp = r('RENAMENX', key, key2)
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp == 1)
# | REPLCONF | X | |
resp = r('REPLCONF', 'option', 'value')
assert(resp.startswith('ERR Unsupported'))
# | RESTORE | O | |
r('del', key)
resp = r('RESTORE', key, 0, '\n\x17\x17\x00\x00\x00\x12\x00\x00\x00\x03\x00\x00\xc0\x01\x00\x04\xc0\x02\x00\x04\xc0\x03\x00\xff\x04\x00u#<\xc0;.\xe9\xdd')
assert(resp == 'OK')
resp = r('type', key)
assert(resp == 'list')
# | RESTORE-ASKING | X | |
r('del', key)
resp = r('RESTORE-ASKING', key, 0, '\n\x17\x17\x00\x00\x00\x12\x00\x00\x00\x03\x00\x00\xc0\x01\x00\x04\xc0\x02\x00\x04\xc0\x03\x00\xff\x04\x00u#<\xc0;.\xe9\xdd')
assert(resp.startswith('ERR Unsupported'))
# | ROLE | X | |
resp = r('ROLE')
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(len(resp) == 3)
# | RPOP | O | |
r('del', key)
r('rpush', key, 'v1')
r('rpush', key, 'v2')
resp = r('RPOP', key)
assert(resp == 'v2')
# | RPOPLPUSH | X | |
r('del', key)
r('del', key2)
resp = r('RPOPLPUSH', key, key2)
if GW:
assert (resp.startswith('ERR Unsupported'))
else:
assert (resp == None)
# | RPUSH | O | |
r('del', key)
resp = r('RPUSH', key, 'v')
assert(resp == 1)
# | RPUSHX | O | |
r('del', key)
r('rpush', key, 'v1')
resp = r('RPUSHX', key, 'v2')
assert(resp == 2)
# | SADD | O | |
r('del', key)
resp = r('SADD', key, 'v1')
assert(resp == 1)
# | SAVE | X | |
resp = r('SAVE')
assert(resp.startswith('ERR Unsupported'))
# | SCAN | O | Available since 1.4 |
resp = r('SCAN', 0)
assert(len(resp) == 2)
# | SCARD | O | |
r('del', key)
r('sadd', key, 'v')
resp = r('SCARD', key)
assert(resp == 1)
# | SCRIPT | X | |
resp = r('SCRIPT', 'exists')
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(len(resp) == 0)
# | SDIFF | X | |
r('del', key)
r('del', key2)
r('sadd', key, 'a', 'b', 'c')
r('sadd', key2, 'c', 'd')
resp = r('SDIFF', key, key2)
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(len(resp) == 2)
# | SDIFFSTORE | X | |
r('del', key)
r('del', key2)
r('sadd', key, 'a', 'b', 'c')
r('sadd', key2, 'c', 'd')
resp = r('SDIFFSTORE', key2, key)
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp == 3)
# | SELECT | X | |
resp = r('SELECT', 0)
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp == 'OK')
# | SET | O | |
resp = r('SET', key, 100)
assert(resp == 'OK')
# | SETBIT | O | |
r('set', key, 7)
resp = r('SETBIT', key, 3, 0)
assert(resp == 1)
# | SETEX | O | |
resp = r('SETEX', key, 10, "hello")
assert(resp == 'OK')
# | SETNX | O | |
r('del', key)
resp = r('SETNX', key, 100)
assert(resp == 1)
# | SETRANGE | O | |
r('set', key, 'Hello World')
resp = r('SETRANGE', key, 6, 'Redis')
assert(resp == 11)
# | SHUTDOWN | X | |
resp = r('SHUTDOWN')
assert(resp.startswith('ERR Unsupported'))
# | SINTER | X | |
r('del', key)
r('del', key2)
r('sadd', key, 'a', 'b', 'c')
r('sadd', key2, 'b', 'c')
resp = r('SINTER', key, key2)
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(len(resp) == 2)
# | SINTERSTORE | X | |
r('del', key)
r('del', key2)
r('sadd', key, 'a', 'b', 'c')
resp = r('SINTERSTORE', key2, key)
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp == 3)
# | SISMEMBER | O | |
r('del', key)
r('sadd', key, 'a', 'b', 'c')
resp = r('SISMEMBER',key, 'c')
assert(resp == 1)
# | SLAVEOF | X | |
resp = r('SLAVEOF', 'localhost', 1234)
assert(resp.startswith('ERR Unsupported'))
# | SLOWLOG | X | |
resp = r('SLOWLOG', 'get', 1)
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(not str(resp).startswith('ERR'))
# | SMEMBERS | O | |
r('del', key)
r('sadd', key, 'a', 'b', 'c')
resp = r('SMEMBERS', key)
assert(len(resp) == 3)
# | SMOVE | X | |
r('del', key)
r('del', key2)
r('sadd', key, 'a', 'b', 'c')
r('sadd', key2, 'b', 'c')
resp = r('SMOVE', key, key2, 'a')
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp == 1)
# | SORT | X | |
r('del', key)
r('sadd', key, 10, 9, 8)
resp = r('SORT', key)
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(len(resp) == 3 and resp[0] == '8')
# | SPOP | X | |
r('del', key)
r('sadd', key, 10, 9, 8)
resp = r('SPOP', key)
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp == '10' or resp == '9' or resp == '8')
# | SRANDMEMBER | O | |
r('del', key)
r('sadd', key, 10, 9, 8)
resp = r('SRANDMEMBER', key)
assert(resp == '10' or resp == '9' or resp == '8')
# | SREM | O | |
r('del', key)
r('sadd', key, 10, 9, 8)
resp = r('SREM', key, 10, 9)
assert(resp == 2)
# | SSCAN | O | Available since 1.4 |
r('del', key)
r('sadd', key, 10)
resp = r('SSCAN', key, 0)
assert(len(resp) == 2)
# | STRLEN | O | |
r('set', key, '01234')
resp = r('STRLEN', key)
assert(resp == 5)
# | SUBSCRIBE | X | |
resp = r('SUBSCRIBE', 'channel')
assert(resp.startswith('ERR Unsupported'))
# | SUBSTR | O | |
r('set', key, "This is a string")
resp = r('SUBSTR', key, 0, 3)
assert(resp == "This")
# | SUNION | X | |
r('del', key)
r('sadd', key, 'a', 'b', 'c')
resp = r('SUNION', key)
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(len(resp) == 3)
# | SUNIONSTORE | X | |
r('del', key)
r('del', key2)
r('sadd', key, 'a', 'b', 'c')
resp = r('SUNIONSTORE', key2, key)
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp == 3)
# | SYNC | X | |
resp = r('SYNC')
assert(resp.startswith('ERR Unsupported'))
# | TIME | X | |
resp = r('TIME')
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(len(resp) == 2)
# | TOUCH | O | Available since 1.4 (multi key) |
r('del', key)
r('del', key2)
resp = r('TOUCH', key, key2)
assert(resp == 0)
# | TTL | O | |
r('set', key, 100)
resp = r('TTL', key)
assert(resp == -1)
# | TYPE | O | |
r('set', key, 100)
resp = r('TYPE', key)
assert(resp == 'string')
# | UNSUBSCRIBE | X | |
resp = r('UNSUBSCRIBE')
assert(resp.startswith('ERR Unsupported'))
# | UNWATCH | X | |
# | WATCH | X | |
resp = r('WATCH', key)
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp == 'OK')
resp = r('UNWATCH')
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp == 'OK')
# | WAIT | X | |
resp = r('WAIT', 1, 10000)
assert(resp.startswith('ERR Unsupported'))
# | ZADD | O | |
# | ZCARD | O | |
# | ZCOUNT | O | |
# | ZINCRBY | O | |
# | ZINTERSTORE | X | |
# | ZLEXCOUNT | O | Available since 1.4 |
# | ZRANGE | O | |
# | ZRANGEBYLEX | O | Available since 1.4 |
# | ZRANGEBYSCORE | O | |
# | ZRANK | O | |
# | ZREM | O | |
# | ZREMRANGEBYLEX | O | Available since 1.4 |
# | ZREMRANGEBYRANK | O | |
# | ZREMRANGEBYSCORE | O | |
# | ZREVRANGE | O | |
# | ZREVRANGEBYLEX | O | Available since 1.4 |
# | ZREVRANGEBYSCORE | O | |
# | ZREVRANK | O | |
# | ZSCAN | O | Available since 1.4 |
# | ZSCORE | O | |
# | ZUNIONSTORE | X | |
r('del', key)
resp = r('ZADD', key, 1.0, 'v1')
assert(resp == 1)
resp = r('ZCARD', key)
assert(resp == 1)
resp = r('ZCOUNT', key, 0.9, 1.1)
assert(resp == 1)
resp = r('ZINCRBY', key, 1.0, 'v1')
assert(float(resp) == 2.0)
resp = r('ZINTERSTORE', dest, 1, key)
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp == 1)
r('del', key)
r('zadd',key, 0, 'a', 0, 'b', 0, 'c', 0, 'd', 0, 'e', 0, 'f', 0, 'g')
resp = r('ZLEXCOUNT', key, '-', '+')
assert(resp == 7)
r('del', key)
r('zadd', key, 0, "zero", 1, "one", 2, "two")
resp = r('ZRANGE', key, 0, -1)
assert(len(resp) == 3)
r('del', key)
r('zadd',key, 0, 'a', 0, 'b', 0, 'c', 0, 'd', 0, 'e', 0, 'f', 0, 'g')
resp = r('ZRANGEBYLEX', key, '-', '[c')
assert(len(resp) == 3)
r('del', key)
r('zadd', key, 0, "zero", 1, "one", 2, "two")
resp = r('ZRANGEBYSCORE', key, 1, 2)
assert(len(resp) == 2)
resp = r('ZRANK', key, "one")
assert(resp == 1)
resp = r('ZREM', key, "two")
assert(resp == 1)
r('del', key)
r('zadd',key, 0, 'a', 0, 'b', 0, 'c', 0, 'd', 0, 'e', 0, 'f', 0, 'g')
resp = r('ZREMRANGEBYLEX', key, '[a', '[e')
assert(resp == 5)
r('del', key)
r('zadd',key, 0, 'a', 1, 'b', 2, 'c', 3, 'd', 4, 'e', 5, 'f', 6, 'g')
resp = r('ZREMRANGEBYRANK', key, 0, 1)
assert(resp == 2)
r('del', key)
r('zadd',key, 0, 'a', 1, 'b', 2, 'c', 3, 'd', 4, 'e', 5, 'f', 6, 'g')
resp = r('ZREMRANGEBYSCORE', key, 0, 3)
assert(resp == 4)
r('del', key)
r('zadd', key, 0, "zero", 1, "one", 2, "two")
resp = r('ZREVRANGE', key, 0, -1)
assert(len(resp) == 3 and resp[0] == 'two')
r('del', key)
r('zadd',key, 0, 'a', 0, 'b', 0, 'c', 0, 'd', 0, 'e', 0, 'f', 0, 'g')
resp = r('ZREVRANGEBYLEX', key, '[c', '-')
assert(len(resp) == 3)
r('del', key)
r('zadd',key, 0, 'a', 1, 'b', 2, 'c', 3, 'd', 4, 'e', 5, 'f', 6, 'g')
resp = r('ZREVRANGEBYSCORE', key, '(3', '(0')
assert(len(resp) == 2)
r('del', key)
r('zadd', key, 0, "zero", 1, "one", 2, "two")
resp = r('ZREVRANK', key, "zero")
assert(resp == 2)
r('del', key)
r('zadd', key, 0, "zero")
resp = r('ZSCAN', key, 0)
assert(len(resp) == 2 and resp[0] == '0')
r('del', key)
r('zadd',key, 0, 'a', 1, 'b', 2, 'c', 3, 'd', 4, 'e', 5, 'f', 6, 'g')
resp = r('ZSCORE', key, 'c')
assert(float(resp) == 2)
r('del', key)
r('del', key2)
r('zadd',key, 0, 'a', 1, 'b', 2, 'c', 3, 'd', 4, 'e', 5, 'f', 6, 'g')
resp = r('ZUNIONSTORE', key2, 1, key)
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp == 7)
finally:
r('del', key)
r('del', key2)
r('del', dest)
| naver/nbase-arc | redis/tests/nbase-arc/commands_generic.py | Python | apache-2.0 | 29,358 |
# -*- coding: utf-8 -*-
#
# This file is part of CERN Analysis Preservation Framework.
# Copyright (C) 2016 CERN.
#
# CERN Analysis Preservation Framework is free software; you can redistribute
# it and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# CERN Analysis Preservation Framework is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CERN Analysis Preservation Framework; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""CAP Schema permissions."""
from functools import partial
from cap.modules.experiments.permissions import exp_need_factory
from invenio_access.factory import action_factory
from invenio_access.permissions import ParameterizedActionNeed, Permission
SchemaReadAction = action_factory(
'schema-object-read', parameter=True)
SchemaReadActionNeed = partial(ParameterizedActionNeed, 'schema-object-read')
class ReadSchemaPermission(Permission):
"""Schema read permission."""
def __init__(self, schema):
"""Initialize state.
Read access for:
* all members of experiment assigned to schema
* all users/roles assigned to schema-object-read action
"""
_needs = set()
_needs.add(SchemaReadActionNeed(schema.id))
# experiments members can access schema
if schema.experiment:
_needs.add(exp_need_factory(schema.experiment))
super(ReadSchemaPermission, self).__init__(*_needs)
| tiborsimko/analysis-preservation.cern.ch | cap/modules/schemas/permissions.py | Python | gpl-2.0 | 2,071 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Simul(Package):
"""simul is an MPI coordinated test of parallel
filesystem system calls and library functions. """
homepage = "https://github.com/LLNL/simul"
url = "https://github.com/LLNL/simul/archive/1.16.tar.gz"
version('1.16', 'd616c1046a170c1e1b7956c402d23a95')
version('1.15', 'a5744673c094a87c05c6f0799d1f496f')
version('1.14', 'f8c14f0bac15741e2af354e3f9a0e30f')
version('1.13', '8a80a62d569557715d6c9c326e39a8ef')
depends_on('mpi')
def install(self, spec, prefix):
filter_file('mpicc', '$(MPICC)', 'Makefile', string=True)
filter_file('inline void', 'void', 'simul.c', string=True)
make('simul')
mkdirp(prefix.bin)
install('simul', prefix.bin)
| TheTimmy/spack | var/spack/repos/builtin/packages/simul/package.py | Python | lgpl-2.1 | 2,009 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2018-10-02 15:12
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cerimonial', '0054_auto_20181001_1728'),
]
operations = [
migrations.AlterField(
model_name='endereco',
name='complemento',
field=models.CharField(blank=True, default='', max_length=6, verbose_name='Complemento'),
),
migrations.AlterField(
model_name='localtrabalho',
name='complemento',
field=models.CharField(blank=True, default='', max_length=6, verbose_name='Complemento'),
),
]
| interlegis/saap | saap/cerimonial/migrations/0055_auto_20181002_1212.py | Python | gpl-3.0 | 728 |
import os
from PyML.utils import misc,myio
from PyML.base.pymlObject import PyMLobject
__docformat__ = "restructuredtext en"
class Parser (PyMLobject) :
'''A parser class to read datasets from a file.
Each parser support the following interface:
Constructor - pass a file name / file handle and information on which
pattern/classes/features to read from the file
check - checks whether the file conforms to the format read by the parser
scan - scan the file and make the _address variable that lists the positions
in the file of all the patterns that need to be read
next - read the next pattern (after calling the __iter__ method)
'''
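    # Illustrative use of this interface (a sketch; 'data.txt' is a hypothetical
    # file name, and SparseParser/CSVParser are the concrete parsers defined below):
    #
    #   parser = SparseParser('data.txt')
    #   if parser.check():
    #       parser.scan()                    # index the patterns to read
    #       patterns = [x for x in parser]   # each iteration yields one pattern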
commentChar = ['%', '#']
def __init__(self, file, **args) :
if type(file) == type('') :
if not os.path.exists(file) :
raise ValueError, "file does not exist at %s" % file
self._fileHandle = myio.myopen(file)
#self._fileHandle = open(file)
else :
self._fileHandle = file
if 'classes' in args :
self.classesToRead = args['classes']
else :
self.classesToRead = []
if 'patterns' in args :
self.patternsToRead = args['patterns']
else :
self.patternsToRead = None
if 'features' in args :
self.featuresToRead = args['features']
else :
self.featuresToRead = []
def check(self) :
pass
def scan(self) :
pass
def __iter__(self) :
self._addressIterator = iter(self._address)
return self
def __len__(self) :
'''how many patterns are read'''
return len(self._address)
def next(self) :
pass
def skipComments(self) :
pos = 0
line = self._fileHandle.readline()
while line[0] in self.commentChar :
pos += len(line)
line = self._fileHandle.readline()
return line, pos
class SparseParser (Parser) :
'''A class for parsing sparse data'''
def __init__(self, file, **args) :
Parser.__init__(self, file, **args)
self.sparsify = False
if 'sparsify' in args :
self.sparsify = args['sparsify']
def check(self) :
self._fileHandle.seek(0)
line,pos = self.skipComments()
return len(line.split(':')) > 1
def readLabels(self) :
self._fileHandle.seek(0)
patternID = None
L = []
line,pos = self.skipComments()
# determine if the dataset has IDs :
patternIDflag = (line.find(",") != -1)
if patternIDflag :
patternID = []
# make sure there are labels :
tokens = line.split(',')[-1].split()
if len(tokens) == 0 or tokens[0].find(':') >= 0 :
raise ValueError, "unlabeled data"
while line :
if patternIDflag:
(patID, line) = line.split(",")
patternID.append(patID)
L.append(line.split()[0])
line = self._fileHandle.readline()
return L,patternID
def scan(self) :
self._fileHandle.seek(0)
patternID = None
self._featureID = []
address = []
line, pos = self.skipComments()
# determine if the dataset has IDs :
patternIDflag = (line.find(",") != -1)
if patternIDflag :
patternID = []
# determine if the dataset has labels or not :
tokens = line.split(',')[-1].split()
if len(tokens) == 0 or tokens[0].find(':') >= 0 :
L = None
labeledData = 0
firstToken = 0
else :
L = []
labeledData = 1
firstToken = 1
self._numFeatures = 0
self.integerID = True
i = 0
featureDict = {}
foundIntegerID = False
while line :
nextPos = pos + len(line)
if patternIDflag:
(patID, line) = line.split(",")
tokens = line.split()
if labeledData :
label = tokens[0]
else :
label = None
if not foundIntegerID :
if labeledData :
t = tokens[1:]
else :
t = tokens
if len(t) > 0 :
foundIntegerID = True
for token in t :
try :
int(token.split(':')[0])
except :
self.integerID = False
if (label in self.classesToRead or len(self.classesToRead) == 0) :
if labeledData :
L.append(label)
if patternIDflag :
patternID.append(patID)
address.append(pos)
pos = nextPos
line = self._fileHandle.readline()
i +=1
# if i % 100 == 0 and i > 0 :
# print 'scanned',i,'patterns'
self._featureDict = {}
self._featureDict2 = {}
self._featureKeyDict = {}
self._address = address
self._labeledData = labeledData
self._labels = L
self._patternIDflag = patternIDflag
self._patternID = patternID
self._firstToken = firstToken
def __iter__(self) :
self._addressIterator = iter(self._address)
return self
def next(self) :
address = self._addressIterator.next()
self._fileHandle.seek(address)
line = self._fileHandle.readline()
if self._patternIDflag:
(patID, line) = line.split(",")
tokens = line.split()
if self._labeledData :
label = tokens[0]
else :
label = None
x = {}
if len(tokens) > self._firstToken : # check if this is not a zero vector
for token in tokens[self._firstToken:] :
(featureID, featureVal) = token.split(":")
if self.integerID :
featureID = int(featureID)
uniqueHash = True
# handle the case where the hash function is not unique:
if (featureID in self._featureDict2 and
self._featureDict2[featureID] != featureID) :
uniqueHash = False
#XXX
for i in range(255) :
fid = featureID + '+' + chr(i)
if fid not in self._featureDict2 :
featureID = fid
uniqueHash = True
if not uniqueHash :
raise ValueError, 'non-unique hash'
if not self.integerID :
featureKey = hash(featureID)
else :
featureKey = featureID
self._featureDict[featureID] = featureKey
self._featureDict2[featureID] = featureID
self._featureKeyDict[featureKey] = 1
if float(featureVal) != 0.0 or not self.sparsify :
#x[self._featureDict[featureID]] = float(featureVal)
x[featureKey] = float(featureVal)
return x
def postProcess(self) :
if len(self._featureDict.keys()) != len(misc.unique(self._featureDict.values())) :
print len(self._featureDict.keys()), len(misc.unique(self._featureDict.values()))
raise ValueError, 'non-unique hash'
featureKeyDict = {}
featureKey = self._featureDict.values()
featureKey.sort()
for i in range(len(featureKey)) :
featureKeyDict[featureKey[i]] = i
inverseFeatureDict = misc.invertDict(self._featureDict)
featureID = [str(inverseFeatureDict[key]) for key in featureKey]
return featureID, featureKey, featureKeyDict
class CSVParser (Parser):
"""A class for parsing delimited files"""
attributes = {'idColumn' : None,
'labelsColumn' : None,
'headerRow' : False}
def __init__(self, file, **args) :
"""
:Keywords:
- `headerRow` - True/False depending on whether the file contains a
header row that provides feature IDs
- `idColumn` - set to 0 if the data has pattern IDs in the first column
- `labelsColumn` - possible values: if there are no patternIDs
it is either 0 or -1, and if there are patternIDs, 1 or -1
"""
Parser.__init__(self, file, **args)
PyMLobject.__init__(self, None, **args)
if self.labelsColumn == 1 :
self.idColumn = 0
if self.idColumn is None and self.labelsColumn is None :
self._first = 0
else :
self._first = max(self.idColumn, self.labelsColumn) + 1
# print 'label at ', self.labelsColumn
def check(self) :
"""very loose checking of the format of the file:
if the first line does not contain a colon (":") it is assumed
to be in csv format
the delimiter is determined to be "," if the first line contains
at least one comma; otherwise a split on whitespaces is used.
"""
self._fileHandle.seek(0)
if self.headerRow :
line = self._fileHandle.readline()
else :
line,pos = self.skipComments()
if len(line.split('\t')) > 1 :
self.delim = '\t'
elif len(line.split(',')) > 1 :
self.delim = ','
else :
self.delim = None
#line,pos = self.skipHeader(line,pos)
# print 'delimiter', self.delim
# a file that does not contain a ":" is assumed to be in
# CSV format
if len(line.split(':')) > 1 : return False
return True
def skipHeader(self, line, pos) :
"""
check if the file has a first line that provides the feature IDs
"""
tokens = line[:-1].split(self.delim)
if self.labelsColumn == -1 :
self._last = len(tokens) - 1
else :
self._last = len(tokens)
if self.headerRow :
self._featureID = tokens[self._first:self._last]
pos += len(line)
line = self._fileHandle.readline()
return line, pos
def readLabels(self) :
self._fileHandle.seek(0)
L = []
patternID = []
line,pos = self.skipComments()
line, pos = self.skipHeader(line, pos)
tokens = line[:-1].split(self.delim)
if self.labelsColumn is None :
if len(tokens) == 2 :
self.labelsColumn = 1
self.idColumn = 0
elif len(tokens) == 1 :
self.labelsColumn = 0
i = 1
while line :
tokens = line[:-1].split(self.delim)
if self.idColumn is not None :
patternID.append(tokens[self.idColumn])
else :
patternID.append(str(i))
if self.labelsColumn is not None :
L.append(tokens[self.labelsColumn])
line = self._fileHandle.readline()
            i += 1
return L,patternID
def scan(self) :
self._fileHandle.seek(0)
self._featureID = None
address = []
line,pos = self.skipComments()
line, pos = self.skipHeader(line, pos)
tokens = line.split(self.delim)
self._patternID = []
dim = len(tokens) - (self.idColumn is not None) - \
(self.labelsColumn is not None)
self._labels = None
if self.labelsColumn is not None :
self._labels = []
i = 0
while line :
address.append(pos)
pos += len(line)
line = self._fileHandle.readline()
i +=1
# if i % 1000 == 0 and i > 0 :
# print 'scanned',i,'patterns'
self._address = address
if self._featureID is None :
self._featureID = [str(i) for i in range(dim)]
def next(self) :
address = self._addressIterator.next()
self._fileHandle.seek(address)
line = self._fileHandle.readline()
tokens = line[:-1].split(self.delim)
x = [float(token) for token in tokens[self._first:self._last]]
if self.labelsColumn is not None :
self._labels.append(tokens[self.labelsColumn])
if self.idColumn is not None :
self._patternID.append(tokens[self.idColumn])
return x
def postProcess(self) :
featureKey = [hash(id) for id in self._featureID]
featureKeyDict = {}
for i in range(len(featureKey)) :
featureKeyDict[featureKey[i]] = i
return self._featureID, featureKey, featureKeyDict
def parserDispatcher(fileHandle, **args) :
if 'hint' in args :
hint = args['hint']
if hint == 'sparse' :
return SparseParser(fileHandle, **args)
elif hint == 'csv' :
p = CSVParser(fileHandle, **args)
p.check()
#print 'returning a csv parser'
return p
p = SparseParser(fileHandle, **args)
if p.check() :
return p
p = CSVParser(fileHandle, **args)
if p.check() :
return p
raise ValueError, 'file does not match existing parsers'
def test(fileName) :
p = SparseParser(fileName)
print 'p.check:',p.check()
p.scan()
| cathywu/Sentiment-Analysis | PyML-0.7.9/PyML/containers/parsers.py | Python | gpl-2.0 | 13,813 |
import numpy as np
import pandas as pd
from pandas.tools.merge import concat
from scipy.stats import ranksums
from munge import toPrettyCsv
# from utils import *
from results_utils import *
COMBINED_CSV_FILE = 'combined.csv'
ERRS_CSV_FILE = 'errRates.csv'
RANK_CSV_FILE = 'combined_rank.csv'
ZVALS_CSV_FILE = 'combined_zvals.csv'
PVALS_CSV_FILE = 'combined_pvals.csv'
def extractErrRates(combined, onlySharedDatasets=True):
# df = cleanCombinedResults(combined)
df = removeStatsCols(combined) # remove length / cardinality stats
if DATASET_COL_NAME in df.columns:
setDatasetAsIndex(df)
if onlySharedDatasets:
df.dropna(axis=0, how='any', inplace=True)
return df
def computeRanks(errRates, lowIsBetter=True, onlyFullRows=True):
if onlyFullRows:
errRates = errRates.dropna(axis=0, how='any', inplace=False)
return errRates.rank(axis=1, numeric_only=True, ascending=lowIsBetter)
# return errRates.copy().rank(axis=1, numeric_only=True)
def avgRanks(errRates, lowIsBetter=True, onlyFullRows=True):
ranks = computeRanks(errRates, lowIsBetter=lowIsBetter,
onlyFullRows=onlyFullRows)
# return ranks.mean(axis=0, skipna=True)
return ranks.mean(axis=0)
def computeRankSumZvalsPvals(errRates, lowIsBetter=True):
ranks = computeRanks(errRates, onlyFullRows=False)
# compute the ranked sums test p-value between different classifiers
numClassifiers = errRates.shape[1]
dims = (numClassifiers, numClassifiers)
zvals = np.empty(dims)
pvals = np.empty(dims)
for i in range(numClassifiers):
zvals[i, i] = 0
pvals[i, i] = 1
for j in range(i+1, numClassifiers):
x = errRates.iloc[:, i]
y = errRates.iloc[:, j]
# compare using all datasets they have in common
rowsWithoutNans = np.invert(np.isnan(x) + np.isnan(y))
x = x[rowsWithoutNans]
y = y[rowsWithoutNans]
zvals[i, j], pvals[i, j] = ranksums(y, x) # cols are indep var
zvals[j, i], pvals[j, i] = -zvals[i, j], pvals[i, j]
classifierNames = ranks.columns.values
zvals = pd.DataFrame(data=zvals, index=classifierNames,
columns=classifierNames)
pvals = pd.DataFrame(data=pvals, index=classifierNames,
columns=classifierNames)
return zvals, pvals
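# Sketch of how the ranking helpers above are typically combined (assumes an
# errRates frame of datasets x classifiers, as produced by extractErrRates):
#   ranks = avgRanks(errRates)                         # mean rank per classifier
#   zvals, pvals = computeRankSumZvalsPvals(errRates)
#   # pvals.loc[a, b] is the two-sided rank-sum p-value comparing classifiers a and b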
def extractBestResultsRows(df,
objectiveColName=ACCURACY_COL_NAME,
classifierColName=CLASSIFIER_COL_NAME,
datasetColName=DATASET_COL_NAME):
"""
df: a dataframe with columns for {accuracy (or another objective
function), Dataset, Classifier}, and possibly others
return: a dataframe of only the rows containing the highest accuracy
for each classifier
"""
df = df.copy()
# ensure that all idxs are unique...everything breaks otherwise
df.index = np.arange(df.shape[0])
datasetCol = df[datasetColName]
classifiersCol = df[classifierColName]
uniqDatasets = datasetCol.unique()
uniqClassifiers = classifiersCol.unique()
# print uniqDatasets
# print uniqClassifiers
# return
# allRows = pd.DataFrame()
keepIdxs = []
for dset in uniqDatasets:
dsetDf = df[df[datasetColName] == dset]
# print dsetDf
for classifier in uniqClassifiers:
# classifierDf = df[(datasetCol == dset) * (classifiersCol == classifier)]
# classifierDf = df[df[classifierColName] == classifier]
classifierDf = dsetDf[dsetDf[classifierColName] == classifier]
# print classifier
# print classifierDf
assert(classifierDf.empty or classifierDf.shape[0] == 1)
# continue
if classifierDf.empty:
continue
idx = classifierDf[ACCURACY_COL_NAME].idxmax(skipna=True)
if np.isnan(idx):
continue
try: # deal with ties by taking first element
idx = idx[0]
except:
pass
keepIdxs.append(idx)
# if not np.isnan(idx):
# row = classifierDf.iloc(idx)
# print row
# allRows = concat([allRows, row], join='outer')
# keepIdxs.append(idx)
# break
# print keepIdxs
return df.iloc[sorted(keepIdxs)]
def extractBestResults(df,
objectiveColName=ACCURACY_COL_NAME,
classifierColName=CLASSIFIER_COL_NAME,
datasetColName=DATASET_COL_NAME):
"""
df: a dataframe with columns for {accuracy (or another objective
function), Dataset, Classifier}, and possibly others
return: a dataframe indexed by Dataset, with one column for each
classifier; entries in the classifier column are equal to the highest
result of that classifier in objectiveColName
"""
best = extractBestResultsRows(df, objectiveColName,
classifierColName, datasetColName)
uniqDatasets = best[datasetColName].unique()
uniqClassifiers = best[classifierColName].unique()
colNames = [datasetColName]
df = pd.DataFrame(np.asarray(uniqDatasets), columns=colNames)
for classifier in uniqClassifiers:
clsDf = best[best[classifierColName] == classifier]
# dont care about other parameters and dont need a column
# of just the classifier name
clsDf = clsDf[[datasetColName, objectiveColName]]
# rename the objective column to the name of this classifier
# so each classifier will have its own column
clsDf.columns = [datasetColName, classifier]
df = df.merge(clsDf, on=datasetColName, how='outer')
return df
def main():
combined = buildCombinedResults()
toPrettyCsv(combined, COMBINED_CSV_FILE)
combined = cleanCombinedResults(combined)
errRates = extractErrRates(combined)
toPrettyCsv(errRates, ERRS_CSV_FILE)
ranks = computeRanks(errRates)
toPrettyCsv(ranks, RANK_CSV_FILE)
print ranks.sum(axis=0)
zvals, pvals = computeRankSumZvalsPvals(errRates)
toPrettyCsv(zvals, ZVALS_CSV_FILE)
toPrettyCsv(pvals, PVALS_CSV_FILE)
if __name__ == '__main__':
main()
| dblalock/flock | python/analyze/classify/compare_algos.py | Python | mit | 6,149 |
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import models, fields, api
from datetime import date
from openerp.osv import fields as old_fields
class res_users(models.Model):
_inherit = "res.users"
# added this here because in v8 there is a conflict with a char birthdate
# field in partner it is supose to be fixed
birthdate = fields.Date(string='Birthdate')
class res_partner(models.Model):
_inherit = "res.partner"
@api.one
@api.depends('birthdate')
def _get_age(self):
today = date.today()
age = False
if self.birthdate:
birthdate = fields.Date.from_string(self.birthdate)
try:
birthday = birthdate.replace(year=today.year)
# raised when birth date is February 29 and the current year is
# not a leap year
except ValueError:
birthday = birthdate.replace(
year=today.year, day=birthdate.day - 1)
if birthday > today:
age = today.year - birthdate.year - 1
else:
age = today.year - birthdate.year
self.age = age
@api.one
# @api.depends('wife_id')
def _get_husband(self):
husbands = self.search([('wife_id', '=', self.id)])
self.husband_id = husbands.id
@api.one
def _set_wife(self):
husbands = self.search([('wife_id', '=', self.id)])
# If wife related to this partner, we set husband = False for those
# wifes
husbands.write({'wife_id': False})
# We write the husband for the actual wife
if self.husband_id:
self.husband_id.wife_id = self.id
@api.model
def _search_husband(self, operator, value):
if operator == 'like':
operator = 'ilike'
partners = self.search([
('wife_id', '!=', False),
('name', operator, value)])
return [('id', 'in', partners.mapped('wife_id.id'))]
disabled_person = fields.Boolean(
string='Disabled Person?'
)
    # TODO: consider depending on the OCA partner_firstname module instead,
    # so that these fields come from there
firstname = fields.Char(
string='First Name'
)
lastname = fields.Char(
string='Last Name'
)
national_identity = fields.Char(
string='National Identity'
)
passport = fields.Char(
string='Passport'
)
marital_status = fields.Selection(
[(u'single', u'Single'), (u'married', u'Married'),
(u'divorced', u'Divorced')],
string='Marital Status',
)
birthdate = fields.Date(
string='Birthdate'
)
father_id = fields.Many2one(
'res.partner',
string='Father',
context={'default_is_company': False, 'default_sex': 'M',
'from_member': True},
domain=[('is_company', '=', False), ('sex', '=', 'M')]
)
mother_id = fields.Many2one(
'res.partner',
string='Mother',
context={'default_is_company': False, 'default_sex': 'F',
'from_member': True},
domain=[('is_company', '=', False), ('sex', '=', 'F')]
)
sex = fields.Selection(
[(u'M', u'Male'), (u'F', u'Female')],
string='Sex',
)
age = fields.Integer(
compute='_get_age',
type='integer',
string='Age'
)
father_child_ids = fields.One2many(
'res.partner',
'father_id',
string='Childs',
)
mother_child_ids = fields.One2many(
'res.partner',
'mother_id',
string='Childs',
)
nationality_id = fields.Many2one(
'res.country',
string='Nationality'
)
husband_id = fields.Many2one(
'res.partner',
compute='_get_husband',
inverse='_set_wife',
search='_search_husband',
string='Husband',
domain=[('sex', '=', 'M'), ('is_company', '=', False)],
context={'default_sex': 'M', 'is_person': True}
)
wife_id = fields.Many2one(
'res.partner',
string='Wife',
domain=[('sex', '=', 'F'), ('is_company', '=', False)],
context={'default_sex': 'F', 'is_person': True}
)
@api.one
@api.onchange('firstname', 'lastname')
@api.constrains('firstname', 'lastname')
def build_name(self):
print 'build_name'
if self.lastname and self.firstname:
self.name = '%s %s' % (
self.lastname or '', self.firstname or '')
print 'name', self.name
def name_get(self, cr, uid, ids, context=None):
if context is None:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
res = []
for record in self.browse(cr, uid, ids, context=context):
name = record.name
national_identity = ''
if record.national_identity:
national_identity = '[' + record.national_identity + ']'
name = "%s %s" % (name, national_identity)
if record.parent_id and not record.is_company:
name = "%s, %s" % (record.parent_id.name, name)
if context.get('show_address'):
name = name + "\n" + \
self._display_address(
cr, uid, record, without_company=True, context=context)
name = name.replace('\n\n', '\n')
name = name.replace('\n\n', '\n')
if context.get('show_email') and record.email:
name = "%s <%s>" % (name, record.email)
res.append((record.id, name))
return res
def name_search(
self, cr, uid, name, args=None, operator='ilike',
context=None, limit=100):
if not args:
args = []
if name and operator in ('=', 'ilike', '=ilike', 'like', '=like'):
# search on the name of the contacts and of its company
search_name = name
if operator in ('ilike', 'like'):
search_name = '%%%s%%' % name
if operator in ('=ilike', '=like'):
operator = operator[1:]
query_args = {'name': search_name}
# TODO: simplify this in trunk with `display_name`, once it is stored
# Perf note: a CTE expression (WITH ...) seems to have an even higher cost
# than this query with duplicated CASE expressions. The bulk of
# the cost is the ORDER BY, and it is inevitable if we want
# relevant results for the next step, otherwise we'd return
# a random selection of `limit` results.
query = ('''SELECT partner.id FROM res_partner partner
LEFT JOIN res_partner company
ON partner.parent_id = company.id
WHERE partner.national_identity ''' + operator + ''' %(name)s OR
partner.email ''' + operator + ''' %(name)s OR
CASE
WHEN company.id IS NULL OR partner.is_company
THEN partner.name
ELSE company.name || ', ' || partner.name
END ''' + operator + ''' %(name)s
ORDER BY
CASE
WHEN company.id IS NULL OR partner.is_company
THEN partner.name
ELSE company.name || ', ' || partner.name
END''')
if limit:
query += ' limit %(limit)s'
query_args['limit'] = limit
cr.execute(query, query_args)
ids = map(lambda x: x[0], cr.fetchall())
ids = self.search(
cr, uid, [('id', 'in', ids)] + args, limit=limit, context=context)
if ids:
return self.name_get(cr, uid, ids, context)
return super(res_partner, self).name_search(cr, uid, name, args, operator=operator, context=context, limit=limit)
    # Since overriding this with the new API did not work, we had to do it with the old one
# display_name = fields.Char(
# compute='_display_name', string='Name', store=True, select=True)
# @api.one
# @api.depends(
# 'name',
# 'firstname',
# 'lastname',
# 'is_company',
# 'national_identity',
# 'parent_id',
# 'parent_id.name',
# )
# def _diplay_name(self):
# self.display_name = self.with_context({}).name_get()
_display_name = lambda self, *args, **kwargs: self._display_name_compute(*args, **kwargs)
_display_name_store_triggers = {
'res.partner': (lambda self,cr,uid,ids,context=None: self.search(cr, uid, [('id','child_of',ids)], context=dict(active_test=False)),
['parent_id', 'is_company', 'name', 'national_identity'], 10)
        # national_identity is added to the trigger fields here
}
_columns = {
        'display_name': old_fields.function(_display_name, type='char', string='Name', store=_display_name_store_triggers, select=True),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| sysadminmatmoz/ingadhoc | partner_person/res_partner.py | Python | agpl-3.0 | 9,708 |
default_email_subject_template = "Merry Christmas {giver.first_name}!"
default_email_content_template = "Dear {giver.first_name},\n\
\n\
I'm very busy this year and could use a little help from you, one of my little helpers.\n\
\n\
{receiver} has been very good this year, and deserves a lovely gift.\n\
\n\
Could you please try to get something nice?\n\
\n\
Ho ho ho!!!\n\
Santa"
creation_email_subject = "You created a Secret Santa list"
creation_email_content = "Congratulations. You have created a Secret Santa list!\n\
\n\
Here is the link that people can use to sign up to the list: {0}\n\
And here is the link that you can use to review, and close the list: {1}"
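# Sketch of how these templates are expected to be filled in; the Person class
# and the URLs below are hypothetical, not part of this package:
#   class Person(object):
#       def __init__(self, first_name):
#           self.first_name = first_name
#   subject = default_email_subject_template.format(giver=Person("Alice"))
#   body = default_email_content_template.format(giver=Person("Alice"), receiver="Bob")
#   signup = creation_email_content.format("http://example.com/signup/abc",
#                                          "http://example.com/review/xyz")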
| TheUKDave/secret_santa | santa/__init__.py | Python | mit | 672 |
import copy
import numpy as np
class Face:
# constant locators for landmarks
jaw_points = np.arange(0, 17) # face contour points
eyebrow_dx_points = np.arange(17, 22)
eyebrow_sx_points = np.arange(22, 27)
nose_points = np.arange(27, 36)
nosecenter_points = np.array([30, 33])
right_eye = np.arange(36, 42)
left_eye = np.arange(42, 48)
mouth = np.arange(48, 68)
def __init__(self, img, rect=None):
"""
Utility class for a face
:param img: image containing the face
:param rect: face rectangle
"""
self.img = img
self.rect = rect
self.landmarks = None
def get_face_center(self, absolute=True):
"""
Return center coordinates of the face. Coordinates are rounded to closest int
:param absolute: if True, center is absolute to whole image, otherwise is relative to face img
:return: (x, y)
"""
if self.rect:
x, y = self.rect.get_center()
if absolute:
x += self.rect.left
y += self.rect.top
return x, y
def get_face_img(self, boundary_resize_factor: tuple = None):
"""
Return image bounded to target face (boundary is defined by rect attribute)
:return:
"""
if boundary_resize_factor:
target_rect = self.rect.resize(boundary_resize_factor)
self.rect = target_rect
else:
target_rect = self.rect
top, right, bottom, left = target_rect.get_coords()
face_img = self.img[top:bottom, left:right]
return face_img
def get_face_size(self):
"""
Return size of face as (width, height)
:return: (w, h)
"""
# w, h = self.rect.get_size()
        # Use the image itself because rect might exceed the actual image boundaries
face_img = self.get_face_img()
w, h = face_img.shape[:2][::-1]
return w, h
def get_eyes(self):
lx_eye = self.landmarks[Face.left_eye]
rx_eye = self.landmarks[Face.right_eye]
return lx_eye, rx_eye
def get_contour_points(self):
        # landmarks is assumed to already be an (N, 2) array of (x, y) points,
        # as in the other accessors of this class (Face defines no parts() method)
        points = np.asarray(self.landmarks)
face_boundary = points[np.concatenate([Face.jaw_points,
Face.eyebrow_dx_points,
Face.eyebrow_sx_points])]
return face_boundary, self.rect
def __copy__(self):
face_copy = Face(self.img.copy(), copy.copy(self.rect))
face_copy.landmarks = self.landmarks.copy()
return face_copy
class Rectangle:
def __init__(self, top, right, bottom, left):
"""
Utility class to hold information about face position/boundaries in an image
:param top:
:param right:
:param bottom:
:param left:
"""
self.top = top
self.right = right
self.bottom = bottom
self.left = left
def get_coords(self):
return self.top, self.right, self.bottom, self.left
def get_center(self):
x = (self.right - self.left)//2
y = (self.bottom - self.top)//2
return x, y
def get_size(self):
w = self.right - self.left
h = self.bottom - self.top
return w, h
def resize(self, resize_factor: tuple):
"""
Return new resized rectangle
:return:
"""
w, h = self.get_size()
# if float given, consider as expansion ratio and obtain equivalent int values
if type(resize_factor[0]) == float:
resize_factor = (int(resize_factor[0] * w),
int(resize_factor[1] * h))
# divide by two as we add the border on each side
resize_factor = (resize_factor[0] // 2, resize_factor[1] // 2)
# compute new rectangle coords
return Face.Rectangle(top=max(0, self.top - resize_factor[1]),
right=self.right + resize_factor[0],
left=max(0, self.left - resize_factor[0]),
bottom=self.bottom + resize_factor[1])
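        # Example (a sketch): for a 100x50 rectangle, resize((0.5, 0.5)) adds
        # int(0.5*100)//2 = 25 px to each horizontal side and int(0.5*50)//2 = 12 px
        # to each vertical side, while resize((20, 10)) adds 10 and 5 px per side;
        # top/left are clamped at 0.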
def __copy__(self):
return Face.Rectangle(self.top, self.right, self.bottom, self.left)
def __str__(self):
return f'top: {self.top}, left: {self.left}, bottom: {self.bottom}, right: {self.right}'
| 5agado/data-science-learning | face_utils/Face.py | Python | apache-2.0 | 4,604 |
# -*- coding: utf-8 -*-
import ast
import base64
import csv
import functools
import glob
import itertools
import jinja2
import logging
import operator
import datetime
import hashlib
import os
import re
import simplejson
import sys
import time
import urllib2
import zlib
from xml.etree import ElementTree
from cStringIO import StringIO
import babel.messages.pofile
import werkzeug.utils
import werkzeug.wrappers
try:
import xlwt
except ImportError:
xlwt = None
import openerp
import openerp.modules.registry
from openerp.addons.base.ir.ir_qweb import AssetsBundle, QWebTemplateNotFound
from openerp.modules import get_module_resource
from openerp.tools import topological_sort
from openerp.tools.translate import _
from openerp import http
from openerp.http import request, serialize_exception as _serialize_exception
_logger = logging.getLogger(__name__)
if hasattr(sys, 'frozen'):
# When running on compiled windows binary, we don't have access to package loader.
path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', 'views'))
loader = jinja2.FileSystemLoader(path)
else:
loader = jinja2.PackageLoader('openerp.addons.web', "views")
env = jinja2.Environment(loader=loader, autoescape=True)
env.filters["json"] = simplejson.dumps
# 1 week cache for asset bundles as advised by Google Page Speed
BUNDLE_MAXAGE = 60 * 60 * 24 * 7
#----------------------------------------------------------
# OpenERP Web helpers
#----------------------------------------------------------
db_list = http.db_list
db_monodb = http.db_monodb
def serialize_exception(f):
@functools.wraps(f)
def wrap(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception, e:
_logger.exception("An exception occured during an http request")
se = _serialize_exception(e)
error = {
'code': 200,
'message': "Odoo Server Error",
'data': se
}
return werkzeug.exceptions.InternalServerError(simplejson.dumps(error))
return wrap
def redirect_with_hash(*args, **kw):
"""
.. deprecated:: 8.0
Use the ``http.redirect_with_hash()`` function instead.
"""
return http.redirect_with_hash(*args, **kw)
def abort_and_redirect(url):
r = request.httprequest
response = werkzeug.utils.redirect(url, 302)
response = r.app.get_response(r, response, explicit_session=False)
werkzeug.exceptions.abort(response)
def ensure_db(redirect='/web/database/selector'):
# This helper should be used in web client auth="none" routes
# if those routes needs a db to work with.
# If the heuristics does not find any database, then the users will be
# redirected to db selector or any url specified by `redirect` argument.
# If the db is taken out of a query parameter, it will be checked against
# `http.db_filter()` in order to ensure it's legit and thus avoid db
# forgering that could lead to xss attacks.
db = request.params.get('db')
# Ensure db is legit
if db and db not in http.db_filter([db]):
db = None
if db and not request.session.db:
# User asked a specific database on a new session.
# That mean the nodb router has been used to find the route
# Depending on installed module in the database, the rendering of the page
# may depend on data injected by the database route dispatcher.
# Thus, we redirect the user to the same page but with the session cookie set.
# This will force using the database route dispatcher...
r = request.httprequest
url_redirect = r.base_url
if r.query_string:
# Can't use werkzeug.wrappers.BaseRequest.url with encoded hashes:
# https://github.com/amigrave/werkzeug/commit/b4a62433f2f7678c234cdcac6247a869f90a7eb7
url_redirect += '?' + r.query_string
response = werkzeug.utils.redirect(url_redirect, 302)
request.session.db = db
abort_and_redirect(url_redirect)
# if db not provided, use the session one
if not db and request.session.db and http.db_filter([request.session.db]):
db = request.session.db
# if no database provided and no database in session, use monodb
if not db:
db = db_monodb(request.httprequest)
# if no db can be found til here, send to the database selector
# the database selector will redirect to database manager if needed
if not db:
werkzeug.exceptions.abort(werkzeug.utils.redirect(redirect, 303))
# always switch the session to the computed db
if db != request.session.db:
request.session.logout()
abort_and_redirect(request.httprequest.url)
request.session.db = db
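# Typical use of ensure_db in an auth="none" controller (a sketch; the route and
# method names below are hypothetical):
#   @http.route('/my/page', type='http', auth='none')
#   def my_page(self, **kw):
#       ensure_db()
#       # request.session.db is now guaranteed to point at a legitimate database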
def module_installed():
# Candidates module the current heuristic is the /static dir
loadable = http.addons_manifest.keys()
modules = {}
# Retrieve database installed modules
# TODO The following code should move to ir.module.module.list_installed_modules()
Modules = request.session.model('ir.module.module')
domain = [('state','=','installed'), ('name','in', loadable)]
for module in Modules.search_read(domain, ['name', 'dependencies_id']):
modules[module['name']] = []
deps = module.get('dependencies_id')
if deps:
deps_read = request.session.model('ir.module.module.dependency').read(deps, ['name'])
dependencies = [i['name'] for i in deps_read]
modules[module['name']] = dependencies
sorted_modules = topological_sort(modules)
return sorted_modules
def module_installed_bypass_session(dbname):
loadable = http.addons_manifest.keys()
modules = {}
try:
registry = openerp.modules.registry.RegistryManager.get(dbname)
with registry.cursor() as cr:
m = registry.get('ir.module.module')
# TODO The following code should move to ir.module.module.list_installed_modules()
domain = [('state','=','installed'), ('name','in', loadable)]
ids = m.search(cr, 1, [('state','=','installed'), ('name','in', loadable)])
for module in m.read(cr, 1, ids, ['name', 'dependencies_id']):
modules[module['name']] = []
deps = module.get('dependencies_id')
if deps:
deps_read = registry.get('ir.module.module.dependency').read(cr, 1, deps, ['name'])
dependencies = [i['name'] for i in deps_read]
modules[module['name']] = dependencies
except Exception,e:
pass
sorted_modules = topological_sort(modules)
return sorted_modules
def module_boot(db=None):
server_wide_modules = openerp.conf.server_wide_modules or ['web']
serverside = []
dbside = []
for i in server_wide_modules:
if i in http.addons_manifest:
serverside.append(i)
monodb = db or db_monodb()
if monodb:
dbside = module_installed_bypass_session(monodb)
dbside = [i for i in dbside if i not in serverside]
addons = serverside + dbside
return addons
def concat_xml(file_list):
"""Concatenate xml files
:param list(str) file_list: list of files to check
:returns: (concatenation_result, checksum)
:rtype: (str, str)
"""
checksum = hashlib.new('sha1')
if not file_list:
return '', checksum.hexdigest()
root = None
for fname in file_list:
with open(fname, 'rb') as fp:
contents = fp.read()
checksum.update(contents)
fp.seek(0)
xml = ElementTree.parse(fp).getroot()
if root is None:
root = ElementTree.Element(xml.tag)
#elif root.tag != xml.tag:
# raise ValueError("Root tags missmatch: %r != %r" % (root.tag, xml.tag))
for child in xml.getchildren():
root.append(child)
return ElementTree.tostring(root, 'utf-8'), checksum.hexdigest()
def fs2web(path):
"""convert FS path into web path"""
return '/'.join(path.split(os.path.sep))
def manifest_glob(extension, addons=None, db=None, include_remotes=False):
if addons is None:
addons = module_boot(db=db)
else:
addons = addons.split(',')
r = []
for addon in addons:
manifest = http.addons_manifest.get(addon, None)
if not manifest:
continue
# ensure does not ends with /
addons_path = os.path.join(manifest['addons_path'], '')[:-1]
globlist = manifest.get(extension, [])
for pattern in globlist:
if pattern.startswith(('http://', 'https://', '//')):
if include_remotes:
r.append((None, pattern))
else:
for path in glob.glob(os.path.normpath(os.path.join(addons_path, addon, pattern))):
r.append((path, fs2web(path[len(addons_path):])))
return r
def manifest_list(extension, mods=None, db=None, debug=None):
""" list ressources to load specifying either:
mods: a comma separated string listing modules
db: a database name (return all installed modules in that database)
"""
if debug is not None:
_logger.warning("openerp.addons.web.main.manifest_list(): debug parameter is deprecated")
files = manifest_glob(extension, addons=mods, db=db, include_remotes=True)
return [wp for _fp, wp in files]
def get_last_modified(files):
""" Returns the modification time of the most recently modified
file provided
:param list(str) files: names of files to check
:return: most recent modification time amongst the fileset
:rtype: datetime.datetime
"""
files = list(files)
if files:
return max(datetime.datetime.fromtimestamp(os.path.getmtime(f))
for f in files)
return datetime.datetime(1970, 1, 1)
def make_conditional(response, last_modified=None, etag=None, max_age=0):
""" Makes the provided response conditional based upon the request,
and mandates revalidation from clients
Uses Werkzeug's own :meth:`ETagResponseMixin.make_conditional`, after
setting ``last_modified`` and ``etag`` correctly on the response object
:param response: Werkzeug response
:type response: werkzeug.wrappers.Response
:param datetime.datetime last_modified: last modification date of the response content
:param str etag: some sort of checksum of the content (deep etag)
:return: the response object provided
:rtype: werkzeug.wrappers.Response
"""
response.cache_control.must_revalidate = True
response.cache_control.max_age = max_age
if last_modified:
response.last_modified = last_modified
if etag:
response.set_etag(etag)
return response.make_conditional(request.httprequest)
def login_and_redirect(db, login, key, redirect_url='/web'):
request.session.authenticate(db, login, key)
return set_cookie_and_redirect(redirect_url)
def set_cookie_and_redirect(redirect_url):
redirect = werkzeug.utils.redirect(redirect_url, 303)
redirect.autocorrect_location_header = False
return redirect
def login_redirect():
url = '/web/login?'
if request.debug:
url += 'debug&'
return """<html><head><script>
window.location = '%sredirect=' + encodeURIComponent(window.location);
</script></head></html>
""" % (url,)
def load_actions_from_ir_values(key, key2, models, meta):
Values = request.session.model('ir.values')
actions = Values.get(key, key2, models, meta, request.context)
return [(id, name, clean_action(action))
for id, name, action in actions]
def clean_action(action):
action.setdefault('flags', {})
action_type = action.setdefault('type', 'ir.actions.act_window_close')
if action_type == 'ir.actions.act_window':
return fix_view_modes(action)
return action
# I think generate_views,fix_view_modes should go into js ActionManager
def generate_views(action):
"""
While the server generates a sequence called "views" computing dependencies
between a bunch of stuff for views coming directly from the database
(the ``ir.actions.act_window model``), it's also possible for e.g. buttons
to return custom view dictionaries generated on the fly.
In that case, there is no ``views`` key available on the action.
Since the web client relies on ``action['views']``, generate it here from
``view_mode`` and ``view_id``.
Currently handles two different cases:
* no view_id, multiple view_mode
* single view_id, single view_mode
:param dict action: action descriptor dictionary to generate a views key for
"""
view_id = action.get('view_id') or False
if isinstance(view_id, (list, tuple)):
view_id = view_id[0]
# providing at least one view mode is a requirement, not an option
view_modes = action['view_mode'].split(',')
if len(view_modes) > 1:
if view_id:
raise ValueError('Non-db action dictionaries should provide '
'either multiple view modes or a single view '
'mode and an optional view id.\n\n Got view '
'modes %r and view id %r for action %r' % (
view_modes, view_id, action))
action['views'] = [(False, mode) for mode in view_modes]
return
action['views'] = [(view_id, view_modes[0])]
def fix_view_modes(action):
""" For historical reasons, OpenERP has weird dealings in relation to
view_mode and the view_type attribute (on window actions):
* one of the view modes is ``tree``, which stands for both list views
and tree views
* the choice is made by checking ``view_type``, which is either
``form`` for a list view or ``tree`` for an actual tree view
    This method simply folds the view_type into view_mode by adding a
new view mode ``list`` which is the result of the ``tree`` view_mode
in conjunction with the ``form`` view_type.
TODO: this should go into the doc, some kind of "peculiarities" section
:param dict action: an action descriptor
:returns: nothing, the action is modified in place
"""
if not action.get('views'):
generate_views(action)
if action.pop('view_type', 'form') != 'form':
return action
if 'view_mode' in action:
action['view_mode'] = ','.join(
mode if mode != 'tree' else 'list'
for mode in action['view_mode'].split(','))
action['views'] = [
[id, mode if mode != 'tree' else 'list']
for id, mode in action['views']
]
return action
def _local_web_translations(trans_file):
messages = []
try:
with open(trans_file) as t_file:
po = babel.messages.pofile.read_po(t_file)
except Exception:
return
for x in po:
if x.id and x.string and "openerp-web" in x.auto_comments:
messages.append({'id': x.id, 'string': x.string})
return messages
def xml2json_from_elementtree(el, preserve_whitespaces=False):
""" xml2json-direct
Simple and straightforward XML-to-JSON converter in Python
New BSD Licensed
http://code.google.com/p/xml2json-direct/
"""
res = {}
if el.tag[0] == "{":
ns, name = el.tag.rsplit("}", 1)
res["tag"] = name
res["namespace"] = ns[1:]
else:
res["tag"] = el.tag
res["attrs"] = {}
for k, v in el.items():
res["attrs"][k] = v
kids = []
if el.text and (preserve_whitespaces or el.text.strip() != ''):
kids.append(el.text)
for kid in el:
kids.append(xml2json_from_elementtree(kid, preserve_whitespaces))
if kid.tail and (preserve_whitespaces or kid.tail.strip() != ''):
kids.append(kid.tail)
res["children"] = kids
return res
def content_disposition(filename):
filename = filename.encode('utf8')
escaped = urllib2.quote(filename)
browser = request.httprequest.user_agent.browser
version = int((request.httprequest.user_agent.version or '0').split('.')[0])
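    # Older IE (< 9) and Safari do not understand the RFC 5987 ``filename*``
    # syntax, so they are served a plain (percent-encoded or raw) filename instead.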
if browser == 'msie' and version < 9:
return "attachment; filename=%s" % escaped
elif browser == 'safari':
return "attachment; filename=%s" % filename
else:
return "attachment; filename*=UTF-8''%s" % escaped
#----------------------------------------------------------
# OpenERP Web web Controllers
#----------------------------------------------------------
class Home(http.Controller):
@http.route('/', type='http', auth="none")
def index(self, s_action=None, db=None, **kw):
return http.local_redirect('/web', query=request.params, keep_hash=True)
@http.route('/web', type='http', auth="none")
def web_client(self, s_action=None, **kw):
ensure_db()
if request.session.uid:
if kw.get('redirect'):
return werkzeug.utils.redirect(kw.get('redirect'), 303)
if not request.uid:
request.uid = request.session.uid
menu_data = request.registry['ir.ui.menu'].load_menus(request.cr, request.uid, context=request.context)
return request.render('web.webclient_bootstrap', qcontext={'menu_data': menu_data})
else:
return login_redirect()
@http.route('/web/login', type='http', auth="none")
def web_login(self, redirect=None, **kw):
ensure_db()
if request.httprequest.method == 'GET' and redirect and request.session.uid:
return http.redirect_with_hash(redirect)
if not request.uid:
request.uid = openerp.SUPERUSER_ID
values = request.params.copy()
if not redirect:
redirect = '/web?' + request.httprequest.query_string
values['redirect'] = redirect
try:
values['databases'] = http.db_list()
except openerp.exceptions.AccessDenied:
values['databases'] = None
if request.httprequest.method == 'POST':
old_uid = request.uid
uid = request.session.authenticate(request.session.db, request.params['login'], request.params['password'])
if uid is not False:
return http.redirect_with_hash(redirect)
request.uid = old_uid
values['error'] = "Wrong login/password"
return request.render('web.login', values)
@http.route('/login', type='http', auth="none")
def login(self, db, login, key, redirect="/web", **kw):
if not http.db_filter([db]):
return werkzeug.utils.redirect('/', 303)
return login_and_redirect(db, login, key, redirect_url=redirect)
@http.route([
'/web/js/<xmlid>',
'/web/js/<xmlid>/<version>',
], type='http', auth='public')
def js_bundle(self, xmlid, version=None, **kw):
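        # The optional <version> segment is ignored server-side; it presumably
        # only acts as a cache-busting token in the bundle URL.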
try:
bundle = AssetsBundle(xmlid)
except QWebTemplateNotFound:
return request.not_found()
response = request.make_response(bundle.js(), [('Content-Type', 'application/javascript')])
return make_conditional(response, bundle.last_modified, max_age=BUNDLE_MAXAGE)
@http.route([
'/web/css/<xmlid>',
'/web/css/<xmlid>/<version>',
], type='http', auth='public')
def css_bundle(self, xmlid, version=None, **kw):
try:
bundle = AssetsBundle(xmlid)
except QWebTemplateNotFound:
return request.not_found()
response = request.make_response(bundle.css(), [('Content-Type', 'text/css')])
return make_conditional(response, bundle.last_modified, max_age=BUNDLE_MAXAGE)
class WebClient(http.Controller):
@http.route('/web/webclient/csslist', type='json', auth="none")
def csslist(self, mods=None):
return manifest_list('css', mods=mods)
@http.route('/web/webclient/jslist', type='json', auth="none")
def jslist(self, mods=None):
return manifest_list('js', mods=mods)
@http.route('/web/webclient/qweb', type='http', auth="none")
def qweb(self, mods=None, db=None):
files = [f[0] for f in manifest_glob('qweb', addons=mods, db=db)]
last_modified = get_last_modified(files)
if request.httprequest.if_modified_since and request.httprequest.if_modified_since >= last_modified:
return werkzeug.wrappers.Response(status=304)
content, checksum = concat_xml(files)
return make_conditional(
request.make_response(content, [('Content-Type', 'text/xml')]),
last_modified, checksum)
@http.route('/web/webclient/bootstrap_translations', type='json', auth="none")
def bootstrap_translations(self, mods):
""" Load local translations from *.po files, as a temporary solution
until we have established a valid session. This is meant only
for translating the login page and db management chrome, using
the browser's language. """
# For performance reasons we only load a single translation, so for
# sub-languages (that should only be partially translated) we load the
# main language PO instead - that should be enough for the login screen.
lang = request.lang.split('_')[0]
translations_per_module = {}
for addon_name in mods:
if http.addons_manifest[addon_name].get('bootstrap'):
addons_path = http.addons_manifest[addon_name]['addons_path']
f_name = os.path.join(addons_path, addon_name, "i18n", lang + ".po")
if not os.path.exists(f_name):
continue
translations_per_module[addon_name] = {'messages': _local_web_translations(f_name)}
return {"modules": translations_per_module,
"lang_parameters": None}
@http.route('/web/webclient/translations', type='json', auth="none")
def translations(self, mods=None, lang=None):
request.disable_db = False
uid = openerp.SUPERUSER_ID
if mods is None:
m = request.registry.get('ir.module.module')
mods = [x['name'] for x in m.search_read(request.cr, uid,
[('state','=','installed')], ['name'])]
if lang is None:
lang = request.context["lang"]
res_lang = request.registry.get('res.lang')
ids = res_lang.search(request.cr, uid, [("code", "=", lang)])
lang_params = None
if ids:
lang_params = res_lang.read(request.cr, uid, ids[0], ["direction", "date_format", "time_format",
"grouping", "decimal_point", "thousands_sep"])
# Regional languages (ll_CC) must inherit/override their parent lang (ll), but this is
# done server-side when the language is loaded, so we only need to load the user's lang.
ir_translation = request.registry.get('ir.translation')
translations_per_module = {}
messages = ir_translation.search_read(request.cr, uid, [('module','in',mods),('lang','=',lang),
('comments','like','openerp-web'),('value','!=',False),
('value','!=','')],
['module','src','value','lang'], order='module')
for mod, msg_group in itertools.groupby(messages, key=operator.itemgetter('module')):
translations_per_module.setdefault(mod,{'messages':[]})
translations_per_module[mod]['messages'].extend({'id': m['src'],
'string': m['value']} \
for m in msg_group)
return {"modules": translations_per_module,
"lang_parameters": lang_params}
@http.route('/web/webclient/version_info', type='json', auth="none")
def version_info(self):
return openerp.service.common.exp_version()
@http.route('/web/tests', type='http', auth="none")
def index(self, mod=None, **kwargs):
return request.render('web.qunit_suite')
class Proxy(http.Controller):
@http.route('/web/proxy/load', type='json', auth="none")
def load(self, path):
""" Proxies an HTTP request through a JSON request.
        It is strongly recommended not to request binary files through this,
        as the result would be returned as a raw binary blob.
:param path: actual request path
:return: file content
"""
from werkzeug.test import Client
from werkzeug.wrappers import BaseResponse
base_url = request.httprequest.base_url
return Client(request.httprequest.app, BaseResponse).get(path, base_url=base_url).data
class Database(http.Controller):
@http.route('/web/database/selector', type='http', auth="none")
def selector(self, **kw):
try:
dbs = http.db_list()
if not dbs:
return http.local_redirect('/web/database/manager')
except openerp.exceptions.AccessDenied:
dbs = False
return env.get_template("database_selector.html").render({
'databases': dbs,
'debug': request.debug,
})
@http.route('/web/database/manager', type='http', auth="none")
def manager(self, **kw):
# TODO: migrate the webclient's database manager to server side views
request.session.logout()
return env.get_template("database_manager.html").render({
'modules': simplejson.dumps(module_boot()),
})
@http.route('/web/database/get_list', type='json', auth="none")
def get_list(self):
# TODO change js to avoid calling this method if in monodb mode
try:
return http.db_list()
except openerp.exceptions.AccessDenied:
monodb = db_monodb()
if monodb:
return [monodb]
raise
@http.route('/web/database/create', type='json', auth="none")
def create(self, fields):
params = dict(map(operator.itemgetter('name', 'value'), fields))
db_created = request.session.proxy("db").create_database(
params['super_admin_pwd'],
params['db_name'],
bool(params.get('demo_data')),
params['db_lang'],
params['create_admin_pwd'])
if db_created:
request.session.authenticate(params['db_name'], 'admin', params['create_admin_pwd'])
return db_created
@http.route('/web/database/duplicate', type='json', auth="none")
def duplicate(self, fields):
params = dict(map(operator.itemgetter('name', 'value'), fields))
duplicate_attrs = (
params['super_admin_pwd'],
params['db_original_name'],
params['db_name'],
)
return request.session.proxy("db").duplicate_database(*duplicate_attrs)
@http.route('/web/database/drop', type='json', auth="none")
def drop(self, fields):
password, db = operator.itemgetter(
'drop_pwd', 'drop_db')(
dict(map(operator.itemgetter('name', 'value'), fields)))
try:
if request.session.proxy("db").drop(password, db):
return True
else:
return False
except openerp.exceptions.AccessDenied:
return {'error': 'AccessDenied', 'title': 'Drop Database'}
except Exception:
return {'error': _('Could not drop database !'), 'title': _('Drop Database')}
@http.route('/web/database/backup', type='http', auth="none")
def backup(self, backup_db, backup_pwd, token):
try:
db_dump = base64.b64decode(
request.session.proxy("db").dump(backup_pwd, backup_db))
filename = "%(db)s_%(timestamp)s.dump" % {
'db': backup_db,
'timestamp': datetime.datetime.utcnow().strftime(
"%Y-%m-%d_%H-%M-%SZ")
}
return request.make_response(db_dump,
[('Content-Type', 'application/octet-stream; charset=binary'),
('Content-Disposition', content_disposition(filename))],
{'fileToken': token}
)
except Exception, e:
return simplejson.dumps([[],[{'error': openerp.tools.ustr(e), 'title': _('Backup Database')}]])
@http.route('/web/database/restore', type='http', auth="none")
def restore(self, db_file, restore_pwd, new_db, mode):
try:
copy = mode == 'copy'
data = base64.b64encode(db_file.read())
request.session.proxy("db").restore(restore_pwd, new_db, data, copy)
return ''
except openerp.exceptions.AccessDenied, e:
raise Exception("AccessDenied")
@http.route('/web/database/change_password', type='json', auth="none")
def change_password(self, fields):
old_password, new_password = operator.itemgetter(
'old_pwd', 'new_pwd')(
dict(map(operator.itemgetter('name', 'value'), fields)))
try:
return request.session.proxy("db").change_admin_password(old_password, new_password)
except openerp.exceptions.AccessDenied:
return {'error': 'AccessDenied', 'title': _('Change Password')}
except Exception:
return {'error': _('Error, password not changed !'), 'title': _('Change Password')}
class Session(http.Controller):
def session_info(self):
request.session.ensure_valid()
return {
"session_id": request.session_id,
"uid": request.session.uid,
"user_context": request.session.get_context() if request.session.uid else {},
"db": request.session.db,
"username": request.session.login,
}
@http.route('/web/session/get_session_info', type='json', auth="none")
def get_session_info(self):
request.uid = request.session.uid
request.disable_db = False
return self.session_info()
@http.route('/web/session/authenticate', type='json', auth="none")
def authenticate(self, db, login, password, base_location=None):
request.session.authenticate(db, login, password)
return self.session_info()
@http.route('/web/session/change_password', type='json', auth="user")
def change_password(self, fields):
old_password, new_password,confirm_password = operator.itemgetter('old_pwd', 'new_password','confirm_pwd')(
dict(map(operator.itemgetter('name', 'value'), fields)))
if not (old_password.strip() and new_password.strip() and confirm_password.strip()):
return {'error':_('You cannot leave any password empty.'),'title': _('Change Password')}
if new_password != confirm_password:
return {'error': _('The new password and its confirmation must be identical.'),'title': _('Change Password')}
try:
if request.session.model('res.users').change_password(
old_password, new_password):
return {'new_password':new_password}
except Exception:
return {'error': _('The old password you provided is incorrect, your password was not changed.'), 'title': _('Change Password')}
return {'error': _('Error, password not changed !'), 'title': _('Change Password')}
@http.route('/web/session/get_lang_list', type='json', auth="none")
def get_lang_list(self):
try:
return request.session.proxy("db").list_lang() or []
except Exception, e:
return {"error": e, "title": _("Languages")}
@http.route('/web/session/modules', type='json', auth="user")
def modules(self):
# return all installed modules. Web client is smart enough to not load a module twice
return module_installed()
@http.route('/web/session/save_session_action', type='json', auth="user")
def save_session_action(self, the_action):
"""
        This method stores an action object in the session and returns an integer
identifying that action. The method get_session_action() can be used to get
back the action.
:param the_action: The action to save in the session.
:type the_action: anything
:return: A key identifying the saved action.
:rtype: integer
"""
return request.httpsession.save_action(the_action)
@http.route('/web/session/get_session_action', type='json', auth="user")
def get_session_action(self, key):
"""
        Gets back a previously saved action. This method can return None if the action
        was saved too long ago (that case should be handled gracefully by the caller).
:param key: The key given by save_session_action()
:type key: integer
:return: The saved action or None.
:rtype: anything
"""
return request.httpsession.get_action(key)
@http.route('/web/session/check', type='json', auth="user")
def check(self):
request.session.assert_valid()
return None
@http.route('/web/session/destroy', type='json', auth="user")
def destroy(self):
request.session.logout()
@http.route('/web/session/logout', type='http', auth="none")
def logout(self, redirect='/web'):
request.session.logout(keep_db=True)
return werkzeug.utils.redirect(redirect, 303)
class Menu(http.Controller):
@http.route('/web/menu/load_needaction', type='json', auth="user")
def load_needaction(self, menu_ids):
""" Loads needaction counters for specific menu ids.
:return: needaction data
:rtype: dict(menu_id: {'needaction_enabled': boolean, 'needaction_counter': int})
"""
return request.session.model('ir.ui.menu').get_needaction_data(menu_ids, request.context)
class DataSet(http.Controller):
@http.route('/web/dataset/search_read', type='json', auth="user")
def search_read(self, model, fields=False, offset=0, limit=False, domain=None, sort=None):
return self.do_search_read(model, fields, offset, limit, domain, sort)
def do_search_read(self, model, fields=False, offset=0, limit=False, domain=None
, sort=None):
""" Performs a search() followed by a read() (if needed) using the
provided search criteria
:param str model: the name of the model to search on
:param fields: a list of the fields to return in the result records
:type fields: [str]
:param int offset: from which index should the results start being returned
:param int limit: the maximum number of records to return
:param list domain: the search domain for the query
:param list sort: sorting directives
        :returns: A structure (dict) with two keys: ``length`` (the total number
                  of records matching the domain) and ``records`` (the paginated
                  records, restricted to the requested fields)
        :rtype: dict
"""
Model = request.session.model(model)
records = Model.search_read(domain, fields, offset or 0, limit or False, sort or False,
request.context)
if not records:
return {
'length': 0,
'records': []
}
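        # When the page is full there may be more records behind it, so the
        # total count has to be asked from the server; otherwise
        # offset + len(records) already is the total.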
if limit and len(records) == limit:
length = Model.search_count(domain, request.context)
else:
length = len(records) + (offset or 0)
return {
'length': length,
'records': records
}
@http.route('/web/dataset/load', type='json', auth="user")
def load(self, model, id, fields):
m = request.session.model(model)
value = {}
r = m.read([id], False, request.context)
if r:
value = r[0]
return {'value': value}
def call_common(self, model, method, args, domain_id=None, context_id=None):
return self._call_kw(model, method, args, {})
def _call_kw(self, model, method, args, kwargs):
        # Temporarily implement the future display_name special field for model#read()
if method in ('read', 'search_read') and kwargs.get('context', {}).get('future_display_name'):
if 'display_name' in args[1]:
if method == 'read':
names = dict(request.session.model(model).name_get(args[0], **kwargs))
else:
names = dict(request.session.model(model).name_search('', args[0], **kwargs))
args[1].remove('display_name')
records = getattr(request.session.model(model), method)(*args, **kwargs)
for record in records:
record['display_name'] = \
names.get(record['id']) or "{0}#{1}".format(model, (record['id']))
return records
if method.startswith('_'):
raise Exception("Access Denied: Underscore prefixed methods cannot be remotely called")
return getattr(request.registry.get(model), method)(request.cr, request.uid, *args, **kwargs)
@http.route('/web/dataset/call', type='json', auth="user")
def call(self, model, method, args, domain_id=None, context_id=None):
return self._call_kw(model, method, args, {})
@http.route(['/web/dataset/call_kw', '/web/dataset/call_kw/<path:path>'], type='json', auth="user")
def call_kw(self, model, method, args, kwargs, path=None):
return self._call_kw(model, method, args, kwargs)
@http.route('/web/dataset/call_button', type='json', auth="user")
def call_button(self, model, method, args, domain_id=None, context_id=None):
action = self._call_kw(model, method, args, {})
if isinstance(action, dict) and action.get('type') != '':
return clean_action(action)
return False
@http.route('/web/dataset/exec_workflow', type='json', auth="user")
def exec_workflow(self, model, id, signal):
return request.session.exec_workflow(model, id, signal)
@http.route('/web/dataset/resequence', type='json', auth="user")
def resequence(self, model, ids, field='sequence', offset=0):
""" Re-sequences a number of records in the model, by their ids
        The re-sequencing starts at the first record of ``ids``; the sequence
        number starts at ``offset`` and is incremented by one for each record
:param ids: identifiers of the records to resequence, in the new sequence order
:type ids: list(id)
:param str field: field used for sequence specification, defaults to
"sequence"
:param int offset: sequence number for first record in ``ids``, allows
starting the resequencing from an arbitrary number,
defaults to ``0``
"""
m = request.session.model(model)
if not m.fields_get([field]):
return False
        # add the offset manually instead of relying on enumerate()'s start parameter
for i, id in enumerate(ids):
m.write(id, { field: i + offset })
return True
class View(http.Controller):
@http.route('/web/view/add_custom', type='json', auth="user")
def add_custom(self, view_id, arch):
CustomView = request.session.model('ir.ui.view.custom')
CustomView.create({
'user_id': request.session.uid,
'ref_id': view_id,
'arch': arch
}, request.context)
return {'result': True}
@http.route('/web/view/undo_custom', type='json', auth="user")
def undo_custom(self, view_id, reset=False):
CustomView = request.session.model('ir.ui.view.custom')
vcustom = CustomView.search([('user_id', '=', request.session.uid), ('ref_id' ,'=', view_id)],
0, False, False, request.context)
if vcustom:
if reset:
CustomView.unlink(vcustom, request.context)
else:
CustomView.unlink([vcustom[0]], request.context)
return {'result': True}
return {'result': False}
class TreeView(View):
@http.route('/web/treeview/action', type='json', auth="user")
def action(self, model, id):
return load_actions_from_ir_values(
'action', 'tree_but_open',[(model, id)],
False)
class Binary(http.Controller):
@http.route('/web/binary/image', type='http', auth="public")
def image(self, model, id, field, **kw):
last_update = '__last_update'
Model = request.session.model(model)
headers = [('Content-Type', 'image/png')]
etag = request.httprequest.headers.get('If-None-Match')
hashed_session = hashlib.md5(request.session_id).hexdigest()
retag = hashed_session
id = None if not id else simplejson.loads(id)
if type(id) is list:
id = id[0] # m2o
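        # Conditional GET handling: without a record id the image comes from
        # default_get() and only varies per session, so the session hash is the
        # ETag; with an id, the md5 of the record's __last_update is used instead.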
try:
if etag:
if not id and hashed_session == etag:
return werkzeug.wrappers.Response(status=304)
else:
date = Model.read([id], [last_update], request.context)[0].get(last_update)
if hashlib.md5(date).hexdigest() == etag:
return werkzeug.wrappers.Response(status=304)
if not id:
res = Model.default_get([field], request.context).get(field)
image_base64 = res
else:
res = Model.read([id], [last_update, field], request.context)[0]
retag = hashlib.md5(res.get(last_update)).hexdigest()
image_base64 = res.get(field)
if kw.get('resize'):
resize = kw.get('resize').split(',')
if len(resize) == 2 and int(resize[0]) and int(resize[1]):
width = int(resize[0])
height = int(resize[1])
# resize maximum 500*500
if width > 500: width = 500
if height > 500: height = 500
image_base64 = openerp.tools.image_resize_image(base64_source=image_base64, size=(width, height), encoding='base64', filetype='PNG')
image_data = base64.b64decode(image_base64)
except Exception:
image_data = self.placeholder()
headers.append(('ETag', retag))
headers.append(('Content-Length', len(image_data)))
try:
ncache = int(kw.get('cache'))
headers.append(('Cache-Control', 'no-cache' if ncache == 0 else 'max-age=%s' % (ncache)))
except:
pass
return request.make_response(image_data, headers)
def placeholder(self, image='placeholder.png'):
addons_path = http.addons_manifest['web']['addons_path']
return open(os.path.join(addons_path, 'web', 'static', 'src', 'img', image), 'rb').read()
@http.route('/web/binary/saveas', type='http', auth="public")
@serialize_exception
def saveas(self, model, field, id=None, filename_field=None, **kw):
""" Download link for files stored as binary fields.
If the ``id`` parameter is omitted, fetches the default value for the
binary field (via ``default_get``), otherwise fetches the field for
that precise record.
:param str model: name of the model to fetch the binary from
:param str field: binary field
:param str id: id of the record from which to fetch the binary
:param str filename_field: field holding the file's name, if any
:returns: :class:`werkzeug.wrappers.Response`
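        Illustrative example (model, field and id are made up):
        ``/web/binary/saveas?model=ir.attachment&field=datas&id=1&filename_field=name``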
"""
Model = request.session.model(model)
fields = [field]
if filename_field:
fields.append(filename_field)
if id:
res = Model.read([int(id)], fields, request.context)[0]
else:
res = Model.default_get(fields, request.context)
filecontent = base64.b64decode(res.get(field, ''))
if not filecontent:
return request.not_found()
else:
filename = '%s_%s' % (model.replace('.', '_'), id)
if filename_field:
filename = res.get(filename_field, '') or filename
return request.make_response(filecontent,
[('Content-Type', 'application/octet-stream'),
('Content-Disposition', content_disposition(filename))])
@http.route('/web/binary/saveas_ajax', type='http', auth="public")
@serialize_exception
def saveas_ajax(self, data, token):
jdata = simplejson.loads(data)
model = jdata['model']
field = jdata['field']
data = jdata['data']
id = jdata.get('id', None)
filename_field = jdata.get('filename_field', None)
context = jdata.get('context', {})
Model = request.session.model(model)
fields = [field]
if filename_field:
fields.append(filename_field)
if data:
res = { field: data }
elif id:
res = Model.read([int(id)], fields, context)[0]
else:
res = Model.default_get(fields, context)
filecontent = base64.b64decode(res.get(field, ''))
if not filecontent:
raise ValueError(_("No content found for field '%s' on '%s:%s'") %
(field, model, id))
else:
filename = '%s_%s' % (model.replace('.', '_'), id)
if filename_field:
filename = res.get(filename_field, '') or filename
return request.make_response(filecontent,
headers=[('Content-Type', 'application/octet-stream'),
('Content-Disposition', content_disposition(filename))],
cookies={'fileToken': token})
@http.route('/web/binary/upload', type='http', auth="user")
@serialize_exception
def upload(self, callback, ufile):
# TODO: might be useful to have a configuration flag for max-length file uploads
out = """<script language="javascript" type="text/javascript">
var win = window.top.window;
win.jQuery(win).trigger(%s, %s);
</script>"""
try:
data = ufile.read()
args = [len(data), ufile.filename,
ufile.content_type, base64.b64encode(data)]
except Exception, e:
args = [False, e.message]
return out % (simplejson.dumps(callback), simplejson.dumps(args))
@http.route('/web/binary/upload_attachment', type='http', auth="user")
@serialize_exception
def upload_attachment(self, callback, model, id, ufile):
Model = request.session.model('ir.attachment')
out = """<script language="javascript" type="text/javascript">
var win = window.top.window;
win.jQuery(win).trigger(%s, %s);
</script>"""
try:
attachment_id = Model.create({
'name': ufile.filename,
'datas': base64.encodestring(ufile.read()),
'datas_fname': ufile.filename,
'res_model': model,
'res_id': int(id)
}, request.context)
args = {
'filename': ufile.filename,
'id': attachment_id
}
except Exception:
args = {'error': "Something horrible happened"}
return out % (simplejson.dumps(callback), simplejson.dumps(args))
@http.route([
'/web/binary/company_logo',
'/logo',
'/logo.png',
], type='http', auth="none", cors="*")
def company_logo(self, dbname=None, **kw):
imgname = 'logo.png'
placeholder = functools.partial(get_module_resource, 'web', 'static', 'src', 'img')
uid = None
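        # Figure out which database and user the logo should be read from:
        # prefer the current session, then the explicit ?dbname= parameter,
        # then a single-database setup; failing that, a static placeholder is sent.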
if request.session.db:
dbname = request.session.db
uid = request.session.uid
elif dbname is None:
dbname = db_monodb()
if not uid:
uid = openerp.SUPERUSER_ID
if not dbname:
response = http.send_file(placeholder(imgname))
else:
try:
# create an empty registry
registry = openerp.modules.registry.Registry(dbname)
with registry.cursor() as cr:
cr.execute("""SELECT c.logo_web, c.write_date
FROM res_users u
LEFT JOIN res_company c
ON c.id = u.company_id
WHERE u.id = %s
""", (uid,))
row = cr.fetchone()
if row and row[0]:
image_data = StringIO(str(row[0]).decode('base64'))
response = http.send_file(image_data, filename=imgname, mtime=row[1])
else:
response = http.send_file(placeholder('nologo.png'))
except Exception:
response = http.send_file(placeholder(imgname))
return response
class Action(http.Controller):
@http.route('/web/action/load', type='json', auth="user")
def load(self, action_id, do_not_eval=False, additional_context=None):
Actions = request.session.model('ir.actions.actions')
value = False
try:
action_id = int(action_id)
except ValueError:
try:
module, xmlid = action_id.split('.', 1)
model, action_id = request.session.model('ir.model.data').get_object_reference(module, xmlid)
assert model.startswith('ir.actions.')
except Exception:
action_id = 0 # force failed read
base_action = Actions.read([action_id], ['type'], request.context)
if base_action:
ctx = request.context
action_type = base_action[0]['type']
if action_type == 'ir.actions.report.xml':
ctx.update({'bin_size': True})
if additional_context:
ctx.update(additional_context)
action = request.session.model(action_type).read([action_id], False, ctx)
if action:
value = clean_action(action[0])
return value
@http.route('/web/action/run', type='json', auth="user")
def run(self, action_id):
return_action = request.session.model('ir.actions.server').run(
[action_id], request.context)
if return_action:
return clean_action(return_action)
else:
return False
class Export(http.Controller):
@http.route('/web/export/formats', type='json', auth="user")
def formats(self):
""" Returns all valid export formats
        :returns: for each export format, a dict with its identifier (``tag``),
                  its printable name (``label``) and an optional ``error`` message
        :rtype: [dict]
"""
return [
{'tag': 'csv', 'label': 'CSV'},
{'tag': 'xls', 'label': 'Excel', 'error': None if xlwt else "XLWT required"},
]
def fields_get(self, model):
Model = request.session.model(model)
fields = Model.fields_get(False, request.context)
return fields
@http.route('/web/export/get_fields', type='json', auth="user")
def get_fields(self, model, prefix='', parent_name= '',
import_compat=True, parent_field_type=None,
exclude=None):
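        # Builds one level of the field tree shown in the export dialog;
        # relational fields are flagged with 'children': True so the client can
        # fetch their sub-fields lazily with further calls to this route.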
if import_compat and parent_field_type == "many2one":
fields = {}
else:
fields = self.fields_get(model)
if import_compat:
fields.pop('id', None)
else:
fields['.id'] = fields.pop('id', {'string': 'ID'})
fields_sequence = sorted(fields.iteritems(),
key=lambda field: openerp.tools.ustr(field[1].get('string', '')))
records = []
for field_name, field in fields_sequence:
if import_compat:
if exclude and field_name in exclude:
continue
if field.get('readonly'):
# If none of the field's states unsets readonly, skip the field
if all(dict(attrs).get('readonly', True)
for attrs in field.get('states', {}).values()):
continue
if not field.get('exportable', True):
continue
            id = prefix + (prefix and '/' or '') + field_name
name = parent_name + (parent_name and '/' or '') + field['string']
record = {'id': id, 'string': name,
'value': id, 'children': False,
'field_type': field.get('type'),
'required': field.get('required'),
'relation_field': field.get('relation_field')}
records.append(record)
if len(name.split('/')) < 3 and 'relation' in field:
ref = field.pop('relation')
record['value'] += '/id'
record['params'] = {'model': ref, 'prefix': id, 'name': name}
if not import_compat or field['type'] == 'one2many':
# m2m field in import_compat is childless
record['children'] = True
return records
@http.route('/web/export/namelist', type='json', auth="user")
def namelist(self, model, export_id):
# TODO: namelist really has no reason to be in Python (although itertools.groupby helps)
export = request.session.model("ir.exports").read([export_id])[0]
export_fields_list = request.session.model("ir.exports.line").read(
export['export_fields'])
fields_data = self.fields_info(
model, map(operator.itemgetter('name'), export_fields_list))
return [
{'name': field['name'], 'label': fields_data[field['name']]}
for field in export_fields_list
]
def fields_info(self, model, export_fields):
info = {}
fields = self.fields_get(model)
if ".id" in export_fields:
fields['.id'] = fields.pop('id', {'string': 'ID'})
# To make fields retrieval more efficient, fetch all sub-fields of a
# given field at the same time. Because the order in the export list is
# arbitrary, this requires ordering all sub-fields of a given field
# together so they can be fetched at the same time
#
# Works the following way:
# * sort the list of fields to export, the default sorting order will
# put the field itself (if present, for xmlid) and all of its
# sub-fields right after it
# * then, group on: the first field of the path (which is the same for
# a field and for its subfields and the length of splitting on the
# first '/', which basically means grouping the field on one side and
# all of the subfields on the other. This way, we have the field (for
# the xmlid) with length 1, and all of the subfields with the same
# base but a length "flag" of 2
# * if we have a normal field (length 1), just add it to the info
# mapping (with its string) as-is
# * otherwise, recursively call fields_info via graft_subfields.
# all graft_subfields does is take the result of fields_info (on the
# field's model) and prepend the current base (current field), which
# rebuilds the whole sub-tree for the field
#
# result: because we're not fetching the fields_get for half the
# database models, fetching a namelist with a dozen fields (including
# relational data) falls from ~6s to ~300ms (on the leads model).
# export lists with no sub-fields (e.g. import_compatible lists with
# no o2m) are even more efficient (from the same 6s to ~170ms, as
# there's a single fields_get to execute)
for (base, length), subfields in itertools.groupby(
sorted(export_fields),
lambda field: (field.split('/', 1)[0], len(field.split('/', 1)))):
subfields = list(subfields)
if length == 2:
# subfields is a seq of $base/*rest, and not loaded yet
info.update(self.graft_subfields(
fields[base]['relation'], base, fields[base]['string'],
subfields
))
elif base in fields:
info[base] = fields[base]['string']
return info
def graft_subfields(self, model, prefix, prefix_string, fields):
export_fields = [field.split('/', 1)[1] for field in fields]
return (
(prefix + '/' + k, prefix_string + '/' + v)
for k, v in self.fields_info(model, export_fields).iteritems())
class ExportFormat(object):
raw_data = False
@property
def content_type(self):
""" Provides the format's content type """
raise NotImplementedError()
def filename(self, base):
""" Creates a valid filename for the format (with extension) from the
        provided base name (extension-less)
"""
raise NotImplementedError()
def from_data(self, fields, rows):
""" Conversion method from OpenERP's export data to whatever the
current export class outputs
        :param list fields: a list of field names to export
        :param list rows: a list of records to export
        :returns: the complete export serialized in this class's output format
:rtype: bytes
"""
raise NotImplementedError()
def base(self, data, token):
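        # `data` is the JSON-encoded export request built by the web client
        # (model, fields, ids, domain, import_compat); rows are fetched through
        # export_data() and rendered by the format-specific from_data().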
params = simplejson.loads(data)
model, fields, ids, domain, import_compat = \
operator.itemgetter('model', 'fields', 'ids', 'domain',
'import_compat')(
params)
Model = request.session.model(model)
context = dict(request.context or {}, **params.get('context', {}))
ids = ids or Model.search(domain, 0, False, False, context)
field_names = map(operator.itemgetter('name'), fields)
import_data = Model.export_data(ids, field_names, self.raw_data, context=context).get('datas',[])
if import_compat:
columns_headers = field_names
else:
columns_headers = [val['label'].strip() for val in fields]
return request.make_response(self.from_data(columns_headers, import_data),
headers=[('Content-Disposition',
content_disposition(self.filename(model))),
('Content-Type', self.content_type)],
cookies={'fileToken': token})
class CSVExport(ExportFormat, http.Controller):
@http.route('/web/export/csv', type='http', auth="user")
@serialize_exception
def index(self, data, token):
return self.base(data, token)
@property
def content_type(self):
return 'text/csv;charset=utf8'
def filename(self, base):
return base + '.csv'
def from_data(self, fields, rows):
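        # csv.writer only deals with byte strings here, so every cell is encoded
        # as UTF-8; False (OpenERP's null value) is written out as an empty cell.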
fp = StringIO()
writer = csv.writer(fp, quoting=csv.QUOTE_ALL)
writer.writerow([name.encode('utf-8') for name in fields])
for data in rows:
row = []
for d in data:
if isinstance(d, basestring):
d = d.replace('\n',' ').replace('\t',' ')
try:
d = d.encode('utf-8')
except UnicodeError:
pass
if d is False: d = None
row.append(d)
writer.writerow(row)
fp.seek(0)
data = fp.read()
fp.close()
return data
class ExcelExport(ExportFormat, http.Controller):
# Excel needs raw data to correctly handle numbers and date values
raw_data = True
@http.route('/web/export/xls', type='http', auth="user")
@serialize_exception
def index(self, data, token):
return self.base(data, token)
@property
def content_type(self):
return 'application/vnd.ms-excel'
def filename(self, base):
return base + '.xls'
def from_data(self, fields, rows):
workbook = xlwt.Workbook()
worksheet = workbook.add_sheet('Sheet 1')
for i, fieldname in enumerate(fields):
worksheet.write(0, i, fieldname)
worksheet.col(i).width = 8000 # around 220 pixels
base_style = xlwt.easyxf('align: wrap yes')
date_style = xlwt.easyxf('align: wrap yes', num_format_str='YYYY-MM-DD')
datetime_style = xlwt.easyxf('align: wrap yes', num_format_str='YYYY-MM-DD HH:mm:SS')
for row_index, row in enumerate(rows):
for cell_index, cell_value in enumerate(row):
cell_style = base_style
if isinstance(cell_value, basestring):
cell_value = re.sub("\r", " ", cell_value)
elif isinstance(cell_value, datetime.datetime):
cell_style = datetime_style
elif isinstance(cell_value, datetime.date):
cell_style = date_style
worksheet.write(row_index + 1, cell_index, cell_value, cell_style)
fp = StringIO()
workbook.save(fp)
fp.seek(0)
data = fp.read()
fp.close()
return data
class Reports(http.Controller):
POLLING_DELAY = 0.25
TYPES_MAPPING = {
'doc': 'application/vnd.ms-word',
'html': 'text/html',
'odt': 'application/vnd.oasis.opendocument.text',
'pdf': 'application/pdf',
'sxw': 'application/vnd.sun.xml.writer',
'xls': 'application/vnd.ms-excel',
}
@http.route('/web/report', type='http', auth="user")
@serialize_exception
def index(self, action, token):
action = simplejson.loads(action)
report_srv = request.session.proxy("report")
context = dict(request.context)
context.update(action["context"])
report_data = {}
report_ids = context.get("active_ids", None)
if 'report_type' in action:
report_data['report_type'] = action['report_type']
if 'datas' in action:
if 'ids' in action['datas']:
report_ids = action['datas'].pop('ids')
report_data.update(action['datas'])
report_id = report_srv.report(
request.session.db, request.session.uid, request.session.password,
action["report_name"], report_ids,
report_data, context)
report_struct = None
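        # The legacy report service renders asynchronously: poll report_get()
        # until the result is flagged as ready, sleeping POLLING_DELAY between polls.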
while True:
report_struct = report_srv.report_get(
request.session.db, request.session.uid, request.session.password, report_id)
if report_struct["state"]:
break
time.sleep(self.POLLING_DELAY)
report = base64.b64decode(report_struct['result'])
if report_struct.get('code') == 'zlib':
report = zlib.decompress(report)
report_mimetype = self.TYPES_MAPPING.get(
report_struct['format'], 'octet-stream')
file_name = action.get('name', 'report')
if 'name' not in action:
reports = request.session.model('ir.actions.report.xml')
res_id = reports.search([('report_name', '=', action['report_name']),],
0, False, False, context)
if len(res_id) > 0:
file_name = reports.read(res_id[0], ['name'], context)['name']
else:
file_name = action['report_name']
file_name = '%s.%s' % (file_name, report_struct['format'])
return request.make_response(report,
headers=[
('Content-Disposition', content_disposition(file_name)),
('Content-Type', report_mimetype),
('Content-Length', len(report))],
cookies={'fileToken': token})
class Apps(http.Controller):
@http.route('/apps/<app>', auth='user')
def get_app_url(self, req, app):
act_window_obj = request.session.model('ir.actions.act_window')
ir_model_data = request.session.model('ir.model.data')
try:
action_id = ir_model_data.get_object_reference('base', 'open_module_tree')[1]
action = act_window_obj.read(action_id, ['name', 'type', 'res_model', 'view_mode', 'view_type', 'context', 'views', 'domain'])
action['target'] = 'current'
except ValueError:
action = False
try:
app_id = ir_model_data.get_object_reference('base', 'module_%s' % app)[1]
except ValueError:
app_id = False
if action and app_id:
action['res_id'] = app_id
action['view_mode'] = 'form'
action['views'] = [(False, u'form')]
sakey = Session().save_session_action(action)
debug = '?debug' if req.debug else ''
return werkzeug.utils.redirect('/web{0}#sa={1}'.format(debug, sakey))
# vim:expandtab:tabstop=4:softtabstop=4:shiftwidth=4:
| doganaltunbay/odoo | addons/web/controllers/main.py | Python | agpl-3.0 | 65,240 |