text (string, lengths 4 to 1.02M) | meta (dict)
---|---|
from __future__ import absolute_import
import numpy as np
from scipy.ndimage import grey_opening
from .common import WhittakerSmoother, Baseline
def mpls_baseline(intensities, smoothness_param=100, deriv_order=1,
window_length=100):
'''Perform morphological weighted penalized least squares baseline removal.
* paper: DOI: 10.1039/C3AN00743J (Paper) Analyst, 2013, 138, 4483-4492
* Matlab code: https://code.google.com/p/mpls/
smoothness_param: Relative importance of smoothness of the predicted response.
deriv_order: Polynomial order of the difference of penalties.
window_length: size of the structuring element for the open operation.
'''
Xbg = grey_opening(intensities, window_length)
# find runs of equal values in Xbg
flat = (np.diff(Xbg) != 0).astype(np.int8)
run_idx, = np.where(np.diff(flat))
# local minimums between flat runs
bounds = run_idx[1:-1] if len(run_idx) % 2 == 0 else run_idx[1:]
bounds = bounds.reshape((-1, 2)) + (1, 2)
min_idxs = np.array([np.argmin(Xbg[s:t]) for s,t in bounds], dtype=int)
min_idxs += bounds[:,0]
# create the weight vector by setting 1 at each local min
w = np.zeros_like(intensities)
w[min_idxs] = 1
# make sure we stick to the ends
w[0] = 5
w[-1] = 5
# run one iteration of smoothing
smoother = WhittakerSmoother(Xbg, smoothness_param,
deriv_order=deriv_order)
return smoother.smooth(w)
class MPLS(Baseline):
def __init__(self, smoothness_param=100, deriv_order=1, window_length=100):
self.smoothness_ = smoothness_param
self.window_ = window_length
self.order_ = deriv_order
def _fit_one(self, bands, intensities):
return mpls_baseline(intensities, smoothness_param=self.smoothness_,
deriv_order=self.order_, window_length=self.window_)
def param_ranges(self):
return {
'smoothness_': (1, 1e5, 'log'),
'window_': (3, 500, 'integer'),
'order_': (1, 2, 'integer'),
}
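# Minimal usage sketch (not part of the original module): it assumes a 1-D
# numpy array of intensities and that the package's WhittakerSmoother
# dependency is importable; the synthetic spectrum below is purely illustrative.
if __name__ == '__main__':
    x = np.linspace(0, 10, 500)
    # one Gaussian peak sitting on a sloping baseline
    spectrum = np.exp(-(x - 5) ** 2) + 0.1 * x
    baseline = mpls_baseline(spectrum, smoothness_param=100, deriv_order=1,
                             window_length=100)
    corrected = spectrum - baseline
    print('estimated baseline for %d points' % len(baseline))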
| {
"content_hash": "910041a0d26035baf607df0fa3eb1c2d",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 80,
"avg_line_length": 37.698113207547166,
"alnum_prop": 0.6681681681681682,
"repo_name": "all-umass/superman",
"id": "b0fe7de1e222611bd15a1ac25f5029cedcdb1944",
"size": "1998",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "superman/baseline/mpls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "188631"
}
],
"symlink_target": ""
} |
from lino.api import ad, _
class Plugin(ad.Plugin):
verbose_name = _("Tables")
def setup_main_menu(self, site, profile, m):
m = m.add_menu(self.app_label, self.verbose_name)
m.add_action('tables.Authors')
m.add_action('tables.Books')
| {
"content_hash": "af3b75c7a20b15961cee507e04111ad0",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 57,
"avg_line_length": 22.916666666666668,
"alnum_prop": 0.6072727272727273,
"repo_name": "lino-framework/book",
"id": "65caf14a45739be33c93d8ccf68cc89888553230",
"size": "275",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "lino_book/projects/confdirs/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "3668"
},
{
"name": "JavaScript",
"bytes": "7140"
},
{
"name": "Python",
"bytes": "991438"
},
{
"name": "Shell",
"bytes": "989"
}
],
"symlink_target": ""
} |
import mock
from nova import block_device
from nova import context
from nova.openstack.common import jsonutils
from nova import test
from nova.tests import matchers
from nova.virt import block_device as driver_block_device
from nova.virt import driver
from nova.volume import cinder
from nova.volume import encryptors
class TestDriverBlockDevice(test.NoDBTestCase):
driver_classes = {
'swap': driver_block_device.DriverSwapBlockDevice,
'ephemeral': driver_block_device.DriverEphemeralBlockDevice,
'volume': driver_block_device.DriverVolumeBlockDevice,
'snapshot': driver_block_device.DriverSnapshotBlockDevice,
'image': driver_block_device.DriverImageBlockDevice
}
swap_bdm = block_device.BlockDeviceDict(
{'id': 1, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sdb1',
'source_type': 'blank',
'destination_type': 'local',
'delete_on_termination': True,
'guest_format': 'swap',
'disk_bus': 'scsi',
'volume_size': 2,
'boot_index': -1})
swap_driver_bdm = {
'device_name': '/dev/sdb1',
'swap_size': 2,
'disk_bus': 'scsi'}
swap_legacy_driver_bdm = {
'device_name': '/dev/sdb1',
'swap_size': 2}
ephemeral_bdm = block_device.BlockDeviceDict(
{'id': 2, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sdc1',
'source_type': 'blank',
'destination_type': 'local',
'disk_bus': 'scsi',
'device_type': 'disk',
'volume_size': 4,
'guest_format': 'ext4',
'delete_on_termination': True,
'boot_index': -1})
ephemeral_driver_bdm = {
'device_name': '/dev/sdc1',
'size': 4,
'device_type': 'disk',
'guest_format': 'ext4',
'disk_bus': 'scsi'}
ephemeral_legacy_driver_bdm = {
'device_name': '/dev/sdc1',
'size': 4,
'virtual_name': 'ephemeral0',
'num': 0}
volume_bdm = block_device.BlockDeviceDict(
{'id': 3, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sda1',
'source_type': 'volume',
'disk_bus': 'scsi',
'device_type': 'disk',
'volume_size': 8,
'destination_type': 'volume',
'volume_id': 'fake-volume-id-1',
'guest_format': 'ext4',
'connection_info': '{"fake": "connection_info"}',
'delete_on_termination': False,
'boot_index': 0})
volume_driver_bdm = {
'mount_device': '/dev/sda1',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': False,
'disk_bus': 'scsi',
'device_type': 'disk',
'guest_format': 'ext4',
'boot_index': 0}
volume_legacy_driver_bdm = {
'mount_device': '/dev/sda1',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': False}
snapshot_bdm = block_device.BlockDeviceDict(
{'id': 4, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sda2',
'delete_on_termination': True,
'volume_size': 3,
'disk_bus': 'scsi',
'device_type': 'disk',
'source_type': 'snapshot',
'destination_type': 'volume',
'connection_info': '{"fake": "connection_info"}',
'snapshot_id': 'fake-snapshot-id-1',
'volume_id': 'fake-volume-id-2',
'boot_index': -1})
snapshot_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True,
'disk_bus': 'scsi',
'device_type': 'disk',
'guest_format': None,
'boot_index': -1}
snapshot_legacy_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True}
image_bdm = block_device.BlockDeviceDict(
{'id': 5, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sda2',
'delete_on_termination': True,
'volume_size': 1,
'disk_bus': 'scsi',
'device_type': 'disk',
'source_type': 'image',
'destination_type': 'volume',
'connection_info': '{"fake": "connection_info"}',
'image_id': 'fake-image-id-1',
'volume_id': 'fake-volume-id-2',
'boot_index': -1})
image_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True,
'disk_bus': 'scsi',
'device_type': 'disk',
'guest_format': None,
'boot_index': -1}
image_legacy_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True}
def setUp(self):
super(TestDriverBlockDevice, self).setUp()
self.volume_api = self.mox.CreateMock(cinder.API)
self.virt_driver = self.mox.CreateMock(driver.ComputeDriver)
self.context = context.RequestContext('fake_user',
'fake_project')
def test_no_device_raises(self):
for name, cls in self.driver_classes.items():
self.assertRaises(driver_block_device._NotTransformable,
cls, {'no_device': True})
def _test_driver_device(self, name):
db_bdm = getattr(self, "%s_bdm" % name)
test_bdm = self.driver_classes[name](db_bdm)
self.assertThat(test_bdm, matchers.DictMatches(
getattr(self, "%s_driver_bdm" % name)))
for k, v in db_bdm.iteritems():
field_val = getattr(test_bdm._bdm_obj, k)
if isinstance(field_val, bool):
v = bool(v)
self.assertEqual(field_val, v)
self.assertThat(test_bdm.legacy(),
matchers.DictMatches(
getattr(self, "%s_legacy_driver_bdm" % name)))
# Test passthru attributes
for passthru in test_bdm._proxy_as_attr:
self.assertEqual(getattr(test_bdm, passthru),
getattr(test_bdm._bdm_obj, passthru))
for no_pass in set(db_bdm.keys()) - test_bdm._proxy_as_attr:
self.assertRaises(AttributeError, getattr, test_bdm, no_pass)
# Make sure that all others raise _invalidType
for other_name, cls in self.driver_classes.iteritems():
if other_name == name:
continue
self.assertRaises(driver_block_device._InvalidType,
cls,
getattr(self, '%s_bdm' % name))
# Test the save method
with mock.patch.object(test_bdm._bdm_obj, 'save') as save_mock:
test_bdm.save(self.context)
for fld, alias in test_bdm._update_on_save.iteritems():
self.assertEqual(test_bdm[alias or fld],
getattr(test_bdm._bdm_obj, fld))
save_mock.assert_called_once_with(self.context)
def _test_driver_default_size(self, name):
size = 'swap_size' if name == 'swap' else 'size'
no_size_bdm = getattr(self, "%s_bdm" % name).copy()
no_size_bdm['volume_size'] = None
driver_bdm = self.driver_classes[name](no_size_bdm)
self.assertEqual(driver_bdm[size], 0)
del no_size_bdm['volume_size']
driver_bdm = self.driver_classes[name](no_size_bdm)
self.assertEqual(driver_bdm[size], 0)
def test_driver_swap_block_device(self):
self._test_driver_device("swap")
def test_driver_swap_default_size(self):
self._test_driver_default_size('swap')
def test_driver_ephemeral_block_device(self):
self._test_driver_device("ephemeral")
def test_driver_ephemeral_default_size(self):
self._test_driver_default_size('ephemeral')
def test_driver_volume_block_device(self):
self._test_driver_device("volume")
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
self.assertEqual(test_bdm['connection_info'],
jsonutils.loads(test_bdm._bdm_obj.connection_info))
self.assertEqual(test_bdm._bdm_obj.id, 3)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-1')
self.assertEqual(test_bdm.volume_size, 8)
def test_driver_snapshot_block_device(self):
self._test_driver_device("snapshot")
test_bdm = self.driver_classes['snapshot'](
self.snapshot_bdm)
self.assertEqual(test_bdm._bdm_obj.id, 4)
self.assertEqual(test_bdm.snapshot_id, 'fake-snapshot-id-1')
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
self.assertEqual(test_bdm.volume_size, 3)
def test_driver_image_block_device(self):
self._test_driver_device('image')
test_bdm = self.driver_classes['image'](
self.image_bdm)
self.assertEqual(test_bdm._bdm_obj.id, 5)
self.assertEqual(test_bdm.image_id, 'fake-image-id-1')
self.assertEqual(test_bdm.volume_size, 1)
def test_driver_image_block_device_destination_local(self):
self._test_driver_device('image')
bdm = self.image_bdm.copy()
bdm['destination_type'] = 'local'
self.assertRaises(driver_block_device._InvalidType,
self.driver_classes['image'], bdm)
def _test_volume_attach(self, driver_bdm, bdm_dict,
fake_volume, check_attach=True,
fail_check_attach=False, driver_attach=False,
fail_driver_attach=False, access_mode='rw'):
elevated_context = self.context.elevated()
self.stubs.Set(self.context, 'elevated',
lambda: elevated_context)
self.mox.StubOutWithMock(driver_bdm._bdm_obj, 'save')
self.mox.StubOutWithMock(encryptors, 'get_encryption_metadata')
instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
connector = {'ip': 'fake_ip', 'host': 'fake_host'}
connection_info = {'data': {'access_mode': access_mode}}
expected_conn_info = {'data': {'access_mode': access_mode},
'serial': fake_volume['id']}
enc_data = {'fake': 'enc_data'}
self.volume_api.get(self.context,
fake_volume['id']).AndReturn(fake_volume)
if check_attach:
if not fail_check_attach:
self.volume_api.check_attach(self.context, fake_volume,
instance=instance).AndReturn(None)
else:
self.volume_api.check_attach(self.context, fake_volume,
instance=instance).AndRaise(
test.TestingException)
return instance, expected_conn_info
self.virt_driver.get_volume_connector(instance).AndReturn(connector)
self.volume_api.initialize_connection(
elevated_context, fake_volume['id'],
connector).AndReturn(connection_info)
if driver_attach:
encryptors.get_encryption_metadata(
elevated_context, self.volume_api, fake_volume['id'],
connection_info).AndReturn(enc_data)
if not fail_driver_attach:
self.virt_driver.attach_volume(
elevated_context, expected_conn_info, instance,
bdm_dict['device_name'],
disk_bus=bdm_dict['disk_bus'],
device_type=bdm_dict['device_type'],
encryption=enc_data).AndReturn(None)
else:
self.virt_driver.attach_volume(
elevated_context, expected_conn_info, instance,
bdm_dict['device_name'],
disk_bus=bdm_dict['disk_bus'],
device_type=bdm_dict['device_type'],
encryption=enc_data).AndRaise(test.TestingException)
self.volume_api.terminate_connection(
elevated_context, fake_volume['id'],
expected_conn_info).AndReturn(None)
return instance, expected_conn_info
self.volume_api.attach(elevated_context, fake_volume['id'],
'fake_uuid', bdm_dict['device_name'],
mode=access_mode).AndReturn(None)
driver_bdm._bdm_obj.save(self.context).AndReturn(None)
return instance, expected_conn_info
def test_volume_attach(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1'}
instance, expected_conn_info = self._test_volume_attach(
test_bdm, self.volume_bdm, volume)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance,
self.volume_api, self.virt_driver)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def test_volume_attach_ro(self):
test_bdm = self.driver_classes['volume'](self.volume_bdm)
volume = {'id': 'fake-volume-id-1'}
instance, expected_conn_info = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, access_mode='ro')
self.mox.ReplayAll()
test_bdm.attach(self.context, instance,
self.volume_api, self.virt_driver)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def check_volume_attach_check_attach_fails(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1'}
instance, _ = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, fail_check_attach=True)
self.mox.ReplayAll()
self.assertRaises(test.TestingException, test_bdm.attach, self.context,
instance, self.volume_api, self.virt_driver)
def test_volume_attach_no_check_driver_attach(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1'}
instance, expected_conn_info = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, check_attach=False,
driver_attach=True)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance,
self.volume_api, self.virt_driver,
do_check_attach=False, do_driver_attach=True)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def check_volume_attach_driver_attach_fails(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1'}
instance, _ = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, fail_check_attach=True)
self.mox.ReplayAll()
self.assertRaises(test.TestingException, test_bdm.attach, self.context,
instance, self.volume_api, self.virt_driver,
do_driver_attach=True)
def test_refresh_connection(self):
test_bdm = self.driver_classes['snapshot'](
self.snapshot_bdm)
instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
connector = {'ip': 'fake_ip', 'host': 'fake_host'}
connection_info = {'data': {}}
expected_conn_info = {'data': {},
'serial': 'fake-volume-id-2'}
self.mox.StubOutWithMock(test_bdm._bdm_obj, 'save')
self.virt_driver.get_volume_connector(instance).AndReturn(connector)
self.volume_api.initialize_connection(
self.context, test_bdm.volume_id,
connector).AndReturn(connection_info)
test_bdm._bdm_obj.save(self.context).AndReturn(None)
self.mox.ReplayAll()
test_bdm.refresh_connection_info(self.context, instance,
self.volume_api, self.virt_driver)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def test_snapshot_attach_no_volume(self):
no_volume_snapshot = self.snapshot_bdm.copy()
no_volume_snapshot['volume_id'] = None
test_bdm = self.driver_classes['snapshot'](no_volume_snapshot)
snapshot = {'id': 'fake-snapshot-id-1'}
volume = {'id': 'fake-volume-id-2'}
wait_func = self.mox.CreateMockAnything()
self.volume_api.get_snapshot(self.context,
'fake-snapshot-id-1').AndReturn(snapshot)
self.volume_api.create(self.context, 3,
'', '', snapshot).AndReturn(volume)
wait_func(self.context, 'fake-volume-id-2').AndReturn(None)
instance, expected_conn_info = self._test_volume_attach(
test_bdm, no_volume_snapshot, volume)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver, wait_func)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
def test_snapshot_attach_volume(self):
test_bdm = self.driver_classes['snapshot'](
self.snapshot_bdm)
instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
volume_class = self.driver_classes['volume']
self.mox.StubOutWithMock(volume_class, 'attach')
# Make sure these are not called
self.mox.StubOutWithMock(self.volume_api, 'get_snapshot')
self.mox.StubOutWithMock(self.volume_api, 'create')
volume_class.attach(self.context, instance, self.volume_api,
self.virt_driver).AndReturn(None)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
def test_image_attach_no_volume(self):
no_volume_image = self.image_bdm.copy()
no_volume_image['volume_id'] = None
test_bdm = self.driver_classes['image'](no_volume_image)
image = {'id': 'fake-image-id-1'}
volume = {'id': 'fake-volume-id-2'}
wait_func = self.mox.CreateMockAnything()
self.volume_api.create(self.context, 1,
'', '', image_id=image['id']).AndReturn(volume)
wait_func(self.context, 'fake-volume-id-2').AndReturn(None)
instance, expected_conn_info = self._test_volume_attach(
test_bdm, no_volume_image, volume)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver, wait_func)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
def test_image_attach_volume(self):
test_bdm = self.driver_classes['image'](
self.image_bdm)
instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
volume_class = self.driver_classes['volume']
self.mox.StubOutWithMock(volume_class, 'attach')
# Make sure these are not called
self.mox.StubOutWithMock(self.volume_api, 'get_snapshot')
self.mox.StubOutWithMock(self.volume_api, 'create')
volume_class.attach(self.context, instance, self.volume_api,
self.virt_driver).AndReturn(None)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
def test_convert_block_devices(self):
converted = driver_block_device._convert_block_devices(
self.driver_classes['volume'],
[self.volume_bdm, self.ephemeral_bdm])
self.assertEqual(converted, [self.volume_driver_bdm])
def test_legacy_block_devices(self):
test_snapshot = self.driver_classes['snapshot'](
self.snapshot_bdm)
block_device_mapping = [test_snapshot, test_snapshot]
legacy_bdm = driver_block_device.legacy_block_devices(
block_device_mapping)
self.assertEqual(legacy_bdm, [self.snapshot_legacy_driver_bdm,
self.snapshot_legacy_driver_bdm])
# Test that the ephemerals work as expected
test_ephemerals = [self.driver_classes['ephemeral'](
self.ephemeral_bdm) for _ in xrange(2)]
expected = [self.ephemeral_legacy_driver_bdm.copy()
for _ in xrange(2)]
expected[0]['virtual_name'] = 'ephemeral0'
expected[0]['num'] = 0
expected[1]['virtual_name'] = 'ephemeral1'
expected[1]['num'] = 1
legacy_ephemerals = driver_block_device.legacy_block_devices(
test_ephemerals)
self.assertEqual(expected, legacy_ephemerals)
def test_get_swap(self):
swap = [self.swap_driver_bdm]
legacy_swap = [self.swap_legacy_driver_bdm]
no_swap = [self.volume_driver_bdm]
self.assertEqual(swap[0], driver_block_device.get_swap(swap))
self.assertEqual(legacy_swap[0],
driver_block_device.get_swap(legacy_swap))
self.assertEqual(no_swap, driver_block_device.get_swap(no_swap))
self.assertIsNone(driver_block_device.get_swap([]))
def test_is_implemented(self):
for bdm in (self.image_bdm, self.volume_bdm, self.swap_bdm,
self.ephemeral_bdm, self.snapshot_bdm):
self.assertTrue(driver_block_device.is_implemented(bdm))
local_image = self.image_bdm.copy()
local_image['destination_type'] = 'local'
self.assertFalse(driver_block_device.is_implemented(local_image))
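# Illustrative note (comments only, not part of the original test module): the
# conversion chain these tests exercise is roughly
#
#     db_bdm     = block_device.BlockDeviceDict({...})            # DB-style dict
#     driver_bdm = driver_block_device.DriverVolumeBlockDevice(db_bdm)
#     legacy     = driver_bdm.legacy()                            # legacy driver dict
#
# i.e. each Driver*BlockDevice wraps a database block device mapping and can
# emit the stripped-down legacy dict expected by older virt driver code.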
| {
"content_hash": "52aba5d851176806185dc47956f06b08",
"timestamp": "",
"source": "github",
"line_count": 555,
"max_line_length": 78,
"avg_line_length": 39.792792792792795,
"alnum_prop": 0.5712474530224134,
"repo_name": "tanglei528/nova",
"id": "ae6d16ed1e0dc697966bf71c6acf251a5d98ef69",
"size": "22683",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nova/tests/virt/test_block_device.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13998720"
},
{
"name": "Shell",
"bytes": "17451"
}
],
"symlink_target": ""
} |
from unittest.mock import Mock
from unittest.mock import call
from unittest.mock import patch
import pytest
class TestTest:
@pytest.fixture
def test(self):
from palladium.eval import test
return test
def test_test(self, test):
dataset_loader_test, model_persister = Mock(), Mock()
X, y = object(), object()
dataset_loader_test.return_value = X, y
model = model_persister.read.return_value
model.__metadata__ = {'version': 77}
test(dataset_loader_test, model_persister, model_version=77)
dataset_loader_test.assert_called_with()
model_persister.read.assert_called_with(version=77)
model.score.assert_called_with(X, y)
def test_test_no_score(self, test):
dataset_loader_test, model_persister = Mock(), Mock()
X, y = object(), object()
dataset_loader_test.return_value = X, y
model_persister.read.return_value = Mock(spec=['fit', 'predict'])
model_persister.read.return_value.__metadata__ = {'version': 99}
with pytest.raises(ValueError):
test(dataset_loader_test, model_persister)
class TestList:
@pytest.fixture
def list(self):
from palladium.eval import list
return list
def test(self, list):
model_persister = Mock()
model_persister.list_models.return_value = [{1: 2}]
model_persister.list_properties.return_value = {5: 6}
with patch('palladium.eval.pprint') as pprint:
list(model_persister)
assert pprint.mock_calls[0] == call([{1: 2}])
assert pprint.mock_calls[1] == call({5: 6})
| {
"content_hash": "57aed56d85bfa23dded5f8edacc5dc62",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 73,
"avg_line_length": 32.27450980392157,
"alnum_prop": 0.6281895504252734,
"repo_name": "alexsavio/palladium",
"id": "0ae66f9dff3e499a5933414cc8060581364fab52",
"size": "1646",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "palladium/tests/test_eval.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "161909"
},
{
"name": "R",
"bytes": "426"
},
{
"name": "Shell",
"bytes": "3940"
}
],
"symlink_target": ""
} |
"""
Wavefront REST API Documentation
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: chitimba@wavefront.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import wavefront_api_client
from wavefront_api_client.models.ingestion_policy_read_model import IngestionPolicyReadModel # noqa: E501
from wavefront_api_client.rest import ApiException
class TestIngestionPolicyReadModel(unittest.TestCase):
"""IngestionPolicyReadModel unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testIngestionPolicyReadModel(self):
"""Test IngestionPolicyReadModel"""
# FIXME: construct object with mandatory attributes with example values
# model = wavefront_api_client.models.ingestion_policy_read_model.IngestionPolicyReadModel() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "d92387ed284bd9228bf9cc3c2dae6c6d",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 409,
"avg_line_length": 35.86842105263158,
"alnum_prop": 0.7358767424798239,
"repo_name": "wavefrontHQ/python-client",
"id": "5ee465662877427e714714e4d60c9bcdb8f4a08e",
"size": "1380",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_ingestion_policy_read_model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4642252"
},
{
"name": "Shell",
"bytes": "3458"
}
],
"symlink_target": ""
} |
import glob
import logging
import os.path
import subprocess
import sys
import time
import traceback
descriptors = []
logging.basicConfig(level=logging.ERROR, format="%(asctime)s - %(name)s - %(levelname)s\t Thread-%(thread)d - %(message)s", filename='/tmp/gmond.log', filemode='w')
logging.debug('starting up')
last_update = 0
stats = {}
last_val = {}
pgid_list = {}
MAX_UPDATE_TIME = 15
# clock ticks per second... jiffies (HZ)
JIFFIES_PER_SEC = os.sysconf('SC_CLK_TCK')
PAGE_SIZE = os.sysconf('SC_PAGE_SIZE')
PROCESSES = {}
def readCpu(pid):
try:
stat = file('/proc/' + pid + '/stat', 'rt').readline().split()
#logging.debug(' stat (' + pid + '): ' + str(stat))
utime = int(stat[13])
stime = int(stat[14])
cutime = int(stat[15])
cstime = int(stat[16])
return (utime + stime + cutime + cstime)
except:
logging.warning('failed to get (' + str(pid) + ') stats')
return 0
def get_pgid(proc):
logging.debug('getting pgid for process: ' + proc)
ERROR = 0
if pgid_list.has_key(proc) and os.path.exists('/proc/' + pgid_list[proc][0]):
return pgid_list[proc]
val = PROCESSES[proc]
# Is this a pidfile? Last 4 chars are .pid
if '.pid' in val[-4:]:
if os.path.exists(val):
logging.debug(' pidfile found')
ppid = file(val, 'rt').readline().strip()
pgid = file('/proc/' + ppid + '/stat', 'rt').readline().split()[4]
else:
raise Exception('pidfile (' + val + ') does not exist')
else:
# This is a regex, lets search for it
regex = PROCESSES[proc]
cmd = "ps -Ao pid,ppid,pgid,args | awk '" + regex + " && $2 == 1 && !/awk/ && !/procstat\.py/ {print $0}'"
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if p.returncode:
raise Exception('failed executing ps\n' + cmd + '\n' + err)
result = out.strip().split('\n')
logging.debug(' result: ' + str(result))
if len(result) > 1:
raise Exception('more than 1 result returned\n' + cmd + '\n' + out.strip())
if result[0] in '':
raise Exception('no process returned\n' + cmd)
res = result[0].split()
ppid = res[0]
pgid = res[2]
if os.path.exists('/proc/' + ppid):
logging.debug(' ppid: ' + ppid + ' pgid: ' + pgid)
return (ppid, pgid)
else:
return ERROR
def get_pgroup(ppid, pgid):
'''Return a list of pids having the same pgid, with the first in the list being the parent pid.'''
logging.debug('getting pids for ppid/pgid: ' + ppid + '/' + pgid)
# Get all processes in this group
p_list = []
for stat_file in glob.glob('/proc/[1-9]*/stat'):
try:
stat = file(stat_file, 'rt').readline().split()
if stat[4] == pgid:
p_list.append(stat[0])
except:
# likely the pid has exited. this is normal.
pass
# place pid at the top of the list
if ppid in p_list:
p_list.remove(ppid)
p_list.insert(0, ppid)
else:
logging.warning('failed to find ppid ' + str(ppid) + ' in p_list')
logging.debug('p_list: ' + str(p_list))
if not len(p_list):
logging.warning('failed getting pids')
return p_list
def get_rss(pids):
logging.debug('getting rss for pids')
rss = 0
for p in pids:
try:
statm = open('/proc/' + p + '/statm', 'rt').readline().split()
#logging.debug(' statm (' + p + '): ' + str(statm))
except:
# Process finished, ignore this mem usage
logging.warning(' failed getting statm for pid: ' + p)
continue
rss += int(statm[1])
rss *= PAGE_SIZE
return rss
def test(params):
global PROCESSES, MAX_UPDATE_TIME
MAX_UPDATE_TIME = 2
logging.debug('testing processes: ' + str(params))
PROCESSES = params
for proc, val in PROCESSES.items():
print('')
print(' Testing ' + proc + ': ' + val)
try:
(ppid, pgid) = get_pgid(proc)
except Exception, e:
print(' failed getting pgid: ' + str(e))
continue
pids = get_pgroup(ppid, pgid)
print(' Processes in this group: ')
print(' PID, ARGS')
for pid in pids:
# Read from binary file containing command line arguments
args = file('/proc/' + pid + '/cmdline', 'rt').readline().replace('\0', ' ')
print(' ' + pid + ' ' + args)
logging.debug('success testing')
def update_stats():
logging.debug('updating stats')
global last_update, stats, last_val
cur_time = time.time()
if cur_time - last_update < MAX_UPDATE_TIME:
logging.debug(' wait ' + str(int(MAX_UPDATE_TIME - (cur_time - last_update))) + ' seconds')
return True
else:
last_update = cur_time
for proc, val in PROCESSES.items():
logging.debug(' updating for ' + proc)
# setup storage lists
if not proc in stats:
stats[proc] = {}
if not proc in last_val:
last_val[proc] = {}
#####
# Update CPU utilization
try:
(ppid, pgid) = get_pgid(proc)
except Exception, e:
logging.warning(' failed getting pgid: ' + str(e))
stats[proc]['cpu'] = 0.0
stats[proc]['mem'] = 0
continue
# save for later
pgid_list[proc] = (ppid, pgid)
pids = get_pgroup(ppid, pgid)
cpu_time = time.time()
proc_time = 0
for p in pids:
proc_time += readCpu(p)
logging.debug(' proc_time: ' + str(proc_time) + ' cpu_time: ' + str(cpu_time))
# do we have an old value to calculate with?
if 'cpu_time' in last_val[proc]:
logging.debug(' last_val: ' + str(last_val[proc]))
logging.debug(' calc: 100 * ' + str(proc_time - last_val[proc]['proc_time']) + ' / ' + str(cpu_time - last_val[proc]['cpu_time']) + ' * ' + str(JIFFIES_PER_SEC))
stats[proc]['cpu'] = 100 * (proc_time - last_val[proc]['proc_time']) / float((cpu_time - last_val[proc]['cpu_time']) * JIFFIES_PER_SEC)
logging.debug(' calc: ' + str(stats[proc]['cpu']))
else:
stats[proc]['cpu'] = 0.0
last_val[proc]['cpu_time'] = cpu_time
last_val[proc]['proc_time'] = proc_time
#####
# Update Mem utilization
rss = get_rss(pids)
stats[proc]['mem'] = rss
logging.debug('success refreshing stats')
logging.debug('stats: ' + str(stats))
return True
def get_stat(name):
logging.debug('getting stat: ' + name)
ret = update_stats()
if ret:
if name.startswith('procstat_'):
nsp = name.split('_')
proc = '_'.join(nsp[1:-1])
label = nsp[-1]
try:
return stats[proc][label]
except:
logging.warning('failed to fetch [' + proc + '] ' + name)
return 0
else:
label = name
try:
return stats[label]
except:
logging.warning('failed to fetch ' + name)
return 0
else:
return 0
def metric_init(params):
global descriptors
global PROCESSES
logging.debug('init: ' + str(params))
PROCESSES = params
#for proc,regex in PROCESSES.items():
update_stats()
descriptions = dict(
cpu = {
'units': 'percent',
'value_type': 'float',
'format': '%.1f',
'description': 'The total percent CPU utilization'},
mem = {
'units': 'B',
'description': 'The total memory utilization'}
)
time_max = 60
for label in descriptions:
for proc in PROCESSES:
if stats[proc].has_key(label):
d = {
'name': 'procstat_' + proc + '_' + label,
'call_back': get_stat,
'time_max': time_max,
'value_type': 'uint',
'units': '',
'slope': 'both',
'format': '%u',
'description': label,
'groups': 'procstat'
}
# Apply metric customizations from descriptions
d.update(descriptions[label])
descriptors.append(d)
else:
logging.error("skipped " + proc + '_' + label)
#logging.debug('descriptors: ' + str(descriptors))
return descriptors
def display_proc_stat(pid):
try:
stat = file('/proc/' + pid + '/stat', 'rt').readline().split()
fields = [
'pid', 'comm', 'state', 'ppid', 'pgrp', 'session',
'tty_nr', 'tty_pgrp', 'flags', 'min_flt', 'cmin_flt', 'maj_flt',
'cmaj_flt', 'utime', 'stime', 'cutime', 'cstime', 'priority',
'nice', 'num_threads', 'it_real_value', 'start_time', 'vsize', 'rss',
'rlim', 'start_code', 'end_code', 'start_stack', 'esp', 'eip',
'pending', 'blocked', 'sigign', 'sigcatch', 'wchan', 'nswap',
'cnswap', 'exit_signal', 'processor', 'rt_priority', 'policy'
]
# Display them
i = 0
for f in fields:
print '%15s: %s' % (f, stat[i])
i += 1
except:
print('failed to get /proc/' + pid + '/stat')
print(traceback.print_exc(file=sys.stdout))
def display_proc_statm(pid):
try:
statm = file('/proc/' + pid + '/statm', 'rt').readline().split()
fields = [
'size', 'rss', 'share', 'trs', 'drs', 'lrs', 'dt'
]
# Display them
i = 0
for f in fields:
print '%15s: %s' % (f, statm[i])
i += 1
except:
print('failed to get /proc/' + pid + '/statm')
print(traceback.print_exc(file=sys.stdout))
def metric_cleanup():
logging.shutdown()
# pass
if __name__ == '__main__':
from optparse import OptionParser
import os
logging.debug('running from cmd line')
parser = OptionParser()
parser.add_option('-p', '--processes', dest='processes', default='', help='processes to explicitly check')
parser.add_option('-v', '--value', dest='value', default='', help='regex or pidfile for each processes')
parser.add_option('-s', '--stat', dest='stat', default='', help='display the /proc/[pid]/stat file for this pid')
parser.add_option('-m', '--statm', dest='statm', default='', help='display the /proc/[pid]/statm file for this pid')
parser.add_option('-b', '--gmetric-bin', dest='gmetric_bin', default='/usr/bin/gmetric', help='path to gmetric binary')
parser.add_option('-c', '--gmond-conf', dest='gmond_conf', default='/etc/ganglia/gmond.conf', help='path to gmond.conf')
parser.add_option('-g', '--gmetric', dest='gmetric', action='store_true', default=False, help='submit via gmetric')
parser.add_option('-q', '--quiet', dest='quiet', action='store_true', default=False)
parser.add_option('-t', '--test', dest='test', action='store_true', default=False, help='test the regex list')
(options, args) = parser.parse_args()
if options.stat != '':
display_proc_stat(options.stat)
sys.exit(0)
elif options.statm != '':
display_proc_statm(options.statm)
sys.exit(0)
_procs = options.processes.split(',')
_val = options.value.split(',')
params = {}
i = 0
for proc in _procs:
params[proc] = _val[i]
i += 1
if options.test:
test(params)
update_stats()
print('')
print(' waiting ' + str(MAX_UPDATE_TIME) + ' seconds')
time.sleep(MAX_UPDATE_TIME)
metric_init(params)
for d in descriptors:
v = d['call_back'](d['name'])
if not options.quiet:
print ' %s: %s %s [%s]' % (d['name'], d['format'] % v, d['units'], d['description'])
if options.gmetric:
if d['value_type'] == 'uint':
value_type = 'uint32'
else:
value_type = d['value_type']
cmd = "%s --conf=%s --value='%s' --units='%s' --type='%s' --name='%s' --slope='%s'" % \
(options.gmetric_bin, options.gmond_conf, v, d['units'], value_type, d['name'], d['slope'])
os.system(cmd)
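# Example command-line usage (comments only; the process names, pidfile paths
# and awk-style patterns below are hypothetical):
#
#     # test the configured name/value pairs without submitting metrics
#     python procstat.py -t -p httpd,gmond -v '/httpd/,/var/run/gmond.pid'
#
#     # dump the /proc/<pid>/stat fields for a single pid
#     python procstat.py -s 1234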
| {
"content_hash": "cc13b96b5a419c169c7337526d8ec57a",
"timestamp": "",
"source": "github",
"line_count": 423,
"max_line_length": 173,
"avg_line_length": 29.699763593380613,
"alnum_prop": 0.5245562365676988,
"repo_name": "NoodlesNZ/monitor-core",
"id": "b26849b28b977320ba2bcda8d7ca6ca6726f7530",
"size": "17070",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "gmond/python_modules/process/procstat.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1122546"
},
{
"name": "CSS",
"bytes": "909"
},
{
"name": "Groff",
"bytes": "11235"
},
{
"name": "HTML",
"bytes": "44529"
},
{
"name": "JavaScript",
"bytes": "389491"
},
{
"name": "Logos",
"bytes": "3485"
},
{
"name": "PHP",
"bytes": "3064"
},
{
"name": "Perl",
"bytes": "78767"
},
{
"name": "Protocol Buffer",
"bytes": "979"
},
{
"name": "Python",
"bytes": "425126"
},
{
"name": "Shell",
"bytes": "41192"
},
{
"name": "SourcePawn",
"bytes": "365"
},
{
"name": "Visual Basic",
"bytes": "279"
}
],
"symlink_target": ""
} |
import pbr.version
__version__ = pbr.version.VersionInfo('python-blazarclient').version_string()
| {
"content_hash": "90afb6d839fc3ae48375a212b02c8c46",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 77,
"avg_line_length": 24.75,
"alnum_prop": 0.7575757575757576,
"repo_name": "openstack/python-blazarclient",
"id": "3bd8f7b31631b19eecf657584c24dc2ea0cde69b",
"size": "682",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "blazarclient/version.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "135635"
}
],
"symlink_target": ""
} |
from module_base import ModuleBase
from module_mixins import ScriptedConfigModuleMixin
import module_utils
import vtk
import wx
class JPEGReader(ScriptedConfigModuleMixin, ModuleBase):
def __init__(self, module_manager):
ModuleBase.__init__(self, module_manager)
self._reader = vtk.vtkJPEGReader()
self._reader.SetFileDimensionality(3)
module_utils.setup_vtk_object_progress(self, self._reader,
'Reading JPG images.')
self._config.filePattern = '%03d.jpg'
self._config.firstSlice = 0
self._config.lastSlice = 1
self._config.spacing = (1,1,1)
self._config.fileLowerLeft = False
configList = [
('File pattern:', 'filePattern', 'base:str', 'filebrowser',
'Filenames will be built with this. See module help.',
{'fileMode' : wx.OPEN,
'fileMask' :
'JPG files (*.jpg)|*.jpg|All files (*.*)|*.*'}),
('First slice:', 'firstSlice', 'base:int', 'text',
'%d will iterate starting at this number.'),
('Last slice:', 'lastSlice', 'base:int', 'text',
'%d will iterate and stop at this number.'),
('Spacing:', 'spacing', 'tuple:float,3', 'text',
'The 3-D spacing of the resultant dataset.'),
('Lower left:', 'fileLowerLeft', 'base:bool', 'checkbox',
'Image origin at lower left? (vs. upper left)')]
ScriptedConfigModuleMixin.__init__(
self, configList,
{'Module (self)' : self,
'vtkJPEGReader' : self._reader})
self.sync_module_logic_with_config()
def close(self):
# we play it safe... (the graph_editor/module_manager should have
# disconnected us by now)
for input_idx in range(len(self.get_input_descriptions())):
self.set_input(input_idx, None)
# this will take care of all display thingies
ScriptedConfigModuleMixin.close(self)
ModuleBase.close(self)
# get rid of our reference
del self._reader
def get_input_descriptions(self):
return ()
def set_input(self, idx, inputStream):
raise Exception
def get_output_descriptions(self):
return ('vtkImageData',)
def get_output(self, idx):
return self._reader.GetOutput()
def logic_to_config(self):
#self._config.filePrefix = self._reader.GetFilePrefix()
self._config.filePattern = self._reader.GetFilePattern()
self._config.firstSlice = self._reader.GetFileNameSliceOffset()
e = self._reader.GetDataExtent()
self._config.lastSlice = self._config.firstSlice + e[5] - e[4]
self._config.spacing = self._reader.GetDataSpacing()
self._config.fileLowerLeft = bool(self._reader.GetFileLowerLeft())
def config_to_logic(self):
#self._reader.SetFilePrefix(self._config.filePrefix)
self._reader.SetFilePattern(self._config.filePattern)
self._reader.SetFileNameSliceOffset(self._config.firstSlice)
self._reader.SetDataExtent(0,0,0,0,0,
self._config.lastSlice -
self._config.firstSlice)
self._reader.SetDataSpacing(self._config.spacing)
self._reader.SetFileLowerLeft(self._config.fileLowerLeft)
def execute_module(self):
self._reader.Update()
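# Configuration sketch (comments only; filenames are hypothetical): with
# filePattern '%03d.jpg', firstSlice 10 and lastSlice 12, config_to_logic()
# above should make the wrapped vtkJPEGReader load 010.jpg, 011.jpg and
# 012.jpg as a single three-slice vtkImageData volume with the configured
# spacing.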
| {
"content_hash": "d80fbfeeaa41f028c25ff6b298d6961f",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 74,
"avg_line_length": 35.714285714285715,
"alnum_prop": 0.5897142857142857,
"repo_name": "fvpolpeta/devide",
"id": "88b78c0fbc093f20cf956f657fb08d6553775446",
"size": "3500",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "modules/readers/JPEGReader.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "NSIS",
"bytes": "2786"
},
{
"name": "Python",
"bytes": "3104368"
},
{
"name": "Shell",
"bytes": "7369"
}
],
"symlink_target": ""
} |
from django.dispatch import Signal
post_export = Signal(providing_args=["model"])
post_import = Signal(providing_args=["model"])
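# Receiver sketch (comments only; the handler name is hypothetical). Both
# signals are sent with a "model" argument, so a receiver can inspect which
# model was imported or exported:
#
#     from import_export.signals import post_export
#
#     def log_export(sender, model=None, **kwargs):
#         print('exported model: %r' % model)
#
#     post_export.connect(log_export)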
| {
"content_hash": "5e29bb754821c50861b1e1a51d9c12d9",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 46,
"avg_line_length": 32.5,
"alnum_prop": 0.7538461538461538,
"repo_name": "daniell/django-import-export",
"id": "0a7ab96b2a1419e3fdbf053cddc1cb763c486286",
"size": "130",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "import_export/signals.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "5731"
},
{
"name": "JavaScript",
"bytes": "777"
},
{
"name": "Python",
"bytes": "166737"
},
{
"name": "Shell",
"bytes": "79"
}
],
"symlink_target": ""
} |
__title__ = 'djangorestframework-jsonapi'
__version__ = '2.0.0-alpha.2'
__author__ = ''
__license__ = 'MIT'
__copyright__ = ''
# Version synonym
VERSION = __version__
| {
"content_hash": "763f454593260006d6427d6baad1ffb9",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 41,
"avg_line_length": 21,
"alnum_prop": 0.5952380952380952,
"repo_name": "aquavitae/django-rest-framework-json-api",
"id": "d508ca4377b7a6e34c49466ac23abc2f34957ea3",
"size": "193",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "rest_framework_json_api/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "51636"
}
],
"symlink_target": ""
} |
from .sub_resource import SubResource
class IntegrationRuntimeResource(SubResource):
"""Integration runtime resource type.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:ivar etag: Etag identifies change in the resource.
:vartype etag: str
:param properties: Integration runtime properties.
:type properties: ~azure.mgmt.datafactory.models.IntegrationRuntime
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'etag': {'readonly': True},
'properties': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'IntegrationRuntime'},
}
def __init__(self, properties):
super(IntegrationRuntimeResource, self).__init__()
self.properties = properties
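# Construction sketch (comments only; the runtime value is a placeholder).
# Only ``properties`` is settable here -- id, name, type and etag are
# read-only and populated by the server:
#
#     runtime = IntegrationRuntime()  # or a concrete IntegrationRuntime subclass
#     resource = IntegrationRuntimeResource(properties=runtime)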
| {
"content_hash": "903def91e50571bc8d9d8f497e97bf7b",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 74,
"avg_line_length": 31.35,
"alnum_prop": 0.5956937799043063,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "8568ed26cb1e56ac0c40f242adeccfc78528d31c",
"size": "1728",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-mgmt-datafactory/azure/mgmt/datafactory/models/integration_runtime_resource.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
} |
import argparse
import datetime
import time
import sys
import configreader
import mailer
from localstore import LocalStore
def get_args():
"""
Parses command-line arguments and returns them.
returns the argparse namespace with the arguments.
"""
parser = argparse.ArgumentParser(description='Send Emails for NoticeMe.')
parser.add_argument('settings', metavar='<settings>',
help='file containing program settings')
group0 = parser.add_mutually_exclusive_group(required=False)
group0.add_argument('-k', '--keep', action='store_true',
help='do not delete database after mailing')
group0.add_argument('-r', '--rename', action='store_true',
help='rename and keep database after mailing')
group1 = parser.add_mutually_exclusive_group(required=True)
group1.add_argument('-d', '--daily', action='store_true',
help='send mail based on the previous 24 hours of data')
group1.add_argument('-w', '--weekly', action='store_true',
help='send mail based on the previous seven days of data')
group2 = parser.add_mutually_exclusive_group(required=True)
group2.add_argument('-a', '--all', action='store_true',
help='send emails to all registered recipients')
group2.add_argument('-n', '--nola', action='store_true',
help='only send emails to xxx@nola.gov addresses')
group2.add_argument('-c', '--citizens', action='store_true',
help='send emails to recipients without a xxx@nola.gov address')
parser.add_argument('-s', '--save', action='store_true',
help='save emails to disk instead of sending them')
return parser.parse_args()
if __name__ == '__main__':
#Parse the command line arguments, and load the config file.
args = get_args()
config = configreader.Configurator(args.settings)
cfg, notices = config.read_settings('all')
#If we're running a weekly batch, name things appropriately.
if args.weekly:
dbname = 'w_' + cfg['localdb']
emailtemplate = cfg['weeklytemplate']
else:
dbname = cfg['localdb']
emailtemplate = cfg['dailytemplate']
#Get the addresses we need for this sendoff.
if args.all:
early = None
elif args.nola:
early = True
elif args.citizens:
early = False
else:
early = None
if args.weekly:
print 'Processing weekly notices...'
else:
print 'Processing daily notices...'
#Setup the info needed by the MailGenerator
localdb = LocalStore(dbname)
displaynames = {x['table']: x['displayname'] for x in notices}
templates = {x['table']: x['template'] for x in notices}
secretary = mailer.MailGenerator(localdb, cfg['mailtable'],
cfg['namefield'], cfg['addrfield'],
cfg['uidfield'], cfg['sourcefield'],
displaynames, templates)
#Get the email addresses to which we must send notices, and proceed if
# there are any to send.
try:
numemails = secretary.get_emailaddrs(early)
except mailer.MailError as e:
print ' - Cannot retrieve emails: {0}'.format(e.message)
sys.exit(1)
if numemails > 0:
print ' - {0} emails to process and send.'.format(numemails)
if early is not None:
if early:
print ' - Sending early notices.'
else:
print ' - Sending late notices.'
#Process the data and generate emails.
try:
emails = secretary.generate_emails(cfg['citywidearea'], emailtemplate, cfg['templatepath'])
except mailer.MailError as e:
print ' - Error generating emails: {0}'.format(e.message)
sys.exit(1)
#Now, send the emails.
try:
postman = mailer.MailSender(cfg['mailserver'], cfg['fromaddress'])
#For testing, save them to disk before sending.
if args.save:
sentemails, totaltime = postman.save_emails(emails)
else:
sentemails, totaltime = postman.send_emails(emails)
except Exception as e:
print ' - Error sending emails: {0}'.format(e.message)
sys.exit(1)
print ' - {0} emails sent in {1:0.2f} seconds.'.format(sentemails, totaltime)
if sentemails != len(emails):
currdate = datetime.date.today().strftime('%Y%m%d')
errfile = 'email_errors_{0}.txt'.format(currdate)
print ' - errors encountered sending some emails.'
with open(errfile, 'w') as f:
f.writelines([x+'\n' for x in postman.mailerrors])
print ' - error details saved to {0}'.format(errfile)
else:
print ' - No notices to send.'
#Now, delete the sqlite database, unless the --keep option has been passed.
localdb.close_db()
del localdb
if args.keep:
#Do nothing to the DB; we'll need it for a later mailing.
print 'Keeping scratch db for later use...'
elif args.rename:
#Rename and keep the DB, we want it for checking and testing later.
print 'Keeping scratch db for posterity...'
currdate = datetime.date.today().strftime('%Y%m%d')
try:
mailer.keep_db(dbname, currdate)
except mailer.MailError as e:
print ' - Error renaming scratch DB: {0}'.format(e.message)
sys.exit(1)
else:
print 'Deleting scratch db...'
try:
mailer.del_db(dbname)
except mailer.MailError as e:
print ' - Error deleting scratch DB: {0}'.format(e.message)
sys.exit(1)
print 'NoticeMail Complete!'
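# Example invocations (comments only; the settings filename is hypothetical):
#
#     # daily run, all registered recipients
#     python noticemail.py settings.cfg --daily --all
#
#     # weekly run, nola.gov recipients only, saving emails to disk instead of sending
#     python noticemail.py settings.cfg --weekly --nola --save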
| {
"content_hash": "44f7d34252e67cbda905a03472ccabbf",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 103,
"avg_line_length": 40.95238095238095,
"alnum_prop": 0.5785714285714286,
"repo_name": "CityOfNewOrleans/NoticeMe",
"id": "c29ca9dd44124f26792a89ad6709e7ae080dcaea",
"size": "6020",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Backend/noticemail.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "111"
},
{
"name": "CSS",
"bytes": "140629"
},
{
"name": "HTML",
"bytes": "117610"
},
{
"name": "JavaScript",
"bytes": "223012"
},
{
"name": "Python",
"bytes": "73492"
}
],
"symlink_target": ""
} |
import re, os, time
import ftplib
from behave import *
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
@given('I want a screenshot of page "{page}"')
def step_impl(context, page):
context.browser.maximize_window()
context.browser.get(context.target + page)
@then('the screenshot is "{title}"')
def step_impl(context, title):
current_directory = os.getcwd()
screenshot_directory = current_directory + "/screenshots"
if not os.path.exists(screenshot_directory):
os.mkdir(screenshot_directory)
os.chdir(screenshot_directory)
context.browser.save_screenshot(title + '.png')
os.chdir(current_directory)
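# Matching feature-file sketch (comments only; the page and title values are
# hypothetical):
#
#     Given I want a screenshot of page "/login"
#     Then the screenshot is "login-page"
#
# The PNG is written to a "screenshots" directory beneath the current working
# directory, which is created on first use.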
| {
"content_hash": "a41d9a663dde8b99040149ae64fb6df2",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 64,
"avg_line_length": 33.30434782608695,
"alnum_prop": 0.7349869451697127,
"repo_name": "raymond91125/amigo",
"id": "8e9ccf4d0c6b7d132fe965d3e8e2ab42dc0ed95f",
"size": "849",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test-app/behave/steps/screenshot.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "37943"
},
{
"name": "Emacs Lisp",
"bytes": "13034"
},
{
"name": "Gherkin",
"bytes": "7024"
},
{
"name": "HTML",
"bytes": "131"
},
{
"name": "Java",
"bytes": "2944"
},
{
"name": "JavaScript",
"bytes": "6612903"
},
{
"name": "Makefile",
"bytes": "14662"
},
{
"name": "Perl",
"bytes": "591780"
},
{
"name": "Python",
"bytes": "9320"
},
{
"name": "Ruby",
"bytes": "1061"
}
],
"symlink_target": ""
} |
import os
DEBUG = False
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.' + os.environ['DJANGO_DB'], # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'hello_world', # Or path to database file if using sqlite3.
'USER': 'benchmarkdbuser', # Not used with sqlite3.
'PASSWORD': 'benchmarkdbpass', # Not used with sqlite3.
'HOST': os.environ.get('DBHOST', ''), # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
'CONN_MAX_AGE': 30,
}
}
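# Example environment (comments only; the host value is hypothetical):
#   DJANGO_DB=postgresql_psycopg2 DBHOST=db.example.com
# selects the 'django.db.backends.postgresql_psycopg2' engine above; leaving
# DBHOST unset falls back to localhost.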
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = False
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = False
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '_7mb6#v4yf@qhc(r(zbyh&z_iby-na*7wz&-v6pohsul-d#y5f'
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'hello.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'hello.wsgi.application'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
},
},
]
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'world',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console':{
'level': 'ERROR',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins', 'console'],
'level': 'ERROR',
'propagate': True,
},
}
}
ALLOWED_HOSTS = ['*']
| {
"content_hash": "666421e6efcc945c487ae341822920c8",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 127,
"avg_line_length": 31.910714285714285,
"alnum_prop": 0.6562208543182242,
"repo_name": "raziel057/FrameworkBenchmarks",
"id": "86045bc1531ad00672168fa25bc9cfc5b9042343",
"size": "5399",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "frameworks/Python/django/hello/hello/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "838"
},
{
"name": "Batchfile",
"bytes": "1326"
},
{
"name": "C",
"bytes": "148190"
},
{
"name": "C#",
"bytes": "241452"
},
{
"name": "C++",
"bytes": "87141"
},
{
"name": "CMake",
"bytes": "8236"
},
{
"name": "CSS",
"bytes": "157859"
},
{
"name": "Clojure",
"bytes": "70573"
},
{
"name": "Crystal",
"bytes": "13882"
},
{
"name": "D",
"bytes": "11767"
},
{
"name": "Dart",
"bytes": "36881"
},
{
"name": "Elixir",
"bytes": "12344"
},
{
"name": "Erlang",
"bytes": "41367"
},
{
"name": "Go",
"bytes": "79623"
},
{
"name": "Groovy",
"bytes": "20518"
},
{
"name": "HTML",
"bytes": "136524"
},
{
"name": "Haskell",
"bytes": "35546"
},
{
"name": "Java",
"bytes": "499790"
},
{
"name": "JavaScript",
"bytes": "436746"
},
{
"name": "Kotlin",
"bytes": "45125"
},
{
"name": "Lua",
"bytes": "14599"
},
{
"name": "Makefile",
"bytes": "5519"
},
{
"name": "Meson",
"bytes": "846"
},
{
"name": "MoonScript",
"bytes": "2405"
},
{
"name": "Nim",
"bytes": "253"
},
{
"name": "Objective-C",
"bytes": "659"
},
{
"name": "PHP",
"bytes": "710362"
},
{
"name": "Perl",
"bytes": "8520"
},
{
"name": "Perl 6",
"bytes": "3505"
},
{
"name": "PowerShell",
"bytes": "36603"
},
{
"name": "Python",
"bytes": "303834"
},
{
"name": "QMake",
"bytes": "2301"
},
{
"name": "Ruby",
"bytes": "89603"
},
{
"name": "Rust",
"bytes": "15968"
},
{
"name": "Scala",
"bytes": "67151"
},
{
"name": "Shell",
"bytes": "238494"
},
{
"name": "Smarty",
"bytes": "436"
},
{
"name": "Swift",
"bytes": "26887"
},
{
"name": "UrWeb",
"bytes": "65535"
},
{
"name": "Vala",
"bytes": "1572"
},
{
"name": "Volt",
"bytes": "769"
}
],
"symlink_target": ""
} |
"""
Test the Man builder.
"""
import TestSCons
test = TestSCons.TestSCons()
try:
import libxml2
import libxslt
except:
try:
import lxml
except:
test.skip_test('Cannot find installed Python binding for libxml2 or lxml, skipping test.\n')
test.dir_fixture('image')
# Normal invocation
test.run(stderr=None)
test.must_exist(test.workpath('refdb.8'))
test.must_exist(test.workpath('refdb.sh.8'))
# Cleanup
test.run(arguments='-c')
test.must_not_exist(test.workpath('refdb.8'))
test.must_not_exist(test.workpath('refdb.sh.8'))
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| {
"content_hash": "9a6e17a245c5696efc17992819152e99",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 100,
"avg_line_length": 19.083333333333332,
"alnum_prop": 0.6986899563318777,
"repo_name": "timj/scons",
"id": "d9b16b325d7b5013ef4bea496bdc36400055730e",
"size": "1820",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "test/Docbook/basic/man/man.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2437"
},
{
"name": "C",
"bytes": "593"
},
{
"name": "C++",
"bytes": "598"
},
{
"name": "CSS",
"bytes": "18502"
},
{
"name": "D",
"bytes": "1817"
},
{
"name": "DTrace",
"bytes": "180"
},
{
"name": "HTML",
"bytes": "857084"
},
{
"name": "Java",
"bytes": "6860"
},
{
"name": "JavaScript",
"bytes": "215495"
},
{
"name": "Makefile",
"bytes": "3795"
},
{
"name": "Perl",
"bytes": "29978"
},
{
"name": "Python",
"bytes": "7393581"
},
{
"name": "Ruby",
"bytes": "10888"
},
{
"name": "Shell",
"bytes": "52480"
},
{
"name": "XSLT",
"bytes": "7567242"
}
],
"symlink_target": ""
} |
import copy
import logging
import os
import shlex
import shutil
import ssl
import socket
import sys
import uuid
from king_phisher import constants
from king_phisher import errors
from king_phisher import find
from king_phisher import ipaddress
from king_phisher import its
from king_phisher import serializers
from king_phisher import ssh_forward
from king_phisher import utilities
from king_phisher import version
from king_phisher.client import assistants
from king_phisher.client import client_rpc
from king_phisher.client import dialogs
from king_phisher.client import graphs
from king_phisher.client import gui_utilities
from king_phisher.client import plugins
from king_phisher.client import server_events
from king_phisher.client.dialogs import ssh_host_key
from king_phisher.client.windows import main
from king_phisher.client.windows import rpc_terminal
from king_phisher.constants import ConnectionErrorReason
import advancedhttpserver
from gi.repository import Gdk
from gi.repository import Gio
from gi.repository import GLib
from gi.repository import GObject
from gi.repository import Gtk
import paramiko
from smoke_zephyr.utilities import parse_server
from smoke_zephyr.utilities import parse_timespan
from smoke_zephyr.utilities import which
if its.py_v2:
from httplib import BadStatusLine
else:
from http.client import BadStatusLine
DISABLED = constants.DISABLED
GTK3_DEFAULT_THEME = 'Adwaita'
"""The default GTK3 Theme for style information."""
USER_DATA_PATH = os.path.join(GLib.get_user_config_dir(), 'king-phisher')
"""The default folder location of user specific data storage."""
if isinstance(Gtk.Widget, utilities.Mock):
_Gtk_Application = type('Gtk.Application', (object,), {'__module__': ''})
else:
_Gtk_Application = Gtk.Application
class KingPhisherClientApplication(_Gtk_Application):
"""
This is the top level King Phisher client object. It contains the
custom GObject signals, keeps all the GUI references, and manages
the RPC client object. This is also the parent window for most
GTK objects.
:GObject Signals: :ref:`gobject-signals-application-label`
"""
# pylint: disable=too-many-public-methods
__gsignals__ = {
'campaign-changed': (GObject.SIGNAL_RUN_FIRST, None, (str,)),
'campaign-created': (GObject.SIGNAL_RUN_FIRST, None, (str,)),
'campaign-delete': (GObject.SIGNAL_ACTION | GObject.SIGNAL_RUN_LAST, None, (str,)),
'campaign-set': (GObject.SIGNAL_RUN_FIRST, None, (str, str)),
'config-load': (GObject.SIGNAL_ACTION | GObject.SIGNAL_RUN_LAST, None, (bool,)),
'config-save': (GObject.SIGNAL_ACTION | GObject.SIGNAL_RUN_LAST, None, ()),
'credential-delete': (GObject.SIGNAL_ACTION | GObject.SIGNAL_RUN_LAST, None, (object,)),
'exit': (GObject.SIGNAL_ACTION | GObject.SIGNAL_RUN_LAST, None, ()),
'exit-confirm': (GObject.SIGNAL_ACTION | GObject.SIGNAL_RUN_LAST, None, ()),
'message-delete': (GObject.SIGNAL_ACTION | GObject.SIGNAL_RUN_LAST, None, (object,)),
'message-sent': (GObject.SIGNAL_RUN_FIRST, None, (str, str)),
'reload-css-style': (GObject.SIGNAL_ACTION | GObject.SIGNAL_RUN_LAST, None, ()),
'rpc-cache-clear': (GObject.SIGNAL_ACTION | GObject.SIGNAL_RUN_LAST, None, ()),
'server-connected': (GObject.SIGNAL_RUN_FIRST, None, ()),
'server-disconnected': (GObject.SIGNAL_RUN_FIRST, None, ()),
'sftp-client-start': (GObject.SIGNAL_ACTION | GObject.SIGNAL_RUN_LAST, None, ()),
'visit-delete': (GObject.SIGNAL_ACTION | GObject.SIGNAL_RUN_LAST, None, (object,)),
'unhandled-exception': (GObject.SIGNAL_RUN_FIRST, None, (object, object))
}
def __init__(self, config_file=None, use_plugins=True, use_style=True):
super(KingPhisherClientApplication, self).__init__()
if use_style:
gtk_version = (Gtk.get_major_version(), Gtk.get_minor_version())
if gtk_version > (3, 18):
self._theme_file = 'theme.v2.css'
else:
self._theme_file = 'theme.v1.css'
else:
self._theme_file = DISABLED
self.logger = logging.getLogger('KingPhisher.Client.Application')
# log version information for debugging purposes
self.logger.debug("gi.repository GLib version: {0}".format('.'.join(map(str, GLib.glib_version))))
self.logger.debug("gi.repository GObject version: {0}".format('.'.join(map(str, GObject.pygobject_version))))
self.logger.debug("gi.repository Gtk version: {0}.{1}.{2}".format(Gtk.get_major_version(), Gtk.get_minor_version(), Gtk.get_micro_version()))
if rpc_terminal.has_vte:
self.logger.debug("gi.repository VTE version: {0}".format(rpc_terminal.Vte._version))
if graphs.has_matplotlib:
self.logger.debug("matplotlib version: {0}".format(graphs.matplotlib.__version__))
# do not negotiate a single instance application
# https://developer.gnome.org/gio/unstable/GApplication.html#G-APPLICATION-NON-UNIQUE:CAPS
self.set_flags(Gio.ApplicationFlags.NON_UNIQUE)
self.set_property('application-id', 'org.king-phisher.client')
self.set_property('register-session', True)
self.config_file = config_file or os.path.join(USER_DATA_PATH, 'config.json')
"""The file containing the King Phisher client configuration."""
if not os.path.isfile(self.config_file):
self._create_config()
self.config = None
"""The primary King Phisher client configuration."""
self.main_window = None
"""The primary top-level :py:class:`~.MainAppWindow` instance."""
self.references = []
"""A list to store references to arbitrary objects in for avoiding garbage collection."""
self.rpc = None
"""The :py:class:`~.KingPhisherRPCClient` instance for the application."""
self._rpc_ping_event = None
# this will be populated when the RPC object is authenticated to ping
# the server periodically and keep the session alive
self.server_events = None
"""The :py:class:`~.ServerEventSubscriber` instance for the application to receive server events."""
self._ssh_forwarder = None
"""The SSH forwarder responsible for tunneling RPC communications."""
self.style_provider = None
try:
self.emit('config-load', True)
except IOError:
self.logger.critical('failed to load the client configuration')
raise
self.connect('window-added', self.signal_window_added)
self.actions = {}
self._create_actions()
if not use_plugins:
self.logger.info('disabling all plugins')
self.config['plugins.enabled'] = []
self.plugin_manager = plugins.ClientPluginManager(
[os.path.join(USER_DATA_PATH, 'plugins'), find.data_directory('plugins')],
self
)
if use_plugins:
self.plugin_manager.load_all()
def _create_actions(self):
action = Gio.SimpleAction.new('emit-application-signal', GLib.VariantType.new('s'))
action.connect('activate', self.action_emit_application_signal)
accelerators = (
('<Control><Shift>F1', 'rpc-cache-clear'),
('<Control><Shift>F2', 'config-save'),
('<Control><Shift>F12', 'reload-css-style')
)
for key, signal_name in accelerators:
if Gtk.check_version(3, 14, 0):
self.add_accelerator(key, 'win.emit-application-signal', GLib.Variant.new_string(signal_name))
else:
self.set_accels_for_action("win.emit-application-signal('{0}')".format(signal_name), (key,))
self.actions['emit-application-signal'] = action
self.add_action(action)
def _create_ssh_forwarder(self, server, username, password):
"""
Create and set the
:py:attr:`~.KingPhisherClientApplication._ssh_forwarder` attribute.
:param tuple server: The server information as a host and port tuple.
:param str username: The username to authenticate to the SSH server with.
:param str password: The password to authenticate to the SSH server with.
:rtype: int
:return: The local port that is forwarded to the remote server or None if the connection failed.
"""
active_window = self.get_active_window()
title_ssh_error = 'Failed To Connect To The SSH Service'
server_remote_port = self.config['server_remote_port']
try:
self._ssh_forwarder = ssh_forward.SSHTCPForwarder(
server,
username,
password,
('127.0.0.1', server_remote_port),
private_key=self.config.get('ssh_preferred_key'),
missing_host_key_policy=ssh_host_key.MissingHostKeyPolicy(self)
)
self._ssh_forwarder.start()
except ssh_forward.KingPhisherSSHKeyError as error:
gui_utilities.show_dialog_error(
'SSH Key Configuration Error',
active_window,
error.message
)
except errors.KingPhisherAbortError as error:
self.logger.info("ssh connection aborted ({0})".format(error.message))
except paramiko.PasswordRequiredException:
gui_utilities.show_dialog_error(title_ssh_error, active_window, 'The specified SSH key requires a password.')
except paramiko.AuthenticationException:
self.logger.warning('failed to authenticate to the remote ssh server')
gui_utilities.show_dialog_error(title_ssh_error, active_window, 'The server responded that the credentials are invalid.')
except paramiko.SSHException as error:
self.logger.warning("failed with ssh exception '{0}'".format(error.args[0]))
except socket.error as error:
gui_utilities.show_dialog_exc_socket_error(error, active_window, title=title_ssh_error)
except Exception as error:
self.logger.warning('failed to connect to the remote ssh server', exc_info=True)
gui_utilities.show_dialog_error(title_ssh_error, active_window, "An {0}.{1} error occurred.".format(error.__class__.__module__, error.__class__.__name__))
else:
return self._ssh_forwarder.local_server
self.emit('server-disconnected')
return
def _create_config(self):
config_dir = os.path.dirname(self.config_file)
if not os.path.isdir(config_dir):
self.logger.debug('creating the user configuration directory')
os.makedirs(config_dir)
client_template = find.data_file('client_config.json')
shutil.copy(client_template, self.config_file)
def add_reference(self, ref_object):
"""
Add *ref_object* to the :py:attr:`.references` so the object won't be
garbage collected. The object must either be a
:py:class:`.GladeGObject` or :py:class:`Gtk.Widget` instance so a
cleanup function can be attached to a ``destroy`` signal to remove the
reference automatically.
:param ref_object: The object to store a reference to.
:type ref_object: :py:class:`.GladeGObject`, :py:class:`Gtk.Widget`
"""
utilities.assert_arg_type(ref_object, (gui_utilities.GladeGObject, Gtk.Widget))
self.references.append(ref_object)
if isinstance(ref_object, gui_utilities.GladeGObject):
widget = getattr(ref_object, ref_object.top_gobject)
else:
widget = ref_object
widget.connect('destroy', self.signal_multi_destroy_remove_reference, ref_object)
def campaign_configure(self):
assistant = assistants.CampaignAssistant(self, campaign_id=self.config['campaign_id'])
assistant.assistant.set_transient_for(self.get_active_window())
assistant.assistant.set_modal(True)
self.add_reference(assistant)
assistant.interact()
def do_campaign_delete(self, campaign_id):
"""
Delete the campaign on the server. A confirmation dialog will be
displayed before the operation is performed. If the campaign is deleted
and a new campaign is not selected with
:py:meth:`.show_campaign_selection`, the client will quit.
"""
self.rpc('db/table/delete', 'campaigns', campaign_id)
if campaign_id == self.config['campaign_id'] and not self.show_campaign_selection():
gui_utilities.show_dialog_error('Now Exiting', self.get_active_window(), 'A campaign must be selected.')
self.quit()
def do_credential_delete(self, row_ids):
if len(row_ids) == 1:
self.rpc('db/table/delete', 'credentials', row_ids[0])
else:
self.rpc('db/table/delete/multi', 'credentials', row_ids)
def do_message_delete(self, row_ids):
if len(row_ids) == 1:
self.rpc('db/table/delete', 'messages', row_ids[0])
else:
self.rpc('db/table/delete/multi', 'messages', row_ids)
def do_visit_delete(self, row_ids):
if len(row_ids) == 1:
self.rpc('db/table/delete', 'visits', row_ids[0])
else:
self.rpc('db/table/delete/multi', 'visits', row_ids)
def campaign_rename(self):
"""
		Show a dialog prompting the user for a new name to assign to the
		currently selected campaign.
"""
campaign = self.rpc.remote_table_row('campaigns', self.config['campaign_id'])
prompt = dialogs.TextEntryDialog.build_prompt(self, 'Rename Campaign', 'Enter the new campaign name:', campaign.name)
response = prompt.interact()
if response is None or response == campaign.name:
return
self.rpc('db/table/set', 'campaigns', self.config['campaign_id'], 'name', response)
gui_utilities.show_dialog_info('Campaign Name Updated', self.get_active_window(), 'The campaign name was successfully changed.')
def exception_hook(self, exc_type, exc_value, exc_traceback):
if isinstance(exc_value, KeyboardInterrupt):
self.logger.warning('received a KeyboardInterrupt exception')
return
exc_info = (exc_type, exc_value, exc_traceback)
error_uid = uuid.uuid4()
self.logger.error("error uid: {0} an unhandled exception was thrown".format(str(error_uid)), exc_info=exc_info)
self.emit('unhandled-exception', exc_info, error_uid)
def quit(self, optional=False):
"""
Quit the client and perform any necessary clean up operations. If
*optional* is False then the exit-confirm signal will not be sent and
there will not be any opportunities for the client to cancel the
operation.
:param bool optional: Whether the quit is request is optional or not.
"""
self.emit('exit-confirm' if optional else 'exit')
def action_emit_application_signal(self, _, signal_name):
signal_name = signal_name.get_string()
self.logger.debug('action emit-application-signal invoked for ' + signal_name)
self.emit(signal_name)
def do_activate(self):
Gtk.Application.do_activate(self)
sys.excepthook = self.exception_hook
# reset theme settings to defaults so we have a standard baseline
settings = Gtk.Settings.get_default()
if settings.get_property('gtk-theme-name') != GTK3_DEFAULT_THEME:
self.logger.debug('resetting the gtk-theme-name property to it\'s default value')
settings.set_property('gtk-theme-name', GTK3_DEFAULT_THEME)
if settings.get_property('gtk-icon-theme-name') != GTK3_DEFAULT_THEME:
self.logger.debug('resetting the gtk-icon-theme-name property to it\'s default value')
settings.set_property('gtk-icon-theme-name', GTK3_DEFAULT_THEME)
settings.set_property('gtk-application-prefer-dark-theme', False)
# load a custom css theme file if one is available
theme_file = self.theme_file
if theme_file:
self.style_provider = self.load_style_css(theme_file)
elif theme_file is DISABLED:
self.logger.debug('no css theme file will be loaded (styling has been disabled)')
else:
self.logger.debug('no css theme file will be loaded (file not found)')
# create and show the main window
self.main_window = main.MainAppWindow(self.config, self)
self.main_tabs = self.main_window.tabs
enabled_plugins = sorted(set(self.config['plugins.enabled']))
for name in enabled_plugins:
try:
self.plugin_manager.load(name)
self.plugin_manager.enable(name)
except Exception:
self.config['plugins.enabled'].remove(name)
gui_utilities.show_dialog_error(
'Failed To Enable Plugin',
self.main_window,
"Plugin '{0}' could not be enabled.".format(name)
)
self.config['plugins.enabled'] = enabled_plugins
def do_campaign_set(self, *_):
self.logger.info("campaign set to {0} (id: {1})".format(self.config['campaign_name'], self.config['campaign_id']))
self.emit('rpc-cache-clear')
def do_config_save(self):
self.logger.info('writing the client configuration to disk')
config = copy.copy(self.config)
for key in self.config.keys():
if 'password' in key or key == 'server_config':
del config[key]
self.logger.info('writing the config to: ' + self.config_file)
with open(self.config_file, 'w') as config_file_h:
serializers.JSON.dump(config, config_file_h, pretty=True)
def do_exit(self):
self.plugin_manager.shutdown()
self.main_window.hide()
gui_utilities.gtk_widget_destroy_children(self.main_window)
gui_utilities.gtk_sync()
self.emit('server-disconnected')
self.main_window.destroy()
return
def do_exit_confirm(self):
self.emit('exit')
def do_reload_css_style(self):
if self.style_provider:
Gtk.StyleContext.remove_provider_for_screen(
Gdk.Screen.get_default(),
self.style_provider
)
self.style_provider = None
theme_file = self.theme_file
if theme_file:
self.style_provider = self.load_style_css(theme_file)
def do_rpc_cache_clear(self):
if not self.rpc:
return
self.rpc.cache_clear()
self.logger.debug('the rpc cache has been cleared')
def do_server_connected(self):
self.load_server_config()
campaign_id = self.config.get('campaign_id')
if not campaign_id:
if not self.show_campaign_selection():
self.logger.debug('no campaign selected, disconnecting and exiting')
self.emit('exit')
return True
campaign_info = self.rpc.remote_table_row('campaigns', self.config['campaign_id'], cache=True)
if campaign_info is None:
if not self.show_campaign_selection():
self.logger.debug('no campaign selected, disconnecting and exiting')
self.emit('exit')
return True
campaign_info = self.rpc.remote_table_row('campaigns', self.config['campaign_id'], cache=True, refresh=True)
self.config['campaign_name'] = campaign_info.name
self.emit('campaign-set', None, self.config['campaign_id'])
return
def do_shutdown(self):
Gtk.Application.do_shutdown(self)
sys.excepthook = sys.__excepthook__
self.emit('config-save')
def do_unhandled_exception(self, exc_info, error_uid):
dialogs.ExceptionDialog.interact_on_idle(self, exc_info=exc_info, error_uid=error_uid)
@property
def theme_file(self):
if not self._theme_file:
return DISABLED
return find.data_file(os.path.join('style', self._theme_file))
def do_config_load(self, load_defaults):
"""
Load the client configuration from disk and set the
:py:attr:`~.KingPhisherClientApplication.config` attribute.
:param bool load_defaults: Load missing options from the template configuration file.
"""
self.logger.info('loading the config from disk')
client_template = find.data_file('client_config.json')
self.logger.info('loading the config from: ' + self.config_file)
with open(self.config_file, 'r') as tmp_file:
self.config = serializers.JSON.load(tmp_file)
if load_defaults:
with open(client_template, 'r') as tmp_file:
client_template = serializers.JSON.load(tmp_file)
for key, value in client_template.items():
if not key in self.config:
self.config[key] = value
def merge_config(self, config_file, strict=True):
"""
Merge the configuration information from the specified configuration
file. Only keys which exist in the currently loaded configuration are
copied over while non-existent keys are skipped. The contents of the new
		configuration overwrite the existing values.
		:param bool strict: Do not try to remove trailing commas from the JSON data.
:param str config_file: The path to the configuration file to merge.
"""
with open(config_file, 'r') as tmp_file:
config = serializers.JSON.load(tmp_file, strict=strict)
if not isinstance(config, dict):
self.logger.error("can not merge configuration file: {0} (invalid format)".format(config_file))
return
self.logger.debug('merging configuration information from source file: ' + config_file)
for key, value in config.items():
if not key in self.config:
self.logger.warning("skipped merging non-existent configuration key {0}".format(key))
continue
self.config[key] = value
return
def load_server_config(self):
"""Load the necessary values from the server's configuration."""
self.config['server_config'] = self.rpc('config/get', ['server.require_id', 'server.secret_id', 'server.tracking_image', 'server.web_root'])
return
def load_style_css(self, css_file):
self.logger.debug('loading style from css file: ' + css_file)
css_file = Gio.File.new_for_path(css_file)
style_provider = Gtk.CssProvider()
style_provider.connect('parsing-error', self.signal_css_provider_parsing_error)
try:
style_provider.load_from_file(css_file)
except GLib.Error: # pylint: disable=catching-non-exception
self.logger.error('there was an error parsing the css file, it will not be applied as a style provider')
return None
Gtk.StyleContext.add_provider_for_screen(
Gdk.Screen.get_default(),
style_provider,
Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION
)
return style_provider
def server_connect(self, username, password, otp=None):
# pylint: disable=too-many-locals
server_version_info = None
title_rpc_error = 'Failed To Connect To The King Phisher RPC Service'
active_window = self.get_active_window()
server = parse_server(self.config['server'], 22)
if ipaddress.is_loopback(server[0]):
local_server = ('localhost', self.config['server_remote_port'])
self.logger.info("connecting to local king phisher instance")
else:
local_server = self._create_ssh_forwarder(server, username, password)
if not local_server:
return False, ConnectionErrorReason.ERROR_PORT_FORWARD
rpc = client_rpc.KingPhisherRPCClient(local_server, use_ssl=self.config.get('server_use_ssl'))
if self.config.get('rpc.serializer'):
try:
rpc.set_serializer(self.config['rpc.serializer'])
except ValueError as error:
self.logger.error("failed to set the rpc serializer, error: '{0}'".format(error.message))
generic_message = 'Can not contact the RPC HTTP service, ensure that the '
generic_message += "King Phisher Server is currently running on port {0}.".format(int(self.config['server_remote_port']))
connection_failed = True
try:
server_version_info = rpc('version')
if server_version_info is None:
raise RuntimeError('no version information was retrieved from the server')
except advancedhttpserver.RPCError as error:
self.logger.warning('failed to connect to the remote rpc service due to http status: ' + str(error.status))
gui_utilities.show_dialog_error(title_rpc_error, active_window, "The server responded with HTTP status: {0}.".format(str(error.status)))
except BadStatusLine as error:
self.logger.warning('failed to connect to the remote rpc service due to http bad status line: ' + error.line)
gui_utilities.show_dialog_error(title_rpc_error, active_window, generic_message)
except socket.error as error:
self.logger.debug('failed to connect to the remote rpc service due to a socket error', exc_info=True)
gui_utilities.show_dialog_exc_socket_error(error, active_window)
except ssl.CertificateError as error:
self.logger.warning('failed to connect to the remote rpc service with a https certificate error: ' + error.message)
gui_utilities.show_dialog_error(title_rpc_error, active_window, 'The server presented an invalid SSL certificate.')
except Exception:
self.logger.warning('failed to connect to the remote rpc service', exc_info=True)
gui_utilities.show_dialog_error(title_rpc_error, active_window, generic_message)
else:
connection_failed = False
if connection_failed:
self.emit('server-disconnected')
return False, ConnectionErrorReason.ERROR_CONNECTION
server_rpc_api_version = server_version_info.get('rpc_api_version', -1)
if isinstance(server_rpc_api_version, int):
# compatibility with pre-0.2.0 version
server_rpc_api_version = (server_rpc_api_version, 0)
self.logger.info(
"successfully connected to the king phisher server (version: {0} rpc api version: {1}.{2})".format(
server_version_info['version'],
server_rpc_api_version[0],
server_rpc_api_version[1]
)
)
error_text = None
if server_rpc_api_version[0] < version.rpc_api_version.major or (server_rpc_api_version[0] == version.rpc_api_version.major and server_rpc_api_version[1] < version.rpc_api_version.minor):
error_text = 'The server is running an old and incompatible version.'
error_text += '\nPlease update the remote server installation.'
elif server_rpc_api_version[0] > version.rpc_api_version.major:
error_text = 'The client is running an old and incompatible version.'
error_text += '\nPlease update the local client installation.'
if error_text:
gui_utilities.show_dialog_error('The RPC API Versions Are Incompatible', active_window, error_text)
self.emit('server-disconnected')
return False, ConnectionErrorReason.ERROR_INCOMPATIBLE_VERSIONS
login_result, login_reason = rpc.login(username, password, otp)
if not login_result:
self.logger.warning('failed to authenticate to the remote king phisher service, reason: ' + login_reason)
self.emit('server-disconnected')
return False, login_reason
rpc.username = username
self.logger.debug('successfully authenticated to the remote king phisher service')
self._rpc_ping_event = GLib.timeout_add_seconds(parse_timespan('5m'), rpc.ping)
self.rpc = rpc
event_subscriber = server_events.ServerEventSubscriber(rpc)
if not event_subscriber.is_connected:
self.logger.error('failed to connect the server event socket')
event_subscriber.reconnect = False
event_subscriber.shutdown()
return False, ConnectionErrorReason.ERROR_UNKNOWN
self.server_events = event_subscriber
self.emit('server-connected')
return True, ConnectionErrorReason.SUCCESS
def do_server_disconnected(self):
"""
Clean up the connections to the server and disconnect. This logs out
of the RPC, closes the server event socket, and stops the SSH
forwarder.
"""
if self.rpc is not None:
if self.server_events is not None:
self.server_events.reconnect = False
GLib.source_remove(self._rpc_ping_event)
try:
self.rpc('logout')
except advancedhttpserver.RPCError as error:
self.logger.warning('failed to logout, rpc error: ' + error.message)
else:
if self.server_events is not None:
self.server_events.shutdown()
self.server_events = None
self.rpc = None
if self._ssh_forwarder:
self._ssh_forwarder.stop()
self._ssh_forwarder = None
return
def show_campaign_graph(self, graph_name):
"""
Create a new :py:class:`.CampaignGraph` instance and make it into
a window. *graph_name* must be the name of a valid, exported
graph provider.
:param str graph_name: The name of the graph to make a window of.
"""
cls = graphs.get_graph(graph_name)
graph_inst = cls(self, style_context=self.style_context)
graph_inst.load_graph()
window = graph_inst.make_window()
window.show()
def show_campaign_selection(self):
"""
Display the campaign selection dialog in a new
:py:class:`.CampaignSelectionDialog` instance.
:return: Whether or not a campaign was selected.
:rtype: bool
"""
dialog = dialogs.CampaignSelectionDialog(self)
return dialog.interact() == Gtk.ResponseType.APPLY
def show_preferences(self):
"""
Display a
:py:class:`.dialogs.configuration.ConfigurationDialog`
		instance and save the configuration to disk if cancel is not selected.
"""
dialog = dialogs.ConfigurationDialog(self)
if dialog.interact() != Gtk.ResponseType.CANCEL:
self.emit('config-save')
def signal_css_provider_parsing_error(self, css_provider, css_section, gerror):
file_path = css_section.get_file()
if file_path:
file_path = file_path.get_path()
else:
file_path = '[ unknown file ]'
self.logger.error("css parser error ({0}) in {1}:{2}".format(gerror.message, file_path, css_section.get_start_line() + 1))
return
def signal_multi_destroy_remove_reference(self, widget, ref_object):
self.references.remove(ref_object)
def signal_window_added(self, _, window):
for action in self.actions.values():
window.add_action(action)
def do_sftp_client_start(self):
"""
Start the client's preferred sftp client application in a new process.
"""
if not self.config['sftp_client']:
gui_utilities.show_dialog_error('Invalid SFTP Configuration', self.get_active_window(), 'An SFTP client is not configured.\nOne can be configured in the Client Preferences.')
return False
command = str(self.config['sftp_client'])
sftp_bin = shlex.split(command)[0]
if not which(sftp_bin):
self.logger.error('could not locate the sftp binary: ' + sftp_bin)
gui_utilities.show_dialog_error('Invalid SFTP Configuration', self.get_active_window(), "Could not find the SFTP binary '{0}'".format(sftp_bin))
return False
try:
command = command.format(
server=self.config['server'],
username=self.config['server_username'],
web_root=self.config['server_config']['server.web_root']
)
except KeyError as error:
self.logger.error("key error while parsing the sftp command for token: {0}".format(error.args[0]))
gui_utilities.show_dialog_error('Invalid SFTP Configuration', self.get_active_window(), "Invalid token '{0}' in the SFTP command.".format(error.args[0]))
return False
self.logger.debug("starting sftp client command: {0}".format(command))
utilities.start_process(command, wait=False)
return
def stop_remote_service(self):
"""
Stop the remote King Phisher server. This will request that the
server stop processing new requests and exit. This will display
a confirmation dialog before performing the operation. If the
remote service is stopped, the client will quit.
"""
active_window = self.get_active_window()
if not gui_utilities.show_dialog_yes_no('Stop The Remote King Phisher Service?', active_window, 'This will stop the remote King Phisher service and\nnew incoming requests will not be processed.'):
return
self.rpc('shutdown')
self.logger.info('the remote king phisher service has been stopped')
gui_utilities.show_dialog_error('Now Exiting', active_window, 'The remote service has been stopped.')
self.quit()
return
@property
def style_context(self):
window = self.get_active_window() or self.main_window
if window is None:
return None
return window.get_style_context()
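# Rough usage sketch (editorial addition, not part of the original module): the
# application is driven by the normal GTK main loop, and the custom GObject
# signals declared in __gsignals__ can be observed with the standard connect()
# API. The config path below is hypothetical.
#
#   app = KingPhisherClientApplication(config_file='/tmp/kp-config.json', use_plugins=False)
#   app.connect('campaign-set', lambda _, _prev, campaign_id: print('campaign id:', campaign_id))
#   app.run([])  # blocks until the 'exit' signal tears the application down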
| {
"content_hash": "2d5f2ce98b7d485d02d1e0caad1ec047",
"timestamp": "",
"source": "github",
"line_count": 723,
"max_line_length": 198,
"avg_line_length": 41.40940525587828,
"alnum_prop": 0.7284812451985704,
"repo_name": "hdemeyer/king-phisher",
"id": "010ef0b745ddb091411c1102d267499571f2540e",
"size": "31526",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "king_phisher/client/application.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "33168"
},
{
"name": "HTML",
"bytes": "552"
},
{
"name": "JavaScript",
"bytes": "1328"
},
{
"name": "Jupyter Notebook",
"bytes": "11394"
},
{
"name": "Mako",
"bytes": "574"
},
{
"name": "Python",
"bytes": "966857"
},
{
"name": "Ruby",
"bytes": "7629"
}
],
"symlink_target": ""
} |
"""The anti-commutator: ``{A,B} = A*B + B*A``."""
from __future__ import print_function, division
from sympy import S, Expr, Mul, Integer
from sympy.printing.pretty.stringpict import prettyForm
from sympy.physics.quantum.operator import Operator
from sympy.physics.quantum.dagger import Dagger
__all__ = [
'AntiCommutator'
]
#-----------------------------------------------------------------------------
# Anti-commutator
#-----------------------------------------------------------------------------
class AntiCommutator(Expr):
"""The standard anticommutator, in an unevaluated state.
Evaluating an anticommutator is defined [1]_ as: ``{A, B} = A*B + B*A``.
This class returns the anticommutator in an unevaluated form. To evaluate
the anticommutator, use the ``.doit()`` method.
Canonical ordering of an anticommutator is ``{A, B}`` for ``A < B``. The
arguments of the anticommutator are put into canonical order using
``__cmp__``. If ``B < A``, then ``{A, B}`` is returned as ``{B, A}``.
Parameters
==========
A : Expr
The first argument of the anticommutator {A,B}.
B : Expr
The second argument of the anticommutator {A,B}.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.quantum import AntiCommutator
>>> from sympy.physics.quantum import Operator, Dagger
>>> x, y = symbols('x,y')
>>> A = Operator('A')
>>> B = Operator('B')
Create an anticommutator and use ``doit()`` to multiply them out.
>>> ac = AntiCommutator(A,B); ac
{A,B}
>>> ac.doit()
A*B + B*A
    The anticommutator orders its arguments in canonical order:
>>> ac = AntiCommutator(B,A); ac
{A,B}
Commutative constants are factored out:
>>> AntiCommutator(3*x*A,x*y*B)
3*x**2*y*{A,B}
Adjoint operations applied to the anticommutator are properly applied to
the arguments:
>>> Dagger(AntiCommutator(A,B))
{Dagger(A),Dagger(B)}
References
==========
.. [1] https://en.wikipedia.org/wiki/Commutator
"""
is_commutative = False
def __new__(cls, A, B):
r = cls.eval(A, B)
if r is not None:
return r
obj = Expr.__new__(cls, A, B)
return obj
@classmethod
def eval(cls, a, b):
if not (a and b):
return S.Zero
if a == b:
return Integer(2)*a**2
if a.is_commutative or b.is_commutative:
return Integer(2)*a*b
        # {xA,yB} -> xy*{A,B}
ca, nca = a.args_cnc()
cb, ncb = b.args_cnc()
c_part = ca + cb
if c_part:
return Mul(Mul(*c_part), cls(Mul._from_args(nca), Mul._from_args(ncb)))
# Canonical ordering of arguments
        # The anticommutator {A,B} is in canonical form if A < B.
if a.compare(b) == 1:
return cls(b, a)
def doit(self, **hints):
""" Evaluate anticommutator """
A = self.args[0]
B = self.args[1]
if isinstance(A, Operator) and isinstance(B, Operator):
try:
comm = A._eval_anticommutator(B, **hints)
except NotImplementedError:
try:
comm = B._eval_anticommutator(A, **hints)
except NotImplementedError:
comm = None
if comm is not None:
return comm.doit(**hints)
return (A*B + B*A).doit(**hints)
def _eval_adjoint(self):
return AntiCommutator(Dagger(self.args[0]), Dagger(self.args[1]))
def _sympyrepr(self, printer, *args):
return "%s(%s,%s)" % (
self.__class__.__name__, printer._print(
self.args[0]), printer._print(self.args[1])
)
def _sympystr(self, printer, *args):
return "{%s,%s}" % (self.args[0], self.args[1])
def _pretty(self, printer, *args):
pform = printer._print(self.args[0], *args)
pform = prettyForm(*pform.right((prettyForm(u','))))
pform = prettyForm(*pform.right((printer._print(self.args[1], *args))))
pform = prettyForm(*pform.parens(left='{', right='}'))
return pform
def _latex(self, printer, *args):
return "\\left\\{%s,%s\\right\\}" % tuple([
printer._print(arg, *args) for arg in self.args])
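# Editorial sketch (not part of the original module) of the hook that doit()
# dispatches to above: an Operator subclass may define _eval_anticommutator to
# short-circuit evaluation, and AntiCommutator(...).doit() will prefer it over
# the generic A*B + B*A expansion. The class below is purely hypothetical.
#
#   from sympy import Integer
#   from sympy.physics.quantum.operator import Operator
#
#   class NilOp(Operator):
#       def _eval_anticommutator(self, other, **hints):
#           return Integer(0)  # pretend this operator anticommutes with everything
#
#   AntiCommutator(NilOp('N'), Operator('B')).doit()  # -> 0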
| {
"content_hash": "93336afad8635989728a01ce0b44d8e6",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 83,
"avg_line_length": 30.02777777777778,
"alnum_prop": 0.5393154486586494,
"repo_name": "kaushik94/sympy",
"id": "142d2f77678b7520801d18b5979fca3cca5e8e5b",
"size": "4324",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "sympy/physics/quantum/anticommutator.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "5094"
},
{
"name": "Python",
"bytes": "13553568"
},
{
"name": "Ruby",
"bytes": "304"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "4008"
},
{
"name": "TeX",
"bytes": "32356"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
} |
import os
import dj_database_url
import re
from django.conf import settings
from django.core.urlresolvers import reverse_lazy
from cabot.settings_utils import environ_get_list, force_bool
from cabot.cabot_config import *
settings_dir = os.path.dirname(__file__)
PROJECT_ROOT = os.path.abspath(settings_dir)
DEBUG = force_bool(os.environ.get('DEBUG', False))
ADMINS = (
('Admin', os.environ.get('ADMIN_EMAIL', 'name@example.com')),
)
MANAGERS = ADMINS
if os.environ.get('CABOT_FROM_EMAIL'):
DEFAULT_FROM_EMAIL = os.environ['CABOT_FROM_EMAIL']
DATABASES = {'default': dj_database_url.config()}
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
URL_PREFIX = os.environ.get('URL_PREFIX', '/').rstrip('/')
LOGIN_URL = os.environ.get('LOGIN_URL', reverse_lazy('login'))
LOGIN_REDIRECT_URL = reverse_lazy('services')
USE_TZ = True
ALLOWED_HOSTS = os.environ.get('ALLOWED_HOSTS', '*').split(',')
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = os.environ.get('TIME_ZONE', 'Etc/UTC')
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media/')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '%s/media/' % URL_PREFIX
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, '.collectstatic/')
COMPRESS_ROOT = STATIC_ROOT
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '%s/static/' % URL_PREFIX
COMPRESS_URL = STATIC_URL
# Additional locations of static files
STATICFILES_DIRS = [os.path.join(PROJECT_ROOT, 'static')]
if not DEBUG:
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
if os.environ.get('WWW_SCHEME') == 'https':
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Make this unique, and don't share it with anybody.
SECRET_KEY = os.environ.get(
'DJANGO_SECRET_KEY', '2FL6ORhHwr5eX34pP9mMugnIOd3jzVuT45f7w430Mt5PnEwbcJgma0q8zUXNZ68A')
# List of callables that know how to import templates from various sources.
TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': (
os.path.join(PROJECT_ROOT, 'templates'),
),
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'cabot.context_processors.global_settings',
],
'debug': force_bool(os.environ.get('TEMPLATE_DEBUG', False))
},
}]
MIDDLEWARE_CLASSES = (
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'cabot.urls'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'django_filters',
'compressor',
'polymorphic',
'jsonify',
'cabot.cabotapp',
'rest_framework',
'dal',
'dal_select2',
'django.contrib.admin',
)
AUTH_USER_MODEL = 'auth.User'
# Load additional apps from configuration file
CABOT_PLUGINS_ENABLED_PARSED = []
for plugin in CABOT_PLUGINS_ENABLED.split(","):
    # Hack to clean up if versions of plugins are specified
exploded = re.split(r'[<>=]+', plugin)
CABOT_PLUGINS_ENABLED_PARSED.append(exploded[0])
INSTALLED_APPS += tuple(CABOT_PLUGINS_ENABLED_PARSED)
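# Worked example (illustrative values only): for an entry such as
# 'cabot_alert_hipchat>=1.8.0',
#
#   >>> re.split(r'[<>=]+', 'cabot_alert_hipchat>=1.8.0')
#   ['cabot_alert_hipchat', '1.8.0']
#
# so only the bare package name is appended to INSTALLED_APPS above.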
COMPRESS_PRECOMPILERS = (
('text/coffeescript', 'coffee --compile --stdio'),
('text/eco',
'eco -i TEMPLATES {infile} && cat "$(echo "{infile}" | sed -e "s/\.eco$/.js/g")"'),
('text/less', 'lessc {infile} > {outfile}'),
)
# For the email settings we accept both old and new names
EMAIL_HOST = environ_get_list(['EMAIL_HOST', 'SES_HOST'], 'localhost')
EMAIL_PORT = int(environ_get_list(['EMAIL_PORT', 'SES_PORT'], 25))
EMAIL_HOST_USER = environ_get_list(['EMAIL_USER', 'SES_USER'], '')
EMAIL_HOST_PASSWORD = environ_get_list(['EMAIL_PASSWORD', 'SES_PASS'], '')
EMAIL_BACKEND = environ_get_list(
['EMAIL_BACKEND', 'SES_BACKEND'],
'django.core.mail.backends.smtp.EmailBackend'
)
EMAIL_USE_TLS = force_bool(environ_get_list(['EMAIL_USE_TLS', 'SES_USE_TLS'], False))
EMAIL_USE_SSL = force_bool(environ_get_list(['EMAIL_USE_SSL', 'SES_USE_SSL'], not EMAIL_USE_TLS))
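# Example of the fallback behaviour above (hypothetical values; environ_get_list
# is assumed to return the first environment variable in the list that is set):
#
#   EMAIL_HOST=smtp.example.com set  -> EMAIL_HOST == 'smtp.example.com'
#   only SES_HOST=mail.example.com   -> EMAIL_HOST == 'mail.example.com'
#   neither variable set             -> EMAIL_HOST falls back to 'localhost'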
COMPRESS_OFFLINE = not DEBUG
RECOVERY_SNIPPETS_WHITELIST = (
r'https?://[^.]+\.hackpad\.com/[^./]+\.js',
r'https?://gist\.github\.com/[^.]+\.js',
r'https?://www\.refheap\.com/[^.]+\.js',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'logging.NullHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
},
'log_file': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'formatter': 'verbose',
'filename': os.environ.get('LOG_FILE', '/dev/null'),
'maxBytes': 1024 * 1024 * 25, # 25 MB
'backupCount': 5,
},
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django': {
'handlers': ['console', 'log_file', 'mail_admins'],
'level': 'INFO',
'propagate': True,
},
'django.request': {
'handlers': ['console', 'log_file', 'mail_admins'],
'level': 'ERROR',
'propagate': False,
},
'django.db.backends': {
'handlers': ['console', 'log_file', 'mail_admins'],
'level': 'INFO',
'propagate': False,
},
# Catch All Logger -- Captures any other logging
'': {
'handlers': ['console', 'log_file', 'mail_admins'],
'level': 'INFO',
'propagate': True,
}
}
}
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissions',
],
'DEFAULT_FILTER_BACKENDS': [
'django_filters.rest_framework.DjangoFilterBackend',
'rest_framework.filters.OrderingFilter',
]
}
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
)
AUTH_LDAP = force_bool(os.environ.get('AUTH_LDAP', False))
if AUTH_LDAP:
from settings_ldap import *
AUTHENTICATION_BACKENDS += tuple(['django_auth_ldap.backend.LDAPBackend'])
# Github SSO
AUTH_GITHUB_ENTERPRISE_ORG = force_bool(os.environ.get('AUTH_GITHUB_ENTERPRISE_ORG', False))
AUTH_GITHUB_ORG = force_bool(os.environ.get('AUTH_GITHUB_ORG', False))
AUTH_GOOGLE_OAUTH2 = force_bool(os.environ.get('AUTH_GOOGLE_OAUTH2', False))
AUTH_SOCIAL = AUTH_GITHUB_ORG or AUTH_GITHUB_ENTERPRISE_ORG or AUTH_GOOGLE_OAUTH2
if AUTH_SOCIAL:
SOCIAL_AUTH_URL_NAMESPACE = 'social'
INSTALLED_APPS += tuple(['social_django'])
if AUTH_GITHUB_ORG:
AUTHENTICATION_BACKENDS += tuple(['social_core.backends.github.GithubOrganizationOAuth2'])
SOCIAL_AUTH_GITHUB_ORG_KEY = os.environ.get('AUTH_GITHUB_ORG_CLIENT_ID')
SOCIAL_AUTH_GITHUB_ORG_SECRET = os.environ.get('AUTH_GITHUB_ORG_CLIENT_SECRET')
SOCIAL_AUTH_GITHUB_ORG_NAME = os.environ.get('AUTH_GITHUB_ORG_NAME')
SOCIAL_AUTH_GITHUB_ORG_SCOPE = ['read:org']
if AUTH_GITHUB_ENTERPRISE_ORG:
AUTHENTICATION_BACKENDS += tuple(['social_core.backends.github_enterprise.GithubEnterpriseOrganizationOAuth2'])
SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_URL = os.environ.get('AUTH_GITHUB_ENTERPRISE_ORG_URL')
SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_API_URL = os.environ.get('AUTH_GITHUB_ENTERPRISE_ORG_API_URL')
SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_KEY = os.environ.get('AUTH_GITHUB_ENTERPRISE_ORG_CLIENT_ID')
SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_SECRET = os.environ.get('AUTH_GITHUB_ENTERPRISE_ORG_CLIENT_SECRET')
SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_NAME = os.environ.get('AUTH_GITHUB_ENTERPRISE_ORG_NAME')
if AUTH_GOOGLE_OAUTH2:
AUTHENTICATION_BACKENDS += tuple(['social_core.backends.google.GoogleOAuth2'])
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = os.environ.get('AUTH_GOOGLE_OAUTH2_KEY')
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = os.environ.get('AUTH_GOOGLE_OAUTH2_SECRET')
SOCIAL_AUTH_GOOGLE_OAUTH2_WHITELISTED_DOMAINS = os.environ.get('AUTH_GOOGLE_OAUTH2_WHITELISTED_DOMAINS', '').split(',')
EXPOSE_USER_API = force_bool(os.environ.get('EXPOSE_USER_API', False))
ENABLE_SUBSCRIPTION = force_bool(os.environ.get('ENABLE_SUBSCRIPTION', True))
ENABLE_DUTY_ROTA = force_bool(os.environ.get('ENABLE_DUTY_ROTA', True))
| {
"content_hash": "146abfb974214fdf82de01196f0ce52d",
"timestamp": "",
"source": "github",
"line_count": 312,
"max_line_length": 123,
"avg_line_length": 36.26923076923077,
"alnum_prop": 0.6724107458465889,
"repo_name": "maks-us/cabot",
"id": "a23340d7859b60a4382901bd44bfa399bf3d3485",
"size": "11316",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cabot/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "22025"
},
{
"name": "HTML",
"bytes": "84338"
},
{
"name": "JavaScript",
"bytes": "368548"
},
{
"name": "Python",
"bytes": "204139"
},
{
"name": "Shell",
"bytes": "2008"
}
],
"symlink_target": ""
} |
import six
from django.db import models
# Create your models here.
from wagtail.wagtailcore.models import Page, PageBase
from wagtailmetadata.models import MetadataPageMixin
class HomePage(six.with_metaclass(PageBase, MetadataPageMixin, Page)):
content_panels = Page.content_panels + [
]
promote_panels = Page.promote_panels + MetadataPageMixin.panels
| {
"content_hash": "d6772eedb750d10e9d0e4ea01b8c9cca",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 70,
"avg_line_length": 30.583333333333332,
"alnum_prop": 0.784741144414169,
"repo_name": "Mandelbrew/cookiecutter-django",
"id": "69769f05c98d1a049d1e8b24dba1f5144decedbf",
"size": "367",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "{{cookiecutter.project_slug}}/application/pages/models.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "102748"
},
{
"name": "Dockerfile",
"bytes": "1066"
},
{
"name": "HTML",
"bytes": "296811"
},
{
"name": "JavaScript",
"bytes": "19028"
},
{
"name": "Makefile",
"bytes": "6833"
},
{
"name": "Python",
"bytes": "31620"
},
{
"name": "Shell",
"bytes": "1420"
}
],
"symlink_target": ""
} |
"""Library to gather runtime performance metrics.
This module exposes the ResourceMonitor class, which client code can use to
gather resource usage metrics about their program. An example usage would look
something like:
with ResourceMonitor() as monitor:
... do work ...
metrics = monitor.metrics()
"""
import platform
import resource
import time
import psutil
from deepvariant.protos import resources_pb2
class ResourceMonitor(object):
"""Class for collecting resource usage info from this or child process."""
def __init__(self):
"""Constructs a ResourceMonitor object."""
self.wall_start = None
self.metrics_pb = self._initial_metrics_protobuf()
def _initial_metrics_protobuf(self):
"""Returns an initialized ResourceMetrics proto.
This function also fills in the "constant" fields of the ResourceMetrics
proto that don't depend on the actual running commands, such as host_name.
Returns:
learning.genomics.deepvariant.ResourceMetrics proto.
"""
return resources_pb2.ResourceMetrics(
host_name=_get_host_name(),
cpu_frequency_mhz=_get_cpu_frequency(),
physical_core_count=_get_cpu_count(),
total_memory_mb=_get_total_memory())
def __enter__(self):
return self.start()
def __exit__(self, unused_type, unused_value, unused_traceback):
pass
def start(self):
"""Starts timers associated with resource collection.
This method must be called before metrics().
Returns:
self to enable the idiom `monitor = ResourceMonitor().start()`.
"""
self.wall_start = time.time()
return self
def metrics(self):
"""Collects and return runtime metrics as a ResourceMetrics proto.
This method can be called multiple times, but wall clock time is always
reckoned from the time of the last start() call.
Returns:
A learning.genomics.deepvariant.ResourceMetrics proto message.
Raises:
RuntimeError: if start() was not called previously.
"""
if self.wall_start is None:
raise RuntimeError('start() must be called prior to metrics()')
self.metrics_pb.wall_time_seconds = time.time() - self.wall_start
# Consider using psutil.cpu_times() instead to get more detailed information
# about the usage in self and all children.
try:
rusage = resource.getrusage(resource.RUSAGE_SELF)
self.metrics_pb.cpu_user_time_seconds = rusage.ru_utime
self.metrics_pb.cpu_system_time_seconds = rusage.ru_stime
self.metrics_pb.memory_peak_rss_mb = int(rusage.ru_maxrss / 1024)
except resource.error:
# The OS call to get rusage failed, so just don't set the field values,
# leaving them as the default values of 0.
pass
# Create a psutil.Process pointed at the current process.
process = psutil.Process()
io_counters = process.io_counters()
self.metrics_pb.read_bytes = io_counters.read_bytes
self.metrics_pb.write_bytes = io_counters.write_bytes
return self.metrics_pb
# ------------------------------------------------------------------------------
# Simple functions for getting host_name, cpu count, etc. Isolated here to make
# them mockable.
# ------------------------------------------------------------------------------
def _get_host_name():
"""Gets the host name of this machine."""
return platform.node()
def _get_cpu_count():
"""Gets the number of physical cores in this machine.
Returns:
int >= 1 if the call to get the cpu_count succeeded, or 0 if not.
"""
return psutil.cpu_count(logical=False) or 0
def _get_cpu_frequency():
"""Gets the frequency in MHz of the cpus in this machine.
Returns:
float > 0 if the call to get the cpu_frequency succeeded. This information
may not be available on all systems, in which case we return 0.0.
"""
try:
freq = psutil.cpu_freq()
return freq.current if freq is not None else 0.0
except NotImplementedError:
return 0.0
def _get_total_memory():
"""Gets the total memory in megabytes in this machine."""
return int(psutil.virtual_memory().total / (1024 * 1024))
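# Small usage sketch (editorial addition, not part of the original module); the
# field names are those populated on the ResourceMetrics proto above, and
# run_workload() is a hypothetical placeholder:
#
#   with ResourceMonitor() as monitor:
#     run_workload()
#   metrics = monitor.metrics()
#   print(metrics.wall_time_seconds, metrics.memory_peak_rss_mb)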
| {
"content_hash": "830967f461615afe675c1b9046be1fd7",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 80,
"avg_line_length": 30.54074074074074,
"alnum_prop": 0.6689303904923599,
"repo_name": "google/deepvariant",
"id": "21b479c836659e58e4abc5405208066a712c9f2f",
"size": "5667",
"binary": false,
"copies": "1",
"ref": "refs/heads/r1.4",
"path": "deepvariant/resources.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "587559"
},
{
"name": "Dockerfile",
"bytes": "9270"
},
{
"name": "Python",
"bytes": "1617393"
},
{
"name": "Shell",
"bytes": "91210"
},
{
"name": "Starlark",
"bytes": "75694"
}
],
"symlink_target": ""
} |
"""
Created on 24 Oct 2014
@author: jmht
"""
import copy
import glob
import logging
import os
import pandas as pd
import shutil
import sys
from ample.util import ample_util, csymmatch, mtz_util, pdb_edit, pdb_model, reforigin, residue_map, rio, shelxe, \
tm_util
from pyjob import Script
logger = logging.getLogger(__name__)
_oldroot = None
_newroot = None
SHELXE_STEM = 'shelxe'
_CSV_KEYLIST = [
'ample_version',
# Native info
'native_pdb_code',
'native_pdb_title',
'native_pdb_resolution',
'native_pdb_solvent_content',
'native_pdb_space_group',
'native_pdb_num_atoms',
'native_pdb_num_residues',
'native_pdb_num_chains',
# The modelled sequence
'fasta_length',
# Get the ensemble data and add to the MRBUMP data
'ensemble_name',
'ensemble_percent_model',
# cluster info
'cluster_method',
'num_clusters',
'cluster_num',
'cluster_centroid',
'cluster_num_models',
# truncation info
'truncation_level',
'percent_truncation',
'truncation_method',
'truncation_pruning',
'truncation_variance',
'num_residues',
'pruned_residues',
# subclustering info
'subcluster_num_models',
'subcluster_radius_threshold',
'subcluster_centroid_model',
'subcluster_centroid_model_RMSD',
'subcluster_centroid_model_TM',
# ensemble info
# 'name',
'side_chain_treatment',
'ensemble_num_atoms',
# MR result info
# 'name',
'MR_program',
'Solution_Type',
'PHASER_LLG',
'PHASER_TFZ',
'PHASER_RFZ',
'PHASER_time',
'PHASER_killed',
'PHASER_version',
'PHASER_errors',
'MOLREP_score',
'MOLREP_time',
'MOLREP_version',
'MR_MPE',
'MR_wMPE',
'REFMAC_Rfact',
'REFMAC_Rfree',
# 'REFMAC_MPE',
# 'REFMAC_wMPE',
'REFMAC_version',
'BUCC_final_Rfact',
'BUCC_final_Rfree',
'BUCC_version',
'ARP_final_Rfact',
'ARP_final_Rfree',
'ARP_version',
'SHELXE_CC',
'SHELXE_ACL',
'SHELXE_MCL',
'SHELXE_NC',
'SHELXE_wPE',
'SHELXE_wMPE',
'SHELXE_os',
'SHELXE_time',
'SHELXE_version',
'SXRBUCC_version',
'SXRBUCC_final_Rfact',
'SXRBUCC_final_Rfree',
'SXRBUCC_MPE',
'SXRBUCC_wMPE',
'SXRARP_version',
'SXRARP_final_Rfact',
'SXRARP_final_Rfree',
'SXRARP_MPE',
'SXRARP_wMPE',
'num_placed_chains',
'num_placed_atoms',
'reforigin_RMSD',
'AA_num_contacts',
'RIO_num_contacts',
'RIO_in_register',
'RIO_oo_register',
'RIO_backwards',
'RIO',
'RIO_no_cat',
'RIO_norm',
]
def analyse(amoptd, newroot=None):
if newroot:
assert os.path.isdir(newroot)
global _oldroot, _newroot
_newroot = newroot
_oldroot = amoptd['work_dir']
if not os.path.isdir(fixpath(amoptd['benchmark_dir'])):
os.mkdir(fixpath(amoptd['benchmark_dir']))
os.chdir(fixpath(amoptd['benchmark_dir']))
# AnalysePdb may have already been called from the main script
if amoptd['native_pdb'] and 'native_pdb_std' not in amoptd:
analysePdb(amoptd)
if amoptd['native_pdb_std']:
# Generate an SHELXE HKL and ENT file so that we can calculate phase errors
mtz_util.to_hkl(amoptd['mtz'], hkl_file=os.path.join(amoptd['benchmark_dir'], SHELXE_STEM + ".hkl"))
shutil.copyfile(amoptd['native_pdb_std'], os.path.join(amoptd['benchmark_dir'], SHELXE_STEM + ".ent"))
if amoptd['native_pdb'] and not (amoptd['homologs'] or amoptd['ideal_helices'] or amoptd['helical_ensembles']
or amoptd['import_ensembles'] or amoptd['single_model_mode']):
analyseModels(amoptd)
# Get the ensembling data
if 'ensembles_data' not in amoptd or not len(amoptd['ensembles_data']):
logger.critical("Benchmark cannot find any ensemble data!")
return
# Get dict of ensemble name -> ensemble result
ensemble_results = {e['name']: e for e in amoptd['ensembles_data']}
# Get mrbump_results for cluster
if 'mrbump_results' not in amoptd or not len(amoptd['mrbump_results']):
logger.critical("Benchmark cannot find any mrbump results!")
return
data = []
mrinfo = shelxe.MRinfo(amoptd['shelxe_exe'], amoptd['native_pdb_info'].pdb, amoptd['mtz'])
for result in amoptd['mrbump_results']:
# use mrbump dict as basis for result object
d = copy.copy(result)
# Add in the data from the ensemble
d.update(ensemble_results[d['ensemble_name']])
assert d['ensemble_name'] == d['name'], d
# Hack for old results
if 'truncation_num_residues' in d:
d['num_residues'] = d['truncation_num_residues']
del d['truncation_num_residues']
# Hack for ideal helices where num_residues are missing
if amoptd['ideal_helices'] and ('num_residues' not in d or d['num_residues'] is None):
d['num_residues'] = int(d['ensemble_name'].lstrip('polyala_'))
# Get the ensemble data and add to the MRBUMP data
d['ensemble_percent_model'] = int((float(d['num_residues']) / float(amoptd['fasta_length'])) * 100)
if amoptd['native_pdb']:
# Add in stuff we've cleaned from the pdb
native_keys = [
'native_pdb_code',
'native_pdb_title',
'native_pdb_resolution',
'native_pdb_solvent_content',
'native_pdb_space_group',
'native_pdb_num_chains',
'native_pdb_num_atoms',
'native_pdb_num_residues',
]
d.update({key: amoptd[key] for key in native_keys})
# Analyse the solution
analyseSolution(amoptd, d, mrinfo)
data.append(d)
# Put everything in a pandas DataFrame
dframe = pd.DataFrame(data)
# General stuff
dframe['ample_version'] = amoptd['ample_version']
dframe['fasta_length'] = amoptd['fasta_length']
# Analyse subcluster centroid models
if 'subcluster_centroid_model' in dframe.columns and amoptd['native_pdb']:
centroid_index = dframe.index
centroid_models = [fixpath(f) for f in dframe.subcluster_centroid_model]
native_pdb_std = fixpath(amoptd['native_pdb_std'])
fasta = fixpath(amoptd['fasta'])
# Calculation of TMscores for subcluster centroid models
if amoptd['have_tmscore']:
tm = tm_util.TMscore(amoptd['tmscore_exe'], wdir=fixpath(amoptd['benchmark_dir']), **amoptd)
tm_results = tm.compare_structures(centroid_models, [native_pdb_std], [fasta])
centroid_tmscores = [r['tmscore'] for r in tm_results]
centroid_rmsds = [r['rmsd'] for r in tm_results]
else:
raise RuntimeError("No program to calculate tmscores!")
dframe['subcluster_centroid_model_TM'] = pd.Series(centroid_tmscores, index=centroid_index)
dframe['subcluster_centroid_model_RMSD'] = pd.Series(centroid_rmsds, index=centroid_index)
# Save the data
file_name = os.path.join(fixpath(amoptd['benchmark_dir']), 'results.csv')
dframe.to_csv(file_name, columns=_CSV_KEYLIST, index=False, na_rep="N/A")
amoptd['benchmark_results'] = dframe.to_dict('records')
return
def analyseModels(amoptd):
# Get hold of a full model so we can do the mapping of residues
refModelPdb = glob.glob(os.path.join(amoptd['models_dir'], "*.pdb"))[0]
nativePdbInfo = amoptd['native_pdb_info']
refModelPdbInfo = pdb_edit.get_info(refModelPdb)
amoptd['ref_model_pdb_info'] = refModelPdbInfo
try:
resSeqMap = residue_map.residueSequenceMap()
resSeqMap.fromInfo(
refInfo=refModelPdbInfo,
refChainID=refModelPdbInfo.models[0].chains[0], # Only 1 chain in model
targetInfo=nativePdbInfo,
targetChainID=nativePdbInfo.models[0].chains[0],
)
amoptd['res_seq_map'] = resSeqMap
except Exception as e:
logger.exception("Error calculating resSeqMap: %s" % e)
amoptd['res_seq_map'] = None # Won't be able to calculate RIO scores
if amoptd['have_tmscore']:
try:
tm = tm_util.TMscore(amoptd['tmscore_exe'], wdir=fixpath(amoptd['benchmark_dir']))
# Calculation of TMscores for all models
logger.info("Analysing Rosetta models with TMscore")
model_list = sorted(glob.glob(os.path.join(amoptd['models_dir'], "*pdb")))
structure_list = [amoptd['native_pdb_std']]
amoptd['tmComp'] = tm.compare_structures(model_list, structure_list, fastas=[amoptd['fasta']])
except Exception as e:
logger.exception("Unable to run TMscores: %s", e)
else:
raise RuntimeError("No program to calculate TMSCORES")
def analysePdb(amoptd):
"""Collect data on the native pdb structure"""
nativePdb = fixpath(amoptd['native_pdb'])
nativePdbInfo = pdb_edit.get_info(nativePdb)
# number atoms/residues
natoms, nresidues = pdb_edit.num_atoms_and_residues(nativePdb)
# Get information on the origins for this spaceGroup
try:
originInfo = pdb_model.OriginInfo(spaceGroupLabel=nativePdbInfo.crystalInfo.spaceGroup)
except Exception:
originInfo = None
# Do this here as a bug in pdbcur can knacker the CRYST1 data
amoptd['native_pdb_code'] = nativePdbInfo.pdbCode
amoptd['native_pdb_title'] = nativePdbInfo.title
amoptd['native_pdb_resolution'] = nativePdbInfo.resolution
amoptd['native_pdb_solvent_content'] = nativePdbInfo.solventContent
amoptd['native_pdb_matthews_coefficient'] = nativePdbInfo.matthewsCoefficient
if not originInfo:
space_group = "P1"
else:
space_group = originInfo.spaceGroup()
amoptd['native_pdb_space_group'] = space_group
amoptd['native_pdb_num_atoms'] = natoms
amoptd['native_pdb_num_residues'] = nresidues
# First check if the native has > 1 model and extract the first if so
if len(nativePdbInfo.models) > 1:
logger.info("nativePdb has > 1 model - using first")
nativePdb1 = ample_util.filename_append(
filename=nativePdb, astr="model1", directory=fixpath(amoptd['work_dir'])
)
pdb_edit.extract_model(nativePdb, nativePdb1, modelID=nativePdbInfo.models[0].serial)
nativePdb = nativePdb1
# Standardise the PDB to rename any non-standard AA, remove solvent etc
nativePdbStd = ample_util.filename_append(filename=nativePdb, astr="std", directory=fixpath(amoptd['work_dir']))
pdb_edit.standardise(nativePdb, nativePdbStd, del_hetatm=True)
nativePdb = nativePdbStd
# Get the new Info about the native
nativePdbInfo = pdb_edit.get_info(nativePdb)
    # For comparison of the shelxe model we need a single chain from the native, so we get this here
if len(nativePdbInfo.models[0].chains) > 1:
nativeChain1 = ample_util.filename_append(
filename=nativePdbInfo.pdb, astr="chain1", directory=fixpath(amoptd['work_dir'])
)
pdb_edit.merge_chains(nativePdbInfo.pdb, nativeChain1)
else:
nativeChain1 = nativePdbInfo.pdb
# Additional data
amoptd['native_pdb_num_chains'] = len(nativePdbInfo.models[0].chains)
amoptd['native_pdb_info'] = nativePdbInfo
amoptd['native_pdb_std'] = nativePdbStd
amoptd['native_pdb_1chain'] = nativeChain1
amoptd['native_pdb_origin_info'] = originInfo
return
def analyseSolution(amoptd, d, mrinfo):
logger.info("Benchmark: analysing result: {0}".format(d['ensemble_name']))
mrPdb = None
if d['MR_program'] == "PHASER":
mrPdb = d['PHASER_pdbout']
mrMTZ = d['PHASER_mtzout']
elif d['MR_program'] == "MOLREP":
mrPdb = d['MOLREP_pdbout']
elif d['MR_program'] == "unknown":
return
if mrPdb is None or not os.path.isfile(mrPdb):
# logger.critical("Cannot find mrPdb {0} for solution {1}".format(mrPdb,d))
return
# debug - copy into work directory as reforigin struggles with long pathnames
shutil.copy(mrPdb, os.path.join(fixpath(amoptd['benchmark_dir']), os.path.basename(mrPdb)))
mrPdbInfo = pdb_edit.get_info(mrPdb)
d['num_placed_chains'] = mrPdbInfo.numChains()
d['num_placed_atoms'] = mrPdbInfo.numAtoms()
d['num_placed_CA'] = mrPdbInfo.numCalpha()
if amoptd['native_pdb']:
if not d['SHELXE_os']:
logger.critical("mrPdb {0} has no SHELXE_os origin shift. Calculating...".format(mrPdb))
mrinfo.analyse(mrPdb)
mrOrigin = mrinfo.originShift
d['SHELXE_MPE'] = mrinfo.MPE
d['SHELXE_wMPE'] = mrinfo.wMPE
else:
mrOrigin = [c * -1 for c in d['SHELXE_os']]
# Move pdb onto new origin
originPdb = ample_util.filename_append(mrPdb, astr='offset', directory=fixpath(amoptd['benchmark_dir']))
pdb_edit.translate(mrPdb, originPdb, mrOrigin)
        # offset.pdb is the mrModel shifted onto the new origin; use csymmatch to wrap it onto the native
csymmatch.Csymmatch().wrapModelToNative(
originPdb,
amoptd['native_pdb'],
csymmatchPdb=os.path.join(
fixpath(amoptd['benchmark_dir']), "phaser_{0}_csymmatch.pdb".format(d['ensemble_name'])
),
)
# can now delete origin pdb
os.unlink(originPdb)
# Calculate phase error for the MR PDB
try:
mrinfo.analyse(mrPdb)
d['MR_MPE'] = mrinfo.MPE
d['MR_wMPE'] = mrinfo.wMPE
except Exception as e:
logger.critical("Error analysing mrPdb: {0}\n{1}".format(mrPdb, e))
# We cannot calculate the Reforigin RMSDs or RIO scores for runs where we don't have a full initial model
# to compare to the native to allow us to determine which parts of the ensemble correspond to which parts of
# the native structure - or if we were unable to calculate a res_seq_map
        if not (
            amoptd['homologs']
            or amoptd['ideal_helices']
            or amoptd['helical_ensembles']
            or amoptd['import_ensembles']
            or amoptd['single_model_mode']
        ) and amoptd['res_seq_map']:
# Get reforigin info
rmsder = reforigin.ReforiginRmsd()
try:
rmsder.getRmsd(
nativePdbInfo=amoptd['native_pdb_info'],
placedPdbInfo=mrPdbInfo,
refModelPdbInfo=amoptd['ref_model_pdb_info'],
cAlphaOnly=True,
workdir=fixpath(amoptd['benchmark_dir']),
)
d['reforigin_RMSD'] = rmsder.rmsd
except Exception as e:
logger.critical("Error calculating RMSD: {0}".format(e))
d['reforigin_RMSD'] = 999
# Score the origin with all-atom and rio
rioData = rio.Rio().scoreOrigin(
mrOrigin,
mrPdbInfo=mrPdbInfo,
nativePdbInfo=amoptd['native_pdb_info'],
resSeqMap=amoptd['res_seq_map'],
workdir=fixpath(amoptd['benchmark_dir']),
)
# Set attributes
d['AA_num_contacts'] = rioData.aaNumContacts
d['RIO_num_contacts'] = rioData.rioNumContacts
d['RIO_in_register'] = rioData.rioInRegister
d['RIO_oo_register'] = rioData.rioOoRegister
d['RIO_backwards'] = rioData.rioBackwards
d['RIO'] = rioData.rioInRegister + rioData.rioOoRegister
d['RIO_no_cat'] = rioData.rioNumContacts - (rioData.rioInRegister + rioData.rioOoRegister)
d['RIO_norm'] = float(d['RIO']) / float(d['native_pdb_num_residues'])
else:
d['AA_num_contacts'] = None
d['RIO_num_contacts'] = None
d['RIO_in_register'] = None
d['RIO_oo_register'] = None
d['RIO_backwards'] = None
d['RIO'] = None
d['RIO_no_cat'] = None
d['RIO_norm'] = None
# # Now get the helix
# helixSequence = contacts.Rio().helixFromContacts( contacts=rioData.contacts,
# dsspLog=dsspLog )
# if helixSequence is not None:
# ampleResult.rioHelixSequence = helixSequence
# ampleResult.rioLenHelix = len( helixSequence )
# hfile = os.path.join( workdir, "{0}.helix".format( ampleResult.ensembleName ) )
# with open( hfile, 'w' ) as f:
# f.write( helixSequence+"\n" )
#
        # This is purely for checking and so that we have pdbs to view
#
# Wrap shelxe trace onto native using Csymmatch
if not d['SHELXE_pdbout'] is None and os.path.isfile(fixpath(d['SHELXE_pdbout'])):
csymmatch.Csymmatch().wrapModelToNative(
fixpath(d['SHELXE_pdbout']),
amoptd['native_pdb'],
origin=mrOrigin,
workdir=fixpath(amoptd['benchmark_dir']),
)
if not ('SHELXE_wMPE' in d and d['SHELXE_wMPE']):
try:
mrinfo.analyse(d['SHELXE_pdbout'])
d['SHELXE_MPE'] = mrinfo.MPE
d['SHELXE_wMPE'] = mrinfo.wMPE
except Exception as e:
logger.critical("Error analysing SHELXE_pdbout: {0}\n{1}".format(d['SHELXE_pdbout'], e))
# Wrap parse_buccaneer model onto native
if d['SXRBUCC_pdbout'] and os.path.isfile(fixpath(d['SXRBUCC_pdbout'])):
            # Need to rename the PDB as it is just called buccSX_output.pdb
csymmatchPdb = os.path.join(
fixpath(amoptd['benchmark_dir']), "buccaneer_{0}_csymmatch.pdb".format(d['ensemble_name'])
)
csymmatch.Csymmatch().wrapModelToNative(
fixpath(d['SXRBUCC_pdbout']),
amoptd['native_pdb'],
origin=mrOrigin,
csymmatchPdb=csymmatchPdb,
workdir=fixpath(amoptd['benchmark_dir']),
)
# Calculate phase error
try:
mrinfo.analyse(d['SXRBUCC_pdbout'])
d['SXRBUCC_MPE'] = mrinfo.MPE
d['SXRBUCC_wMPE'] = mrinfo.wMPE
except Exception as e:
logger.critical("Error analysing SXRBUCC_pdbout: {0}\n{1}".format(d['SXRBUCC_pdbout'], e))
        # Wrap the SHELXE->ARP/wARP rebuilt model onto the native
        if d['SXRARP_pdbout'] and os.path.isfile(fixpath(d['SXRARP_pdbout'])):
            # Need to rename the PDB as it only has a generic output name
csymmatchPdb = os.path.join(
fixpath(amoptd['benchmark_dir']), "arpwarp_{0}_csymmatch.pdb".format(d['ensemble_name'])
)
csymmatch.Csymmatch().wrapModelToNative(
fixpath(d['SXRARP_pdbout']),
amoptd['native_pdb'],
origin=mrOrigin,
csymmatchPdb=csymmatchPdb,
workdir=fixpath(amoptd['benchmark_dir']),
)
# Calculate phase error
try:
mrinfo.analyse(d['SXRARP_pdbout'])
d['SXRARP_MPE'] = mrinfo.MPE
d['SXRARP_wMPE'] = mrinfo.wMPE
except Exception as e:
logger.critical("Error analysing SXRARP_pdbout: {0}\n{1}".format(d['SXRARP_pdbout'], e))
return
def cluster_script(amoptd, python_path="ccp4-python"):
"""Create the script for benchmarking on a cluster"""
# write out script
work_dir = amoptd['work_dir']
script = Script(directory=work_dir, stem="submit_benchmark")
pydir = os.path.abspath(os.path.dirname(__file__))
benchmark_script = os.path.join(pydir, "benchmark_util.py")
script.append("{0} {1} {2} {3}".format(python_path, "-u", benchmark_script, amoptd['results_path']))
script.write()
return script
def fixpath(path):
# fix for analysing on a different machine
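    # Illustrative example (added commentary; paths are hypothetical): with
    # _oldroot='/cluster/run' and _newroot='/home/user/run',
    # fixpath('/cluster/run/foo.pdb') returns '/home/user/run/foo.pdb'.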
if _oldroot and _newroot:
return path.replace(_oldroot, _newroot)
else:
return path
# Stand-alone entry point (e.g. when the benchmarking is submitted to a cluster)
if __name__ == "__main__":
# Set up logging - could append to an existing log?
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# This runs the benchmarking starting from a pickled file containing an amopt dictionary.
# - used when submitting the modelling jobs to a cluster
if len(sys.argv) != 2 or not os.path.isfile(sys.argv[1]):
logging.debug("benchmark script requires the path to a pickled amopt dictionary!")
sys.exit(1)
# Get the amopt dictionary
amoptd = ample_util.read_amoptd(sys.argv[1])
fl = logging.FileHandler(os.path.join(amoptd['work_dir'], "benchmark.log"))
fl.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fl.setFormatter(formatter)
logger.addHandler(fl)
analyse(amoptd)
ample_util.save_amoptd(amoptd)
| {
"content_hash": "c303255f602a01cc4a9845d06f095cf1",
"timestamp": "",
"source": "github",
"line_count": 568,
"max_line_length": 116,
"avg_line_length": 37.144366197183096,
"alnum_prop": 0.6036591146080197,
"repo_name": "rigdenlab/ample",
"id": "c0f4a75c515868692fdabf28f0e0e9870157bce7",
"size": "21098",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ample/util/benchmark_util.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "45"
},
{
"name": "CMake",
"bytes": "426"
},
{
"name": "Fortran",
"bytes": "52396"
},
{
"name": "Python",
"bytes": "1088422"
},
{
"name": "Shell",
"bytes": "1022"
},
{
"name": "TeX",
"bytes": "10539"
}
],
"symlink_target": ""
} |
from supybot.test import *
class SnarferTestCase(ChannelPluginTestCase):
plugins = ('Snarfer',)
if network:
def testTinyurl(self):
try:
conf.supybot.plugins.Snarfer.shrinkSnarfer.setValue(False)
self.assertRegexp(
'shrinkurl tiny http://sourceforge.net/tracker/?'
'func=add&group_id=58965&atid=489447',
r'http://tinyurl.com/rqac')
conf.supybot.plugins.Snarfer.default.setValue('tiny')
conf.supybot.plugins.Snarfer.shrinkSnarfer.setValue(True)
self.assertRegexp(
'shrinkurl tiny http://sourceforge.net/tracker/?'
'func=add&group_id=58965&atid=489447',
r'http://tinyurl.com/rqac')
finally:
conf.supybot.plugins.Snarfer.shrinkSnarfer.setValue(False)
def testTinysnarf(self):
try:
conf.supybot.snarfThrottle.setValue(1)
conf.supybot.plugins.Snarfer.default.setValue('tiny')
conf.supybot.plugins.Snarfer.shrinkSnarfer.setValue(True)
self.assertSnarfRegexp(
'http://sourceforge.net/tracker/?func=add&'
'group_id=58965&atid=489447',
r'http://tinyurl.com/rqac.* \(at')
self.assertSnarfRegexp(
'http://www.urbandictionary.com/define.php?'
'term=all+your+base+are+belong+to+us',
r'http://tinyurl.com/u479.* \(at')
finally:
conf.supybot.plugins.Snarfer.shrinkSnarfer.setValue(False)
def testLnurl(self):
try:
conf.supybot.plugins.Snarfer.shrinkSnarfer.setValue(False)
self.assertRegexp(
'shrinkurl ln http://sourceforge.net/tracker/?'
'func=add&group_id=58965&atid=489447',
r'http://ln-s.net/25Z')
conf.supybot.plugins.Snarfer.default.setValue('ln')
conf.supybot.plugins.Snarfer.shrinkSnarfer.setValue(True)
self.assertRegexp(
'shrinkurl ln http://sourceforge.net/tracker/?'
'func=add&group_id=58965&atid=489447',
r'http://ln-s.net/25Z')
finally:
conf.supybot.plugins.Snarfer.shrinkSnarfer.setValue(False)
def testLnsnarf(self):
try:
conf.supybot.snarfThrottle.setValue(1)
conf.supybot.plugins.Snarfer.default.setValue('ln')
conf.supybot.plugins.Snarfer.shrinkSnarfer.setValue(True)
self.assertSnarfRegexp(
'http://sourceforge.net/tracker/?func=add&'
'group_id=58965&atid=489447',
r'http://ln-s.net/25Z.* \(at')
self.assertSnarfRegexp(
'http://www.urbandictionary.com/define.php?'
'term=all+your+base+are+belong+to+us',
r'http://ln-s.net/2\$K.* \(at')
finally:
conf.supybot.plugins.Snarfer.shrinkSnarfer.setValue(False)
def testNonSnarfing(self):
tiny = conf.supybot.plugins.Snarfer.shrinkSnarfer()
snarf = conf.supybot.plugins.Snarfer.nonSnarfingRegexp()
try:
conf.supybot.snarfThrottle.setValue(1)
conf.supybot.plugins.Snarfer.default.setValue('tiny')
conf.supybot.plugins.Snarfer.nonSnarfingRegexp.set('m/sf/')
conf.supybot.plugins.Snarfer.minimumLength.setValue(10)
try:
conf.supybot.plugins.Snarfer.shrinkSnarfer.setValue(True)
self.assertSnarfNoResponse('http://sf.net/', 2)
self.assertSnarfRegexp('http://sourceforge.net/',
r'http://tinyurl.com/7vm7.* \(at')
finally:
conf.supybot.plugins.Snarfer.shrinkSnarfer.setValue(tiny)
finally:
conf.supybot.plugins.Snarfer.nonSnarfingRegexp.setValue(snarf)
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| {
"content_hash": "5b7a3a237304adb7de280ef7b5066837",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 79,
"avg_line_length": 46.78021978021978,
"alnum_prop": 0.5466290815128024,
"repo_name": "jeffmahoney/supybot",
"id": "0fffd3f6991b2f16cb24fb44c4ad9d0c4cce9510",
"size": "5886",
"binary": false,
"copies": "1",
"ref": "refs/heads/urlsnarfer",
"path": "plugins/Snarfer/test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "2044241"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import ckeditor_uploader.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('product', '0008_auto_20170124_1651'),
]
operations = [
migrations.CreateModel(
name='Accessory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, verbose_name='Name')),
('text', ckeditor_uploader.fields.RichTextUploadingField(blank=True, verbose_name='Text')),
('slug', models.CharField(blank=True, max_length=250, verbose_name='Pic')),
('published', models.BooleanField(verbose_name='Published')),
('ordering', models.IntegerField(blank=True, default=0, null=True, verbose_name='Ordering')),
('category', models.ManyToManyField(blank=True, related_name='accessories', related_query_name='access', to='product.Category', verbose_name='Category')),
],
options={
'verbose_name_plural': 'Accessories',
'verbose_name': 'Accessory',
'ordering': ['ordering'],
'db_table': 'accessories',
},
),
]
| {
"content_hash": "b7417fa52381b7f0d623cb75541f51cb",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 170,
"avg_line_length": 41.9375,
"alnum_prop": 0.5842026825633383,
"repo_name": "skylifewww/pangolin-fog",
"id": "3984800b07824bbfd563d1436c22fd9ac33786c7",
"size": "1414",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "product/migrations/0009_accessory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "126434"
},
{
"name": "HTML",
"bytes": "154546"
},
{
"name": "JavaScript",
"bytes": "174324"
},
{
"name": "Makefile",
"bytes": "1483"
},
{
"name": "Nginx",
"bytes": "641"
},
{
"name": "Python",
"bytes": "177394"
}
],
"symlink_target": ""
} |
import re
import numpy as np
from scipy.sparse import csr_matrix
import pytest
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_allclose
from sklearn.metrics.pairwise import kernel_metrics
from sklearn.kernel_approximation import RBFSampler
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.kernel_approximation import SkewedChi2Sampler
from sklearn.kernel_approximation import Nystroem
from sklearn.kernel_approximation import PolynomialCountSketch
from sklearn.datasets import make_classification
from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel, chi2_kernel
# generate data
rng = np.random.RandomState(0)
X = rng.random_sample(size=(300, 50))
Y = rng.random_sample(size=(300, 50))
X /= X.sum(axis=1)[:, np.newaxis]
Y /= Y.sum(axis=1)[:, np.newaxis]
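# Rows of X and Y are scaled to sum to one so that they resemble histograms,
# i.e. the kind of nonnegative input the chi2-family kernels exercised below expect.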
@pytest.mark.parametrize("degree", [-1, 0])
def test_polynomial_count_sketch_raises_if_degree_lower_than_one(degree):
with pytest.raises(ValueError, match=f"degree={degree} should be >=1."):
ps_transform = PolynomialCountSketch(degree=degree)
ps_transform.fit(X, Y)
@pytest.mark.parametrize("gamma", [0.1, 1, 2.5])
@pytest.mark.parametrize("degree, n_components", [(1, 500), (2, 500), (3, 5000)])
@pytest.mark.parametrize("coef0", [0, 2.5])
def test_polynomial_count_sketch(gamma, degree, coef0, n_components):
# test that PolynomialCountSketch approximates polynomial
# kernel on random data
# compute exact kernel
kernel = polynomial_kernel(X, Y, gamma=gamma, degree=degree, coef0=coef0)
# approximate kernel mapping
ps_transform = PolynomialCountSketch(
n_components=n_components,
gamma=gamma,
coef0=coef0,
degree=degree,
random_state=42,
)
X_trans = ps_transform.fit_transform(X)
Y_trans = ps_transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
error = kernel - kernel_approx
assert np.abs(np.mean(error)) <= 0.05 # close to unbiased
np.abs(error, out=error)
assert np.max(error) <= 0.1 # nothing too far off
assert np.mean(error) <= 0.05 # mean is fairly close
@pytest.mark.parametrize("gamma", [0.1, 1.0])
@pytest.mark.parametrize("degree", [1, 2, 3])
@pytest.mark.parametrize("coef0", [0, 2.5])
def test_polynomial_count_sketch_dense_sparse(gamma, degree, coef0):
"""Check that PolynomialCountSketch results are the same for dense and sparse
input.
"""
ps_dense = PolynomialCountSketch(
n_components=500, gamma=gamma, degree=degree, coef0=coef0, random_state=42
)
Xt_dense = ps_dense.fit_transform(X)
Yt_dense = ps_dense.transform(Y)
ps_sparse = PolynomialCountSketch(
n_components=500, gamma=gamma, degree=degree, coef0=coef0, random_state=42
)
Xt_sparse = ps_sparse.fit_transform(csr_matrix(X))
Yt_sparse = ps_sparse.transform(csr_matrix(Y))
assert_allclose(Xt_dense, Xt_sparse)
assert_allclose(Yt_dense, Yt_sparse)
def _linear_kernel(X, Y):
return np.dot(X, Y.T)
def test_additive_chi2_sampler():
# test that AdditiveChi2Sampler approximates kernel on random data
# compute exact kernel
# abbreviations for easier formula
X_ = X[:, np.newaxis, :]
Y_ = Y[np.newaxis, :, :]
large_kernel = 2 * X_ * Y_ / (X_ + Y_)
# reduce to n_samples_x x n_samples_y by summing over features
kernel = large_kernel.sum(axis=2)
# approximate kernel mapping
transform = AdditiveChi2Sampler(sample_steps=3)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
X_sp_trans = transform.fit_transform(csr_matrix(X))
Y_sp_trans = transform.transform(csr_matrix(Y))
assert_array_equal(X_trans, X_sp_trans.A)
assert_array_equal(Y_trans, Y_sp_trans.A)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
msg = "Negative values in data passed to"
with pytest.raises(ValueError, match=msg):
transform.transform(Y_neg)
# test error on invalid sample_steps
transform = AdditiveChi2Sampler(sample_steps=4)
msg = re.escape(
"If sample_steps is not in [1, 2, 3], you need to provide sample_interval"
)
with pytest.raises(ValueError, match=msg):
transform.fit(X)
# test that the sample interval is set correctly
sample_steps_available = [1, 2, 3]
for sample_steps in sample_steps_available:
# test that the sample_interval is initialized correctly
transform = AdditiveChi2Sampler(sample_steps=sample_steps)
assert transform.sample_interval is None
# test that the sample_interval is changed in the fit method
transform.fit(X)
assert transform.sample_interval_ is not None
# test that the sample_interval is set correctly
sample_interval = 0.3
transform = AdditiveChi2Sampler(sample_steps=4, sample_interval=sample_interval)
assert transform.sample_interval == sample_interval
transform.fit(X)
assert transform.sample_interval_ == sample_interval
def test_skewed_chi2_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
c = 0.03
    # set one negative component, but greater than -c, to ensure that the kernel
    # approximation is valid on the group (-c; +\infty) endowed with the skewed
    # multiplication.
Y[0, 0] = -c / 2.0
# abbreviations for easier formula
X_c = (X + c)[:, np.newaxis, :]
Y_c = (Y + c)[np.newaxis, :, :]
# we do it in log-space in the hope that it's more stable
    # this array is n_samples_x x n_samples_y x n_features big
log_kernel = (
(np.log(X_c) / 2.0) + (np.log(Y_c) / 2.0) + np.log(2.0) - np.log(X_c + Y_c)
)
# reduce to n_samples_x x n_samples_y by summing over features in log-space
kernel = np.exp(log_kernel.sum(axis=2))
# approximate kernel mapping
transform = SkewedChi2Sampler(skewedness=c, n_components=1000, random_state=42)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
assert np.isfinite(kernel).all(), "NaNs found in the Gram matrix"
assert np.isfinite(kernel_approx).all(), "NaNs found in the approximate Gram matrix"
# test error is raised on when inputs contains values smaller than -c
Y_neg = Y.copy()
Y_neg[0, 0] = -c * 2.0
msg = "X may not contain entries smaller than -skewedness"
with pytest.raises(ValueError, match=msg):
transform.transform(Y_neg)
def test_additive_chi2_sampler_exceptions():
"""Ensures correct error message"""
transformer = AdditiveChi2Sampler()
X_neg = X.copy()
X_neg[0, 0] = -1
with pytest.raises(ValueError, match="X in AdditiveChi2Sampler.fit"):
transformer.fit(X_neg)
with pytest.raises(ValueError, match="X in AdditiveChi2Sampler.transform"):
transformer.fit(X)
transformer.transform(X_neg)
def test_rbf_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
gamma = 10.0
kernel = rbf_kernel(X, Y, gamma=gamma)
# approximate kernel mapping
rbf_transform = RBFSampler(gamma=gamma, n_components=1000, random_state=42)
X_trans = rbf_transform.fit_transform(X)
Y_trans = rbf_transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
error = kernel - kernel_approx
assert np.abs(np.mean(error)) <= 0.01 # close to unbiased
np.abs(error, out=error)
assert np.max(error) <= 0.1 # nothing too far off
assert np.mean(error) <= 0.05 # mean is fairly close
def test_input_validation():
# Regression test: kernel approx. transformers should work on lists
# No assertions; the old versions would simply crash
X = [[1, 2], [3, 4], [5, 6]]
AdditiveChi2Sampler().fit(X).transform(X)
SkewedChi2Sampler().fit(X).transform(X)
RBFSampler().fit(X).transform(X)
X = csr_matrix(X)
RBFSampler().fit(X).transform(X)
def test_nystroem_approximation():
# some basic tests
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 4))
# With n_components = n_samples this is exact
X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X)
K = rbf_kernel(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
trans = Nystroem(n_components=2, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert X_transformed.shape == (X.shape[0], 2)
# test callable kernel
trans = Nystroem(n_components=2, kernel=_linear_kernel, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert X_transformed.shape == (X.shape[0], 2)
# test that available kernels fit and transform
kernels_available = kernel_metrics()
for kern in kernels_available:
trans = Nystroem(n_components=2, kernel=kern, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert X_transformed.shape == (X.shape[0], 2)
def test_nystroem_default_parameters():
rnd = np.random.RandomState(42)
X = rnd.uniform(size=(10, 4))
# rbf kernel should behave as gamma=None by default
# aka gamma = 1 / n_features
nystroem = Nystroem(n_components=10)
X_transformed = nystroem.fit_transform(X)
K = rbf_kernel(X, gamma=None)
K2 = np.dot(X_transformed, X_transformed.T)
assert_array_almost_equal(K, K2)
# chi2 kernel should behave as gamma=1 by default
nystroem = Nystroem(kernel="chi2", n_components=10)
X_transformed = nystroem.fit_transform(X)
K = chi2_kernel(X, gamma=1)
K2 = np.dot(X_transformed, X_transformed.T)
assert_array_almost_equal(K, K2)
def test_nystroem_singular_kernel():
# test that nystroem works with singular kernel matrix
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
X = np.vstack([X] * 2) # duplicate samples
gamma = 100
N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X)
X_transformed = N.transform(X)
K = rbf_kernel(X, gamma=gamma)
assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T))
assert np.all(np.isfinite(Y))
def test_nystroem_poly_kernel_params():
# Non-regression: Nystroem should pass other parameters beside gamma.
rnd = np.random.RandomState(37)
X = rnd.uniform(size=(10, 4))
K = polynomial_kernel(X, degree=3.1, coef0=0.1)
nystroem = Nystroem(
kernel="polynomial", n_components=X.shape[0], degree=3.1, coef0=0.1
)
X_transformed = nystroem.fit_transform(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
def test_nystroem_callable():
# Test Nystroem on a callable.
rnd = np.random.RandomState(42)
n_samples = 10
X = rnd.uniform(size=(n_samples, 4))
def logging_histogram_kernel(x, y, log):
"""Histogram kernel that writes to a log."""
log.append(1)
return np.minimum(x, y).sum()
kernel_log = []
X = list(X) # test input validation
Nystroem(
kernel=logging_histogram_kernel,
n_components=(n_samples - 1),
kernel_params={"log": kernel_log},
).fit(X)
assert len(kernel_log) == n_samples * (n_samples - 1) / 2
# if degree, gamma or coef0 is passed, we raise a ValueError
msg = "Don't pass gamma, coef0 or degree to Nystroem"
params = ({"gamma": 1}, {"coef0": 1}, {"degree": 2})
for param in params:
ny = Nystroem(kernel=_linear_kernel, n_components=(n_samples - 1), **param)
with pytest.raises(ValueError, match=msg):
ny.fit(X)
def test_nystroem_precomputed_kernel():
# Non-regression: test Nystroem on precomputed kernel.
# PR - 14706
rnd = np.random.RandomState(12)
X = rnd.uniform(size=(10, 4))
K = polynomial_kernel(X, degree=2, coef0=0.1)
nystroem = Nystroem(kernel="precomputed", n_components=X.shape[0])
X_transformed = nystroem.fit_transform(K)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
# if degree, gamma or coef0 is passed, we raise a ValueError
msg = "Don't pass gamma, coef0 or degree to Nystroem"
params = ({"gamma": 1}, {"coef0": 1}, {"degree": 2})
for param in params:
ny = Nystroem(kernel="precomputed", n_components=X.shape[0], **param)
with pytest.raises(ValueError, match=msg):
ny.fit(K)
def test_nystroem_component_indices():
"""Check that `component_indices_` corresponds to the subset of
training points used to construct the feature map.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/20474
"""
X, _ = make_classification(n_samples=100, n_features=20)
feature_map_nystroem = Nystroem(
n_components=10,
random_state=0,
)
feature_map_nystroem.fit(X)
assert feature_map_nystroem.component_indices_.shape == (10,)
@pytest.mark.parametrize(
"Estimator", [PolynomialCountSketch, RBFSampler, SkewedChi2Sampler, Nystroem]
)
def test_get_feature_names_out(Estimator):
"""Check get_feature_names_out"""
est = Estimator().fit(X)
X_trans = est.transform(X)
names_out = est.get_feature_names_out()
class_name = Estimator.__name__.lower()
expected_names = [f"{class_name}{i}" for i in range(X_trans.shape[1])]
assert_array_equal(names_out, expected_names)
def test_additivechi2sampler_get_feature_names_out():
"""Check get_feature_names_out for AdditiveChi2Sampler."""
rng = np.random.RandomState(0)
X = rng.random_sample(size=(300, 3))
chi2_sampler = AdditiveChi2Sampler(sample_steps=3).fit(X)
input_names = ["f0", "f1", "f2"]
suffixes = [
"f0_sqrt",
"f1_sqrt",
"f2_sqrt",
"f0_cos1",
"f1_cos1",
"f2_cos1",
"f0_sin1",
"f1_sin1",
"f2_sin1",
"f0_cos2",
"f1_cos2",
"f2_cos2",
"f0_sin2",
"f1_sin2",
"f2_sin2",
]
names_out = chi2_sampler.get_feature_names_out(input_features=input_names)
expected_names = [f"additivechi2sampler_{suffix}" for suffix in suffixes]
assert_array_equal(names_out, expected_names)
| {
"content_hash": "e6e092a6b5d0bea2070bc4b4dd8f0362",
"timestamp": "",
"source": "github",
"line_count": 418,
"max_line_length": 88,
"avg_line_length": 34.37081339712919,
"alnum_prop": 0.6656922113176028,
"repo_name": "jakirkham/scikit-learn",
"id": "29681f32b769519a395b5a3a6fac6e8ea1bc92d7",
"size": "14367",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "sklearn/tests/test_kernel_approximation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "42335"
},
{
"name": "C++",
"bytes": "147316"
},
{
"name": "Cython",
"bytes": "740294"
},
{
"name": "Makefile",
"bytes": "1644"
},
{
"name": "Python",
"bytes": "10446963"
},
{
"name": "Shell",
"bytes": "41911"
}
],
"symlink_target": ""
} |
from django.core.files.uploadedfile import SimpleUploadedFile
import mock
from nose.tools import eq_
from amo.tests import app_factory, TestCase, user_factory
from mkt.comm.forms import CommAttachmentFormSet
from mkt.comm.tests.test_views import AttachmentManagementMixin
from mkt.comm.utils import create_comm_note
from mkt.constants import comm
class TestCreateCommNote(TestCase, AttachmentManagementMixin):
def setUp(self):
self.create_switch('comm-dashboard')
self.contact = user_factory(username='contact')
self.user = user_factory()
self.grant_permission(self.user, '*:*')
self.app = app_factory(mozilla_contact=self.contact.email)
def test_create_thread(self):
# Default permissions.
thread, note = create_comm_note(
self.app, self.app.current_version, self.user, 'huehue',
note_type=comm.APPROVAL)
# Check Thread.
eq_(thread.addon, self.app)
eq_(thread.version, self.app.current_version)
expected = {
'public': False, 'developer': True, 'reviewer': True,
'senior_reviewer': True, 'mozilla_contact': True, 'staff': True}
for perm, has_perm in expected.items():
eq_(getattr(thread, 'read_permission_%s' % perm), has_perm, perm)
# Check Note.
eq_(note.thread, thread)
eq_(note.author, self.user)
eq_(note.body, 'huehue')
eq_(note.note_type, comm.APPROVAL)
# Check CC.
eq_(thread.thread_cc.count(), 2)
assert thread.thread_cc.filter(user=self.contact).exists()
assert thread.thread_cc.filter(user=self.user).exists()
def test_create_note_existing_thread(self):
# Initial note.
thread, note = create_comm_note(
self.app, self.app.current_version, self.user, 'huehue')
# Second note from contact.
thread, reply = create_comm_note(
self.app, self.app.current_version, self.contact, 'euheuh!',
note_type=comm.REJECTION)
# Third person joins thread.
thread, last_word = create_comm_note(
self.app, self.app.current_version, user_factory(), 'euheuh!',
note_type=comm.MORE_INFO_REQUIRED)
eq_(thread.thread_cc.count(), 3)
def test_create_note_no_author(self):
thread, note = create_comm_note(
self.app, self.app.current_version, None, 'huehue')
eq_(note.author, None)
@mock.patch('mkt.comm.utils.post_create_comm_note', new=mock.Mock)
def test_create_note_reviewer_type(self):
for note_type in comm.REVIEWER_NOTE_TYPES:
thread, note = create_comm_note(
self.app, self.app.current_version, None, 'huehue',
note_type=note_type)
eq_(note.read_permission_developer, False)
@mock.patch('mkt.comm.utils.post_create_comm_note', new=mock.Mock)
def test_custom_perms(self):
thread, note = create_comm_note(
self.app, self.app.current_version, self.user, 'escalatedquickly',
note_type=comm.ESCALATION, perms={'developer': False,
'staff': True})
expected = {
'public': False, 'developer': False, 'reviewer': True,
'senior_reviewer': True, 'mozilla_contact': True, 'staff': True}
for perm, has_perm in expected.items():
eq_(getattr(thread, 'read_permission_%s' % perm), has_perm, perm)
@mock.patch('mkt.comm.utils.post_create_comm_note', new=mock.Mock)
def test_attachments(self):
attach_formdata = self._attachment_management_form(num=2)
attach_formdata.update(self._attachments(num=2))
attach_formset = CommAttachmentFormSet(
attach_formdata,
{'form-0-attachment':
SimpleUploadedFile(
'lol', attach_formdata['form-0-attachment'].read()),
'form-1-attachment':
SimpleUploadedFile(
'lol2', attach_formdata['form-1-attachment'].read())})
thread, note = create_comm_note(
self.app, self.app.current_version, self.user, 'lol',
note_type=comm.APPROVAL, attachments=attach_formset)
eq_(note.attachments.count(), 2)
| {
"content_hash": "e3d377dbd0a7331ed242d10c5138ef2e",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 78,
"avg_line_length": 39.38532110091743,
"alnum_prop": 0.6121593291404612,
"repo_name": "ngokevin/zamboni",
"id": "a664e901e75d6a793c0f4fa403860388d3c5c698",
"size": "4293",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mkt/comm/tests/test_utils_.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "356777"
},
{
"name": "JavaScript",
"bytes": "536388"
},
{
"name": "Python",
"bytes": "3883015"
},
{
"name": "Shell",
"bytes": "13597"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_dressed_commoner_naboo_zabrak_male_02.iff"
result.attribute_template_id = 9
result.stfName("npc_name","zabrak_base_male")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "3341c4f24d5c7dac0849ed4ceade2b69",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 83,
"avg_line_length": 24.46153846153846,
"alnum_prop": 0.7012578616352201,
"repo_name": "anhstudios/swganh",
"id": "dd6125713f9b0e5f2a2abb79e71eecbe66bbe5bc",
"size": "463",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/mobile/shared_dressed_commoner_naboo_zabrak_male_02.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
import sys
from .ttypes import *
| {
"content_hash": "4cd5ce356202bcc3d33079d08f3dae30",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 93,
"avg_line_length": 46,
"alnum_prop": 0.8532608695652174,
"repo_name": "mikewalch/accumulo",
"id": "3b2f97a24e68a8a29e650cdaef5d71fa76661bfe",
"size": "1107",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "proxy/src/main/python/constants.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2423"
},
{
"name": "C++",
"bytes": "2276148"
},
{
"name": "CSS",
"bytes": "7395"
},
{
"name": "FreeMarker",
"bytes": "54658"
},
{
"name": "Groovy",
"bytes": "1385"
},
{
"name": "HTML",
"bytes": "5970"
},
{
"name": "Java",
"bytes": "23452563"
},
{
"name": "JavaScript",
"bytes": "86450"
},
{
"name": "Makefile",
"bytes": "2865"
},
{
"name": "Python",
"bytes": "1001767"
},
{
"name": "Ruby",
"bytes": "270630"
},
{
"name": "Shell",
"bytes": "64165"
},
{
"name": "Thrift",
"bytes": "66191"
}
],
"symlink_target": ""
} |
"""
sentry.cache.redis
~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from sentry.utils import json
from sentry.utils.redis import get_cluster_from_options
from .base import BaseCache
class ValueTooLarge(Exception):
pass
class RedisCache(BaseCache):
key_expire = 60 * 60 # 1 hour
max_size = 50 * 1024 * 1024 # 50MB
def __init__(self, **options):
self.cluster, options = get_cluster_from_options('SENTRY_CACHE_OPTIONS', options)
self.client = self.cluster.get_routing_client()
super(RedisCache, self).__init__(**options)
def set(self, key, value, timeout, version=None):
key = self.make_key(key, version=version)
v = json.dumps(value)
if len(v) > self.max_size:
raise ValueTooLarge('Cache key too large: %r %r' % (key, len(v)))
if timeout:
self.client.setex(key, int(timeout), v)
else:
self.client.set(key, v)
def delete(self, key, version=None):
key = self.make_key(key, version=version)
self.client.delete(key)
def get(self, key, version=None):
key = self.make_key(key, version=version)
result = self.client.get(key)
if result is not None:
result = json.loads(result)
return result
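# Example usage (illustrative sketch, not part of the original module; whether
# RedisCache() can be constructed like this depends on the deployment's
# SENTRY_CACHE_OPTIONS / Redis cluster configuration):
#
#   cache = RedisCache()
#   cache.set('key', {'some': 'value'}, 60)
#   assert cache.get('key') == {'some': 'value'}
#   cache.delete('key')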
| {
"content_hash": "72bd9b5441bbd110be31cad261d7b41a",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 89,
"avg_line_length": 28.44,
"alnum_prop": 0.6188466947960619,
"repo_name": "jean/sentry",
"id": "7ba7625de35cd6a3078745dcd8150f17b71cddb9",
"size": "1422",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "src/sentry/cache/redis.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "296112"
},
{
"name": "HTML",
"bytes": "314273"
},
{
"name": "JavaScript",
"bytes": "1293918"
},
{
"name": "Lua",
"bytes": "57158"
},
{
"name": "Makefile",
"bytes": "6632"
},
{
"name": "Python",
"bytes": "24515298"
},
{
"name": "Ruby",
"bytes": "4410"
},
{
"name": "Shell",
"bytes": "2942"
}
],
"symlink_target": ""
} |
import abc
from typing import Any, Dict, List, Optional, Sequence, TYPE_CHECKING
import numpy as np
from cirq import protocols
from cirq._compat import proper_repr
from cirq.qis import quantum_state_representation
from cirq.value import big_endian_int_to_digits, linear_dict, random_state
if TYPE_CHECKING:
import cirq
class StabilizerState(
quantum_state_representation.QuantumStateRepresentation, metaclass=abc.ABCMeta
):
"""Interface for quantum stabilizer state representations.
This interface is used for CliffordTableau and StabilizerChForm quantum
state representations, allowing simulators to act on them abstractly.
"""
@abc.abstractmethod
def apply_x(self, axis: int, exponent: float = 1, global_shift: float = 0):
"""Apply an X operation to the state.
Args:
axis: The axis to which the operation should be applied.
exponent: The exponent of the X operation, must be a half-integer.
global_shift: The global phase shift of the raw operation, prior to
exponentiation. Typically the value in `gate.global_shift`.
Raises:
ValueError: If the exponent is not half-integer.
"""
@abc.abstractmethod
def apply_y(self, axis: int, exponent: float = 1, global_shift: float = 0):
"""Apply an Y operation to the state.
Args:
axis: The axis to which the operation should be applied.
exponent: The exponent of the Y operation, must be a half-integer.
global_shift: The global phase shift of the raw operation, prior to
exponentiation. Typically the value in `gate.global_shift`.
Raises:
ValueError: If the exponent is not half-integer.
"""
@abc.abstractmethod
def apply_z(self, axis: int, exponent: float = 1, global_shift: float = 0):
"""Apply a Z operation to the state.
Args:
axis: The axis to which the operation should be applied.
exponent: The exponent of the Z operation, must be a half-integer.
global_shift: The global phase shift of the raw operation, prior to
exponentiation. Typically the value in `gate.global_shift`.
Raises:
ValueError: If the exponent is not half-integer.
"""
@abc.abstractmethod
def apply_h(self, axis: int, exponent: float = 1, global_shift: float = 0):
"""Apply an H operation to the state.
Args:
axis: The axis to which the operation should be applied.
exponent: The exponent of the H operation, must be an integer.
global_shift: The global phase shift of the raw operation, prior to
exponentiation. Typically the value in `gate.global_shift`.
Raises:
ValueError: If the exponent is not an integer.
"""
@abc.abstractmethod
def apply_cz(
self, control_axis: int, target_axis: int, exponent: float = 1, global_shift: float = 0
):
"""Apply a CZ operation to the state.
Args:
control_axis: The control axis of the operation.
target_axis: The axis to which the operation should be applied.
exponent: The exponent of the CZ operation, must be an integer.
global_shift: The global phase shift of the raw operation, prior to
exponentiation. Typically the value in `gate.global_shift`.
Raises:
ValueError: If the exponent is not an integer.
"""
@abc.abstractmethod
def apply_cx(
self, control_axis: int, target_axis: int, exponent: float = 1, global_shift: float = 0
):
"""Apply a CX operation to the state.
Args:
control_axis: The control axis of the operation.
target_axis: The axis to which the operation should be applied.
exponent: The exponent of the CX operation, must be an integer.
global_shift: The global phase shift of the raw operation, prior to
exponentiation. Typically the value in `gate.global_shift`.
Raises:
ValueError: If the exponent is not an integer.
"""
@abc.abstractmethod
def apply_global_phase(self, coefficient: linear_dict.Scalar):
"""Apply a global phase to the state.
Args:
coefficient: The global phase to apply.
"""
class CliffordTableau(StabilizerState):
"""Tableau representation of a stabilizer state
(based on Aaronson and Gottesman 2006).
The tableau stores the stabilizer generators of
the state using three binary arrays: xs, zs, and rs.
Each row of the arrays represents a Pauli string, P, that is
an eigenoperator of the state vector with eigenvalue one: P|psi> = |psi>.
"""
def __init__(
self,
num_qubits,
initial_state: int = 0,
rs: Optional[np.ndarray] = None,
xs: Optional[np.ndarray] = None,
zs: Optional[np.ndarray] = None,
):
"""Initializes CliffordTableau
Args:
num_qubits: The number of qubits in the system.
initial_state: The computational basis representation of the
state as a big endian int.
"""
self.n = num_qubits
self.initial_state = initial_state
# _reconstruct_* adds the last row (`2n+1`-th row) to the input arrays,
# which is the scratch row used in _measurement
# computation process only. It should not be exposed to external usage.
self._rs = self._reconstruct_rs(rs)
self._xs = self._reconstruct_xs(xs)
self._zs = self._reconstruct_zs(zs)
def _reconstruct_rs(self, rs: Optional[np.ndarray]) -> np.ndarray:
if rs is None:
new_rs = np.zeros(2 * self.n + 1, dtype=bool)
for (i, val) in enumerate(
big_endian_int_to_digits(self.initial_state, digit_count=self.n, base=2)
):
new_rs[self.n + i] = bool(val)
else:
shape = rs.shape
if len(shape) == 1 and shape[0] == 2 * self.n and rs.dtype == np.dtype(bool):
new_rs = np.append(rs, np.zeros(1, dtype=bool))
else:
raise ValueError(
"The value you passed for rs is not the correct shape and/or type. "
"Please confirm that it's a single row with 2*num_qubits columns "
"and of type bool."
)
return new_rs
def _reconstruct_xs(self, xs: Optional[np.ndarray]) -> np.ndarray:
if xs is None:
new_xs = np.zeros((2 * self.n + 1, self.n), dtype=bool)
for i in range(self.n):
new_xs[i, i] = True
else:
shape = xs.shape
if (
len(shape) == 2
and shape[0] == 2 * self.n
and shape[1] == self.n
and xs.dtype == np.dtype(bool)
):
new_xs = np.append(xs, np.zeros((1, self.n), dtype=bool), axis=0)
else:
raise ValueError(
"The value you passed for xs is not the correct shape and/or type. "
"Please confirm that it's 2*num_qubits rows, num_qubits columns, "
"and of type bool."
)
return new_xs
def _reconstruct_zs(self, zs: Optional[np.ndarray]) -> np.ndarray:
if zs is None:
new_zs = np.zeros((2 * self.n + 1, self.n), dtype=bool)
for i in range(self.n):
new_zs[self.n + i, i] = True
else:
shape = zs.shape
if (
len(shape) == 2
and shape[0] == 2 * self.n
and shape[1] == self.n
and zs.dtype == np.dtype(bool)
):
new_zs = np.append(zs, np.zeros((1, self.n), dtype=bool), axis=0)
else:
raise ValueError(
"The value you passed for zs is not the correct shape and/or type. "
"Please confirm that it's 2*num_qubits rows, num_qubits columns, "
"and of type bool."
)
return new_zs
@property
def xs(self) -> np.ndarray:
return self._xs[:-1, :]
@xs.setter
def xs(self, new_xs: np.ndarray) -> None:
assert np.shape(new_xs) == (2 * self.n, self.n)
self._xs[:-1, :] = np.array(new_xs).astype(bool)
@property
def zs(self) -> np.ndarray:
return self._zs[:-1, :]
@zs.setter
def zs(self, new_zs: np.ndarray) -> None:
assert np.shape(new_zs) == (2 * self.n, self.n)
self._zs[:-1, :] = np.array(new_zs).astype(bool)
@property
def rs(self) -> np.ndarray:
return self._rs[:-1]
@rs.setter
def rs(self, new_rs: np.ndarray) -> None:
assert np.shape(new_rs) == (2 * self.n,)
self._rs[:-1] = np.array(new_rs).astype(bool)
def matrix(self) -> np.ndarray:
"""Returns the 2n * 2n matrix representation of the Clifford tableau."""
return np.concatenate([self.xs, self.zs], axis=1)
def _json_dict_(self) -> Dict[str, Any]:
return protocols.obj_to_dict_helper(self, ['n', 'rs', 'xs', 'zs'])
@classmethod
def _from_json_dict_(cls, n, rs, xs, zs, **kwargs):
state = cls(n)
state.rs = np.array(rs).astype(bool)
state.xs = np.array(xs).astype(bool)
state.zs = np.array(zs).astype(bool)
return state
def _validate(self) -> bool:
"""Check if the Clifford Tabluea satisfies the symplectic property."""
table = np.concatenate([self.xs, self.zs], axis=1)
perm = list(range(self.n, 2 * self.n)) + list(range(self.n))
skew_eye = np.eye(2 * self.n, dtype=int)[perm]
return np.array_equal(np.mod(table.T.dot(skew_eye).dot(table), 2), skew_eye)
def __eq__(self, other):
if not isinstance(other, type(self)):
# coverage: ignore
return NotImplemented
return (
self.n == other.n
and np.array_equal(self.rs, other.rs)
and np.array_equal(self.xs, other.xs)
and np.array_equal(self.zs, other.zs)
)
def __copy__(self) -> 'CliffordTableau':
return self.copy()
def copy(self, deep_copy_buffers: bool = True) -> 'CliffordTableau':
state = CliffordTableau(self.n)
state.rs = self.rs.copy()
state.xs = self.xs.copy()
state.zs = self.zs.copy()
return state
def __repr__(self) -> str:
return (
f"cirq.CliffordTableau({self.n},"
f"rs={proper_repr(np.delete(self._rs, len(self._rs)-1))}, "
f"xs={proper_repr(np.delete(self._xs, len(self._xs)-1, axis=0))},"
f"zs={proper_repr(np.delete(self._zs, len(self._zs)-1, axis=0))}, "
f"initial_state={self.initial_state})"
)
def __str__(self) -> str:
string = ''
for i in range(self.n, 2 * self.n):
string += '- ' if self.rs[i] else '+ '
for k in range(self.n):
if self.xs[i, k] & (not self.zs[i, k]):
string += 'X '
elif (not self.xs[i, k]) & self.zs[i, k]:
string += 'Z '
elif self.xs[i, k] & self.zs[i, k]:
string += 'Y '
else:
string += 'I '
if i < 2 * self.n - 1:
string += '\n'
return string
def _str_full_(self) -> str:
string = ''
string += 'stable' + ' ' * max(self.n * 2 - 3, 1)
string += '| destable\n'
string += '-' * max(7, self.n * 2 + 3) + '+' + '-' * max(10, self.n * 2 + 4) + '\n'
for j in range(self.n):
for i in [j + self.n, j]:
string += '- ' if self.rs[i] else '+ '
for k in range(self.n):
if self.xs[i, k] & (not self.zs[i, k]):
string += 'X%d' % k
elif (not self.xs[i, k]) & self.zs[i, k]:
string += 'Z%d' % k
elif self.xs[i, k] & self.zs[i, k]:
string += 'Y%d' % k
else:
string += ' '
if i == j + self.n:
string += ' ' * max(0, 4 - self.n * 2) + ' | '
string += '\n'
return string
def then(self, second: 'CliffordTableau') -> 'CliffordTableau':
"""Returns a composed CliffordTableau of this tableau and the second tableau.
        The composed tableau is equal to (up to global phase) the composed
unitary operation of the two tableaux, i.e. equivalent to applying the unitary
operation of this CliffordTableau then applying the second one.
Args:
second: The second CliffordTableau to compose with.
Returns:
The composed CliffordTableau.
Raises:
TypeError: If the type of second is not CliffordTableau.
ValueError: If the number of qubits in the second tableau mismatch with
this tableau.
"""
if not isinstance(second, CliffordTableau):
raise TypeError("The type for second tableau must be the CliffordTableau type")
if self.n != second.n:
raise ValueError(
f"Mismatched number of qubits of two tableaux: {self.n} vs {second.n}."
)
# Convert the underlying data type from bool to int for easier numerical computation.
m1 = self.matrix().astype(int)
m2 = second.matrix().astype(int)
# The following computation is based on Theorem 36 in
# https://arxiv.org/pdf/2009.03218.pdf.
        # Any Pauli string (one stabilizer/destabilizer row) in the Clifford tableau can be expressed as
# (1i)^p (-1)^s X^(mx) Z^(mz)
# where p and s are binary scalar and mx and mz are binary vectors.
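        # (Added commentary: in the code below, p is the parity of the number of Y
        # factors in a row -- each Y = i*X*Z contributes one factor of i -- and s is
        # derived from the stored sign bit rs together with that Y count.)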
num_ys1 = np.sum(m1[:, : self.n] * m1[:, self.n :], axis=1)
num_ys2 = np.sum(m2[:, : self.n] * m2[:, self.n :], axis=1)
p1 = np.mod(num_ys1, 2)
p2 = np.mod(num_ys2, 2)
# Note the `s` is not equal to `r`, which depends on the number of Y gates.
# For example, r * Y_1Y_2Y_3 can be expanded into i^3 * r * X_1Z_1 X_2Z_2 X_3Z_3.
# The global phase is i * (-1) * r ==> s = r + 1 and p = 1.
s1 = self.rs.astype(int) + np.mod(num_ys1, 4) // 2
s2 = second.rs.astype(int) + np.mod(num_ys2, 4) // 2
lmbda = np.zeros((2 * self.n, 2 * self.n))
lmbda[: self.n, self.n :] = np.eye(self.n)
m_12 = np.mod(m1 @ m2, 2)
p_12 = np.mod(p1 + m1 @ p2, 2)
s_12 = (
s1
+ m1 @ s2
+ p1 * (m1 @ p2)
+ np.diag(m1 @ np.tril(np.outer(p2, p2.T) + m2 @ lmbda @ m2.T, -1) @ m1.T)
)
num_ys12 = np.sum(m_12[:, : self.n] * m_12[:, self.n :], axis=1)
merged_sign = np.mod(p_12 + 2 * s_12 - num_ys12, 4) // 2
merged_tableau = CliffordTableau(num_qubits=self.n)
merged_tableau.xs = m_12[:, : self.n]
merged_tableau.zs = m_12[:, self.n :]
merged_tableau.rs = merged_sign
return merged_tableau
def inverse(self) -> 'CliffordTableau':
"""Returns the inverse Clifford tableau of this tableau."""
ret_table = CliffordTableau(num_qubits=self.n)
# It relies on the symplectic property of Clifford tableau.
# [A^T C^T [0 I [A B [0 I
# B^T D^T] I 0] C D] = I 0]
# So the inverse is [[D^T B^T], [C^T A^T]]
ret_table.xs[: self.n] = self.zs[self.n :].T
ret_table.zs[: self.n] = self.zs[: self.n].T
ret_table.xs[self.n :] = self.xs[self.n :].T
ret_table.zs[self.n :] = self.xs[: self.n].T
# Update the sign -- rs.
        # The idea is that the sign of tableau `a` contributes to the composed tableau
        # `a.then(b)` directly, while the sign in `b` requires a more complicated
        # transformation. Refer to the `then` implementation above for more details.
ret_table.rs = ret_table.then(self).rs
return ret_table
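    # Sanity check (added commentary): for any valid tableau `t`,
    # `t.then(t.inverse())` and `t.inverse().then(t)` should both equal the
    # identity tableau CliffordTableau(t.n).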
def __matmul__(self, second: 'CliffordTableau'):
if not isinstance(second, CliffordTableau):
return NotImplemented
return second.then(self)
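    # Note (added commentary): following the matrix-multiplication convention,
    # `t2 @ t1` returns `t1.then(t2)`, i.e. the tableau of applying t1 first and
    # then t2.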
def _rowsum(self, q1, q2):
"""Implements the "rowsum" routine defined by
Aaronson and Gottesman.
Multiplies the stabilizer in row q1 by the stabilizer in row q2."""
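        # g returns the power of i (-1, 0 or +1) picked up when multiplying the
        # single-qubit Paulis encoded by (x1, z1) and (x2, z2).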
def g(x1, z1, x2, z2):
if not x1 and not z1:
return 0
elif x1 and z1:
return int(z2) - int(x2)
elif x1 and not z1:
return int(z2) * (2 * int(x2) - 1)
else:
return int(x2) * (1 - 2 * int(z2))
r = 2 * int(self._rs[q1]) + 2 * int(self._rs[q2])
for j in range(self.n):
r += g(self._xs[q2, j], self._zs[q2, j], self._xs[q1, j], self._zs[q1, j])
r %= 4
self._rs[q1] = bool(r)
self._xs[q1, :] ^= self._xs[q2, :]
self._zs[q1, :] ^= self._zs[q2, :]
def _row_to_dense_pauli(self, i: int) -> 'cirq.DensePauliString':
"""Return a dense Pauli string for the given row in the tableau.
Args:
i: index of the row in the tableau.
Returns:
A DensePauliString representing the row. The length of the string
is equal to the total number of qubits and each character
represents the effective single Pauli operator on that qubit. The
overall phase is captured in the coefficient.
"""
from cirq.ops.dense_pauli_string import DensePauliString
coefficient = -1 if self.rs[i] else 1
pauli_mask = ""
for k in range(self.n):
if self.xs[i, k] & (not self.zs[i, k]):
pauli_mask += "X"
elif (not self.xs[i, k]) & self.zs[i, k]:
pauli_mask += "Z"
elif self.xs[i, k] & self.zs[i, k]:
pauli_mask += "Y"
else:
pauli_mask += "I"
return DensePauliString(pauli_mask, coefficient=coefficient)
def stabilizers(self) -> List['cirq.DensePauliString']:
"""Returns the stabilizer generators of the state. These
are n operators {S_1,S_2,...,S_n} such that S_i |psi> = |psi>"""
return [self._row_to_dense_pauli(i) for i in range(self.n, 2 * self.n)]
def destabilizers(self) -> List['cirq.DensePauliString']:
"""Returns the destabilizer generators of the state. These
are n operators {S_1,S_2,...,S_n} such that along with the stabilizer
generators above generate the full Pauli group on n qubits."""
return [self._row_to_dense_pauli(i) for i in range(self.n)]
def _measure(self, q, prng: np.random.RandomState) -> int:
"""Performs a projective measurement on the q'th qubit.
Returns: the result (0 or 1) of the measurement.
"""
is_commuting = True
for i in range(self.n, 2 * self.n):
if self.xs[i, q]:
p = i
is_commuting = False
break
if is_commuting:
self._xs[2 * self.n, :] = False
self._zs[2 * self.n, :] = False
self._rs[2 * self.n] = False
for i in range(self.n):
if self.xs[i, q]:
self._rowsum(2 * self.n, self.n + i)
return int(self._rs[2 * self.n])
for i in range(2 * self.n):
if i != p and self.xs[i, q]:
self._rowsum(i, p)
self.xs[p - self.n, :] = self.xs[p, :].copy()
self.zs[p - self.n, :] = self.zs[p, :].copy()
self.rs[p - self.n] = self.rs[p]
self.xs[p, :] = False
self.zs[p, :] = False
self.zs[p, q] = True
self.rs[p] = bool(prng.randint(2))
return int(self.rs[p])
def apply_x(self, axis: int, exponent: float = 1, global_shift: float = 0):
if exponent % 2 == 0:
return
if exponent % 0.5 != 0.0:
raise ValueError('X exponent must be half integer') # coverage: ignore
effective_exponent = exponent % 2
if effective_exponent == 0.5:
self.xs[:, axis] ^= self.zs[:, axis]
self.rs[:] ^= self.xs[:, axis] & self.zs[:, axis]
elif effective_exponent == 1:
self.rs[:] ^= self.zs[:, axis]
elif effective_exponent == 1.5:
self.rs[:] ^= self.xs[:, axis] & self.zs[:, axis]
self.xs[:, axis] ^= self.zs[:, axis]
def apply_y(self, axis: int, exponent: float = 1, global_shift: float = 0):
if exponent % 2 == 0:
return
if exponent % 0.5 != 0.0:
raise ValueError('Y exponent must be half integer') # coverage: ignore
effective_exponent = exponent % 2
if effective_exponent == 0.5:
self.rs[:] ^= self.xs[:, axis] & (~self.zs[:, axis])
(self.xs[:, axis], self.zs[:, axis]) = (
self.zs[:, axis].copy(),
self.xs[:, axis].copy(),
)
elif effective_exponent == 1:
self.rs[:] ^= self.xs[:, axis] ^ self.zs[:, axis]
elif effective_exponent == 1.5:
self.rs[:] ^= ~(self.xs[:, axis]) & self.zs[:, axis]
(self.xs[:, axis], self.zs[:, axis]) = (
self.zs[:, axis].copy(),
self.xs[:, axis].copy(),
)
def apply_z(self, axis: int, exponent: float = 1, global_shift: float = 0):
if exponent % 2 == 0:
return
if exponent % 0.5 != 0.0:
raise ValueError('Z exponent must be half integer') # coverage: ignore
effective_exponent = exponent % 2
if effective_exponent == 0.5:
self.rs[:] ^= self.xs[:, axis] & self.zs[:, axis]
self.zs[:, axis] ^= self.xs[:, axis]
elif effective_exponent == 1:
self.rs[:] ^= self.xs[:, axis]
elif effective_exponent == 1.5:
self.rs[:] ^= self.xs[:, axis] & (~self.zs[:, axis])
self.zs[:, axis] ^= self.xs[:, axis]
def apply_h(self, axis: int, exponent: float = 1, global_shift: float = 0):
if exponent % 2 == 0:
return
if exponent % 1 != 0:
raise ValueError('H exponent must be integer') # coverage: ignore
self.apply_y(axis, 0.5)
self.apply_x(axis)
def apply_cz(
self, control_axis: int, target_axis: int, exponent: float = 1, global_shift: float = 0
):
if exponent % 2 == 0:
return
if exponent % 1 != 0:
raise ValueError('CZ exponent must be integer') # coverage: ignore
(self.xs[:, target_axis], self.zs[:, target_axis]) = (
self.zs[:, target_axis].copy(),
self.xs[:, target_axis].copy(),
)
self.rs[:] ^= self.xs[:, target_axis] & self.zs[:, target_axis]
self.rs[:] ^= (
self.xs[:, control_axis]
& self.zs[:, target_axis]
& (~(self.xs[:, target_axis] ^ self.zs[:, control_axis]))
)
self.xs[:, target_axis] ^= self.xs[:, control_axis]
self.zs[:, control_axis] ^= self.zs[:, target_axis]
(self.xs[:, target_axis], self.zs[:, target_axis]) = (
self.zs[:, target_axis].copy(),
self.xs[:, target_axis].copy(),
)
self.rs[:] ^= self.xs[:, target_axis] & self.zs[:, target_axis]
def apply_cx(
self, control_axis: int, target_axis: int, exponent: float = 1, global_shift: float = 0
):
if exponent % 2 == 0:
return
if exponent % 1 != 0:
raise ValueError('CX exponent must be integer') # coverage: ignore
self.rs[:] ^= (
self.xs[:, control_axis]
& self.zs[:, target_axis]
& (~(self.xs[:, target_axis] ^ self.zs[:, control_axis]))
)
self.xs[:, target_axis] ^= self.xs[:, control_axis]
self.zs[:, control_axis] ^= self.zs[:, target_axis]
def apply_global_phase(self, coefficient: linear_dict.Scalar):
pass
def measure(
self, axes: Sequence[int], seed: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None
) -> List[int]:
return [self._measure(axis, random_state.parse_random_state(seed)) for axis in axes]
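    # --- Hedged usage sketch (editor addition, illustrative only) ---
    # Assuming the CliffordTableau(num_qubits) constructor defined earlier in
    # this file, which starts in the |0..0> stabilizer state:
    #
    #   t = CliffordTableau(2)
    #   t.apply_h(0)               # stabilizers become {X0, Z1}
    #   t.apply_cx(0, 1)           # Bell state: stabilizers {X0*X1, Z0*Z1}
    #   t.stabilizers()            # [+XX, +ZZ] as DensePauliStrings
    #   t.measure([0, 1], seed=0)  # correlated outcomes: [0, 0] or [1, 1]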
| {
"content_hash": "25e685716f24e97ecd0fcc2c237e92cf",
"timestamp": "",
"source": "github",
"line_count": 642,
"max_line_length": 98,
"avg_line_length": 38.33177570093458,
"alnum_prop": 0.5337071802998903,
"repo_name": "quantumlib/Cirq",
"id": "bd73daccf768bf87746e90deac793dbef07f0ace",
"size": "25194",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cirq-core/cirq/qis/clifford_tableau.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "4616"
},
{
"name": "HTML",
"bytes": "262"
},
{
"name": "JavaScript",
"bytes": "660"
},
{
"name": "Jupyter Notebook",
"bytes": "672675"
},
{
"name": "Makefile",
"bytes": "634"
},
{
"name": "Python",
"bytes": "8643017"
},
{
"name": "Scilab",
"bytes": "735"
},
{
"name": "Shell",
"bytes": "64230"
},
{
"name": "TypeScript",
"bytes": "91766"
}
],
"symlink_target": ""
} |
from __future__ import division
import dirichlet
import random
import utils
import math
from collections import Counter
class PhraseLDA(object):
"""
    Topic modeling using LDA with collapsed Gibbs sampling on "bag-of-phrases".
:param partitioned_docs:
List of documents, where each document is partitioned into a list of
phrases.
:param index_vocab:
Mapping of integer index to string word.
:param num_topics:
Number of requested topics that need to be extracted from the inputted
documents.
:param alpha:
Scalar hyperparameter representing the symmetric Dirichlet prior over
document-topic (lambda) distributions. Initially, each topic will have
the same prior weight for each document.
:param beta:
Scalar hyperparameter representing the symmetric Dirichlet prior over
topic-word (phi) distributions. Initially, each word will have the same
prior weight for each topic.
:param iterations:
The total number of Gibbs sampling iterations over the entire corpus.
    :param optimization_iterations:
        Run hyperparameter optimization after every ``optimization_iterations``
        Gibbs sampling iterations. Set to ``None`` for no optimization.
    :param optimization_burnin:
        Number of Gibbs sampling iterations before hyperparameter optimization
        starts.
"""
def __init__(self, partitioned_docs, index_vocab,
num_topics=5, alpha=4, beta=0.01, iterations=1000,
optimization_iterations=100, optimization_burnin=50):
# initialize corpus
self.documents = partitioned_docs
self.num_documents = len(partitioned_docs)
self.index_vocab = index_vocab
self.num_words = len(index_vocab)
self.num_topics = num_topics
# initialize hyperparameters
self.alpha = [alpha] * self.num_topics
self.alpha_sum = alpha * num_topics
self.beta = beta
self.beta_sum = self.beta * self.num_words
# gibbs sampling parameters
self.iterations = iterations
self.optimization_iterations = optimization_iterations
self.optimization_burnin = optimization_burnin
def _initialize(self):
self._init_documents()
# Array stores per topic counts
self.n_t = [0] * self.num_topics
# 2d array that stores document/topic counts by phrase, and word respectively
self.n_d_t_phrases = [[0] * self.num_topics for __ in range(self.num_documents)]
self.n_d_t_words = [[0] * self.num_topics for __ in range(self.num_documents)]
# 2d array that stores topic/word counts
self.n_t_w = [[0] * self.num_words for __ in range(self.num_topics)]
self._init_documents_topics()
self._init_histogram()
def _init_documents(self):
self.documents_words = []
self.max_documents_phrases_count = 0
self.max_documents_words_count = 0
for document in self.documents:
document_words = []
document_words_count = 0
for phrase in document:
for word in phrase:
document_words.append(word)
document_words_count += 1
self.documents_words.append(document_words)
self.max_documents_phrases_count = max(self.max_documents_phrases_count, len(document))
self.max_documents_words_count = max(self.max_documents_words_count, document_words_count)
def _init_documents_topics(self):
# we assign a random topic to each phrase in the document
self.documents_phrases_topic = []
for document_index, document in enumerate(self.documents):
document_phrases_topic = []
for phrase_index, phrase in enumerate(document):
document_phrase_topic = random.randint(0,self.num_topics-1)
document_phrases_topic.append(document_phrase_topic)
# Increase counts
self.n_t[document_phrase_topic] += len(phrase)
self.n_d_t_phrases[document_index][document_phrase_topic] += 1
self.n_d_t_words[document_index][document_phrase_topic] += len(phrase)
for word_index in phrase:
self.n_t_w[document_phrase_topic][word_index] += 1
self.documents_phrases_topic.append(document_phrases_topic)
def _init_histogram(self):
self.document_length_histogram = [0] * (self.max_documents_words_count + 1)
for document in self.documents_words:
self.document_length_histogram[len(document)] += 1
self._init_topic_document_histogram()
def _init_topic_document_histogram(self):
        self.topic_document_histogram = [[0] * (self.max_documents_words_count + 1)
                                         for __ in range(self.num_topics)]
def _sample_topic(self, sampling_probabilities):
threshold = random.uniform(0.0,1.0) * sum(sampling_probabilities)
cumulative_sum = 0
for topic in range(self.num_topics):
cumulative_sum += sampling_probabilities[topic]
if cumulative_sum > threshold:
break
return topic
def _calculate_topic_probabilities(self, document_index, phrase_index):
topic_probabilities = []
for topic_index in range(self.num_topics):
left = self.alpha[topic_index] + self.n_d_t_phrases[document_index][topic_index]
right = 1.0
for word_index in self.documents[document_index][phrase_index]:
right *= (self.beta + self.n_t_w[topic_index][word_index]) / (self.beta_sum + (self.n_t[topic_index]))
topic_probability = left * right
topic_probabilities.append(topic_probability)
return topic_probabilities
def _should_optimize(self, iterations):
if self.optimization_iterations is None:
            return False
iterations_condition = ((iterations+1) % self.optimization_iterations) == 0
burnin_condition = ((iterations+1) > self.optimization_burnin)
return iterations_condition and burnin_condition
def run(self):
self._initialize()
for iteration in range(self.iterations):
if iteration % 100 == 0:
print "iteration", iteration
for document_index, document in enumerate(self.documents):
for phrase_index, phrase in enumerate(document):
document_phrase_topic = self.documents_phrases_topic[document_index][phrase_index]
# reduce counts for sampling
self.n_t[document_phrase_topic] -= len(phrase)
self.n_d_t_phrases[document_index][document_phrase_topic] -= 1
self.n_d_t_words[document_index][document_phrase_topic] -= len(phrase)
for word_index in phrase:
self.n_t_w[document_phrase_topic][word_index] -= 1
sampling_probabilities = self._calculate_topic_probabilities(document_index, phrase_index)
document_phrase_topic = self._sample_topic(sampling_probabilities)
self.documents_phrases_topic[document_index][phrase_index] = document_phrase_topic
self.n_t[document_phrase_topic] += len(phrase)
self.n_d_t_phrases[document_index][document_phrase_topic] += 1
self.n_d_t_words[document_index][document_phrase_topic] += len(phrase)
for word_index in phrase:
self.n_t_w[document_phrase_topic][word_index] += 1
if self._should_optimize(iteration):
self._optimize_hyperparameters()
topics = self._getTopics()
return self.documents_phrases_topic, self._getMostFrequentPhrasalTopics(topics)
def _optimize_hyperparameters(self):
self._init_topic_document_histogram()
for topic_index in range(self.num_topics):
for document_index in range(len(self.documents)):
self.topic_document_histogram[topic_index][self.n_d_t_words[document_index][topic_index]] += 1
self.alpha_sum = dirichlet.learn_parameters(
self.alpha, self.topic_document_histogram, self.document_length_histogram)
max_topic_size = 0
for topic_index in range (self.num_topics):
if self.n_t[topic_index] > max_topic_size:
max_topic_size = self.n_t[topic_index]
topic_size_histogram = [0] * (max_topic_size + 1)
count_histogram = [0] * (max_topic_size + 1)
topic_index = 0
for topic_index in range(self.num_topics):
topic_size_histogram[self.n_t[topic_index]] += 1
for word_index in range(self.num_words):
count_histogram[
self.n_t_w[topic_index][word_index]] += 1
self.beta_sum = dirichlet.learn_symmetric_concentration(
count_histogram, topic_size_histogram, self.num_words, self.beta_sum)
self.beta = self.beta_sum / self.num_words
def store_phrase_topics(self, path):
        with open(path, 'w') as f:
            for document in self.documents_phrases_topic:
                f.write(",".join(str(phrase) for phrase in document))
                f.write("\n")
def _getTopics(self):
"""
Returns the set of phrases modelling each document.
"""
topics = []
for i in range(self.num_topics):
topics.append(Counter())
for document_index, document in enumerate(self.documents_phrases_topic):
for phrase_index, phrase_topic in enumerate(document):
phrase = " ".join(str(word) for word in self.documents[document_index][phrase_index])
topics[phrase_topic][phrase] += 1
return topics
def _getMostFrequentPhrasalTopics(self, topics):
output = []
topic_index = 0
for topic in topics:
output_for_topic = []
print "topic", topic_index
for phrase, count in topic.most_common():
if len(phrase.split(" ")) > 1:
val = utils._get_string_phrase(phrase, self.index_vocab), count
output_for_topic.append(val)
print val
output.append(output_for_topic)
topic_index += 1
return output
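# Hedged usage sketch (editor addition): the corpus, vocabulary and parameter
# values below are made up for illustration and are not part of the original
# module; the function is defined but never called here.
def _example_run_phrase_lda():
    # Two tiny "documents"; each document is a list of phrases and each
    # phrase is a list of word indices into the vocabulary.
    docs = [[[0, 1], [2]], [[0, 1], [3]]]
    vocab = ["machine", "learning", "data", "model"]
    lda = PhraseLDA(docs, vocab, num_topics=2, iterations=10,
                    optimization_iterations=None)
    phrase_topics, frequent_phrases = lda.run()
    return phrase_topics, frequent_phrases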
| {
"content_hash": "aa3be3d9f381e5c479b24900db3d2920",
"timestamp": "",
"source": "github",
"line_count": 250,
"max_line_length": 118,
"avg_line_length": 42.204,
"alnum_prop": 0.6079044640318453,
"repo_name": "anirudyd/topmine",
"id": "5b5aa9b268b4f4fa88e3b7b9bf3159b57a8aa10b",
"size": "10551",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "topmine_src/phrase_lda.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31949"
}
],
"symlink_target": ""
} |
from os import path
from setuptools import setup, find_packages
MAIN_MODULE = 'agent'
# Find the agent package that contains the main module
packages = find_packages('.')
agent_package = ''
for package in find_packages():
# Because there could be other packages such as tests
    if path.isfile(package + '/' + MAIN_MODULE + '.py'):
agent_package = package
if not agent_package:
raise RuntimeError('None of the packages under {dir} contain the file '
'{main_module}'.format(main_module=MAIN_MODULE + '.py',
dir=path.abspath('.')))
# Find the version number from the main module
agent_module = agent_package + '.' + MAIN_MODULE
_temp = __import__(agent_module, globals(), locals(), ['__version__'], 0)
__version__ = _temp.__version__
# Setup
setup(
name=agent_package + 'agent',
version=__version__,
install_requires=['volttron', "numpy>1.8,<2", "transitions"],
packages=packages,
entry_points={
'setuptools.installation': [
'eggsecutable = ' + agent_module + ':main',
]
}
)
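# Hedged note (editor addition): with the 'setuptools.installation' entry point
# above, the agent is normally packaged as an egg before being installed into a
# VOLTTRON platform, e.g.
#     python setup.py bdist_egg
# (the exact install step depends on the VOLTTRON release and is not shown here).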
| {
"content_hash": "108b9e5dd0ebe0043e9fe5c535262b5c",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 78,
"avg_line_length": 33.029411764705884,
"alnum_prop": 0.609973285841496,
"repo_name": "VOLTTRON/volttron-applications",
"id": "2d527eaf23b8fbbc30cb679963a3d43431452807",
"size": "3024",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MixMarketServiceAgent/setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "221216"
},
{
"name": "CSS",
"bytes": "36026"
},
{
"name": "Gherkin",
"bytes": "18993"
},
{
"name": "Gnuplot",
"bytes": "2486"
},
{
"name": "HTML",
"bytes": "105555"
},
{
"name": "JavaScript",
"bytes": "815273"
},
{
"name": "Makefile",
"bytes": "2413"
},
{
"name": "Objective-C",
"bytes": "1042"
},
{
"name": "Python",
"bytes": "5294800"
},
{
"name": "Shell",
"bytes": "6202"
}
],
"symlink_target": ""
} |
'''
## License
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2015 Dexter Industries
Jetduino for the Jetson TK1/TX1: an open source platform for connecting
Grove Sensors to the Jetson embedded supercomputers.
Copyright (C) 2016 NeuroRobotic Technologies
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import time
import jetduino
from jetduino_pins import *
# Connect the Grove Switch to digital port D3
# SIG,NC,VCC,GND
switch = ARD_D3
jetduino.pinMode(switch, INPUT_PIN)
while True:
try:
print (jetduino.digitalRead(switch))
time.sleep(.5)
except IOError:
print ("Error")
| {
"content_hash": "792de75c7d12e119feb57119d0b395d0",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 103,
"avg_line_length": 35.95744680851064,
"alnum_prop": 0.7775147928994083,
"repo_name": "NeuroRoboticTech/Jetduino",
"id": "e83208c624eca5aa563382c080a10604beb62d39",
"size": "2070",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Software/Python/grove_switch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "37042"
},
{
"name": "C",
"bytes": "38867"
},
{
"name": "C#",
"bytes": "33014"
},
{
"name": "C++",
"bytes": "101883"
},
{
"name": "CMake",
"bytes": "3553"
},
{
"name": "Go",
"bytes": "3646"
},
{
"name": "JavaScript",
"bytes": "30142"
},
{
"name": "Python",
"bytes": "568027"
},
{
"name": "Shell",
"bytes": "17661"
}
],
"symlink_target": ""
} |
import numpy as np
from numpy.random import randint
from textwrap import dedent
import pytest
import pandas as pd
from pandas import DataFrame
from pandas import read_clipboard
from pandas import get_option
from pandas.compat import PY2
from pandas.util import testing as tm
from pandas.util.testing import makeCustomDataframe as mkdf
from pandas.io.clipboard.exceptions import PyperclipException
from pandas.io.clipboard import clipboard_set, clipboard_get
try:
DataFrame({'A': [1, 2]}).to_clipboard()
_DEPS_INSTALLED = 1
except (PyperclipException, RuntimeError):
_DEPS_INSTALLED = 0
def build_kwargs(sep, excel):
kwargs = {}
if excel != 'default':
kwargs['excel'] = excel
if sep != 'default':
kwargs['sep'] = sep
return kwargs
@pytest.fixture(params=['delims', 'utf8', 'string', 'long', 'nonascii',
'colwidth', 'mixed', 'float', 'int'])
def df(request):
data_type = request.param
if data_type == 'delims':
return pd.DataFrame({'a': ['"a,\t"b|c', 'd\tef´'],
'b': ['hi\'j', 'k\'\'lm']})
elif data_type == 'utf8':
return pd.DataFrame({'a': ['µasd', 'Ωœ∑´'],
'b': ['øπ∆˚¬', 'œ∑´®']})
elif data_type == 'string':
return mkdf(5, 3, c_idx_type='s', r_idx_type='i',
c_idx_names=[None], r_idx_names=[None])
elif data_type == 'long':
max_rows = get_option('display.max_rows')
return mkdf(max_rows + 1, 3,
data_gen_f=lambda *args: randint(2),
c_idx_type='s', r_idx_type='i',
c_idx_names=[None], r_idx_names=[None])
elif data_type == 'nonascii':
return pd.DataFrame({'en': 'in English'.split(),
'es': 'en español'.split()})
elif data_type == 'colwidth':
_cw = get_option('display.max_colwidth') + 1
return mkdf(5, 3, data_gen_f=lambda *args: 'x' * _cw,
c_idx_type='s', r_idx_type='i',
c_idx_names=[None], r_idx_names=[None])
elif data_type == 'mixed':
return DataFrame({'a': np.arange(1.0, 6.0) + 0.01,
'b': np.arange(1, 6),
'c': list('abcde')})
elif data_type == 'float':
return mkdf(5, 3, data_gen_f=lambda r, c: float(r) + 0.01,
c_idx_type='s', r_idx_type='i',
c_idx_names=[None], r_idx_names=[None])
elif data_type == 'int':
return mkdf(5, 3, data_gen_f=lambda *args: randint(2),
c_idx_type='s', r_idx_type='i',
c_idx_names=[None], r_idx_names=[None])
else:
raise ValueError
@pytest.mark.single
@pytest.mark.skipif(not _DEPS_INSTALLED,
reason="clipboard primitives not installed")
class TestClipboard(object):
def check_round_trip_frame(self, data, excel=None, sep=None,
encoding=None):
data.to_clipboard(excel=excel, sep=sep, encoding=encoding)
result = read_clipboard(sep=sep or '\t', index_col=0,
encoding=encoding)
tm.assert_frame_equal(data, result, check_dtype=False)
# Test that default arguments copy as tab delimited
def test_round_trip_frame(self, df):
self.check_round_trip_frame(df)
# Test that explicit delimiters are respected
@pytest.mark.parametrize('sep', ['\t', ',', '|'])
def test_round_trip_frame_sep(self, df, sep):
self.check_round_trip_frame(df, sep=sep)
# Test white space separator
def test_round_trip_frame_string(self, df):
df.to_clipboard(excel=False, sep=None)
result = read_clipboard()
assert df.to_string() == result.to_string()
assert df.shape == result.shape
# Two character separator is not supported in to_clipboard
# Test that multi-character separators are not silently passed
def test_excel_sep_warning(self, df):
with tm.assert_produces_warning():
df.to_clipboard(excel=True, sep=r'\t')
# Separator is ignored when excel=False and should produce a warning
def test_copy_delim_warning(self, df):
with tm.assert_produces_warning():
df.to_clipboard(excel=False, sep='\t')
# Tests that the default behavior of to_clipboard is tab
# delimited and excel="True"
@pytest.mark.parametrize('sep', ['\t', None, 'default'])
@pytest.mark.parametrize('excel', [True, None, 'default'])
def test_clipboard_copy_tabs_default(self, sep, excel, df):
kwargs = build_kwargs(sep, excel)
df.to_clipboard(**kwargs)
if PY2:
# to_clipboard copies unicode, to_csv produces bytes. This is
# expected behavior
assert clipboard_get().encode('utf-8') == df.to_csv(sep='\t')
else:
assert clipboard_get() == df.to_csv(sep='\t')
# Tests reading of white space separated tables
@pytest.mark.parametrize('sep', [None, 'default'])
@pytest.mark.parametrize('excel', [False])
def test_clipboard_copy_strings(self, sep, excel, df):
kwargs = build_kwargs(sep, excel)
df.to_clipboard(**kwargs)
result = read_clipboard(sep=r'\s+')
assert result.to_string() == df.to_string()
assert df.shape == result.shape
def test_read_clipboard_infer_excel(self):
# gh-19010: avoid warnings
clip_kwargs = dict(engine="python")
text = dedent("""
John James Charlie Mingus
1 2
4 Harry Carney
""".strip())
clipboard_set(text)
df = pd.read_clipboard(**clip_kwargs)
# excel data is parsed correctly
assert df.iloc[1][1] == 'Harry Carney'
# having diff tab counts doesn't trigger it
text = dedent("""
a\t b
1 2
3 4
""".strip())
clipboard_set(text)
res = pd.read_clipboard(**clip_kwargs)
text = dedent("""
a b
1 2
3 4
""".strip())
clipboard_set(text)
exp = pd.read_clipboard(**clip_kwargs)
tm.assert_frame_equal(res, exp)
def test_invalid_encoding(self, df):
# test case for testing invalid encoding
with pytest.raises(ValueError):
df.to_clipboard(encoding='ascii')
with pytest.raises(NotImplementedError):
pd.read_clipboard(encoding='ascii')
@pytest.mark.parametrize('enc', ['UTF-8', 'utf-8', 'utf8'])
def test_round_trip_valid_encodings(self, enc, df):
self.check_round_trip_frame(df, encoding=enc)
| {
"content_hash": "525dee49ef5c1179cb70b51d113620b3",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 73,
"avg_line_length": 36.54644808743169,
"alnum_prop": 0.569078947368421,
"repo_name": "pratapvardhan/pandas",
"id": "a6b331685e72a7126b7db437795fb383073015c7",
"size": "6731",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pandas/tests/io/test_clipboard.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3847"
},
{
"name": "C",
"bytes": "432930"
},
{
"name": "C++",
"bytes": "17193"
},
{
"name": "HTML",
"bytes": "551714"
},
{
"name": "Makefile",
"bytes": "563"
},
{
"name": "PowerShell",
"bytes": "2970"
},
{
"name": "Python",
"bytes": "13598412"
},
{
"name": "Shell",
"bytes": "25368"
},
{
"name": "Smarty",
"bytes": "2045"
}
],
"symlink_target": ""
} |
import os
import neutron
def find_file(filename, path):
"""Find a file with name 'filename' located in 'path'."""
for root, _, files in os.walk(path):
if filename in files:
return os.path.abspath(os.path.join(root, filename))
def find_sample_file(filename):
"""Find a file with name 'filename' located in the sample directory."""
return find_file(
filename,
path=os.path.join(neutron.__path__[0], '..', 'etc'))
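# Hedged example (editor addition): the filename below is illustrative.
#   find_sample_file('api-paste.ini')  ->  absolute path of the first match
#                                          found under neutron/../etc, or
#                                          None when no such file exists.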
| {
"content_hash": "943ab992f35c169c9323b12ce0a64c29",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 75,
"avg_line_length": 27.529411764705884,
"alnum_prop": 0.6239316239316239,
"repo_name": "alexandrucoman/vbox-neutron-agent",
"id": "f6065c0640ca8cd3e1a6fb925fe71672257393d6",
"size": "1074",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "neutron/tests/common/helpers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "980"
},
{
"name": "Python",
"bytes": "7321102"
},
{
"name": "Shell",
"bytes": "12819"
}
],
"symlink_target": ""
} |
# Copyright 2015 Lukas Kemmer
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import faint
from . parse_util import Matrix
import faint.svg.svg_re as svg_re
import re
from . parse_util import (
apply_transforms,
ABSOLUTE_UNIT_FACTORS,
parse_color,
parse_color_noref,
)
from faint.svg.util import (
deg2rad
)
from faint.svg.ifaint_util import (
mul_matrix_tri,
remove_stroke,
remove_fill,
to_faint_cap_str)
def to_faint_fill_rule(svg_fill_rule):
if svg_fill_rule == "nonzero":
return "winding"
elif svg_fill_rule == "evenodd":
return "evenodd"
else:
return "winding"
class ParseState:
"""The state when arriving at a node. Contains the inheritable
settings from "above", as well as the current transformation matrix
(ctm) and the frame-props.
"""
def __init__(self,
image_props,
frame_props,
ctm=None,
settings=None,
ids=None,
currentColor=None,
viewPort=None,
viewBox=None,
system_language="en"):
self.image_props = image_props
self.props = frame_props
self.ctm = Matrix.identity() if ctm is None else ctm
self.settings = (node_default_settings() if settings is None
else settings)
self.ids = {} if ids is None else ids
self.currentColor = ((255, 0, 0) if currentColor is None
else currentColor)
self.viewBox = viewBox
        self.viewPort = viewPort
# ISO 639-1 language code
self.system_language = system_language
def modified(self,
ctm=None,
settings=None,
currentColor=None,
viewPort=None,
viewBox=None):
"""Returns a new ParseState for recursing, with optionally updated
transformation matrix or settings. The props is unchanged.
"""
if ctm is None:
ctm = self.ctm
if settings is None:
settings = self.settings
if currentColor is None:
currentColor = self.currentColor
if viewPort is None:
viewPort = self.viewPort
if viewBox is None:
viewBox = self.viewBox
return ParseState(self.image_props,
self.props,
ctm,
settings,
self.ids,
currentColor,
viewPort=viewPort,
viewBox=viewBox,
system_language=self.system_language)
def set_grid(self, *args, **kwArgs):
self.image_props.set_grid(*args, **kwArgs)
def transform_object(self, object_id):
"""Transform the object with object_id in props with the CTM."""
tri = self.props.get_obj_tri(object_id)
tri = mul_matrix_tri(self.ctm, tri)
self.props.set_obj_tri(object_id, tri)
def transform_tri(self, tri):
"""Return the tri transformed by the CTM"""
return mul_matrix_tri(self.ctm, tri)
def updated(self, node):
"""Return a new ParseState updated by the node attributes (e.g.
transform, settings etc.)
"""
assert self.ctm is not None
transforms = parse_transform_list(node.get('transform', ''))
ctm = apply_transforms(transforms, self.ctm)
cc = node.get('color')
if cc is not None:
cc = parse_color_noref(cc, 1.0, self)
# Fixme: Ugly. Create the new state instead
currentCurrentColor = self.currentColor
if cc is not None:
self.currentColor = cc
settings = self._updated_settings(node, ctm)
self.currentColor = currentCurrentColor
return self.modified(ctm=ctm, settings=settings, currentColor=cc)
def _updated_settings(self, node, ctm):
# Fixme: Move all stuff from parse_style_dict here.
# Fixme: Traverse the keys instead
# Fixme: Handle 'inherit'
settings = faint.Settings()
settings.update_all(self.settings)
settings.fg = self.settings.fg
settings.bg = self.settings.bg
attributes = get_style_dict(node.get('style', ''))
attributes.update(node.attrib)
settings.fillstyle = self.settings.fillstyle
#settings.update_all(self.settings)
stroke_opacity = attributes.get("stroke-opacity", "1.0")
fill_opacity = attributes.get("fill-opacity", "1.0")
stroke = attributes.get("stroke")
fill = attributes.get("fill")
fillstyle_to_settings(settings, stroke, fill, stroke_opacity, fill_opacity, self)
fillrule = attributes.get("fill-rule", "nonzero")
stroke_width = attributes.get('stroke-width')
if stroke_width == "inherit":
pass
elif stroke_width is not None:
# Fixme: Should this be the new state rather than self?
sw1 = svg_length_attr(stroke_width, self)
# Fixme: Using the new ctm, verify.
# Fixme: using .a is simplistic
sw1 *= ctm.a
try:
settings.linewidth = sw1
except ValueError as e:
# Todo: Allow larger stroke width in Faint
# also, check how to handle negative.
self.props.add_warning(str(e))
stroke_dasharray = attributes.get('stroke-dasharray', None)
if stroke_dasharray is not None:
if stroke_dasharray == 'none':
# Fixme: Verify "none"
settings.linestyle = 's'
else:
# Fixme: actually use the dash-array
settings.linestyle = 'ld'
stroke_linejoin = attributes.get('stroke-linejoin', None)
if stroke_linejoin is not None and stroke_linejoin != 'inherit':
# Fixme: settings.join probably not 100% aligned with
# svg join
settings.join = stroke_linejoin
stroke_linecap = attributes.get('stroke-linecap', None)
if stroke_linecap is not None:
settings.cap = to_faint_cap_str(stroke_linecap)
font_size = attributes.get('font-size', None)
if font_size is not None:
# Fixme: What other names are there?
if font_size == 'medium':
settings.fontsize = 12.0 # Fixme
elif font_size == 'inherit':
# TODO: Do what?
pass
else:
# Fixme: Terrible
settings.fontsize = svg_length_attr(font_size, self)
font_family = attributes.get("font-family", None)
if font_family is not None:
# Fixme: face vs family eh
settings.font = font_family
font_style = attributes.get("font-style", None)
if font_style is not None:
# Fixme: Extend
settings.fontitalic = font_style == "italic"
font_weight = attributes.get('font-weight', None)
if font_weight is not None:
settings.fontbold = font_weight == 'bold'
if fillrule != "inherit":
settings.fillrule = to_faint_fill_rule(fillrule)
parse_marker_attr(node, settings)
return settings
def add_warning(self, text):
"""Add a load warning to the contained props."""
self.props.add_warning(text)
def add_ref_item(self, ref_id, obj):
"""Add an item accessible by id (item is a tuple, id to object)"""
self.ids[ref_id] = obj
def add_fill(settings, fill, fill_opacity, state):
"""Adds the specified fill-style to the settings object, preserving
border.
"""
if settings.fillstyle == 'border':
settings.fillstyle = 'border+fill'
settings.bg = parse_color(fill, fill_opacity, state)
else:
settings.fillstyle = 'fill'
settings.fg = parse_color(fill, fill_opacity, state)
def add_stroke(settings, stroke, stroke_opacity, state):
"""Adds the specified stroke-style to the settings object, preserving
fill
"""
if settings.fillstyle == 'fill':
settings.fillstyle = 'border+fill'
settings.bg = settings.fg
settings.fg = parse_color(stroke, stroke_opacity, state)
else:
settings.fillstyle = 'border'
settings.fg = parse_color(stroke, stroke_opacity, state)
def parse_marker_attr(node, settings):
"""Parses the node's SVG 'marker-start', 'marker-end' attributes
"""
arrowhead_str = node.get('marker-end')
arrowtail_str = node.get('marker-start')
# Fixme: actually refer to marked structure
arrowhead = (arrowhead_str is not None and
arrowhead_str.startswith('url(#Arrowhead')) # Fixme: Hack
arrowtail = (arrowtail_str is not None and
arrowtail_str.startswith('url(#Arrowtail')) # Fixme: Hack
if arrowhead and arrowtail:
settings.arrow = 'both'
elif arrowtail:
settings.arrow = 'back'
elif arrowhead:
settings.arrow = 'front'
else:
settings.arrow = 'none'
def fillstyle_to_settings(settings, stroke, fill, stroke_opacity, fill_opacity, state):
"""Converts from SVG stroke and fill to the combined faint fillstyle +
fgcol, bgcol.
"""
# Fixme: Simplify
if stroke == None and fill != None:
if fill == "none":
remove_fill(settings)
else:
add_fill(settings, fill, fill_opacity, state)
return
elif stroke != None and fill == None:
if stroke == "none":
remove_stroke(settings)
else:
add_stroke(settings, stroke, stroke_opacity, state)
return
elif stroke != None and fill != None:
if stroke == "none" and fill == "none":
settings.fillstyle = 'none'
return
elif stroke == "none" and fill != "none":
settings.fillstyle = 'fill'
settings.fg = parse_color(fill, fill_opacity, state)
return
elif stroke != "none" and fill == "none":
settings.fillstyle = 'border'
settings.fg = parse_color(stroke, stroke_opacity, state)
return
elif stroke != "none" and fill != "none":
settings.fillstyle = 'border+fill'
settings.fg = parse_color(stroke, stroke_opacity, state)
settings.bg = parse_color(fill, fill_opacity, state)
return
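# Editor note (hedged): examples of the mapping implemented above, taken from
# the branch where both stroke and fill are specified:
#   stroke="none", fill="#f00"  -> fillstyle 'fill',        fg = parsed fill
#   stroke="#00f", fill="none"  -> fillstyle 'border',      fg = parsed stroke
#   stroke="#00f", fill="#f00"  -> fillstyle 'border+fill', fg = stroke, bg = fill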
def get_style_dict(style):
"""Parses an SVG style attribute string, returning it as a key/value
dictionary.
"""
# Fixme: Review, from old
style_items = style.split(";")
style_dict = {}
for item in style_items:
if len(item) > 0:
key, value = item.split(":")
key, value = key.strip(), value.strip()
style_dict[key] = value
return style_dict
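# Hedged example (editor addition): demonstrates the key/value split performed
# by get_style_dict; the style string is illustrative. Defined but never called.
def _example_get_style_dict():
    style = "fill:none; stroke:#ff0000; stroke-width:2"
    assert get_style_dict(style) == {
        'fill': 'none',
        'stroke': '#ff0000',
        'stroke-width': '2',
    }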
def node_default_settings():
"""Returns the initial default Settings."""
# Fixme: Review, from old
settings = faint.Settings()
settings.linewidth = 1.0
settings.cap = 'flat'
settings.fg = (0, 0, 0)
settings.bg = (0, 0, 0)
settings.fillstyle = 'fill'
return settings
def parse_transform(s):
"""Parses an entry in a transform-list."""
def _parse_args(s):
assert s.startswith("(")
assert s.endswith(")")
# Fixme: parse number (i.e. incl. Ee etc)
str_args = [arg for arg in s.replace(" ", ",")[1:-1].split(",") if len(arg) != 0]
return [float(arg) for arg in str_args]
op, args = s.split("(")
args = _parse_args('(' + args)
if op == "skewX":
return Matrix.skewX(deg2rad(*args))
elif op == "skewY":
return Matrix.skewY(deg2rad(*args))
elif op == "rotate":
rad = deg2rad(args[0])
pivot = args[1:] if len(args) == 3 else None
return Matrix.rotation(rad, pivot)
elif op == "translate":
x = args[0]
y = args[1] if len(args) == 2 else 0
return Matrix.translation(x,y)
elif op == "matrix":
return Matrix(*args)
elif op == "scale":
sx = args[0]
sy = args[1] if len(args) == 2 else sx
return Matrix.scale(sx, sy)
else:
raise svg_error("Unsupported transform: %s" % op)
def parse_transform_list(s):
"""Parses an SVG transform attribute"""
transform_list = re.findall(svg_re.transform_list, s)
return [parse_transform(tr) for tr in transform_list]
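# Hedged example (editor addition): the transform strings are typical SVG
# values; the results are parse_util.Matrix instances whose accessors are not
# exercised here. Defined but never called.
def _example_parse_transforms():
    translation = parse_transform("translate(10,20)")
    scaling = parse_transform("scale(2)")             # uniform scale, sy defaults to sx
    rotation = parse_transform("rotate(90, 50, 50)")  # pivot at (50, 50)
    combined = parse_transform_list("translate(10,20) scale(2)")
    return translation, scaling, rotation, combined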
def svg_length_attr_dumb(value_str, props, full_span):
"""Parses an svg length-attribute from the value_str. full_span is
used as reference for percentages."""
m = re.match(svg_re.length_attr, value_str)
if m is None:
raise SvgLengthError(value_str)
value, unit = m.groups()
if unit == "%":
# Fixme: More work required.
if full_span.__class__ == float: # Fixme: Disallow non-float!
return float(value)/100 * full_span
else:
return float(value)/100 * full_span[0]
elif unit in ABSOLUTE_UNIT_FACTORS:
return float(value) * ABSOLUTE_UNIT_FACTORS[unit]
elif unit in ('em','ex'):
# Fixme: Implement.
props.add_warning("Unsupported unit: %s" % unit)
return float(value)
else:
props.add_warning("Invalid unit: %s" % unit)
return float(value)
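# Hedged example (editor addition): numbers are illustrative; `props` only
# needs an add_warning() method and is not exercised for these units.
def _example_svg_length_attr_dumb(props):
    half = svg_length_attr_dumb("50%", props, 200.0)        # 100.0: percentage of full_span
    in_points = svg_length_attr_dumb("10pt", props, 200.0)  # 10 * ABSOLUTE_UNIT_FACTORS['pt']
    return half, in_points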
def svg_length_attr(value_str, state):
"""Parses an svg length attribute."""
# Fixme: Need to discern x, y
full_span = (state.viewBox[2], state.viewBox[3])
return svg_length_attr_dumb(value_str, state.props,
full_span)
| {
"content_hash": "f1e3e1a4eee5a74cbfa761978ef0cf9a",
"timestamp": "",
"source": "github",
"line_count": 443,
"max_line_length": 89,
"avg_line_length": 33.11738148984199,
"alnum_prop": 0.5641742212528117,
"repo_name": "lukas-ke/faint-graphics-editor",
"id": "7b1dab0c6fb89c2210cb9dd123083984ecb7db76",
"size": "14720",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py/faint/svg/parse/parse_state.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "49581"
},
{
"name": "C++",
"bytes": "3170874"
},
{
"name": "Emacs Lisp",
"bytes": "13474"
},
{
"name": "HTML",
"bytes": "26096"
},
{
"name": "NSIS",
"bytes": "2088"
},
{
"name": "Python",
"bytes": "537915"
}
],
"symlink_target": ""
} |
import cPickle
import logging
import sys
import time
import traceback
import uuid
from conary import versions as cny_versions
from conary.deps import deps as cny_deps
from lxml import etree
from django.db import connection
from django.conf import settings
from django.contrib.redirects import models as redirectmodels
from django.contrib.sites import models as sitemodels
from django.core.exceptions import ObjectDoesNotExist
from mint.django_rest import signals, timeutils
from mint.django_rest.rbuilder.inventory import errors
from mint.django_rest.rbuilder.inventory import models
from mint.django_rest.rbuilder.inventory import zones as zmodels
from mint.django_rest.rbuilder.targets import models as targetmodels
from mint.django_rest.rbuilder.manager import basemanager
from mint.django_rest.rbuilder.querysets import models as querysetmodels
from mint.django_rest.rbuilder.jobs import models as jobmodels
from mint.rest import errors as mint_rest_errors
log = logging.getLogger(__name__)
exposed = basemanager.exposed
class SystemManager(basemanager.BaseManager):
LaunchWaitForNetworkEvents = set([
jobmodels.EventType.LAUNCH_WAIT_FOR_NETWORK
])
NonresponsiveStates = set([
models.SystemState.NONRESPONSIVE,
models.SystemState.NONRESPONSIVE_NET,
models.SystemState.NONRESPONSIVE_HOST,
models.SystemState.NONRESPONSIVE_SHUTDOWN,
models.SystemState.NONRESPONSIVE_SUSPENDED,
models.SystemState.NONRESPONSIVE_CREDENTIALS,
])
now = timeutils.now
@exposed
def getEventTypes(self):
EventTypes = jobmodels.EventTypes()
EventTypes.event_type = list(models.Cache.all(jobmodels.EventType))
return EventTypes
@exposed
def getEventType(self, event_type_id):
eventType = models.Cache.get(jobmodels.EventType, pk=int(event_type_id))
return eventType
@exposed
def getEventTypeByName(self, eventTypeName):
return models.Cache.get(jobmodels.EventType, name=eventTypeName)
@exposed
def updateEventType(self, event_type):
"""Update an event type"""
if not event_type:
return
event_type.save()
return event_type
@exposed
def getZone(self, zone_id):
zone = models.Cache.get(zmodels.Zone, pk=int(zone_id))
return zone
@exposed
def getLocalZone(self):
"Return the zone for this rBuilder"
zone = models.Cache.get(zmodels.Zone, name=zmodels.Zone.LOCAL_ZONE)
return zone
@exposed
def getZoneByJID(self, node_jid):
zone = models.ManagementNode.objects.get(node_jid=node_jid).zone
return zone
@exposed
def getZones(self):
Zones = zmodels.Zones()
Zones.zone = list(models.Cache.all(zmodels.Zone))
return Zones
@exposed
def addZone(self, zone):
"""Add a zone"""
if not zone:
return
zone.save()
return zone
@exposed
def updateZone(self, zone):
"""Update a zone"""
if not zone:
return
zone.save()
return zone
@exposed
def deleteZone(self, zone):
"""Update a zone"""
if not zone:
return
zone.delete()
return
@exposed
def getNetwork(self, network_id):
network = models.Network.objects.get(pk=network_id)
return network
@exposed
def getNetworks(self):
Networks = models.Networks()
Networks.network = list(models.Network.objects.all())
return Networks
@exposed
def updateNetwork(self, network):
"""Update a network"""
if not network:
return
network.save()
return network
@exposed
def deleteNetwork(self, network_id):
network = models.Network.objects.get(pk=network_id)
network.delete()
@exposed
def getSystem(self, system_id):
system = models.System.objects.select_related().get(pk=system_id)
return system
@exposed
def deleteSystem(self, system_id):
system = models.System.objects.get(pk=system_id)
system.delete()
@exposed
def getSystemByTargetSystemId(self, target_system_id):
systems = models.System.objects.select_related().filter(
target_system_id=target_system_id)
if systems:
return systems[0]
else:
return None
@classmethod
def _getClassName(cls, field):
xobj = getattr(field, '_xobj', None)
if xobj:
clsName = xobj.tag
else:
clsName = field._meta.verbose_name
return models.modellib.type_map[clsName]
@exposed
def getSystems(self):
systems = models.Systems()
qs = models.System.objects.select_related()
systems.system = qs.all()
return systems
@exposed
def getInventorySystems(self):
systems = models.Systems()
systems.system = \
models.System.objects.select_related().filter(system_type__infrastructure=False)
return systems
@exposed
def getInfrastructureSystems(self):
systems = models.Systems()
systems.system = \
models.System.objects.filter(system_type__infrastructure=True)
return systems
@exposed
def getManagementNode(self, management_node_id):
managementNode = models.ManagementNode.objects.get(pk=management_node_id)
return managementNode
@exposed
def getManagementNodes(self):
ManagementNodes = models.ManagementNodes()
ManagementNodes.management_node = list(models.ManagementNode.objects.all())
return ManagementNodes
@exposed
def addManagementNode(self, managementNode):
"""Add a management node to the inventory"""
if not managementNode:
return
managementNode.save()
self.setSystemState(managementNode)
#TO-DO Need to add the JID to the models.ManagementNode object
return managementNode
@exposed
def synchronizeZones(self, managementNodes):
# Grab all existing management nodes
newhash = set(x.pk for x in managementNodes.management_node)
oldNodes = models.ManagementNode.objects.all()
for node in oldNodes:
if node.pk not in newhash:
# Ideally we want to disassociate the management node from the
# zone, but zone_id is not nullable
# For now, leave the system around until RBL-7703 is fixed
continue
# For good measure, save the nodes, since things like the jid may have
# changed
for x in managementNodes.management_node:
# Management nodes live in the same zone they manage
x.managing_zone_id = x.zone_id
x.save()
@exposed
def getManagementNodeForZone(self, zone_id, management_node_id):
zone = self.getZone(zone_id)
managementNode = models.ManagementNode.objects.get(zone=zone, pk=management_node_id)
return managementNode
@exposed
def addManagementNodeForZone(self, zone_id, managementNode):
"""Add a management node to the inventory"""
if not managementNode:
return
zone = zmodels.Zone.objects.get(pk=zone_id)
        managementNode.zone = zone
managementNode.save()
self.setSystemState(managementNode)
#TO-DO Need to add the JID to the models.ManagementNode object
return managementNode
@exposed
def getManagementNodesForZone(self, zone_id):
zone = zmodels.Zone.objects.get(pk=zone_id)
ManagementNodes = models.ManagementNodes()
ManagementNodes.management_node = list(models.ManagementNode.objects.filter(zone=zone).all())
return ManagementNodes
@exposed
def getSystemType(self, system_type_id):
systemType = models.SystemType.objects.get(pk=system_type_id)
return systemType
@exposed
def getSystemTypes(self):
SystemTypes = models.SystemTypes()
SystemTypes.system_type = list(models.SystemType.objects.all())
return SystemTypes
@exposed
def updateSystemType(self, system_type):
"""Update a system type"""
if not system_type:
return
system_type.save()
return system_type
@exposed
def getSystemTypeSystems(self, system_type_id):
system_type = self.getSystemType(system_type_id)
Systems = models.Systems()
Systems.system = system_type.systems.all()
return Systems
@exposed
def getSystemState(self, system_state_id):
systemState = models.Cache.get(models.SystemState, pk=int(system_state_id))
return systemState
@exposed
def getSystemStates(self):
SystemStates = models.SystemStates()
SystemStates.system_state = list(models.Cache.all(models.SystemState))
return SystemStates
@classmethod
def systemState(cls, stateName):
return models.Cache.get(models.SystemState,
name=stateName)
def _getProductVersionAndStage(self, nvf):
name, version, flavor = nvf
label = version.trailingLabel()
hostname = label.getHost().split('.')[0]
try:
product = self.db.getProduct(hostname)
prodVersions = self.db.listProductVersions(product.hostname)
except mint_rest_errors.ProductNotFound:
# Not a product that lives on this rba
return None, None
for version in prodVersions.versions:
stages = self.db.getProductVersionStages(product.hostname, version.name)
for stage in stages.stages:
if stage.label == label.asString():
return version, stage
return None, None
@exposed
def log_system(self, system, log_msg):
system_log = system.createLog()
system_log_entry = models.SystemLogEntry(system_log=system_log,
entry=log_msg)
system_log.system_log_entries.add(system_log_entry)
system_log.save()
return system_log
@exposed
def addSystems(self, systemList, for_user=None):
'''Add add one or more systems to inventory'''
for system in systemList:
self.addSystem(system, for_user=for_user)
@exposed
def addSystem(self, system, generateCertificates=False, for_user=None,
withRetagging=True):
'''Add a new system to inventory'''
if not system:
return
try:
if system.system_type.name == models.SystemType.INFRASTRUCTURE_MANAGEMENT_NODE:
return self.addManagementNode(system)
except ObjectDoesNotExist:
pass # will default later on
if for_user is not None:
system.created_by = for_user
system.modified_by = for_user
# add the system
system.save()
# Verify potential duplicates here
system = self.mergeSystems(system)
self.setSystemState(system)
if system.event_uuid:
self.postprocessEvent(system)
if withRetagging:
if for_user:
self.mgr.addToMyQuerySet(system, for_user)
self.mgr.retagQuerySetsByType('system', for_user, defer=True)
return system
def mergeSystems(self, system):
if not system.event_uuid:
system._not_merged = True
return system
# Look up a system with a matching event_uuid
systems = [ x.system
for x in models.SystemJob.objects.filter(
event_uuid = system.event_uuid) ]
if not systems:
return system
# Realistically there should only be one
systemByUuid = systems[0]
if systemByUuid.pk == system.pk:
# Same system, nothing to do
return system
systemToKeep, systemToRemove = sorted([system, systemByUuid],
key = lambda x: x.pk)
log.info("Merging 2 systems, id %s will be kept, id %s will be "
"removed." % (systemToKeep.pk, systemToRemove.pk))
system = self._merge(systemToKeep, systemToRemove)
return system
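    # Editor note (hedged, ids are illustrative): if an update for system id 12
    # carries an event_uuid already recorded against system id 7, the two rows
    # are merged by _merge below and the lower id (7) is the one kept.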
def _merge(self, system, other):
# We don't want to overwrite the name and description
other.name = other.description = None
responsiveState = self.systemState(models.SystemState.RESPONSIVE)
savedState = None
oldModel = getattr(system, 'oldModel', None)
if oldModel is not None:
currentState = oldModel.xpath('./current_state/name/text()')
            if currentState and currentState[0] == responsiveState.name:
savedState = responsiveState
models.System.objects._copyFields(system, other, withReadOnly=True)
if savedState:
system.current_state = savedState
# XXX maybe we should merge instead of simply updating, since we may
# step over a unique constraint? -- misa
cu = connection.cursor()
cu.execute("""
UPDATE inventory_system_target_credentials
SET system_id = %s
WHERE system_id = %s""", [ system.pk, other.pk, ])
# If the other system has the uuids, we'll trust its network and
# installed software info
if other.generated_uuid:
cu.execute("""
DELETE FROM inventory_system_network
WHERE system_id = %s
""", [ system.pk ])
cu.execute("""
UPDATE inventory_system_network
SET system_id = %s
WHERE system_id = %s
""", [ system.pk, other.pk ])
cu.execute("""
UPDATE inventory_system_job
SET system_id = %s
WHERE system_id = %s
""", [ system.pk, other.pk ])
self._mergeLogs(cu, system, other)
# Add a redirect from the deleted system to the saved system
redirect = redirectmodels.Redirect(
site=sitemodels.Site.objects.get(pk=settings.SITE_ID),
new_path=system.get_absolute_url(),
old_path=other.get_absolute_url())
redirect.save()
# Remove the other system before saving this one, or else we may stop
# over some unique constraints (like the one on generated_uuid)
other.delete()
system.updateDerivedData()
system.save()
return system
def _mergeLogs(self, cu, system, other):
# See RBL-6968 - product management has agreed we shouldn't keep the
# other system's logs
# But now we're trying to merge them
otherSystemLog = other.system_log.all()
if not otherSystemLog:
return
otherSystemLog = otherSystemLog[0]
systemLog = self.getSystemLog(system)
cu.execute("""
UPDATE inventory_system_log_entry
SET system_log_id = %s,
entry = '(copied) ' || entry
WHERE system_log_id = %s
""", [ systemLog.pk, otherSystemLog.pk ])
def postprocessEvent(self, system):
# removable legacy artifact given new jobs infrastructure? Does anything call this?
pass
def setSystemState(self, system):
if system.oldModel is None:
self.log_system(system, models.SystemLogEntry.ADDED)
registeredState = self.systemState(models.SystemState.REGISTERED)
onlineState = self.systemState(models.SystemState.RESPONSIVE)
if system.isNewRegistration:
system.update(registration_date=self.now(),
current_state=onlineState)
if system.oldModel is None:
# We really see this system the first time with its proper
# uuids. We'll assume it's been registered with rpath-register
self.log_system(system, models.SystemLogEntry.REGISTERED)
elif system.isRegistered:
# See if a new poll is required
if (system.current_state_id in self.NonresponsiveStates):
system.update(current_state=registeredState)
# Already registered and no need to re-synchronize, if the
# old state was online, and the new state is registered, we must
# be coming in through rpath-tools, so preserve the online state.
elif (self._getSystemOldCurrentStateId(system) == \
onlineState.system_state_id and
system.current_state_id == \
registeredState.system_state_id):
system.update(current_state=onlineState)
elif system.current_state == registeredState:
# system is registered and scanned, should just go ahead and mark online
# definitely do not poll again as the initial registration polled. Orchestrate if you
# want crazy amounts of extra polling.
system.update(current_state=onlineState)
return None
elif system.isRegistrationIncomplete:
self.log_system(system, "Incomplete registration: missing local_uuid. Possible cause: dmidecode malfunctioning")
# so that a transition between Inactive and Active systems will make the system
# move between querysets. Note, not retagging, would be grossly inefficient
# with lots of system activity
self.mgr.invalidateQuerySetsByType('system')
@classmethod
def _getSystemOldCurrentStateId(cls, system):
if system.oldModel is None:
return None
ret = system.oldModel.xpath('./current_state/system_state_id/text()')
if ret:
return ret[0]
return None
@exposed
def updateSystem(self, system, for_user=None):
last_job = getattr(system, 'lastJob', None)
if last_job and last_job.job_state.name == jobmodels.JobState.COMPLETED:
# This will update the system state as a side-effect
self.addSystem(system, generateCertificates=False,
withRetagging=False)
self.setSystemStateFromJob(system)
if for_user:
system.modified_by = for_user
system.modified_date = timeutils.now()
system.save()
self.mgr.invalidateQuerySetsByType('system')
return system
def setSystemStateFromJob(self, system):
job = system.lastJob
if job is None:
# This update did not come in as a result of a job
return
nextSystemState = self.getNextSystemState(system, job)
if nextSystemState is not None:
nstate = self.systemState(nextSystemState)
self.log_system(system, "System state change: %s -> %s" %
(system.current_state.description, nstate.description))
system.update(current_state=nstate, state_change_date=self.now())
@classmethod
def _getTroveSpecForImage(cls, image):
if image is None:
return None, None, None, None
version = cny_versions.ThawVersion(str(image.trove_version))
flavor = cny_deps.ThawFlavor(str(image.trove_flavor))
troveSpec = '%s=%s[%s]' % (image.trove_name, version.freeze(), flavor)
return troveSpec, image.trove_name, version, flavor
def getNextSystemState(self, system, job):
# Return None if the state hasn't changed
jobStateName = job.job_state.name
eventTypeName = job.job_type.name
system.updateDerivedData()
if jobStateName == jobmodels.JobState.COMPLETED:
if eventTypeName in [ jobmodels.EventType.SYSTEM_REGISTRATION, jobmodels.EventType.SYSTEM_REGISTRATION_IMMEDIATE ]:
return models.SystemState.RESPONSIVE
return None
if jobStateName == jobmodels.JobState.FAILED:
currentStateName = system.current_state.name
# Simple cases first.
if job.status_code == 401:
return models.SystemState.NONRESPONSIVE_CREDENTIALS
timedelta = self.now() - system.state_change_date
if currentStateName == models.SystemState.DEAD:
return None
if currentStateName in self.NonresponsiveStates:
if timedelta.days >= self.cfg.deadStateTimeout:
return models.SystemState.DEAD
return None
return None
# Some other job state, do nothing
return None
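    # Editor note (hedged): examples of the transitions computed above:
    #   completed registration job                      -> RESPONSIVE
    #   failed job with HTTP status 401                 -> NONRESPONSIVE_CREDENTIALS
    #   failed job while the system is already DEAD     -> no change
    #   failed job, nonresponsive longer than
    #   cfg.deadStateTimeout days                       -> DEAD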
def lookupTarget(self, targetTypeName, targetName):
return targetmodels.Target.objects.get(
target_type__name=targetTypeName, name=targetName)
@exposed
def addLaunchedSystems(self, systems, job=None, forUser=None):
img = None
if job:
# Try to extract the image for this job
images = job.images.all()
if images:
img = images[0].image
# Copy the incoming systems; we'll replace them with real ones
slist = systems.system
rlist = systems.system = []
for system in slist:
djSystem = self.mgr.addLaunchedSystem(system,
dnsName=system.dnsName,
targetName=system.targetName, targetType=system.targetType,
sourceImage=img, job=job,
for_user=forUser)
rlist.append(djSystem)
if system.dnsName:
self.postSystemLaunch(djSystem)
return systems
@exposed
def getEtreeProperty(self, obj, prop, default=None):
if obj is None:
return default
val = obj.find(prop)
if val is not None:
return val.text
val = obj.attrib.get(prop)
if val is not None:
return val
return default
@exposed
def addLaunchedSystem(self, system, dnsName=None, targetName=None,
targetType=None, for_user=None, sourceImage=None, job=None):
if isinstance(targetType, basestring):
targetTypeName = targetType
else:
targetTypeName = targetType.name
target = self.lookupTarget(targetTypeName=targetTypeName,
targetName=targetName)
system.target = target
system.source_image = sourceImage
if sourceImage is not None:
system.project_id = sourceImage.project_id
system.project_branch_id = sourceImage.project_branch_id
system.project_branch_stage_id = sourceImage.project_branch_stage_id
if system.managing_zone_id is None:
system.managing_zone = self.getLocalZone()
oldModel, system = models.System.objects.load_or_create(system,
withReadOnly=True)
etreeModel = getattr(system, '_etreeModel', None)
# Copy some of the otherwise read-only fields
system.target_system_name = self.getEtreeProperty(etreeModel,
'target_system_name', system.target_system_name)
system.target_system_description = self.getEtreeProperty(etreeModel,
'target_system_description', system.target_system_description)
system.target_system_state= self.getEtreeProperty(etreeModel,
'target_system_state', system.target_system_state)
# Add an old style job, to persist the boot uuid
self._addOldStyleJob(system)
system.launching_user = self.user
if for_user is None:
for_user = self.user
system.created_by = for_user
system.modified_by = for_user
system.launch_date = self.now()
# Copy some of the data from the target
if not system.name:
system.name = system.target_system_name
if not system.description:
system.description = system.target_system_description
# Look up the credentials for this user
credentials = self._getCredentialsForUser(system.target)
assert credentials is not None, "User should have credentials"
# Check if system target creds have already been set. This can happen
# if the target systems import script has run in between launching the
# instance and adding it to inventory.
stcs = models.SystemTargetCredentials.objects.filter(
system=system, credentials=credentials)
if not stcs:
# Not already set. Create the link.
stc = models.SystemTargetCredentials(system=system,
credentials=credentials)
stc.save()
if dnsName:
network = system._matchNetwork(dnsName)
if network is None:
models.Network.objects.create(system=system, dns_name=dnsName, active=True)
self.log_system(system, "System launched in target %s (%s)" %
(target.name, target.target_type.name))
system.system_state = self.systemState(models.SystemState.UNMANAGED)
self.addSystem(system, for_user=for_user)
# Add target system
# get_or_create needs the defaults arg to do this properly (#1631)
defaults=dict(
name=system.target_system_name,
description=system.target_system_description or '',
ip_addr_1=dnsName)
tsys, created = targetmodels.TargetSystem.objects.get_or_create(
target=target,
target_internal_id=system.target_system_id,
defaults=defaults)
if not created:
tsys.name = system.target_system_name
tsys.description = system.target_system_description or ''
# Only update the address if it's not null
if dnsName:
tsys.ip_addr_1 = dnsName
tsys.save()
targetmodels.TargetSystemCredentials.objects.get_or_create(
target_system=tsys,
target_credentials=credentials)
if job is not None:
# Link system to job. This call may be repeated, so
# gracefully handle existing records
jobmodels.JobSystemArtifact.objects.get_or_create(
system=system, job=job)
return system
def _addOldStyleJob(self, system):
if system.boot_uuid is None:
return
cu = connection.cursor()
now = time.time() # self.now()
# Make sure we don't insert duplicates
cu.execute("""
INSERT INTO jobs (job_uuid, job_type_id, job_state_id, created_by,
created, modified)
SELECT %s, %s, %s, %s, %s, %s
WHERE NOT EXISTS (SELECT 1 FROM jobs WHERE job_uuid = %s)""",
[ system.boot_uuid, 1, 3, self.user.user_id, now, now, system.boot_uuid])
cu.execute("SELECT job_id FROM jobs WHERE job_uuid = %s", [ system.boot_uuid ])
jobId = cu.fetchone()[0]
cu.execute("""
INSERT INTO job_system
(job_id, system_id)
SELECT %s, %s
WHERE NOT EXISTS (
SELECT 1
FROM job_system
WHERE job_id = %s
AND system_id = %s)""",
[ jobId, system.pk, jobId, system.pk ])
system.updateDerivedData()
@exposed
def postSystemLaunch(self, system):
# No longer waiting for a network here, the target waits for
# network
self.setSystemState(system)
system.updateDerivedData()
return system
def _getCredentialsForUser(self, target):
tucs = targetmodels.TargetUserCredentials.objects.filter(
target=target, user=self.user)
for tuc in tucs:
return tuc.target_credentials
return None
def matchSystem(self, system):
matchingIPs = models.network_information.objects.filter(
ip_address=system.ip_address)
if matchingIPs:
return matchingIPs[0].managed_system
else:
return None
def isManageable(self, managedSystem):
if managedSystem.launching_user.user_id == self.user.user_id:
# If we own the system, we can manage
return True
# Does the user who launched the system have the same credentials as
# our current user?
cu = connection.cursor()
cu.execute("""
SELECT 1
FROM TargetUserCredentials tc1
JOIN TargetUserCredentials tc2 USING (targetId, targetCredentialsId)
WHERE tc1.userId = %s
AND tc2.userId = %s
""", [ self.user.user_id, managedSystem.launching_user.user_id ])
row = cu.fetchone()
return bool(row)
@exposed
def getSystemLog(self, system):
systemLog = system.system_log.all()
if systemLog:
return systemLog[0]
else:
            return models.SystemLog()
@exposed
def getSystemLogEntries(self, system):
systemLog = self.getSystemLog(system)
logEntries = systemLog.system_log_entries.order_by('-entry_date')
return logEntries
@exposed
def getSystemEvent(self, event_id):
event = models.SystemEvent.objects.get(pk=event_id)
return event
@exposed
def deleteSystemEvent(self, event):
event = models.SystemEvent.objects.get(pk=event)
event.delete()
@exposed
def getSystemEvents(self):
SystemEvents = models.SystemEvents()
SystemEvents.system_event = list(models.SystemEvent.objects.all())
return SystemEvents
@exposed
def getSystemSystemEvents(self, system_id):
system = models.System.objects.get(pk=system_id)
events = models.SystemEvent.objects.filter(system=system)
system_events = models.SystemEvents()
system_events.system_event = list(events)
return system_events
@exposed
def getSystemSystemEvent(self, system_id, system_event_id):
event = models.SystemEvent.objects.get(pk=system_event_id)
return event
@exposed
def addSystemSystemEvent(self, system_id, systemEvent):
"""Add a system event to a system"""
if not system_id or not systemEvent:
return
system = models.System.objects.get(pk=system_id)
systemEvent.system = system
systemEvent.save()
enable_time = None
if systemEvent.dispatchImmediately():
enable_time = self.now()
else:
enable_time = self.now() + timeutils.timedelta(minutes=self.cfg.systemEventsPollDelay)
self.logSystemEvent(systemEvent, enable_time)
if systemEvent.dispatchImmediately():
self.dispatchSystemEvent(systemEvent)
return systemEvent
def getSystemEventsForProcessing(self):
events = None
try:
# get events in order based on whether or not they are enabled and what their priority is (descending)
current_time = self.now()
events = models.SystemEvent.objects.filter(time_enabled__lte=current_time).order_by('-priority')[0:self.cfg.systemEventsNumToProcess].all()
except models.SystemEvent.DoesNotExist:
pass
return events
@exposed
def processSystemEvents(self):
events = self.getSystemEventsForProcessing()
if not events:
log.info("No systems events to process")
return
for event in events:
try:
self.dispatchSystemEvent(event)
except (errors.IncompatibleEvent, errors.InvalidNetworkInformation):
# Safely ignore these errors
pass
def checkEventCompatibility(self, event):
runningJobs = event.system.jobs.filter(job_state__name=jobmodels.JobState.RUNNING)
runningEventTypes = [j.job_type.name for j in runningJobs]
# Event types are incompatible with themselves
if event.event_type.name in runningEventTypes:
raise errors.IncompatibleSameEvent(eventType=event.event_type.name)
def dispatchSystemEvent(self, event):
# Check if the system has any active jobs before creating the event.
if event.system.hasRunningJobs():
try:
self.checkEventCompatibility(event)
except (errors.IncompatibleEvent, errors.InvalidNetworkInformation), e:
log.error(str(e))
self.cleanupSystemEvent(event)
raise
log.info("Dispatching %s event (id %d, enabled %s) for system %s (id %d)" % \
(event.event_type.name, event.system_event_id, event.time_enabled,
event.system.name, event.system.system_id))
try:
job = self._dispatchSystemEvent(event)
except:
self.cleanupSystemEvent(event)
raise
else:
if job is None:
self.cleanupSystemEvent(event)
def _cimParams(self, repClient, system, destination, eventUuid, requiredNetwork):
# CIM is dead; this is just here for LaunchWaitForNetworkEvents
if system.target_id is not None:
targetName = system.target.name
targetType = system.target.target_type.name
else:
targetName = None
targetType = None
cimParams = repClient.CimParams(host=destination,
port=None,
eventUuid=eventUuid,
clientCert=None,
clientKey=None,
requiredNetwork=requiredNetwork,
# XXX These three do not belong to cimParams
instanceId=system.target_system_id,
targetName=targetName,
targetType=targetType,
launchWaitTime=self.cfg.launchWaitTime)
return cimParams
def _dispatchSystemEvent(self, event):
repClient = self.mgr.repeaterMgr.repeaterClient
if repClient is None:
log.info("Failed loading repeater client, expected in local mode only")
return
self.log_system(event.system,
"Dispatching %s event" % event.event_type.description)
network = self.extractNetworkToUse(event.system)
eventType = event.event_type.name
if not network and eventType not in self.LaunchWaitForNetworkEvents:
msg = "No valid network information found; giving up"
log.error(msg)
self.log_system(event.system, msg)
raise errors.InvalidNetworkInformation
# If no ip address was set, fall back to dns_name
if network:
destination = network.ip_address or network.dns_name
requiredNetwork = (network.pinned and destination) or None
else:
destination = None
requiredNetwork = None
eventUuid = str(uuid.uuid4())
zone = event.system.managing_zone.name
params = self._cimParams(repClient, event.system, destination,
eventUuid, requiredNetwork)
if params is None:
return
resultsLocation = repClient.ResultsLocation(
path = "/api/v1/inventory/systems/%d" % event.system.pk,
port = 80)
if eventType in self.LaunchWaitForNetworkEvents:
method = repClient.launchWaitForNetwork
job = self._runSystemEvent(event, method, params, resultsLocation,
user=self.user, zone=zone)
else:
log.error("Unknown event type %s" % eventType)
raise errors.UnknownEventType(eventType=eventType)
return job
@exposed
def extractNetworkToUse(self, system):
return models.System.extractNetworkToUse(system)
def _runSystemEvent(self, event, method, params, resultsLocation=None,
**kwargs):
user = kwargs.pop('user', None)
systemName = event.system.name
eventType = event.event_type.name
if hasattr(params, 'eventUuid'):
eventUuid = params.eventUuid
else:
eventUuid = kwargs.get('eventUuid')
if not kwargs.get('jobToken'):
kwargs['jobToken'] = str(uuid.uuid4())
jobUuid = str(uuid.uuid4())
logFunc = lambda x: log.info("System %s (%s), task %s (%s) %s" %
(systemName, params.host, jobUuid, eventType, x))
logFunc("queued")
job = jobmodels.Job()
job.job_uuid = jobUuid
job.job_type = event.event_type
job.job_state = self.jobState(jobmodels.JobState.QUEUED)
job.job_token = str(kwargs['jobToken'])
job.created_by = user
job.save()
sjob = models.SystemJob()
sjob.job = job
sjob.system = event.system
sjob.event_uuid = eventUuid
sjob.save()
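        # The actual dispatch is deferred to a post-commit hook so the job and
        # system-job rows are committed (and visible to other connections)
        # before the task runs.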
deferred = lambda connection=None, **kw: self._runEventLater(
event, job, method, params, resultsLocation, kwargs, logFunc)
signals.PostCommitActions.add(deferred)
return job
def _runEventLater(self, event, job, method, params, resultsLocation, kwargs, logFunc):
try:
self._runEventLater_r(job, method, params, resultsLocation, kwargs, logFunc)
finally:
# cleanup now that the event has been processed
self.cleanupSystemEvent(event)
@classmethod
def _runEventLater_r(cls, job, method, params, resultsLocation, kwargs,
logFunc):
zone = kwargs.pop('zone', None)
logFunc("executing")
try:
method(params, resultsLocation=resultsLocation, zone=zone, uuid=job.job_uuid, **kwargs)
except Exception, e:
tb = sys.exc_info()[2]
traceback.print_tb(tb)
logFunc("failed: %s" % (e, ))
job.job_state = cls.jobState(jobmodels.JobState.FAILED)
job.save()
return None
logFunc("in progress")
job.job_state = cls.jobState(jobmodels.JobState.RUNNING)
job.save()
for system_job in job.systems.all():
system_job.system.updateDerivedData()
return job
def cleanupSystemEvent(self, event):
eventType = event.event_type
system = event.system
# remove the event since it has been handled
log.debug("cleaning up %s event (id %d) for system %s" %
(eventType.name, event.system_event_id, system.name))
event.delete()
@classmethod
def eventType(cls, name):
return models.Cache.get(jobmodels.EventType, name=name)
@classmethod
def jobState(cls, name):
return jobmodels.JobState.objects.get(name=name)
@exposed
def scheduleLaunchWaitForNetworkEvent(self, system):
"""
Schedule an event that either waits for the system's IP address to
become available, or sees that the system has registered via
rpath-tools.
"""
return self._scheduleEvent(system,
jobmodels.EventType.LAUNCH_WAIT_FOR_NETWORK,
enableTime=self.now())
def _scheduleEvent(self, system, eventType, enableTime=None,
eventData=None):
eventTypeObject = self.eventType(eventType)
return self.createSystemEvent(system, eventTypeObject, enableTime=enableTime,
eventData=eventData)
@exposed
def createSystemEvent(self, system, eventType, enableTime=None,
eventData=None):
event = None
# do not create events for systems that we cannot possibly contact
if self.getSystemHasHostInfo(system) or \
eventType.name in self.LaunchWaitForNetworkEvents:
if not enableTime:
enableTime = self.now() + timeutils.timedelta(
minutes=self.cfg.systemEventsPollDelay)
if eventData is not None and not isinstance(eventData, basestring):
pickledData = cPickle.dumps(eventData)
else:
pickledData = eventData
event = models.SystemEvent(system=system, event_type=eventType,
priority=eventType.priority, time_enabled=enableTime,
event_data=pickledData)
event.save()
self.logSystemEvent(event, enableTime)
if event.dispatchImmediately():
self.dispatchSystemEvent(event)
else:
systemName = system.name or system.hostname or system.target_system_name
log.info("Event cannot be created for system %s (%s) '%s' because "
"there is no host information" % \
(system.pk, systemName, eventType.description))
self.log_system(system,
"Unable to create event '%s': no networking information" %
eventType.description)
system.updateDerivedData()
return event
def logSystemEvent(self, event, enable_time):
msg = "Event type '%s' registered and will be enabled on %s" % (event.event_type.name, enable_time)
self.log_system(event.system, msg)
log.info(msg)
def getSystemHasHostInfo(self, system):
hasInfo = False
if system and system.networks:
for network in system.networks.all():
if network.ip_address or network.ipv6_address or network.dns_name:
hasInfo = True
break
return hasInfo
def _iterTargetUserCredentials(self):
"Iterate over all configured targets that have credentials"
uqmap = dict()
# We need to build a unique map of credentials for users
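        # Keyed on (target, credentials) so several users sharing the same
        # credentials on a target produce a single refresh job below.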
for tuc in targetmodels.TargetUserCredentials.objects.all():
uqmap[(tuc.target_id, tuc.target_credentials_id)] = tuc
for tuc in uqmap.values():
yield tuc
def _importTargetSystemsForTUC(self, targetUserCredentials):
jobType = self.getEventTypeByName(jobmodels.EventType.TARGET_REFRESH_SYSTEMS)
target = targetUserCredentials.target
job = jobmodels.Job(job_type=jobType)
# This takes way too long, so let's manufacture the url by hand
# for now
#url = urlresolvers.reverse('TargetRefreshSystems', None,
# (target.target_id, ))
job.descriptor = self.mgr.getDescriptorRefreshSystems(target.pk)
job.descriptor.id = ("/api/v1/targets/%s/descriptors/refresh_systems" %
target.target_id)
job.descriptor_data = etree.fromstring('<descriptor_data/>')
self.mgr.addJob(job)
return job
@exposed
def importTargetSystems(self):
jobs = []
for tuc in self._iterTargetUserCredentials():
jobs.append(self._importTargetSystemsForTUC(tuc))
return jobs
def _disassociateFromTargets(self, objList):
for (targetType, targetName), systemMap in objList:
target = self.lookupTarget(targetType, targetName)
for targetSystemId in systemMap:
system = models.System.objects.get(target=target,
target_system_id=targetSystemId)
self.log_system(system, "Disassociating from target %s (%s)"
% (targetName, targetType))
system.target = None
system.save()
models.SystemTargetCredentials.objects.filter(system=system).delete()
@exposed
def addSystemsFromTarget(self, target):
# Iterate over all existing target systems for this target
tsystems = targetmodels.TargetSystem.objects.filter(target=target)
for tsystem in tsystems:
self._addSystemFromTarget(tsystem)
def _addSystemFromTarget(self, targetSystem):
t0 = time.time()
target = targetSystem.target
targetInternalId = targetSystem.target_internal_id
log.info(" Importing system %s (%s)" % (
targetInternalId, targetSystem.name))
existingSystems = models.System.objects.filter(target=target,
target_system_id=targetInternalId)
if existingSystems:
system = existingSystems[0]
else:
# Having nothing else available, we copy the target's name
system, _ = models.System.objects.get_or_create(target=target,
target_system_id=targetInternalId,
managing_zone=target.zone,
name=targetSystem.name,
description=targetSystem.description)
self.log_system(system, "System added as part of target %s (%s)" %
(target.name, target.target_type.name))
system.managing_zone = target.zone
system.target_system_name = targetSystem.name
system.target_system_description = targetSystem.description
self._addTargetSystemNetwork(system, targetSystem)
system.target_system_state = targetSystem.state
system.save()
self._setSystemTargetCredentials(system, targetSystem)
log.info(" Importing system %s (%s) completed in %.2f seconds" %
(system.target_system_id, system.target_system_name,
time.time() - t0))
def _addTargetSystemNetwork(self, system, tsystem):
dnsName = tsystem.ip_addr_1
if dnsName is None:
return
nws = system.networks.all()
for nw in nws:
if dnsName in [ nw.dns_name, nw.ip_address ]:
return
target = system.target
# Remove the other networks, they're probably stale
for nw in nws:
ipAddress = nw.ip_address and nw.ip_address or "ip unset"
self.log_system(system,
"%s (%s): removing stale network information %s (%s)" %
(target.name, target.target_type.name, nw.dns_name,
ipAddress))
nw.delete()
self.log_system(system, "%s (%s): using %s as primary contact address" %
(target.name, target.target_type.name, dnsName))
nw = models.Network(system=system, dns_name=dnsName)
nw.save()
if tsystem.ip_addr_2:
nw = models.Network(system=system, dns_name=tsystem.ip_addr_2)
nw.save()
def _setSystemTargetCredentials(self, system, targetSystem):
cu = connection.cursor()
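        # Two-step sync: first drop credential links the target system no
        # longer has, then insert any that are missing, leaving matching rows
        # untouched.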
query = """
DELETE FROM inventory_system_target_credentials
WHERE system_id = %s
AND credentials_id NOT IN
(SELECT target_credentials_id
FROM target_system_credentials
WHERE target_system_id = %s)"""
cu.execute(query, [ system.system_id, targetSystem.target_system_id ])
query = """
INSERT INTO inventory_system_target_credentials
(system_id, credentials_id)
SELECT %s, target_credentials_id
FROM target_system_credentials
WHERE target_system_id = %s
AND target_credentials_id NOT IN
(SELECT credentials_id
FROM inventory_system_target_credentials
WHERE system_id = %s)"""
cu.execute(query, [ system.system_id, targetSystem.target_system_id,
system.system_id ])
@exposed
def getSystemsLog(self):
systemsLog = models.SystemsLog()
systemLogEntries = \
models.SystemLogEntry.objects.all().order_by('entry_date')
systemsLog.system_log_entry = list(systemLogEntries)
return systemsLog
@exposed
def getSystemTags(self, system_id):
system = models.System.objects.get(pk=system_id)
systemTags = querysetmodels.SystemTags()
systemTags.system_tag = system.system_tags.all()
return systemTags
@exposed
def getSystemTag(self, system_id, system_tag_id):
systemTag = querysetmodels.SystemTag.objects.get(pk=system_tag_id)
return systemTag
@exposed
def getSystemDescriptorForAction(self, systemId, descriptorType, parameters=None):
# OBSOLETE
        raise errors.ResourceNotFound()
@exposed
def scheduleJobAction(self, system, job):
        '''
        An action is a bare job submission that requests the start of
        a real job.
        The incoming job is xobj only, containing job_type, descriptor,
        and descriptor_data; that data is used to schedule a separate,
        fully populated job.
        '''
raise Exception("action dispatch not yet supported on job type: %s" % job.job_type.name)
@classmethod
def _makeStageLabel(cls, stage):
labelComponents = [ stage.project.name, stage.project_branch.name, stage.name ]
return ' / '.join(labelComponents)
@exposed
def serializeDescriptor(self, descriptor, validate=True):
wrapper = models.modellib.etreeObjectWrapper(
descriptor.getElementTree(validate=validate))
return wrapper
| {
"content_hash": "bc87705cd5522ce734439e359b9a0304",
"timestamp": "",
"source": "github",
"line_count": 1305,
"max_line_length": 151,
"avg_line_length": 37.403831417624524,
"alnum_prop": 0.6194173563877735,
"repo_name": "sassoftware/mint",
"id": "2b09d8d7442eb75a079324ba1fd94e5d84acc12d",
"size": "49399",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mint/django_rest/rbuilder/inventory/manager/systemmgr.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "50165"
},
{
"name": "Genshi",
"bytes": "58741"
},
{
"name": "HTML",
"bytes": "2814"
},
{
"name": "JavaScript",
"bytes": "11470"
},
{
"name": "Makefile",
"bytes": "92418"
},
{
"name": "NASL",
"bytes": "582"
},
{
"name": "PLpgSQL",
"bytes": "5358"
},
{
"name": "Puppet",
"bytes": "17914"
},
{
"name": "Python",
"bytes": "3239135"
},
{
"name": "Ruby",
"bytes": "9268"
},
{
"name": "Shell",
"bytes": "24834"
}
],
"symlink_target": ""
} |
import attr
from typing import ClassVar
@attr.s(auto_attribs=True)
class A1:
bar1: int
<error descr="Fields with a default value must come after any fields without a default.">baz1</error>: int = 1
foo1: int
<error descr="Fields with a default value must come after any fields without a default.">bar2</error>: int = 2
baz2: int
foo2: int = 3
@attr.s(auto_attribs=True)
class A2:
bar1: int
baz1: ClassVar[int] = 1
foo1: int
bar2: ClassVar[int] = 2
baz2: int
foo2: int = 3
@attr.s(auto_attribs=True)
class B1:
a: int = attr.ib()
b: int
@attr.s(auto_attribs=True)
class B2:
<error descr="Fields with a default value must come after any fields without a default.">a</error>: int = attr.ib(default=1)
b: int = attr.ib()
@attr.s(auto_attribs=True)
class B3:
<error descr="Fields with a default value must come after any fields without a default.">a</error>: int = attr.ib(default=attr.Factory(int))
b: int = attr.ib()
@attr.s
class C1:
<error descr="Fields with a default value must come after any fields without a default.">x</error> = attr.ib()
y = attr.ib()
@x.default
def name_does_not_matter(self):
return 1
@attr.dataclass
class D1:
x: int = attr.NOTHING
y: int
@attr.dataclass
class D2:
x: int = attr.ib(default=attr.NOTHING)
y: int
@attr.dataclass
class E1:
x: int = 0
y: int = attr.ib(init=False) | {
"content_hash": "ba8c3d3d270faf4d229a79e20c559a80",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 144,
"avg_line_length": 21.16176470588235,
"alnum_prop": 0.6483669214732453,
"repo_name": "goodwinnk/intellij-community",
"id": "ef71aa22114ad47cfd99aecce6dbbf99a249f9a2",
"size": "1439",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/testData/inspections/PyDataclassInspection/attrsFieldsOrder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools.safe_eval import safe_eval as eval
import openerp.addons.decimal_precision as dp
from openerp.tools.float_utils import float_round
from openerp.exceptions import UserError
class product_product(osv.osv):
_inherit = "product.product"
def _stock_move_count(self, cr, uid, ids, field_name, arg, context=None):
res = dict([(id, {'reception_count': 0, 'delivery_count': 0}) for id in ids])
        move_pool = self.pool.get('stock.move')
moves = move_pool.read_group(cr, uid, [
('product_id', 'in', ids),
('location_id.usage', '!=', 'internal'),
('location_dest_id.usage', '=', 'internal'),
('state','in',('confirmed','assigned','pending'))
], ['product_id'], ['product_id'])
for move in moves:
product_id = move['product_id'][0]
res[product_id]['reception_count'] = move['product_id_count']
moves = move_pool.read_group(cr, uid, [
('product_id', 'in', ids),
('location_id.usage', '=', 'internal'),
('location_dest_id.usage', '!=', 'internal'),
('state','in',('confirmed','assigned','pending'))
], ['product_id'], ['product_id'])
for move in moves:
product_id = move['product_id'][0]
res[product_id]['delivery_count'] = move['product_id_count']
return res
def view_header_get(self, cr, user, view_id, view_type, context=None):
if context is None:
context = {}
res = super(product_product, self).view_header_get(cr, user, view_id, view_type, context)
if res: return res
if (context.get('active_id', False)) and (context.get('active_model') == 'stock.location'):
return _('Products: ')+self.pool.get('stock.location').browse(cr, user, context['active_id'], context).name
return res
def _get_domain_locations(self, cr, uid, ids, context=None):
'''
Parses the context and returns a list of location_ids based on it.
It will return all stock locations when no parameters are given
Possible parameters are shop, warehouse, location, force_company, compute_child
'''
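        # Illustrative context values (ids and names are hypothetical):
        #   {'location': 12}                        -> that location id
        #   {'location': 'Main Stock'}              -> locations matched by name
        #   {'warehouse': 'WH', 'force_company': 1} -> that warehouse's view location
        #   {'compute_child': False}                -> do not descend into children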
context = context or {}
location_obj = self.pool.get('stock.location')
warehouse_obj = self.pool.get('stock.warehouse')
location_ids = []
if context.get('location', False):
if isinstance(context['location'], (int, long)):
location_ids = [context['location']]
elif isinstance(context['location'], basestring):
domain = [('complete_name','ilike',context['location'])]
if context.get('force_company', False):
domain += [('company_id', '=', context['force_company'])]
location_ids = location_obj.search(cr, uid, domain, context=context)
else:
location_ids = context['location']
else:
if context.get('warehouse', False):
if isinstance(context['warehouse'], (int, long)):
wids = [context['warehouse']]
elif isinstance(context['warehouse'], basestring):
domain = [('name', 'ilike', context['warehouse'])]
if context.get('force_company', False):
domain += [('company_id', '=', context['force_company'])]
wids = warehouse_obj.search(cr, uid, domain, context=context)
else:
wids = context['warehouse']
else:
wids = warehouse_obj.search(cr, uid, [], context=context)
for w in warehouse_obj.browse(cr, uid, wids, context=context):
location_ids.append(w.view_location_id.id)
operator = context.get('compute_child', True) and 'child_of' or 'in'
domain = context.get('force_company', False) and ['&', ('company_id', '=', context['force_company'])] or []
locations = location_obj.browse(cr, uid, location_ids, context=context)
if operator == "child_of" and locations and locations[0].parent_left != 0:
loc_domain = []
dest_loc_domain = []
for loc in locations:
if loc_domain:
loc_domain = ['|'] + loc_domain + ['&', ('location_id.parent_left', '>=', loc.parent_left), ('location_id.parent_left', '<', loc.parent_right)]
dest_loc_domain = ['|'] + dest_loc_domain + ['&', ('location_dest_id.parent_left', '>=', loc.parent_left), ('location_dest_id.parent_left', '<', loc.parent_right)]
else:
loc_domain += ['&', ('location_id.parent_left', '>=', loc.parent_left), ('location_id.parent_left', '<', loc.parent_right)]
dest_loc_domain += ['&', ('location_dest_id.parent_left', '>=', loc.parent_left), ('location_dest_id.parent_left', '<', loc.parent_right)]
return (
domain + loc_domain,
domain + ['&'] + dest_loc_domain + ['!'] + loc_domain,
domain + ['&'] + loc_domain + ['!'] + dest_loc_domain
)
else:
return (
domain + [('location_id', operator, location_ids)],
domain + ['&', ('location_dest_id', operator, location_ids), '!', ('location_id', operator, location_ids)],
domain + ['&', ('location_id', operator, location_ids), '!', ('location_dest_id', operator, location_ids)]
)
def _get_domain_dates(self, cr, uid, ids, context):
from_date = context.get('from_date', False)
to_date = context.get('to_date', False)
domain = []
if from_date:
domain.append(('date', '>=', from_date))
if to_date:
domain.append(('date', '<=', to_date))
return domain
def _product_available(self, cr, uid, ids, field_names=None, arg=False, context=None):
context = context or {}
field_names = field_names or []
domain_products = [('product_id', 'in', ids)]
domain_quant, domain_move_in, domain_move_out = [], [], []
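        # domain_quant selects on-hand quants; domain_move_in / domain_move_out
        # select pending incoming and outgoing moves. All three are restricted
        # to the requested products, and the move domains also get the optional
        # date window below.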
domain_quant_loc, domain_move_in_loc, domain_move_out_loc = self._get_domain_locations(cr, uid, ids, context=context)
domain_move_in += self._get_domain_dates(cr, uid, ids, context=context) + [('state', 'not in', ('done', 'cancel', 'draft'))] + domain_products
domain_move_out += self._get_domain_dates(cr, uid, ids, context=context) + [('state', 'not in', ('done', 'cancel', 'draft'))] + domain_products
domain_quant += domain_products
if context.get('lot_id'):
domain_quant.append(('lot_id', '=', context['lot_id']))
if context.get('owner_id'):
domain_quant.append(('owner_id', '=', context['owner_id']))
owner_domain = ('restrict_partner_id', '=', context['owner_id'])
domain_move_in.append(owner_domain)
domain_move_out.append(owner_domain)
if context.get('package_id'):
domain_quant.append(('package_id', '=', context['package_id']))
domain_move_in += domain_move_in_loc
domain_move_out += domain_move_out_loc
moves_in = self.pool.get('stock.move').read_group(cr, uid, domain_move_in, ['product_id', 'product_qty'], ['product_id'], context=context)
moves_out = self.pool.get('stock.move').read_group(cr, uid, domain_move_out, ['product_id', 'product_qty'], ['product_id'], context=context)
domain_quant += domain_quant_loc
quants = self.pool.get('stock.quant').read_group(cr, uid, domain_quant, ['product_id', 'qty'], ['product_id'], context=context)
quants = dict(map(lambda x: (x['product_id'][0], x['qty']), quants))
moves_in = dict(map(lambda x: (x['product_id'][0], x['product_qty']), moves_in))
moves_out = dict(map(lambda x: (x['product_id'][0], x['product_qty']), moves_out))
res = {}
ctx = context.copy()
ctx.update({'prefetch_fields': False})
for product in self.browse(cr, uid, ids, context=ctx):
id = product.id
qty_available = float_round(quants.get(id, 0.0), precision_rounding=product.uom_id.rounding)
incoming_qty = float_round(moves_in.get(id, 0.0), precision_rounding=product.uom_id.rounding)
outgoing_qty = float_round(moves_out.get(id, 0.0), precision_rounding=product.uom_id.rounding)
virtual_available = float_round(quants.get(id, 0.0) + moves_in.get(id, 0.0) - moves_out.get(id, 0.0), precision_rounding=product.uom_id.rounding)
res[id] = {
'qty_available': qty_available,
'incoming_qty': incoming_qty,
'outgoing_qty': outgoing_qty,
'virtual_available': virtual_available,
}
return res
def _search_product_quantity(self, cr, uid, obj, name, domain, context):
res = []
for field, operator, value in domain:
            # validate the pieces before they are concatenated into an expression passed to safe_eval below
assert field in ('qty_available', 'virtual_available', 'incoming_qty', 'outgoing_qty'), 'Invalid domain left operand'
assert operator in ('<', '>', '=', '!=', '<=', '>='), 'Invalid domain operator'
assert isinstance(value, (float, int)), 'Invalid domain right operand'
if operator == '=':
operator = '=='
ids = []
if name == 'qty_available' and (value != 0.0 or operator not in ('==', '>=', '<=')):
res.append(('id', 'in', self._search_qty_available(cr, uid, operator, value, context)))
else:
product_ids = self.search(cr, uid, [], context=context)
if product_ids:
#TODO: Still optimization possible when searching virtual quantities
for element in self.browse(cr, uid, product_ids, context=context):
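                        # e.g. builds and evaluates the string "12.0>=5.0"
                        # through safe_eval (imported above as `eval`)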
if eval(str(element[field]) + operator + str(value)):
ids.append(element.id)
res.append(('id', 'in', ids))
return res
def _search_qty_available(self, cr, uid, operator, value, context):
domain_quant = []
if context.get('lot_id'):
domain_quant.append(('lot_id', '=', context['lot_id']))
if context.get('owner_id'):
domain_quant.append(('owner_id', '=', context['owner_id']))
if context.get('package_id'):
domain_quant.append(('package_id', '=', context['package_id']))
domain_quant += self._get_domain_locations(cr, uid, [], context=context)[0]
quants = self.pool.get('stock.quant').read_group(cr, uid, domain_quant, ['product_id', 'qty'], ['product_id'], context=context)
quants = dict(map(lambda x: (x['product_id'][0], x['qty']), quants))
quants = dict((k, v) for k, v in quants.iteritems() if eval(str(v) + operator + str(value)))
        return list(quants)
def _product_available_text(self, cr, uid, ids, field_names=None, arg=False, context=None):
res = {}
for product in self.browse(cr, uid, ids, context=context):
res[product.id] = str(product.qty_available) + _(" On Hand")
return res
def _compute_nbr_reordering_rules(self, cr, uid, ids, field_names=None, arg=None, context=None):
res = {id : {'nbr_reordering_rules': 0, 'reordering_min_qty': 0, 'reordering_max_qty': 0} for id in ids}
product_data = self.pool['stock.warehouse.orderpoint'].read_group(cr, uid, [('product_id', 'in', ids)], ['product_id', 'product_min_qty', 'product_max_qty'], ['product_id'], context=context)
for data in product_data:
res[data['product_id'][0]]['nbr_reordering_rules'] = int(data['product_id_count'])
res[data['product_id'][0]]['reordering_min_qty'] = data['product_min_qty']
res[data['product_id'][0]]['reordering_max_qty'] = data['product_max_qty']
return res
_columns = {
'reception_count': fields.function(_stock_move_count, string="Receipt", type='integer', multi='pickings'),
'delivery_count': fields.function(_stock_move_count, string="Delivery", type='integer', multi='pickings'),
'qty_available': fields.function(_product_available, multi='qty_available',
type='float', digits_compute=dp.get_precision('Product Unit of Measure'),
string='Quantity On Hand',
fnct_search=_search_product_quantity,
help="Current quantity of products.\n"
"In a context with a single Stock Location, this includes "
"goods stored at this Location, or any of its children.\n"
"In a context with a single Warehouse, this includes "
"goods stored in the Stock Location of this Warehouse, or any "
"of its children.\n"
"stored in the Stock Location of the Warehouse of this Shop, "
"or any of its children.\n"
"Otherwise, this includes goods stored in any Stock Location "
"with 'internal' type."),
'virtual_available': fields.function(_product_available, multi='qty_available',
type='float', digits_compute=dp.get_precision('Product Unit of Measure'),
string='Forecast Quantity',
fnct_search=_search_product_quantity,
help="Forecast quantity (computed as Quantity On Hand "
"- Outgoing + Incoming)\n"
"In a context with a single Stock Location, this includes "
"goods stored in this location, or any of its children.\n"
"In a context with a single Warehouse, this includes "
"goods stored in the Stock Location of this Warehouse, or any "
"of its children.\n"
"Otherwise, this includes goods stored in any Stock Location "
"with 'internal' type."),
'incoming_qty': fields.function(_product_available, multi='qty_available',
type='float', digits_compute=dp.get_precision('Product Unit of Measure'),
string='Incoming',
fnct_search=_search_product_quantity,
help="Quantity of products that are planned to arrive.\n"
"In a context with a single Stock Location, this includes "
"goods arriving to this Location, or any of its children.\n"
"In a context with a single Warehouse, this includes "
"goods arriving to the Stock Location of this Warehouse, or "
"any of its children.\n"
"Otherwise, this includes goods arriving to any Stock "
"Location with 'internal' type."),
'outgoing_qty': fields.function(_product_available, multi='qty_available',
type='float', digits_compute=dp.get_precision('Product Unit of Measure'),
string='Outgoing',
fnct_search=_search_product_quantity,
help="Quantity of products that are planned to leave.\n"
"In a context with a single Stock Location, this includes "
"goods leaving this Location, or any of its children.\n"
"In a context with a single Warehouse, this includes "
"goods leaving the Stock Location of this Warehouse, or "
"any of its children.\n"
"Otherwise, this includes goods leaving any Stock "
"Location with 'internal' type."),
'orderpoint_ids': fields.one2many('stock.warehouse.orderpoint', 'product_id', 'Minimum Stock Rules'),
'nbr_reordering_rules': fields.function(_compute_nbr_reordering_rules, string='Reordering Rules', type='integer', multi=True),
'reordering_min_qty': fields.function(_compute_nbr_reordering_rules, type='float', multi=True),
'reordering_max_qty': fields.function(_compute_nbr_reordering_rules, type='float', multi=True),
}
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
res = super(product_product, self).fields_view_get(
cr, uid, view_id=view_id, view_type=view_type, context=context,
toolbar=toolbar, submenu=submenu)
if context is None:
context = {}
if context.get('location') and isinstance(context['location'], int):
location_info = self.pool.get('stock.location').browse(cr, uid, context['location'])
            fields = res.get('fields', {})
if fields:
if location_info.usage == 'supplier':
if fields.get('virtual_available'):
res['fields']['virtual_available']['string'] = _('Future Receipts')
if fields.get('qty_available'):
res['fields']['qty_available']['string'] = _('Received Qty')
if location_info.usage == 'internal':
if fields.get('virtual_available'):
res['fields']['virtual_available']['string'] = _('Forecasted Quantity')
if location_info.usage == 'customer':
if fields.get('virtual_available'):
res['fields']['virtual_available']['string'] = _('Future Deliveries')
if fields.get('qty_available'):
res['fields']['qty_available']['string'] = _('Delivered Qty')
if location_info.usage == 'inventory':
if fields.get('virtual_available'):
res['fields']['virtual_available']['string'] = _('Future P&L')
if fields.get('qty_available'):
res['fields']['qty_available']['string'] = _('P&L Qty')
if location_info.usage == 'procurement':
if fields.get('virtual_available'):
res['fields']['virtual_available']['string'] = _('Future Qty')
if fields.get('qty_available'):
res['fields']['qty_available']['string'] = _('Unplanned Qty')
if location_info.usage == 'production':
if fields.get('virtual_available'):
res['fields']['virtual_available']['string'] = _('Future Productions')
if fields.get('qty_available'):
res['fields']['qty_available']['string'] = _('Produced Qty')
return res
def action_view_routes(self, cr, uid, ids, context=None):
template_obj = self.pool.get("product.template")
templ_ids = list(set([x.product_tmpl_id.id for x in self.browse(cr, uid, ids, context=context)]))
return template_obj.action_view_routes(cr, uid, templ_ids, context=context)
def onchange_tracking(self, cr, uid, ids, tracking, context=None):
if not tracking or tracking == 'none':
return {}
unassigned_quants = self.pool['stock.quant'].search_count(cr, uid, [('product_id','in', ids), ('lot_id','=', False), ('location_id.usage','=', 'internal')], context=context)
if unassigned_quants:
return {'warning' : {
'title': _('Warning!'),
'message' : _("You have products in stock that have no lot number. You can assign serial numbers by doing an inventory. ")
}}
return {}
class product_template(osv.osv):
_name = 'product.template'
_inherit = 'product.template'
def _product_available(self, cr, uid, ids, name, arg, context=None):
prod_available = {}
product_ids = self.browse(cr, uid, ids, context=context)
var_ids = []
for product in product_ids:
var_ids += [p.id for p in product.product_variant_ids]
variant_available= self.pool['product.product']._product_available(cr, uid, var_ids, context=context)
for product in product_ids:
qty_available = 0
virtual_available = 0
incoming_qty = 0
outgoing_qty = 0
for p in product.product_variant_ids:
qty_available += variant_available[p.id]["qty_available"]
virtual_available += variant_available[p.id]["virtual_available"]
incoming_qty += variant_available[p.id]["incoming_qty"]
outgoing_qty += variant_available[p.id]["outgoing_qty"]
prod_available[product.id] = {
"qty_available": qty_available,
"virtual_available": virtual_available,
"incoming_qty": incoming_qty,
"outgoing_qty": outgoing_qty,
}
return prod_available
def _search_product_quantity(self, cr, uid, obj, name, domain, context):
prod = self.pool.get("product.product")
product_variant_ids = prod.search(cr, uid, domain, context=context)
return [('product_variant_ids', 'in', product_variant_ids)]
def _product_available_text(self, cr, uid, ids, field_names=None, arg=False, context=None):
res = {}
for product in self.browse(cr, uid, ids, context=context):
res[product.id] = str(product.qty_available) + _(" On Hand")
return res
def _compute_nbr_reordering_rules(self, cr, uid, ids, field_names=None, arg=None, context=None):
res = {id : {'nbr_reordering_rules': 0, 'reordering_min_qty': 0, 'reordering_max_qty': 0} for id in ids}
product_data = self.pool['stock.warehouse.orderpoint'].read_group(cr, uid, [('product_id.product_tmpl_id', 'in', ids)], ['product_id', 'product_min_qty', 'product_max_qty'], ['product_id'], context=context)
for data in product_data:
product_tmpl_id = data['__domain'][1][2][0]
res[product_tmpl_id]['nbr_reordering_rules'] = res[product_tmpl_id].get('nbr_reordering_rules', 0) + int(data['product_id_count'])
res[product_tmpl_id]['reordering_min_qty'] = data['product_min_qty']
res[product_tmpl_id]['reordering_max_qty'] = data['product_max_qty']
return res
def _get_product_template_type(self, cr, uid, context=None):
res = super(product_template, self)._get_product_template_type(cr, uid, context=context)
if 'product' not in [item[0] for item in res]:
res.append(('product', _('Stockable Product')))
return res
_columns = {
'property_stock_procurement': fields.property(
type='many2one',
relation='stock.location',
string="Procurement Location",
domain=[('usage','like','procurement')],
help="This stock location will be used, instead of the default one, as the source location for stock moves generated by procurements."),
'property_stock_production': fields.property(
type='many2one',
relation='stock.location',
string="Production Location",
domain=[('usage','like','production')],
help="This stock location will be used, instead of the default one, as the source location for stock moves generated by manufacturing orders."),
'property_stock_inventory': fields.property(
type='many2one',
relation='stock.location',
string="Inventory Location",
domain=[('usage','like','inventory')],
help="This stock location will be used, instead of the default one, as the source location for stock moves generated when you do an inventory."),
'sale_delay': fields.float('Customer Lead Time', help="The average delay in days between the confirmation of the customer order and the delivery of the finished products. It's the time you promise to your customers."),
'tracking': fields.selection(selection=[('serial', 'By Unique Serial Number'), ('lot', 'By Lots'), ('none', 'No Tracking')], string="Tracking", required=True),
'description_picking': fields.text('Description on Picking', translate=True),
# sum of product variant qty
# 'reception_count': fields.function(_product_available, multi='qty_available',
# fnct_search=_search_product_quantity, type='float', string='Quantity On Hand'),
# 'delivery_count': fields.function(_product_available, multi='qty_available',
# fnct_search=_search_product_quantity, type='float', string='Quantity On Hand'),
'qty_available': fields.function(_product_available, multi='qty_available', digits_compute=dp.get_precision('Product Unit of Measure'),
fnct_search=_search_product_quantity, type='float', string='Quantity On Hand'),
'virtual_available': fields.function(_product_available, multi='qty_available', digits_compute=dp.get_precision('Product Unit of Measure'),
fnct_search=_search_product_quantity, type='float', string='Forecasted Quantity'),
'incoming_qty': fields.function(_product_available, multi='qty_available', digits_compute=dp.get_precision('Product Unit of Measure'),
fnct_search=_search_product_quantity, type='float', string='Incoming'),
'outgoing_qty': fields.function(_product_available, multi='qty_available', digits_compute=dp.get_precision('Product Unit of Measure'),
fnct_search=_search_product_quantity, type='float', string='Outgoing'),
'location_id': fields.dummy(string='Location', relation='stock.location', type='many2one'),
'warehouse_id': fields.dummy(string='Warehouse', relation='stock.warehouse', type='many2one'),
'route_ids': fields.many2many('stock.location.route', 'stock_route_product', 'product_id', 'route_id', 'Routes', domain=[('product_selectable', '=', True)],
help="Depending on the modules installed, this will allow you to define the route of the product: whether it will be bought, manufactured, MTO/MTS,..."),
'nbr_reordering_rules': fields.function(_compute_nbr_reordering_rules, string='Reordering Rules', type='integer', multi=True),
'reordering_min_qty': fields.function(_compute_nbr_reordering_rules, type='float', multi=True),
'reordering_max_qty': fields.function(_compute_nbr_reordering_rules, type='float', multi=True),
'route_from_categ_ids': fields.related('categ_id', 'total_route_ids', type="many2many", relation="stock.location.route", string="Category Routes"),
}
_defaults = {
'sale_delay': 7,
'tracking': 'none',
}
def action_view_routes(self, cr, uid, ids, context=None):
route_obj = self.pool.get("stock.location.route")
act_obj = self.pool.get('ir.actions.act_window')
mod_obj = self.pool.get('ir.model.data')
product_route_ids = set()
for product in self.browse(cr, uid, ids, context=context):
product_route_ids |= set([r.id for r in product.route_ids])
product_route_ids |= set([r.id for r in product.categ_id.total_route_ids])
route_ids = route_obj.search(cr, uid, ['|', ('id', 'in', list(product_route_ids)), ('warehouse_selectable', '=', True)], context=context)
result = mod_obj.xmlid_to_res_id(cr, uid, 'stock.action_routes_form', raise_if_not_found=True)
result = act_obj.read(cr, uid, [result], context=context)[0]
result['domain'] = "[('id','in',[" + ','.join(map(str, route_ids)) + "])]"
return result
def onchange_tracking(self, cr, uid, ids, tracking, context=None):
if not tracking:
return {}
product_product = self.pool['product.product']
variant_ids = product_product.search(cr, uid, [('product_tmpl_id', 'in', ids)], context=context)
return product_product.onchange_tracking(cr, uid, variant_ids, tracking, context=context)
def _get_products(self, cr, uid, ids, context=None):
products = []
for prodtmpl in self.browse(cr, uid, ids, context=None):
products += [x.id for x in prodtmpl.product_variant_ids]
return products
def _get_act_window_dict(self, cr, uid, name, context=None):
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
result = mod_obj.xmlid_to_res_id(cr, uid, name, raise_if_not_found=True)
result = act_obj.read(cr, uid, [result], context=context)[0]
return result
def action_open_quants(self, cr, uid, ids, context=None):
products = self._get_products(cr, uid, ids, context=context)
result = self._get_act_window_dict(cr, uid, 'stock.product_open_quants', context=context)
result['domain'] = "[('product_id','in',[" + ','.join(map(str, products)) + "])]"
result['context'] = "{'search_default_locationgroup': 1, 'search_default_internal_loc': 1}"
return result
def action_view_orderpoints(self, cr, uid, ids, context=None):
products = self._get_products(cr, uid, ids, context=context)
result = self._get_act_window_dict(cr, uid, 'stock.product_open_orderpoint', context=context)
if len(ids) == 1 and len(products) == 1:
result['context'] = "{'default_product_id': " + str(products[0]) + ", 'search_default_product_id': " + str(products[0]) + "}"
else:
result['domain'] = "[('product_id','in',[" + ','.join(map(str, products)) + "])]"
result['context'] = "{}"
return result
def action_view_stock_moves(self, cr, uid, ids, context=None):
products = self._get_products(cr, uid, ids, context=context)
result = self._get_act_window_dict(cr, uid, 'stock.act_product_stock_move_open', context=context)
if products:
result['context'] = "{'default_product_id': %d}" % products[0]
result['domain'] = "[('product_id.product_tmpl_id','in',[" + ','.join(map(str,ids)) + "])]"
return result
def write(self, cr, uid, ids, vals, context=None):
if 'uom_id' in vals:
new_uom = self.pool.get('product.uom').browse(cr, uid, vals['uom_id'], context=context)
for product in self.browse(cr, uid, ids, context=context):
old_uom = product.uom_id
if old_uom != new_uom:
if self.pool.get('stock.move').search(cr, uid, [('product_id', 'in', [x.id for x in product.product_variant_ids]), ('state', '=', 'done')], limit=1, context=context):
raise UserError(_("You can not change the unit of measure of a product that has already been used in a done stock move. If you need to change the unit of measure, you may deactivate this product."))
return super(product_template, self).write(cr, uid, ids, vals, context=context)
class product_removal_strategy(osv.osv):
_name = 'product.removal'
_description = 'Removal Strategy'
_columns = {
'name': fields.char('Name', required=True),
'method': fields.char("Method", required=True, help="FIFO, LIFO..."),
}
class product_putaway_strategy(osv.osv):
_name = 'product.putaway'
_description = 'Put Away Strategy'
def _get_putaway_options(self, cr, uid, context=None):
return [('fixed', 'Fixed Location')]
_columns = {
'name': fields.char('Name', required=True),
'method': fields.selection(_get_putaway_options, "Method", required=True),
'fixed_location_ids': fields.one2many('stock.fixed.putaway.strat', 'putaway_id', 'Fixed Locations Per Product Category', help="When the method is fixed, this location will be used to store the products", copy=True),
}
_defaults = {
'method': 'fixed',
}
def putaway_apply(self, cr, uid, putaway_strat, product, context=None):
if putaway_strat.method == 'fixed':
for strat in putaway_strat.fixed_location_ids:
categ = product.categ_id
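                # Walk up the product's category chain (e.g. a hypothetical
                # "All / Saleable / Shelf 1") until one matches this rule's category.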
while categ:
if strat.category_id.id == categ.id:
return strat.fixed_location_id.id
categ = categ.parent_id
class fixed_putaway_strat(osv.osv):
_name = 'stock.fixed.putaway.strat'
_order = 'sequence'
_columns = {
'putaway_id': fields.many2one('product.putaway', 'Put Away Method', required=True),
'category_id': fields.many2one('product.category', 'Product Category', required=True),
'fixed_location_id': fields.many2one('stock.location', 'Location', required=True),
        'sequence': fields.integer('Priority', help="Give the more specialized categories a higher priority to put them at the top of the list."),
}
class product_category(osv.osv):
_inherit = 'product.category'
def calculate_total_routes(self, cr, uid, ids, name, args, context=None):
res = {}
for categ in self.browse(cr, uid, ids, context=context):
categ2 = categ
routes = [x.id for x in categ.route_ids]
while categ2.parent_id:
categ2 = categ2.parent_id
routes += [x.id for x in categ2.route_ids]
res[categ.id] = routes
return res
_columns = {
'route_ids': fields.many2many('stock.location.route', 'stock_location_route_categ', 'categ_id', 'route_id', 'Routes', domain=[('product_categ_selectable', '=', True)]),
'removal_strategy_id': fields.many2one('product.removal', 'Force Removal Strategy', help="Set a specific removal strategy that will be used regardless of the source location for this product category"),
'total_route_ids': fields.function(calculate_total_routes, relation='stock.location.route', type='many2many', string='Total routes', readonly=True),
}
| {
"content_hash": "8c1e2a90d6a8ace79fbe40a98e8e8261",
"timestamp": "",
"source": "github",
"line_count": 586,
"max_line_length": 226,
"avg_line_length": 57.592150170648466,
"alnum_prop": 0.5892915345639871,
"repo_name": "vileopratama/vitech",
"id": "1f8d034f6c6b4f12424f4fa61107eca5084e0d7a",
"size": "33849",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "src/addons/stock/product.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "9611"
},
{
"name": "CSS",
"bytes": "2125999"
},
{
"name": "HTML",
"bytes": "252393"
},
{
"name": "Java",
"bytes": "1840167"
},
{
"name": "JavaScript",
"bytes": "6176224"
},
{
"name": "Makefile",
"bytes": "19072"
},
{
"name": "Mako",
"bytes": "7659"
},
{
"name": "NSIS",
"bytes": "16782"
},
{
"name": "Python",
"bytes": "9438805"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "22312"
},
{
"name": "Vim script",
"bytes": "406"
},
{
"name": "XSLT",
"bytes": "11489"
}
],
"symlink_target": ""
} |
"""Test error messages for 'getaddressinfo' and 'validateaddress' RPC commands."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
BECH32_VALID = 'bcrt1qtmp74ayg7p24uslctssvjm06q5phz4yrxucgnv'
BECH32_INVALID_SIZE = 'bcrt1sqqpl9r5c'
BECH32_INVALID_PREFIX = 'bc1qw508d6qejxtdg4y5r3zarvary0c5xw7kv8f3t4'
BASE58_VALID = 'mipcBbFg9gMiCh81Kj8tqqdgoZub1ZJRfn'
BASE58_INVALID_PREFIX = '17VZNX1SN5NtKa8UQFxwQbFeFc3iqRYhem'
INVALID_ADDRESS = 'asfah14i8fajz0123f'
class InvalidAddressErrorMessageTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def test_validateaddress(self):
node = self.nodes[0]
# Bech32
info = node.validateaddress(BECH32_INVALID_SIZE)
assert not info['isvalid']
assert_equal(info['error'], 'Invalid Bech32 address data size')
info = node.validateaddress(BECH32_INVALID_PREFIX)
assert not info['isvalid']
assert_equal(info['error'], 'Invalid prefix for Bech32 address')
info = node.validateaddress(BECH32_VALID)
assert info['isvalid']
assert 'error' not in info
# Base58
info = node.validateaddress(BASE58_INVALID_PREFIX)
assert not info['isvalid']
assert_equal(info['error'], 'Invalid prefix for Base58-encoded address')
info = node.validateaddress(BASE58_VALID)
assert info['isvalid']
assert 'error' not in info
# Invalid address format
info = node.validateaddress(INVALID_ADDRESS)
assert not info['isvalid']
assert_equal(info['error'], 'Invalid address format')
def test_getaddressinfo(self):
node = self.nodes[0]
assert_raises_rpc_error(-5, "Invalid Bech32 address data size", node.getaddressinfo, BECH32_INVALID_SIZE)
assert_raises_rpc_error(-5, "Invalid prefix for Bech32 address", node.getaddressinfo, BECH32_INVALID_PREFIX)
assert_raises_rpc_error(-5, "Invalid prefix for Base58-encoded address", node.getaddressinfo, BASE58_INVALID_PREFIX)
assert_raises_rpc_error(-5, "Invalid address format", node.getaddressinfo, INVALID_ADDRESS)
def run_test(self):
self.test_validateaddress()
self.test_getaddressinfo()
if __name__ == '__main__':
InvalidAddressErrorMessageTest().main()
| {
"content_hash": "cd6b3d73ae76668da9f9cfdb6f41ab23",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 124,
"avg_line_length": 33.932432432432435,
"alnum_prop": 0.6933492632417364,
"repo_name": "pstratem/bitcoin",
"id": "469d6bdb05e221ee8f27ed6ebf6811a7448563a3",
"size": "2720",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "test/functional/rpc_invalid_address_message.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28453"
},
{
"name": "C",
"bytes": "695537"
},
{
"name": "C++",
"bytes": "6406006"
},
{
"name": "HTML",
"bytes": "21860"
},
{
"name": "Java",
"bytes": "30290"
},
{
"name": "M4",
"bytes": "198872"
},
{
"name": "Makefile",
"bytes": "121257"
},
{
"name": "Objective-C",
"bytes": "123749"
},
{
"name": "Objective-C++",
"bytes": "5382"
},
{
"name": "Python",
"bytes": "1602858"
},
{
"name": "QMake",
"bytes": "756"
},
{
"name": "Shell",
"bytes": "97840"
}
],
"symlink_target": ""
} |
import argparse
import sys
import Tools
import subprocess
import tempfile
import pdb
import os
def Call(command, logfile=None):
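    # Run `command` (through the shell when a logfile is given, otherwise
    # split into argv), sending stdout to `logfile` (a path or an already
    # open file object), and raise ValueError on a non-zero exit status.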
print "calling " + command
if (logfile is not None):
if (isinstance(logfile, str)):
outFile = open(logfile, 'w')
elif (isinstance(logfile, file)):
outFile = logfile
else:
print "bad input."
sys.exit(1)
retval = subprocess.call(command, shell=True, stdout=outFile)
outFile.close()
else:
retval = subprocess.call(command.split())
if (retval != 0):
print "Error running " + command
raise ValueError
return 0
def DeleteFiles(tempfiles):
for filename in tempfiles:
command = "/bin/rm -f {}".format(filename)
retval = subprocess.call(command.split())
ap = argparse.ArgumentParser(description="Create a consensus from high mapqv viewed from a bam file. To run this, "
"you must have samtools, quiver, cmph5tools.py in your path.")
ap.add_argument("bam_list", help="File with list of paths to BAMs generated from blasr SAM output that contains full quality values.")
ap.add_argument("--region", help="Region from the bam file. If 3 options, assume bed format, otherwise, UCSC chr:start-end format.", nargs="+", default=None, required=True)
ap.add_argument("--reference", help="Use this reference file.", default=None)
ap.add_argument("--referenceWindow", help="Use this window from the reference, similar to region, can be 3 arguments or 1.", nargs="+", default=None)
ap.add_argument("--consensus", help="Write consensus to this file. The special file name \"fromregion\" implies build the refrence name from the region", default="consensus.fa")
ap.add_argument("--minq", help="Minimum mapping quality (20)", default=20, type=int)
ap.add_argument("--tmpdir", help="Create temporary files here.", default=".")
ap.add_argument("--keeptemp", help="Do not delete temporary files.", default=False,action='store_true')
ap.add_argument("--delta", help="Increase/decrease region by delta for fetching reads.", default=0,type=int)
ap.add_argument("--p5c3", help="Force p5c5 chemistry flag.", action='store_true', default=False)
args = ap.parse_args()
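# Hypothetical invocation (all paths are placeholders):
#   python RegionToConsensusBAMs.py bams.fofn --region chr1 100000 150000 \
#       --reference ref.fasta --consensus consensus.fa --tmpdir tmp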
regionSamFileName = tempfile.mktemp(suffix=".sam", dir=args.tmpdir)
regionBasH5FileName = tempfile.mktemp(suffix=".bas.h5", dir=args.tmpdir)
if (args.region is None):
print "Required argument region is missing."
sys.exit(1)
bedRegion = Tools.FormatRegion(args.region)
print args.region
print bedRegion
expandedRegion = (bedRegion[0], max(0, bedRegion[1] - args.delta), int(bedRegion[2]) + args.delta)
region = Tools.BedToRegion(expandedRegion)
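# e.g. with --delta 100 a bed region ("chr1", 5000, 6000) becomes
# ("chr1", 4900, 6100), clamped at 0, before being converted back to a
# region string (assuming Tools.BedToRegion yields UCSC-style "chr:start-end").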
path="/net/eichler/vol5/home/mchaisso/software/blasr_2/cpp"
if (os.path.exists(args.tmpdir) == False):
os.makedirs(args.tmpdir)
tempfiles = []
try:
with open(args.bam_list, "r") as fh:
bam_list = [line.strip() for line in fh]
tempfiles.append(regionSamFileName)
Call("/net/eichler/vol4/home/jlhudd/src/bamtools-2.3.0/bin/bamtools filter -in {} -region {} -mapQuality \">={}\" | samtools view -".format(" -in ".join(bam_list), region.replace("-", ".."), args.minq), regionSamFileName)
tempfiles.append(regionBasH5FileName)
Call("{}/pbihdfutils/bin/samtobas {} {}".format(path, regionSamFileName, regionBasH5FileName))
# build a reference if necessary
if (args.referenceWindow is not None):
tempReference = tempfile.mktemp(suffix=".reference.fasta", dir=args.tmpdir)
refRegion = Tools.BedToRegion(Tools.FormatRegion(args.referenceWindow))
tempfiles.append(tempReference)
Call("samtools faidx {} {} ".format(args.reference, refRegion), tempReference)
args.reference = tempReference
alignmentSamFileName = tempfile.mktemp(suffix=".alignment.sam", dir=args.tmpdir)
cmph5FileName = tempfile.mktemp(suffix=".cmp.h5", dir=args.tmpdir)
tempfiles.append(alignmentSamFileName)
Call("{}/alignment/bin/blasr {} {} -minAlignLength 1000 -sam -bestn 1 -nproc 6 -out {}".format(path, regionBasH5FileName, args.reference, alignmentSamFileName))
tempfiles.append(cmph5FileName)
Call("{}/pbihdfutils/bin/samtoh5 {} {} {}".format(path, alignmentSamFileName, args.reference, cmph5FileName))
Call("cmph5tools.py sort --deep {}".format(cmph5FileName))
Call("{}/pbihdfutils/bin/loadPulses {} {} -metrics InsertionQV,DeletionQV,SubstitutionQV,MergeQV,SubstitutionTag,DeletionTag".format(path, regionBasH5FileName, cmph5FileName))
if (args.consensus == "fromregion"):
tmpRegion = Tools.FormatRegion(args.region)
args.consensus = "{}_{}-{}.fasta".format(tmpRegion[0], tmpRegion[1], tmpRegion[2])
p5C3Flag = ""
if (args.p5c3 == True):
p5C3Flag = "-p P5-C3.AllQVsMergingByChannelModel"
Call("samtools faidx {}".format(args.reference))
Call("quiver -j 6 -r {} {} -o {} {} ".format(args.reference, cmph5FileName, args.consensus, p5C3Flag))
except ValueError:
if (args.keeptemp == False):
DeleteFiles(tempfiles)
sys.exit(1)
if (args.keeptemp == False):
DeleteFiles(tempfiles)
| {
"content_hash": "35075f4cb91b66434456d4abe8dbe388",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 225,
"avg_line_length": 43.601694915254235,
"alnum_prop": 0.6913508260447035,
"repo_name": "EichlerLab/pacbio_variant_caller",
"id": "51019f29b9803dbf9cfea3db7fcdbdc8dc1f4697",
"size": "5168",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/RegionToConsensusBAMs.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "48915"
},
{
"name": "Makefile",
"bytes": "5759"
},
{
"name": "Python",
"bytes": "167172"
},
{
"name": "R",
"bytes": "2065"
},
{
"name": "Ruby",
"bytes": "463"
},
{
"name": "Shell",
"bytes": "516"
}
],
"symlink_target": ""
} |
import sys
import unittest
from libcloud.test import MockHttp
from libcloud.utils.py3 import httplib
from libcloud.compute.base import Node, NodeAuthPassword
from libcloud.test.secrets import BLUEBOX_PARAMS
from libcloud.compute.types import NodeState
from libcloud.test.file_fixtures import ComputeFileFixtures
from libcloud.compute.drivers.bluebox import BlueboxNodeDriver as Bluebox
class BlueboxTest(unittest.TestCase):
def setUp(self):
Bluebox.connectionCls.conn_class = BlueboxMockHttp
self.driver = Bluebox(*BLUEBOX_PARAMS)
def test_create_node(self):
node = self.driver.create_node(
name="foo",
size=self.driver.list_sizes()[0],
image=self.driver.list_images()[0],
auth=NodeAuthPassword("test123"),
)
self.assertTrue(isinstance(node, Node))
self.assertEqual(node.state, NodeState.PENDING)
self.assertEqual(node.name, "foo.apitest.blueboxgrid.com")
def test_list_nodes(self):
node = self.driver.list_nodes()[0]
self.assertEqual(node.name, "foo.apitest.blueboxgrid.com")
self.assertEqual(node.state, NodeState.RUNNING)
def test_list_sizes(self):
sizes = self.driver.list_sizes()
self.assertEqual(len(sizes), 4)
ids = [s.id for s in sizes]
for size in sizes:
self.assertTrue(size.price > 0)
self.assertTrue("94fd37a7-2606-47f7-84d5-9000deda52ae" in ids)
self.assertTrue("b412f354-5056-4bf0-a42f-6ddd998aa092" in ids)
self.assertTrue("0cd183d3-0287-4b1a-8288-b3ea8302ed58" in ids)
self.assertTrue("b9b87a5b-2885-4a2e-b434-44a163ca6251" in ids)
def test_list_images(self):
images = self.driver.list_images()
image = images[0]
self.assertEqual(len(images), 10)
self.assertEqual(image.name, "CentOS 5 (Latest Release)")
self.assertEqual(image.id, "c66b8145-f768-45ef-9878-395bf8b1b7ff")
def test_reboot_node(self):
node = self.driver.list_nodes()[0]
ret = self.driver.reboot_node(node)
self.assertTrue(ret)
def test_destroy_node(self):
node = self.driver.list_nodes()[0]
ret = self.driver.destroy_node(node)
self.assertTrue(ret)
class BlueboxMockHttp(MockHttp):
fixtures = ComputeFileFixtures("bluebox")
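    # MockHttp dispatches each request to the method named after the URL path,
    # with slashes and dots replaced by underscores
    # (e.g. GET /api/blocks.json is handled by _api_blocks_json).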
def _api_blocks_json(self, method, url, body, headers):
if method == "POST":
body = self.fixtures.load("api_blocks_json_post.json")
else:
body = self.fixtures.load("api_blocks_json.json")
return (httplib.OK, body, headers, httplib.responses[httplib.OK])
def _api_block_products_json(self, method, url, body, headers):
body = self.fixtures.load("api_block_products_json.json")
return (httplib.OK, body, headers, httplib.responses[httplib.OK])
def _api_block_templates_json(self, method, url, body, headers):
body = self.fixtures.load("api_block_templates_json.json")
return (httplib.OK, body, headers, httplib.responses[httplib.OK])
def _api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json(self, method, url, body, headers):
if method == "DELETE":
body = self.fixtures.load(
"api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json_delete.json"
)
else:
body = self.fixtures.load("api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json.json")
return (httplib.OK, body, headers, httplib.responses[httplib.OK])
def _api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_reboot_json(
self, method, url, body, headers
):
body = self.fixtures.load(
"api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_reboot_json.json"
)
return (httplib.OK, body, headers, httplib.responses[httplib.OK])
if __name__ == "__main__":
sys.exit(unittest.main())
| {
"content_hash": "b279be555f3686592bb89fbe4a1f56b8",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 98,
"avg_line_length": 37.49038461538461,
"alnum_prop": 0.6596563221338805,
"repo_name": "apache/libcloud",
"id": "02805b8845afd3fac11ab19d655b26d88d261b1d",
"size": "4680",
"binary": false,
"copies": "2",
"ref": "refs/heads/trunk",
"path": "libcloud/test/compute/test_bluebox.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2155"
},
{
"name": "HTML",
"bytes": "2545"
},
{
"name": "PowerShell",
"bytes": "410"
},
{
"name": "Python",
"bytes": "9105547"
},
{
"name": "Shell",
"bytes": "12994"
}
],
"symlink_target": ""
} |
"""
Base Flask App for Poo Mailer
"""
from flask import Flask, session, render_template, redirect, url_for, request
import json
import os
import sys
parent_path = os.path.dirname(os.path.realpath(__file__))+'/../'
sys.path.append(parent_path)
from data import countries
import settings
from helpers import login_required, ajax_login_required, build_data_dict
from ship import ship_to_address
from record import email_shipment_info
app = Flask(__name__)
app.debug = settings.DEBUG
app.secret_key = settings.SECRET_KEY
# Handles both login and displaying form
#
@app.route("/", methods=['GET', 'POST'])
def root():
if request.method == 'POST':
if request.form['password'] == settings.LOGIN_PASSWORD:
session['logged_in'] = True
else:
session['logged_in'] = False
return render_template('login.html', prompt='Wrong Password')
if 'logged_in' in session and session['logged_in'] == True:
return redirect(url_for('address_form'))
else:
session['logged_in'] = False
return render_template('login.html')
# Logout page, redirects to root
#
@app.route("/logout")
def logout():
if 'logged_in' in session:
session['logged_in'] = False
return redirect(url_for('root'))
# Form for manually entering in shipment information
#
@app.route("/address_form")
@login_required
def address_form():
return render_template('address.html', countries=countries.COUNTRIES, from_address = settings.FROM_ADDRESS)
# Form for using two csv files to enter in shipment information
#
@app.route("/csv_form")
@login_required
def csv_form():
return render_template('csv.html', from_address = settings.FROM_ADDRESS)
# Handles ajax of form submission
#
@app.route("/submit", methods=["POST"])
@ajax_login_required
def submit():
address_keys = ['name', 'company', 'street1', 'street2', 'city', 'state', 'zip', 'country', 'phone']
parcel_keys = ['length', 'width', 'height', 'weight']
option_keys = ['dry_ice_weight', 'print_custom_1']
address_dict = build_data_dict(address_keys, request.form)
parcel_dict = build_data_dict(parcel_keys, request.form)
options_dict = build_data_dict(option_keys, request.form)
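    # ship_to_address is expected to return a dict carrying a 'status' key; on
    # success the shipment details are also emailed out below.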
status = ship_to_address(address_dict, parcel_dict, options=options_dict)
if status['status'] == 'success':
email_shipment_info(status)
return json.dumps(status)
# Start the app
#
if __name__ == "__main__":
app.run(host='0.0.0.0', port=9001)
| {
"content_hash": "dfa635640f9bd861bdab547d1bc6994a",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 111,
"avg_line_length": 30.28048780487805,
"alnum_prop": 0.6749899315344342,
"repo_name": "albertyw/poo-mailer",
"id": "1c8f1fc21b9d1fff2b277a7d7ab275bdf6b82b54",
"size": "2483",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/serve.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "146"
},
{
"name": "JavaScript",
"bytes": "4053"
},
{
"name": "Python",
"bytes": "13642"
},
{
"name": "Shell",
"bytes": "123"
}
],
"symlink_target": ""
} |
import pprint
import logging
import datetime
from selenium import webdriver
import hubcheck.conf
# block websites that make linkcheck slow
# these are usually blocked by the workspace firewall
# mozillalabs comes from using a nightly version of firefox browser
# many of the others are from login authentication sites
PROXY_BLACKLIST = [
"http(s)?://.*mozillalabs\\.com/?.*", # testpilot.mozillalabs.com
"http(s)?://.*google-analytics\\.com/.*", # ssl.google-analytics.com
'http(s)?://.*facebook\\.com/?.*', # www.facebook.com/login.php
'http(s)?://.*fbcdn\\.com/?.*', # www.facebook.com/login.php
'http(s)?://.*accounts\\.google\\.com/?.*', # accounts.google.com
'http(s)?://.*linkedin\\.com/?.*', # linkedin.com
'http(s)?://.*twitter\\.com/?.*', # api.twitter.com
# 'http(s)?://.*purdue\\.edu/apps/account/cas/?.*', # purdue cas
]
MIMETYPES = [
"appl/text", # .doc \
"application/acad", # .dwg \
"application/acrobat", # .pdf \
"application/autocad_dwg", # .dwg \
"application/doc", # .doc, .rtf \
"application/dwg", # .dwg \
"application/eps", # .eps \
"application/futuresplash", # .swf \
"application/gzip", # .gz \
"application/gzipped", # .gz \
"application/gzip-compressed", # .gz \
"application/jpg", # .jpg \
"application/ms-powerpoint", # .ppt \
"application/msexcel", # .xls \
"application/mspowerpnt", # .ppt \
"application/mspowerpoint", # .ppt \
"application/msword", # .doc, .rtf \
"application/octet-stream", # .gz, .zip \
"application/pdf", # .pdf \
"application/photoshop", # .psd \
"application/postscript", # .ps, .avi, .eps \
"application/powerpoint", # .ppt \
"application/psd", # .psd \
"application/rss+xml", # .rss \
"application/rtf", # .rtf \
"application/tar", # .tar \
"application/vnd.ms-excel", # .xls, .xlt, .xla \
"application/vnd.ms-excel.addin.macroEnabled.12", # .xlam \
"application/vnd.ms-excel.sheet.binary.macroEnabled.12", # .xlsb \
"application/vnd.ms-excel.sheet.macroEnabled.12", # .xlsm \
"application/vnd.ms-excel.template.macroEnabled.12", # .xltm \
"application/vnd.ms-powerpoint", # .pps, .ppt, .pot, .ppa \
"application/vnd.ms-powerpoint.addin.macroEnabled.12", # .ppam \
"application/vnd.ms-powerpoint.presentation.macroEnabled.12", # .pptm \
"application/vnd.ms-powerpoint.slideshow.macroEnabled.12", # .ppsm \
"application/vnd.ms-powerpoint.template.macroEnabled.12", # .potm \
"application/vnd.ms-word", # .doc \
"application/vnd.ms-word.document.macroEnabled.12", # .docm \
"application/vnd.ms-word.template.macroEnabled.12", # .dotm \
"application/vnd.msexcel", # .xls \
"application/vnd.mspowerpoint", # .ppt \
"application/vnd.msword", # .doc \
"application/vnd.openxmlformats-officedocument.presentationml.presentation", # .pptx \
"application/vnd.openxmlformats-officedocument.presentationml.template", # .potx \
"application/vnd.openxmlformats-officedocument.presentationml.slideshow", # .ppsx \
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", # .xlsx \
"application/vnd.openxmlformats-officedocument.spreadsheetml.template", # .xltx \
"application/vnd.openxmlformats-officedocument.wordprocessingml.document", # .docx \
"application/vnd.openxmlformats-officedocument.wordprocessingml.template", # .dotx \
"application/vnd.pdf", # .pdf \
"application/vnd-mspowerpoint", # .ppt \
"application/winword", # .doc \
"application/word", # .doc \
"application/x-acad", # .dwg \
"application/x-apple-diskimage", # .dmg \
"application/x-autocad", # .dwg \
"application/x-bibtex", # .bib \
"application/x-compress", # .gz, .tar, .zip \
"application/x-compressed", # .gz, .tar, .zip \
"application/x-dos_ms_excel", # .xls \
"application/x-dwg", # .dwg \
"application/x-endnote-refer", # .enw \
"application/x-eps", # .eps \
"application/x-excel", # .xls \
"application/x-gtar", # .tar \
"application/x-gunzip", # .gz \
"application/x-gzip", # .gz \
"application/x-jpg", # .jpg \
"application/x-m", # .ppt \
"application/x-ms-excel", # .xls \
"application/x-msexcel", # .xls \
"application/x-mspublisher", # .pub \
"application/x-msw6", # .doc \
"application/x-msword", # .doc \
"application/x-ole-storage", # .msi \
"application/x-pdf", # .pdf \
"application/x-powerpoint", # .ppt \
"application/x-rtf", # .rtf \
"application/x-shockwave-flash", # .swf \
"application/x-shockwave-flash2-preview", # .swf \
"application/x-tar", # .tar \
"application/x-troff-msvideo", # .avi \
"application/x-soffice", # .rtf \
"application/x-xml", # .xml, .pub \
"application/x-zip", # .zip \
"application/x-zip-compressed", # .zip \
"application/xls", # .xls \
"application/xml", # .xml, .pub \
"application/zip", # .zip \
"audio/aiff", # .avi, .mov \
"audio/avi", # .avi \
"audio/mp3", # .mp3 \
"audio/mp4", # .mp4 \
"audio/mpg", # .mp3 \
"audio/mpeg", # .mp3 \
"audio/mpeg3", # .mp3 \
"audio/x-midi", # .mov \
"audio/x-mp3", # .mp3 \
"audio/x-mpg", # .mp3 \
"audio/x-mpeg", # .mp3 \
"audio/x-mpeg3", # .mp3 \
"audio/x-mpegaudio", # .mp3 \
"audio/x-wav", # .mov \
"drawing/dwg", # .dwg \
"gzip/document", # .gz \
"image/avi", # .avi \
"image/eps", # .eps \
"image/gi_", # .gif \
"image/gif", # .eps, .gif \
"image/jpeg", # .jpg, .jpeg \
"image/jpg", # .jpg \
"image/jp_", # .jpg \
"image/mpeg", # .mpeg \
"image/mov", # .mov \
"image/photoshop", # .psd \
"image/pipeg", # .jpg \
"image/pjpeg", # .jpg \
"image/png", # .png \
"image/psd", # .psd \
"image/vnd.dwg", # .dwg \
"image/vnd.rn-realflash", # .swf \
"image/vnd.swiftview-jpeg", # .jpg \
"image/x-eps", # .eps \
"image/x-dwg", # .dwg \
"image/x-photoshop", # .psd \
"image/x-xbitmap", # .gif, .jpg \
"multipart/x-tar", # .tar \
"multipart/x-zip", # .zip \
"octet-stream", # possibly some .ppt files \
"text/csv", # .csv \
"text/mspg-legacyinfo", # .msi \
"text/pdf", # .pdf \
"text/richtext", # .rtf \
"text/rtf", # .rtf \
"text/x-pdf", # .pdf \
"text/xml", # .xml, .rss \
"video/avi", # .avi, .mov \
"video/mp4v-es", # .mp4 \
"video/msvideo", # .avi \
"video/quicktime", # .mov \
"video/x-flv", # .flv \
"video/x-m4v", # .m4v \
"video/x-msvideo", # .avi \
"video/x-quicktime", # .mov \
"video/xmpg2", # .avi \
"zz-application/zz-winassoc-psd", # .psd \
]
class Browser(object):
"""hubcheck webdriver interface"""
def __init__(self, mimetypes=[], downloaddir='/tmp'):
self.logger = logging.getLogger(__name__)
self.logger.info("setting up a web browser")
self._browser = None
self.wait_time = 2
self.marker = 0
self.proxy_client = None
self.proxy_blacklist = PROXY_BLACKLIST
self.profile = None
self.downloaddir = downloaddir
self.mimetypes = mimetypes
def __del__(self):
self.close()
def setup_browser_preferences(self):
"""browser preferences should be setup by subclasses
"""
pass
def start_proxy_client(self):
# setup proxy if needed
if hubcheck.conf.settings.proxy is None:
self.logger.info("proxy not started, not starting client")
return
# start the client
self.proxy_client = hubcheck.conf.settings.proxy.create_client()
# setup the proxy website blacklist
if self.proxy_client is not None:
self.logger.info("setting up proxy blacklist")
for url_re in self.proxy_blacklist:
self.logger.debug("blacklisting %s" % url_re)
self.proxy_client.blacklist(url_re,200)
def stop_proxy_client(self):
if self.proxy_client is not None:
self.logger.info("stopping proxy client")
self.proxy_client.close()
self.proxy_client = None
def setup_browser_size_and_position(self):
# set the amount of time to wait for an element to appear on the page
self._browser.implicitly_wait(self.wait_time)
# place the browser window in the upper left corner of the screen
self._browser.set_window_position(0, 0)
# resize the window to just shy of our 1024x768 screen
self._browser.set_window_size(1070,700)
def launch(self):
"""subclass should add code required to launch the browser
"""
pass
def get(self,url):
if self._browser is None:
self.launch()
self.logger.debug("retrieving url: %s" % (url))
self._browser.get(url)
def close(self):
if self._browser is None:
return
self.logger.info("closing browser")
self._browser.quit()
self._browser = None
        self.profile = None
self.stop_proxy_client()
def error_loading_page(self,har_entry):
"""
check if there was an error loading the web page
returns True or False
"""
harurl = har_entry['request']['url']
harstatus = har_entry['response']['status']
self.logger.debug("%s returned status %s" % (harurl,harstatus))
result = None
if (harstatus >= 100) and (harstatus <= 199):
# information codes
result = False
elif (harstatus >= 200) and (harstatus <= 299):
# success codes
result = False
elif (harstatus >= 300) and (harstatus <= 399):
# redirect codes
result = False
elif (harstatus >= 400) and (harstatus <= 499):
# client error codes
# client made an invalid request (bad links)
# page does not exist
result = True
elif (harstatus >= 500) and (harstatus <= 599):
# server error codes
# client made a valid request,
            # but server failed while responding.
result = True
else:
result = True
return result
def page_load_details(self,url=None,follow_redirects=True):
"""
return the har entry for the last page loaded
follow redirects to make sure you get the har entry
for the page that was eventually loaded.
A return value of None means no page was ever loaded.
"""
if not self.proxy_client:
return None
if url is None:
url = self._browser.current_url
self.logger.debug("processing har for %s" % (url))
har = self.proxy_client.har
self.logger.debug("har entry = %s" % (pprint.pformat(har)))
return_entry = None
for entry in har['log']['entries']:
harurl = entry['request']['url']
harstatus = entry['response']['status']
if url == None:
# we are following a redirect from below
return_entry = entry
elif url == harurl:
# the original url matches the url for this har entry exactly
return_entry = entry
elif (not url.endswith('/')) and (url+'/' == harurl):
# the original url almost matches the url for this har entry
return_entry = entry
if return_entry is not None:
if follow_redirects and (harstatus >= 300) and (harstatus <= 399):
# follow the redirect (should be the next har entry)
url = None
continue
else:
# found our match
break
self.logger.debug("har for url = %s" % (pprint.pformat(return_entry)))
return return_entry
def take_screenshot(self,filename=None):
"""
Take a screen shot of the browser, store it in filename.
"""
if self._browser is None:
return
if filename is None:
dts = datetime.datetime.today().strftime("%Y%m%d%H%M%S")
filename = 'hcss_%s.png' % dts
self.logger.debug("screenshot filename: %s" % (filename))
self._browser.save_screenshot(filename)
def next_marker(self):
self.marker += 1
return self.marker
| {
"content_hash": "89c50e7e724a26ce9b90b27b357dc1a4",
"timestamp": "",
"source": "github",
"line_count": 385,
"max_line_length": 93,
"avg_line_length": 45.163636363636364,
"alnum_prop": 0.4137336093857833,
"repo_name": "codedsk/hubcheck",
"id": "300d26a47173ec68c9cbc913d71ba1fd8d873df4",
"size": "17388",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hubcheck/browser/browser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1248"
},
{
"name": "Makefile",
"bytes": "846"
},
{
"name": "Python",
"bytes": "1355360"
},
{
"name": "Shell",
"bytes": "1483"
}
],
"symlink_target": ""
} |
"""Add required extra spec
Revision ID: 59eb64046740
Revises: 162a3e673105
Create Date: 2015-01-29 15:33:25.348140
"""
# revision identifiers, used by Alembic.
revision = '59eb64046740'
down_revision = '4ee2cf4be19a'
from alembic import op
from oslo_utils import timeutils
import sqlalchemy as sa
from sqlalchemy.sql import table
def upgrade():
session = sa.orm.Session(bind=op.get_bind().connect())
es_table = table(
'share_type_extra_specs',
sa.Column('created_at', sa.DateTime),
sa.Column('deleted', sa.Integer),
sa.Column('share_type_id', sa.String(length=36)),
sa.Column('spec_key', sa.String(length=255)),
sa.Column('spec_value', sa.String(length=255)))
st_table = table(
'share_types',
sa.Column('deleted', sa.Integer),
sa.Column('id', sa.Integer))
# NOTE(vponomaryov): field 'deleted' is integer here.
existing_required_extra_specs = (session.query(es_table).
filter(es_table.c.spec_key ==
'driver_handles_share_servers').
filter(es_table.c.deleted == 0).
all())
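    # Share types that already carry the required extra spec are excluded, so the
    # bulk insert below only touches share types that are missing it.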
exclude_st_ids = [es.share_type_id for es in existing_required_extra_specs]
# NOTE(vponomaryov): field 'deleted' is string here.
share_types = (session.query(st_table).
filter(st_table.c.deleted.in_(('0', 'False', ))).
filter(st_table.c.id.notin_(exclude_st_ids)).
all())
extra_specs = []
for st in share_types:
extra_specs.append({
'spec_key': 'driver_handles_share_servers',
'spec_value': 'True',
'deleted': 0,
'created_at': timeutils.utcnow(),
'share_type_id': st.id,
})
op.bulk_insert(es_table, extra_specs)
session.close_all()
def downgrade():
"""Downgrade method.
We can't determine, which extra specs should be removed after insertion,
that's why do nothing here.
"""
| {
"content_hash": "fd909aa6c099f6856af8882ad9359817",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 79,
"avg_line_length": 30.66176470588235,
"alnum_prop": 0.5717026378896882,
"repo_name": "bswartz/manila",
"id": "073f933c106dcc97a9f80dde51d86c7cb4baf9a3",
"size": "2660",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "manila/db/migrations/alembic/versions/59eb64046740_add_required_extra_spec.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "953"
},
{
"name": "Python",
"bytes": "9952105"
},
{
"name": "Shell",
"bytes": "106606"
}
],
"symlink_target": ""
} |
import json
import requests
class OneID:
def __init__(self, api_id = None, api_key=None, server_flag=""):
"""server_flag
:param api_id: Your OneID API ID credentials (from https://keychain.oneid.com/register)
:param api_key: Your OneID API Key credentials (from https://keychain.oneid.com/register)
:param server_flag: If you want to connect to a different API should be (for example) "-test" when using a non-production server
"""
self.repo_server = "https://account%s.oneid.com/repo/" % server_flag
self.helper_server = "https://keychain%s.oneid.com" % server_flag
self.script_header = '<script src="https://api%s.oneid.com/js/oneid.js" type="text/javascript"></script>' % server_flag
self.server_flag = server_flag
self.creds_file = "api_key" + server_flag + ".json"
# Set the API credentials
self.set_credentials(api_id, api_key)
def _call_keychain(self, method, data={}):
"""Call the OneID Keychain Service. (i.e. to validate signatures)
:param method: The OneID API call you wish to call
:param data: Data for the OneID API CAll
"""
url = "%s/%s" % (self.helper_server, method)
r = requests.post(url, json.dumps(data), auth=(self.api_id, self.api_key))
return r.json()
def _call_repo(self, method, data={}):
"""Call the OneID Repo. (i.e. to do a OneID Confirm request)
:param method: The OneID API call you wish to call
        :param data: Data for the OneID API call
"""
url = "%s/%s" % (self.repo_server, method)
r = requests.post(url, json.dumps(data), auth=(self.api_id, self.api_key))
return r.json()
def set_credentials(self, api_id="", api_key=""):
"""Set the credentials used for access to the OneID Helper Service
:param api_id: Your OneID API ID
:param api_key: Your OneID API key
"""
if api_id != "":
self.api_id = api_id
self.api_key = api_key
else:
f = open(self.creds_file, 'r')
creds = json.loads(f.read())
f.close()
self.api_id = creds["API_ID"]
self.api_key = creds["API_KEY"]
def validate(self, oneid_payload):
"""Validate the data received by a callback
:param oneid_payload: The dictionary you want to validate, typically the payload from a OneID sign in call
"""
if not isinstance(oneid_payload, dict):
oneid_payload = json.loads(oneid_payload)
data_to_validate = { "nonces" : oneid_payload["nonces"],
"uid" : oneid_payload["uid"] }
if "attr_claim_tokens" in oneid_payload:
data_to_validate["attr_claim_tokens"] = oneid_payload["attr_claim_tokens"]
keychain_response = self._call_keychain("validate", data_to_validate)
if not self.success(keychain_response):
keychain_response["failed"] = "failed"
return keychain_response
oneid_payload.update(keychain_response)
return oneid_payload
def confirm(self, uid, token, message):
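        """Send a two-factor confirmation message and validate the response.
        :param uid: The OneID user id being confirmed
        :param token: The user's two-factor token
        :param message: Text shown to the user in the confirmation request
        """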
data = {
'two_factor_token' : token,
'message' : message
}
confirm = self._call_repo("send_2fa", data)
confirm.update({
'uid' : uid
})
valid_response = self._call_keychain('validate', confirm)
return valid_response
def redirect(self, redirect_url, oneid_response):
"""Create the JSON string that instructs the AJAX code to redirect the browser to the account
:param redirect_url: The URL of where you'd like to go
:param oneid_response: The validated OneID response data
:param sessionid: For the OneID example, your session ID
"""
return json.dumps({"error" : oneid_response['error'],
"errorcode" : str(oneid_response['errorcode']),
"url" : redirect_url })
def success(self, oneid_response):
"""Check errorcode in a response
:param oneid_response:
"""
return oneid_response.get("errorcode", None) == 0
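# Illustrative usage sketch (not part of the original API docs; credential values
# and the payload variable are placeholders):
#   oneid = OneID(api_id="YOUR_API_ID", api_key="YOUR_API_KEY")
#   validated = oneid.validate(payload_from_browser)
#   if oneid.success(validated):
#       pass  # signed in; on failure the dict carries a "failed" key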
| {
"content_hash": "444a6d0eaf0e6564a278d335df8f86ef",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 137,
"avg_line_length": 37.75,
"alnum_prop": 0.5891674550614948,
"repo_name": "OneID/oneid-python-sdk",
"id": "d7a04e0fa7080fe9c54833ad32e34635c57a1eab",
"size": "4300",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "oneid.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "12743"
}
],
"symlink_target": ""
} |
import os, sys, platform, glob, subprocess, types, re
DEFAULT_API_LEVEL = 10
android_api_levels = {
3: 'android-1.5',
4: 'android-1.6',
5: 'android-2.0',
6: 'android-2.0.1',
7: 'android-2.1',
8: 'android-2.2',
9: 'android-2.3',
10: 'android-2.3.3',
11: 'android-3.0'
}
class Device:
def __init__(self, name, port=-1, emulator=False, offline=False):
self.name = name
self.port = port
self.emulator = emulator
self.offline = offline
def get_name(self):
return self.name
def get_port(self):
return self.port
def is_emulator(self):
return self.emulator
def is_device(self):
return not self.emulator
def is_offline(self):
return self.offline
class AndroidSDK:
def __init__(self, android_sdk, api_level=DEFAULT_API_LEVEL):
self.android_sdk = self.find_sdk(android_sdk)
if self.android_sdk is None:
raise Exception('No Android SDK directory found')
self.set_api_level(api_level)
def set_api_level(self, level):
self.api_level = level
self.find_platform_dir()
self.find_google_apis_dir()
def try_best_match_api_level(self, level):
# Don't go backwards
if level <= self.api_level:
return
orig_level = self.api_level
orig_platform_dir = self.platform_dir
orig_google_apis_dir = self.google_apis_dir
check_level = level
while check_level > orig_level:
self.find_platform_dir(check_level, False)
if self.platform_dir:
self.api_level = check_level
print "[INFO] Targeting Android SDK version %s" % self.api_level
break
check_level -= 1
if not self.platform_dir:
# Couldn't match. Set it back and return.
self.platform_dir = orig_platform_dir
return
# Now give the Google APIs a chance to match.
check_level = level
while check_level > orig_level:
self.find_google_apis_dir(check_level)
if self.google_apis_dir:
break
check_level -= 1
if not self.google_apis_dir:
# Couldn't match, so set it back to what it was.
self.google_apis_dir = orig_google_apis_dir
def find_sdk(self, supplied):
if platform.system() == 'Windows':
default_dirs = ['C:\\android-sdk', 'C:\\android', 'C:\\Program Files\\android-sdk', 'C:\\Program Files\\android']
else:
default_dirs = ['/opt/android', '/opt/android-sdk', '/usr/android', '/usr/android-sdk']
if 'ANDROID_SDK' in os.environ:
return os.environ['ANDROID_SDK']
if supplied is not None:
return supplied
for default_dir in default_dirs:
if os.path.exists(default_dir):
return default_dir
path = os.environ['PATH']
		for dir in path.split(os.pathsep):
if os.path.exists(os.path.join(dir, 'android')) \
or os.path.exists(os.path.join(dir, 'android.exe')):
return dir
return None
def find_dir(self, version, prefix):
dirs = glob.glob(os.path.join(self.android_sdk, prefix+str(version)+"*"))
if len(dirs) > 0:
#grab the first.. good enough?
return dirs[0]
return None
def find_platform_dir(self, api_level=-1, raise_error=True):
if api_level == -1:
api_level = self.api_level
platform_dir = self.find_dir(api_level, os.path.join('platforms', 'android-'))
if platform_dir is None:
old_style_dir = os.path.join(self.android_sdk, 'platforms', android_api_levels[api_level])
if os.path.exists(old_style_dir):
platform_dir = old_style_dir
if platform_dir is None and raise_error:
raise Exception("No \"%s\" or \"%s\" in the Android SDK" % ('android-%s' % api_level, android_api_levels[api_level]))
self.platform_dir = platform_dir
def find_google_apis_dir(self, api_level=-1):
if api_level == -1:
api_level = self.api_level
if 'GOOGLE_APIS' in os.environ:
self.google_apis_dir = os.environ['GOOGLE_APIS']
return self.google_apis_dir
self.google_apis_dir = self.find_dir(api_level, os.path.join('add-ons', 'google_apis-'))
if self.google_apis_dir is None:
self.google_apis_dir = self.find_dir(api_level, os.path.join('add-ons', 'addon?google?apis?google*'))
def get_maps_jar(self):
if self.google_apis_dir is not None:
return os.path.join(self.google_apis_dir, "libs", "maps.jar")
return None
def get_android_jar(self):
if self.platform_dir is not None:
return os.path.join(self.platform_dir, "android.jar")
return None
def get_android_sdk(self):
return self.android_sdk
def get_platform_dir(self):
return self.platform_dir
def get_google_apis_dir(self):
return self.google_apis_dir
def get_platform_tools_dir(self):
if self.platform_dir is not None:
platform_tools = os.path.join(self.platform_dir, 'tools')
if os.path.exists(platform_tools):
return platform_tools
return None
def get_sdk_platform_tools_dir(self):
if self.android_sdk is not None:
sdk_platform_tools = os.path.join(self.android_sdk, 'platform-tools')
if os.path.exists(sdk_platform_tools):
return sdk_platform_tools
return None
def get_build_tools_dir(self):
if self.android_sdk is not None:
build_tools = os.path.join(self.android_sdk, 'build-tools')
if os.path.exists(build_tools):
return build_tools
return None
def get_api_level(self):
return self.api_level
def get_tool(self, topdir, tool):
if topdir is not None:
tool_path = os.path.join(topdir, tool)
if platform.system() == "Windows":
if os.path.exists(tool_path+".exe"): return tool_path+".exe"
elif os.path.exists(tool_path+".bat"): return tool_path+".bat"
else: return None
if os.path.exists(tool_path):
return tool_path
return None
def get_sdk_tool(self, tool):
return self.get_tool(os.path.join(self.android_sdk, 'tools'), tool)
def get_platform_tool(self, tool):
platform_tools_dir = self.get_platform_tools_dir()
sdk_platform_tools_dir = self.get_sdk_platform_tools_dir()
build_tools_dir = self.get_build_tools_dir()
tool_path = None
if platform_tools_dir is not None:
tool_path = self.get_tool(platform_tools_dir, tool)
if tool_path is None and sdk_platform_tools_dir is not None:
tool_path = self.get_tool(sdk_platform_tools_dir, tool)
if tool_path is None or not os.path.exists(tool_path):
tool_path = self.get_sdk_tool(tool)
# Many tools were moved to build-tools/17.0.0 (or something equivalent in windows) in sdk tools r22
if tool_path is None and build_tools_dir is not None:
# Here, we list all the directories in build-tools and check inside
# each one for the tool we are looking for (there can be future versions besides 17.0.0).
for dirname in os.listdir(build_tools_dir):
build_tools_version_dir = os.path.join(build_tools_dir, dirname)
tool_path = self.get_tool(build_tools_version_dir, tool)
if tool_path is not None:
break
return tool_path
def get_dx(self):
return self.get_platform_tool('dx')
def get_dx_jar(self):
platform_tools_dir = self.get_platform_tools_dir()
sdk_platform_tools_dir = self.get_sdk_platform_tools_dir()
build_tools_dir = self.get_build_tools_dir()
dx_jar_path = None
if platform_tools_dir is not None:
dx_jar_path = self.get_lib_dx_jar(platform_tools_dir)
if sdk_platform_tools_dir is not None and dx_jar_path is None:
dx_jar_path = self.get_lib_dx_jar(sdk_platform_tools_dir)
if build_tools_dir is not None and dx_jar_path is None:
for dirname in os.listdir(build_tools_dir):
build_tools_version_dir = os.path.join(build_tools_dir, dirname)
dx_jar_path = self.get_lib_dx_jar(build_tools_version_dir)
if dx_jar_path is not None:
break
return dx_jar_path
def get_lib_dx_jar(self, topdir):
if topdir is not None:
lib_dx_jar_path = os.path.join(topdir, 'lib', 'dx.jar')
if os.path.exists(lib_dx_jar_path):
return lib_dx_jar_path
return None
def get_dexdump(self):
return self.get_platform_tool('dexdump')
def get_zipalign(self):
return self.get_sdk_tool('zipalign')
def get_aapt(self):
# for aapt (and maybe eventually for others) we
# want to favor platform-tools over android-x/tools
# because of new resource qualifiers for honeycomb
sdk_platform_tools_dir = self.get_sdk_platform_tools_dir()
if not sdk_platform_tools_dir is None and os.path.exists(os.path.join(sdk_platform_tools_dir, 'aapt')):
return os.path.join(sdk_platform_tools_dir, 'aapt')
return self.get_platform_tool('aapt')
def get_apkbuilder(self):
return self.get_sdk_tool('apkbuilder')
def get_android(self):
return self.get_sdk_tool('android')
def get_emulator(self):
return self.get_sdk_tool('emulator')
def get_adb(self):
return self.get_platform_tool('adb')
def get_mksdcard(self):
return self.get_sdk_tool('mksdcard')
def get_aidl(self):
return self.get_platform_tool('aidl')
def sdk_path(self, *path):
return os.path.join(self.android_sdk, *path)
def platform_path(self, *path):
return os.path.join(self.platform_dir, *path)
def google_apis_path(self, *path):
return os.path.join(self.google_apis_dir, *path)
def list_devices(self):
adb = self.get_adb()
(out, err) = subprocess.Popen([adb, 'devices'], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
if type(err) != types.NoneType and len(err) > 0:
raise Exception(err)
devices = []
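		# Each 'adb devices' line is "<name> <state>", e.g. "emulator-5554 device"
		# or "0123456789ABCDEF device"; emulator names carry the console port.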
for line in out.splitlines():
line = line.strip()
if line.startswith("List of devices"): continue
elif line.startswith("emulator-"):
(name, status) = line.split()
port = int(name[name.index("-")+1:])
offline = False
if status == "offline":
offline = True
devices.append(Device(name, port, True, offline))
elif "device" in line:
name = line.split()[0]
devices.append(Device(name))
return devices
def run_adb(self, args, device_args=None):
adb_args = [self.get_adb()]
if device_args != None:
adb_args.extend(device_args)
adb_args.extend(args)
(out, err) = subprocess.Popen(adb_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
if type(err) != types.NoneType and len(err) > 0:
raise Exception(err)
return out
def list_processes(self, adb_args=None):
out = self.run_adb(['shell', 'ps'], adb_args)
processes = []
for line in out.splitlines():
line = line.strip()
tokens = re.split(r"\s+", line)
if len(tokens) < 2: continue
if tokens[0] == 'USER': continue
process = {"pid": tokens[1], "name": tokens[len(tokens)-1]}
processes.append(process)
return processes
def jdwp_kill(self, app_id, adb_args=None, forward_port=51111):
import socket, struct, uuid
pid = None
for process in self.list_processes(adb_args):
if process['name'] == app_id:
pid = process['pid']
break
if pid == None:
raise Exception("No processes running with the name: %s" % app_id)
out = self.run_adb(['jdwp'], adb_args)
found_pid = False
for line in out.splitlines():
if line == pid:
found_pid = True
break
if not found_pid:
raise Exception("The application %s (PID %s) is not debuggable, and cannot be killed via JDWP" % (app_id, pid))
self.run_adb(['forward', 'tcp:%d' % forward_port, 'jdwp:%s' % pid], adb_args)
jdwp_socket = socket.create_connection(('', forward_port))
jdwp_socket.settimeout(5.0)
jdwp_socket.send('JDWP-Handshake')
try:
handshake = jdwp_socket.recv(14)
except:
jdwp_socket.close()
raise Exception('Timeout when waiting for handshake, make sure no other DDMS debuggers are running (i.e. Eclipse)')
if handshake != 'JDWP-Handshake':
jdwp_socket.close()
raise Exception('Incorrect handshake, make sure the process is still running')
# Taken from Android ddmlib
DDMS_CMD = 0x01
DDMS_CMD_SET = 0xc7
# just a random 32 bit integer should be good enough
packetId = uuid.uuid4().time_low
packetLen = 23
# the string EXIT bitshifted into an integer
EXIT = 1163413844
EXIT_LEN = 4
exitCode = 1
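		# '!2I3B3I' packs: total length (23), packet id, flags (0), DDMS command set,
		# DDMS command, then the EXIT chunk type, its length and the exit code,
		# all big-endian per the JDWP wire format.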
packet = struct.pack('!2I3B3I', packetLen, packetId, 0, DDMS_CMD_SET, DDMS_CMD, EXIT, EXIT_LEN, exitCode)
jdwp_socket.send(packet)
jdwp_socket.close()
if __name__ == "__main__":
if len(sys.argv) < 2:
print "Usage: %s ANDROID_SDK [API]" % sys.argv[0]
print " ANDROID_SDK is the default path to the Android SDK. Use '-' if there is no default path"
print " API (optional) is an Android API version (i.e. 4, 5, 6, 7, 8). The default is 7."
print ""
print "Prints the SDK directory, Android Platform directory, and Google APIs directory"
sys.exit(1)
sdk_path = sys.argv[1]
if sdk_path == '-':
sdk_path = None
api_level = DEFAULT_API_LEVEL
if len(sys.argv) > 2:
api_level = int(sys.argv[2])
try:
sdk = AndroidSDK(sdk_path, api_level)
print "ANDROID_SDK=%s" % sdk.get_android_sdk()
print "ANDROID_API_LEVEL=%d" % sdk.get_api_level()
print "ANDROID_PLATFORM=%s" % sdk.get_platform_dir()
print "GOOGLE_APIS=%s" % sdk.get_google_apis_dir()
except Exception, e:
print >>sys.stderr, e
| {
"content_hash": "f4ed4434b4eab5e9bc58ed4dc51e56ee",
"timestamp": "",
"source": "github",
"line_count": 407,
"max_line_length": 120,
"avg_line_length": 31.167076167076168,
"alnum_prop": 0.682538431217974,
"repo_name": "pinnamur/titanium_mobile",
"id": "89265c0424c53273f74454421f049cc8f4052b2e",
"size": "12755",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "support/android/androidsdk.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "188070"
},
{
"name": "C#",
"bytes": "80533"
},
{
"name": "C++",
"bytes": "202887"
},
{
"name": "CSS",
"bytes": "19521"
},
{
"name": "Java",
"bytes": "2550087"
},
{
"name": "JavaScript",
"bytes": "4575134"
},
{
"name": "Makefile",
"bytes": "7605"
},
{
"name": "Objective-C",
"bytes": "3397775"
},
{
"name": "Objective-C++",
"bytes": "8560"
},
{
"name": "PHP",
"bytes": "17988"
},
{
"name": "Perl",
"bytes": "12759"
},
{
"name": "Python",
"bytes": "1844919"
},
{
"name": "Shell",
"bytes": "28070"
}
],
"symlink_target": ""
} |
"""API for Google Nest Device Access bound to Home Assistant OAuth."""
from __future__ import annotations
import datetime
import logging
from typing import cast
from aiohttp import ClientSession
from google.oauth2.credentials import Credentials
from google_nest_sdm.auth import AbstractAuth
from google_nest_sdm.google_nest_subscriber import GoogleNestSubscriber
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_CLIENT_ID, CONF_CLIENT_SECRET
from homeassistant.core import HomeAssistant
from homeassistant.helpers import aiohttp_client, config_entry_oauth2_flow
from .const import (
API_URL,
CONF_PROJECT_ID,
CONF_SUBSCRIBER_ID,
DATA_NEST_CONFIG,
DOMAIN,
OAUTH2_TOKEN,
SDM_SCOPES,
)
_LOGGER = logging.getLogger(__name__)
class AsyncConfigEntryAuth(AbstractAuth):
"""Provide Google Nest Device Access authentication tied to an OAuth2 based config entry."""
def __init__(
self,
websession: ClientSession,
oauth_session: config_entry_oauth2_flow.OAuth2Session,
client_id: str,
client_secret: str,
) -> None:
"""Initialize Google Nest Device Access auth."""
super().__init__(websession, API_URL)
self._oauth_session = oauth_session
self._client_id = client_id
self._client_secret = client_secret
async def async_get_access_token(self) -> str:
"""Return a valid access token for SDM API."""
if not self._oauth_session.valid_token:
await self._oauth_session.async_ensure_token_valid()
return cast(str, self._oauth_session.token["access_token"])
async def async_get_creds(self) -> Credentials:
"""Return an OAuth credential for Pub/Sub Subscriber."""
# We don't have a way for Home Assistant to refresh creds on behalf
# of the google pub/sub subscriber. Instead, build a full
# Credentials object with enough information for the subscriber to
# handle this on its own. We purposely don't refresh the token here
# even when it is expired to fully hand off this responsibility and
# know it is working at startup (then if not, fail loudly).
token = self._oauth_session.token
creds = Credentials(
token=token["access_token"],
refresh_token=token["refresh_token"],
token_uri=OAUTH2_TOKEN,
client_id=self._client_id,
client_secret=self._client_secret,
scopes=SDM_SCOPES,
)
creds.expiry = datetime.datetime.fromtimestamp(token["expires_at"])
return creds
async def new_subscriber(
hass: HomeAssistant, entry: ConfigEntry
) -> GoogleNestSubscriber | None:
"""Create a GoogleNestSubscriber."""
implementation = (
await config_entry_oauth2_flow.async_get_config_entry_implementation(
hass, entry
)
)
config = hass.data[DOMAIN][DATA_NEST_CONFIG]
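    # The subscriber id may come from the config entry data or, as a fallback,
    # from the integration's YAML configuration.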
if not (
subscriber_id := entry.data.get(
CONF_SUBSCRIBER_ID, config.get(CONF_SUBSCRIBER_ID)
)
):
_LOGGER.error("Configuration option 'subscriber_id' required")
return None
return await new_subscriber_with_impl(hass, entry, subscriber_id, implementation)
async def new_subscriber_with_impl(
hass: HomeAssistant,
entry: ConfigEntry,
subscriber_id: str,
implementation: config_entry_oauth2_flow.AbstractOAuth2Implementation,
) -> GoogleNestSubscriber:
"""Create a GoogleNestSubscriber, used during ConfigFlow."""
config = hass.data[DOMAIN][DATA_NEST_CONFIG]
session = config_entry_oauth2_flow.OAuth2Session(hass, entry, implementation)
auth = AsyncConfigEntryAuth(
aiohttp_client.async_get_clientsession(hass),
session,
config[CONF_CLIENT_ID],
config[CONF_CLIENT_SECRET],
)
return GoogleNestSubscriber(auth, config[CONF_PROJECT_ID], subscriber_id)
| {
"content_hash": "bbde45e08813ae15e83380ecdff4f220",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 96,
"avg_line_length": 35.81818181818182,
"alnum_prop": 0.6796954314720812,
"repo_name": "jawilson/home-assistant",
"id": "3934b0b3cf16b285f052ff0b5d27411412eb450c",
"size": "3940",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "homeassistant/components/nest/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2782"
},
{
"name": "Python",
"bytes": "40129467"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
import core
import helpers
import printers
import settings
| {
"content_hash": "b8822e1f956fb78f7437a890f520412f",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 15,
"avg_line_length": 14.75,
"alnum_prop": 0.864406779661017,
"repo_name": "martinlatrille/Aito",
"id": "2cd00dbd0aeccc705296607ac0d6faeb3ca31b78",
"size": "84",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libaito/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "54"
},
{
"name": "Python",
"bytes": "11415"
}
],
"symlink_target": ""
} |
import copy
import os
import unittest
import mock
from feaas import storage
from feaas.managers import cloudstack
class CloudStackManagerTestCase(unittest.TestCase):
def set_api_envs(self, url="http://cloudstackapi", api_key="key",
secret_key="secret"):
os.environ["CLOUDSTACK_API_URL"] = self.url = url
os.environ["CLOUDSTACK_API_KEY"] = self.api_key = api_key
os.environ["CLOUDSTACK_SECRET_KEY"] = self.secret_key = secret_key
def del_api_envs(self):
self._remove_envs("CLOUDSTACK_API_URL", "CLOUDSTACK_API_KEY",
"CLOUDSTACK_SECRET_KEY")
def set_vm_envs(self, template_id="abc123", zone_id="zone1",
service_offering_id="qwe123", project_id=None,
network_ids=None):
os.environ["CLOUDSTACK_TEMPLATE_ID"] = self.template_id = template_id
self.service_offering_id = service_offering_id
os.environ["CLOUDSTACK_SERVICE_OFFERING_ID"] = self.service_offering_id
os.environ["CLOUDSTACK_ZONE_ID"] = self.zone_id = zone_id
if project_id:
os.environ["CLOUDSTACK_PROJECT_ID"] = self.project_id = project_id
if network_ids:
os.environ["CLOUDSTACK_NETWORK_IDS"] = self.network_ids = network_ids
def del_vm_envs(self):
self._remove_envs("CLOUDSTACK_TEMPLATE_ID", "CLOUDSTACK_SERVICE_OFFERING_ID",
"CLOUDSTACK_ZONE_ID", "CLOUDSTACK_PROJECT_ID",
"CLOUDSTACK_NETWORK_IDS")
def _remove_envs(self, *envs):
for env in envs:
if env in os.environ:
del os.environ[env]
def test_init(self):
self.set_api_envs()
self.addCleanup(self.del_api_envs)
client = cloudstack.CloudStackManager(storage=None)
self.assertEqual(self.url, client.client.api_url)
self.assertEqual(self.api_key, client.client.api_key)
self.assertEqual(self.secret_key, client.client.secret)
def test_init_no_api_url(self):
with self.assertRaises(cloudstack.MissConfigurationError) as cm:
cloudstack.CloudStackManager(storage=None)
exc = cm.exception
self.assertEqual(("env var CLOUDSTACK_API_URL is required",),
exc.args)
def test_init_no_api_key(self):
os.environ["CLOUDSTACK_API_URL"] = "something"
with self.assertRaises(cloudstack.MissConfigurationError) as cm:
cloudstack.CloudStackManager(storage=None)
self.addCleanup(self.del_api_envs)
exc = cm.exception
self.assertEqual(("env var CLOUDSTACK_API_KEY is required",),
exc.args)
def test_init_no_secret_key(self):
os.environ["CLOUDSTACK_API_URL"] = "something"
os.environ["CLOUDSTACK_API_KEY"] = "not_secret"
with self.assertRaises(cloudstack.MissConfigurationError) as cm:
cloudstack.CloudStackManager(storage=None)
self.addCleanup(self.del_api_envs)
exc = cm.exception
self.assertEqual(("env var CLOUDSTACK_SECRET_KEY is required",),
exc.args)
@mock.patch("uuid.uuid4")
def test_start_instance(self, uuid):
self.set_api_envs()
self.addCleanup(self.del_api_envs)
self.set_vm_envs(project_id="project-123", network_ids="net-123")
self.addCleanup(self.del_vm_envs)
uuid.return_value = "uuid_val"
instance = storage.Instance(name="some_instance", units=[])
strg_mock = mock.Mock()
strg_mock.retrieve_instance.return_value = instance
client_mock = mock.Mock()
client_mock.deployVirtualMachine.return_value = {"id": "abc123",
"jobid": "qwe321"}
client_mock.queryAsyncJobResult.return_value = {"jobstatus": 1}
vm = {"id": "abc123", "nic": [{"ipaddress": "10.0.0.1"}]}
client_mock.listVirtualMachines.return_value = {"virtualmachine": [vm]}
client_mock.encode_user_data.return_value = user_data = mock.Mock()
manager = cloudstack.CloudStackManager(storage=strg_mock)
manager.client = client_mock
got_instance = manager.start_instance("some_instance")
self.assertEqual(instance, got_instance)
self.assertEqual(1, len(instance.units))
unit = instance.units[0]
self.assertEqual("abc123", unit.id)
self.assertEqual("uuid_val", unit.secret)
self.assertEqual(instance, unit.instance)
self.assertEqual("10.0.0.1", unit.dns_name)
self.assertEqual("creating", unit.state)
strg_mock.retrieve_instance.assert_called_with(name="some_instance")
create_data = {"group": "feaas", "templateid": self.template_id,
"zoneid": self.zone_id,
"serviceofferingid": self.service_offering_id,
"userdata": user_data, "networkids": self.network_ids,
"projectid": self.project_id}
client_mock.deployVirtualMachine.assert_called_with(create_data)
actual_user_data = manager.get_user_data("uuid_val")
client_mock.encode_user_data.assert_called_with(actual_user_data)
@mock.patch("uuid.uuid4")
def test_start_instance_no_project_id(self, uuid):
self.set_api_envs()
self.addCleanup(self.del_api_envs)
self.set_vm_envs(network_ids="net-123")
self.addCleanup(self.del_vm_envs)
uuid.return_value = "uuid_val"
instance = storage.Instance(name="some_instance", units=[])
strg_mock = mock.Mock()
strg_mock.retrieve_instance.return_value = instance
client_mock = mock.Mock()
client_mock.deployVirtualMachine.return_value = {"id": "abc123",
"jobid": "qwe321"}
client_mock.queryAsyncJobResult.return_value = {"jobstatus": 1}
vm = {"id": "abc123", "nic": [{"ipaddress": "10.0.0.1"}]}
client_mock.listVirtualMachines.return_value = {"virtualmachine": [vm]}
client_mock.encode_user_data.return_value = user_data = mock.Mock()
manager = cloudstack.CloudStackManager(storage=strg_mock)
manager.client = client_mock
got_instance = manager.start_instance("some_instance")
self.assertEqual(instance, got_instance)
self.assertEqual(1, len(instance.units))
unit = instance.units[0]
self.assertEqual("abc123", unit.id)
self.assertEqual("uuid_val", unit.secret)
self.assertEqual(instance, unit.instance)
self.assertEqual("10.0.0.1", unit.dns_name)
self.assertEqual("creating", unit.state)
strg_mock.retrieve_instance.assert_called_with(name="some_instance")
create_data = {"group": "feaas", "templateid": self.template_id,
"zoneid": self.zone_id,
"serviceofferingid": self.service_offering_id,
"userdata": user_data, "networkids": self.network_ids}
client_mock.deployVirtualMachine.assert_called_with(create_data)
actual_user_data = manager.get_user_data("uuid_val")
client_mock.encode_user_data.assert_called_with(actual_user_data)
@mock.patch("uuid.uuid4")
def test_start_instance_no_network_id(self, uuid):
self.set_api_envs()
self.addCleanup(self.del_api_envs)
self.set_vm_envs(project_id="proj-123")
self.addCleanup(self.del_vm_envs)
uuid.return_value = "uuid_val"
instance = storage.Instance(name="some_instance", units=[])
strg_mock = mock.Mock()
strg_mock.retrieve_instance.return_value = instance
client_mock = mock.Mock()
client_mock.deployVirtualMachine.return_value = {"id": "abc123",
"jobid": "qwe321"}
client_mock.queryAsyncJobResult.return_value = {"jobstatus": 1}
vm = {"id": "abc123", "nic": []}
client_mock.listVirtualMachines.return_value = {"virtualmachine": [vm]}
client_mock.encode_user_data.return_value = user_data = mock.Mock()
manager = cloudstack.CloudStackManager(storage=strg_mock)
manager.client = client_mock
got_instance = manager.start_instance("some_instance")
self.assertEqual(instance, got_instance)
self.assertEqual(1, len(instance.units))
unit = instance.units[0]
self.assertEqual("abc123", unit.id)
self.assertEqual("uuid_val", unit.secret)
self.assertEqual(instance, unit.instance)
self.assertEqual("", unit.dns_name)
self.assertEqual("creating", unit.state)
strg_mock.retrieve_instance.assert_called_with(name="some_instance")
create_data = {"group": "feaas", "templateid": self.template_id,
"zoneid": self.zone_id,
"serviceofferingid": self.service_offering_id,
"userdata": user_data, "projectid": self.project_id}
client_mock.deployVirtualMachine.assert_called_with(create_data)
actual_user_data = manager.get_user_data("uuid_val")
client_mock.encode_user_data.assert_called_with(actual_user_data)
@mock.patch("uuid.uuid4")
def test_start_instance_public_network_name(self, uuid):
def cleanup():
del os.environ["CLOUDSTACK_PUBLIC_NETWORK_NAME"]
self.addCleanup(cleanup)
os.environ["CLOUDSTACK_PUBLIC_NETWORK_NAME"] = "NOPOWER"
self.set_api_envs()
self.addCleanup(self.del_api_envs)
self.set_vm_envs(project_id="project-123", network_ids="net-123")
self.addCleanup(self.del_vm_envs)
uuid.return_value = "uuid_val"
instance = storage.Instance(name="some_instance", units=[])
strg_mock = mock.Mock()
strg_mock.retrieve_instance.return_value = instance
client_mock = mock.Mock()
client_mock.deployVirtualMachine.return_value = {"id": "abc123",
"jobid": "qwe321"}
client_mock.queryAsyncJobResult.return_value = {"jobstatus": 1}
vm = {"id": "abc123", "nic": [{"ipaddress": "10.0.0.1", "networkname": "POWERNET"},
{"ipaddress": "192.168.1.1", "networkname": "NOPOWER"},
{"ipaddress": "172.16.42.1", "networkname": "KPOWER"}]}
client_mock.listVirtualMachines.return_value = {"virtualmachine": [vm]}
client_mock.encode_user_data.return_value = user_data = mock.Mock()
manager = cloudstack.CloudStackManager(storage=strg_mock)
manager.client = client_mock
got_instance = manager.start_instance("some_instance")
self.assertEqual(instance, got_instance)
self.assertEqual(1, len(instance.units))
unit = instance.units[0]
self.assertEqual("abc123", unit.id)
self.assertEqual("uuid_val", unit.secret)
self.assertEqual(instance, unit.instance)
self.assertEqual("192.168.1.1", unit.dns_name)
self.assertEqual("creating", unit.state)
strg_mock.retrieve_instance.assert_called_with(name="some_instance")
create_data = {"group": "feaas", "templateid": self.template_id,
"zoneid": self.zone_id,
"serviceofferingid": self.service_offering_id,
"userdata": user_data, "networkids": self.network_ids,
"projectid": self.project_id}
client_mock.deployVirtualMachine.assert_called_with(create_data)
actual_user_data = manager.get_user_data("uuid_val")
client_mock.encode_user_data.assert_called_with(actual_user_data)
@mock.patch("uuid.uuid4")
def test_start_instance_multi_nic_no_network_name(self, uuid):
self.set_api_envs()
self.addCleanup(self.del_api_envs)
self.set_vm_envs(project_id="project-123", network_ids="net-123")
self.addCleanup(self.del_vm_envs)
uuid.return_value = "uuid_val"
instance = storage.Instance(name="some_instance", units=[])
strg_mock = mock.Mock()
strg_mock.retrieve_instance.return_value = instance
client_mock = mock.Mock()
client_mock.deployVirtualMachine.return_value = {"id": "abc123",
"jobid": "qwe321"}
client_mock.queryAsyncJobResult.return_value = {"jobstatus": 1}
vm = {"id": "abc123", "nic": [{"ipaddress": "10.0.0.1", "networkname": "POWERNET"},
{"ipaddress": "192.168.1.1", "networkname": "NOPOWER"},
{"ipaddress": "172.16.42.1", "networkname": "KPOWER"}]}
client_mock.listVirtualMachines.return_value = {"virtualmachine": [vm]}
client_mock.encode_user_data.return_value = user_data = mock.Mock()
manager = cloudstack.CloudStackManager(storage=strg_mock)
manager.client = client_mock
got_instance = manager.start_instance("some_instance")
self.assertEqual(instance, got_instance)
self.assertEqual(1, len(instance.units))
unit = instance.units[0]
self.assertEqual("abc123", unit.id)
self.assertEqual("uuid_val", unit.secret)
self.assertEqual(instance, unit.instance)
self.assertEqual("172.16.42.1", unit.dns_name)
self.assertEqual("creating", unit.state)
strg_mock.retrieve_instance.assert_called_with(name="some_instance")
create_data = {"group": "feaas", "templateid": self.template_id,
"zoneid": self.zone_id,
"serviceofferingid": self.service_offering_id,
"userdata": user_data, "networkids": self.network_ids,
"projectid": self.project_id}
client_mock.deployVirtualMachine.assert_called_with(create_data)
actual_user_data = manager.get_user_data("uuid_val")
client_mock.encode_user_data.assert_called_with(actual_user_data)
def test_start_instance_timeout(self):
def cleanup():
del os.environ["CLOUDSTACK_MAX_TRIES"]
self.addCleanup(cleanup)
os.environ["CLOUDSTACK_MAX_TRIES"] = "1"
self.set_api_envs()
self.addCleanup(self.del_api_envs)
self.set_vm_envs()
self.addCleanup(self.del_vm_envs)
instance = storage.Instance(name="some_instance", units=[])
strg_mock = mock.Mock()
strg_mock.retrieve_instance.return_value = instance
client_mock = mock.Mock()
client_mock.deployVirtualMachine.return_value = {"id": "abc123",
"jobid": "qwe321"}
client_mock.queryAsyncJobResult.return_value = {"jobstatus": 0}
manager = cloudstack.CloudStackManager(storage=strg_mock)
manager.client = client_mock
with self.assertRaises(cloudstack.MaxTryExceededError) as cm:
manager.start_instance("some_instance")
exc = cm.exception
self.assertEqual(1, exc.max_tries)
def test_terminate_instance(self):
self.set_api_envs()
self.addCleanup(self.del_api_envs)
instance = storage.Instance(name="some_instance",
units=[storage.Unit(id="vm-123"),
storage.Unit(id="vm-456")])
strg_mock = mock.Mock()
strg_mock.retrieve_instance.return_value = instance
manager = cloudstack.CloudStackManager(storage=strg_mock)
manager.client = client_mock = mock.Mock()
got_instance = manager.terminate_instance("some_instance")
self.assertEqual(instance, got_instance)
expected_calls = [mock.call({"id": "vm-123"}), mock.call({"id": "vm-456"})]
self.assertEqual(expected_calls, client_mock.destroyVirtualMachine.call_args_list)
@mock.patch("sys.stderr")
def test_terminate_instance_ignores_exceptions(self, stderr):
self.set_api_envs()
self.addCleanup(self.del_api_envs)
instance = storage.Instance(name="some_instance",
units=[storage.Unit(id="vm-123"),
storage.Unit(id="vm-456")])
strg_mock = mock.Mock()
strg_mock.retrieve_instance.return_value = instance
client_mock = mock.Mock()
client_mock.destroyVirtualMachine.side_effect = Exception("wat", "wot")
manager = cloudstack.CloudStackManager(storage=strg_mock)
manager.client = client_mock
got_instance = manager.terminate_instance("some_instance")
self.assertEqual(instance, got_instance)
stderr.write.assert_called_with("[ERROR] Failed to terminate CloudStack VM: wat wot")
@mock.patch("uuid.uuid4")
def test_physical_scale_up(self, uuid):
self.set_api_envs()
self.addCleanup(self.del_api_envs)
self.set_vm_envs(project_id="project-123", network_ids="net-123")
self.addCleanup(self.del_vm_envs)
uuid.return_value = "uuid_val"
instance = storage.Instance(name="some_instance",
units=[storage.Unit(id="123")])
strg_mock = mock.Mock()
client_mock = mock.Mock()
client_mock.deployVirtualMachine.return_value = {"id": "abc123",
"jobid": "qwe321"}
client_mock.queryAsyncJobResult.return_value = {"jobstatus": 1}
vm = {"id": "qwe123", "nic": [{"ipaddress": "10.0.0.5"}]}
client_mock.listVirtualMachines.return_value = {"virtualmachine": [vm]}
client_mock.encode_user_data.return_value = user_data = mock.Mock()
manager = cloudstack.CloudStackManager(storage=strg_mock)
manager.client = client_mock
units = manager.physical_scale(instance, 2)
self.assertEqual(2, len(instance.units))
self.assertEqual(1, len(units))
unit = instance.units[1]
self.assertEqual("qwe123", unit.id)
self.assertEqual("uuid_val", unit.secret)
self.assertEqual(instance, unit.instance)
self.assertEqual("10.0.0.5", unit.dns_name)
self.assertEqual("creating", unit.state)
create_data = {"group": "feaas", "templateid": self.template_id,
"zoneid": self.zone_id,
"serviceofferingid": self.service_offering_id,
"userdata": user_data, "networkids": self.network_ids,
"projectid": self.project_id}
client_mock.deployVirtualMachine.assert_called_with(create_data)
actual_user_data = manager.get_user_data("uuid_val")
client_mock.encode_user_data.assert_called_with(actual_user_data)
def test_physical_scale_down(self):
self.set_api_envs()
self.addCleanup(self.del_api_envs)
units = [storage.Unit(id="vm-123"), storage.Unit(id="vm-456"),
storage.Unit(id="vm-789")]
instance = storage.Instance(name="some_instance", units=copy.deepcopy(units))
strg_mock = mock.Mock()
manager = cloudstack.CloudStackManager(storage=strg_mock)
manager.client = client_mock = mock.Mock()
got_units = manager.physical_scale(instance, 1)
self.assertEqual(1, len(instance.units))
self.assertEqual(2, len(got_units))
self.assertEqual("vm-789", instance.units[0].id)
expected_calls = [mock.call({"id": "vm-123"}), mock.call({"id": "vm-456"})]
self.assertEqual(expected_calls, client_mock.destroyVirtualMachine.call_args_list)
class MaxTryExceededErrorTestCase(unittest.TestCase):
def test_error_message(self):
exc = cloudstack.MaxTryExceededError(40)
self.assertEqual(40, exc.max_tries)
self.assertEqual(("exceeded 40 tries",), exc.args)
| {
"content_hash": "11230cc8a94f8890c09927b524f6ce7c",
"timestamp": "",
"source": "github",
"line_count": 389,
"max_line_length": 93,
"avg_line_length": 51.113110539845756,
"alnum_prop": 0.6087612533319922,
"repo_name": "tsuru/varnishapi",
"id": "384357854c9f6a57ccc025691cd3d09ad0d601c5",
"size": "20044",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_cloudstack_manager.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "461"
},
{
"name": "Python",
"bytes": "156890"
},
{
"name": "Shell",
"bytes": "775"
},
{
"name": "VCL",
"bytes": "431"
}
],
"symlink_target": ""
} |
from page_sets.login_helpers import facebook_login
from telemetry.page import page as page_module
class MobileFacebookPage(page_module.Page):
def __init__(self, url, page_set, shared_page_state_class, name='facebook'):
super(MobileFacebookPage, self).__init__(
url=url, page_set=page_set, name=name,
credentials_path='data/credentials.json',
shared_page_state_class=shared_page_state_class)
def RunNavigateSteps(self, action_runner):
facebook_login.LoginWithMobileSite(action_runner, 'facebook3',
self.credentials_path)
super(MobileFacebookPage, self).RunNavigateSteps(action_runner)
| {
"content_hash": "91ecdda7a83920a821ca62230b14cb0f",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 78,
"avg_line_length": 44.4,
"alnum_prop": 0.7012012012012012,
"repo_name": "danakj/chromium",
"id": "0dc1438e6d2be813acd798508cc62877f3dac02e",
"size": "829",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tools/perf/page_sets/mobile_facebook_page.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""Memoize the data produced by slow operations into Google storage.
Caches computations described in terms of command lines and input directories
or files, which yield a set of output files.
"""
import collections
import hashlib
import logging
import os
import platform
import shutil
import subprocess
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import pynacl.directory_storage
import pynacl.file_tools
import pynacl.gsd_storage
import pynacl.hashing_tools
import pynacl.log_tools
import pynacl.working_directory
import command
import substituter
CloudStorageItem = collections.namedtuple('CloudStorageItem',
['dir_item', 'log_url'])
class UserError(Exception):
pass
class HumanReadableSignature(object):
"""Accumator of signature information in human readable form.
A replacement for hashlib that collects the inputs for later display.
"""
def __init__(self):
self._items = []
def update(self, data):
"""Add an item to the signature."""
# Drop paranoid nulls for human readable output.
data = data.replace('\0', '')
self._items.append(data)
def hexdigest(self):
"""Fake version of hexdigest that returns the inputs."""
return ('*' * 30 + ' PACKAGE SIGNATURE ' + '*' * 30 + '\n' +
'\n'.join(self._items) + '\n' +
'=' * 70 + '\n')
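# A minimal usage sketch: HumanReadableSignature is passed as the ``hasher``
# argument of Once.BuildSignature() so the signature inputs are collected as
# readable text instead of being hashed (this is how Run() fills the optional
# signature_file). The names ``once_instance`` and ``my_commands`` below are
# placeholders:
#
#   sig_text = once_instance.BuildSignature(
#       'my_package', inputs={'src': 'path/to/src'}, commands=my_commands,
#       hasher=HumanReadableSignature())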
class Once(object):
"""Class to memoize slow operations."""
def __init__(self, storage, use_cached_results=True, cache_results=True,
print_url=None, system_summary=None, extra_paths={}):
"""Constructor.
Args:
      storage: A storage layer to read/write from (GSDStorage).
use_cached_results: Flag indicating that cached computation results
should be used when possible.
cache_results: Flag that indicates if successful computations should be
written to the cache.
print_url: Function that accepts a CloudStorageItem for printing URL
results, or None if no printing is needed.
extra_paths: Extra substitution paths that can be used by commands.
"""
self._storage = storage
self._directory_storage = pynacl.directory_storage.DirectoryStorageAdapter(
storage
)
self._use_cached_results = use_cached_results
self._cache_results = cache_results
self._cached_cloud_items = {}
self._print_url = print_url
self._system_summary = system_summary
self._path_hash_cache = {}
self._extra_paths = extra_paths
def KeyForOutput(self, package, output_hash):
"""Compute the key to store a given output in the data-store.
Args:
package: Package name.
output_hash: Stable hash of the package output.
Returns:
Key that this instance of the package output should be stored/retrieved.
"""
return 'object/%s_%s.tgz' % (package, output_hash)
def KeyForBuildSignature(self, build_signature):
"""Compute the key to store a computation result in the data-store.
Args:
build_signature: Stable hash of the computation.
Returns:
Key that this instance of the computation result should be
stored/retrieved.
"""
return 'computed/%s.txt' % build_signature
def KeyForLog(self, package, output_hash):
"""Compute the key to store a given log file in the data-store.
Args:
package: Package name.
output_hash: Stable hash of the package output.
Returns:
Key that this instance of the package log should be stored/retrieved.
"""
return 'log/%s_%s.log' % (package, output_hash)
def GetLogFile(self, work_dir, package):
"""Returns the local log file for a given package.
Args:
work_dir: The work directory for the package.
package: The package name.
Returns:
Path to the local log file within the work directory.
"""
return os.path.join(work_dir, '%s.log' % package)
def WriteOutputFromHash(self, work_dir, package, out_hash, output):
"""Write output from the cache.
Args:
work_dir: Working directory path.
package: Package name (for tgz name).
out_hash: Hash of desired output.
output: Output path.
Returns:
CloudStorageItem on success, None if not.
"""
key = self.KeyForOutput(package, out_hash)
dir_item = self._directory_storage.GetDirectory(key, output)
if not dir_item:
logging.debug('Failed to retrieve %s' % key)
return None
if pynacl.hashing_tools.StableHashPath(output) != out_hash:
logging.warning('Object does not match expected hash, '
'has hashing method changed?')
return None
log_key = self.KeyForLog(package, out_hash)
log_file = self.GetLogFile(work_dir, package)
pynacl.file_tools.RemoveFile(log_file)
log_url = self._storage.GetFile(log_key, log_file)
return CloudStorageItem(dir_item, log_url)
def _ProcessCloudItem(self, package, cloud_item):
"""Processes cached directory storage items.
Args:
package: Package name for the cached directory item.
cloud_item: CloudStorageItem representing a memoized item in the cloud.
"""
# Store the cached URL as a tuple for book keeping.
self._cached_cloud_items[package] = cloud_item
# If a print URL function has been specified, print the URL now.
if self._print_url is not None:
self._print_url(cloud_item)
def WriteResultToCache(self, work_dir, package, build_signature, output):
"""Cache a computed result by key.
Also prints URLs when appropriate.
Args:
work_dir: work directory for the package builder.
package: Package name (for tgz name).
build_signature: The input hash of the computation.
output: A path containing the output of the computation.
"""
if not self._cache_results:
return
out_hash = pynacl.hashing_tools.StableHashPath(output)
try:
output_key = self.KeyForOutput(package, out_hash)
# Try to get an existing copy in a temporary directory.
wd = pynacl.working_directory.TemporaryWorkingDirectory()
with wd as temp_dir:
temp_output = os.path.join(temp_dir, 'out')
dir_item = self._directory_storage.GetDirectory(output_key, temp_output)
log_key = self.KeyForLog(package, out_hash)
log_file = self.GetLogFile(work_dir, package)
log_url = None
if dir_item is None:
# Isn't present. Cache the computed result instead.
dir_item = self._directory_storage.PutDirectory(output, output_key)
if os.path.isfile(log_file):
log_url = self._storage.PutFile(log_file, log_key)
logging.info('Computed fresh result and cached it.')
else:
# Cached version is present. Replace the current output with that.
if self._use_cached_results:
pynacl.file_tools.RemoveDirectoryIfPresent(output)
shutil.move(temp_output, output)
pynacl.file_tools.RemoveFile(log_file)
log_url = self._storage.GetFile(log_key, log_file)
logging.info('Recomputed result matches cached value, '
'using cached value instead.')
else:
log_key_exists = self._storage.Exists(log_key)
if log_key_exists:
log_url = log_key_exists
# Upload an entry mapping from computation input to output hash.
self._storage.PutData(
out_hash, self.KeyForBuildSignature(build_signature))
cloud_item = CloudStorageItem(dir_item, log_url)
self._ProcessCloudItem(package, cloud_item)
except pynacl.gsd_storage.GSDStorageError:
logging.info('Failed to cache result.')
raise
def ReadMemoizedResultFromCache(self, work_dir, package,
build_signature, output):
"""Read a cached result (if it exists) from the cache.
Also prints URLs when appropriate.
Args:
work_dir: Working directory for the build.
package: Package name (for tgz name).
build_signature: Build signature of the computation.
output: Output path.
Returns:
Boolean indicating successful retrieval.
"""
    # Check if it's in the cache.
if self._use_cached_results:
out_hash = self._storage.GetData(
self.KeyForBuildSignature(build_signature))
if out_hash is not None:
cloud_item = self.WriteOutputFromHash(work_dir, package,
out_hash, output)
if cloud_item is not None:
logging.info('Retrieved cached result.')
self._ProcessCloudItem(package, cloud_item)
return True
return False
def GetCachedCloudItems(self):
"""Returns the complete list of all cached cloud items for this run."""
return self._cached_cloud_items.values()
def GetCachedCloudItemForPackage(self, package):
"""Returns cached cloud item for package or None if not processed."""
return self._cached_cloud_items.get(package, None)
def Run(self, package, inputs, output, commands, cmd_options=None,
working_dir=None, memoize=True, signature_file=None, subdir=None):
"""Run an operation once, possibly hitting cache.
Args:
package: Name of the computation/module.
inputs: A dict of names mapped to files that are inputs.
output: An output directory.
commands: A list of command.Command objects to run.
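      cmd_options: Options object passed to each command's CheckRunCond().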
working_dir: Working directory to use, or None for a temp dir.
      memoize: Boolean indicating that the result should be memoized.
signature_file: File to write human readable build signatures to or None.
subdir: If not None, use this directory instead of the output dir as the
substituter's output path. Must be a subdirectory of output.
"""
if working_dir is None:
wdm = pynacl.working_directory.TemporaryWorkingDirectory()
else:
wdm = pynacl.working_directory.FixedWorkingDirectory(working_dir)
pynacl.file_tools.MakeDirectoryIfAbsent(output)
nonpath_subst = { 'package': package }
with wdm as work_dir:
# Compute the build signature with modified inputs.
build_signature = self.BuildSignature(
package, inputs=inputs, commands=commands)
# Optionally write human readable version of signature.
if signature_file:
signature_file.write(self.BuildSignature(
package, inputs=inputs, commands=commands,
hasher=HumanReadableSignature()))
signature_file.flush()
# We're done if it's in the cache.
if (memoize and self.ReadMemoizedResultFromCache(work_dir, package,
build_signature,
output)):
return
if subdir:
assert subdir.startswith(output)
# Filter out commands that have a run condition of False.
# This must be done before any commands are invoked in case the run
# conditions rely on any pre-existing states.
commands = [command for command in commands
if command.CheckRunCond(cmd_options)]
# Create a logger that will save the log for each command.
# This logger will process any messages and then pass the results
# up to the base logger.
base_logger = pynacl.log_tools.GetConsoleLogger()
cmd_logger = base_logger.getChild('OnceCmdLogger')
cmd_logger.setLevel(logging.DEBUG)
log_file = self.GetLogFile(work_dir, package)
file_log_handler = logging.FileHandler(log_file, 'wb')
file_log_handler.setLevel(logging.DEBUG)
file_log_handler.setFormatter(
logging.Formatter(fmt='[%(levelname)s - %(asctime)s] %(message)s'))
cmd_logger.addHandler(file_log_handler)
# Log some helpful information
cmd_logger.propagate = False
cmd_logger.debug('Hostname: %s', platform.node())
cmd_logger.debug('Machine: %s', platform.machine())
cmd_logger.debug('Platform: %s', sys.platform)
cmd_logger.propagate = True
for command in commands:
paths = inputs.copy()
# Add the extra paths supplied by our caller, and the original working
# directory
paths.update(self._extra_paths)
paths.update({'work_dir': work_dir})
paths['output'] = subdir if subdir else output
nonpath_subst['build_signature'] = build_signature
subst = substituter.Substituter(work_dir, paths, nonpath_subst)
command.Invoke(cmd_logger, subst)
# Uninstall the file log handler
cmd_logger.removeHandler(file_log_handler)
file_log_handler.close()
# Confirm that we aren't hitting something we've cached.
for path in self._path_hash_cache:
if not os.path.relpath(output, path).startswith(os.pardir + os.sep):
raise UserError(
'Package %s outputs to a directory already used as an input: %s' %
(package, path))
if memoize:
self.WriteResultToCache(work_dir, package, build_signature, output)
def SystemSummary(self):
"""Gather a string describing intrinsic properties of the current machine.
Ideally this would capture anything relevant about the current machine that
would cause build output to vary (other than build recipe + inputs).
"""
if self._system_summary is not None:
return self._system_summary
# Note there is no attempt to canonicalize these values. If two
# machines that would in fact produce identical builds differ in
# these values, it just means that a superfluous build will be
# done once to get the mapping from new input hash to preexisting
# output hash into the cache.
    assert len(sys.platform) != 0 and len(platform.machine()) != 0
# Use environment from command so we can access MinGW on windows.
env = command.PlatformEnvironment([])
def GetCompilerVersion(compiler_name):
try:
compiler_file = pynacl.file_tools.Which(
compiler_name, paths=env['PATH'].split(os.pathsep))
p = subprocess.Popen([compiler_file, '-v'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=env)
_, compiler_version = p.communicate()
assert p.returncode == 0
except pynacl.file_tools.ExecutableNotFound:
compiler_version = 0
return compiler_version
items = [
('platform', sys.platform),
('machine', platform.machine()),
('gcc-v', GetCompilerVersion('gcc')),
('arm-gcc-v', GetCompilerVersion('arm-linux-gnueabihf-gcc')),
]
self._system_summary = str(items)
return self._system_summary
def BuildSignature(self, package, inputs, commands, hasher=None):
"""Compute a total checksum for a computation.
The computed hash includes system properties, inputs, and the commands run.
Args:
package: The name of the package computed.
inputs: A dict of names -> files/directories to be included in the
inputs set.
commands: A list of command.Command objects describing the commands run
for this computation.
hasher: Optional hasher to use.
Returns:
A hex formatted sha1 to use as a computation key or a human readable
signature.
"""
if hasher is None:
h = hashlib.sha1()
else:
h = hasher
h.update('package:' + package)
h.update('summary:' + self.SystemSummary())
for command in commands:
h.update('command:')
h.update(str(command))
for key in sorted(inputs.keys()):
h.update('item_name:' + key + '\x00')
if inputs[key] in self._path_hash_cache:
path_hash = self._path_hash_cache[inputs[key]]
else:
path_hash = 'item:' + pynacl.hashing_tools.StableHashPath(inputs[key])
self._path_hash_cache[inputs[key]] = path_hash
h.update(path_hash)
return h.hexdigest()
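# A rough usage sketch of the class above. ``my_storage`` and ``my_commands``
# are placeholders; the storage object is expected to provide the GSDStorage
# interface used throughout this module:
#
#   once = Once(storage=my_storage, use_cached_results=True, cache_results=True)
#   once.Run('my_package',
#            inputs={'src': 'path/to/src'},
#            output='path/to/output',
#            commands=my_commands)
#
# Run() only recomputes the package when the build signature (system summary,
# input hashes and command strings) has not been cached before.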
| {
"content_hash": "45f778ad380a2fa00560c114ae74d9a4",
"timestamp": "",
"source": "github",
"line_count": 433,
"max_line_length": 80,
"avg_line_length": 36.94688221709007,
"alnum_prop": 0.654769346168271,
"repo_name": "mxOBS/deb-pkg_trusty_chromium-browser",
"id": "a3538e5a53b746e4e756c2b394414e748a1491e2",
"size": "16188",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "native_client/toolchain_build/once.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "8402"
},
{
"name": "Assembly",
"bytes": "230130"
},
{
"name": "Batchfile",
"bytes": "34966"
},
{
"name": "C",
"bytes": "12435900"
},
{
"name": "C++",
"bytes": "264378706"
},
{
"name": "CMake",
"bytes": "27829"
},
{
"name": "CSS",
"bytes": "795726"
},
{
"name": "Dart",
"bytes": "74976"
},
{
"name": "Emacs Lisp",
"bytes": "2360"
},
{
"name": "Go",
"bytes": "31783"
},
{
"name": "Groff",
"bytes": "5283"
},
{
"name": "HTML",
"bytes": "19491230"
},
{
"name": "Java",
"bytes": "7637875"
},
{
"name": "JavaScript",
"bytes": "12723911"
},
{
"name": "LLVM",
"bytes": "1169"
},
{
"name": "Logos",
"bytes": "6893"
},
{
"name": "Lua",
"bytes": "14392"
},
{
"name": "Makefile",
"bytes": "208315"
},
{
"name": "Objective-C",
"bytes": "1460032"
},
{
"name": "Objective-C++",
"bytes": "7760068"
},
{
"name": "PLpgSQL",
"bytes": "175360"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "427212"
},
{
"name": "Python",
"bytes": "11447382"
},
{
"name": "Ragel in Ruby Host",
"bytes": "104846"
},
{
"name": "Scheme",
"bytes": "10604"
},
{
"name": "Shell",
"bytes": "1208350"
},
{
"name": "Standard ML",
"bytes": "4965"
},
{
"name": "nesC",
"bytes": "18335"
}
],
"symlink_target": ""
} |
from collections import defaultdict
from operator import itemgetter
from flask import flash, jsonify, redirect, request, session
from sqlalchemy import func, inspect
from sqlalchemy.orm import joinedload, lazyload
from werkzeug.exceptions import BadRequest, Forbidden, NotFound
from indico.core.db import db
from indico.core.logger import Logger
from indico.modules.events.controllers.base import RHDisplayEventBase
from indico.modules.events.management.controllers import RHManageEventBase
from indico.modules.vc.exceptions import VCRoomError, VCRoomNotFoundError
from indico.modules.vc.forms import VCRoomListFilterForm
from indico.modules.vc.models.vc_rooms import VCRoom, VCRoomEventAssociation, VCRoomLinkType, VCRoomStatus
from indico.modules.vc.notifications import notify_created
from indico.modules.vc.util import find_event_vc_rooms, get_managed_vc_plugins, get_vc_plugins, resolve_title
from indico.modules.vc.views import WPVCEventPage, WPVCManageEvent, WPVCService
from indico.util.date_time import as_utc, get_day_end, get_day_start, now_utc
from indico.util.i18n import _
from indico.util.iterables import group_list
from indico.web.flask.util import url_for
from indico.web.forms.base import FormDefaults
from indico.web.rh import RHProtected
from indico.web.util import _pop_injected_js, jsonify_data, jsonify_template
def process_vc_room_association(plugin, event, vc_room, form, event_vc_room=None, allow_same_room=False):
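    """Create or update the association between a VC room and an event.
    Returns the VCRoomEventAssociation on success, or None (after flashing an
    error and rolling back the session) when the link object already has a
    videoconference attached or the room is already attached to the event.
    """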
# disable autoflush, so that the new event_vc_room does not influence the result
with db.session.no_autoflush:
if event_vc_room is None:
event_vc_room = VCRoomEventAssociation()
plugin.update_data_association(event, vc_room, event_vc_room, form.data)
existing = set()
if event_vc_room.link_object is not None:
# check whether there is a room-event association already present
# for the given event, room and plugin
q = (VCRoomEventAssociation.query
.filter(VCRoomEventAssociation.event == event,
VCRoomEventAssociation.link_object == event_vc_room.link_object)
.join(VCRoom))
if allow_same_room:
q = q.filter(VCRoom.id != vc_room.id)
existing = {x.vc_room for x in q}
if event_vc_room.link_type != VCRoomLinkType.event and existing:
db.session.rollback()
flash(_("There is already a videoconference attached to '{link_object_title}'.").format(
link_object_title=resolve_title(event_vc_room.link_object)), 'error')
return None
elif event_vc_room.link_type == VCRoomLinkType.event and vc_room in existing:
db.session.rollback()
flash(_('This {plugin_name} room is already attached to the event.').format(plugin_name=plugin.friendly_name),
'error')
return None
else:
return event_vc_room
class RHVCManageEventBase(RHManageEventBase):
pass
class RHEventVCRoomMixin:
normalize_url_spec = {
'locators': {
lambda self: self.event_vc_room
}
}
def _process_args(self):
self.event_vc_room = VCRoomEventAssociation.get_or_404(request.view_args['event_vc_room_id'])
self.vc_room = self.event_vc_room.vc_room
class RHVCManageEvent(RHVCManageEventBase):
"""List the available videoconference rooms."""
def _process(self):
room_event_assocs = VCRoomEventAssociation.find_for_event(self.event, include_hidden=True,
include_deleted=True).all()
event_vc_rooms = [event_vc_room for event_vc_room in room_event_assocs if event_vc_room.vc_room.plugin]
return WPVCManageEvent.render_template('manage_event.html', self.event,
event_vc_rooms=event_vc_rooms, plugins=list(get_vc_plugins().values()))
class RHVCManageEventSelectService(RHVCManageEventBase):
"""
List available videoconference plugins to create a new
videoconference room.
"""
def _process(self):
action = request.args.get('vc_room_action', '.manage_vc_rooms_create')
attach = request.args.get('attach', '')
return jsonify_template('vc/manage_event_select.html', event=self.event, vc_room_action=action,
plugins=list(get_vc_plugins().values()), attach=attach)
class RHVCManageEventCreateBase(RHVCManageEventBase):
def _process_args(self):
RHVCManageEventBase._process_args(self)
try:
self.plugin = get_vc_plugins()[request.view_args['service']]
except KeyError:
raise NotFound
class RHVCManageEventCreate(RHVCManageEventCreateBase):
"""Load the form for the selected VC plugin."""
def _process(self):
if not self.plugin.can_manage_vc_rooms(session.user, self.event):
flash(_('You are not allowed to create {plugin_name} rooms for this event.').format(
plugin_name=self.plugin.friendly_name), 'error')
raise Forbidden
form = self.plugin.create_form(event=self.event)
if form.validate_on_submit():
vc_room = VCRoom(created_by_user=session.user)
vc_room.type = self.plugin.service_name
vc_room.status = VCRoomStatus.created
event_vc_room = process_vc_room_association(self.plugin, self.event, vc_room, form)
if not event_vc_room:
return jsonify_data(flash=False)
with db.session.no_autoflush:
self.plugin.update_data_vc_room(vc_room, form.data, is_new=True)
try:
# avoid flushing the incomplete vc room to the database
with db.session.no_autoflush:
self.plugin.create_room(vc_room, self.event)
notify_created(self.plugin, vc_room, event_vc_room, self.event, session.user)
except VCRoomError as err:
if err.field is None:
raise
field = getattr(form, err.field)
field.errors.append(str(err))
db.session.rollback() # otherwise the incomplete vc room would be added to the db!
else:
db.session.add(vc_room)
flash(_("{plugin_name} room '{room.name}' created").format(
plugin_name=self.plugin.friendly_name, room=vc_room), 'success')
return jsonify_data(flash=False)
form_html = self.plugin.render_form(plugin=self.plugin, event=self.event, form=form,
skip_fields=form.skip_fields | {'name'})
return jsonify(html=form_html, js=_pop_injected_js())
class RHVCSystemEventBase(RHEventVCRoomMixin, RHVCManageEventBase):
def _process_args(self):
RHVCManageEventBase._process_args(self)
RHEventVCRoomMixin._process_args(self)
if self.vc_room.type != request.view_args['service']:
raise NotFound
self.plugin = self.vc_room.plugin
class RHVCManageEventModify(RHVCSystemEventBase):
"""Modify an existing VC room."""
def _process(self):
if not self.plugin.can_manage_vc_rooms(session.user, self.event):
flash(_('You are not allowed to modify {} rooms for this event.').format(self.plugin.friendly_name),
'error')
raise Forbidden
form = self.plugin.create_form(self.event,
existing_vc_room=self.vc_room,
existing_event_vc_room=self.event_vc_room)
if form.validate_on_submit():
self.plugin.update_data_vc_room(self.vc_room, form.data)
event_vc_room = process_vc_room_association(
self.plugin, self.event, self.vc_room, form, event_vc_room=self.event_vc_room, allow_same_room=True)
if not event_vc_room:
return jsonify_data(flash=False)
self.vc_room.modified_dt = now_utc()
try:
self.plugin.update_room(self.vc_room, self.event)
except VCRoomNotFoundError as err:
Logger.get('modules.vc').warning('Videoconference %r not found. Setting it as deleted.', self.vc_room)
self.vc_room.status = VCRoomStatus.deleted
flash(str(err), 'error')
return jsonify_data(flash=False)
except VCRoomError as err:
if err.field is None:
raise
field = getattr(form, err.field)
field.errors.append(str(err))
db.session.rollback()
else:
# TODO
# notify_modified(self.vc_room, self.event, session.user)
flash(_("{plugin_name} room '{room.name}' updated").format(
plugin_name=self.plugin.friendly_name, room=self.vc_room), 'success')
return jsonify_data(flash=False)
form_html = self.plugin.render_form(plugin=self.plugin, event=self.event, form=form,
existing_vc_room=self.vc_room,
skip_fields=form.skip_fields | {'name'})
return jsonify(html=form_html, js=_pop_injected_js())
class RHVCManageEventRefresh(RHVCSystemEventBase):
"""Refresh an existing VC room, fetching information from the VC system."""
def _process(self):
if not self.plugin.can_manage_vc_rooms(session.user, self.event):
flash(_('You are not allowed to refresh {plugin_name} rooms for this event.').format(
plugin_name=self.plugin.friendly_name), 'error')
raise Forbidden
Logger.get('modules.vc').info('Refreshing videoconference %r from event %r', self.vc_room, self.event)
try:
self.plugin.refresh_room(self.vc_room, self.event)
except VCRoomNotFoundError as err:
Logger.get('modules.vc').warning('Videoconference %r not found. Setting it as deleted.', self.vc_room)
self.vc_room.status = VCRoomStatus.deleted
flash(str(err), 'error')
return redirect(url_for('.manage_vc_rooms', self.event))
flash(_("{plugin_name} room '{room.name}' refreshed").format(
plugin_name=self.plugin.friendly_name, room=self.vc_room), 'success')
return redirect(url_for('.manage_vc_rooms', self.event))
class RHVCManageEventRemove(RHVCSystemEventBase):
"""Remove an existing VC room."""
def _process(self):
if not self.plugin.can_manage_vc_rooms(session.user, self.event):
flash(_('You are not allowed to remove {} rooms from this event.').format(self.plugin.friendly_name),
'error')
raise Forbidden
delete_all = request.args.get('delete_all') == '1'
self.event_vc_room.delete(session.user, delete_all=delete_all)
flash(_("{plugin_name} room '{room.name}' removed").format(
plugin_name=self.plugin.friendly_name, room=self.vc_room), 'success')
return redirect(url_for('.manage_vc_rooms', self.event))
class RHVCEventPage(RHDisplayEventBase):
"""List the VC rooms in an event page."""
def _process(self):
event_vc_rooms = [event_vc_room
for event_vc_room in VCRoomEventAssociation.find_for_event(self.event).all()
if event_vc_room.vc_room.plugin]
vc_plugins_available = bool(get_vc_plugins())
linked_to = defaultdict(lambda: defaultdict(list))
for event_vc_room in event_vc_rooms:
linked_to[event_vc_room.link_type.name][event_vc_room.link_object].append(event_vc_room)
return WPVCEventPage.render_template('event_vc.html', self.event,
event_vc_rooms=event_vc_rooms, linked_to=linked_to,
vc_plugins_available=vc_plugins_available)
class RHVCManageAttach(RHVCManageEventCreateBase):
"""Attach a room to the event."""
def _process(self):
defaults = FormDefaults(self.plugin.get_vc_room_attach_form_defaults(self.event))
form = self.plugin.vc_room_attach_form(prefix='vc-', obj=defaults, event=self.event,
service=self.plugin.service_name)
if form.validate_on_submit():
vc_room = form.data['room']
if not self.plugin.can_manage_vc_rooms(session.user, self.event):
flash(_('You are not allowed to attach {plugin_name} rooms to this event.').format(
plugin_name=self.plugin.friendly_name), 'error')
elif not self.plugin.can_manage_vc_room(session.user, vc_room):
flash(_("You are not authorized to attach the room '{0}'").format(vc_room.name), 'error')
else:
event_vc_room = process_vc_room_association(self.plugin, self.event, vc_room, form)
if event_vc_room:
flash(_('The room has been attached to the event.'), 'success')
db.session.add(event_vc_room)
return jsonify_data(flash=False)
return jsonify_template('vc/attach_room.html', event=self.event, form=form,
skip_fields=form.conditional_fields | {'room'},
plugin=self.plugin)
class RHVCManageSearch(RHVCManageEventCreateBase):
"""Search for a room based on its name."""
def _process_args(self):
RHVCManageEventCreateBase._process_args(self)
self.query = request.args.get('q', '')
if len(self.query) < 3:
raise BadRequest('A query has to be provided, with at least 3 characters')
def _iter_allowed_rooms(self):
query = (db.session.query(VCRoom, func.count(VCRoomEventAssociation.id).label('event_count'))
.filter(func.lower(VCRoom.name).contains(self.query.lower()), VCRoom.status != VCRoomStatus.deleted,
VCRoom.type == self.plugin.service_name)
.join(VCRoomEventAssociation)
# Plugins might add eager-loaded extensions to the table - since we cannot group by them
# we need to make sure everything is lazy-loaded here.
                 .options(*(lazyload(r) for r in inspect(VCRoom).relationships.keys()),
joinedload('events').joinedload('event').joinedload('acl_entries'))
.group_by(VCRoom.id)
.order_by(db.desc('event_count'))
.limit(10))
return ((room, count) for room, count in query if room.plugin.can_manage_vc_room(session.user, room))
def _process(self):
result = [{'id': room.id, 'name': room.name} for room, count in self._iter_allowed_rooms()]
return jsonify(result)
class RHVCRoomList(RHProtected):
"""Provide a list of videoconference rooms."""
def _check_access(self):
RHProtected._check_access(self)
if not get_managed_vc_plugins(session.user):
raise Forbidden
def _process(self):
form = VCRoomListFilterForm(request.args, csrf_enabled=False)
results = None
if request.args and form.validate():
reverse = form.direction.data == 'desc'
from_dt = as_utc(get_day_start(form.start_date.data)) if form.start_date.data else None
to_dt = as_utc(get_day_end(form.end_date.data)) if form.end_date.data else None
results = find_event_vc_rooms(from_dt=from_dt, to_dt=to_dt, distinct=True)
results = group_list((r for r in results if r.event),
key=lambda r: r.event.start_dt.date(),
sort_by=lambda r: r.event.start_dt,
sort_reverse=reverse)
results = dict(sorted(results.items(), key=itemgetter(0), reverse=reverse))
return WPVCService.render_template('vc_room_list.html', form=form, results=results,
action=url_for('.vc_room_list'))
| {
"content_hash": "2124571d29db876b674a99d07931101f",
"timestamp": "",
"source": "github",
"line_count": 354,
"max_line_length": 118,
"avg_line_length": 45.706214689265536,
"alnum_prop": 0.6164400494437577,
"repo_name": "ThiefMaster/indico",
"id": "24e93a5c20559cda16a0117bd86d6f8429878e38",
"size": "16394",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "indico/modules/vc/controllers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "34704"
},
{
"name": "HTML",
"bytes": "1411006"
},
{
"name": "JavaScript",
"bytes": "2083786"
},
{
"name": "Mako",
"bytes": "1527"
},
{
"name": "Python",
"bytes": "5133951"
},
{
"name": "SCSS",
"bytes": "476568"
},
{
"name": "Shell",
"bytes": "3877"
},
{
"name": "TeX",
"bytes": "23327"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
} |
import logging
import sys
from kombu.tests.utils import redirect_stdouts
from celery import beat
from celery import platforms
from celery.app import app_or_default
from celery.bin import celerybeat as celerybeat_bin
from celery.apps import beat as beatapp
from celery.utils.compat import defaultdict
from celery.tests.utils import AppCase
class MockedShelveModule(object):
shelves = defaultdict(lambda: {})
def open(self, filename, *args, **kwargs):
return self.shelves[filename]
mocked_shelve = MockedShelveModule()
class MockService(beat.Service):
started = False
in_sync = False
persistence = mocked_shelve
def start(self):
self.__class__.started = True
def sync(self):
self.__class__.in_sync = True
class MockBeat(beatapp.Beat):
running = False
def run(self):
self.__class__.running = True
class MockBeat2(beatapp.Beat):
Service = MockService
def install_sync_handler(self, b):
pass
class MockBeat3(beatapp.Beat):
Service = MockService
def install_sync_handler(self, b):
raise TypeError("xxx")
class test_Beat(AppCase):
def test_loglevel_string(self):
b = beatapp.Beat(loglevel="DEBUG")
self.assertEqual(b.loglevel, logging.DEBUG)
b2 = beatapp.Beat(loglevel=logging.DEBUG)
self.assertEqual(b2.loglevel, logging.DEBUG)
def test_init_loader(self):
b = beatapp.Beat()
b.init_loader()
def test_process_title(self):
b = beatapp.Beat()
b.set_process_title()
def test_run(self):
b = MockBeat2()
MockService.started = False
b.run()
self.assertTrue(MockService.started)
def psig(self, fun, *args, **kwargs):
handlers = {}
def i(sig, handler):
handlers[sig] = handler
p, platforms.install_signal_handler = \
platforms.install_signal_handler, i
try:
fun(*args, **kwargs)
return handlers
finally:
platforms.install_signal_handler = p
def test_install_sync_handler(self):
b = beatapp.Beat()
clock = MockService()
MockService.in_sync = False
handlers = self.psig(b.install_sync_handler, clock)
self.assertRaises(SystemExit, handlers["SIGINT"],
"SIGINT", object())
self.assertTrue(MockService.in_sync)
MockService.in_sync = False
def test_setup_logging(self):
b = beatapp.Beat()
b.redirect_stdouts = False
b.setup_logging()
self.assertRaises(AttributeError, getattr, sys.stdout, "logger")
@redirect_stdouts
def test_logs_errors(self, stdout, stderr):
class MockLogger(object):
_critical = []
def debug(self, *args, **kwargs):
pass
def critical(self, msg, *args, **kwargs):
self._critical.append(msg)
logger = MockLogger()
b = MockBeat3(socket_timeout=None)
b.start_scheduler(logger)
self.assertTrue(logger._critical)
@redirect_stdouts
def test_use_pidfile(self, stdout, stderr):
from celery import platforms
class create_pidlock(object):
instance = [None]
def __init__(self, file):
self.file = file
self.instance[0] = self
def acquire(self):
self.acquired = True
class Object(object):
def release(self):
pass
return Object()
prev, platforms.create_pidlock = platforms.create_pidlock, \
create_pidlock
try:
b = MockBeat2(pidfile="pidfilelockfilepid", socket_timeout=None)
b.start_scheduler()
self.assertTrue(create_pidlock.instance[0].acquired)
finally:
platforms.create_pidlock = prev
class MockDaemonContext(object):
opened = False
closed = False
def open(self):
self.__class__.opened = True
def close(self):
self.__class__.closed = True
def create_daemon_context(*args, **kwargs):
context = MockDaemonContext()
return context, context.close
class test_div(AppCase):
def setup(self):
self.prev, beatapp.Beat = beatapp.Beat, MockBeat
self.ctx, celerybeat_bin.create_daemon_context = \
celerybeat_bin.create_daemon_context, create_daemon_context
def teardown(self):
beatapp.Beat = self.prev
def test_main(self):
sys.argv = [sys.argv[0], "-s", "foo"]
try:
celerybeat_bin.main()
self.assertTrue(MockBeat.running)
finally:
MockBeat.running = False
def test_detach(self):
cmd = celerybeat_bin.BeatCommand()
cmd.app = app_or_default()
cmd.run(detach=True)
self.assertTrue(MockDaemonContext.opened)
self.assertTrue(MockDaemonContext.closed)
def test_parse_options(self):
cmd = celerybeat_bin.BeatCommand()
cmd.app = app_or_default()
options, args = cmd.parse_options("celerybeat", ["-s", "foo"])
self.assertEqual(options.schedule, "foo")
| {
"content_hash": "10b4deed404d8c52d2192ca972707c36",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 76,
"avg_line_length": 26.054455445544555,
"alnum_prop": 0.5971879156374691,
"repo_name": "frac/celery",
"id": "447fb92c0b015d43c047325cb8e6b84975338afc",
"size": "5263",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "celery/tests/test_bin/test_celerybeat.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "975783"
},
{
"name": "Shell",
"bytes": "30099"
}
],
"symlink_target": ""
} |
class datastore_problem_desc:
def __init__(self, datastores):
self.datastores = datastores
def generate_problem(self):
problem = {}
problem["subject"] = "vsphere datastores"
problem["options"] = self.__get_options()
problem["columns"] = self.__get_columns()
return problem
def __get_options(self):
data_options = []
for datastore in self.datastores:
data_dict = { "values": {
"capacityMB" : datastore['capacityMB'],
"freeSpaceMB" : datastore['freeSpaceMB']
},
"name": datastore['datastore_name'],
"key": datastore['_id']
}
data_options.append(data_dict)
return data_options
def __get_columns(self):
columns = [ {
"format": "number:0",
"type": "numeric",
"full_name": "Datastore Total Capacity",
"is_objective": "true",
"range": {
"low": 1000,
"high": 100000
},
"goal": "max",
"key": "capacityMB"
},
{
"format": "number:0",
"type": "numeric",
"full_name": "Datastore Free Space",
"is_objective": "true",
"range": {
"low": 100,
"high": 100000
},
"goal": "min",
"key": "freeSpaceMB"
} ]
        return columns
| {
"content_hash": "9454613ba2313230c25c7abc1d168ea2",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 76,
"avg_line_length": 34.25454545454546,
"alnum_prop": 0.3535031847133758,
"repo_name": "rickyaeztor/watson-virtual-infra-mgt-system",
"id": "6b158cccc1f44a4739a998deb6f494660d182c10",
"size": "1886",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Models/datastore_problem_desc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6001"
},
{
"name": "HTML",
"bytes": "10412"
},
{
"name": "JavaScript",
"bytes": "1"
},
{
"name": "Python",
"bytes": "15600"
}
],
"symlink_target": ""
} |
import os
import random
import unittest
import numpy as np
from pkg_resources import resource_filename
from .. import pybgen
from .truths import truths
__all__ = ["reader_tests"]
class ReaderTests(unittest.TestCase):
def setUp(self):
# Getting the truth for this file
self.truths = truths["dosage"][self.truth_filename]
# Reading the BGEN files
bgen_fn = resource_filename(__name__, self.bgen_filename)
self.bgen = pybgen.PyBGEN(bgen_fn)
def tearDown(self):
# Closing the object
self.bgen.close()
def _compare_variant(self, expected, observed):
"""Compare two variants."""
self.assertEqual(expected.name, observed.name)
self.assertEqual(expected.chrom, observed.chrom)
self.assertEqual(expected.pos, observed.pos)
self.assertEqual(expected.a1, observed.a1)
self.assertEqual(expected.a2, observed.a2)
def test_check_returned_value(self):
"""Tests the module is returning dosage data."""
self.assertFalse(self.bgen._return_probs)
def test_repr(self):
"""Tests the __repr__ representation."""
self.assertEqual(
"PyBGEN({:,d} samples; {:,d} variants)".format(
self.truths["nb_samples"],
self.truths["nb_variants"],
),
str(self.bgen),
)
def test_nb_samples(self):
"""Tests the number of samples."""
self.assertEqual(self.truths["nb_samples"], self.bgen.nb_samples)
def test_nb_variants(self):
"""Tests the number of variants."""
self.assertEqual(self.truths["nb_variants"], self.bgen.nb_variants)
def test_samples(self):
"""Tests the samples attribute."""
if self.truths["samples"] is None:
self.assertTrue(self.bgen.samples is None)
else:
self.assertEqual(self.truths["samples"], self.bgen.samples)
def test_get_first_variant(self):
"""Tests getting the first variant of the file."""
# The variant to retrieve
name = "RSID_2"
        # Getting the results (there should be only one)
r = self.bgen.get_variant(name)
self.assertEqual(1, len(r))
variant, dosage = r.pop()
# Checking the variant
self._compare_variant(
self.truths["variants"][name]["variant"], variant,
)
# Checking the dosage
np.testing.assert_array_almost_equal(
self.truths["variants"][name]["data"], dosage,
)
def test_get_middle_variant(self):
"""Tests getting a variant in the middle of the file."""
# The variant to retrieve
name = "RSID_148"
        # Getting the results (there should be only one)
r = self.bgen.get_variant(name)
self.assertEqual(1, len(r))
variant, dosage = r.pop()
# Checking the variant
self._compare_variant(
self.truths["variants"][name]["variant"], variant,
)
# Checking the dosage
np.testing.assert_array_almost_equal(
self.truths["variants"][name]["data"], dosage,
)
def test_get_last_variant(self):
"""Tests getting the last variant of the file."""
# The variant to retrieve
name = "RSID_200"
        # Getting the results (there should be only one)
r = self.bgen.get_variant(name)
self.assertEqual(1, len(r))
variant, dosage = r.pop()
# Checking the variant
self._compare_variant(
self.truths["variants"][name]["variant"], variant,
)
# Checking the dosage
np.testing.assert_array_almost_equal(
self.truths["variants"][name]["data"], dosage,
)
def test_get_missing_variant(self):
"""Tests getting a variant which is absent from the BGEN file."""
with self.assertRaises(ValueError) as cm:
self.bgen.get_variant("UNKOWN_VARIANT_NAME")
self.assertEqual(
"UNKOWN_VARIANT_NAME: name not found",
str(cm.exception),
)
def test_iter_all_variants(self):
"""Tests the iteration of all variants."""
seen_variants = set()
for variant, dosage in self.bgen.iter_variants():
# The name of the variant
name = variant.name
seen_variants.add(name)
# Comparing the variant
self._compare_variant(
self.truths["variants"][name]["variant"],
variant,
)
# Comparing the dosage
np.testing.assert_array_almost_equal(
self.truths["variants"][name]["data"], dosage,
)
# Checking if we checked all variants
self.assertEqual(seen_variants, self.truths["variant_set"])
def test_as_iterator(self):
"""Tests the module as iterator."""
seen_variants = set()
for variant, dosage in self.bgen:
# The name of the variant
name = variant.name
seen_variants.add(name)
# Comparing the variant
self._compare_variant(
self.truths["variants"][name]["variant"], variant,
)
# Comparing the dosage
np.testing.assert_array_almost_equal(
self.truths["variants"][name]["data"], dosage,
)
# Checking if we checked all variants
self.assertEqual(seen_variants, self.truths["variant_set"])
def test_iter_variant_info(self):
"""Tests the iteration of all variants' information."""
seen_variants = set()
for variant in self.bgen.iter_variant_info():
# The name of the variant
name = variant.name
seen_variants.add(name)
# Comparing the variant
self._compare_variant(
self.truths["variants"][name]["variant"], variant,
)
# Checking if we checked all variants
self.assertEqual(seen_variants, self.truths["variant_set"])
def test_iter_variants_in_region(self):
"""Tests the iteration of all variants in a genomic region."""
seen_variants = set()
iterator = self.bgen.iter_variants_in_region("01", 67000, 70999)
for variant, dosage in iterator:
# The name of the variant
name = variant.name
seen_variants.add(name)
# Comparing the variant
self._compare_variant(
self.truths["variants"][name]["variant"], variant,
)
# Comparing the dosage
np.testing.assert_array_almost_equal(
self.truths["variants"][name]["data"], dosage,
)
# Checking if we checked all variants
expected = set()
for name in self.truths["variant_set"]:
variant = self.truths["variants"][name]["variant"]
if variant.chrom == "01":
if variant.pos >= 67000 and variant.pos <= 70999:
expected.add(name)
self.assertEqual(seen_variants, expected)
def test_get_specific_variant(self):
"""Test for specific variant lookup."""
seen_variants = set()
iterator = self.bgen.get_specific_variant("01", 67000, "A", "G")
for variant, dosage in iterator:
# The name of the variant
name = variant.name
seen_variants.add(name)
# Comparing the variant
self._compare_variant(
self.truths["variants"][name]["variant"], variant,
)
# Comparing the dosage
np.testing.assert_array_almost_equal(
self.truths["variants"][name]["data"], dosage,
)
# Checking if we checked all variants
expected = set()
for name in self.truths["variant_set"]:
variant = self.truths["variants"][name]["variant"]
if variant.chrom == "01":
if variant.pos == 67000:
expected.add(name)
self.assertEqual(seen_variants, expected)
def test_get_missing_specific_variant(self):
"""Tests getting a specific variant which is absent from the file."""
with self.assertRaises(ValueError) as cm:
self.bgen.get_specific_variant("01", 67000, "A", "T")
self.assertEqual(
"01:67000 A/T: variant not found",
str(cm.exception),
)
def test_iter_seeks(self):
"""Tests the _iter_seeks function."""
# Fetching random seeks from the index
self.bgen._bgen_index.execute(
"SELECT rsid, file_start_position FROM Variant"
)
seeks = random.sample(self.bgen._bgen_index.fetchall(), 5)
seen_variants = set()
iterator = self.bgen._iter_seeks([_[1] for _ in seeks])
for variant, dosage in iterator:
# The name of the variant
name = variant.name
seen_variants.add(name)
# Comparing the variant
self._compare_variant(
self.truths["variants"][name]["variant"], variant,
)
# Comparing the dosage
np.testing.assert_array_almost_equal(
self.truths["variants"][name]["data"], dosage,
)
# Checking if we checked all variants
self.assertEqual(seen_variants, {_[0] for _ in seeks})
def test_iter_variants_by_name(self):
"""Tests the iteration of variants by name."""
# Fetching random variants in the index
self.bgen._bgen_index.execute("SELECT rsid FROM Variant")
names = [
_[0] for _ in random.sample(self.bgen._bgen_index.fetchall(), 5)
]
seen_variants = set()
iterator = self.bgen.iter_variants_by_names(names)
for variant, dosage in iterator:
# The name of the variant
name = variant.name
seen_variants.add(name)
# Comparing the variant
self._compare_variant(
self.truths["variants"][name]["variant"],
variant,
)
# Comparing the dosage
np.testing.assert_array_almost_equal(
self.truths["variants"][name]["data"], dosage,
)
# Checking if we checked all variants
self.assertEqual(seen_variants, set(names))
class ProbsReaderTests(ReaderTests):
def setUp(self):
# Getting the truth for this file
self.truths = truths["probs"][self.truth_filename]
# Reading the BGEN files
bgen_fn = resource_filename(__name__, self.bgen_filename)
self.bgen = pybgen.PyBGEN(bgen_fn, probs_only=True)
def test_check_returned_value(self):
"""Tests the module is returning probability data."""
self.assertTrue(self.bgen._return_probs)
class Test32bits(ReaderTests):
bgen_filename = os.path.join("data", "example.32bits.bgen")
truth_filename = "example.32bits.truths.txt.bz2"
class Test32bitsProbs(ProbsReaderTests):
bgen_filename = os.path.join("data", "example.32bits.bgen")
truth_filename = "example.32bits.probs.truths.txt.bz2"
class Test24bits(ReaderTests):
bgen_filename = os.path.join("data", "example.24bits.bgen")
truth_filename = "example.24bits.truths.txt.bz2"
class Test24bitsProbs(ProbsReaderTests):
bgen_filename = os.path.join("data", "example.24bits.bgen")
truth_filename = "example.24bits.probs.truths.txt.bz2"
class Test16bits(ReaderTests):
bgen_filename = os.path.join("data", "example.16bits.bgen")
truth_filename = "example.16bits.truths.txt.bz2"
class Test16bitsProbs(ProbsReaderTests):
bgen_filename = os.path.join("data", "example.16bits.bgen")
truth_filename = "example.16bits.probs.truths.txt.bz2"
@unittest.skipIf(not pybgen.HAS_ZSTD, "module 'zstandard' not installed")
class Test16bitsZstd(ReaderTests):
bgen_filename = os.path.join("data", "example.16bits.zstd.bgen")
truth_filename = "example.16bits.zstd.truths.txt.bz2"
@unittest.skipIf(not pybgen.HAS_ZSTD, "module 'zstandard' not installed")
class Test16bitsZstdProbs(ProbsReaderTests):
bgen_filename = os.path.join("data", "example.16bits.zstd.bgen")
truth_filename = "example.16bits.zstd.probs.truths.txt.bz2"
class Test9bits(ReaderTests):
bgen_filename = os.path.join("data", "example.9bits.bgen")
truth_filename = "example.9bits.truths.txt.bz2"
class Test9bitsProbs(ProbsReaderTests):
bgen_filename = os.path.join("data", "example.9bits.bgen")
truth_filename = "example.9bits.probs.truths.txt.bz2"
class Test8bits(ReaderTests):
bgen_filename = os.path.join("data", "example.8bits.bgen")
truth_filename = "example.8bits.truths.txt.bz2"
class Test8bitsProbs(ProbsReaderTests):
bgen_filename = os.path.join("data", "example.8bits.bgen")
truth_filename = "example.8bits.probs.truths.txt.bz2"
class Test3bits(ReaderTests):
bgen_filename = os.path.join("data", "example.3bits.bgen")
truth_filename = "example.3bits.truths.txt.bz2"
class Test3bitsProbs(ProbsReaderTests):
bgen_filename = os.path.join("data", "example.3bits.bgen")
truth_filename = "example.3bits.probs.truths.txt.bz2"
class TestLayout1(ReaderTests):
bgen_filename = os.path.join("data", "cohort1.bgen")
truth_filename = "cohort1.truths.txt.bz2"
class TestLayout1Probs(ProbsReaderTests):
bgen_filename = os.path.join("data", "cohort1.bgen")
truth_filename = "cohort1.probs.truths.txt.bz2"
reader_tests = (
Test32bits, Test24bits, Test16bits, Test16bitsZstd, Test9bits, Test8bits,
Test3bits, TestLayout1, Test32bitsProbs, Test24bitsProbs, Test16bitsProbs,
Test16bitsZstdProbs, Test9bitsProbs, Test8bitsProbs, Test3bitsProbs,
TestLayout1Probs,
)
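# A minimal sketch of the reader API exercised by the tests above (the file
# name is a placeholder):
#
#   bgen = pybgen.PyBGEN("example.32bits.bgen")
#   for variant, dosage in bgen:
#       print(variant.name, variant.chrom, variant.pos, dosage.mean())
#   bgen.close()
#
# Passing probs_only=True to PyBGEN makes the iterator yield probability
# arrays instead of dosages, as covered by the *Probs test classes.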
| {
"content_hash": "962c4812f09dab888937e32b80fac062",
"timestamp": "",
"source": "github",
"line_count": 419,
"max_line_length": 78,
"avg_line_length": 33.08114558472554,
"alnum_prop": 0.6037082461582859,
"repo_name": "lemieuxl/pybgen",
"id": "252c91e64030684c7472f120e57f4189050f8f44",
"size": "15033",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pybgen/tests/test_pybgen.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62893"
},
{
"name": "Shell",
"bytes": "46"
}
],
"symlink_target": ""
} |
"""
SUR and 3SLS estimation
"""
__author__= "Luc Anselin lanselin@gmail.com, \
Pedro V. Amaral pedrovma@gmail.com"
import numpy as np
import numpy.linalg as la
import scipy.stats as stats
import summary_output as SUMMARY
import user_output as USER
from sur_utils import sur_dict2mat,sur_mat2dict,sur_corr,\
sur_crossprod,sur_est,sur_resids,check_k
from diagnostics_sur import sur_setp,sur_lrtest,sur_lmtest,surLMe,sur_chow
__all__ = ['SUR','ThreeSLS']
class BaseSUR():
""" Base class for SUR estimation, both two step as well as iterated
Parameters
----------
bigy : dictionary with vector for dependent variable by equation
bigX : dictionary with matrix of explanatory variables by equation
(note, already includes constant term)
iter : whether or not to use iterated estimation
default = False
maxiter : maximum iterations; default = 5
epsilon : precision criterion to end iterations
default = 0.00001
verbose : flag to print out iteration number and value of log det(sig)
at the beginning and the end of the iteration
Attributes
----------
bigy : dictionary with y values
bigX : dictionary with X values
bigXX : dictionary with X_t'X_r cross-products
bigXy : dictionary with X_t'y_r cross-products
n_eq : number of equations
n : number of observations in each cross-section
bigK : vector with number of explanatory variables (including constant)
for each equation
bOLS : dictionary with OLS regression coefficients for each equation
olsE : N x n_eq array with OLS residuals for each equation
bSUR : dictionary with SUR regression coefficients for each equation
varb : variance-covariance matrix
bigE : N x n_eq array with SUR residuals for each equation
sig : Sigma matrix of inter-equation error covariances
ldetS1 : log det(Sigma) for SUR model
resids : n by n_eq array of residuals
sig_ols : Sigma matrix for OLS residuals
ldetS0 : log det(Sigma) for null model (OLS by equation, diagonals only)
    niter : number of iterations (=1 for iter=False)
corr : inter-equation SUR error correlation matrix
llik : log-likelihood (including the constant pi)
Methods
-------
sur_ols : OLS estimation by equation
"""
def __init__(self,bigy,bigX,iter=False,maxiter=5,epsilon=0.00001,verbose=False):
# setting up the cross-products
self.bigy = bigy
self.bigX = bigX
self.n_eq = len(bigy.keys())
self.n = bigy[0].shape[0]
self.bigK = np.zeros((self.n_eq,1),dtype=np.int_)
for r in range(self.n_eq):
self.bigK[r] = self.bigX[r].shape[1]
self.bigXX,self.bigXy = sur_crossprod(self.bigX,self.bigy)
# OLS regression by equation, sets up initial residuals
self.sur_ols() # creates self.bOLS and self.olsE
# SUR estimation using OLS residuals - two step estimation
self.bSUR,self.varb,self.sig = sur_est(self.bigXX,self.bigXy,self.olsE,self.bigK)
resids = sur_resids(self.bigy,self.bigX,self.bSUR) # matrix of residuals
# Sigma and log det(Sigma) for null model
self.sig_ols = self.sig
sols = np.diag(np.diag(self.sig))
self.ldetS0 = np.log(np.diag(sols)).sum()
det0 = self.ldetS0
# setup for iteration
det1 = la.slogdet(self.sig)[1]
self.ldetS1 = det1
#self.niter = 0
if iter: # iterated FGLS aka ML
n_iter = 0
while np.abs(det1-det0) > epsilon and n_iter <= maxiter:
n_iter += 1
det0 = det1
self.bSUR,self.varb,self.sig = sur_est(self.bigXX,self.bigXy,\
resids,self.bigK)
resids = sur_resids(self.bigy,self.bigX,self.bSUR)
det1 = la.slogdet(self.sig)[1]
if verbose:
print (n_iter,det0,det1)
self.bigE = sur_resids(self.bigy,self.bigX,self.bSUR)
self.ldetS1 = det1
self.niter = n_iter
else:
self.niter = 1
self.bigE = resids
self.corr = sur_corr(self.sig)
lik = self.n_eq * (1.0 + np.log(2.0*np.pi)) + self.ldetS1
self.llik = - (self.n / 2.0) * lik
def sur_ols(self):
'''OLS estimation of SUR equations
Parameters
----------
self : BaseSUR object
Creates
-------
self.bOLS : dictionary with regression coefficients for each equation
self.olsE : N x n_eq array with OLS residuals for each equation
'''
self.bOLS = {}
for r in range(self.n_eq):
self.bOLS[r] = np.dot(la.inv(self.bigXX[(r,r)]),self.bigXy[(r,r)])
self.olsE = sur_resids(self.bigy,self.bigX,self.bOLS)
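# Note on the estimation steps in BaseSUR: sur_ols first computes the
# equation-by-equation OLS estimates b_r = (X_r'X_r)^{-1} X_r'y_r, whose
# residuals seed the feasible GLS step carried out by sur_est; with iter=True
# that step is repeated until log det(Sigma) converges (iterated FGLS, aka ML).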
class SUR(BaseSUR):
""" User class for SUR estimation, both two step as well as iterated
Parameters
----------
bigy : dictionary with vector for dependent variable by equation
bigX : dictionary with matrix of explanatory variables by equation
(note, already includes constant term)
w : spatial weights object, default = None
nonspat_diag : boolean; flag for non-spatial diagnostics, default = True
spat_diag : boolean; flag for spatial diagnostics, default = False
iter : boolean; whether or not to use iterated estimation
default = False
maxiter : integer; maximum iterations; default = 5
epsilon : float; precision criterion to end iterations
default = 0.00001
verbose : boolean; flag to print out iteration number and value
of log det(sig) at the beginning and the end of the iteration
name_bigy : dictionary with name of dependent variable for each equation
                   default = None, but should be specified;
                   this is done automatically when sur_stackxy is used
name_bigX : dictionary with names of explanatory variables for each
equation
                   default = None, but should be specified;
                   this is done automatically when sur_stackxy is used
name_ds : string; name for the data set
name_w : string; name for the weights file
Attributes
----------
bigy : dictionary with y values
bigX : dictionary with X values
bigXX : dictionary with X_t'X_r cross-products
bigXy : dictionary with X_t'y_r cross-products
n_eq : number of equations
n : number of observations in each cross-section
bigK : vector with number of explanatory variables (including constant)
for each equation
bOLS : dictionary with OLS regression coefficients for each equation
olsE : N x n_eq array with OLS residuals for each equation
bSUR : dictionary with SUR regression coefficients for each equation
varb : variance-covariance matrix
sig : Sigma matrix of inter-equation error covariances
ldetS1 : log det(Sigma) for SUR model
bigE : n by n_eq array of residuals
sig_ols : Sigma matrix for OLS residuals (diagonal)
ldetS0 : log det(Sigma) for null model (OLS by equation)
      niter         : number of iterations (=1 for iter=False)
corr : inter-equation error correlation matrix
llik : log-likelihood (including the constant pi)
sur_inf : dictionary with standard error, asymptotic t and p-value,
one for each equation
lrtest : Likelihood Ratio test on off-diagonal elements of sigma
                      (tuple with test, df, p-value)
      lmtest        : Lagrange Multiplier test on off-diagonal elements of sigma
                      (tuple with test, df, p-value)
lmEtest : Lagrange Multiplier test on error spatial autocorrelation in SUR
surchow : list with tuples for Chow test on regression coefficients
each tuple contains test value, degrees of freedom, p-value
name_bigy : dictionary with name of dependent variable for each equation
name_bigX : dictionary with names of explanatory variables for each
equation
name_ds : string; name for the data set
name_w : string; name for the weights file
Examples
--------
First import pysal to load the spatial analysis tools.
>>> import pysal
Open data on NCOVR US County Homicides (3085 areas) using pysal.open().
This is the DBF associated with the NAT shapefile. Note that pysal.open()
also reads data in CSV format.
>>> db = pysal.open(pysal.examples.get_path("NAT.dbf"),'r')
The specification of the model to be estimated can be provided as lists.
Each equation should be listed separately. In this example, equation 1
has HR80 as dependent variable and PS80 and UE80 as exogenous regressors.
For equation 2, HR90 is the dependent variable, and PS90 and UE90 the
exogenous regressors.
>>> y_var = ['HR80','HR90']
>>> x_var = [['PS80','UE80'],['PS90','UE90']]
Although not required for this method, we can load a weights matrix file
to allow for spatial diagnostics.
>>> w = pysal.queen_from_shapefile(pysal.examples.get_path("NAT.shp"))
>>> w.transform='r'
The SUR method requires data to be provided as dictionaries. PySAL
provides the tool sur_dictxy to create these dictionaries from the
list of variables. The line below will create four dictionaries
containing respectively the dependent variables (bigy), the regressors
(bigX), the dependent variables' names (bigyvars) and regressors' names
    (bigXvars). All these will be created from the database (db) and lists
of variables (y_var and x_var) created above.
>>> bigy,bigX,bigyvars,bigXvars = pysal.spreg.sur_utils.sur_dictxy(db,y_var,x_var)
We can now run the regression and then have a summary of the output by typing:
'print(reg.summary)'
>>> reg = SUR(bigy,bigX,w=w,name_bigy=bigyvars,name_bigX=bigXvars,spat_diag=True,name_ds="nat")
>>> print(reg.summary)
REGRESSION
----------
SUMMARY OF OUTPUT: SEEMINGLY UNRELATED REGRESSIONS (SUR)
--------------------------------------------------------
Data set : nat
Weights matrix : unknown
Number of Equations : 2 Number of Observations: 3085
Log likelihood (SUR): -19902.966 Number of Iterations : 1
----------
<BLANKLINE>
SUMMARY OF EQUATION 1
---------------------
Dependent Variable : HR80 Number of Variables : 3
Mean dependent var : 6.9276 Degrees of Freedom : 3082
S.D. dependent var : 6.8251
<BLANKLINE>
------------------------------------------------------------------------------------
Variable Coefficient Std.Error z-Statistic Probability
------------------------------------------------------------------------------------
Constant_1 5.1390718 0.2624673 19.5798587 0.0000000
PS80 0.6776481 0.1219578 5.5564132 0.0000000
UE80 0.2637240 0.0343184 7.6846277 0.0000000
------------------------------------------------------------------------------------
<BLANKLINE>
SUMMARY OF EQUATION 2
---------------------
Dependent Variable : HR90 Number of Variables : 3
Mean dependent var : 6.1829 Degrees of Freedom : 3082
S.D. dependent var : 6.6403
<BLANKLINE>
------------------------------------------------------------------------------------
Variable Coefficient Std.Error z-Statistic Probability
------------------------------------------------------------------------------------
Constant_2 3.6139403 0.2534996 14.2561949 0.0000000
PS90 1.0260715 0.1121662 9.1477755 0.0000000
UE90 0.3865499 0.0341996 11.3027760 0.0000000
------------------------------------------------------------------------------------
<BLANKLINE>
<BLANKLINE>
REGRESSION DIAGNOSTICS
TEST DF VALUE PROB
LM test on Sigma 1 680.168 0.0000
LR test on Sigma 1 768.385 0.0000
<BLANKLINE>
OTHER DIAGNOSTICS - CHOW TEST
VARIABLES DF VALUE PROB
Constant_1, Constant_2 1 26.729 0.0000
PS80, PS90 1 8.241 0.0041
UE80, UE90 1 9.384 0.0022
<BLANKLINE>
DIAGNOSTICS FOR SPATIAL DEPENDENCE
TEST DF VALUE PROB
Lagrange Multiplier (error) 2 1333.625 0.0000
<BLANKLINE>
ERROR CORRELATION MATRIX
EQUATION 1 EQUATION 2
1.000000 0.469548
0.469548 1.000000
================================ END OF REPORT =====================================
"""
def __init__(self,bigy,bigX,w=None,nonspat_diag=True,spat_diag=False,vm=False,\
iter=False,maxiter=5,epsilon=0.00001,verbose=False,\
name_bigy=None,name_bigX=None,name_ds=None,name_w=None):
#need checks on match between bigy, bigX dimensions
# init moved here before name check
BaseSUR.__init__(self,bigy=bigy,bigX=bigX,iter=iter,\
maxiter=maxiter,epsilon=epsilon,verbose=verbose)
self.name_ds = USER.set_name_ds(name_ds)
self.name_w = USER.set_name_w(name_w, w)
#initialize names - should be generated by sur_stack
if name_bigy:
self.name_bigy = name_bigy
else: # need to construct y names
self.name_bigy = {}
for r in range(self.n_eq):
yn = 'dep_var_' + str(r)
self.name_bigy[r] = yn
if name_bigX:
self.name_bigX = name_bigX
else: # need to construct x names
self.name_bigX = {}
for r in range(self.n_eq):
k = self.bigX[r].shape[1] - 1
name_x = ['var_' + str(i + 1) + "_" + str(r) for i in range(k)]
ct = 'Constant_' + str(r) # NOTE: constant always included in X
name_x.insert(0, ct)
self.name_bigX[r] = name_x
#inference
self.sur_inf = sur_setp(self.bSUR,self.varb)
if nonspat_diag:
#LR test on off-diagonal elements of Sigma
self.lrtest = sur_lrtest(self.n,self.n_eq,self.ldetS0,self.ldetS1)
#LM test on off-diagonal elements of Sigma
self.lmtest = sur_lmtest(self.n,self.n_eq,self.sig_ols)
else:
self.lrtest = None
self.lmtest = None
#LM test on spatial error autocorrelation
if spat_diag:
if not w:
raise Exception, "Error: spatial weights needed"
WS = w.sparse
self.lmEtest = surLMe(self.n_eq,WS,self.bigE,self.sig)
else:
self.lmEtest = None
#LM test on spatial lag autocorrelation
# test on constancy of coefficients across equations
if check_k(self.bigK): # only for equal number of variables
self.surchow = sur_chow(self.n_eq,self.bigK,self.bSUR,self.varb)
else:
self.surchow = None
#Listing of the results
self.title = "SEEMINGLY UNRELATED REGRESSIONS (SUR)"
SUMMARY.SUR(reg=self, nonspat_diag=nonspat_diag, spat_diag=spat_diag, surlm=True)
class BaseThreeSLS():
""" Base class for 3SLS estimation, two step
Parameters
----------
bigy : dictionary with vector for dependent variable by equation
bigX : dictionary with matrix of explanatory variables by equation
(note, already includes constant term)
bigyend : dictionary with matrix of endogenous variables by equation
bigq : dictionary with matrix of instruments by equation
Attributes
----------
bigy : dictionary with y values
bigZ : dictionary with matrix of exogenous and endogenous variables
for each equation
bigZHZH : dictionary with matrix of cross products Zhat_r'Zhat_s
bigZHy : dictionary with matrix of cross products Zhat_r'y_end_s
n_eq : number of equations
n : number of observations in each cross-section
bigK : vector with number of explanatory variables (including constant,
exogenous and endogenous) for each equation
b2SLS : dictionary with 2SLS regression coefficients for each equation
      tslsE         : N x n_eq array with 2SLS residuals for each equation
b3SLS : dictionary with 3SLS regression coefficients for each equation
varb : variance-covariance matrix
sig : Sigma matrix of inter-equation error covariances
bigE : n by n_eq array of residuals
corr : inter-equation 3SLS error correlation matrix
Methods
-------
tsls_2sls : 2SLS estimation by equation
"""
def __init__(self,bigy,bigX,bigyend,bigq):
# setting up the cross-products
self.bigy = bigy
self.n_eq = len(bigy.keys())
self.n = bigy[0].shape[0]
# dictionary with exog and endog, Z
self.bigZ = {}
for r in range(self.n_eq):
self.bigZ[r] = np.hstack((bigX[r],bigyend[r]))
# number of explanatory variables by equation
self.bigK = np.zeros((self.n_eq,1),dtype=np.int_)
for r in range(self.n_eq):
self.bigK[r] = self.bigZ[r].shape[1]
# dictionary with instruments, H
bigH = {}
for r in range(self.n_eq):
bigH[r] = np.hstack((bigX[r],bigq[r]))
# dictionary with instrumental variables, X and yend_predicted, Z-hat
bigZhat = {}
for r in range(self.n_eq):
try:
HHi = la.inv(np.dot(bigH[r].T,bigH[r]))
except:
raise Exception, "ERROR: singular cross product matrix, check instruments"
Hye = np.dot(bigH[r].T,bigyend[r])
yp = np.dot(bigH[r],np.dot(HHi,Hye))
bigZhat[r] = np.hstack((bigX[r],yp))
self.bigZHZH,self.bigZHy = sur_crossprod(bigZhat,self.bigy)
# 2SLS regression by equation, sets up initial residuals
self.sur_2sls() # creates self.b2SLS and self.tslsE
self.b3SLS,self.varb,self.sig = sur_est(self.bigZHZH,self.bigZHy,self.tslsE,self.bigK)
self.bigE = sur_resids(self.bigy,self.bigZ,self.b3SLS) # matrix of residuals
# inter-equation correlation matrix
self.corr = sur_corr(self.sig)
def sur_2sls(self):
'''2SLS estimation of SUR equations
Parameters
----------
self : BaseSUR object
Creates
-------
self.b2SLS : dictionary with regression coefficients for each equation
        self.tslsE   : N x n_eq array with 2SLS residuals for each equation
'''
self.b2SLS = {}
for r in range(self.n_eq):
self.b2SLS[r] = np.dot(la.inv(self.bigZHZH[(r,r)]),self.bigZHy[(r,r)])
self.tslsE = sur_resids(self.bigy,self.bigZ,self.b2SLS)
class ThreeSLS(BaseThreeSLS):
""" User class for 3SLS estimation
Parameters
----------
bigy : dictionary with vector for dependent variable by equation
bigX : dictionary with matrix of explanatory variables by equation
(note, already includes constant term)
bigyend : dictionary with matrix of endogenous variables by equation
bigq : dictionary with matrix of instruments by equation
nonspat_diag : boolean; flag for non-spatial diagnostics, default = True
name_bigy : dictionary with name of dependent variable for each equation
default = None, but should be specified
is done when sur_stackxy is used
name_bigX : dictionary with names of explanatory variables for each
equation
default = None, but should be specified
is done when sur_stackxy is used
name_bigyend : dictionary with names of endogenous variables for each
equation
default = None, but should be specified
is done when sur_stackZ is used
name_bigq : dictionary with names of instrumental variables for each
                      equation
default = None, but should be specified
is done when sur_stackZ is used
name_ds : string; name for the data set
Attributes
----------
bigy : dictionary with y values
bigZ : dictionary with matrix of exogenous and endogenous variables
for each equation
bigZHZH : dictionary with matrix of cross products Zhat_r'Zhat_s
bigZHy : dictionary with matrix of cross products Zhat_r'y_end_s
n_eq : number of equations
n : number of observations in each cross-section
bigK : vector with number of explanatory variables (including constant,
exogenous and endogenous) for each equation
b2SLS : dictionary with 2SLS regression coefficients for each equation
      tslsE         : N x n_eq array with 2SLS residuals for each equation
b3SLS : dictionary with 3SLS regression coefficients for each equation
varb : variance-covariance matrix
sig : Sigma matrix of inter-equation error covariances
bigE : n by n_eq array of residuals
corr : inter-equation 3SLS error correlation matrix
tsls_inf : dictionary with standard error, asymptotic t and p-value,
one for each equation
surchow : list with tuples for Chow test on regression coefficients
each tuple contains test value, degrees of freedom, p-value
name_ds : string; name for the data set
name_bigy : dictionary with name of dependent variable for each equation
name_bigX : dictionary with names of explanatory variables for each
equation
name_bigyend : dictionary with names of endogenous variables for each
equation
name_bigq : dictionary with names of instrumental variables for each
                      equation
Examples
--------
First import pysal to load the spatial analysis tools.
>>> import pysal
Open data on NCOVR US County Homicides (3085 areas) using pysal.open().
This is the DBF associated with the NAT shapefile. Note that pysal.open()
also reads data in CSV format.
>>> db = pysal.open(pysal.examples.get_path("NAT.dbf"),'r')
The specification of the model to be estimated can be provided as lists.
Each equation should be listed separately. In this example, equation 1
has HR80 as dependent variable, PS80 and UE80 as exogenous regressors,
RD80 as endogenous regressor and FP79 as additional instrument.
For equation 2, HR90 is the dependent variable, PS90 and UE90 the
    exogenous regressors, RD90 as endogenous regressor and FP89 as
    additional instrument.
>>> y_var = ['HR80','HR90']
>>> x_var = [['PS80','UE80'],['PS90','UE90']]
>>> yend_var = [['RD80'],['RD90']]
>>> q_var = [['FP79'],['FP89']]
The SUR method requires data to be provided as dictionaries. PySAL
provides two tools to create these dictionaries from the list of variables:
sur_dictxy and sur_dictZ. The tool sur_dictxy can be used to create the
dictionaries for Y and X, and sur_dictZ for endogenous variables (yend) and
additional instruments (q).
>>> bigy,bigX,bigyvars,bigXvars = pysal.spreg.sur_utils.sur_dictxy(db,y_var,x_var)
>>> bigyend,bigyendvars = pysal.spreg.sur_utils.sur_dictZ(db,yend_var)
>>> bigq,bigqvars = pysal.spreg.sur_utils.sur_dictZ(db,q_var)
We can now run the regression and then have a summary of the output by typing:
print(reg.summary)
Alternatively, we can just check the betas and standard errors, asymptotic t
and p-value of the parameters:
>>> reg = ThreeSLS(bigy,bigX,bigyend,bigq,name_bigy=bigyvars,name_bigX=bigXvars,name_bigyend=bigyendvars,name_bigq=bigqvars,name_ds="NAT")
>>> reg.b3SLS
{0: array([[ 6.92426353],
[ 1.42921826],
[ 0.00049435],
[ 3.5829275 ]]), 1: array([[ 7.62385875],
[ 1.65031181],
[-0.21682974],
[ 3.91250428]])}
>>> reg.tsls_inf
{0: array([[ 0.23220853, 29.81916157, 0. ],
[ 0.10373417, 13.77770036, 0. ],
[ 0.03086193, 0.01601807, 0.98721998],
[ 0.11131999, 32.18584124, 0. ]]), 1: array([[ 0.28739415, 26.52753638, 0. ],
[ 0.09597031, 17.19606554, 0. ],
[ 0.04089547, -5.30204786, 0.00000011],
[ 0.13586789, 28.79638723, 0. ]])}
"""
def __init__(self,bigy,bigX,bigyend,bigq,nonspat_diag=True,\
name_bigy=None,name_bigX=None,name_bigyend=None,name_bigq=None,\
name_ds=None):
#need checks on match between bigy, bigX dimensions
BaseThreeSLS.__init__(self,bigy=bigy,bigX=bigX,bigyend=bigyend,\
bigq=bigq)
self.name_ds = USER.set_name_ds(name_ds)
#initialize names - should be generated by sur_stack
if name_bigy:
self.name_bigy = name_bigy
else: # need to construct y names
self.name_bigy = {}
for r in range(self.n_eq):
yn = 'dep_var_' + str(r+1)
self.name_bigy[r] = yn
if name_bigX:
self.name_bigX = name_bigX
else: # need to construct x names
self.name_bigX = {}
for r in range(self.n_eq):
k = bigX[r].shape[1] - 1
name_x = ['var_' + str(i + 1) + "_" + str(r+1) for i in range(k)]
ct = 'Constant_' + str(r+1) # NOTE: constant always included in X
name_x.insert(0, ct)
self.name_bigX[r] = name_x
if name_bigyend:
self.name_bigyend = name_bigyend
else: # need to construct names
self.name_bigyend = {}
for r in range(self.n_eq):
ky = bigyend[r].shape[1]
name_ye = ['end_' + str(i + 1) + "_" + str(r+1) for i in range(ky)]
self.name_bigyend[r] = name_ye
if name_bigq:
self.name_bigq = name_bigq
else: # need to construct names
self.name_bigq = {}
for r in range(self.n_eq):
ki = bigq[r].shape[1]
name_i = ['inst_' + str(i + 1) + "_" + str(r+1) for i in range(ki)]
self.name_bigq[r] = name_i
#inference
self.tsls_inf = sur_setp(self.b3SLS,self.varb)
# test on constancy of coefficients across equations
if check_k(self.bigK): # only for equal number of variables
self.surchow = sur_chow(self.n_eq,self.bigK,self.b3SLS,self.varb)
else:
self.surchow = None
#Listing of the results
self.title = "THREE STAGE LEAST SQUARES (3SLS)"
SUMMARY.SUR(reg=self, tsls=True, nonspat_diag=nonspat_diag)
def _test():
import doctest
start_suppress = np.get_printoptions()['suppress']
np.set_printoptions(suppress=True)
doctest.testmod()
np.set_printoptions(suppress=start_suppress)
if __name__ == '__main__':
_test()
import numpy as np
import pysal
from sur_utils import sur_dictxy,sur_dictZ
db = pysal.open(pysal.examples.get_path('NAT.dbf'), 'r')
y_var = ['HR80','HR90']
x_var = [['PS80','UE80'],['PS90','UE90']]
#Example SUR
#"""
w = pysal.queen_from_shapefile(pysal.examples.get_path("NAT.shp"))
w.transform='r'
bigy0,bigX0,bigyvars0,bigXvars0 = sur_dictxy(db,y_var,x_var)
reg0 = SUR(bigy0,bigX0,w=w,name_bigy=bigyvars0,name_bigX=bigXvars0,\
spat_diag=True,name_ds="nat")
print reg0.summary
"""
#Example 3SLS
yend_var = [['RD80'],['RD90']]
q_var = [['FP79'],['FP89']]
bigy1,bigX1,bigyvars1,bigXvars1 = sur_dictxy(db,y_var,x_var)
bigyend1,bigyendvars1 = sur_dictZ(db,yend_var)
bigq1,bigqvars1 = sur_dictZ(db,q_var)
reg1 = ThreeSLS(bigy1,bigX1,bigyend1,bigq1,name_ds="nat")
print reg1.summary
#"""
| {
"content_hash": "b340f095c054f49f703cb1aa9ddc0622",
"timestamp": "",
"source": "github",
"line_count": 697,
"max_line_length": 146,
"avg_line_length": 44.766140602582496,
"alnum_prop": 0.5412473559387219,
"repo_name": "schmidtc/pysal",
"id": "c881adbf8cca4598762ca1e733df804c12eef106",
"size": "31202",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pysal/spreg/sur.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "8941"
},
{
"name": "Makefile",
"bytes": "408"
},
{
"name": "Python",
"bytes": "2547465"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
import wagtail.core.fields
class Migration(migrations.Migration):
dependencies = [
("core", "0001_initial"),
]
operations = [
migrations.AddField(
model_name="submitformpage",
name="body",
field=wagtail.core.fields.RichTextField(
help_text=b"Edit the content you want to see before the form.",
blank=True,
),
preserve_default=True,
),
migrations.AddField(
model_name="submitformpage",
name="from_address",
field=models.CharField(max_length=255, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name="submitformpage",
name="subject",
field=models.CharField(max_length=255, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name="submitformpage",
name="thank_you_text",
field=wagtail.core.fields.RichTextField(
help_text=b"Set the message users will see after submitting the form.",
blank=True,
),
preserve_default=True,
),
migrations.AddField(
model_name="submitformpage",
name="to_address",
field=models.CharField(
help_text="Optional - form submissions will be emailed to this address",
max_length=255,
blank=True,
),
preserve_default=True,
),
]
| {
"content_hash": "9a2bffe4a7ea5fb336108678dd45352b",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 88,
"avg_line_length": 30.528301886792452,
"alnum_prop": 0.5327564894932015,
"repo_name": "springload/madewithwagtail",
"id": "8a6c6c742c1d8c5899aac30a181edf7f0541c5a4",
"size": "1644",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "core/migrations/0002_auto_20150419_2004.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "544"
},
{
"name": "Dockerfile",
"bytes": "3206"
},
{
"name": "HTML",
"bytes": "68623"
},
{
"name": "Handlebars",
"bytes": "1075"
},
{
"name": "JavaScript",
"bytes": "6793"
},
{
"name": "Makefile",
"bytes": "633"
},
{
"name": "Python",
"bytes": "114751"
},
{
"name": "SCSS",
"bytes": "39328"
},
{
"name": "Shell",
"bytes": "4365"
}
],
"symlink_target": ""
} |
from gaiatest import GaiaTestCase
class TestGallery(GaiaTestCase):
_gallery_items_locator = ('css selector', 'li.thumbnail')
_current_image_locator = ('css selector', '#frame2 > img')
_photos_toolbar_locator = ('id', 'fullscreen-toolbar')
def setUp(self):
GaiaTestCase.setUp(self)
# add photo to storage
self.push_resource('IMG_0001.jpg', 'DCIM/100MZLLA')
# launch the Gallery app
self.app = self.apps.launch('Gallery')
def test_gallery_view(self):
# https://moztrap.mozilla.org/manage/case/1326/
self.wait_for_element_displayed(*self._gallery_items_locator)
gallery_items = self.marionette.execute_script("return window.wrappedJSObject.files;")
for index, item in enumerate(gallery_items):
# If the current item is not a video, set it as the gallery item to tap.
if 'video' not in item['metadata']:
first_gallery_item = self.marionette.find_elements(*self._gallery_items_locator)[index]
break
self.marionette.tap(first_gallery_item)
current_image = self.marionette.find_element(*self._current_image_locator)
photos_toolbar = self.marionette.find_element(*self._photos_toolbar_locator)
self.wait_for_element_displayed(*self._current_image_locator)
self.assertIsNotNone(current_image.get_attribute('src'))
self.assertTrue(photos_toolbar.is_displayed())
# TODO
# Add steps to view picture full screen
# TODO
# Repeat test with landscape orientation
| {
"content_hash": "a316cd2368a9cb924e6ae26c1f03f0ca",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 103,
"avg_line_length": 36.93023255813954,
"alnum_prop": 0.654911838790932,
"repo_name": "wilebeast/FireFox-OS",
"id": "51852df8a5149e2964cd590a7f2421af2f7d8790",
"size": "1788",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "B2G/gaia/tests/python/gaiatest/tests/test_gallery.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import unittest
import numpy
import six.moves.cPickle as pickle
import chainer
from chainer import cuda
from chainer import gradient_check
from chainer import initializers
from chainer.links.connection import convolution_nd
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
from chainer.utils import conv
from chainer.utils import conv_nd
@testing.parameterize(*(testing.product({
'dims': [(3, 4), (3, 4, 3)],
'dtype': [numpy.float32]
}) + testing.product({
'dims': [(5,)],
'dtype': [numpy.float16, numpy.float32, numpy.float64]
})))
class TestConvolutionND(unittest.TestCase):
def setUp(self):
ndim = len(self.dims)
self.ksize = (3,) * ndim
self.stride = (2,) * ndim
self.pad = (1,) * ndim
self.link = convolution_nd.ConvolutionND(
ndim, 3, 2, self.ksize, stride=self.stride, pad=self.pad,
initial_bias=initializers.Uniform(scale=1., dtype=self.dtype))
self.link.cleargrads()
x_shape = (2, 3) + self.dims
self.x = numpy.random.uniform(-1, 1, x_shape).astype(self.dtype)
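        # Expected output size per spatial dimension follows the usual
        # convolution formula (d + 2*p - k) // s + 1; e.g. for d=5, k=3,
        # s=2, p=1 this gives (5 + 2 - 3) // 2 + 1 = 3.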
gy_shape = (2, 2) + tuple(
conv.get_conv_outsize(d, k, s, p) for (d, k, s, p) in zip(
self.dims, self.ksize, self.stride, self.pad))
self.gy = numpy.random.uniform(-1, 1, gy_shape).astype(self.dtype)
self.check_backward_options = {'eps': 1e-2, 'atol': 1e-3, 'rtol': 1e-3}
if self.dtype == numpy.float16:
self.check_backward_options = {
'eps': 2 ** -4, 'atol': 2 ** -4, 'rtol': 2 ** -4}
@attr.gpu
def test_im2col_consistency(self):
col_cpu = conv_nd.im2col_nd_cpu(
self.x, self.ksize, self.stride, self.pad)
col_gpu = conv_nd.im2col_nd_gpu(
cuda.to_gpu(self.x), self.ksize, self.stride, self.pad)
testing.assert_allclose(col_cpu, col_gpu.get(), atol=0, rtol=0)
@attr.gpu
def test_col2im_consistency(self):
col = conv_nd.im2col_nd_cpu(self.x, self.ksize, self.stride, self.pad)
im_cpu = conv_nd.col2im_nd_cpu(col, self.stride, self.pad, self.dims)
im_gpu = conv_nd.col2im_nd_gpu(
cuda.to_gpu(col), self.stride, self.pad, self.dims)
testing.assert_allclose(im_cpu, im_gpu.get())
def check_forward_consistency(self):
x_cpu = chainer.Variable(self.x)
y_cpu = self.link(x_cpu)
self.assertEqual(y_cpu.data.dtype, self.dtype)
self.link.to_gpu()
x_gpu = chainer.Variable(cuda.to_gpu(self.x))
y_gpu = self.link(x_gpu)
self.assertEqual(y_gpu.data.dtype, self.dtype)
testing.assert_allclose(y_cpu.data, y_gpu.data.get())
@attr.cudnn
@condition.retry(3)
def test_forward_consistency(self):
self.check_forward_consistency()
@attr.gpu
@condition.retry(3)
def test_forward_consistency_im2col(self):
with chainer.using_config('use_cudnn', 'never'):
self.check_forward_consistency()
def check_backward(self, x_data, y_grad):
gradient_check.check_backward(
self.link, x_data, y_grad, (self.link.W, self.link.b),
**self.check_backward_options)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.cudnn
@condition.retry(3)
def test_backward_gpu(self):
self.link.to_gpu()
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
@attr.gpu
@condition.retry(3)
def test_backward_gpu_im2col(self):
self.link.to_gpu()
with chainer.using_config('use_cudnn', 'never'):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
def check_pickling(self, x_data):
x = chainer.Variable(x_data)
y = self.link(x)
y_data1 = y.data
del x, y
pickled = pickle.dumps(self.link, -1)
del self.link
self.link = pickle.loads(pickled)
x = chainer.Variable(x_data)
y = self.link(x)
y_data2 = y.data
testing.assert_allclose(y_data1, y_data2, atol=0, rtol=0)
def test_pickling_cpu(self):
self.check_pickling(self.x)
@attr.gpu
def test_pickling_gpu(self):
self.link.to_gpu()
self.check_pickling(cuda.to_gpu(self.x))
class TestConvolutionNDNoInitialBias(unittest.TestCase):
def test_no_initial_bias(self):
ndim = 3
ksize = 3
link = convolution_nd.ConvolutionND(
ndim, 3, 2, ksize, nobias=True)
self.assertIsNone(link.b)
testing.run_module(__name__, __file__)
| {
"content_hash": "bb9030546a162aa7899a9c1a37b72199",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 79,
"avg_line_length": 31.510204081632654,
"alnum_prop": 0.6090241796200345,
"repo_name": "kashif/chainer",
"id": "59eabd3e683744ccdd2d860c89e157a19ed7c580",
"size": "4632",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/chainer_tests/links_tests/connection_tests/test_convolution_nd.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Python",
"bytes": "2730306"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import optparse
import os
import subprocess
import sys
ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
PY_VERSION = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
def run_command_with_code(cmd, redirect_output=True, check_exit_code=True):
"""Runs a command in an out-of-process shell.
Runs a command in an out-of-process shell, returning the
output of that command. Working directory is ROOT.
"""
if redirect_output:
stdout = subprocess.PIPE
else:
stdout = None
proc = subprocess.Popen(cmd, cwd=ROOT, stdout=stdout)
output = proc.communicate()[0]
if check_exit_code and proc.returncode != 0:
die('Command "%s" failed.\n%s', ' '.join(cmd), output)
return (output, proc.returncode)
def run_command(cmd, redirect_output=True, check_exit_code=True):
return run_command_with_code(cmd, redirect_output, check_exit_code)[0]
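# Illustrative usage (the command below is only an example):
#   out = run_command(['pip', '--version'])                  # captured stdout
#   out, code = run_command_with_code(['pip', '--version'])  # stdout and exit code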
def print_help():
help = """
Setup is complete.
In case the arguments do not match contact support.
"""
print(help)
def pip_install(*args):
run_command(['pip', 'install', '--upgrade'] + list(args),
redirect_output=False)
def install_dependencies(venv=None):
print('Installing dependencies with pip (this can take a while)...')
# First things first, make sure our venv has the latest pip and distribute.
pip_install('pip')
pip_install('distribute')
# pip_install('-r', PIP_REQUIRES)
def die(message, *args):
print(message % args, file=sys.stderr)
sys.exit(1)
def check_python_version():
if sys.version_info < (2, 6):
die("Need Python Version >= 2.6")
def parse_args():
"""Parse command-line arguments."""
parser = optparse.OptionParser()
return parser.parse_args()
def main(argv):
(options, args) = parse_args()
check_python_version()
install_dependencies()
print_help()
if __name__ == '__main__':
main(sys.argv) | {
"content_hash": "c88853cf723a69b471014e58e8ef9e09",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 79,
"avg_line_length": 26.06578947368421,
"alnum_prop": 0.6491670873296315,
"repo_name": "ofirgut007/Bezeqint01",
"id": "ab0ff578d868397c5ad618df6d3ffd1d59cfe2dd",
"size": "1981",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "source/env.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13078"
}
],
"symlink_target": ""
} |
"""optik.option
Defines the Option class and some standard value-checking functions.
"""
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
# Original Optik revision this is based on:
__Optik_revision__ = "option.py,v 1.19.2.1 2002/07/23 01:51:14 gward Exp"
# Copyright (c) 2001 Gregory P. Ward. All rights reserved.
# See the README.txt distributed with Optik for licensing terms.
# created 2001/10/17, GPW (from optik.py)
import sys
import string
from types import TupleType, ListType, DictType
from SCons.Optik.errors import OptionError, OptionValueError
_builtin_cvt = { "int" : (int, "integer"),
"long" : (long, "long integer"),
"float" : (float, "floating-point"),
"complex" : (complex, "complex") }
def check_builtin (option, opt, value):
(cvt, what) = _builtin_cvt[option.type]
try:
return cvt(value)
except ValueError:
raise OptionValueError(
#"%s: invalid %s argument %s" % (opt, what, repr(value)))
"option %s: invalid %s value: %s" % (opt, what, repr(value)))
def check_choice(option, opt, value):
if value in option.choices:
return value
else:
choices = string.join(map(repr, option.choices),", ")
raise OptionValueError(
"option %s: invalid choice: %s (choose from %s)"
% (opt, repr(value), choices))
# Not supplying a default is different from a default of None,
# so we need an explicit "not supplied" value.
NO_DEFAULT = "NO"+"DEFAULT"
class Option:
"""
Instance attributes:
_short_opts : [string]
_long_opts : [string]
action : string
type : string
dest : string
default : any
nargs : int
const : any
choices : [string]
callback : function
callback_args : (any*)
callback_kwargs : { string : any }
help : string
metavar : string
"""
# The list of instance attributes that may be set through
# keyword args to the constructor.
ATTRS = ['action',
'type',
'dest',
'default',
'nargs',
'const',
'choices',
'callback',
'callback_args',
'callback_kwargs',
'help',
'metavar']
# The set of actions allowed by option parsers. Explicitly listed
# here so the constructor can validate its arguments.
ACTIONS = ("store",
"store_const",
"store_true",
"store_false",
"append",
"count",
"callback",
"help",
"version")
# The set of actions that involve storing a value somewhere;
# also listed just for constructor argument validation. (If
# the action is one of these, there must be a destination.)
STORE_ACTIONS = ("store",
"store_const",
"store_true",
"store_false",
"append",
"count")
# The set of actions for which it makes sense to supply a value
# type, ie. where we expect an argument to this option.
TYPED_ACTIONS = ("store",
"append",
"callback")
# The set of known types for option parsers. Again, listed here for
# constructor argument validation.
TYPES = ("string", "int", "long", "float", "complex", "choice")
# Dictionary of argument checking functions, which convert and
# validate option arguments according to the option type.
#
# Signature of checking functions is:
# check(option : Option, opt : string, value : string) -> any
# where
# option is the Option instance calling the checker
# opt is the actual option seen on the command-line
# (eg. "-a", "--file")
# value is the option argument seen on the command-line
#
# The return value should be in the appropriate Python type
# for option.type -- eg. an integer if option.type == "int".
#
# If no checker is defined for a type, arguments will be
# unchecked and remain strings.
TYPE_CHECKER = { "int" : check_builtin,
"long" : check_builtin,
"float" : check_builtin,
"complex" : check_builtin,
"choice" : check_choice,
}
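    # For illustration only (not part of this module): a checker for a
    # hypothetical "size" type would follow the signature documented above,
    #   def check_size(option, opt, value):
    #       if not value.endswith("KB"):
    #           raise OptionValueError(
    #               "option %s: invalid size value: %s" % (opt, repr(value)))
    #       return int(value[:-2])
    # and a subclass would register it by adding it to TYPE_CHECKER.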
# CHECK_METHODS is a list of unbound method objects; they are called
# by the constructor, in order, after all attributes are
# initialized. The list is created and filled in later, after all
# the methods are actually defined. (I just put it here because I
# like to define and document all class attributes in the same
# place.) Subclasses that add another _check_*() method should
# define their own CHECK_METHODS list that adds their check method
# to those from this class.
CHECK_METHODS = None
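    # Sketch of the subclassing pattern described above (names hypothetical):
    #   class MyOption(Option):
    #       def _check_my_attr(self):
    #           ...
    #       CHECK_METHODS = Option.CHECK_METHODS + [_check_my_attr]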
# -- Constructor/initialization methods ----------------------------
def __init__ (self, *opts, **attrs):
# Set _short_opts, _long_opts attrs from 'opts' tuple
opts = self._check_opt_strings(opts)
self._set_opt_strings(opts)
# Set all other attrs (action, type, etc.) from 'attrs' dict
self._set_attrs(attrs)
# Check all the attributes we just set. There are lots of
# complicated interdependencies, but luckily they can be farmed
# out to the _check_*() methods listed in CHECK_METHODS -- which
# could be handy for subclasses! The one thing these all share
# is that they raise OptionError if they discover a problem.
for checker in self.CHECK_METHODS:
checker(self)
def _check_opt_strings (self, opts):
# Filter out None because early versions of Optik had exactly
# one short option and one long option, either of which
# could be None.
opts = filter(None, opts)
if not opts:
raise OptionError("at least one option string must be supplied",
self)
return opts
def _set_opt_strings (self, opts):
self._short_opts = []
self._long_opts = []
for opt in opts:
if len(opt) < 2:
raise OptionError(
"invalid option string %s: "
"must be at least two characters long" % (`opt`,), self)
elif len(opt) == 2:
if not (opt[0] == "-" and opt[1] != "-"):
raise OptionError(
"invalid short option string %s: "
"must be of the form -x, (x any non-dash char)" % (`opt`,),
self)
self._short_opts.append(opt)
else:
if not (opt[0:2] == "--" and opt[2] != "-"):
raise OptionError(
"invalid long option string %s: "
"must start with --, followed by non-dash" % (`opt`,),
self)
self._long_opts.append(opt)
def _set_attrs (self, attrs):
for attr in self.ATTRS:
if attrs.has_key(attr):
setattr(self, attr, attrs[attr])
del attrs[attr]
else:
if attr == 'default':
setattr(self, attr, NO_DEFAULT)
else:
setattr(self, attr, None)
if attrs:
raise OptionError(
"invalid keyword arguments: %s" % string.join(attrs.keys(),", "),
self)
# -- Constructor validation methods --------------------------------
def _check_action (self):
if self.action is None:
self.action = "store"
elif self.action not in self.ACTIONS:
raise OptionError("invalid action: %s" % (`self.action`,), self)
def _check_type (self):
if self.type is None:
# XXX should factor out another class attr here: list of
# actions that *require* a type
if self.action in ("store", "append"):
if self.choices is not None:
# The "choices" attribute implies "choice" type.
self.type = "choice"
else:
# No type given? "string" is the most sensible default.
self.type = "string"
else:
if self.type not in self.TYPES:
raise OptionError("invalid option type: %s" % (`self.type`,), self)
if self.action not in self.TYPED_ACTIONS:
raise OptionError(
"must not supply a type for action %s" % (`self.action`,), self)
def _check_choice(self):
if self.type == "choice":
if self.choices is None:
raise OptionError(
"must supply a list of choices for type 'choice'", self)
elif type(self.choices) not in (TupleType, ListType):
raise OptionError(
"choices must be a list of strings ('%s' supplied)"
% string.split(str(type(self.choices)),"'")[1], self)
elif self.choices is not None:
raise OptionError(
"must not supply choices for type %s" % (repr(self.type),), self)
def _check_dest (self):
if self.action in self.STORE_ACTIONS and self.dest is None:
# No destination given, and we need one for this action.
# Glean a destination from the first long option string,
# or from the first short option string if no long options.
if self._long_opts:
# eg. "--foo-bar" -> "foo_bar"
self.dest = string.replace(self._long_opts[0][2:],'-', '_')
else:
self.dest = self._short_opts[0][1]
def _check_const (self):
if self.action != "store_const" and self.const is not None:
raise OptionError(
"'const' must not be supplied for action %s" % (repr(self.action),),
self)
def _check_nargs (self):
if self.action in self.TYPED_ACTIONS:
if self.nargs is None:
self.nargs = 1
elif self.nargs is not None:
raise OptionError(
"'nargs' must not be supplied for action %s" % (repr(self.action),),
self)
def _check_callback (self):
if self.action == "callback":
if not callable(self.callback):
raise OptionError(
"callback not callable: %s" % (repr(self.callback),), self)
if (self.callback_args is not None and
type(self.callback_args) is not TupleType):
raise OptionError(
"callback_args, if supplied, must be a tuple: not %s"
% (repr(self.callback_args),), self)
if (self.callback_kwargs is not None and
type(self.callback_kwargs) is not DictType):
raise OptionError(
"callback_kwargs, if supplied, must be a dict: not %s"
% (repr(self.callback_kwargs),), self)
else:
if self.callback is not None:
raise OptionError(
"callback supplied (%s) for non-callback option"
% (repr(self.callback),), self)
if self.callback_args is not None:
raise OptionError(
"callback_args supplied for non-callback option", self)
if self.callback_kwargs is not None:
raise OptionError(
"callback_kwargs supplied for non-callback option", self)
CHECK_METHODS = [_check_action,
_check_type,
_check_choice,
_check_dest,
_check_const,
_check_nargs,
_check_callback]
# -- Miscellaneous methods -----------------------------------------
def __str__ (self):
if self._short_opts or self._long_opts:
return string.join(self._short_opts + self._long_opts,"/")
else:
raise RuntimeError, "short_opts and long_opts both empty!"
def takes_value (self):
return self.type is not None
# -- Processing methods --------------------------------------------
def check_value (self, opt, value):
checker = self.TYPE_CHECKER.get(self.type)
if checker is None:
return value
else:
return checker(self, opt, value)
def process (self, opt, value, values, parser):
# First, convert the value(s) to the right type. Howl if any
# value(s) are bogus.
if value is not None:
if self.nargs == 1:
value = self.check_value(opt, value)
else:
def cv(v,check=self.check_value,o=opt):
return check(o,v)
value = tuple(map(cv,value))
# And then take whatever action is expected of us.
# This is a separate method to make life easier for
# subclasses to add new actions.
return self.take_action(
self.action, self.dest, opt, value, values, parser)
def take_action (self, action, dest, opt, value, values, parser):
if action == "store":
setattr(values, dest, value)
elif action == "store_const":
setattr(values, dest, self.const)
elif action == "store_true":
setattr(values, dest, 1)
elif action == "store_false":
setattr(values, dest, 0)
elif action == "append":
values.ensure_value(dest, []).append(value)
elif action == "count":
setattr(values, dest, values.ensure_value(dest, 0) + 1)
elif action == "callback":
args = self.callback_args or ()
kwargs = self.callback_kwargs or {}
apply( self.callback, (self, opt, value, parser,)+ args, kwargs)
elif action == "help":
parser.print_help()
sys.exit(0)
elif action == "version":
parser.print_version()
sys.exit(0)
else:
raise RuntimeError, "unknown action %s" % (repr(self.action),)
return 1
# class Option
| {
"content_hash": "05a5974457362e109774fb826c371c6a",
"timestamp": "",
"source": "github",
"line_count": 388,
"max_line_length": 84,
"avg_line_length": 37.38917525773196,
"alnum_prop": 0.5301578548287034,
"repo_name": "datalogics/scons",
"id": "7493205d4d6ea35043db9637fea36620c7d41254",
"size": "14507",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/engine/SCons/Optik/option.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1519"
},
{
"name": "HTML",
"bytes": "43855"
},
{
"name": "Perl",
"bytes": "23384"
},
{
"name": "Python",
"bytes": "4756209"
},
{
"name": "Shell",
"bytes": "13866"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from __future__ import with_statement
import time
import traceback
import rpyc
import threading
from threading import Thread
from thread import allocate_lock
import unittest
# # # # #
# Testing X number of sim connections.
# # # # #
class ThreadWithReturnValue(Thread):
def __init__(self, group=None, target=None, name=None, args=(), kwargs={}, Verbose=None):
Thread.__init__(self, group, target, name, args, kwargs, Verbose)
self.daemon = False
self._return = None
def run(self):
if self._Thread__target is not None:
try:
self._return = self._Thread__target(*self._Thread__args, **self._Thread__kwargs)
except:
trace = traceback.format_exc()[34:].strip()
print('Exception::', trace)
class Test1(unittest.TestCase):
cePath = ('localhost', 8008)
userName = 'user'
max_clients = 300
conn_lock = allocate_lock()
def makeConnection(self, identifier):
proxy = False
with self.conn_lock:
# Connect to RPyc server
try:
ce_ip, ce_port = self.cePath
proxy = rpyc.connect(ce_ip, ce_port)
except:
print('*ERROR* Cannot connect to CE path `{}`! Exiting!'.format(self.cePath))
return False
# Authenticate on RPyc server
try:
check = proxy.root.login(self.userName, 'EP')
proxy.root.hello(identifier)
# print('Connect and authenticate to CE at `{}` is OK.'.format(self.cePath))
except:
check = False
if not check:
print('*ERROR* Cannot authenticate on CE path `{}`! Exiting!'.format(self.cePath))
return False
# Success
return proxy
def test_a_serial(self):
"""
Testing max nr of virtual clients.
"""
print('Testing `{}` clients in serial...'.format(self.max_clients))
conns = []
for i in range(100, self.max_clients+100):
c = self.makeConnection('client::' + str(i))
if not c: continue
print( c.root.echo('Hi there! Serial client `{}` here!'.format(c._config['connid'])) )
c.root.list_eps()
c.root.list_libraries()
c.close() ; del c
conns.append(True)
print('Made `{}` serial client connections.'.format(len(conns)))
self.assertEqual(len(conns), self.max_clients)
def test_b_parallel(self):
"""
Testing max nr of virtual clients.
"""
def hello(i=1):
c = self.makeConnection('client::' + str(i))
time.sleep(0.5)
if not c: return False
print( c.root.echo('Hi there! Parallel client `{}` here!'.format(c._config['connid'])) )
c.root.list_eps()
c.root.list_libraries()
c.close() ; del c
return True
conns = []
print('Testing `{}` clients in parallel...'.format(self.max_clients))
for i in range(100, self.max_clients+100):
t = ThreadWithReturnValue(target=hello, kwargs={'i':i})
conns.append(t)
[t.start() for t in conns]
[t.join() for t in conns]
result = [t._return for t in conns if t._return]
del conns
        print('Made `{}` parallel client connections. Threads remaining: `{}`.'.format(len(result), threading.activeCount()))
self.assertEqual(len(result), self.max_clients)
# # #
if __name__ == '__main__':
unittest.main()
# Eof()
| {
"content_hash": "3d2b4d397517cd023f52081a87fa2f7f",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 125,
"avg_line_length": 30.433333333333334,
"alnum_prop": 0.5457283680175247,
"repo_name": "ctgriffiths/twister",
"id": "12926435333dd803e52338313415e7c011d46f76",
"size": "3671",
"binary": false,
"copies": "2",
"ref": "refs/heads/git_hub_branch",
"path": "demo/unit-testing/testing_connections.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3155"
},
{
"name": "CSS",
"bytes": "18573"
},
{
"name": "HTML",
"bytes": "45771"
},
{
"name": "Java",
"bytes": "2326750"
},
{
"name": "JavaScript",
"bytes": "192415"
},
{
"name": "Perl",
"bytes": "5976"
},
{
"name": "Python",
"bytes": "1484873"
},
{
"name": "Shell",
"bytes": "12526"
},
{
"name": "Tcl",
"bytes": "21007"
}
],
"symlink_target": ""
} |
"""
===========================
Orthogonal Matching Pursuit
===========================
Using orthogonal matching pursuit for recovering a sparse signal from a noisy
measurement encoded with a dictionary
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.datasets import make_sparse_coded_signal
n_components, n_features = 512, 100
n_nonzero_coefs = 17
# generate the data
# y = Xw
# |w|_0 = n_nonzero_coefs
y, X, w = make_sparse_coded_signal(
n_samples=1,
n_components=n_components,
n_features=n_features,
n_nonzero_coefs=n_nonzero_coefs,
random_state=0,
data_transposed=True,
)
(idx,) = w.nonzero()
# distort the clean signal
y_noisy = y + 0.05 * np.random.randn(len(y))
# plot the sparse signal
plt.figure(figsize=(7, 7))
plt.subplot(4, 1, 1)
plt.xlim(0, 512)
plt.title("Sparse signal")
plt.stem(idx, w[idx])
# plot the noise-free reconstruction
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y)
coef = omp.coef_
(idx_r,) = coef.nonzero()
plt.subplot(4, 1, 2)
plt.xlim(0, 512)
plt.title("Recovered signal from noise-free measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction
omp.fit(X, y_noisy)
coef = omp.coef_
(idx_r,) = coef.nonzero()
plt.subplot(4, 1, 3)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction with number of non-zeros set by CV
omp_cv = OrthogonalMatchingPursuitCV()
omp_cv.fit(X, y_noisy)
coef = omp_cv.coef_
(idx_r,) = coef.nonzero()
plt.subplot(4, 1, 4)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements with CV")
plt.stem(idx_r, coef[idx_r])
plt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
plt.suptitle("Sparse signal recovery with Orthogonal Matching Pursuit", fontsize=16)
plt.show()
| {
"content_hash": "f50558cb84dfa1449ecfd5af8d65e421",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 84,
"avg_line_length": 25.246753246753247,
"alnum_prop": 0.7052469135802469,
"repo_name": "betatim/scikit-learn",
"id": "b0c8b5d093eee57f493c8358c75d32ec83f684ed",
"size": "1944",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "examples/linear_model/plot_omp.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "42335"
},
{
"name": "C++",
"bytes": "147316"
},
{
"name": "Cython",
"bytes": "668499"
},
{
"name": "Makefile",
"bytes": "1644"
},
{
"name": "Python",
"bytes": "10504881"
},
{
"name": "Shell",
"bytes": "41551"
}
],
"symlink_target": ""
} |
"""
Copyright 2015 SYSTRAN Software, Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import base64
import urllib3
import sys
def load_api_key(api_key_file):
try:
f = open(api_key_file, "r")
key = f.read().strip()
if key == '':
            # The key file shouldn't be blank
print('The api_key.txt file appears to be blank, please paste YOUR_API_KEY here')
sys.exit(0)
else:
# setup the key
global api_key
api_key["key"] = key
f.close()
except IOError:
# The file doesn't exist, so show the message and create the file.
print('API Key not found!')
# create a blank key file
open('api_key.txt', 'a').close()
sys.exit(0)
except Exception as e:
print(e)
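# Illustrative setup (the key value is a placeholder): either load the key
# from the api_key.txt file or set it directly before building auth settings.
#   load_api_key('api_key.txt')
#   # or: api_key['key'] = 'YOUR_API_KEY'
#   auth_settings()['apiKey']['value']   # -> the key sent as ?key=...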
def get_api_key_with_prefix(key):
global api_key
global api_key_prefix
if api_key.get(key) and api_key_prefix.get(key):
return api_key_prefix[key] + ' ' + api_key[key]
elif api_key.get(key):
return api_key[key]
def get_basic_auth_token():
global username
global password
return urllib3.util.make_headers(basic_auth=username + ':' + password).get('authorization')
def auth_settings():
return {
'apiKey': {
'type': 'api_key',
'in': 'query',
'key': 'key',
'value': get_api_key_with_prefix('key')
},
'accessToken': {
'type': 'api_key',
'in': 'header',
'key': 'Authorization',
'value': get_api_key_with_prefix('Authorization')
},
}
# Default Base url
host = "https://api-platform.systran.net"
# Default api client
api_client = None
# Authentication settings
api_key = {}
api_key_prefix = {}
username = ''
password = ''
| {
"content_hash": "8490ce5386e3a35c1951d9c6ecaebcc7",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 95,
"avg_line_length": 27.76923076923077,
"alnum_prop": 0.5678670360110804,
"repo_name": "SYSTRAN/geographic-api-python-client",
"id": "967294317ff3cbd5c7d81d32851e3d24ec26cd11",
"size": "2527",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "systran_geographic_api/configuration.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "176181"
}
],
"symlink_target": ""
} |
from datetime import date
from holidays.constants import JAN, MAY, SEP, AUG, OCT, DEC
from holidays.holiday_base import HolidayBase
from dateutil.easter import easter, EASTER_ORTHODOX
from dateutil.relativedelta import relativedelta as rd
from holidays.utils import islamic_to_gre
class NorthMacedonia(HolidayBase):
"""
https://en.wikipedia.org/wiki/Public_holidays_in_North_Macedonia
"""
country = "MK"
def __init__(self, **kwargs):
HolidayBase.__init__(self, **kwargs)
def _populate(self, year):
self[date(year, JAN, 1)] = "New Year's Day"
self[date(year, JAN, 7)] = "Christmas Day (Orthodox)"
        easter_day = easter(year, method=EASTER_ORTHODOX)
        self[easter_day + rd(days=1)] = "Easter Monday (Orthodox)"
self[date(year, MAY, 1)] = "Labour Day"
self[date(year, MAY, 24)] = "Saints Cyril and Methodius Day"
self[date(year, AUG, 2)] = "Republic Day"
self[date(year, SEP, 8)] = "Independence Day"
self[date(year, OCT, 11)] = "Day of Macedonian Uprising in 1941"
self[
date(year, OCT, 23)
] = "Day of the Macedonian Revolutionary Struggle"
self[date(year, DEC, 8)] = "Saint Clement of Ohrid Day"
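        # Eid al-Fitr falls on the 1st day of the 10th Islamic month
        # (Shawwal); islamic_to_gre converts that date to the Gregorian
        # calendar and may yield more than one date in a given year.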
for date_obs in islamic_to_gre(year, 10, 1):
self[date_obs] = "Eid al-Fitr"
class MK(NorthMacedonia):
pass
class MKD(NorthMacedonia):
pass
| {
"content_hash": "b24655198e88be04bc56bfc53d1e85a9",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 72,
"avg_line_length": 31.977272727272727,
"alnum_prop": 0.6361051883439943,
"repo_name": "ryanss/holidays.py",
"id": "180faf0ddc5b54bfbd1de0cbfcae64e639e81753",
"size": "1909",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "holidays/countries/north_macedonia.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "214061"
}
],
"symlink_target": ""
} |
import unittest
import p4lib
from mock23 import Mock
from test_utils import change_stdout, test_options, test_raw_result
HAVE_OUTPUT = "depot-file#4 - client-file"
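# `p4 have` reports one line per synced file in the form
# "<depotFile>#<haveRev> - <localFile>"; the fixture above mimics that
# layout and the tests below expect it to be parsed accordingly.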
class HaveTestCase(unittest.TestCase):
def setUp(self):
p4lib._run = Mock(spec='p4lib._run', return_value=("", "", 0))
def test_global(self):
change_stdout(HAVE_OUTPUT)
p4 = p4lib.P4()
result_files = p4.have()
p4lib._run.assert_called_with(['p4', 'have'])
self.assertEqual(1, len(result_files))
file_0 = result_files[0]
self.assertEqual(r"depot-file", file_0["depotFile"])
self.assertEqual(r"client-file", file_0["localFile"])
self.assertEqual(4, file_0["rev"])
def test_file(self):
change_stdout(HAVE_OUTPUT)
p4 = p4lib.P4()
p4.have("file.cpp")
p4lib._run.assert_called_with(['p4', 'have', 'file.cpp'])
def test_file_list(self):
change_stdout(HAVE_OUTPUT)
p4 = p4lib.P4()
p4.have(["file.cpp", "file2.cpp"])
p4lib._run.assert_called_with(['p4', 'have', 'file.cpp', 'file2.cpp'])
def test_escapes_arobas_in_filenames(self):
change_stdout(HAVE_OUTPUT)
p4 = p4lib.P4()
p4.have("//depot/file@1.cpp")
p4lib._run.assert_called_with(['p4', 'have', '//depot/file%401.cpp'])
def test_does_not_escapes_arobas_in_cl_numbers(self):
change_stdout(HAVE_OUTPUT)
p4 = p4lib.P4()
p4.have("//depot/file_1.cpp@12345")
p4lib._run.assert_called_with(['p4', 'have',
'//depot/file_1.cpp@12345'])
def test_raw_result(self):
test_raw_result(self, HAVE_OUTPUT, "have", files="file.cpp")
def test_with_options(self):
test_options(self, "have", files="file.cpp",
expected=["have", "file.cpp"])
| {
"content_hash": "4dd5f24683b0a01df310b678c4fcce06",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 78,
"avg_line_length": 28.303030303030305,
"alnum_prop": 0.5770877944325482,
"repo_name": "Mokona/python-p4lib",
"id": "c28c81c0c0aa5282ebe662eac527bdea2d5f628d",
"size": "1868",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/mocked/p4lib_have_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "21622"
},
{
"name": "Python",
"bytes": "359429"
},
{
"name": "Shell",
"bytes": "1306"
}
],
"symlink_target": ""
} |
import urllib, json, sys, os.path, argparse
import hashlib
import time
import re
# //==== EDIT THIS ====\\
log_file = open('log.txt', 'r+')
username = 'dronenerds'
sleep_time = 120
download_img = False
console_log = True
download_img1 = True
path_name = 'images/'
# //==== EDIT THIS ====\\
def find_new_images(images, existing):
ids = [i['id'] for i in existing]
return [i for i in images if i['id'] not in ids]
def get_img(username):
url = 'http://www.instagram.com/{}/media'.format(username)
response = urllib.urlopen(url)
data = json.loads(response.read())
return data['items']
def download_image(url):
filename = url.split('/')[-1]
fullfilename = os.path.join(path_name, filename)
urllib.urlretrieve(url, fullfilename)
def get_url(geturl):
instaurl = geturl['images']['standard_resolution']['url']
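    # strip the size segment (e.g. "s640x640/") and any query string,
    # which presumably leaves the URL of the full-resolution image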
path = re.sub(r'\w\d{3}x\d{3}\/', '', instaurl)
path = path.split('?')[0]
return path
def get_args():
parser = argparse.ArgumentParser(description='Download images from Instagram')
parser.add_argument('-u', '--username', type=str, help='Instagram username')
parser.add_argument('-s', '--sleep', type=int, default=120, help='How long to sleep inbetween checks')
parser.add_argument('-d', '--dry-run', action='store_false', help='Don\'t actually download old images, but download new ones')
parser.add_argument('-i', '--i-path', type= str, default='images/', help='Image download folder')
args = parser.parse_args()
return args.username, args.sleep, args.dry_run, args.i_path
def main():
    # get_args() overrides the module-level defaults above
    global username, sleep_time, download_img1, path_name
    if console_log:
        username, sleep_time, download_img1, path_name = get_args()
print "Getting twenty photos from {}".format(username)
images = get_img(username)
if not os.path.exists(path_name):
os.makedirs(path_name)
if download_img:
print "Downloading..."
for i in images:
download_image(get_url(i))
last = images
while True:
time.sleep(sleep_time)
images = get_img(username)
new_images = find_new_images(images, last)
last = images
if new_images:
print "{} new post(s)".format(len(new_images))
if download_img1:
for image in new_images:
download_image(get_url(image))
if __name__ == "__main__":
sys.exit(main())
| {
"content_hash": "2e1b336c38add34583d6cf7b431f57da",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 131,
"avg_line_length": 29.092105263157894,
"alnum_prop": 0.670737222976029,
"repo_name": "HaydnAnderson/image-downloader",
"id": "8b7bb131cca60a24ecaa3f90c8f9f30255c23be8",
"size": "2211",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "get-img.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2211"
}
],
"symlink_target": ""
} |
''' worker
to be used instead of the script included by the rqworker module
this script is similar but uses the app's config file
'''
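# Typical invocation (the queue name is only an example):
#   python worker.py high
# With no arguments the worker listens on the default queue.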
import sys
import redis
from rq import Queue, Worker, Connection
from application import app
redis_config = app.config['REDIS_CONFIG']
worker_conn = redis.Redis(host=redis_config['host'], port=redis_config['port']
, password=redis_config['password'])
def main():
with Connection(worker_conn):
queues = [Queue(connection=worker_conn)]
if len(sys.argv) > 1:
            queues = map(Queue, sys.argv[1:])
w = Worker(queues)
w.work()
if __name__ == '__main__':
main()
| {
"content_hash": "07a7421a44ac08262d2f593b0f69c704",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 78,
"avg_line_length": 24.14814814814815,
"alnum_prop": 0.6503067484662577,
"repo_name": "aquaya/pipeline",
"id": "3d1564e0cafcf23be03acd91e5ad07703a2bd367",
"size": "674",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "application/scripts/worker.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "23321"
},
{
"name": "Python",
"bytes": "294961"
}
],
"symlink_target": ""
} |
import mimetypes
import os
import shutil
import sys
import tempfile
import uuid
from datetime import datetime, timedelta
from collections import namedtuple
import pygit2
from celery import current_task
from django_statsd.clients import statsd
from django.conf import settings
from django.db import connections
from django.utils import translation
from django.utils.functional import cached_property
import olympia.core.logger
from olympia import amo
from olympia.amo.utils import id_to_path
from olympia.versions.models import Version
from olympia.files.utils import extract_extension_to_dest, get_all_files
from .models import GitExtractionEntry
log = olympia.core.logger.getLogger('z.git_storage')
# A mixture of Blob and TreeEntry
TreeEntryWrapper = namedtuple('Entry', 'tree_entry, path, blob')
BRANCHES = {
amo.CHANNEL_LISTED: 'listed',
amo.CHANNEL_UNLISTED: 'unlisted',
}
# Constants from libgit2 includes/git2/diff.h
# Since they're not in upstream pygit2 yet, we define them here (cgrebs).
# We don't have all line constants here though since we don't
# really make use of them in the frontend.
GIT_DIFF_LINE_CONTEXT = ' '
GIT_DIFF_LINE_ADDITION = '+'
GIT_DIFF_LINE_DELETION = '-'
# Both files have no LF at end
GIT_DIFF_LINE_CONTEXT_EOFNL = '='
# Old has no LF at end, new does
GIT_DIFF_LINE_ADD_EOFNL = '>'
# Old has LF at end, new does not
GIT_DIFF_LINE_DEL_EOFNL = '<'
# This matches typing in addons-frontend
GIT_DIFF_LINE_MAPPING = {
GIT_DIFF_LINE_CONTEXT: 'normal',
GIT_DIFF_LINE_ADDITION: 'insert',
GIT_DIFF_LINE_DELETION: 'delete',
GIT_DIFF_LINE_CONTEXT_EOFNL: 'normal-eofnl',
GIT_DIFF_LINE_ADD_EOFNL: 'insert-eofnl',
GIT_DIFF_LINE_DEL_EOFNL: 'delete-eofnl',
}
# Prefix folder name we are using to store extracted add-on data to avoid any
# clashes, e.g with .git folders.
EXTRACTED_PREFIX = 'extracted'
# Rename and copy threshold, 50% is the default git threshold
SIMILARITY_THRESHOLD = 50
# Some official mimetypes belong to the `text` category, even though their
# names don't include `text/`.
MIMETYPE_CATEGORY_MAPPING = {
'application/json': 'text',
'application/xml': 'text',
}
# Some mimetypes need to be changed to other mimetypes.
MIMETYPE_COMPAT_MAPPING = {
# See: https://github.com/mozilla/addons-server/issues/11382
'image/svg': 'image/svg+xml',
# See: https://github.com/mozilla/addons-server/issues/11383
'image/x-ms-bmp': 'image/bmp',
# See: https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/MIME_types#textjavascript # noqa
'application/javascript': 'text/javascript',
}
class BrokenRefError(RuntimeError):
pass
class MissingMasterBranchError(RuntimeError):
pass
def get_mime_type_for_blob(tree_or_blob, name):
"""Returns the mimetype and type category for a git blob.
The type category can be ``image``, ``directory``, ``text`` or ``binary``.
"""
if tree_or_blob == pygit2.GIT_OBJ_TREE:
return 'application/octet-stream', 'directory'
(mimetype, _) = mimetypes.guess_type(name)
# We use `text/plain` as default.
mimetype = MIMETYPE_COMPAT_MAPPING.get(mimetype, mimetype or 'text/plain')
    known_type_categories = ('image', 'text')
default_type_category = 'binary'
# If mimetype has an explicit category, use it.
type_category = (
MIMETYPE_CATEGORY_MAPPING.get(mimetype, mimetype.split('/')[0])
if mimetype
else default_type_category
)
return (
mimetype,
default_type_category
        if type_category not in known_type_categories
else type_category,
)
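# Illustrative examples (not part of the original module):
#   get_mime_type_for_blob(pygit2.GIT_OBJ_TREE, 'icons')
#     -> ('application/octet-stream', 'directory')
#   get_mime_type_for_blob(pygit2.GIT_OBJ_BLOB, 'manifest.json')
#     -> ('application/json', 'text')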
class TemporaryWorktree:
def __init__(self, repository):
self.git_repository = repository
self.name = uuid.uuid4().hex
self.temp_directory = tempfile.mkdtemp(dir=settings.TMP_PATH)
self.path = os.path.join(self.temp_directory, self.name)
self.extraction_target_path = os.path.join(self.path, EXTRACTED_PREFIX)
self.obj = None
self.repo = None
def __enter__(self):
self.obj = self.git_repository.add_worktree(self.name, self.path)
self.repo = pygit2.Repository(self.obj.path)
# Clean the workdir (of the newly created worktree)
for entry in self.repo[self.repo.head.target].tree:
path = os.path.join(self.path, entry.name)
if os.path.isfile(path):
os.unlink(path)
else:
shutil.rmtree(path)
os.makedirs(self.extraction_target_path)
return self
def __exit__(self, type, value, traceback):
# Remove temp directory
shutil.rmtree(self.temp_directory)
# Prune temp worktree
if self.obj is not None:
self.obj.prune(True)
# Remove worktree ref in upstream repository
self.git_repository.lookup_branch(self.name).delete()
class AddonGitRepository:
GIT_DESCRIPTION = '.git/description'
def __init__(self, addon_or_id, package_type='addon'):
from olympia.addons.models import Addon
assert package_type in ('addon',)
# Always enforce the search path being set to our ROOT
# setting. This is sad, libgit tries to fetch the global git
# config file (~/.gitconfig) and falls over permission errors while
# doing so in our web-environment.
        # We are setting this here to avoid creating an unnecessary global
# state but since this is overwriting a global value in pygit2 it
# affects all pygit2 calls.
# https://github.com/libgit2/pygit2/issues/339
# https://github.com/libgit2/libgit2/issues/2122
git_home = settings.ROOT
pygit2.option(
pygit2.GIT_OPT_SET_SEARCH_PATH, pygit2.GIT_CONFIG_LEVEL_GLOBAL, git_home
)
# This will cause .keep file existence checks to be skipped when
# accessing packfiles, which can help performance with remote
# filesystems.
# See: https://github.com/mozilla/addons-server/issues/13019
pygit2.option(pygit2.GIT_OPT_DISABLE_PACK_KEEP_FILE_CHECKS, True)
# Enable calling fsync() for various operations touching .git
pygit2.option(pygit2.GIT_OPT_ENABLE_FSYNC_GITDIR, True)
self.addon_id = (
addon_or_id.pk if isinstance(addon_or_id, Addon) else addon_or_id
)
self.git_repository_path = os.path.join(
settings.GIT_FILE_STORAGE_PATH,
id_to_path(self.addon_id, breadth=2),
package_type,
)
@property
def is_extracted(self):
return os.path.exists(self.git_repository_path)
@property
def is_recent(self):
git_description_path = os.path.join(
self.git_repository_path, self.GIT_DESCRIPTION
)
if not self.is_extracted or not os.path.exists(git_description_path):
return False
# A git repository is recent when it was created less than 1 hour ago.
an_hour_ago = datetime.utcnow() - timedelta(hours=1)
mtime = datetime.utcfromtimestamp(
os.path.getmtime(
# There is no way to get the creation time of a file/folder on
# most UNIX systems, so we use a file that is created by
# default but never modified (GIT_DESCRIPTION) to determine
# whether a git repository is recent or not.
git_description_path
)
)
return mtime > an_hour_ago
@cached_property
def git_repository(self):
if not self.is_extracted:
os.makedirs(self.git_repository_path)
git_repository = pygit2.init_repository(
path=self.git_repository_path, bare=False
)
# Write first commit to 'master' to act as HEAD
tree = git_repository.TreeBuilder().write()
git_repository.create_commit(
'HEAD', # ref
self.get_author(), # author, using addons-robot
                self.get_author(),  # committer, using addons-robot
'Initializing repository', # message
tree, # tree
[],
) # parents
log.info('Initialized git repository "%s"', self.git_repository_path)
else:
git_repository = pygit2.Repository(self.git_repository_path)
# We have to verify that the 'master' branch exists because we
# might have mis-initialized repositories in the past.
# See: https://github.com/mozilla/addons-server/issues/14127
try:
master_ref = 'refs/heads/master'
git_repository.lookup_reference(master_ref)
except KeyError:
message = f'Reference "{master_ref}" not found'
log.exception(message)
raise MissingMasterBranchError(message)
return git_repository
def delete(self):
if not self.is_extracted:
log.error('called delete() on a non-extracted git repository')
return
# Reset the git hash of each version of the add-on related to this git
# repository.
Version.unfiltered.filter(addon_id=self.addon_id).update(git_hash='')
shutil.rmtree(self.git_repository_path)
@classmethod
def extract_and_commit_from_version(cls, version, author=None, note=None):
"""Extract the XPI from `version` and comit it.
This is doing the following:
* Create a temporary `git worktree`_
* Remove all files in that worktree
* Extract the xpi behind `version` into the worktree
* Commit all files
Kinda like doing::
$ workdir_name=$(uuid)
$ mkdir /tmp/$workdir_name
$ git worktree add /tmp/$workdir_name
Preparing worktree (new branch 'af4172e4-d8c7…')
HEAD is now at 8c5223e Initial commit
$ git worktree list
/tmp/addon-repository 8c5223e [master]
/tmp/af4172e4-d8c7-4486-a5f2-316458da91ff 8c5223e [af4172e4-d8c7…]
$ unzip dingrafowl-falcockalo-lockapionk.zip -d /tmp/$workdir_name
Archive: dingrafowl-falcockalo-lockapionk.zip
extracting: /tmp/af4172e4-d8c7…/manifest.json
$ pushd /tmp/$workdir_name
/tmp/af4172e4-d8c7-4486-a5f2-316458da91ff /tmp/addon-repository
$ git status
On branch af4172e4-d8c7-4486-a5f2-316458da91ff
Untracked files:
(use "git add <file>..." to include in what will be committed)
manifest.json
$ git add *
$ git commit -a -m "Creating new version"
[af4172e4-d8c7-4486-a5f2-316458da91ff c4285f8] Creating new version
…
$ cd addon-repository
$ git checkout -b listed
Switched to a new branch 'listed'
# We don't technically do a full cherry-pick but it's close enough
            # and does almost what we do. We are technically committing
# directly on top of the branch as if we checked out the branch
# in the worktree (via -b) but pygit doesn't properly support that
# so we "simply" set the parents correctly.
$ git cherry-pick c4285f8
[listed a4d0f63] Creating new version…
This ignores the fact that there may be a race-condition of two
versions being created at the same time. Since all relevant file based
work is done in a temporary worktree there won't be any conflicts and
usually the last upload simply wins the race and we're setting the
HEAD of the branch (listed/unlisted) to that specific commit.
.. _`git worktree`: https://git-scm.com/docs/git-worktree
"""
current_language = translation.get_language()
try:
# Make sure we're always using the en-US locale by default
# to have unified commit messages and avoid any __str__
# to give us wrong results
translation.activate('en-US')
repo = cls(version.addon.id, package_type='addon')
branch = repo.find_or_create_branch(BRANCHES[version.channel])
note = f' ({note})' if note else ''
commit = repo._commit_through_worktree(
file_obj=version.file,
message=(
'Create new version {version} ({version_id}) for '
'{addon} from {file_obj}{note}'.format(
version=repr(version),
version_id=version.id,
addon=repr(version.addon),
file_obj=repr(version.file),
note=note,
)
),
author=author,
branch=branch,
)
if (
current_task
and current_task.request.id is not None
and not current_task.request.is_eager
):
# Extraction might have taken a while, and our connection might
# be gone and we haven't realized it. Django cleans up
# connections after CONN_MAX_AGE but only during the
# request/response cycle, so if we're inside a task let's do it
# ourselves before using the database again - it will
# automatically reconnect if needed (use 'default' since we
# want the primary db where writes go).
connections['default'].close_if_unusable_or_obsolete()
# Set the latest git hash on the related version.
version.update(git_hash=commit.hex)
finally:
translation.activate(current_language)
return repo
def get_author(self, user=None):
if user is not None:
author_name = f'User {user.id}'
author_email = user.email
else:
author_name = 'Mozilla Add-ons Robot'
author_email = 'addons-dev-automation+github@mozilla.com'
return pygit2.Signature(name=author_name, email=author_email)
def find_or_create_branch(self, name):
"""Lookup or create the branch named `name`"""
try:
branch = self.git_repository.branches.get(name)
except pygit2.GitError:
message = f'Reference for branch "{name}" is broken'
log.exception(message)
raise BrokenRefError(message)
if branch is None:
branch = self.git_repository.create_branch(
name, self.git_repository.head.peel()
)
return branch
def _commit_through_worktree(self, file_obj, message, author, branch):
"""
Create a temporary worktree that we can use to unpack the extension
without disturbing the current git workdir since it creates a new
temporary directory where we extract to.
"""
with TemporaryWorktree(self.git_repository) as worktree:
if file_obj:
# Now extract the extension to the workdir
extract_extension_to_dest(
source=file_obj.file.path,
dest=worktree.extraction_target_path,
force_fsync=True,
)
# Stage changes, `TemporaryWorktree` always cleans the whole
# directory so we can simply add all changes and have the correct
# state.
# Fetch all files and strip the absolute path but keep the
# `extracted/` prefix
files = get_all_files(worktree.extraction_target_path, worktree.path, '')
# Make sure the index is up to date
worktree.repo.index.read()
# For security reasons git doesn't allow adding .git subdirectories
# anywhere in the repository. So we're going to rename them and add
# a random postfix.
# In order to disable the effect of the special git config files,
# we also have to postfix them.
files_to_rename = (
'.git',
'.gitattributes',
'.gitconfig',
'.gitignore',
'.gitmodules',
)
# Sort files by path length to rename the deepest files first.
files.sort(key=len, reverse=True)
for filename in files:
if os.path.basename(filename) in files_to_rename:
renamed = f'{filename}.{uuid.uuid4().hex[:8]}'
shutil.move(
os.path.join(worktree.path, filename),
os.path.join(worktree.path, renamed),
)
# Add all changes to the index (git add --all ...)
worktree.repo.index.add_all()
worktree.repo.index.write()
tree = worktree.repo.index.write_tree()
            # Now create a commit directly on top of the respective branch
oid = worktree.repo.create_commit(
None,
# author, using the actual uploading user if possible.
self.get_author(author),
# committer, using addons-robot because that's the user
# actually doing the commit.
self.get_author(),
message,
tree,
# Set the current branch HEAD as the parent of this commit
# so that it'll go straight into the branches commit log
#
# We use `lookup_reference` to fetch the most up-to-date
# reference to the branch in order to avoid an error described
# in: https://github.com/mozilla/addons-server/issues/13932
[self.git_repository.lookup_reference(branch.name).target],
)
# Fetch the commit object
commit = worktree.repo.get(oid)
# And set the commit we just created as HEAD of the relevant
# branch, and updates the reflog. This does not require any
# merges.
#
# We use `lookup_reference` to fetch the most up-to-date reference
# to the branch in order to avoid an error described in:
# https://github.com/mozilla/addons-server/issues/13932
self.git_repository.lookup_reference(branch.name).set_target(commit.hex)
return commit
def get_root_tree(self, commit):
"""Return the root tree object.
This doesn't contain the ``EXTRACTED_PREFIX`` prefix folder.
"""
# When `commit` is a commit hash, e.g passed to us through the API
# serializers we have to fetch the actual commit object to proceed.
if isinstance(commit, str):
commit = self.git_repository.revparse_single(commit)
return self.git_repository[commit.tree[EXTRACTED_PREFIX].oid]
def iter_tree(self, tree):
"""Recursively iterate through a tree.
This includes the directories.
"""
for tree_entry in tree:
tree_or_blob = self.git_repository[tree_entry.oid]
if isinstance(tree_or_blob, pygit2.Tree):
yield TreeEntryWrapper(
blob=None, tree_entry=tree_entry, path=tree_entry.name
)
for child in self.iter_tree(tree_or_blob):
yield TreeEntryWrapper(
blob=child.blob,
tree_entry=child.tree_entry,
path=os.path.join(tree_entry.name, child.path),
)
else:
yield TreeEntryWrapper(
blob=tree_or_blob, tree_entry=tree_entry, path=tree_entry.name
)
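    # Illustrative usage (hypothetical variables, not part of the original
    # code): walk every entry below the root tree of a given commit.
    #   for entry in repo.iter_tree(repo.get_root_tree(commit_hash)):
    #       print(entry.path, entry.blob is None)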
def get_raw_diff(self, commit, parent=None, include_unmodified=False):
"""Return the raw diff object.
This is cached as we'll be calling it multiple times, e.g
once to render the actual diff and again to fetch specific
status information (added, removed etc) in a later step.
"""
diff_cache = getattr(self, '_diff_cache', {})
flags = pygit2.GIT_DIFF_NORMAL | pygit2.GIT_DIFF_IGNORE_WHITESPACE_CHANGE
if include_unmodified:
flags |= pygit2.GIT_DIFF_INCLUDE_UNMODIFIED
try:
return diff_cache[(commit, parent, include_unmodified)]
except KeyError:
if parent is None:
retval = self.get_root_tree(commit).diff_to_tree(
# We always show the whole file by default
context_lines=sys.maxsize,
interhunk_lines=0,
flags=flags,
swap=True,
)
else:
retval = self.git_repository.diff(
self.get_root_tree(parent),
self.get_root_tree(commit),
# We always show the whole file by default
context_lines=sys.maxsize,
flags=flags,
interhunk_lines=0,
)
diff_cache[(commit, parent, include_unmodified)] = retval
self._diff_cache = diff_cache
return retval
def get_diff(self, commit, parent=None, pathspec=None):
"""Get a diff from `parent` to `commit`.
If `parent` is not given we assume it's the first commit and handle
it accordingly.
:param pathspec: If a list of files is given we only retrieve a list
for them.
"""
diff = self.get_raw_diff(
commit, parent=parent, include_unmodified=pathspec is not None
)
changes = []
for patch in diff:
# Support for this hasn't been implemented upstream yet, we'll
# work on this upstream if needed but for now just selecting
# files based on `pathspec` works for us.
if pathspec and patch.delta.old_file.path not in pathspec:
continue
if parent is None:
changes.append(self._render_patch(patch, commit, commit, pathspec))
else:
changes.append(self._render_patch(patch, commit, parent, pathspec))
return changes
def get_deltas(self, commit, parent, pathspec=None):
"""Only fetch deltas from `parent` to `commit`.
This method specifically does not render any textual changes
but fetches as few details as possible to use a different
`pygit2` API to retrieve changes and to improve performance
significantly.
The entries returned are fairly similar to what `get_diff`
returns but don't include `hunks`, `lines_deleted` / `lines_added`
as well as `new_ending_new_line` and `old_ending_new_line`
We also don't expose `size` and `is_binary` as it's unreliable since
the `deltas` iterator tries to not examine the files content if
possible - so they might have wrong values.
"""
diff = self.get_raw_diff(
commit, parent=parent, include_unmodified=pathspec is not None
)
deltas = []
for delta in diff.deltas:
if pathspec and delta.old_file.path not in pathspec:
continue
deltas.append(
{
'path': delta.new_file.path,
'mode': delta.status_char(),
'old_path': delta.old_file.path,
'parent': commit if parent is None else parent,
'hash': commit,
}
)
return deltas
def _render_patch(self, patch, commit, parent, pathspec=None):
"""
        This will be moved to a proper DRF serializer in the future,
        but until the format is settled we'll keep it like this to simplify
        experimentation.
"""
old_ending_new_line = True
new_ending_new_line = True
hunks = []
for hunk in patch.hunks:
changes = []
for line in hunk.lines:
# Properly set line ending changes. We can do it directly
# in the for-loop as line-ending changes should always be
# present at the very end of a file so there's no risk of
# these values being overwritten.
origin = line.origin
if origin == GIT_DIFF_LINE_CONTEXT_EOFNL:
old_ending_new_line = new_ending_new_line = False
elif origin == GIT_DIFF_LINE_ADD_EOFNL:
old_ending_new_line = False
elif origin == GIT_DIFF_LINE_DEL_EOFNL:
new_ending_new_line = False
changes.append(
{
'content': line.content.rstrip('\r\n'),
'type': GIT_DIFF_LINE_MAPPING[origin],
# Can be `-1` for additions
'old_line_number': line.old_lineno,
'new_line_number': line.new_lineno,
}
)
hunks.append(
{
'header': hunk.header.rstrip('\r\n'),
'old_start': hunk.old_start,
'new_start': hunk.new_start,
'old_lines': hunk.old_lines,
'new_lines': hunk.new_lines,
'changes': changes,
}
)
        # We are exposing unchanged files fully to the frontend client
        # so that it can show them for a better review experience.
        # We are using the "include unmodified" flag for git but that
        # doesn't render any hunks and there's no way to enforce it.
        # Unfortunately that means we have to simulate line changes and
        # hunk data for unmodified files.
        # Unchanged files are *only* exposed when a diff view is explicitly
        # requested for a file. That way we increase performance for
        # regular unit tests and full-tree diffs.
generate_unmodified_fake_diff = (
not patch.delta.is_binary
and pathspec is not None
and (
patch.delta.status == pygit2.GIT_DELTA_UNMODIFIED
or (
# See:
# https://github.com/mozilla/addons-server/issues/15966
patch.delta.status == pygit2.GIT_DELTA_MODIFIED
and len(hunks) == 0
)
)
)
if generate_unmodified_fake_diff:
tree = self.get_root_tree(commit)
blob_or_tree = tree[patch.delta.new_file.path]
actual_blob = self.git_repository[blob_or_tree.oid]
mime_category = get_mime_type_for_blob(
blob_or_tree.type, patch.delta.new_file.path
)[1]
if mime_category == 'text':
data = actual_blob.data
changes = [
{
'content': line,
'type': GIT_DIFF_LINE_MAPPING[GIT_DIFF_LINE_CONTEXT],
'old_line_number': lineno,
'new_line_number': lineno,
}
for lineno, line in enumerate(data.split(b'\n'), start=1)
]
hunks.append(
{
'header': '@@ -0 +0 @@',
'old_start': 0,
'new_start': 0,
'old_lines': changes[-1]['old_line_number'],
'new_lines': changes[-1]['new_line_number'],
'changes': changes,
}
)
entry = {
'path': patch.delta.new_file.path,
'size': patch.delta.new_file.size,
'lines_added': patch.line_stats[1],
'lines_deleted': patch.line_stats[2],
'is_binary': patch.delta.is_binary,
'mode': patch.delta.status_char(),
'hunks': hunks,
'old_path': patch.delta.old_file.path,
'parent': parent,
'hash': commit,
'new_ending_new_line': new_ending_new_line,
'old_ending_new_line': old_ending_new_line,
}
return entry
def skip_git_extraction(version):
return version.addon.type != amo.ADDON_EXTENSION
def create_git_extraction_entry(version):
if skip_git_extraction(version):
log.debug(
            'Skipping git extraction of add-on "%s": not a web-extension.',
version.addon.id,
)
return
log.info('Adding add-on "%s" to the git extraction queue.', version.addon.id)
GitExtractionEntry.objects.create(addon=version.addon)
def extract_version_to_git(version_id):
"""Extract a `Version` into our git storage backend."""
# We extract deleted or disabled versions as well so we need to make sure
# we can access them.
version = Version.unfiltered.get(pk=version_id)
if skip_git_extraction(version):
# We log a warning message because this should not happen (as the CRON
# task should not select non-webextension versions).
log.warning(
'Skipping git extraction of add-on "%s": not a web-extension.',
version.addon.id,
)
return
log.info('Extracting version "%s" into git backend', version_id)
try:
with statsd.timer('git.extraction.version'):
repo = AddonGitRepository.extract_and_commit_from_version(version=version)
statsd.incr('git.extraction.version.success')
except Exception as exc:
statsd.incr('git.extraction.version.failure')
raise exc
log.info('Extracted version "%s" into "%s".', version_id, repo.git_repository_path)
| {
"content_hash": "6b609a9826a22c5fe551d5a5388822fb",
"timestamp": "",
"source": "github",
"line_count": 801,
"max_line_length": 109,
"avg_line_length": 37.54057428214732,
"alnum_prop": 0.5767542401064184,
"repo_name": "mozilla/addons-server",
"id": "f1791bfa03f7ed6e6e8011ed2ba21676538d4454",
"size": "30080",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/olympia/git/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "245459"
},
{
"name": "Dockerfile",
"bytes": "3900"
},
{
"name": "HTML",
"bytes": "290496"
},
{
"name": "JavaScript",
"bytes": "750827"
},
{
"name": "Less",
"bytes": "212819"
},
{
"name": "Makefile",
"bytes": "564"
},
{
"name": "Python",
"bytes": "6811560"
},
{
"name": "Shell",
"bytes": "8638"
},
{
"name": "Smarty",
"bytes": "1261"
}
],
"symlink_target": ""
} |
"""
Created on Wed Dec 16 20:23:01 2020
@author: Salomé
"""
# Standard library imports
import numpy as np
# Mosqito functions import
from mosqito.sq_metrics.tonality.tone_to_noise_ecma._critical_band import _critical_band
def _find_highest_tone(freqs, spec_db, index, nb_tones, ind):
"""
Method to find the two highest tones in a given spectrum from a given index
according to their critical band
Parameters
----------
freqs : numpy.array
frequency axis
spec_db : numpy.array
signal spectrum in dB
    index : numpy.array
        list of candidate tones index
    nb_tones : integer
        number of candidate tones not yet examined
    ind : integer
        index of the tone from which the search starts
    Returns
    -------
    ind_p : integer
        index of the highest tone in the critical band
    ind_s : integer
        index of the second highest tone in the critical band
    index : numpy.array
        list of candidate tones index updated
    nb_tones : integer
        number of candidate tones not yet examined updated
"""
f = freqs[ind]
# critical band centered on f
f1, f2 = _critical_band(f)
low_limit_idx = np.argmin(np.abs(freqs - f1))
high_limit_idx = np.argmin(np.abs(freqs - f2))
# Other tones in the critical band centered on f tones
multiple_idx = index[index > low_limit_idx]
multiple_idx = multiple_idx[multiple_idx < high_limit_idx]
if len(multiple_idx) > 1:
sort_spec = np.argsort(-1 * spec_db[multiple_idx])
# highest tones in the critical band
ind_p = multiple_idx[sort_spec[0]]
ind_s = multiple_idx[sort_spec[1]]
# suppression of the lower values
for s in sort_spec[2:]:
sup = np.where(index == multiple_idx[s])[0]
index = np.delete(index, sup)
nb_tones -= 1
if ind_p != ind:
# screening to find the highest value in the critical band centered on fp
ind_p, ind_s, index, nb_tones = _find_highest_tone(
freqs, spec_db, index, nb_tones, ind_p
)
else:
ind_p = ind
ind_s = None
return ind_p, ind_s, index, nb_tones
| {
"content_hash": "33cfa930cd101567befc227264728f78",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 88,
"avg_line_length": 28.584415584415584,
"alnum_prop": 0.615174920490686,
"repo_name": "Eomys/MoSQITo",
"id": "6802e004ac13f68aaa87bac98abbaa662648320e",
"size": "2226",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mosqito/sq_metrics/tonality/tone_to_noise_ecma/_find_highest_tone.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2115412"
},
{
"name": "Python",
"bytes": "318039"
}
],
"symlink_target": ""
} |
"""This module contains the classes that represent Telegram InputVenueMessageContent."""
from typing import Any
from telegram import InputMessageContent
class InputVenueMessageContent(InputMessageContent):
"""Represents the content of a venue message to be sent as the result of an inline query.
Objects of this class are comparable in terms of equality. Two objects of this class are
considered equal, if their :attr:`latitude`, :attr:`longitude` and :attr:`title`
are equal.
Note:
Foursquare details and Google Pace details are mutually exclusive. However, this
behaviour is undocumented and might be changed by Telegram.
Args:
latitude (:obj:`float`): Latitude of the location in degrees.
longitude (:obj:`float`): Longitude of the location in degrees.
title (:obj:`str`): Name of the venue.
address (:obj:`str`): Address of the venue.
foursquare_id (:obj:`str`, optional): Foursquare identifier of the venue, if known.
foursquare_type (:obj:`str`, optional): Foursquare type of the venue, if known.
(For example, "arts_entertainment/default", "arts_entertainment/aquarium" or
"food/icecream".)
google_place_id (:obj:`str`, optional): Google Places identifier of the venue.
google_place_type (:obj:`str`, optional): Google Places type of the venue. (See
`supported types <https://developers.google.com/places/web-service/supported_types>`_.)
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Attributes:
latitude (:obj:`float`): Latitude of the location in degrees.
longitude (:obj:`float`): Longitude of the location in degrees.
title (:obj:`str`): Name of the venue.
address (:obj:`str`): Address of the venue.
foursquare_id (:obj:`str`): Optional. Foursquare identifier of the venue, if known.
foursquare_type (:obj:`str`): Optional. Foursquare type of the venue, if known.
google_place_id (:obj:`str`): Optional. Google Places identifier of the venue.
google_place_type (:obj:`str`): Optional. Google Places type of the venue.
"""
__slots__ = (
'longitude',
'google_place_type',
'title',
'address',
'foursquare_id',
'foursquare_type',
'google_place_id',
'latitude',
'_id_attrs',
)
def __init__(
self,
latitude: float,
longitude: float,
title: str,
address: str,
foursquare_id: str = None,
foursquare_type: str = None,
google_place_id: str = None,
google_place_type: str = None,
**_kwargs: Any,
):
# Required
self.latitude = latitude
self.longitude = longitude
self.title = title
self.address = address
# Optionals
self.foursquare_id = foursquare_id
self.foursquare_type = foursquare_type
self.google_place_id = google_place_id
self.google_place_type = google_place_type
self._id_attrs = (
self.latitude,
self.longitude,
self.title,
)
| {
"content_hash": "931578d780f6811f97216fc1bf536ee9",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 99,
"avg_line_length": 37.76190476190476,
"alnum_prop": 0.6226355611601513,
"repo_name": "tzpBingo/github-trending",
"id": "c13107d5bb22830e15a61a1a00722b30435b92e1",
"size": "3981",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "codespace/python/telegram/inline/inputvenuemessagecontent.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "11470"
},
{
"name": "HTML",
"bytes": "1543"
},
{
"name": "Python",
"bytes": "49985109"
},
{
"name": "Shell",
"bytes": "18039"
}
],
"symlink_target": ""
} |
"""
.. _tut-overview:
Overview of MEG/EEG analysis with MNE-Python
============================================
This tutorial covers the basic EEG/MEG pipeline for event-related analysis:
loading data, epoching, averaging, plotting, and estimating cortical activity
from sensor data. It introduces the core MNE-Python data structures
`~mne.io.Raw`, `~mne.Epochs`, `~mne.Evoked`, and `~mne.SourceEstimate`, and
covers a lot of ground fairly quickly (at the expense of depth). Subsequent
tutorials address each of these topics in greater detail.
We begin by importing the necessary Python modules:
"""
# %%
import os
import numpy as np
import mne
# %%
# Loading data
# ^^^^^^^^^^^^
#
# MNE-Python data structures are based around the FIF file format from
# Neuromag, but there are reader functions for :ref:`a wide variety of other
# data formats <data-formats>`. MNE-Python also has interfaces to a
# variety of :ref:`publicly available datasets <datasets>`,
# which MNE-Python can download and manage for you.
#
# We'll start this tutorial by loading one of the example datasets (called
# ":ref:`sample-dataset`"), which contains EEG and MEG data from one subject
# performing an audiovisual experiment, along with structural MRI scans for
# that subject. The `mne.datasets.sample.data_path` function will automatically
# download the dataset if it isn't found in one of the expected locations, then
# return the directory path to the dataset (see the documentation of
# `~mne.datasets.sample.data_path` for a list of places it checks before
# downloading). Note also that for this tutorial to run smoothly on our
# servers, we're using a filtered and downsampled version of the data
# (:file:`sample_audvis_filt-0-40_raw.fif`), but an unfiltered version
# (:file:`sample_audvis_raw.fif`) is also included in the sample dataset and
# could be substituted here when running the tutorial locally.
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_filt-0-40_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file)
# %%
# By default, `~mne.io.read_raw_fif` displays some information about the file
# it's loading; for example, here it tells us that there are four "projection
# items" in the file along with the recorded data; those are :term:`SSP
# projectors <projector>` calculated to remove environmental noise from the MEG
# signals, plus a projector to mean-reference the EEG channels; these are
# discussed in the tutorial :ref:`tut-projectors-background`. In addition to
# the information displayed during loading, you can get a glimpse of the basic
# details of a `~mne.io.Raw` object by printing it; even more is available by
# printing its ``info`` attribute (a `dictionary-like object <mne.Info>` that
# is preserved across `~mne.io.Raw`, `~mne.Epochs`, and `~mne.Evoked` objects).
# The ``info`` data structure keeps track of channel locations, applied
# filters, projectors, etc. Notice especially the ``chs`` entry, showing that
# MNE-Python detects different sensor types and handles each appropriately. See
# :ref:`tut-info-class` for more on the `~mne.Info` class.
print(raw)
print(raw.info)
# %%
# `~mne.io.Raw` objects also have several built-in plotting methods; here we
# show the power spectral density (PSD) for each sensor type with
# `~mne.io.Raw.plot_psd`, as well as a plot of the raw sensor traces with
# `~mne.io.Raw.plot`. In the PSD plot, we'll only plot frequencies below 50 Hz
# (since our data are low-pass filtered at 40 Hz). In interactive Python
# sessions, `~mne.io.Raw.plot` is interactive and allows scrolling, scaling,
# bad channel marking, annotations, projector toggling, etc.
raw.plot_psd(fmax=50)
raw.plot(duration=5, n_channels=30)
# %%
# Preprocessing
# ^^^^^^^^^^^^^
#
# MNE-Python supports a variety of preprocessing approaches and techniques
# (maxwell filtering, signal-space projection, independent components analysis,
# filtering, downsampling, etc); see the full list of capabilities in the
# :mod:`mne.preprocessing` and :mod:`mne.filter` submodules. Here we'll clean
# up our data by performing independent components analysis
# (`~mne.preprocessing.ICA`); for brevity we'll skip the steps that helped us
# determine which components best capture the artifacts (see
# :ref:`tut-artifact-ica` for a detailed walk-through of that process).
# set up and fit the ICA
ica = mne.preprocessing.ICA(n_components=20, random_state=97, max_iter=800)
ica.fit(raw)
ica.exclude = [1, 2] # details on how we picked these are omitted here
ica.plot_properties(raw, picks=ica.exclude)
# %%
# Once we're confident about which component(s) we want to remove, we pass them
# as the ``exclude`` parameter and then apply the ICA to the raw signal. The
# `~mne.preprocessing.ICA.apply` method requires the raw data to be loaded into
# memory (by default it's only read from disk as-needed), so we'll use
# `~mne.io.Raw.load_data` first. We'll also make a copy of the `~mne.io.Raw`
# object so we can compare the signal before and after artifact removal
# side-by-side:
orig_raw = raw.copy()
raw.load_data()
ica.apply(raw)
# show some frontal channels to clearly illustrate the artifact removal
chs = ['MEG 0111', 'MEG 0121', 'MEG 0131', 'MEG 0211', 'MEG 0221', 'MEG 0231',
'MEG 0311', 'MEG 0321', 'MEG 0331', 'MEG 1511', 'MEG 1521', 'MEG 1531',
'EEG 001', 'EEG 002', 'EEG 003', 'EEG 004', 'EEG 005', 'EEG 006',
'EEG 007', 'EEG 008']
chan_idxs = [raw.ch_names.index(ch) for ch in chs]
orig_raw.plot(order=chan_idxs, start=12, duration=4)
raw.plot(order=chan_idxs, start=12, duration=4)
# %%
# .. _overview-tut-events-section:
#
# Detecting experimental events
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# The sample dataset includes several :term:`"STIM" channels <stim channel>`
# that recorded electrical signals sent from the stimulus delivery computer (as
# brief DC shifts / squarewave pulses). These pulses (often called "triggers")
# are used in this dataset to mark experimental events: stimulus onset,
# stimulus type, and participant response (button press). The individual STIM
# channels are combined onto a single channel, in such a way that voltage
# levels on that channel can be unambiguously decoded as a particular event
# type. On older Neuromag systems (such as that used to record the sample data)
# this summation channel was called ``STI 014``, so we can pass that channel
# name to the `mne.find_events` function to recover the timing and identity of
# the stimulus events.
events = mne.find_events(raw, stim_channel='STI 014')
print(events[:5]) # show the first 5
# %%
# The resulting events array is an ordinary 3-column :class:`NumPy array
# <numpy.ndarray>`, with sample number in the first column and integer event ID
# in the last column; the middle column is usually ignored. Rather than keeping
# track of integer event IDs, we can provide an *event dictionary* that maps
# the integer IDs to experimental conditions or events. In this dataset, the
# mapping looks like this:
#
# .. _sample-data-event-dict-table:
#
# +----------+----------------------------------------------------------+
# | Event ID | Condition |
# +==========+==========================================================+
# | 1 | auditory stimulus (tone) to the left ear |
# +----------+----------------------------------------------------------+
# | 2 | auditory stimulus (tone) to the right ear |
# +----------+----------------------------------------------------------+
# | 3 | visual stimulus (checkerboard) to the left visual field |
# +----------+----------------------------------------------------------+
# | 4 | visual stimulus (checkerboard) to the right visual field |
# +----------+----------------------------------------------------------+
# | 5 | smiley face (catch trial) |
# +----------+----------------------------------------------------------+
# | 32 | subject button press |
# +----------+----------------------------------------------------------+
event_dict = {'auditory/left': 1, 'auditory/right': 2, 'visual/left': 3,
'visual/right': 4, 'smiley': 5, 'buttonpress': 32}
# %%
# Event dictionaries like this one are used when extracting epochs from
# continuous data; the ``/`` character in the dictionary keys allows pooling
# across conditions by requesting partial condition descriptors (i.e.,
# requesting ``'auditory'`` will select all epochs with Event IDs 1 and 2;
# requesting ``'left'`` will select all epochs with Event IDs 1 and 3). An
# example of this is shown in the next section. There is also a convenient
# `~mne.viz.plot_events` function for visualizing the distribution of events
# across the duration of the recording (to make sure event detection worked as
# expected). Here we'll also make use of the `~mne.Info` attribute to get the
# sampling frequency of the recording (so our x-axis will be in seconds instead
# of in samples).
fig = mne.viz.plot_events(events, event_id=event_dict, sfreq=raw.info['sfreq'],
first_samp=raw.first_samp)
# %%
# For paradigms that are not event-related (e.g., analysis of resting-state
# data), you can extract regularly spaced (possibly overlapping) spans of data
# by creating events using `mne.make_fixed_length_events` and then proceeding
# with epoching as described in the next section.
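# (As a minimal illustration, not used further in this tutorial, something like
# ``mne.make_fixed_length_events(raw, duration=1.)`` would produce such an
# events array.)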
#
#
# .. _tut-section-overview-epoching:
#
# Epoching continuous data
# ^^^^^^^^^^^^^^^^^^^^^^^^
#
# The `~mne.io.Raw` object and the events array are the bare minimum needed to
# create an `~mne.Epochs` object, which we create with the `~mne.Epochs` class
# constructor. Here we'll also specify some data quality constraints: we'll
# reject any epoch where peak-to-peak signal amplitude is beyond reasonable
# limits for that channel type. This is done with a *rejection dictionary*; you
# may include or omit thresholds for any of the channel types present in your
# data. The values given here are reasonable for this particular dataset, but
# may need to be adapted for different hardware or recording conditions. For a
# more automated approach, consider using the `autoreject package`_.
reject_criteria = dict(mag=4000e-15, # 4000 fT
grad=4000e-13, # 4000 fT/cm
eeg=150e-6, # 150 µV
eog=250e-6) # 250 µV
# %%
# We'll also pass the event dictionary as the ``event_id`` parameter (so we can
# work with easy-to-pool event labels instead of the integer event IDs), and
# specify ``tmin`` and ``tmax`` (the time relative to each event at which to
# start and end each epoch). As mentioned above, by default `~mne.io.Raw` and
# `~mne.Epochs` data aren't loaded into memory (they're accessed from disk only
# when needed), but here we'll force loading into memory using the
# ``preload=True`` parameter so that we can see the results of the rejection
# criteria being applied:
epochs = mne.Epochs(raw, events, event_id=event_dict, tmin=-0.2, tmax=0.5,
reject=reject_criteria, preload=True)
# %%
# Next we'll pool across left/right stimulus presentations so we can compare
# auditory versus visual responses. To avoid biasing our signals to the left or
# right, we'll use `~mne.Epochs.equalize_event_counts` first to randomly sample
# epochs from each condition to match the number of epochs present in the
# condition with the fewest good epochs.
conds_we_care_about = ['auditory/left', 'auditory/right',
'visual/left', 'visual/right']
epochs.equalize_event_counts(conds_we_care_about) # this operates in-place
aud_epochs = epochs['auditory']
vis_epochs = epochs['visual']
del raw, epochs # free up memory
# %%
# Like `~mne.io.Raw` objects, `~mne.Epochs` objects also have a number of
# built-in plotting methods. One is `~mne.Epochs.plot_image`, which shows each
# epoch as one row of an image map, with color representing signal magnitude;
# the average evoked response and the sensor location are shown below the
# image:
aud_epochs.plot_image(picks=['MEG 1332', 'EEG 021'])
##############################################################################
# .. note::
#
# Both `~mne.io.Raw` and `~mne.Epochs` objects have `~mne.Epochs.get_data`
# methods that return the underlying data as a
# :class:`NumPy array <numpy.ndarray>`. Both methods have a ``picks``
# parameter for subselecting which channel(s) to return; ``raw.get_data()``
# has additional parameters for restricting the time domain. The resulting
# matrices have dimension ``(n_channels, n_times)`` for `~mne.io.Raw` and
# ``(n_epochs, n_channels, n_times)`` for `~mne.Epochs`.
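# As a quick, minimal illustration of the note above (``aud_data`` is just a
# throwaway name; this reuses the ``aud_epochs`` object created earlier):
aud_data = aud_epochs.get_data(picks=['MEG 1332'])
print(aud_data.shape)  # (n_epochs, n_channels, n_times)
# %%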
#
# Time-frequency analysis
# ^^^^^^^^^^^^^^^^^^^^^^^
#
# The :mod:`mne.time_frequency` submodule provides implementations of several
# algorithms to compute time-frequency representations, power spectral density,
# and cross-spectral density. Here, for example, we'll compute for the auditory
# epochs the induced power at different frequencies and times, using Morlet
# wavelets. On this dataset the result is not especially informative (it just
# shows the evoked "auditory N100" response); see :ref:`here
# <inter-trial-coherence>` for a more extended example on a dataset with richer
# frequency content.
frequencies = np.arange(7, 30, 3)
power = mne.time_frequency.tfr_morlet(aud_epochs, n_cycles=2, return_itc=False,
freqs=frequencies, decim=3)
power.plot(['MEG 1332'])
# %%
# Estimating evoked responses
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Now that we have our conditions in ``aud_epochs`` and ``vis_epochs``, we can
# get an estimate of evoked responses to auditory versus visual stimuli by
# averaging together the epochs in each condition. This is as simple as calling
# the `~mne.Epochs.average` method on the `~mne.Epochs` object, and then using
# a function from the :mod:`mne.viz` module to compare the global field power
# for each sensor type of the two `~mne.Evoked` objects:
aud_evoked = aud_epochs.average()
vis_evoked = vis_epochs.average()
mne.viz.plot_compare_evokeds(dict(auditory=aud_evoked, visual=vis_evoked),
legend='upper left', show_sensors='upper right')
# %%
# We can also get a more detailed view of each `~mne.Evoked` object using other
# plotting methods such as `~mne.Evoked.plot_joint` or
# `~mne.Evoked.plot_topomap`. Here we'll examine just the EEG channels, and see
# the classic auditory evoked N100-P200 pattern over dorso-frontal electrodes,
# then plot scalp topographies at some additional arbitrary times:
# sphinx_gallery_thumbnail_number = 13
aud_evoked.plot_joint(picks='eeg')
aud_evoked.plot_topomap(times=[0., 0.08, 0.1, 0.12, 0.2], ch_type='eeg')
##############################################################################
# Evoked objects can also be combined to show contrasts between conditions,
# using the `mne.combine_evoked` function. A simple difference can be
# generated by passing ``weights=[1, -1]``. We'll then plot the difference wave
# at each sensor using `~mne.Evoked.plot_topo`:
evoked_diff = mne.combine_evoked([aud_evoked, vis_evoked], weights=[1, -1])
evoked_diff.pick_types(meg='mag').plot_topo(color='r', legend=False)
##############################################################################
# Inverse modeling
# ^^^^^^^^^^^^^^^^
#
# Finally, we can estimate the origins of the evoked activity by projecting the
# sensor data into this subject's :term:`source space` (a set of points either
# on the cortical surface or within the cortical volume of that subject, as
# estimated by structural MRI scans). MNE-Python supports lots of ways of doing
# this (dynamic statistical parametric mapping, dipole fitting, beamformers,
# etc.); here we'll use minimum-norm estimation (MNE) to generate a continuous
# map of activation constrained to the cortical surface. MNE uses a linear
# :term:`inverse operator` to project EEG+MEG sensor measurements into the
# source space. The inverse operator is computed from the
# :term:`forward solution` for this subject and an estimate of :ref:`the
# covariance of sensor measurements <tut-compute-covariance>`. For this
# tutorial we'll skip those computational steps and load a pre-computed inverse
# operator from disk (it's included with the :ref:`sample data
# <sample-dataset>`). Because this "inverse problem" is underdetermined (there
# is no unique solution), here we further constrain the solution by providing a
# regularization parameter specifying the relative smoothness of the current
# estimates in terms of a signal-to-noise ratio (where "noise" here is akin to
# baseline activity level across all of cortex).
# load inverse operator
inverse_operator_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis-meg-oct-6-meg-inv.fif')
inv_operator = mne.minimum_norm.read_inverse_operator(inverse_operator_file)
# set signal-to-noise ratio (SNR) to compute regularization parameter (λ²)
snr = 3.
lambda2 = 1. / snr ** 2
# generate the source time course (STC)
stc = mne.minimum_norm.apply_inverse(vis_evoked, inv_operator,
lambda2=lambda2,
method='MNE') # or dSPM, sLORETA, eLORETA
##############################################################################
# Finally, in order to plot the source estimate on the subject's cortical
# surface we'll also need the path to the sample subject's structural MRI files
# (the ``subjects_dir``):
# path to subjects' MRI files
subjects_dir = os.path.join(sample_data_folder, 'subjects')
# plot the STC
stc.plot(initial_time=0.1, hemi='split', views=['lat', 'med'],
subjects_dir=subjects_dir)
##############################################################################
# The remaining tutorials have *much more detail* on each of these topics (as
# well as many other capabilities of MNE-Python not mentioned here:
# connectivity analysis, encoding/decoding models, lots more visualization
# options, etc). Read on to learn more!
#
# .. LINKS
#
# .. _`autoreject package`: http://autoreject.github.io/
| {
"content_hash": "33e474d089f4706026132e732531fb77",
"timestamp": "",
"source": "github",
"line_count": 371,
"max_line_length": 79,
"avg_line_length": 49.61185983827493,
"alnum_prop": 0.671846137129197,
"repo_name": "mne-tools/mne-tools.github.io",
"id": "adeba5d304078d817f60d1ac82858b04557ff5fa",
"size": "18434",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "0.24/_downloads/642494b64bd51f58d66c30234acc9e13/10_overview.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "708696"
},
{
"name": "Dockerfile",
"bytes": "1820"
},
{
"name": "HTML",
"bytes": "1526247783"
},
{
"name": "JavaScript",
"bytes": "1323087"
},
{
"name": "Jupyter Notebook",
"bytes": "24820047"
},
{
"name": "Python",
"bytes": "18575494"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import optparse
def Concatenate(filenames):
"""Concatenate files.
Args:
    filenames: Array of file names.
The last name is the target; all earlier ones are sources.
Returns:
True, if the operation was successful.
"""
if len(filenames) < 2:
print("An error occurred generating %s:\nNothing to do." % filenames[-1])
return False
try:
with open(filenames[-1], "wb") as target:
for filename in filenames[:-1]:
with open(filename, "rb") as current:
target.write(current.read())
return True
except IOError as e:
print("An error occurred when writing %s:\n%s" % (filenames[-1], e))
return False
def main():
parser = optparse.OptionParser()
parser.set_usage("""Concatenate several files into one.
Equivalent to: cat file1 ... > target.""")
(options, args) = parser.parse_args()
exit(0 if Concatenate(args) else 1)
if __name__ == "__main__":
main()
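# Example invocation (illustrative file names):
#   python concatenate-files.py part1.js part2.js bundle.js
# which is equivalent to: cat part1.js part2.js > bundle.js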
| {
"content_hash": "730ba73184912c5633b243f1c6e6d1d8",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 77,
"avg_line_length": 24.475,
"alnum_prop": 0.6384065372829418,
"repo_name": "Simran-B/arangodb",
"id": "a5dbe45cccd7f134143ea63c820e480f0b2cc11a",
"size": "2965",
"binary": false,
"copies": "9",
"ref": "refs/heads/devel",
"path": "3rdParty/V8/v7.9.317/tools/concatenate-files.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "61827"
},
{
"name": "Batchfile",
"bytes": "3282"
},
{
"name": "C",
"bytes": "275955"
},
{
"name": "C++",
"bytes": "29221660"
},
{
"name": "CMake",
"bytes": "375992"
},
{
"name": "CSS",
"bytes": "212174"
},
{
"name": "EJS",
"bytes": "218744"
},
{
"name": "HTML",
"bytes": "23114"
},
{
"name": "JavaScript",
"bytes": "30616196"
},
{
"name": "LLVM",
"bytes": "14753"
},
{
"name": "Makefile",
"bytes": "526"
},
{
"name": "NASL",
"bytes": "129286"
},
{
"name": "NSIS",
"bytes": "49153"
},
{
"name": "PHP",
"bytes": "46519"
},
{
"name": "Pascal",
"bytes": "75391"
},
{
"name": "Perl",
"bytes": "9811"
},
{
"name": "PowerShell",
"bytes": "7885"
},
{
"name": "Python",
"bytes": "181384"
},
{
"name": "Ruby",
"bytes": "1041531"
},
{
"name": "SCSS",
"bytes": "254419"
},
{
"name": "Shell",
"bytes": "128175"
},
{
"name": "TypeScript",
"bytes": "25245"
},
{
"name": "Yacc",
"bytes": "68516"
}
],
"symlink_target": ""
} |
__authors__ = ['Andrew Taylor']
import random
import time
import pygame
# Python comes with some color conversion methods.
import colorsys
# For Math things, what else
import math
from VisualisationPlugin import VisualisationPlugin
import logging
# Video available here:
# http://www.youtube.com/watch?v=ySJlUu2926A&feature=youtu.be
class SpeedingBlobsVisualisationPlugin(VisualisationPlugin):
logger = logging.getLogger(__name__)
speed_blobs = None
blob_speeds = [500]
def new_random_blob(self, canvas):
blob_entry = {}
# Random Speed, ms/pixel
blob_entry["speed"] = self.blob_speeds[random.randint(0, len(self.blob_speeds) - 1)]
w = canvas.get_width()
h = canvas.get_height()
# Random X location
blob_entry["start_x"] = random.randint(0, w)
# Random Y location
blob_entry["start_y"] = random.randint(0, h)
# Random direction
direction = {}
direction["x"] = random.randint(0, 5) - 2
direction["y"] = random.randint(0, 5) - 2
if (direction["x"] == 0 and direction["y"] == 0):
direction["x"] = 1
blob_entry["direction"] = direction
# Start time
blob_entry["start_time"] = pygame.time.get_ticks()
# Random colour
blob_entry["colour"] = float(random.randint(0, 100)) / 200.0
blob_entry["decay"] = float(random.randint(3, 6))
blob_entry["complete"] = False
return blob_entry
def initial_blob_config(self, canvas):
# Put 5 blobs in
self.speed_blobs = []
for i in range(4):
self.speed_blobs.append(self.new_random_blob(canvas))
return self.speed_blobs
def configure(self, config):
self.config = config
self.logger.info("Config: %s" % config)
self.clock = pygame.time.Clock()
# Example, and following two functions taken from http://www.pygame.org/wiki/RGBColorConversion
# Normalization method, so the colors are in the range [0, 1]
def normalize(self, color):
return color[0] / 255.0, color[1] / 255.0, color[2] / 255.0
# Reformats a color tuple, that uses the range [0, 1] to a 0xFF
# representation.
def reformat(self, color):
return int(round(color[0] * 255)) % 256, \
int(round(color[1] * 255)) % 256, \
int(round(color[2] * 255)) % 256
def draw_frame(self, canvas):
if self.speed_blobs is None:
self.initial_blob_config(canvas)
t = pygame.time.get_ticks()
self.logger.debug("Ticks: %d" % t)
canvas = self.draw_blobs(canvas, self.speed_blobs, t)
# Check to see if we need to replace a blob with a new one
for idx, blob in enumerate(self.speed_blobs):
if blob.get("complete") is True:
self.speed_blobs[idx] = self.new_random_blob(canvas)
# Limit the frame rate
self.clock.tick(25)
return canvas
def draw_splash(self, canvas):
"""
Construct a splash screen suitable to display for a plugin selection menu
"""
test_blobs = []
blob_entry = {}
# Random X location
blob_entry["x"] = 2
# Random Y location
blob_entry["y"] = 2
# Random colour
blob_entry["colour"] = 0.2
blob_entry["height"] = 2
blob_entry["decay"] = 10
test_blobs.append(blob_entry)
blob_entry = {}
# Random X location
blob_entry["x"] = (canvas.get_width() - 1) / 2.0
# Random Y location
blob_entry["y"] = (canvas.get_height() - 1) / 2.0
# Random colour
blob_entry["colour"] = 0.5
blob_entry["height"] = 0.5
blob_entry["decay"] = 7.0
test_blobs.append(blob_entry)
blob_entry = {}
# Random X location
blob_entry["x"] = (canvas.get_width() - 1) / 2.0 + 5
# Random Y location
blob_entry["y"] = (canvas.get_height() - 1) / 2.0
# Random colour
blob_entry["colour"] = 0.5
blob_entry["height"] = 0.5
blob_entry["decay"] = 7.0
test_blobs.append(blob_entry)
# Draw the blobs
canvas = self.draw_blobs(canvas, test_blobs, 0)
return canvas
def draw_blobs(self, canvas, blobs, t):
# Period
t_background_period = 20000
# Fraction of the way through
background_hue = (float(t) / float(t_background_period)) % 1
# Create a blank "sheet"
sheet = [[0 for y in range(canvas.get_height())] for x in range(canvas.get_width())]
# Draw all of the blobs
for blob in blobs:
blob_height = blob["colour"]
# If the blobs are defined as static, then
# draw them where they lie, else calculate
# where they should appear
blob_x = blob.get("x")
blob_y = blob.get("y")
if blob_x is None or blob_y is None:
# try to calculate the blob's position
t_delta = t - blob["start_time"]
# print "%d" % t_delta
squares_to_travel = float(t_delta) / float(blob["speed"])
direction = blob["direction"]
offset = blob["decay"]
x_offset = 0
y_offset = 0
x_delta = 0
y_delta = 0
if (direction["x"] == 0):
x_offset = 0
else:
x_delta = direction["x"] * squares_to_travel - blob["start_x"]
if (direction["x"] < 0):
x_offset = blob["decay"] + canvas.get_width()
if (direction["x"] > 0):
x_offset = -blob["decay"]
if (direction["y"] == 0):
y_offset = 0
else:
y_delta = direction["y"] * squares_to_travel - blob["start_y"]
if (direction["y"] < 0):
y_offset = blob["decay"] + canvas.get_height()
if (direction["y"] > 0):
y_offset = -blob["decay"]
# print "x_dir %d x_offset %d , y_dir %d y_offset %d" % (direction["x"], x_offset, direction["y"], y_offset)
blob_x = blob["start_x"] + x_delta + x_offset
blob_y = blob["start_y"] + y_delta + y_offset
if (direction["x"] > 0):
if (blob_x > canvas.get_width() + blob["decay"]):
blob["complete"] = True
else:
if (blob_x < 0 - blob["decay"]):
blob["complete"] = True
if (direction["y"] > 0):
if (blob_y > canvas.get_height() + blob["decay"]):
blob["complete"] = True
else:
if (blob_y < 0 - blob["decay"]):
blob["complete"] = True
# The central pixel should remain the correct colour at all times.
# The problem occurs when the background colour 'overtakes' the blob colour
# bg hue [0,1] , blob hue say 0.5
            # For blob hue > bg hue, it is straightforward: the hue gradually
# decreases until it meets the bg hue value (according to the appropriate
# drop-off formula
# For bg hue > blob hue, then the decay starts to go in the other direction,
# with a negative delta, and the hue should actually be increased up to the
# bg hue value
# But then what happens when the bg hue wraps?
# The bg hue wraps from 0 to 1, and now what happens to the decay? where previously
            # it may have gone through the green end of the spectrum, now it has to go through
# blue according to the above formula.
# If we think of the canvas as an sheet, and the blobs pinch the sheet up (like the general
# relativity rubber-sheet analogy, but the other way up) then it doesn't matter that numbers
# wrap, we just want to apply a height map colour, with the bottom varying
for x in range(canvas.get_width()):
for y in range(canvas.get_height()):
# Calculate how far away from the centre of the blob the centre of this pixel is
x_d = x - blob_x
y_d = y - blob_y
distance_away = math.sqrt(x_d * x_d + y_d * y_d)
decay = blob["decay"]
# Only draw pixels in the decay zone
if (distance_away < decay):
# Calculate the scaling factor
decay_amount = (math.cos(math.pi * distance_away / decay) + 1.0) / 2.0
# This compounds any blobs on top of each other automatically
sheet[x][y] += (blob_height * decay_amount)
# Now translate the sheet height into colours
for x in range(canvas.get_width()):
for y in range(canvas.get_height()):
hue = background_hue + sheet[x][y]
rgb_colour = self.reformat(colorsys.hsv_to_rgb(hue, 1.0, 1.0))
canvas.set_pixel(x, y, rgb_colour)
return canvas
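# Hedged aside (added for illustration, not part of the original plugin): the
# height-map approach described in draw_blobs works because colorsys treats
# the hue channel modulo 1.0, so a wrapped background hue needs no special
# casing when blob heights are added on top of it.
def _hue_wrap_sketch(background_hue=0.95, sheet_height=0.5):
    """Minimal sketch: a sheet height of 0.5 over a background hue of 0.95
    gives hue 1.45, which colorsys.hsv_to_rgb folds back to 0.45."""
    import colorsys
    hue = background_hue + sheet_height
    return colorsys.hsv_to_rgb(hue, 1.0, 1.0)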
| {
"content_hash": "a6e51b6355db492bc89fdec782a4f149",
"timestamp": "",
"source": "github",
"line_count": 266,
"max_line_length": 124,
"avg_line_length": 35.6015037593985,
"alnum_prop": 0.528194297782471,
"repo_name": "fraz3alpha/led-disco-dancefloor",
"id": "f12e44203eb2e713339b623fbc6d8a91240c6a47",
"size": "9470",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "software/controller/visualisation_plugins/speeding_blobs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "16623"
},
{
"name": "Eagle",
"bytes": "898737"
},
{
"name": "Python",
"bytes": "302101"
}
],
"symlink_target": ""
} |
"""Custom neural network layers.
Low-level primitives such as custom convolution with custom initialization.
This file was copied from this source:
google3/learning/brain/research/red_team/semi_supervised/libml/layers.py
"""
import numpy as np
from skai.semi_supervised.dataloader import prepare_ssl_data
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
class MixMode:
"""MixUp class to support multiple mixing modes."""
# x = labelled example
# y = unlabelled example
# xx.yxy = mix x with x, mix y with both x and y.
MODES = 'xx.yy xxy.yxy xx.yxy xx.yx xx. .yy xxy. .yxy .'.split()
def __init__(self, mode):
assert mode in self.MODES
self.mode = mode
@staticmethod
def augment_pair(x0, l0, x1, l1, beta, **kwargs):
"""MixUp application to two pair (x0, l0) and (x1, l1)."""
del kwargs
mix = tf.distributions.Beta(beta, beta).sample([tf.shape(x0)[0], 1, 1, 1])
mix = tf.maximum(mix, 1 - mix)
index = tf.random_shuffle(tf.range(tf.shape(x0)[0]))
xs = tf.gather(x1, index)
ls = tf.gather(l1, index)
xmix = x0 * mix + xs * (1 - mix)
lmix = l0 * mix[:, :, 0, 0] + ls * (1 - mix[:, :, 0, 0])
return xmix, lmix
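  # Hedged note (added for clarity, following the usual MixUp/MixMatch
  # formulation): taking max(mix, 1 - mix) keeps the mixing weight for the
  # first argument at >= 0.5, so the mixed example and label stay closer to
  # (x0, l0) than to the shuffled (x1, l1).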
@staticmethod
def augment(x, l, beta, **kwargs):
return MixMode.augment_pair(x, l, x, l, beta, **kwargs)
def __call__(self, xl, ll, betal):
assert len(xl) == len(ll) >= 2
assert len(betal) == 2
if self.mode == '.':
return xl, ll
elif self.mode == 'xx.':
mx0, ml0 = self.augment(xl[0], ll[0], betal[0])
return [mx0] + xl[1:], [ml0] + ll[1:]
elif self.mode == '.yy':
mx1, ml1 = self.augment(
tf.concat(xl[1:], 0), tf.concat(ll[1:], 0), betal[1])
return (xl[:1] + tf.split(mx1,
len(xl) - 1),
ll[:1] + tf.split(ml1,
len(ll) - 1))
elif self.mode == 'xx.yy':
mx0, ml0 = self.augment(xl[0], ll[0], betal[0])
mx1, ml1 = self.augment(
tf.concat(xl[1:], 0), tf.concat(ll[1:], 0), betal[1])
return ([mx0] + tf.split(mx1,
len(xl) - 1), [ml0] + tf.split(ml1,
len(ll) - 1))
elif self.mode == 'xxy.':
mx, ml = self.augment(
tf.concat(xl, 0), tf.concat(ll, 0),
sum(betal) / len(betal))
return (tf.split(mx, len(xl))[:1] + xl[1:],
tf.split(ml, len(ll))[:1] + ll[1:])
elif self.mode == '.yxy':
mx, ml = self.augment(
tf.concat(xl, 0), tf.concat(ll, 0),
sum(betal) / len(betal))
return (xl[:1] + tf.split(mx, len(xl))[1:],
ll[:1] + tf.split(ml, len(ll))[1:])
elif self.mode == 'xxy.yxy':
mx, ml = self.augment(
tf.concat(xl, 0), tf.concat(ll, 0),
sum(betal) / len(betal))
return tf.split(mx, len(xl)), tf.split(ml, len(ll))
elif self.mode == 'xx.yxy':
mx0, ml0 = self.augment(xl[0], ll[0], betal[0])
mx1, ml1 = self.augment(tf.concat(xl, 0), tf.concat(ll, 0), betal[1])
mx1, ml1 = [tf.split(m, len(xl))[1:] for m in (mx1, ml1)]
return [mx0] + mx1, [ml0] + ml1
elif self.mode == 'xx.yx':
mx0, ml0 = self.augment(xl[0], ll[0], betal[0])
mx1, ml1 = zip(*[
self.augment_pair(xl[i], ll[i], xl[0], ll[0], betal[1])
for i in range(1, len(xl))
])
return [mx0] + list(mx1), [ml0] + list(ml1)
raise NotImplementedError(self.mode)
def smart_shape(x):
s, t = x.shape, tf.shape(x)
return [t[i] if s[i].value is None else s[i] for i in range(len(s))]
def entropy_from_logits(logits):
"""Computes entropy from classifier logits.
Args:
logits: a tensor of shape (batch_size, class_count) representing the logits
of a classifier.
Returns:
A tensor of shape (batch_size,) of floats giving the entropies batch-wise.
"""
distribution = tfp.distributions.Categorical(logits=logits)
return distribution.entropy()
def entropy_penalty(logits, entropy_penalty_multiplier, mask):
"""Computes an entropy penalty using the classifier logits.
Args:
logits: a tensor of shape (batch_size, class_count) representing the logits
of a classifier.
entropy_penalty_multiplier: A float by which the entropy is multiplied.
mask: A tensor that optionally masks out some of the costs.
Returns:
The mean entropy penalty
"""
entropy = entropy_from_logits(logits)
losses = entropy * entropy_penalty_multiplier
losses *= tf.cast(mask, tf.float32)
return tf.reduce_mean(losses)
def kl_divergence_from_logits(logits_a, logits_b):
"""Gets KL divergence from logits parameterizing categorical distributions.
Args:
logits_a: A tensor of logits parameterizing the first distribution.
logits_b: A tensor of logits parameterizing the second distribution.
Returns:
The (batch_size,) shaped tensor of KL divergences.
"""
distribution1 = tfp.distributions.Categorical(logits=logits_a)
distribution2 = tfp.distributions.Categorical(logits=logits_b)
return tfp.distributions.kl_divergence(distribution1, distribution2)
def mse_from_logits(output_logits, target_logits):
"""Computes MSE between predictions associated with logits.
Args:
output_logits: A tensor of logits from the primary model.
target_logits: A tensor of logits from the secondary model.
Returns:
The mean MSE
"""
diffs = tf.nn.softmax(output_logits) - tf.nn.softmax(target_logits)
squared_diffs = tf.square(diffs)
return tf.reduce_mean(squared_diffs, -1)
def interleave_offsets(batch, num_augmentations):
groups = [batch // (num_augmentations + 1)] * (num_augmentations + 1)
for x in range(batch - sum(groups)):
groups[-x - 1] += 1
offsets = [0]
for g in groups:
offsets.append(offsets[-1] + g)
assert offsets[-1] == batch
return offsets
def interleave(xy, batch: int):
"""Interleave a list of batches."""
num_augmentations = len(xy) - 1
offsets = interleave_offsets(batch, num_augmentations)
def chunk(v):
return [v[offsets[p]:offsets[p + 1]] for p in range(num_augmentations + 1)]
xy = [chunk(v) for v in xy]
for i in range(1, num_augmentations + 1):
xy[0][i], xy[i][i] = xy[i][i], xy[0][i]
return [tf.concat(v, axis=0) for v in xy]
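# Hedged worked example (added for illustration, not in the original file):
# interleave_offsets(batch=8, num_augmentations=2) builds groups [2, 3, 3] and
# returns offsets [0, 2, 5, 8].  interleave() then swaps the i-th chunk of the
# i-th batch with the i-th chunk of the first batch, so the first batch ends
# up holding one chunk from every batch; presumably this keeps batch
# statistics (e.g. for batch norm) comparable across the labelled and
# augmented batches.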
def logit_norm(v):
return v * tf.rsqrt(tf.reduce_mean(tf.square(v)) + 1e-6)
def renorm(v):
return v / tf.reduce_sum(v, axis=-1, keepdims=True)
def closed_form_uniform_argmax(logt, unsort_index, nclass):
"""Closed form distribution of argmax of uniform distributions."""
# Direct implementation from stackoverflow:
# https://math.stackexchange.com/questions/158561/characterising-argmax-of-uniform-distributions
p = [0]
logt = logt.astype('d')
qni = np.zeros(logt.shape[0], 'd')
for i in range(1, nclass + 1):
qi, qni = qni, np.exp((nclass - i) * logt[:, i - 1] - logt[:, i:].sum(1))
p.append(p[-1] + (qni - qi) / (nclass - i + 1))
p = np.array(p[1:], 'f').T
return p[[[i] for i in range(logt.shape[0])], unsort_index]
def shakeshake(a, b, training):
if not training:
return 0.5 * (a + b)
mu = tf.random_uniform([tf.shape(a)[0]] + [1] * (len(a.shape) - 1), 0, 1)
mixf = a + mu * (b - a)
  mixb = a + mu[::-1] * (b - a)
return tf.stop_gradient(mixf - mixb) + mixb
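# Hedged note (added for clarity, assuming the usual shake-shake formulation):
# the expression above evaluates to mixf in the forward pass while gradients
# flow only through mixb, so the forward and backward passes see independently
# ordered (batch-reversed) mixing coefficients.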
class PMovingAverage:
"""Class to compute a moving average of probability distributions."""
def __init__(self, name, nclass, buf_size):
self.ma = tf.Variable(
tf.ones([buf_size, nclass]) / nclass, trainable=False, name=name)
def __call__(self):
v = tf.reduce_mean(self.ma, axis=0)
return v / tf.reduce_sum(v)
def update(self, entry):
entry = tf.reduce_mean(entry, axis=0)
return tf.assign(self.ma, tf.concat([self.ma[1:], [entry]], axis=0))
class PData:
"""Class to compute a running average of a labeled distribution."""
def __init__(self, dataset: prepare_ssl_data.SSLDataset):
self.has_update = False
if dataset.p_unlabeled is not None:
self.p_data = tf.constant(dataset.p_unlabeled, name='p_data')
elif dataset.p_labeled is not None:
self.p_data = tf.constant(dataset.p_labeled, name='p_data')
else:
self.p_data = tf.Variable(
renorm(tf.ones([dataset.nclass])), trainable=False, name='p_data')
self.has_update = True
def __call__(self):
return self.p_data / tf.reduce_sum(self.p_data)
def update(self, entry, decay=0.999):
entry = tf.reduce_mean(entry, axis=0)
return tf.assign(self.p_data, self.p_data * decay + entry * (1 - decay))
| {
"content_hash": "52cc1ec02dcef508da01ca61b8d8d00f",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 98,
"avg_line_length": 33.85375494071146,
"alnum_prop": 0.615061295971979,
"repo_name": "google-research/skai",
"id": "d3a631b9ab6056e8fbc927f91703cef3f996347a",
"size": "9141",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/skai/semi_supervised/layers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "368064"
},
{
"name": "Shell",
"bytes": "1252"
}
],
"symlink_target": ""
} |
import os
import math
import json
import csv
import pandas as pd
import numpy as np
import urllib.request
class LocalGovermentDirectoryStatesDataLoader:
def __init__(self, lgd_csv, wikidata_csv, clean_csv):
self.lgd_csv = lgd_csv
self.wikidata_csv = wikidata_csv
self.clean_csv = clean_csv
self.lgd_df = None
self.wikidata_df = None
self.clean_df = None
@staticmethod
def format_title(s):
# Converts to title case, except for the words like `of`, `and` etc
name_list = s.split(' ')
first_list = [name_list[0].capitalize()]
for name in name_list[1:]:
first_list.append(name if name in
["of", "and"] else name.capitalize())
return " ".join(first_list)
@staticmethod
def format_code(s):
# Converts into two character length code
# If the value is `0` then it makes it empty
# If the length is single character then it prepends it
# with `0` to make it two character length
s = s.zfill(2)
return "" if s == "00" else s
@staticmethod
def format_wikidataid(s):
return s.replace("http://www.wikidata.org/entity/", "")
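    # Hedged examples (added for illustration): format_code("0") -> "",
    # format_code("9") -> "09", format_code("28") -> "28";
    # format_wikidataid("http://www.wikidata.org/entity/Q12345") -> "Q12345".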
def process(self):
# Load the lgd states data and set the type of columns to str
# if there are NA values then replace it with '' character
self.lgd_df = pd.read_csv(self.lgd_csv, dtype=str)
self.lgd_df.fillna('', inplace=True)
# Drop title rows in the top and empty rows after 39.
# The actual data is between 2nd and 40th row. So keep only them.
self.lgd_df = self.lgd_df.iloc[1:38]
# Take the the header row and set it as column header
new_header = self.lgd_df.iloc[0]
self.lgd_df = self.lgd_df[1:]
self.lgd_df.columns = new_header
# Convert name to lower case for matching
self.lgd_df['State Name(In English)'] = self.lgd_df[
'State Name(In English)'].str.lower()
# Load wikidata and set the type of columns to str
self.wikidata_df = pd.read_csv(self.wikidata_csv, dtype=str)
# Convert name to lower case for matching
self.wikidata_df['SLabel'] = self.wikidata_df['SLabel'].str.lower()
# Match both sets based on name
# It will be left join on lgd_df
self.clean_df = pd.merge(self.lgd_df,
self.wikidata_df,
how="left",
left_on=["State Name(In English)"],
right_on=["SLabel"])
# Create a new clean name from LGD data
self.clean_df["Name"] = self.clean_df["State Name(In English)"].apply(
LocalGovermentDirectoryStatesDataLoader.format_title)
# Delete the columns that are not required
del self.clean_df["SDescription"]
del self.clean_df["SLabel"]
del self.clean_df["S.No."]
del self.clean_df["State Name(In English)"]
del self.clean_df["State Name (In Local)"]
del self.clean_df["State Version"]
del self.clean_df["State or UT"]
# Rename the columns as per our CSV requirements
self.clean_df.columns = [
"StateCode", "Census2001Code", "Census2011Code", "WikiDataId",
"IsoCode", "Name"
]
# Reformat the columns as per our CSV requirements
self.clean_df["StateCode"] = self.clean_df["StateCode"].apply(
LocalGovermentDirectoryStatesDataLoader.format_code)
self.clean_df["Census2001Code"] = self.clean_df["Census2001Code"].apply(
LocalGovermentDirectoryStatesDataLoader.format_code)
self.clean_df["Census2011Code"] = self.clean_df["Census2011Code"].apply(
LocalGovermentDirectoryStatesDataLoader.format_code)
self.clean_df["WikiDataId"] = self.clean_df["WikiDataId"].apply(
LocalGovermentDirectoryStatesDataLoader.format_wikidataid)
# Update the ISO code for Dadra and Nagar Haveli and Daman and Diu
self.clean_df.loc[self.clean_df["Name"] ==
"Dadra and Nagar Haveli and Daman and Diu",
"IsoCode"] = "IN-DH"
def save(self):
self.clean_df.to_csv(self.clean_csv, index=False, header=True)
def main():
"""Runs the program."""
lgd_csv = os.path.join(os.path.dirname(__file__),
"./data/lgd_allStateofIndia_export.csv")
wikidata_csv = os.path.join(
os.path.dirname(__file__),
"./data/wikidata_india_states_and_ut_export.csv")
clean_csv = os.path.join(os.path.dirname(__file__),
"LocalGovernmentDirectory_States.csv")
loader = LocalGovermentDirectoryStatesDataLoader(lgd_csv, wikidata_csv,
clean_csv)
loader.process()
loader.save()
if __name__ == '__main__':
main()
| {
"content_hash": "f094a3e4d26f50cd2fa4b220e4855dfc",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 80,
"avg_line_length": 39.492063492063494,
"alnum_prop": 0.5902331189710611,
"repo_name": "datacommonsorg/data",
"id": "62973c718295e1c372c907d83a1af7c5c47948c1",
"size": "5556",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/india_lgd/local_government_directory_states/preprocess.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "78"
},
{
"name": "Go",
"bytes": "51446"
},
{
"name": "HTML",
"bytes": "32842253"
},
{
"name": "JavaScript",
"bytes": "458"
},
{
"name": "Jupyter Notebook",
"bytes": "5088443"
},
{
"name": "Python",
"bytes": "3723204"
},
{
"name": "R",
"bytes": "28607"
},
{
"name": "Shell",
"bytes": "25468"
},
{
"name": "TypeScript",
"bytes": "13472"
}
],
"symlink_target": ""
} |
import os
import logging
import threading
import traceback
import contextlib
import collections
from functools import wraps
import synapse.common as s_common
import synapse.lib.task as s_task
import synapse.lib.queue as s_queue
from synapse.eventbus import EventBus
logger = logging.getLogger(__name__)
def current():
return threading.currentThread()
def iden():
return threading.currentThread().ident
def isfini():
return getattr(current(), 'isfini', False)
# Module level lock for the retnwait class
retnlock = threading.Lock()
class RetnWait:
'''
Emulate synchronous callback waiting with a thread local event.
Example:
Do a thing in a thread and wait for the thread to return:
with retnwait() as retn:
dothing(callback=retn.retn)
isset, valu = retn.wait(timeout=3)
'''
def __init__(self):
thrd = threading.currentThread()
self._retn_exc = None
self._retn_valu = None
self._retn_evnt = getattr(thrd, '_retn_evt', None)
if self._retn_evnt is None:
            self._retn_evnt = thrd._retn_evt = threading.Event()
# ensure the event is clear
self._retn_evnt.clear()
def wait(self, timeout=None):
'''
Wait for an async callback to complete.
Args:
timeout (int/float): Timeout in seconds.
Returns:
((bool, object)): A Boolean flag indicating if the operation
finished or had a timeout or error condition set. The object
is either the return value from the callback or an excfo tufo.
'''
evnt = self._retn_evnt
if evnt is None:
return True, self._retn_valu
if not evnt.wait(timeout=timeout):
return False, ('TimeOut', {})
if self._retn_exc is not None:
return False, self._retn_exc
return True, self._retn_valu
def retn(self, valu):
'''
An ease-of-use API for single value callbacks.
Args:
valu (object): The object to set the return value too.
Notes:
This sets the retn_evnt under the hood, so a caller which is
blocked on a ``wait()`` call will return the valu.
Returns:
None
'''
with retnlock:
self._retn_valu = valu
if self._retn_evnt is not None:
self._retn_evnt.set()
def errx(self, exc):
'''
Set the exception information for the current RetnWait object.
Args:
exc (Exception): An Exception, or an Exception subclass.
Notes:
            This is used by a caller to signal that an exception has occurred.
This sets the retn_evnt under the hood, so a caller which is
blocked on a ``wait()`` call will return the excfo tufo.
Returns:
None
'''
with retnlock:
self._retn_exc = s_common.getexcfo(exc)
if self._retn_evnt is not None:
self._retn_evnt.set()
def capture(self, *args, **kwargs):
'''
This can be used as a generic callback function to capture callback arguments.
Notes:
This will capture the args and kwargs passed to it.
This sets the retn_evnt under the hood, so a caller which is
blocked on a ``wait()`` call will return the *args, **kwargs.
Returns:
None
'''
with retnlock:
self._retn_valu = (args, kwargs)
if self._retn_evnt is not None:
self._retn_evnt.set()
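    # Hedged usage sketch (added for illustration; the callback name is
    # hypothetical):
    #
    #   with RetnWait() as retn:
    #       dothing(callback=retn.capture)
    #       isset, valu = retn.wait(timeout=3)
    #       # on success, valu is the (args, kwargs) the callback received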
def __enter__(self):
return self
def __exit__(self, exc, cls, tb):
with retnlock:
self._retn_evnt.clear()
self._retn_evnt = None
def withlock(lock):
def decor(f):
@wraps(f)
def wrap(*args, **kwargs):
with lock:
return f(*args, **kwargs)
return wrap
return decor
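# Hedged usage sketch (added for illustration; names are hypothetical):
#
#   statelock = threading.Lock()
#
#   @withlock(statelock)
#   def bumpcount(counts, name):
#       counts[name] = counts.get(name, 0) + 1
#
# Every call to bumpcount() then runs with statelock held.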
class cancelable:
'''
Use these to allow cancelation of blocking calls
(where possible) to shutdown threads.
Example:
with cancelable(sock.close):
byts = sock.recv(100)
'''
def __init__(self, func, *args, **kwargs):
self.func = func
self.args = args
self.kwargs = kwargs
def __call__(self):
try:
self.func(*self.args, **self.kwargs)
except Exception as e:
logger.exception('Error executing %s', self.func)
def __enter__(self):
current().cancels.append(self)
return self
def __exit__(self, exc, cls, tb):
current().cancels.pop()
return
class Thread(threading.Thread, EventBus):
'''
A thread / EventBus to allow fini() etc.
'''
def __init__(self, func, *args, **kwargs):
EventBus.__init__(self)
threading.Thread.__init__(self)
self.setDaemon(True)
self.iden = s_common.guid()
self.task = (func, args, kwargs)
self.cancels = []
self.onfini(self._onThrFini)
def run(self):
func, args, kwargs = self.task
ret = func(*args, **kwargs)
self.fire('thread:done', thread=self, ret=ret)
self.fini()
def _onThrFini(self):
[cancel() for cancel in self.cancels]
def worker(func, *args, **kwargs):
'''
Fire a worker thread to run the given func(*args,**kwargs)
'''
thr = Thread(func, *args, **kwargs)
thr.start()
return thr
def newtask(func, *args, **kwargs):
return (func, args, kwargs)
class Pool(EventBus):
'''
A thread pool for firing and cleaning up threads.
    The Pool() class can be used to keep persistent threads
for work processing as well as optionally spin up new
threads to handle "bursts" of activity.
# fixed pool of 16 worker threads
pool = Pool(size=16)
# dynamic pool of 5-10 workers
pool = Pool(size=5, maxsize=10)
    # dynamic pool of 8-<infinity> workers
pool = Pool(size=8, maxsize=-1)
'''
def __init__(self, size=3, maxsize=None):
EventBus.__init__(self)
self.workq = s_queue.Queue()
self._pool_lock = threading.Lock()
self._pool_avail = 0
if maxsize is None:
maxsize = size
self._pool_maxsize = maxsize
self._pool_threads = {}
self.onfini(self._onPoolFini)
for i in range(size):
self._fire_thread(self._run_work)
def wrap(self, func):
'''
Wrap a function to transparently dispatch via the pool.
Example:
# dispatch the message handler from a pool
bus.on('foo', pool.wrap( doFooThing ) )
'''
def poolcall(*args, **kwargs):
self.call(func, *args, **kwargs)
return poolcall
def call(self, func, *args, **kwargs):
'''
Call the given func(*args,**kwargs) in the pool.
'''
self._que_work((func, args, kwargs))
@contextlib.contextmanager
def task(self, func, *args, **kwargs):
'''
Call the given function in the pool with a task.
NOTE: Callers *must* use with-block syntax.
Example:
def foo(x):
dostuff()
def onretn(valu):
otherstuff()
with pool.task(foo, 10) as task:
task.onretn(onretn)
# the task is queued for execution *after* we
# leave the with block.
'''
call = (func, args, kwargs)
task = s_task.CallTask(call)
yield task
self._que_work((task.run, (), {}))
def _que_work(self, work):
with self._pool_lock:
if self.isfini:
raise s_common.IsFini(self.__class__.__name__)
# we're about to put work into the queue
# lets see if we should also fire another worker
# if there are available threads, no need to fire
if self._pool_avail != 0:
self.workq.put(work)
return
# got any breathing room?
if self._pool_maxsize > len(self._pool_threads):
self._fire_thread(self._run_work)
self.workq.put(work)
return
# got *all* the breathing room?
if self._pool_maxsize == -1:
self._fire_thread(self._run_work)
self.workq.put(work)
return
self.workq.put(work)
def _fire_thread(self, func, *args, **kwargs):
thr = Thread(func, *args, **kwargs)
thr.link(self.dist)
thr.name = 'SynPool(%d):%s' % (id(self), thr.iden)
self._pool_threads[thr.iden] = thr
def onfini():
self._pool_threads.pop(thr.iden, None)
thr.onfini(onfini)
thr.start()
return thr
def _run_work(self):
while not self.isfini:
self._pool_avail += 1
work = self.workq.get()
self._pool_avail -= 1
if work is None:
return
try:
func, args, kwargs = work
func(*args, **kwargs)
except Exception as e:
logger.exception('error running task for [%s]', work)
def _onPoolFini(self):
threads = list(self._pool_threads.values())
[self.workq.put(None) for i in range(len(threads))]
[t.fini() for t in threads]
#[ t.join() for t in threads ]
class RWLock:
'''
A multi-reader/exclusive-writer lock.
'''
def __init__(self):
self.lock = threading.Lock()
self.ident = os.urandom(16)
self.rw_holder = None
self.ro_holders = set()
self.ro_waiters = collections.deque()
self.rw_waiters = collections.deque()
def reader(self):
'''
Acquire a multi-reader lock.
Example:
lock = RWLock()
with lock.reader():
# other readers can be here too...
                doreads()
'''
# use thread locals with our GUID for holder ident
holder = getThreadLocal(self.ident, RWWith, self)
holder.event.clear()
holder.writer = False
with self.lock:
# if there's no rw holder, off we go!
if not self.rw_holder and not self.rw_waiters:
self.ro_holders.add(holder)
return holder
self.ro_waiters.append(holder)
holder.event.wait() # FIXME timeout
return holder
def writer(self):
'''
Acquire an exclusive-write lock.
Example:
lock = RWLock()
with lock.writer():
# no readers or other writers but us!
dowrites()
'''
holder = getThreadLocal(self.ident, RWWith, self)
holder.event.clear()
holder.writer = True
with self.lock:
if not self.rw_holder and not self.ro_holders:
self.rw_holder = holder
return holder
self.rw_waiters.append(holder)
holder.event.wait() # FIXME timeout
return holder
def release(self, holder):
'''
Used to release an RWWith holder
( you probably shouldn't use this )
'''
with self.lock:
if holder.writer:
self.rw_holder = None
# a write lock release should free readers first...
if self.ro_waiters:
while self.ro_waiters:
nexthold = self.ro_waiters.popleft()
self.ro_holders.add(nexthold)
                        nexthold.event.set()
return
if self.rw_waiters:
nexthold = self.rw_waiters.popleft()
self.rw_holder = nexthold
nexthold.event.set()
return
return
# releasing a read hold from here down...
self.ro_holders.remove(holder)
if self.ro_holders:
return
# the last reader should release a writer first
if self.rw_waiters:
nexthold = self.rw_waiters.popleft()
self.rw_holder = nexthold
nexthold.event.set()
return
# there should be no waiting readers here...
return
class RWWith:
'''
The RWWith class implements "with block" syntax for RWLock.
'''
def __init__(self, rwlock):
self.event = threading.Event()
self.writer = False
self.rwlock = rwlock
def __enter__(self):
return self
def __exit__(self, exclass, exc, tb):
self.rwlock.release(self)
def iCantWait(name=None):
'''
Mark the current thread as a no-wait thread.
Any no-wait thread will raise MustNotWait on blocking calls
within synapse APIs to prevent deadlock bugs.
Example:
iCantWait(name='FooThread')
'''
curthr = threading.currentThread()
curthr._syn_cantwait = True
if name is not None:
curthr.name = name
def iWillWait():
'''
    Check if the current thread is a marked no-wait thread and raise MustNotWait.
Example:
def doBlockingThing():
iWillWait()
waitForThing()
'''
if getattr(threading.currentThread(), '_syn_cantwait', False):
name = threading.currentThread().name
raise s_common.MustNotWait(name)
def iMayWait():
'''
Function for no-wait aware APIs to use while handling no-wait threads.
Example:
def mayWaitThing():
if not iMayWait():
return False
waitForThing()
'''
return not getattr(threading.currentThread(), '_syn_cantwait', False)
| {
"content_hash": "796f4d81c67639d8f76acb238caeaa97",
"timestamp": "",
"source": "github",
"line_count": 557,
"max_line_length": 86,
"avg_line_length": 25.06104129263914,
"alnum_prop": 0.5422308188265635,
"repo_name": "vivisect/synapse",
"id": "ab49d04b738ea8bf3176b99b6c7617d942656713",
"size": "13959",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "synapse/lib/threads.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "716598"
}
],
"symlink_target": ""
} |
"""
Module containing functions for approximating the roots of functions.
"""
from sympy import diff, var, symbols
import numpy as np
from collections import namedtuple
def newtonraph(f, x0, tol=1.0e-9, n=1000):
r"""
    Implementation of the Newton-Raphson method for approximating a root with
an initial guess x0.
Parameters
----------
f : function
The polynomial function to be evaluated. For example, to evaluate the
polynomial :math:`f(x) = 3x^3 - 5x^2 + 2x - 1`, f should be similar to:
.. code-block:: python
def f(x):
# The variable in function f must be set as x.
return 3 * x ** 3 - 5 * x ** 2 + 2 * x - 1
x0 : int or float
The initial guess of the function's root.
tol : float default 1.0e-9
The level of error tolerance between the approximated root and the
actual root.
n : int default 1000
Number of iterations.
Returns
-------
x0 : int or float
If the function evaluated at x0 is 0, the root is located at x0 and is
returned by the function.
root : namedtuple
approx is the approximate root of the function in the interval between
a and b, iter is a list of the previous iterations, and count is the
number of iterations before reaching the root approximation.
Notes
-----
The Newton-Raphson is a root-finding method that is generally fast in
convergence given the initial guess to the root is well chosen. Thus,
plotting the function before utilizing the Newton-Raphson method is
often recommended to get a good initial guess by observation.
The Newton-Raphson method starts with an initial guess (hopefully close
to the true root), the function f is then approximated by its tangent line.
The tangent line to the curve :math:`y=f(x)` at the point :math:`x = x_n` is
defined as:
.. math::
y = f'(x_n)(x - x_n) + f(x_n)
The x-intercept of the tangent line is then set as the next approximation to
the true root of the function. This leads to the Newton-Raphson iteration,
defined as:
.. math::
x_{n+1} = x_n - \frac{f(x_n)}{f'(x_n)}
Examples
--------
>>> def f(x): return x ** 3 - 2 * x - 5
>>> root, iters, niter = newtonraph(f, 2)
>>> root
2.09455148154233
>>> iters
[2.10000000000000, 2.09456812110419, 2.09455148169820, 2.09455148154233]
>>> niter
4
>>> def f2(x): return x ** 2 - 10
>>> root2, iters2, niter2 = newtonraph(f2, 3)
>>> root2
3.16227766016838
>>> iters2
[3.16666666666667, 3.16228070175439, 3.16227766016984, 3.16227766016838]
>>> niter2
4
References
----------
Newton's method. (2017, April 23). In Wikipedia, The Free Encyclopedia.
From https://en.wikipedia.org/w/index.php?title=Newton%27s_method&oldid=776802339
Press, W., Teukolsky, S., Vetterling, W., & Flannery, B. (2007). Numerical recipes (3rd ed.).
Cambridge: Cambridge University Press.
Weisstein, Eric W. "Newton's Method." From MathWorld--A Wolfram Web Resource.
http://mathworld.wolfram.com/NewtonsMethod.html
"""
if callable(f) is False:
raise TypeError('f must be a function with one parameter (variable)')
x = var('x')
root = namedtuple('root', 'approx iter count')
k = []
fx0 = f(x0)
if fx0 == 0.0:
        return x0
for i in np.arange(n):
dfx = diff(f(x))
dx = dfx.evalf(subs={x: x0})
x1 = x0 - (f(x0) / dx)
k.append(x1)
if abs(x1 - x0) < tol:
return root(approx=k[i], iter=k, count=len(k))
x0 = x1
raise ValueError('Iteration limit exceeded')
def bisection(f, a, b, tol=1.0e-9, n=1000):
r"""
Implements the bisection method of approximating a root within a given
interval :math:`[a, b]`.
Parameters
----------
f : function
The polynomial function to be evaluated. For example, to evaluate the
polynomial :math:`f(x) = 3x^3 - 5x^2 + 2x - 1`, f should be similar to:
.. code-block:: python
def f(x):
# The variable in function f must be set as x.
                return 3 * x ** 3 - 5 * x ** 2 + 2 * x - 1
a : int or float
The lower bound of the interval in which to search for the root.
b: int or float
The upper bound of the interval in which to search for the root.
tol : float default 1.0e-9
The level of error tolerance between the approximated root and the
actual root.
n : int default 1000
Number of iterations.
Returns
-------
root : namedtuple
approx is the approximate root of the function in the interval between
a and b, iter is a list of the previous iterations, and count is the
number of iterations before reaching the root approximation.
Notes
-----
The bisection method is another approach to finding the root of a continuous
function :math:`f(x)` on an interval :math:`[a, b]`. The method takes advantage of a
corollary of the intermediate value theorem called Bolzano's theorem which
states that if the values of :math:`f(a)` and :math:`f(b)` have opposite signs, the
interval must contain at least one root. The iteration steps of the bisection
method are relatively straightforward, however; convergence towards a solution
is slow compared to other root-finding methods.
Examples
--------
>>> def f(x): return x ** 3 - 2 * x - 5
>>> root, iters, niter = bisection(f, 2, 2.2)
>>> root
2.094551482051611
>>> iters
[2.1,
2.05,
2.075,
2.0875000000000004,
2.09375,
2.096875,
2.0953125,
2.09453125,
2.094921875,
2.0947265625,
2.09462890625,
2.0945800781250004,
2.0945556640625003,
2.09454345703125,
2.0945495605468754,
2.094552612304688,
2.0945510864257817,
2.094551849365235,
2.094551467895508,
2.0945516586303716,
2.09455156326294,
2.094551515579224,
2.094551491737366,
2.0945514798164373,
2.0945514857769014,
2.0945514827966694,
2.0945514813065533,
2.094551482051611]
>>> niter
28
References
----------
Bisection method. (2017, April 21). In Wikipedia, The Free Encyclopedia.
From https://en.wikipedia.org/w/index.php?title=Bisection_method&oldid=776568784
Press, W., Teukolsky, S., Vetterling, W., & Flannery, B. (2007). Numerical recipes (3rd ed.).
Cambridge: Cambridge University Press.
Weisstein, Eric W. "Bisection." From MathWorld--A Wolfram Web Resource.
http://mathworld.wolfram.com/Bisection.html
"""
if callable(f) is False:
raise TypeError('f must be a function with one parameter (variable)')
if (f(a) < 0 and f(b) < 0) or (f(a) > 0 and f(b) > 0):
raise ValueError('signs of function evaluated at points a and b must differ')
root = namedtuple('root', 'approx iter count')
k = []
for _ in np.arange(n):
c = float(a + b) / 2.0
k.append(c)
if f(c) == 0 or float(b - a) / 2.0 < tol:
return root(approx=c, iter=k, count=len(k))
if np.sign(f(c)) == np.sign(f(a)):
a = c
else:
b = c
raise ValueError('Iteration limit exceeded without convergence')
def secant(f, x0, x1, tol=1.0e-9, n=1000):
r"""
Approximates a root of a function using the secant method given two
initial guesses ideally located near the true root.
Parameters
----------
f : function
The polynomial function to be evaluated. For example, to evaluate the
polynomial :math:`f(x) = 3x^3 - 5x^2 + 2x - 1`, f should be similar to:
.. code-block:: python
def f(x):
# The variable in function f must be set as x.
                return 3 * x ** 3 - 5 * x ** 2 + 2 * x - 1
x0 : int or float
Lower bound of the interval in which to search for a root of the function.
The lower bound should ideally be located close to the true root.
x1 : int or float
Upper bound of the interval. The upper bound should ideally be located close
to the true root.
Returns
-------
root : namedtuple
approx is the approximate root of the function in the interval between
a and b, iter is a list of the previous iterations, and count is the
number of iterations before reaching the root approximation.
Notes
-----
The secant method for finding roots of nonlinear equations is a variation
of the Newton-Raphson method that takes two initial guesses of the root,
compared to just one guess required for Newton-Raphson. Due to the extra
computations as a result of requiring two approximations, the secant method
often converges more slowly compared to Newton-Raphson; however, it is usually
more stable. The secant method has another advantage over NR as it does not need
the derivative of the function in question to be known or assumed to be easily
calculated. The secant method uses secant lines (hence the need for two initial
starting values) to find the root of a function while the Newton-Raphson method
approximates the root with a tangent line.
The general iteration equation of the secant method given two initial guesses
:math:`x_0` and :math:`x_1` is defined as:
.. math::
x_{n+1} = x_n - f(x_n) \bigg/ \frac{f(x_n) - f(x_{n-1})}{x_n - x_{n-1}}
Examples
--------
    >>> def f(x): return x ** 3 - 2 * x - 5
>>> root, iters, niter = secant(f, 1, 3)
>>> root
2.0945514815423265
>>> iters
[1.5454545454545454,
1.8591632292280496,
2.2003500781687437,
2.0797991804599714,
2.09370424253899,
2.094558562633902,
2.094551478163657,
2.094551481542313,
2.0945514815423265]
>>> niter
9
>>> def f2(x): return x ** 2 - 10
>>> root2, iters2, niter2 = secant(f2, 3, 4)
>>> root2
3.162277660168379
>>> iters
[3.142857142857143,
3.16,
3.1622846781504985,
3.1622776576400877,
3.1622776601683764,
3.162277660168379]
>>> niter
6
References
----------
Press, W., Teukolsky, S., Vetterling, W., & Flannery, B. (2007). Numerical recipes (3rd ed.).
Cambridge: Cambridge University Press.
Weisstein, Eric W. "Secant Method." From MathWorld--A Wolfram Web Resource.
http://mathworld.wolfram.com/SecantMethod.html
"""
if callable(f) is False:
raise TypeError('f must be a function with one parameter (variable)')
root = namedtuple('root', 'approx iter count')
k = []
for _ in np.arange(n):
x2 = x1 - f(x1) / (float(f(x1) - f(x0)) / float(x1 - x0))
k.append(x2)
if abs(x2 - x1) < tol:
return root(approx=x2, iter=k, count=len(k))
x0 = x1
x1 = x2
raise ValueError('Iteration limit exceeded without convergence')
#def horner(f, x0, tol=1.0e-9, n=1000):
# if callable(f) is False:
# return 'f must be a function'
#
# x = symbols('x')
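# Hedged sketch (added for illustration, not part of the original module): the
# commented-out stub above suggests a Horner's-method routine was planned.
# Horner's scheme evaluates a polynomial from its coefficients, ordered from
# the highest degree down, e.g. [3, -5, 2, -1] for 3x^3 - 5x^2 + 2x - 1.
def _horner_eval_sketch(coefficients, x0):
    """Evaluate a polynomial at x0 using Horner's scheme."""
    result = 0.0
    for c in coefficients:
        result = result * x0 + c
    return result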
| {
"content_hash": "8f296c539b4d63efa7ef3e9a6a15021b",
"timestamp": "",
"source": "github",
"line_count": 359,
"max_line_length": 97,
"avg_line_length": 32.31197771587744,
"alnum_prop": 0.5979310344827586,
"repo_name": "aschleg/mathpy",
"id": "6692d9f942236b12f7543afec7ba1fafea920132",
"size": "11617",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mathpy/numerical/roots.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "448416"
}
],
"symlink_target": ""
} |
import numpy as np
import theano
import theano.tensor as T
import lasagne
from neuralnet import train
from custom_updates import *
from SVRGOptimizer import SVRGOptimizer
from StreamingSVRGOptimizer import StreamingSVRGOptimizer
from operator import itemgetter
MLPBN = True
def classifier_network(input_var, n_input, n_hidden, n_output):
input_layer = lasagne.layers.InputLayer(shape=(None, n_input), input_var=input_var)
hidden_layer = lasagne.layers.batch_norm(
lasagne.layers.DenseLayer(
input_layer, # lasagne.layers.dropout(input_layer, p=0.5),
num_units=n_hidden,
nonlinearity=lasagne.nonlinearities.rectify)
)
hidden_layer = lasagne.layers.batch_norm(
lasagne.layers.DenseLayer(
hidden_layer, # lasagne.layers.dropout(input_layer, p=0.5),
num_units=n_hidden,
nonlinearity=lasagne.nonlinearities.rectify)
)
output_layer = lasagne.layers.batch_norm(
lasagne.layers.DenseLayer(hidden_layer, num_units=n_output, nonlinearity=lasagne.nonlinearities.softmax)
)
return input_layer, hidden_layer, output_layer
class NeuralClassifier:
def __init__(self, n_input, n_hidden, n_output, input_var=None):
self.n_input = n_input
self.n_hidden = n_hidden
self.n_output = n_output
self.input_var = input_var or T.matrix('inputs')
self.target_var = T.ivector('targets')
self.input_layer, self.hidden_layer, self.output_layer = classifier_network(self.input_var, n_input, n_hidden, n_output)
def train(self, X_train, Y_train, X_val=None, Y_val=None, X_test=None, y_test=None,
objective=lasagne.objectives.binary_crossentropy,
update=lasagne.updates.adam,
n_epochs=100, batch_size=100, gradient="SVRG" , lambd=0.0,
**update_params):
network = self.output_layer
prediction = lasagne.layers.get_output(network)
l2_reg = lasagne.regularization.regularize_layer_params(network, lasagne.regularization.l2)
loss = objective(prediction, self.target_var) + lambd * l2_reg
loss = loss.mean()
params = lasagne.layers.get_all_params(network, trainable=True)
# svrg = False
if (update == custom_svrg1):
optimizer = SVRGOptimizer(update_params['m'], update_params['learning_rate'])
            # update frequency m and the learning rate come from update_params
train_error, validation_error, acc_train, acc_val, acc_test, test_error = optimizer.minimize(loss, params,
X_train, Y_train, X_test, y_test,
self.input_var, self.target_var,
X_val, Y_val,
n_epochs=n_epochs, batch_size=batch_size, output_layer=network)
elif (update == custom_streaming_svrg1):
optimizer = StreamingSVRGOptimizer(update_params['m'], update_params['learning_rate'], update_params['k_s_0'], update_params['k_s_ratio'])
train_error, validation_error, acc_train, acc_val, acc_test, test_error = optimizer.minimize(loss, params,
X_train, Y_train, X_test, y_test,
self.input_var, self.target_var,
X_val, Y_val,
n_epochs=n_epochs, batch_size=batch_size, output_layer=network)
else: # The AdaGrad version of SGD
updates = update(loss, params, **update_params)
train_fn = theano.function([self.input_var, self.target_var], loss, updates=updates)
if X_val is not None:
test_prediction = lasagne.layers.get_output(network, deterministic=True)
test_loss = objective(test_prediction, self.target_var)
test_loss = test_loss.mean()
test_acc_fn = T.mean(T.eq(T.argmax(test_prediction, axis=1), self.target_var),dtype=theano.config.floatX)
val_fn = theano.function([self.input_var, self.target_var], [test_loss, test_acc_fn])
else:
val_fn = None
test_error = []
acc_test = []
# these two are not realized yet
train_error, validation_error, acc_train, acc_val = train(
X_train, Y_train, X_val, Y_val,
train_fn, val_fn,
n_epochs, batch_size=batch_size#, toprint=it
)
np.savetxt("data/""_mlpbn"+str(MLPBN)+"_"+ gradient +"_loss_train.txt",train_error)
np.savetxt("data/""_mlpbn"+str(MLPBN)+"_"+ gradient +"_loss_val.txt",map(itemgetter(0), validation_error))
np.savetxt("data/""_mlpbn"+str(MLPBN)+"_"+ gradient +"_loss_gradient_number.txt",map(itemgetter(1),validation_error))
np.savetxt("data/""_mlpbn"+str(MLPBN)+"_"+ gradient +"_loss_test.txt",test_error)
np.savetxt("data/""_mlpbn"+str(MLPBN)+"_"+ gradient +"_acc_train.txt",acc_train)
np.savetxt("data/""_mlpbn"+str(MLPBN)+"_"+ gradient +"_acc_val.txt",acc_val)
np.savetxt("data/""_mlpbn"+str(MLPBN)+"_"+ gradient +"_acc_test.txt",acc_test)
return train_error, validation_error
| {
"content_hash": "c260bb99e562982c3accf84e9256c622",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 150,
"avg_line_length": 44.075630252100844,
"alnum_prop": 0.5982840800762631,
"repo_name": "myt00seven/svrg",
"id": "17c8d4746c023027bc792c3ec10b8ae2c8aa0fab",
"size": "5245",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bk/svrg_bn_bk/neuralclassifier.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "69093"
},
{
"name": "Python",
"bytes": "1064699"
},
{
"name": "Shell",
"bytes": "166"
}
],
"symlink_target": ""
} |
'''
This script produces a custom Gource log file that can be passed to Gource for
it to display family history information. Currently it supports ancestors only.
Choose a focus person and pass this person's name along with the Gramps .gramps
file path to the script via command line arguments.
$ python gramps2gource.py --name="Focus Person" --db=path/to/filename.gramps
Then display the custom log using gource:
$ cat /path/to/pedigree_<name>.log | gource -1280x720 --log-format custom
--font-size 20 --hide users,dirnames,date --stop-at-end
--camera-mode overview --seconds-per-day 1 --disable-bloom
--auto-skip-seconds 1 -i 0 -c 3.0 -
The visualisation can be recorded to file using:
$ cat /path/to/pedigree_<name>.log | gource -1280x720 --log-format custom
--font-size 20 --hide users,dirnames,date --stop-at-end
--camera-mode overview --seconds-per-day 1 --disable-bloom
--auto-skip-seconds 1 -i 0 -c 3.0 -output-ppm-stream -
--output-framerate 60 - | avconv -y -r 60 -f image2pipe -vcodec ppm -i -
-b 8192K /path/to/pedigree_<name>.mp4
Author: Chris Laws
'''
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future.builtins import open
from future.builtins import int
import datetime
import logging
import sys
import time
import gramps
logger = logging.getLogger(__name__)
ref_dt = datetime.datetime(1970, 1, 1, 0, 0, 0)
ref_timestamp = time.mktime(ref_dt.timetuple())
try:
secondsInOneDay = datetime.timedelta(days=1).total_seconds()
except AttributeError as ex:
# python2.6 does not have total_seconds
one_day_dt = datetime.timedelta(days=1)
secondsInOneDay = (one_day_dt.microseconds + (one_day_dt.seconds + one_day_dt.days * 24 * 3600) * 10**6) / 10**6
GOURCE_ADDED = 'A' # maps to birth
GOURCE_DELETED = 'D' # maps to death
GOURCE_MODIFIED = 'M' # maps to change
GOURCE_UNKNOWN = '?' # maps to nothing
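# Hedged example (added for illustration; name and values are made up) of the
# custom Gource log lines written below, in "timestamp|user|type|path" form:
#   -157766400|smith|A|<handle>/<parent handle>/John Smith (1920-1990)
# Timestamps are negated later so Gource plays the pedigree in reverse order.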
class Gramps2Gource(object):
'''
Create Gource custom logs from Gramps data files.
'''
def __init__(self, gramps_file):
self.db = gramps.parser.parse(gramps_file)
def get_ancestors(self, person, ancestors=None, gource_prefix=None):
"""
Return an unordered list of tuples for this person and their
ancestors. Each tuple contains a person handle and a pseudo-path
to be used by Gource.
"""
logger.debug("Collecting ancestors for {0}".format(person.name))
if ancestors is None:
ancestors = []
# Construct a pseudo path from the person's unique handle.
if gource_prefix:
gource_prefix = "{0}/{1}".format(gource_prefix, person.handle)
else:
gource_prefix = person.handle
person_name = person.name_with_dates
gource_path = "{0}/{1}".format(gource_prefix, person_name)
ancestors.append((person.handle, gource_path))
if person.child_of_handle:
family = self.db.get_family(person.child_of_handle)
# walk up the father's tree
if family.father:
self.get_ancestors(family.father,
ancestors=ancestors,
gource_prefix=gource_prefix)
# walk up the mother's tree
if family.mother:
self.get_ancestors(family.mother,
ancestors=ancestors,
gource_prefix=gource_prefix)
return ancestors
def pedigree(self, names, output_file):
"""
Creates a custom Gource log containing the pedigree information for
the specified names.
"""
if not names:
logger.error("No focus persons supplied")
sys.exit(1)
all_records = []
for name in names:
person_handles = []
logger.info("Generating pedigree output for: {0}".format(name))
person_handle = self.db.find_person(name)
if person_handle:
person = self.db.get_person(person_handle)
ancestor_handles = self.get_ancestors(person)
logger.debug("{0} has {1} ancestors in the database".format(
name, len(ancestor_handles)))
person_handles = ancestor_handles
if person_handles:
people_to_plot = []
for person_handle, person_gource_path in person_handles:
person = self.db.get_person(person_handle)
try:
associated_events = person.associated_events()
except TypeError:
associated_events = []
# Filter associated events to only include those with
# dates. Only dated events are useful when outputing
# a Gource formatted log.
associated_events_with_dates = []
for associated_event in associated_events:
obj, event, directEvent = associated_event
if event.date:
associated_events_with_dates.append(
associated_event)
if associated_events_with_dates:
people_to_plot.append(
(person, person_gource_path,
associated_events_with_dates))
if people_to_plot:
logger.info(
"Starting generation of custom gource log data")
records = self._to_pedigree_gource_log_format(
people_to_plot)
all_records.extend(records)
logger.info(
"Finished generation of custom gource log data")
if all_records:
# Sort events by time such that Gource displays the pedigree in reverse order
logger.info(
"Adjusting timestamps so gource displays them in reverse order")
records = [(ts * -1, name, event, path) for ts, name, event, path in all_records]
records.sort()
logger.info("Writing custom gource log data to {0}".format(output_file))
with open(output_file, 'w') as fd:
for ts, name, event, path in records:
fd.write("{0}|{1}|{2}|{3}\n".format(ts, name, event, path))
fd.write("\n") # add an empty line at the end to trigger EOF
logger.info(
"Completed. Custom gource log file: {0}".format(
output_file))
else:
logger.error(
"No gource log file created - no records to write")
def _to_gource_log_format(self, person_events):
"""
Return a list of custom gource formatted log entries based on the list
of person events passed in.
"""
records = []
for person, person_gource_path, related_events in person_events:
logger.debug("Creating log entries for {0}".format(person.name))
# Reduce events to only those that contain dates
related_events_with_dates = []
for related_event in related_events:
person_family_object, event, directEvent = related_event
if event.date:
related_events_with_dates.append(related_event)
else:
logger.debug("No date for event {0}".format(event.type))
if related_events_with_dates:
for obj, event, directEvent in related_events_with_dates:
if event.datetime.year < ref_dt.year:
# Year is less than the epoch meaning we can't use
# time.mktime to create a useful timestamp for us.
# Instead, subtract the necessary seconds from the
# epoch time to arrive at the event time.
ref_delta = ref_dt - event.datetime
delta_seconds = ref_delta.total_seconds()
timestamp = ref_timestamp - delta_seconds
else:
timestamp = time.mktime(event.datetime.timetuple())
# Gource requires timestamp as an int
timestamp = int(timestamp)
if event.type == 'Birth':
if directEvent:
gource_event = GOURCE_ADDED
else:
gource_event = GOURCE_MODIFIED
elif event.type in ['Baptism', 'Christening']:
gource_event = GOURCE_MODIFIED
elif event.type == 'Death':
gource_event = GOURCE_DELETED
elif event.type in ['Burial', 'Cremation']:
gource_event = GOURCE_MODIFIED
elif event.type in ['Marriage', 'Marriage Banns']:
gource_event = GOURCE_MODIFIED
elif event.type == 'Census':
gource_event = GOURCE_MODIFIED
elif event.type in ["Divorce", 'Divorce Filing']:
gource_event = GOURCE_MODIFIED
elif event.type == "Electoral Roll":
gource_event = GOURCE_MODIFIED
elif event.type == "Emigration":
gource_event = GOURCE_MODIFIED
elif event.type in ["Residence", "Property"]:
gource_event = GOURCE_MODIFIED
elif event.type in ["Immigration", "Emmigration"]:
gource_event = GOURCE_MODIFIED
elif event.type == "Occupation":
gource_event = GOURCE_MODIFIED
elif event.type == "Probate":
gource_event = GOURCE_MODIFIED
else:
gource_event = GOURCE_UNKNOWN
logger.debug("Don't know how to handle event type {0}".format(event.type))
if gource_event != GOURCE_UNKNOWN:
record = (timestamp, person.surname.lower(),
gource_event, person_gource_path)
records.append(record)
records.sort()
return records
def _to_pedigree_gource_log_format(self, person_events):
"""
Return a list of pedigree specific custom gource formatted log entries
based on the list of person events passed in.
"""
records = []
for person, gource_path, related_events in person_events:
logger.debug("Creating log entries for {0}".format(person.name))
# Reduce events to only those that contain dates
related_events_with_dates = []
for related_event in related_events:
person_family_object, event, directEvent = related_event
if event.date:
related_events_with_dates.append(related_event)
else:
logger.debug("No date for event {0}".format(event.type))
if related_events_with_dates:
for obj, event, directEvent in related_events_with_dates:
if event.datetime.year < ref_dt.year:
# Year is less than the epoch meaning we can't use
# time.mktime to create a useful timestamp for us.
# Instead, subtract the necessary seconds from the
# epoch time to arrive at the event time.
ref_delta = ref_dt - event.datetime
delta_seconds = ref_delta.total_seconds()
timestamp = ref_timestamp - delta_seconds
else:
timestamp = time.mktime(event.datetime.timetuple())
# Gource requires timestamp as an int
timestamp = int(timestamp)
# For this particular application we only want to capture
# the birth (ADDED) event.
if event.type == 'Birth':
if directEvent:
gource_event = GOURCE_ADDED
record = (timestamp, person.surname.lower(),
gource_event, gource_path)
records.append(record)
records.sort()
return records
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description="Create Gource custom logs from Gramps data")
parser.add_argument("-d", "--db", dest="database", default=None,
type=str,
help="The gramps database file to use")
parser.add_argument("-n", "--names", action='append', dest="names",
default=None, type=str,
help="The focus person to extract pedigree data for")
parser.add_argument("-o", "--output", dest="output", default=None,
type=str,
help="The name of the file to send the output to")
args = parser.parse_args()
logging.basicConfig(
level=logging.INFO, format='%(levelname)s - %(message)s')
if args.database is None:
print("Error: No gramps file provided")
        parser.print_usage()
sys.exit(1)
if args.names is None:
print("Error: No focus name(s) provided")
        parser.print_usage()
sys.exit(1)
if args.output is None:
if len(args.names) > 1:
args.output = "pedigree.log"
else:
lower_name = args.names[0].lower().replace(" ", "_")
args.output = "pedigree_{0}.log".format(lower_name)
g2g = Gramps2Gource(args.database)
g2g.pedigree(args.names, args.output)
logger.info("Done.")
| {
"content_hash": "eb537eae597a2750b397308834f073ac",
"timestamp": "",
"source": "github",
"line_count": 361,
"max_line_length": 116,
"avg_line_length": 39.62049861495845,
"alnum_prop": 0.5352723204922044,
"repo_name": "claws/gramps2gource",
"id": "f0d4aa68cc66626f277c712290884d18a7bbeca2",
"size": "14326",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gramps2gource.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "49023"
}
],
"symlink_target": ""
} |
from js9 import j
def install(job):
from zeroos.orchestrator.sal.Container import Container
from zeroos.orchestrator.configuration import get_jwt_token
import time
service = job.service
job.context['token'] = get_jwt_token(service.aysrepo)
container_service = service.aysrepo.serviceGet(role='container', instance=service.model.data.container)
container = Container.from_ays(container_service, job.context['token'], logger=service.logger)
id = container.id
client = container.node.client
r = client.container.backup(id, service.model.data.url)
service.model.data.type = 'container'
meta = {
'name': container.name,
'node': container.node.addr,
'nics': container.nics,
'hostname': container.hostname,
'flist': container.flist,
'ports': container.ports,
'host_network': container.host_network,
'storage': container.storage,
'init_processes': container.init_processes,
'privileged': container.privileged,
}
service.model.data.timestamp = int(time.time())
service.model.data.meta = j.data.serializer.json.dumps(meta)
service.model.data.snapshot = r.get()
service.saveAll()
def monitor(job):
pass
| {
"content_hash": "a417b7198a51bbffced4a39c2ed0100f",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 107,
"avg_line_length": 30.585365853658537,
"alnum_prop": 0.6770334928229665,
"repo_name": "zero-os/0-orchestrator",
"id": "b7610a56c8e57c4e5fc6a7d5383ecda444a77bd0",
"size": "1254",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "templates/backup.container/actions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Cap'n Proto",
"bytes": "18559"
},
{
"name": "Go",
"bytes": "432708"
},
{
"name": "HTML",
"bytes": "1160972"
},
{
"name": "Python",
"bytes": "1187653"
},
{
"name": "RAML",
"bytes": "100404"
},
{
"name": "Shell",
"bytes": "39084"
}
],
"symlink_target": ""
} |
"""(disabled by default) support for testing pytest and pytest plugins."""
from __future__ import absolute_import, division, print_function
import codecs
import gc
import os
import platform
import re
import subprocess
import six
import sys
import time
import traceback
from fnmatch import fnmatch
from weakref import WeakKeyDictionary
from _pytest.capture import MultiCapture, SysCapture
from _pytest._code import Source
from _pytest.main import Session, EXIT_INTERRUPTED, EXIT_OK
from _pytest.assertion.rewrite import AssertionRewritingHook
from _pytest.pathlib import Path
from _pytest.compat import safe_str
import py
import pytest
IGNORE_PAM = [ # filenames added when obtaining details about the current user
u"/var/lib/sss/mc/passwd"
]
def pytest_addoption(parser):
parser.addoption(
"--lsof",
action="store_true",
dest="lsof",
default=False,
help="run FD checks if lsof is available",
)
parser.addoption(
"--runpytest",
default="inprocess",
dest="runpytest",
choices=("inprocess", "subprocess"),
help=(
"run pytest sub runs in tests using an 'inprocess' "
"or 'subprocess' (python -m main) method"
),
)
parser.addini(
"pytester_example_dir", help="directory to take the pytester example files from"
)
def pytest_configure(config):
if config.getvalue("lsof"):
checker = LsofFdLeakChecker()
if checker.matching_platform():
config.pluginmanager.register(checker)
def raise_on_kwargs(kwargs):
if kwargs:
raise TypeError("Unexpected arguments: {}".format(", ".join(sorted(kwargs))))
class LsofFdLeakChecker(object):
def get_open_files(self):
out = self._exec_lsof()
open_files = self._parse_lsof_output(out)
return open_files
def _exec_lsof(self):
pid = os.getpid()
return py.process.cmdexec("lsof -Ffn0 -p %d" % pid)
def _parse_lsof_output(self, out):
def isopen(line):
return line.startswith("f") and (
"deleted" not in line
and "mem" not in line
and "txt" not in line
and "cwd" not in line
)
open_files = []
for line in out.split("\n"):
if isopen(line):
fields = line.split("\0")
fd = fields[0][1:]
filename = fields[1][1:]
if filename in IGNORE_PAM:
continue
if filename.startswith("/"):
open_files.append((fd, filename))
return open_files
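    # Hedged note (added for illustration): with "lsof -Ffn0" each open-file
    # record looks roughly like "f<fd>\0n<path>\0" (e.g. "f10\0n/tmp/x.log\0");
    # the parser above splits on the NUL separators and strips the leading
    # "f"/"n" field tags.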
def matching_platform(self):
try:
py.process.cmdexec("lsof -v")
except (py.process.cmdexec.Error, UnicodeDecodeError):
# cmdexec may raise UnicodeDecodeError on Windows systems with
# locale other than English:
# https://bitbucket.org/pytest-dev/py/issues/66
return False
else:
return True
@pytest.hookimpl(hookwrapper=True, tryfirst=True)
def pytest_runtest_protocol(self, item):
lines1 = self.get_open_files()
yield
if hasattr(sys, "pypy_version_info"):
gc.collect()
lines2 = self.get_open_files()
new_fds = {t[0] for t in lines2} - {t[0] for t in lines1}
leaked_files = [t for t in lines2 if t[0] in new_fds]
if leaked_files:
error = []
error.append("***** %s FD leakage detected" % len(leaked_files))
error.extend([str(f) for f in leaked_files])
error.append("*** Before:")
error.extend([str(f) for f in lines1])
error.append("*** After:")
error.extend([str(f) for f in lines2])
error.append(error[0])
error.append("*** function %s:%s: %s " % item.location)
error.append("See issue #2366")
item.warn(pytest.PytestWarning("\n".join(error)))
# XXX copied from execnet's conftest.py - needs to be merged
winpymap = {
"python2.7": r"C:\Python27\python.exe",
"python3.4": r"C:\Python34\python.exe",
"python3.5": r"C:\Python35\python.exe",
"python3.6": r"C:\Python36\python.exe",
}
def getexecutable(name, cache={}):
try:
return cache[name]
except KeyError:
executable = py.path.local.sysfind(name)
if executable:
import subprocess
popen = subprocess.Popen(
[str(executable), "--version"],
universal_newlines=True,
stderr=subprocess.PIPE,
)
out, err = popen.communicate()
if name == "jython":
if not err or "2.5" not in err:
executable = None
if "2.5.2" in err:
executable = None # http://bugs.jython.org/issue1790
elif popen.returncode != 0:
# handle pyenv's 127
executable = None
cache[name] = executable
return executable
@pytest.fixture(params=["python2.7", "python3.4", "pypy", "pypy3"])
def anypython(request):
name = request.param
executable = getexecutable(name)
if executable is None:
if sys.platform == "win32":
executable = winpymap.get(name, None)
if executable:
executable = py.path.local(executable)
if executable.check():
return executable
pytest.skip("no suitable %s found" % (name,))
return executable
# used at least by pytest-xdist plugin
@pytest.fixture
def _pytest(request):
"""Return a helper which offers a gethookrecorder(hook) method which
returns a HookRecorder instance which helps to make assertions about called
hooks.
"""
return PytestArg(request)
class PytestArg(object):
def __init__(self, request):
self.request = request
def gethookrecorder(self, hook):
hookrecorder = HookRecorder(hook._pm)
self.request.addfinalizer(hookrecorder.finish_recording)
return hookrecorder
def get_public_names(values):
"""Only return names from iterator values without a leading underscore."""
return [x for x in values if x[0] != "_"]
class ParsedCall(object):
def __init__(self, name, kwargs):
self.__dict__.update(kwargs)
self._name = name
def __repr__(self):
d = self.__dict__.copy()
del d["_name"]
return "<ParsedCall %r(**%r)>" % (self._name, d)
class HookRecorder(object):
"""Record all hooks called in a plugin manager.
This wraps all the hook calls in the plugin manager, recording each call
before propagating the normal calls.
"""
def __init__(self, pluginmanager):
self._pluginmanager = pluginmanager
self.calls = []
def before(hook_name, hook_impls, kwargs):
self.calls.append(ParsedCall(hook_name, kwargs))
def after(outcome, hook_name, hook_impls, kwargs):
pass
self._undo_wrapping = pluginmanager.add_hookcall_monitoring(before, after)
def finish_recording(self):
self._undo_wrapping()
def getcalls(self, names):
if isinstance(names, str):
names = names.split()
return [call for call in self.calls if call._name in names]
def assert_contains(self, entries):
__tracebackhide__ = True
i = 0
entries = list(entries)
backlocals = sys._getframe(1).f_locals
while entries:
name, check = entries.pop(0)
for ind, call in enumerate(self.calls[i:]):
if call._name == name:
print("NAMEMATCH", name, call)
if eval(check, backlocals, call.__dict__):
print("CHECKERMATCH", repr(check), "->", call)
else:
print("NOCHECKERMATCH", repr(check), "-", call)
continue
i += ind + 1
break
print("NONAMEMATCH", name, "with", call)
else:
pytest.fail("could not find %r check %r" % (name, check))
def popcall(self, name):
__tracebackhide__ = True
for i, call in enumerate(self.calls):
if call._name == name:
del self.calls[i]
return call
lines = ["could not find call %r, in:" % (name,)]
lines.extend([" %s" % x for x in self.calls])
pytest.fail("\n".join(lines))
def getcall(self, name):
values = self.getcalls(name)
assert len(values) == 1, (name, values)
return values[0]
# functionality for test reports
def getreports(self, names="pytest_runtest_logreport pytest_collectreport"):
return [x.report for x in self.getcalls(names)]
def matchreport(
self,
inamepart="",
names="pytest_runtest_logreport pytest_collectreport",
when=None,
):
"""return a testreport whose dotted import path matches"""
values = []
for rep in self.getreports(names=names):
try:
if not when and rep.when != "call" and rep.passed:
# setup/teardown passing reports - let's ignore those
continue
except AttributeError:
pass
if when and getattr(rep, "when", None) != when:
continue
if not inamepart or inamepart in rep.nodeid.split("::"):
values.append(rep)
if not values:
raise ValueError(
"could not find test report matching %r: "
"no test reports at all!" % (inamepart,)
)
if len(values) > 1:
raise ValueError(
"found 2 or more testreports matching %r: %s" % (inamepart, values)
)
return values[0]
def getfailures(self, names="pytest_runtest_logreport pytest_collectreport"):
return [rep for rep in self.getreports(names) if rep.failed]
def getfailedcollections(self):
return self.getfailures("pytest_collectreport")
def listoutcomes(self):
passed = []
skipped = []
failed = []
for rep in self.getreports("pytest_collectreport pytest_runtest_logreport"):
if rep.passed:
if getattr(rep, "when", None) == "call":
passed.append(rep)
elif rep.skipped:
skipped.append(rep)
elif rep.failed:
failed.append(rep)
return passed, skipped, failed
def countoutcomes(self):
return [len(x) for x in self.listoutcomes()]
def assertoutcome(self, passed=0, skipped=0, failed=0):
realpassed, realskipped, realfailed = self.listoutcomes()
assert passed == len(realpassed)
assert skipped == len(realskipped)
assert failed == len(realfailed)
def clear(self):
self.calls[:] = []
@pytest.fixture
def linecomp(request):
return LineComp()
@pytest.fixture(name="LineMatcher")
def LineMatcher_fixture(request):
return LineMatcher
@pytest.fixture
def testdir(request, tmpdir_factory):
return Testdir(request, tmpdir_factory)
rex_outcome = re.compile(r"(\d+) ([\w-]+)")
class RunResult(object):
"""The result of running a command.
Attributes:
:ret: the return value
:outlines: list of lines captured from stdout
    :errlines: list of lines captured from stderr
:stdout: :py:class:`LineMatcher` of stdout, use ``stdout.str()`` to
reconstruct stdout or the commonly used ``stdout.fnmatch_lines()``
method
:stderr: :py:class:`LineMatcher` of stderr
:duration: duration in seconds
"""
def __init__(self, ret, outlines, errlines, duration):
self.ret = ret
self.outlines = outlines
self.errlines = errlines
self.stdout = LineMatcher(outlines)
self.stderr = LineMatcher(errlines)
self.duration = duration
def parseoutcomes(self):
"""Return a dictionary of outcomestring->num from parsing the terminal
output that the test process produced.
"""
for line in reversed(self.outlines):
if "seconds" in line:
outcomes = rex_outcome.findall(line)
if outcomes:
d = {}
for num, cat in outcomes:
d[cat] = int(num)
return d
raise ValueError("Pytest terminal report not found")
def assert_outcomes(
self, passed=0, skipped=0, failed=0, error=0, xpassed=0, xfailed=0
):
"""Assert that the specified outcomes appear with the respective
numbers (0 means it didn't occur) in the text output from a test run.
"""
d = self.parseoutcomes()
obtained = {
"passed": d.get("passed", 0),
"skipped": d.get("skipped", 0),
"failed": d.get("failed", 0),
"error": d.get("error", 0),
"xpassed": d.get("xpassed", 0),
"xfailed": d.get("xfailed", 0),
}
expected = {
"passed": passed,
"skipped": skipped,
"failed": failed,
"error": error,
"xpassed": xpassed,
"xfailed": xfailed,
}
assert obtained == expected
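# Illustrative usage sketch (not part of the original module): a RunResult is
# normally obtained through the ``testdir`` fixture defined below, e.g. in a
# plugin's own test suite:
#
#   def test_example(testdir):
#       testdir.makepyfile("def test_ok(): pass")
#       result = testdir.runpytest()
#       result.assert_outcomes(passed=1)
#       assert result.ret == 0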
class CwdSnapshot(object):
def __init__(self):
self.__saved = os.getcwd()
def restore(self):
os.chdir(self.__saved)
class SysModulesSnapshot(object):
def __init__(self, preserve=None):
self.__preserve = preserve
self.__saved = dict(sys.modules)
def restore(self):
if self.__preserve:
self.__saved.update(
(k, m) for k, m in sys.modules.items() if self.__preserve(k)
)
sys.modules.clear()
sys.modules.update(self.__saved)
class SysPathsSnapshot(object):
def __init__(self):
self.__saved = list(sys.path), list(sys.meta_path)
def restore(self):
sys.path[:], sys.meta_path[:] = self.__saved
class Testdir(object):
"""Temporary test directory with tools to test/run pytest itself.
This is based on the ``tmpdir`` fixture but provides a number of methods
which aid with testing pytest itself. Unless :py:meth:`chdir` is used all
methods will use :py:attr:`tmpdir` as their current working directory.
Attributes:
:tmpdir: The :py:class:`py.path.local` instance of the temporary directory.
:plugins: A list of plugins to use with :py:meth:`parseconfig` and
:py:meth:`runpytest`. Initially this is an empty list but plugins can
be added to the list. The type of items to add to the list depends on
the method using them so refer to them for details.
"""
class TimeoutExpired(Exception):
pass
def __init__(self, request, tmpdir_factory):
self.request = request
self._mod_collections = WeakKeyDictionary()
name = request.function.__name__
self.tmpdir = tmpdir_factory.mktemp(name, numbered=True)
self.test_tmproot = tmpdir_factory.mktemp("tmp-" + name, numbered=True)
os.environ["PYTEST_DEBUG_TEMPROOT"] = str(self.test_tmproot)
self.plugins = []
self._cwd_snapshot = CwdSnapshot()
self._sys_path_snapshot = SysPathsSnapshot()
self._sys_modules_snapshot = self.__take_sys_modules_snapshot()
self.chdir()
self.request.addfinalizer(self.finalize)
method = self.request.config.getoption("--runpytest")
if method == "inprocess":
self._runpytest_method = self.runpytest_inprocess
elif method == "subprocess":
self._runpytest_method = self.runpytest_subprocess
def __repr__(self):
return "<Testdir %r>" % (self.tmpdir,)
def finalize(self):
"""Clean up global state artifacts.
Some methods modify the global interpreter state and this tries to
clean this up. It does not remove the temporary directory however so
it can be looked at after the test run has finished.
"""
self._sys_modules_snapshot.restore()
self._sys_path_snapshot.restore()
self._cwd_snapshot.restore()
os.environ.pop("PYTEST_DEBUG_TEMPROOT", None)
def __take_sys_modules_snapshot(self):
# some zope modules used by twisted-related tests keep internal state
# and can't be deleted; we had some trouble in the past with
# `zope.interface` for example
def preserve_module(name):
return name.startswith("zope")
return SysModulesSnapshot(preserve=preserve_module)
def make_hook_recorder(self, pluginmanager):
"""Create a new :py:class:`HookRecorder` for a PluginManager."""
pluginmanager.reprec = reprec = HookRecorder(pluginmanager)
self.request.addfinalizer(reprec.finish_recording)
return reprec
def chdir(self):
"""Cd into the temporary directory.
This is done automatically upon instantiation.
"""
self.tmpdir.chdir()
def _makefile(self, ext, args, kwargs, encoding="utf-8"):
items = list(kwargs.items())
def to_text(s):
return s.decode(encoding) if isinstance(s, bytes) else six.text_type(s)
if args:
source = u"\n".join(to_text(x) for x in args)
basename = self.request.function.__name__
items.insert(0, (basename, source))
ret = None
for basename, value in items:
p = self.tmpdir.join(basename).new(ext=ext)
p.dirpath().ensure_dir()
source = Source(value)
source = u"\n".join(to_text(line) for line in source.lines)
p.write(source.strip().encode(encoding), "wb")
if ret is None:
ret = p
return ret
def makefile(self, ext, *args, **kwargs):
r"""Create new file(s) in the testdir.
:param str ext: The extension the file(s) should use, including the dot, e.g. `.py`.
:param list[str] args: All args will be treated as strings and joined using newlines.
The result will be written as contents to the file. The name of the
file will be based on the test function requesting this fixture.
:param kwargs: Each keyword is the name of a file, while the value of it will
be written as contents of the file.
Examples:
.. code-block:: python
testdir.makefile(".txt", "line1", "line2")
testdir.makefile(".ini", pytest="[pytest]\naddopts=-rs\n")
"""
return self._makefile(ext, args, kwargs)
def makeconftest(self, source):
"""Write a contest.py file with 'source' as contents."""
return self.makepyfile(conftest=source)
def makeini(self, source):
"""Write a tox.ini file with 'source' as contents."""
return self.makefile(".ini", tox=source)
def getinicfg(self, source):
"""Return the pytest section from the tox.ini config file."""
p = self.makeini(source)
return py.iniconfig.IniConfig(p)["pytest"]
def makepyfile(self, *args, **kwargs):
"""Shortcut for .makefile() with a .py extension."""
return self._makefile(".py", args, kwargs)
def maketxtfile(self, *args, **kwargs):
"""Shortcut for .makefile() with a .txt extension."""
return self._makefile(".txt", args, kwargs)
def syspathinsert(self, path=None):
"""Prepend a directory to sys.path, defaults to :py:attr:`tmpdir`.
This is undone automatically when this object dies at the end of each
test.
"""
if path is None:
path = self.tmpdir
sys.path.insert(0, str(path))
# a call to syspathinsert() usually means that the caller wants to
# import some dynamically created files, thus with python3 we
# invalidate its import caches
self._possibly_invalidate_import_caches()
def _possibly_invalidate_import_caches(self):
# invalidate caches if we can (py33 and above)
try:
import importlib
except ImportError:
pass
else:
if hasattr(importlib, "invalidate_caches"):
importlib.invalidate_caches()
def mkdir(self, name):
"""Create a new (sub)directory."""
return self.tmpdir.mkdir(name)
def mkpydir(self, name):
"""Create a new python package.
This creates a (sub)directory with an empty ``__init__.py`` file so it
gets recognised as a python package.
"""
p = self.mkdir(name)
p.ensure("__init__.py")
return p
def copy_example(self, name=None):
import warnings
from _pytest.warning_types import PYTESTER_COPY_EXAMPLE
warnings.warn(PYTESTER_COPY_EXAMPLE, stacklevel=2)
example_dir = self.request.config.getini("pytester_example_dir")
if example_dir is None:
raise ValueError("pytester_example_dir is unset, can't copy examples")
example_dir = self.request.config.rootdir.join(example_dir)
for extra_element in self.request.node.iter_markers("pytester_example_path"):
assert extra_element.args
example_dir = example_dir.join(*extra_element.args)
if name is None:
func_name = self.request.function.__name__
maybe_dir = example_dir / func_name
maybe_file = example_dir / (func_name + ".py")
if maybe_dir.isdir():
example_path = maybe_dir
elif maybe_file.isfile():
example_path = maybe_file
else:
raise LookupError(
"{} cant be found as module or package in {}".format(
func_name, example_dir.bestrelpath(self.request.confg.rootdir)
)
)
else:
example_path = example_dir.join(name)
if example_path.isdir() and not example_path.join("__init__.py").isfile():
example_path.copy(self.tmpdir)
return self.tmpdir
elif example_path.isfile():
result = self.tmpdir.join(example_path.basename)
example_path.copy(result)
return result
else:
raise LookupError(
'example "{}" is not found as a file or directory'.format(example_path)
)
Session = Session
def getnode(self, config, arg):
"""Return the collection node of a file.
:param config: :py:class:`_pytest.config.Config` instance, see
:py:meth:`parseconfig` and :py:meth:`parseconfigure` to create the
configuration
:param arg: a :py:class:`py.path.local` instance of the file
"""
session = Session(config)
assert "::" not in str(arg)
p = py.path.local(arg)
config.hook.pytest_sessionstart(session=session)
res = session.perform_collect([str(p)], genitems=False)[0]
config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK)
return res
def getpathnode(self, path):
"""Return the collection node of a file.
This is like :py:meth:`getnode` but uses :py:meth:`parseconfigure` to
create the (configured) pytest Config instance.
:param path: a :py:class:`py.path.local` instance of the file
"""
config = self.parseconfigure(path)
session = Session(config)
x = session.fspath.bestrelpath(path)
config.hook.pytest_sessionstart(session=session)
res = session.perform_collect([x], genitems=False)[0]
config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK)
return res
def genitems(self, colitems):
"""Generate all test items from a collection node.
This recurses into the collection node and returns a list of all the
test items contained within.
"""
session = colitems[0].session
result = []
for colitem in colitems:
result.extend(session.genitems(colitem))
return result
def runitem(self, source):
"""Run the "test_func" Item.
The calling test instance (class containing the test method) must
provide a ``.getrunner()`` method which should return a runner which
can run the test protocol for a single item, e.g.
:py:func:`_pytest.runner.runtestprotocol`.
"""
# used from runner functional tests
item = self.getitem(source)
# the test class where we are called from wants to provide the runner
testclassinstance = self.request.instance
runner = testclassinstance.getrunner()
return runner(item)
def inline_runsource(self, source, *cmdlineargs):
"""Run a test module in process using ``pytest.main()``.
This run writes "source" into a temporary file and runs
``pytest.main()`` on it, returning a :py:class:`HookRecorder` instance
for the result.
:param source: the source code of the test module
:param cmdlineargs: any extra command line arguments to use
:return: :py:class:`HookRecorder` instance of the result
"""
p = self.makepyfile(source)
values = list(cmdlineargs) + [p]
return self.inline_run(*values)
def inline_genitems(self, *args):
"""Run ``pytest.main(['--collectonly'])`` in-process.
Runs the :py:func:`pytest.main` function to run all of pytest inside
the test process itself like :py:meth:`inline_run`, but returns a
tuple of the collected items and a :py:class:`HookRecorder` instance.
"""
rec = self.inline_run("--collect-only", *args)
items = [x.item for x in rec.getcalls("pytest_itemcollected")]
return items, rec
def inline_run(self, *args, **kwargs):
"""Run ``pytest.main()`` in-process, returning a HookRecorder.
Runs the :py:func:`pytest.main` function to run all of pytest inside
the test process itself. This means it can return a
:py:class:`HookRecorder` instance which gives more detailed results
from that run than can be done by matching stdout/stderr from
:py:meth:`runpytest`.
:param args: command line arguments to pass to :py:func:`pytest.main`
:param plugin: (keyword-only) extra plugin instances the
``pytest.main()`` instance should use
:return: a :py:class:`HookRecorder` instance
"""
finalizers = []
try:
# When running pytest inline any plugins active in the main test
# process are already imported. So this disables the warning which
# will trigger to say they can no longer be rewritten, which is
# fine as they have already been rewritten.
orig_warn = AssertionRewritingHook._warn_already_imported
def revert_warn_already_imported():
AssertionRewritingHook._warn_already_imported = orig_warn
finalizers.append(revert_warn_already_imported)
AssertionRewritingHook._warn_already_imported = lambda *a: None
# Any sys.module or sys.path changes done while running pytest
# inline should be reverted after the test run completes to avoid
# clashing with later inline tests run within the same pytest test,
# e.g. just because they use matching test module names.
finalizers.append(self.__take_sys_modules_snapshot().restore)
finalizers.append(SysPathsSnapshot().restore)
# Important note:
# - our tests should not leave any other references/registrations
# laying around other than possibly loaded test modules
# referenced from sys.modules, as nothing will clean those up
# automatically
rec = []
class Collect(object):
def pytest_configure(x, config):
rec.append(self.make_hook_recorder(config.pluginmanager))
plugins = kwargs.get("plugins") or []
plugins.append(Collect())
ret = pytest.main(list(args), plugins=plugins)
if len(rec) == 1:
reprec = rec.pop()
else:
class reprec(object):
pass
reprec.ret = ret
# typically we reraise keyboard interrupts from the child run
# because it's our user requesting interruption of the testing
if ret == EXIT_INTERRUPTED and not kwargs.get("no_reraise_ctrlc"):
calls = reprec.getcalls("pytest_keyboard_interrupt")
if calls and calls[-1].excinfo.type == KeyboardInterrupt:
raise KeyboardInterrupt()
return reprec
finally:
for finalizer in finalizers:
finalizer()
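    # Illustrative usage sketch (not part of the original module): inline_run
    # returns a HookRecorder, so outcomes can be asserted directly instead of
    # parsing terminal output, e.g.:
    #
    #   def test_example(testdir):
    #       testdir.makepyfile("def test_ok(): pass")
    #       reprec = testdir.inline_run("-q")
    #       reprec.assertoutcome(passed=1)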
def runpytest_inprocess(self, *args, **kwargs):
"""Return result of running pytest in-process, providing a similar
interface to what self.runpytest() provides.
"""
if kwargs.get("syspathinsert"):
self.syspathinsert()
now = time.time()
capture = MultiCapture(Capture=SysCapture)
capture.start_capturing()
try:
try:
reprec = self.inline_run(*args, **kwargs)
except SystemExit as e:
class reprec(object):
ret = e.args[0]
except Exception:
traceback.print_exc()
class reprec(object):
ret = 3
finally:
out, err = capture.readouterr()
capture.stop_capturing()
sys.stdout.write(out)
sys.stderr.write(err)
res = RunResult(reprec.ret, out.split("\n"), err.split("\n"), time.time() - now)
res.reprec = reprec
return res
def runpytest(self, *args, **kwargs):
"""Run pytest inline or in a subprocess, depending on the command line
option "--runpytest" and return a :py:class:`RunResult`.
"""
args = self._ensure_basetemp(args)
return self._runpytest_method(*args, **kwargs)
def _ensure_basetemp(self, args):
args = list(args)
for x in args:
if safe_str(x).startswith("--basetemp"):
break
else:
args.append("--basetemp=%s" % self.tmpdir.dirpath("basetemp"))
return args
def parseconfig(self, *args):
"""Return a new pytest Config instance from given commandline args.
This invokes the pytest bootstrapping code in _pytest.config to create
a new :py:class:`_pytest.core.PluginManager` and call the
pytest_cmdline_parse hook to create a new
:py:class:`_pytest.config.Config` instance.
If :py:attr:`plugins` has been populated they should be plugin modules
to be registered with the PluginManager.
"""
args = self._ensure_basetemp(args)
import _pytest.config
config = _pytest.config._prepareconfig(args, self.plugins)
# we don't know what the test will do with this half-setup config
# object and thus we make sure it gets unconfigured properly in any
# case (otherwise capturing could still be active, for example)
self.request.addfinalizer(config._ensure_unconfigure)
return config
def parseconfigure(self, *args):
"""Return a new pytest configured Config instance.
This returns a new :py:class:`_pytest.config.Config` instance like
:py:meth:`parseconfig`, but also calls the pytest_configure hook.
"""
config = self.parseconfig(*args)
config._do_configure()
self.request.addfinalizer(config._ensure_unconfigure)
return config
def getitem(self, source, funcname="test_func"):
"""Return the test item for a test function.
This writes the source to a python file and runs pytest's collection on
the resulting module, returning the test item for the requested
function name.
:param source: the module source
:param funcname: the name of the test function for which to return a
test item
"""
items = self.getitems(source)
for item in items:
if item.name == funcname:
return item
assert 0, "%r item not found in module:\n%s\nitems: %s" % (
funcname,
source,
items,
)
def getitems(self, source):
"""Return all test items collected from the module.
This writes the source to a python file and runs pytest's collection on
the resulting module, returning all test items contained within.
"""
modcol = self.getmodulecol(source)
return self.genitems([modcol])
def getmodulecol(self, source, configargs=(), withinit=False):
"""Return the module collection node for ``source``.
This writes ``source`` to a file using :py:meth:`makepyfile` and then
runs the pytest collection on it, returning the collection node for the
test module.
:param source: the source code of the module to collect
:param configargs: any extra arguments to pass to
:py:meth:`parseconfigure`
:param withinit: whether to also write an ``__init__.py`` file to the
same directory to ensure it is a package
"""
if isinstance(source, Path):
path = self.tmpdir.join(str(source))
assert not withinit, "not supported for paths"
else:
kw = {self.request.function.__name__: Source(source).strip()}
path = self.makepyfile(**kw)
if withinit:
self.makepyfile(__init__="#")
self.config = config = self.parseconfigure(path, *configargs)
return self.getnode(config, path)
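    # Illustrative usage sketch (not part of the original module): collecting a
    # module and generating its test items, e.g.:
    #
    #   def test_example(testdir):
    #       modcol = testdir.getmodulecol("def test_x(): pass")
    #       items = testdir.genitems([modcol])
    #       assert len(items) == 1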
def collect_by_name(self, modcol, name):
"""Return the collection node for name from the module collection.
This will search a module collection node for a collection node
matching the given name.
:param modcol: a module collection node; see :py:meth:`getmodulecol`
:param name: the name of the node to return
"""
if modcol not in self._mod_collections:
self._mod_collections[modcol] = list(modcol.collect())
for colitem in self._mod_collections[modcol]:
if colitem.name == name:
return colitem
def popen(self, cmdargs, stdout, stderr, **kw):
"""Invoke subprocess.Popen.
This calls subprocess.Popen making sure the current working directory
is in the PYTHONPATH.
You probably want to use :py:meth:`run` instead.
"""
env = os.environ.copy()
env["PYTHONPATH"] = os.pathsep.join(
filter(None, [os.getcwd(), env.get("PYTHONPATH", "")])
)
kw["env"] = env
popen = subprocess.Popen(
cmdargs, stdin=subprocess.PIPE, stdout=stdout, stderr=stderr, **kw
)
popen.stdin.close()
return popen
def run(self, *cmdargs, **kwargs):
"""Run a command with arguments.
Run a process using subprocess.Popen saving the stdout and stderr.
:param args: the sequence of arguments to pass to `subprocess.Popen()`
:param timeout: the period in seconds after which to timeout and raise
:py:class:`Testdir.TimeoutExpired`
Returns a :py:class:`RunResult`.
"""
__tracebackhide__ = True
timeout = kwargs.pop("timeout", None)
raise_on_kwargs(kwargs)
cmdargs = [
str(arg) if isinstance(arg, py.path.local) else arg for arg in cmdargs
]
p1 = self.tmpdir.join("stdout")
p2 = self.tmpdir.join("stderr")
print("running:", *cmdargs)
print(" in:", py.path.local())
f1 = codecs.open(str(p1), "w", encoding="utf8")
f2 = codecs.open(str(p2), "w", encoding="utf8")
try:
now = time.time()
popen = self.popen(
cmdargs, stdout=f1, stderr=f2, close_fds=(sys.platform != "win32")
)
def handle_timeout():
__tracebackhide__ = True
timeout_message = (
"{seconds} second timeout expired running:"
" {command}".format(seconds=timeout, command=cmdargs)
)
popen.kill()
popen.wait()
raise self.TimeoutExpired(timeout_message)
if timeout is None:
ret = popen.wait()
elif six.PY3:
try:
ret = popen.wait(timeout)
except subprocess.TimeoutExpired:
handle_timeout()
else:
end = time.time() + timeout
resolution = min(0.1, timeout / 10)
while True:
ret = popen.poll()
if ret is not None:
break
if time.time() > end:
handle_timeout()
time.sleep(resolution)
finally:
f1.close()
f2.close()
f1 = codecs.open(str(p1), "r", encoding="utf8")
f2 = codecs.open(str(p2), "r", encoding="utf8")
try:
out = f1.read().splitlines()
err = f2.read().splitlines()
finally:
f1.close()
f2.close()
self._dump_lines(out, sys.stdout)
self._dump_lines(err, sys.stderr)
return RunResult(ret, out, err, time.time() - now)
def _dump_lines(self, lines, fp):
try:
for line in lines:
print(line, file=fp)
except UnicodeEncodeError:
print("couldn't print to %s because of encoding" % (fp,))
def _getpytestargs(self):
return sys.executable, "-mpytest"
def runpython(self, script):
"""Run a python script using sys.executable as interpreter.
Returns a :py:class:`RunResult`.
"""
return self.run(sys.executable, script)
def runpython_c(self, command):
"""Run python -c "command", return a :py:class:`RunResult`."""
return self.run(sys.executable, "-c", command)
def runpytest_subprocess(self, *args, **kwargs):
"""Run pytest as a subprocess with given arguments.
        Any plugins added to the :py:attr:`plugins` list will be added using the
        ``-p`` command line option. Additionally ``--basetemp`` is used to put
any temporary files and directories in a numbered directory prefixed
with "runpytest-" so they do not conflict with the normal numbered
pytest location for temporary files and directories.
:param args: the sequence of arguments to pass to the pytest subprocess
:param timeout: the period in seconds after which to timeout and raise
:py:class:`Testdir.TimeoutExpired`
Returns a :py:class:`RunResult`.
"""
__tracebackhide__ = True
p = py.path.local.make_numbered_dir(
prefix="runpytest-", keep=None, rootdir=self.tmpdir
)
args = ("--basetemp=%s" % p,) + args
plugins = [x for x in self.plugins if isinstance(x, str)]
if plugins:
args = ("-p", plugins[0]) + args
args = self._getpytestargs() + args
return self.run(*args, timeout=kwargs.get("timeout"))
def spawn_pytest(self, string, expect_timeout=10.0):
"""Run pytest using pexpect.
This makes sure to use the right pytest and sets up the temporary
directory locations.
The pexpect child is returned.
"""
basetemp = self.tmpdir.mkdir("temp-pexpect")
invoke = " ".join(map(str, self._getpytestargs()))
cmd = "%s --basetemp=%s %s" % (invoke, basetemp, string)
return self.spawn(cmd, expect_timeout=expect_timeout)
def spawn(self, cmd, expect_timeout=10.0):
"""Run a command using pexpect.
The pexpect child is returned.
"""
pexpect = pytest.importorskip("pexpect", "3.0")
if hasattr(sys, "pypy_version_info") and "64" in platform.machine():
pytest.skip("pypy-64 bit not supported")
if sys.platform.startswith("freebsd"):
pytest.xfail("pexpect does not work reliably on freebsd")
logfile = self.tmpdir.join("spawn.out").open("wb")
child = pexpect.spawn(cmd, logfile=logfile)
self.request.addfinalizer(logfile.close)
child.timeout = expect_timeout
return child
def getdecoded(out):
try:
return out.decode("utf-8")
except UnicodeDecodeError:
return "INTERNAL not-utf8-decodeable, truncated string:\n%s" % (
py.io.saferepr(out),
)
class LineComp(object):
def __init__(self):
self.stringio = py.io.TextIO()
def assert_contains_lines(self, lines2):
"""Assert that lines2 are contained (linearly) in lines1.
Return a list of extralines found.
"""
__tracebackhide__ = True
val = self.stringio.getvalue()
self.stringio.truncate(0)
self.stringio.seek(0)
lines1 = val.split("\n")
return LineMatcher(lines1).fnmatch_lines(lines2)
class LineMatcher(object):
"""Flexible matching of text.
This is a convenience class to test large texts like the output of
commands.
The constructor takes a list of lines without their trailing newlines, i.e.
``text.splitlines()``.
"""
def __init__(self, lines):
self.lines = lines
self._log_output = []
def str(self):
"""Return the entire original text."""
return "\n".join(self.lines)
def _getlines(self, lines2):
if isinstance(lines2, str):
lines2 = Source(lines2)
if isinstance(lines2, Source):
lines2 = lines2.strip().lines
return lines2
def fnmatch_lines_random(self, lines2):
"""Check lines exist in the output using in any order.
Lines are checked using ``fnmatch.fnmatch``. The argument is a list of
lines which have to occur in the output, in any order.
"""
self._match_lines_random(lines2, fnmatch)
def re_match_lines_random(self, lines2):
"""Check lines exist in the output using ``re.match``, in any order.
The argument is a list of lines which have to occur in the output, in
any order.
"""
self._match_lines_random(lines2, lambda name, pat: re.match(pat, name))
def _match_lines_random(self, lines2, match_func):
"""Check lines exist in the output.
The argument is a list of lines which have to occur in the output, in
        any order. Each line can contain glob wildcards.
"""
lines2 = self._getlines(lines2)
for line in lines2:
for x in self.lines:
if line == x or match_func(x, line):
self._log("matched: ", repr(line))
break
else:
self._log("line %r not found in output" % line)
raise ValueError(self._log_text)
def get_lines_after(self, fnline):
"""Return all lines following the given line in the text.
The given line can contain glob wildcards.
"""
for i, line in enumerate(self.lines):
if fnline == line or fnmatch(line, fnline):
return self.lines[i + 1 :]
raise ValueError("line %r not found in output" % fnline)
def _log(self, *args):
self._log_output.append(" ".join((str(x) for x in args)))
@property
def _log_text(self):
return "\n".join(self._log_output)
def fnmatch_lines(self, lines2):
"""Search captured text for matching lines using ``fnmatch.fnmatch``.
The argument is a list of lines which have to match and can use glob
wildcards. If they do not match a pytest.fail() is called. The
matches and non-matches are also printed on stdout.
"""
__tracebackhide__ = True
self._match_lines(lines2, fnmatch, "fnmatch")
def re_match_lines(self, lines2):
"""Search captured text for matching lines using ``re.match``.
The argument is a list of lines which have to match using ``re.match``.
If they do not match a pytest.fail() is called.
The matches and non-matches are also printed on stdout.
"""
__tracebackhide__ = True
self._match_lines(lines2, lambda name, pat: re.match(pat, name), "re.match")
def _match_lines(self, lines2, match_func, match_nickname):
"""Underlying implementation of ``fnmatch_lines`` and ``re_match_lines``.
:param list[str] lines2: list of string patterns to match. The actual
format depends on ``match_func``
:param match_func: a callable ``match_func(line, pattern)`` where line
is the captured line from stdout/stderr and pattern is the matching
pattern
:param str match_nickname: the nickname for the match function that
will be logged to stdout when a match occurs
"""
lines2 = self._getlines(lines2)
lines1 = self.lines[:]
nextline = None
extralines = []
__tracebackhide__ = True
for line in lines2:
nomatchprinted = False
while lines1:
nextline = lines1.pop(0)
if line == nextline:
self._log("exact match:", repr(line))
break
elif match_func(nextline, line):
self._log("%s:" % match_nickname, repr(line))
self._log(" with:", repr(nextline))
break
else:
if not nomatchprinted:
self._log("nomatch:", repr(line))
nomatchprinted = True
self._log(" and:", repr(nextline))
extralines.append(nextline)
else:
self._log("remains unmatched: %r" % (line,))
pytest.fail(self._log_text)
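# Illustrative usage sketch (not part of the original module): LineMatcher is
# most commonly reached through RunResult.stdout, matching captured output
# against glob patterns, e.g.:
#
#   def test_example(testdir):
#       testdir.makepyfile("def test_ok(): pass")
#       result = testdir.runpytest("-v")
#       result.stdout.fnmatch_lines(["*test_ok*PASSED*"])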
| {
"content_hash": "387498839105f8ce5465128bbf9866d1",
"timestamp": "",
"source": "github",
"line_count": 1381,
"max_line_length": 93,
"avg_line_length": 33.84359160028964,
"alnum_prop": 0.583807608370063,
"repo_name": "ddboline/pytest",
"id": "8782a30ba733bacd1f712ed3256fdeffcbd829ac",
"size": "46738",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/_pytest/pytester.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1293"
},
{
"name": "Python",
"bytes": "1785701"
}
],
"symlink_target": ""
} |
"""SCons.Tool.packaging.msi
The msi packager.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/packaging/msi.py 2014/09/27 12:51:43 garyo"
import os
import SCons
from SCons.Action import Action
from SCons.Builder import Builder
from xml.dom.minidom import *
from xml.sax.saxutils import escape
from SCons.Tool.packaging import stripinstallbuilder
#
# Utility functions
#
def convert_to_id(s, id_set):
""" Some parts of .wxs need an Id attribute (for example: The File and
Directory directives. The charset is limited to A-Z, a-z, digits,
    underscores, periods. Each Id must begin with a letter or with an
underscore. Google for "CNDL0015" for information about this.
Requirements:
* the string created must only contain chars from the target charset.
* the string created must have a minimal editing distance from the
original string.
* the string created must be unique for the whole .wxs file.
Observation:
* There are 62 chars in the charset.
Idea:
* filter out forbidden characters. Check for a collision with the help
      of the id_set. Add the number of the collision at the
end of the created string. Furthermore care for a correct start of
the string.
"""
    charset = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_.'
    if s[0] in '0123456789.':
        s = '_'+s
    id = ''.join(c for c in s if c in charset)
# did we already generate an id for this file?
try:
return id_set[id][s]
except KeyError:
# no we did not so initialize with the id
if id not in id_set: id_set[id] = { s : id }
# there is a collision, generate an id which is unique by appending
# the collision number
else: id_set[id][s] = id + str(len(id_set[id]))
return id_set[id][s]
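# Illustrative sketch (not part of the original module): with the string-based
# filtering above, names that reduce to the same Id share id_set, so a later
# colliding name gets a numeric suffix, e.g.:
#
#   ids = {}
#   convert_to_id('my-file.txt', ids)   # -> 'myfile.txt'
#   convert_to_id('my+file.txt', ids)   # -> 'myfile.txt1'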
def is_dos_short_file_name(file):
""" examine if the given file is in the 8.3 form.
"""
fname, ext = os.path.splitext(file)
proper_ext = len(ext) == 0 or (2 <= len(ext) <= 4) # the ext contains the dot
proper_fname = file.isupper() and len(fname) <= 8
return proper_ext and proper_fname
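# Illustrative sketch (not part of the original module):
#
#   is_dos_short_file_name('README.TXT')   # -> True (upper case, 8.3 form)
#   is_dos_short_file_name('readme.txt')   # -> False (not upper case)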
def gen_dos_short_file_name(file, filename_set):
""" see http://support.microsoft.com/default.aspx?scid=kb;en-us;Q142982
    These are not complete 8.3 DOS short names. The ~ char is missing and
replaced with one character from the filename. WiX warns about such
filenames, since a collision might occur. Google for "CNDL1014" for
more information.
"""
# guard this to not confuse the generation
if is_dos_short_file_name(file):
return file
fname, ext = os.path.splitext(file) # ext contains the dot
# first try if it suffices to convert to upper
file = file.upper()
if is_dos_short_file_name(file):
return file
# strip forbidden characters.
forbidden = '."/[]:;=, '
    fname = ''.join(c for c in fname if c not in forbidden)
# check if we already generated a filename with the same number:
# thisis1.txt, thisis2.txt etc.
duplicate, num = not None, 1
while duplicate:
shortname = "%s%s" % (fname[:8-len(str(num))].upper(),\
str(num))
if len(ext) >= 2:
shortname = "%s%s" % (shortname, ext[:4].upper())
duplicate, num = shortname in filename_set, num+1
assert( is_dos_short_file_name(shortname) ), 'shortname is %s, longname is %s' % (shortname, file)
filename_set.append(shortname)
return shortname
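# Illustrative sketch (not part of the original module): long names are
# truncated, upper-cased and numbered so they stay unique within filename_set:
#
#   used = []
#   gen_dos_short_file_name('verylongfilename.txt', used)   # -> 'VERYLON1.TXT'
#   gen_dos_short_file_name('verylongfilename.txt', used)   # -> 'VERYLON2.TXT'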
def create_feature_dict(files):
""" X_MSI_FEATURE and doc FileTag's can be used to collect files in a
hierarchy. This function collects the files into this hierarchy.
"""
dict = {}
def add_to_dict( feature, file ):
if not SCons.Util.is_List( feature ):
feature = [ feature ]
for f in feature:
if f not in dict:
dict[ f ] = [ file ]
else:
dict[ f ].append( file )
for file in files:
if hasattr( file, 'PACKAGING_X_MSI_FEATURE' ):
add_to_dict(file.PACKAGING_X_MSI_FEATURE, file)
elif hasattr( file, 'PACKAGING_DOC' ):
add_to_dict( 'PACKAGING_DOC', file )
else:
add_to_dict( 'default', file )
return dict
def generate_guids(root):
""" generates globally unique identifiers for parts of the xml which need
them.
Component tags have a special requirement. Their UUID is only allowed to
change if the list of their contained resources has changed. This allows
for clean removal and proper updates.
    To handle this requirement, the uuid is generated by md5-hashing the
    whole subtree of an xml node.
"""
from hashlib import md5
# specify which tags need a guid and in which attribute this should be stored.
needs_id = { 'Product' : 'Id',
'Package' : 'Id',
'Component' : 'Guid',
}
# find all XMl nodes matching the key, retrieve their attribute, hash their
# subtree, convert hash to string and add as a attribute to the xml node.
for (key,value) in needs_id.items():
node_list = root.getElementsByTagName(key)
attribute = value
for node in node_list:
hash = md5(node.toxml()).hexdigest()
hash_str = '%s-%s-%s-%s-%s' % ( hash[:8], hash[8:12], hash[12:16], hash[16:20], hash[20:] )
node.attributes[attribute] = hash_str
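# Illustrative sketch (not part of the original module): a Component's Guid is
# derived from the md5 of its serialized subtree, so it only changes when the
# contained resources change, e.g.:
#
#   from hashlib import md5
#   h = md5('<Component Id="c1"/>').hexdigest()
#   guid = '%s-%s-%s-%s-%s' % (h[:8], h[8:12], h[12:16], h[16:20], h[20:])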
def string_wxsfile(target, source, env):
return "building WiX file %s"%( target[0].path )
def build_wxsfile(target, source, env):
""" compiles a .wxs file from the keywords given in env['msi_spec'] and
by analyzing the tree of source nodes and their tags.
"""
file = open(target[0].abspath, 'w')
try:
# Create a document with the Wix root tag
doc = Document()
root = doc.createElement( 'Wix' )
root.attributes['xmlns']='http://schemas.microsoft.com/wix/2003/01/wi'
doc.appendChild( root )
filename_set = [] # this is to circumvent duplicates in the shortnames
id_set = {} # this is to circumvent duplicates in the ids
# Create the content
build_wxsfile_header_section(root, env)
build_wxsfile_file_section(root, source, env['NAME'], env['VERSION'], env['VENDOR'], filename_set, id_set)
generate_guids(root)
build_wxsfile_features_section(root, source, env['NAME'], env['VERSION'], env['SUMMARY'], id_set)
build_wxsfile_default_gui(root)
build_license_file(target[0].get_dir(), env)
# write the xml to a file
file.write( doc.toprettyxml() )
# call a user specified function
if 'CHANGE_SPECFILE' in env:
env['CHANGE_SPECFILE'](target, source)
except KeyError, e:
raise SCons.Errors.UserError( '"%s" package field for MSI is missing.' % e.args[0] )
#
# setup function
#
def create_default_directory_layout(root, NAME, VERSION, VENDOR, filename_set):
""" Create the wix default target directory layout and return the innermost
directory.
We assume that the XML tree delivered in the root argument already contains
the Product tag.
Everything is put under the PFiles directory property defined by WiX.
After that a directory with the 'VENDOR' tag is placed and then a
directory with the name of the project and its VERSION. This leads to the
following TARGET Directory Layout:
C:\<PFiles>\<Vendor>\<Projectname-Version>\
Example: C:\Programme\Company\Product-1.2\
"""
doc = Document()
d1 = doc.createElement( 'Directory' )
d1.attributes['Id'] = 'TARGETDIR'
d1.attributes['Name'] = 'SourceDir'
d2 = doc.createElement( 'Directory' )
d2.attributes['Id'] = 'ProgramFilesFolder'
d2.attributes['Name'] = 'PFiles'
d3 = doc.createElement( 'Directory' )
d3.attributes['Id'] = 'VENDOR_folder'
d3.attributes['Name'] = escape( gen_dos_short_file_name( VENDOR, filename_set ) )
d3.attributes['LongName'] = escape( VENDOR )
d4 = doc.createElement( 'Directory' )
project_folder = "%s-%s" % ( NAME, VERSION )
d4.attributes['Id'] = 'MY_DEFAULT_FOLDER'
d4.attributes['Name'] = escape( gen_dos_short_file_name( project_folder, filename_set ) )
d4.attributes['LongName'] = escape( project_folder )
d1.childNodes.append( d2 )
d2.childNodes.append( d3 )
d3.childNodes.append( d4 )
root.getElementsByTagName('Product')[0].childNodes.append( d1 )
return d4
#
# mandatory and optional file tags
#
def build_wxsfile_file_section(root, files, NAME, VERSION, VENDOR, filename_set, id_set):
""" builds the Component sections of the wxs file with their included files.
    Files need to be specified in 8.3 format and in the long name format; long
    filenames will be converted automatically.
    Features are specified with the 'X_MSI_FEATURE' or 'DOC' FileTag.
"""
root = create_default_directory_layout( root, NAME, VERSION, VENDOR, filename_set )
components = create_feature_dict( files )
factory = Document()
def get_directory( node, dir ):
""" returns the node under the given node representing the directory.
Returns the component node if dir is None or empty.
"""
if dir == '' or not dir:
return node
Directory = node
dir_parts = dir.split(os.path.sep)
# to make sure that our directory ids are unique, the parent folders are
# consecutively added to upper_dir
upper_dir = ''
# walk down the xml tree finding parts of the directory
dir_parts = [d for d in dir_parts if d != '']
for d in dir_parts[:]:
already_created = [c for c in Directory.childNodes
if c.nodeName == 'Directory'
and c.attributes['LongName'].value == escape(d)]
if already_created != []:
Directory = already_created[0]
dir_parts.remove(d)
upper_dir += d
else:
break
for d in dir_parts:
nDirectory = factory.createElement( 'Directory' )
nDirectory.attributes['LongName'] = escape( d )
nDirectory.attributes['Name'] = escape( gen_dos_short_file_name( d, filename_set ) )
upper_dir += d
nDirectory.attributes['Id'] = convert_to_id( upper_dir, id_set )
Directory.childNodes.append( nDirectory )
Directory = nDirectory
return Directory
for file in files:
drive, path = os.path.splitdrive( file.PACKAGING_INSTALL_LOCATION )
filename = os.path.basename( path )
dirname = os.path.dirname( path )
h = {
# tagname : default value
'PACKAGING_X_MSI_VITAL' : 'yes',
'PACKAGING_X_MSI_FILEID' : convert_to_id(filename, id_set),
'PACKAGING_X_MSI_LONGNAME' : filename,
'PACKAGING_X_MSI_SHORTNAME' : gen_dos_short_file_name(filename, filename_set),
'PACKAGING_X_MSI_SOURCE' : file.get_path(),
}
# fill in the default tags given above.
for k,v in [ (k, v) for (k,v) in h.items() if not hasattr(file, k) ]:
setattr( file, k, v )
File = factory.createElement( 'File' )
File.attributes['LongName'] = escape( file.PACKAGING_X_MSI_LONGNAME )
File.attributes['Name'] = escape( file.PACKAGING_X_MSI_SHORTNAME )
File.attributes['Source'] = escape( file.PACKAGING_X_MSI_SOURCE )
File.attributes['Id'] = escape( file.PACKAGING_X_MSI_FILEID )
File.attributes['Vital'] = escape( file.PACKAGING_X_MSI_VITAL )
# create the <Component> Tag under which this file should appear
Component = factory.createElement('Component')
Component.attributes['DiskId'] = '1'
Component.attributes['Id'] = convert_to_id( filename, id_set )
# hang the component node under the root node and the file node
# under the component node.
Directory = get_directory( root, dirname )
Directory.childNodes.append( Component )
Component.childNodes.append( File )
#
# additional functions
#
def build_wxsfile_features_section(root, files, NAME, VERSION, SUMMARY, id_set):
""" This function creates the <features> tag based on the supplied xml tree.
This is achieved by finding all <component>s and adding them to a default target.
    It should be called after the tree has been built completely. We assume
that a MY_DEFAULT_FOLDER Property is defined in the wxs file tree.
    Furthermore a top-level Feature with the NAME and VERSION of the software will be created.
    A PACKAGING_X_MSI_FEATURE can either be a string, where the feature
DESCRIPTION will be the same as its title or a Tuple, where the first
part will be its title and the second its DESCRIPTION.
"""
factory = Document()
Feature = factory.createElement('Feature')
Feature.attributes['Id'] = 'complete'
Feature.attributes['ConfigurableDirectory'] = 'MY_DEFAULT_FOLDER'
Feature.attributes['Level'] = '1'
Feature.attributes['Title'] = escape( '%s %s' % (NAME, VERSION) )
Feature.attributes['Description'] = escape( SUMMARY )
Feature.attributes['Display'] = 'expand'
for (feature, files) in create_feature_dict(files).items():
SubFeature = factory.createElement('Feature')
SubFeature.attributes['Level'] = '1'
if SCons.Util.is_Tuple(feature):
SubFeature.attributes['Id'] = convert_to_id( feature[0], id_set )
SubFeature.attributes['Title'] = escape(feature[0])
SubFeature.attributes['Description'] = escape(feature[1])
else:
SubFeature.attributes['Id'] = convert_to_id( feature, id_set )
if feature=='default':
SubFeature.attributes['Description'] = 'Main Part'
SubFeature.attributes['Title'] = 'Main Part'
elif feature=='PACKAGING_DOC':
SubFeature.attributes['Description'] = 'Documentation'
SubFeature.attributes['Title'] = 'Documentation'
else:
SubFeature.attributes['Description'] = escape(feature)
SubFeature.attributes['Title'] = escape(feature)
# build the componentrefs. As one of the design decision is that every
# file is also a component we walk the list of files and create a
# reference.
for f in files:
ComponentRef = factory.createElement('ComponentRef')
ComponentRef.attributes['Id'] = convert_to_id( os.path.basename(f.get_path()), id_set )
SubFeature.childNodes.append(ComponentRef)
Feature.childNodes.append(SubFeature)
root.getElementsByTagName('Product')[0].childNodes.append(Feature)
def build_wxsfile_default_gui(root):
""" this function adds a default GUI to the wxs file
"""
factory = Document()
Product = root.getElementsByTagName('Product')[0]
UIRef = factory.createElement('UIRef')
UIRef.attributes['Id'] = 'WixUI_Mondo'
Product.childNodes.append(UIRef)
UIRef = factory.createElement('UIRef')
UIRef.attributes['Id'] = 'WixUI_ErrorProgressText'
Product.childNodes.append(UIRef)
def build_license_file(directory, spec):
""" creates a License.rtf file with the content of "X_MSI_LICENSE_TEXT"
in the given directory
"""
name, text = '', ''
try:
name = spec['LICENSE']
text = spec['X_MSI_LICENSE_TEXT']
except KeyError:
pass # ignore this as X_MSI_LICENSE_TEXT is optional
if name!='' or text!='':
file = open( os.path.join(directory.get_path(), 'License.rtf'), 'w' )
file.write('{\\rtf')
if text!='':
file.write(text.replace('\n', '\\par '))
else:
file.write(name+'\\par\\par')
file.write('}')
file.close()
#
# mandatory and optional package tags
#
def build_wxsfile_header_section(root, spec):
""" Adds the xml file node which define the package meta-data.
"""
# Create the needed DOM nodes and add them at the correct position in the tree.
factory = Document()
Product = factory.createElement( 'Product' )
Package = factory.createElement( 'Package' )
root.childNodes.append( Product )
Product.childNodes.append( Package )
# set "mandatory" default values
if 'X_MSI_LANGUAGE' not in spec:
spec['X_MSI_LANGUAGE'] = '1033' # select english
# mandatory sections, will throw a KeyError if the tag is not available
Product.attributes['Name'] = escape( spec['NAME'] )
Product.attributes['Version'] = escape( spec['VERSION'] )
Product.attributes['Manufacturer'] = escape( spec['VENDOR'] )
Product.attributes['Language'] = escape( spec['X_MSI_LANGUAGE'] )
Package.attributes['Description'] = escape( spec['SUMMARY'] )
    # now the optional tags, for which we avoid the KeyError exception
if 'DESCRIPTION' in spec:
Package.attributes['Comments'] = escape( spec['DESCRIPTION'] )
if 'X_MSI_UPGRADE_CODE' in spec:
Package.attributes['X_MSI_UPGRADE_CODE'] = escape( spec['X_MSI_UPGRADE_CODE'] )
# We hardcode the media tag as our current model cannot handle it.
Media = factory.createElement('Media')
Media.attributes['Id'] = '1'
Media.attributes['Cabinet'] = 'default.cab'
Media.attributes['EmbedCab'] = 'yes'
root.getElementsByTagName('Product')[0].childNodes.append(Media)
# this builder is the entry-point for .wxs file compiler.
wxs_builder = Builder(
action = Action( build_wxsfile, string_wxsfile ),
ensure_suffix = '.wxs' )
def package(env, target, source, PACKAGEROOT, NAME, VERSION,
DESCRIPTION, SUMMARY, VENDOR, X_MSI_LANGUAGE, **kw):
# make sure that the Wix Builder is in the environment
SCons.Tool.Tool('wix').generate(env)
    # Gather the keywords for the specfile compiler. These are the arguments
    # given to the package function and all optional ones stored in kw, minus
    # the source, target and env ones.
loc = locals()
del loc['kw']
kw.update(loc)
del kw['source'], kw['target'], kw['env']
# strip the install builder from the source files
target, source = stripinstallbuilder(target, source, env)
# put the arguments into the env and call the specfile builder.
env['msi_spec'] = kw
specfile = wxs_builder(* [env, target, source], **kw)
# now call the WiX Tool with the built specfile added as a source.
msifile = env.WiX(target, specfile)
# return the target and source tuple.
return (msifile, source+[specfile])
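# Illustrative sketch (not part of the original module): this entry point is
# normally reached through the generic packaging tool from an SConstruct; the
# exact keyword set below is an assumption for illustration only:
#
#   env = Environment(tools=['default', 'packaging'])
#   env.Package(NAME='foo', VERSION='1.0', PACKAGETYPE='msi',
#               SUMMARY='Foo tool', DESCRIPTION='Example package',
#               VENDOR='Acme', X_MSI_LANGUAGE='1033',
#               source=env.Install('install', 'foo.exe'))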
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| {
"content_hash": "e0abfb87594361569983ceecbee94029",
"timestamp": "",
"source": "github",
"line_count": 527,
"max_line_length": 114,
"avg_line_length": 38.34535104364326,
"alnum_prop": 0.6368764845605701,
"repo_name": "unix1986/scons",
"id": "9611af4710bb51cd19bd04ef2000cf647303219c",
"size": "20208",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "engine/SCons/Tool/packaging/msi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Groff",
"bytes": "537068"
},
{
"name": "Python",
"bytes": "1939355"
},
{
"name": "Shell",
"bytes": "1502"
}
],
"symlink_target": ""
} |
import urllib
from buildbot.changes import svnpoller
from twisted.internet import defer
from twisted.python import log
from common import chromium_utils
from master.try_job_base import text_to_dict
from master.try_job_repo import TryJobRepoBase
class SVNPoller(svnpoller.SVNPoller):
"""A hook in svnpoller.SVNPoller for TryJobSubversion.
This class is intentionally minimalistic. It does nothing but delegate
change process to TryJobSubversion.
"""
def __init__(self, try_job, **kwargs):
svnpoller.SVNPoller.__init__(self, **kwargs)
self.try_job = try_job
def submit_changes(self, changes):
"""Passes the changes to TryJobSubversion.
Instead of submitting the changes to the master, pass them to
TryJobSubversion. We don't want buildbot to see these changes.
"""
return self.try_job.process_svn_changes(changes)
class TryJobSubversion(TryJobRepoBase):
"""Poll a Subversion server to grab patches to try."""
def __init__(self, name, pools, svn_url, properties=None,
last_good_urls=None, code_review_sites=None):
TryJobRepoBase.__init__(self, name, pools, properties, last_good_urls,
code_review_sites)
self.watcher = SVNPoller(self,
svnurl=svn_url,
svnbin=chromium_utils.SVN_BIN,
pollinterval=10)
@defer.deferredGenerator
def process_svn_changes(self, changes):
"""For each change submit a job"""
for change in changes:
# pylint: disable=E1101
options = self.parse_options(text_to_dict(change['comments']))
# Generate patch_url.
diff_filename = findSingleDiff(change['files'])
patch_url = ('%s/%s@%s' % (
self.watcher.svnurl,
urllib.quote(diff_filename),
change['revision'])
)
options['patch_url'] = patch_url
options['patch_storage'] = 'svn'
# Read patch contents.
wfd = defer.waitForDeferred(self.watcher.getProcessOutput(
['cat', patch_url, '--non-interactive']))
yield wfd
options['patch'] = wfd.getResult()
self.addJob(options)
def findSingleDiff(files):
"""Find the only .diff file"""
# Implicitly skips over non-files like directories.
diffs = [f for f in files if f.endswith(".diff")]
if len(diffs) != 1:
# We only accept changes with 1 diff file.
log.msg("Try job with too many files %s" % (','.join(files)))
return diffs[0]
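# Illustrative sketch (not part of the original module):
#
#   findSingleDiff(['README', 'jobs/change.diff'])   # -> 'jobs/change.diff'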
| {
"content_hash": "dd9b07c12ebf624f9b7b62dae2d8097b",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 74,
"avg_line_length": 32.61842105263158,
"alnum_prop": 0.6494554255748286,
"repo_name": "eunchong/build",
"id": "567a5e5438d817d640648710efee3a807937882b",
"size": "2646",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "scripts/master/try_job_svn.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3128"
},
{
"name": "CSS",
"bytes": "211818"
},
{
"name": "HTML",
"bytes": "429981"
},
{
"name": "JavaScript",
"bytes": "75624"
},
{
"name": "Makefile",
"bytes": "21204"
},
{
"name": "Python",
"bytes": "6143109"
},
{
"name": "Shell",
"bytes": "23512"
}
],
"symlink_target": ""
} |
"""
Simple pong game to test how ecore and evas work together.
"""
from os.path import abspath, join as join_path
from functools import partial
from collections import defaultdict
from random import uniform as randf, seed
from ecore.evas import SoftwareX11
from ecore import main_loop_begin, animator_add, animator_frametime_set,\
main_loop_quit
from evas import Rect
from emotion import Emotion
from edje import Edje
# Initializes the random number generation
seed()
# The size of the screen
SCREEN_SIZE = (480, 480)
# Whether the game is running or not
GAME_RUNNING = True
# Directory where the freaking assets are
DATA_DIR = abspath("./data")
THEME = join_path(DATA_DIR, 'theme.edj')
# The game (you just lost it :3)
def game_main():
# Instantiate a canvas using the SoftwareX11 rendering engine. This
# does all the work of creating the canvas, assigning it to the
# rendering engine and integrating it with the `ecore' loop.
#
    # From there onwards, the only thing left is to add objects to the
# Canvas, and manipulate these objects. The `ecore' loop will call
# render automatically on `idle'.
w, h = SCREEN_SIZE
ee = SoftwareX11(w=w, h=h)
canvas = ee.evas
# Setups the game window and field by creating objects and adding
# them to the canvas.
setup_field(ee)
# Sets the framerate of the game to 60FPS
animator_frametime_set(1.0 / 60.0)
# Finally, shows all this stuff to the user and enters the main
    # loop. From there onwards the only say we have is on the callbacks
# from the events we've set to watch.
ee.title = "Pong"
ee.size_max_set(*SCREEN_SIZE)
ee.size_min_set(*SCREEN_SIZE)
ee.show()
main_loop_begin()
# Stops stuff when the game ends
fire_hooks('the:end')
# Whenever we lose the game, this function is called, showing a "nice"
# game-over screen and disabling the game object's handlers.
def game_over(canvas):
global GAME_RUNNING
bg = background(canvas, (0, 0, 0, 128))
txt = text(canvas, "GAME OVER", 48)
GAME_RUNNING = False
# Hooks are collections of functions that are called when a certain
# user-defined event happens. This dictionary maps each user-defined
# event (by name) to a list of functions to be called.
hooks = defaultdict(list)
# Allows the user to add new hooks to a certain kind of event
def add_hook(kind, fn, *args, **kwargs):
hooks[kind].append(partial(fn, *args, **kwargs))
# Allows the user to fire all hooks related to the given event, with any
# optional positional/keyword arguments
def fire_hooks(kind, *args, **kwargs):
for hook in hooks[kind]:
hook(*args, **kwargs)
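# Illustrative sketch (not part of the original game code): handlers are
# registered for a named event and later fired with positional arguments,
# mirroring how game_screen() uses them below, e.g.:
#
#   add_hook('score:change', on_score_change)
#   fire_hooks('score:change', new_score)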
# Setups the field, by adding background, paddles, ball and the textual
# representations of scores and player-names to the canvas. These are
# self-aware objects that know how to update themselves, so we don't
# need to worry either about re-rendering them (which is done by
# Evas/Ecore itself), nor with manipulating them.
def setup_field(engine):
canvas = engine.evas
make_player(canvas)
splash_screen(canvas)
main_menu(canvas)
game_screen(canvas)
# Adds some useful hooks
add_hook('game:over', game_over, canvas)
fire_hooks('play:bgm', 'bgm.mp3')
# Abstract "classes" for the standard Evas objects. These include simple
# things like background and centered text, to full featured objects,
# like the paddles and the ball itself.
#
# Basically these use the underlying canvas methods to add simple
# geometry objects to the canvas (ie.: canvas.Rectangle()) will create a
# Rectangle object and assign it to the canvas, automatically rendering
# it whenever it's needed.
def background(canvas, colour):
bg = canvas.Rectangle(color=colour)
bg.resize(*canvas.size)
bg.show()
return bg
# Centered text stuff
def text(canvas, string, size, colour=(200, 200, 200, 128)):
text = canvas.Text(color=colour)
text.text_set(string)
text.font_set('Monospace', size)
text.show()
center_text(canvas.rect, text)
return text
# Centers a text object in the given rectangle
def center_text(rect, text):
x, y = rect.center
text.move(x - text.horiz_advance/2
,y - text.vert_advance/2)
# Splash screen and main menu
def splash_screen(canvas):
def show_main_menu(obj, signal, source):
fire_hooks('show:main-menu')
splash = Edje(canvas, file=THEME, group='splash')
splash.signal_callback_add("show,main-menu", "", show_main_menu)
splash.show()
def main_menu(canvas):
def on_show():
menu.signal_emit("show,main-menu", "")
def quit_game(obj, signal, source):
main_loop_quit()
def start_game(obj, signal, source):
fire_hooks('game:new')
menu = Edje(canvas, file=THEME, group="main-menu")
menu.signal_callback_add("game,new", "", start_game)
menu.signal_callback_add("game,quit", "", quit_game)
menu.show()
add_hook('show:main-menu', on_show)
def game_screen(canvas):
def start_game():
set_score(0)
xpaddle('top', canvas, 10)
        xpaddle('bottom', canvas, canvas.rect.bottom - PADDLE_WIDTH - 10)
ypaddle('left', canvas, 10)
        ypaddle('right', canvas, canvas.rect.right - PADDLE_WIDTH - 10)
ball(canvas)
game.show()
def on_score_change(new_score):
game.part_text_set("game/score", str(new_score))
game = Edje(canvas, file=THEME, group="game-screen")
add_hook("game:new", start_game)
add_hook('score:change', on_score_change)
# The sizes of the paddle
PADDLE_WIDTH = 20
PADDLE_HEIGHT = 200
# Creates a base paddle, at the given position, and using the given
# controller function. The controller is called at 60FPS, in the
# animator callback set by this object, and it's expected to update the
# paddle's state depending on the mouse input.
def paddle(name, canvas, pos, size, controller):
# Handles the mouse input by updating the paddle's position. This is
# run at 60 FPS, as long as the game is running (ie.: no
# game-over).
#
# Since the animator expects each callback to return whether it
# should continue running or not — by signaling with either True or
# False — we just return the value of GAME_RUNNING here.
def handle_input():
screen = canvas.rect.move_by(10, 10).inflate(-20, -20)
px, py = canvas.pointer_canvas_xy
controller(pad, px, py)
pad.rect = pad.rect.clamp(screen)
return GAME_RUNNING
pad = canvas.Rectangle(name=name, color=(238, 238, 236, 255))
pad.resize(*size)
pad.move(*pos)
pad.show()
# Adds the pad area as a solid area, so the ball collides with it.
add_collision_object(pad)
# Adds the input handler to the list of animator callbacks
animator_add(handle_input)
return pad
# Provides a paddle that's controlled by the x-axis of the mouse input.
def xpaddle(name, canvas, pos):
def controller(pad, mouse_x, mouse_y):
pad.move(mouse_x, pad.rect.y)
return paddle(name, canvas
,(canvas.rect.center_x, pos)
,(PADDLE_HEIGHT, PADDLE_WIDTH)
,controller)
# Provides a paddle that's controlled by the y-axis of the mouse input.
def ypaddle(name, canvas, pos):
def controller(pad, mouse_x, mouse_y):
pad.move(pad.rect.x, mouse_y)
return paddle(name, canvas
,(pos, canvas.rect.center_y)
,(PADDLE_WIDTH, PADDLE_HEIGHT)
,controller)
# Checks for collisions
solid_areas = []
def add_collision_object(obj):
solid_areas.append(obj)
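# Returns the first registered solid object whose rectangle intersects the
# given rect; falls through (returning None) when nothing collides.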
def collidesp(rect):
for obj in solid_areas:
if obj.rect.intercepts(rect):
return obj
# The "ball"
BALL_SIZE = (20, 20)
def ball(canvas):
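    # Pushes the ball flush against the edge of the solid object it just
    # hit, so it never ends up overlapping a paddle after a collision.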
def clamp_bounds(solid):
rect = ball.rect
if solid.name == 'right': rect.right = solid.rect.left
if solid.name == 'left': rect.left = solid.rect.right
if solid.name == 'bottom': rect.bottom = solid.rect.top
if solid.name == 'top': rect.top = solid.rect.bottom
ball.rect = rect
def check_collisions():
solid = collidesp(ball.rect)
w, h = BALL_SIZE
if solid:
fire_hooks('play:sfx', 'hit.wav')
increase_score()
clamp_bounds(solid)
reverse(x=(solid.name in ['left', 'right'])
,y=(solid.name in ['top', 'bottom']))
def outta_screen_p():
return ball.rect.left < 0 or ball.rect.right > canvas.rect.right \
               or ball.rect.top < 0 or ball.rect.bottom > canvas.rect.bottom
def input_handler():
move()
check_collisions()
if outta_screen_p():
fire_hooks('game:over')
return GAME_RUNNING
def move():
pos[0] += speed['x']
pos[1] += speed['y']
ball.move(*pos)
def init_ball(ball):
ball.resize(*BALL_SIZE)
ball.move(*canvas.rect.center)
ball.show()
animator_add(input_handler)
return ball
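    # Flips the ball's velocity on the requested axes: the speed is clamped
    # to half the ball size per frame and then multiplied by -1.1, which
    # reverses the direction and makes every bounce 10% faster.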
def reverse(x=False, y=False):
w, h = BALL_SIZE
if x: speed['x'] = max(min(speed['x'], w/2), -w/2) * -1.1
if y: speed['y'] = max(min(speed['y'], h/2), -h/2) * -1.1
ball = init_ball(canvas.Rectangle(color=(171, 180, 161, 200)))
speed = {'x': randf(1, 2), 'y': randf(1, 2)}
pos = list(ball.pos)
return ball
# The player's stuff
SCORE = 0
def increase_score():
set_score(SCORE + 1)
return True
def set_score(new_score):
global SCORE
SCORE = new_score
fire_hooks('score:change', SCORE)
def score(canvas):
def on_score_change(new_score):
score_text.text_set(str(new_score))
center_text(canvas.rect, score_text)
score_text = text(canvas, '0', 200, (85, 87, 83, 255))
add_hook('score:change', on_score_change)
add_hook('game:new', set_score, 0)
return score_text
# Brings in some Emotion.
#
# The emotion lib is used to play sounds and videos, and it uses
# the usual Ecore event loop and Evas for rendering.
#
# Emotion can use either `xine' or `gstreamer' as the sound engine, it
# seems, but I haven't dug too much into this. But well, these two are
# fo sho :3
def make_player(canvas, engine="gstreamer"):
def stop(player):
player.file = ""
def play(player, media):
stop(player)
player.file = join_path(DATA_DIR, media)
player.play = True
def replay(player):
fname = player.file
stop(player)
play(player, fname)
sfx_player = Emotion(canvas, module_filename=engine)
bgm_player = Emotion(canvas, module_filename=engine)
bgm_player.on_playback_finished_add(replay)
add_hook('play:sfx', play, sfx_player)
add_hook('game:end', stop, sfx_player)
add_hook('play:bgm', play, bgm_player)
add_hook('game:over', stop, bgm_player)
add_hook('game:end', stop, bgm_player)
########################################################################
if __name__ == '__main__':
game_main()
| {
"content_hash": "53a983143d9c5ff2620cf5be3e3d3933",
"timestamp": "",
"source": "github",
"line_count": 401,
"max_line_length": 79,
"avg_line_length": 27.895261845386532,
"alnum_prop": 0.6357053459681745,
"repo_name": "robotlolita/pong",
"id": "a43f8730704b64c5f0870f14939e22c4aaafffd5",
"size": "12354",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pong.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12354"
}
],
"symlink_target": ""
} |
import logging
from common.gps import apply_timezone_offset
def partition_gps(entities):
"""Partition Photo entities by GPS lat/lon and datetime.
Arguments:
entities: sequence of Photo entities
    Returns:
complete_images: sequence of Photos containing both GPS lat/lon and datetime
incomplete_images: sequence of Photos that do not contain both GPS lat/lon and datetime
"""
complete_images = []
incomplete_images = []
for entity in entities:
has_lat = entity.has_key('lat')
has_lon = entity.has_key('lon')
has_datetime = entity.has_key('image_datetime')
if has_lat and has_lon and has_datetime:
complete_images.append(entity)
else:
incomplete_images.append(entity)
return complete_images, incomplete_images
def update_incomplete(complete_image, incomplete_image):
"""Update incomplete photos based on complete photo. Only the lat/lon
are updated, not the datetime, since the datetime of the
complete photo is not relevant (the incomplete photo's camera
timestamp is used instead of GPS)
Arguments:
complete_image: a photo entity with GPS lat/lon and datetime
incomplete_image: a photo entity lacking GPS lat/lon and datetime
    Returns:
The updated incomplete photo entity, which is now complete
"""
if not incomplete_image.has_key('lat') and complete_image is not None:
incomplete_image['lat'] = complete_image['lat']
if not incomplete_image.has_key('lon') and complete_image is not None:
incomplete_image['lon'] = complete_image['lon']
if not incomplete_image.has_key('image_datetime') and incomplete_image.has_key('camera_datetime'):
logging.info("Repairing image_datetime from camera_datetime")
if incomplete_image.has_key('lon') and incomplete_image.has_key('lat'):
incomplete_image['image_datetime'] = apply_timezone_offset(incomplete_image['lat'], -incomplete_image['lon'], incomplete_image['camera_datetime'])
incomplete_image['datetime_repaired'] = True
return incomplete_image
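# Illustrative usage sketch (not part of the original module). The `photos`
# list and the choice of pairing every incomplete photo with the first
# complete one are assumptions made only for this comment:
#
#     complete, incomplete = partition_gps(photos)
#     reference = complete[0] if complete else None
#     repaired = [update_incomplete(reference, photo) for photo in incomplete]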
| {
"content_hash": "a567e277210c56a4e7af8c7f42689c49",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 158,
"avg_line_length": 45.276595744680854,
"alnum_prop": 0.6912593984962406,
"repo_name": "google/eclipse2017",
"id": "b795df591331787a112d622819d5467f8e7936e3",
"size": "2706",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "common/repair_missing_gps.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1210"
},
{
"name": "HTML",
"bytes": "174182"
},
{
"name": "JavaScript",
"bytes": "72747"
},
{
"name": "Python",
"bytes": "665417"
},
{
"name": "Shell",
"bytes": "47103"
}
],
"symlink_target": ""
} |
import datetime
import functools
import hashlib
import os
import time
import uuid
import mock
from oslo_concurrency import processutils as putils
from oslo_config import cfg
from oslo_utils import timeutils
import paramiko
import six
from six.moves import range
import cinder
from cinder import exception
from cinder import ssh_utils
from cinder import test
from cinder import utils
CONF = cfg.CONF
class ExecuteTestCase(test.TestCase):
@mock.patch('cinder.utils.processutils.execute')
def test_execute(self, mock_putils_exe):
output = utils.execute('a', 1, foo='bar')
self.assertEqual(mock_putils_exe.return_value, output)
mock_putils_exe.assert_called_once_with('a', 1, foo='bar')
@mock.patch('cinder.utils.get_root_helper')
@mock.patch('cinder.utils.processutils.execute')
def test_execute_root(self, mock_putils_exe, mock_get_helper):
output = utils.execute('a', 1, foo='bar', run_as_root=True)
self.assertEqual(mock_putils_exe.return_value, output)
mock_helper = mock_get_helper.return_value
mock_putils_exe.assert_called_once_with('a', 1, foo='bar',
run_as_root=True,
root_helper=mock_helper)
@mock.patch('cinder.utils.get_root_helper')
@mock.patch('cinder.utils.processutils.execute')
def test_execute_root_and_helper(self, mock_putils_exe, mock_get_helper):
mock_helper = mock.Mock()
output = utils.execute('a', 1, foo='bar', run_as_root=True,
root_helper=mock_helper)
self.assertEqual(mock_putils_exe.return_value, output)
self.assertFalse(mock_get_helper.called)
mock_putils_exe.assert_called_once_with('a', 1, foo='bar',
run_as_root=True,
root_helper=mock_helper)
class GetFromPathTestCase(test.TestCase):
def test_tolerates_nones(self):
f = utils.get_from_path
input = []
self.assertEqual([], f(input, "a"))
self.assertEqual([], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [None]
self.assertEqual([], f(input, "a"))
self.assertEqual([], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [{'a': None}]
self.assertEqual([], f(input, "a"))
self.assertEqual([], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [{'a': {'b': None}}]
self.assertEqual([{'b': None}], f(input, "a"))
self.assertEqual([], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [{'a': {'b': {'c': None}}}]
self.assertEqual([{'b': {'c': None}}], f(input, "a"))
self.assertEqual([{'c': None}], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [{'a': {'b': {'c': None}}}, {'a': None}]
self.assertEqual([{'b': {'c': None}}], f(input, "a"))
self.assertEqual([{'c': None}], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [{'a': {'b': {'c': None}}}, {'a': {'b': None}}]
self.assertEqual([{'b': {'c': None}}, {'b': None}], f(input, "a"))
self.assertEqual([{'c': None}], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
def test_does_select(self):
f = utils.get_from_path
input = [{'a': 'a_1'}]
self.assertEqual(['a_1'], f(input, "a"))
self.assertEqual([], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [{'a': {'b': 'b_1'}}]
self.assertEqual([{'b': 'b_1'}], f(input, "a"))
self.assertEqual(['b_1'], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [{'a': {'b': {'c': 'c_1'}}}]
self.assertEqual([{'b': {'c': 'c_1'}}], f(input, "a"))
self.assertEqual([{'c': 'c_1'}], f(input, "a/b"))
self.assertEqual(['c_1'], f(input, "a/b/c"))
input = [{'a': {'b': {'c': 'c_1'}}}, {'a': None}]
self.assertEqual([{'b': {'c': 'c_1'}}], f(input, "a"))
self.assertEqual([{'c': 'c_1'}], f(input, "a/b"))
self.assertEqual(['c_1'], f(input, "a/b/c"))
input = [{'a': {'b': {'c': 'c_1'}}},
{'a': {'b': None}}]
self.assertEqual([{'b': {'c': 'c_1'}}, {'b': None}], f(input, "a"))
self.assertEqual([{'c': 'c_1'}], f(input, "a/b"))
self.assertEqual(['c_1'], f(input, "a/b/c"))
input = [{'a': {'b': {'c': 'c_1'}}},
{'a': {'b': {'c': 'c_2'}}}]
self.assertEqual([{'b': {'c': 'c_1'}}, {'b': {'c': 'c_2'}}],
f(input, "a"))
self.assertEqual([{'c': 'c_1'}, {'c': 'c_2'}], f(input, "a/b"))
self.assertEqual(['c_1', 'c_2'], f(input, "a/b/c"))
self.assertEqual([], f(input, "a/b/c/d"))
self.assertEqual([], f(input, "c/a/b/d"))
self.assertEqual([], f(input, "i/r/t"))
def test_flattens_lists(self):
f = utils.get_from_path
input = [{'a': [1, 2, 3]}]
self.assertEqual([1, 2, 3], f(input, "a"))
self.assertEqual([], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [{'a': {'b': [1, 2, 3]}}]
self.assertEqual([{'b': [1, 2, 3]}], f(input, "a"))
self.assertEqual([1, 2, 3], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [{'a': {'b': [1, 2, 3]}}, {'a': {'b': [4, 5, 6]}}]
self.assertEqual([1, 2, 3, 4, 5, 6], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [{'a': [{'b': [1, 2, 3]}, {'b': [4, 5, 6]}]}]
self.assertEqual([1, 2, 3, 4, 5, 6], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [{'a': [1, 2, {'b': 'b_1'}]}]
self.assertEqual([1, 2, {'b': 'b_1'}], f(input, "a"))
self.assertEqual(['b_1'], f(input, "a/b"))
def test_bad_xpath(self):
f = utils.get_from_path
self.assertRaises(exception.Error, f, [], None)
self.assertRaises(exception.Error, f, [], "")
self.assertRaises(exception.Error, f, [], "/")
self.assertRaises(exception.Error, f, [], "/a")
self.assertRaises(exception.Error, f, [], "/a/")
self.assertRaises(exception.Error, f, [], "//")
self.assertRaises(exception.Error, f, [], "//a")
self.assertRaises(exception.Error, f, [], "a//a")
self.assertRaises(exception.Error, f, [], "a//a/")
self.assertRaises(exception.Error, f, [], "a/a/")
def test_real_failure1(self):
# Real world failure case...
# We weren't coping when the input was a Dictionary instead of a List
# This led to test_accepts_dictionaries
f = utils.get_from_path
inst = {'fixed_ip': {'floating_ips': [{'address': '1.2.3.4'}],
'address': '192.168.0.3'},
'hostname': ''}
private_ips = f(inst, 'fixed_ip/address')
public_ips = f(inst, 'fixed_ip/floating_ips/address')
self.assertEqual(['192.168.0.3'], private_ips)
self.assertEqual(['1.2.3.4'], public_ips)
def test_accepts_dictionaries(self):
f = utils.get_from_path
input = {'a': [1, 2, 3]}
self.assertEqual([1, 2, 3], f(input, "a"))
self.assertEqual([], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = {'a': {'b': [1, 2, 3]}}
self.assertEqual([{'b': [1, 2, 3]}], f(input, "a"))
self.assertEqual([1, 2, 3], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = {'a': [{'b': [1, 2, 3]}, {'b': [4, 5, 6]}]}
self.assertEqual([1, 2, 3, 4, 5, 6], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = {'a': [1, 2, {'b': 'b_1'}]}
self.assertEqual([1, 2, {'b': 'b_1'}], f(input, "a"))
self.assertEqual(['b_1'], f(input, "a/b"))
class GenericUtilsTestCase(test.TestCase):
@mock.patch('os.path.exists', return_value=True)
def test_find_config(self, mock_exists):
path = '/etc/cinder/cinder.conf'
cfgpath = utils.find_config(path)
self.assertEqual(path, cfgpath)
mock_exists.return_value = False
self.assertRaises(exception.ConfigNotFound,
utils.find_config,
path)
def test_as_int(self):
test_obj_int = '2'
test_obj_float = '2.2'
for obj in [test_obj_int, test_obj_float]:
self.assertEqual(2, utils.as_int(obj))
obj = 'not_a_number'
self.assertEqual(obj, utils.as_int(obj))
self.assertRaises(TypeError,
utils.as_int,
obj,
quiet=False)
def test_is_int_like(self):
self.assertTrue(utils.is_int_like(1))
self.assertTrue(utils.is_int_like(-1))
self.assertTrue(utils.is_int_like(0b1))
self.assertTrue(utils.is_int_like(0o1))
self.assertTrue(utils.is_int_like(0x1))
self.assertTrue(utils.is_int_like('1'))
self.assertFalse(utils.is_int_like(1.0))
self.assertFalse(utils.is_int_like('abc'))
def test_check_exclusive_options(self):
utils.check_exclusive_options()
utils.check_exclusive_options(something=None,
pretty_keys=True,
unit_test=True)
self.assertRaises(exception.InvalidInput,
utils.check_exclusive_options,
test=True,
unit=False,
pretty_keys=True)
self.assertRaises(exception.InvalidInput,
utils.check_exclusive_options,
test=True,
unit=False,
pretty_keys=False)
def test_require_driver_intialized(self):
driver = mock.Mock()
driver.initialized = True
utils.require_driver_initialized(driver)
driver.initialized = False
self.assertRaises(exception.DriverNotInitialized,
utils.require_driver_initialized,
driver)
def test_hostname_unicode_sanitization(self):
hostname = u"\u7684.test.example.com"
self.assertEqual("test.example.com",
utils.sanitize_hostname(hostname))
def test_hostname_sanitize_periods(self):
hostname = "....test.example.com..."
self.assertEqual("test.example.com",
utils.sanitize_hostname(hostname))
def test_hostname_sanitize_dashes(self):
hostname = "----test.example.com---"
self.assertEqual("test.example.com",
utils.sanitize_hostname(hostname))
def test_hostname_sanitize_characters(self):
hostname = "(#@&$!(@*--#&91)(__=+--test-host.example!!.com-0+"
self.assertEqual("91----test-host.example.com-0",
utils.sanitize_hostname(hostname))
def test_hostname_translate(self):
hostname = "<}\x1fh\x10e\x08l\x02l\x05o\x12!{>"
self.assertEqual("hello", utils.sanitize_hostname(hostname))
def test_is_valid_boolstr(self):
self.assertTrue(utils.is_valid_boolstr(True))
self.assertTrue(utils.is_valid_boolstr('trUe'))
self.assertTrue(utils.is_valid_boolstr(False))
self.assertTrue(utils.is_valid_boolstr('faLse'))
self.assertTrue(utils.is_valid_boolstr('yeS'))
self.assertTrue(utils.is_valid_boolstr('nO'))
self.assertTrue(utils.is_valid_boolstr('y'))
self.assertTrue(utils.is_valid_boolstr('N'))
self.assertTrue(utils.is_valid_boolstr(1))
self.assertTrue(utils.is_valid_boolstr('1'))
self.assertTrue(utils.is_valid_boolstr(0))
self.assertTrue(utils.is_valid_boolstr('0'))
@mock.patch('os.path.join', side_effect=lambda x, y: '/'.join((x, y)))
def test_make_dev_path(self, mock_join):
self.assertEqual('/dev/xvda', utils.make_dev_path('xvda'))
self.assertEqual('/dev/xvdb1', utils.make_dev_path('xvdb', 1))
self.assertEqual('/foo/xvdc1', utils.make_dev_path('xvdc', 1, '/foo'))
@mock.patch('cinder.utils.execute')
def test_read_file_as_root(self, mock_exec):
out = mock.Mock()
err = mock.Mock()
mock_exec.return_value = (out, err)
test_filepath = '/some/random/path'
output = utils.read_file_as_root(test_filepath)
mock_exec.assert_called_once_with('cat', test_filepath,
run_as_root=True)
self.assertEqual(out, output)
@mock.patch('cinder.utils.execute',
side_effect=putils.ProcessExecutionError)
def test_read_file_as_root_fails(self, mock_exec):
test_filepath = '/some/random/path'
self.assertRaises(exception.FileNotFound,
utils.read_file_as_root,
test_filepath)
@mock.patch('oslo_utils.timeutils.utcnow')
def test_service_is_up(self, mock_utcnow):
fts_func = datetime.datetime.fromtimestamp
fake_now = 1000
down_time = 5
self.flags(service_down_time=down_time)
mock_utcnow.return_value = fts_func(fake_now)
# Up (equal)
service = {'updated_at': fts_func(fake_now - down_time),
'created_at': fts_func(fake_now - down_time)}
result = utils.service_is_up(service)
self.assertTrue(result)
# Up
service = {'updated_at': fts_func(fake_now - down_time + 1),
'created_at': fts_func(fake_now - down_time + 1)}
result = utils.service_is_up(service)
self.assertTrue(result)
# Down
service = {'updated_at': fts_func(fake_now - down_time - 1),
'created_at': fts_func(fake_now - down_time - 1)}
result = utils.service_is_up(service)
self.assertFalse(result)
def test_safe_parse_xml(self):
normal_body = ('<?xml version="1.0" ?>'
'<foo><bar><v1>hey</v1><v2>there</v2></bar></foo>')
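        # killer_body() builds a classic "billion laughs" entity-expansion
        # payload, which safe_minidom_parse_string must refuse to parse.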
def killer_body():
return (("""<!DOCTYPE x [
<!ENTITY a "%(a)s">
<!ENTITY b "%(b)s">
<!ENTITY c "%(c)s">]>
<foo>
<bar>
<v1>%(d)s</v1>
</bar>
</foo>""") % {
'a': 'A' * 10,
'b': '&a;' * 10,
'c': '&b;' * 10,
'd': '&c;' * 9999,
}).strip()
dom = utils.safe_minidom_parse_string(normal_body)
# Some versions of minidom inject extra newlines so we ignore them
result = str(dom.toxml()).replace('\n', '')
self.assertEqual(normal_body, result)
self.assertRaises(ValueError,
utils.safe_minidom_parse_string,
killer_body())
def test_xhtml_escape(self):
self.assertEqual('"foo"', utils.xhtml_escape('"foo"'))
self.assertEqual(''foo'', utils.xhtml_escape("'foo'"))
def test_hash_file(self):
data = b'Mary had a little lamb, its fleece as white as snow'
flo = six.BytesIO(data)
h1 = utils.hash_file(flo)
h2 = hashlib.sha1(data).hexdigest()
self.assertEqual(h1, h2)
def test_check_ssh_injection(self):
cmd_list = ['ssh', '-D', 'my_name@name_of_remote_computer']
self.assertIsNone(utils.check_ssh_injection(cmd_list))
cmd_list = ['echo', '"quoted arg with space"']
self.assertIsNone(utils.check_ssh_injection(cmd_list))
cmd_list = ['echo', "'quoted arg with space'"]
self.assertIsNone(utils.check_ssh_injection(cmd_list))
def test_check_ssh_injection_on_error(self):
with_unquoted_space = ['ssh', 'my_name@ name_of_remote_computer']
self.assertRaises(exception.SSHInjectionThreat,
utils.check_ssh_injection,
with_unquoted_space)
with_danger_chars = ['||', 'my_name@name_of_remote_computer']
self.assertRaises(exception.SSHInjectionThreat,
utils.check_ssh_injection,
with_danger_chars)
with_danger_char = [';', 'my_name@name_of_remote_computer']
self.assertRaises(exception.SSHInjectionThreat,
utils.check_ssh_injection,
with_danger_char)
with_special = ['cmd', 'virus;ls']
self.assertRaises(exception.SSHInjectionThreat,
utils.check_ssh_injection,
with_special)
quoted_with_unescaped = ['cmd', '"arg\"withunescaped"']
self.assertRaises(exception.SSHInjectionThreat,
utils.check_ssh_injection,
quoted_with_unescaped)
bad_before_quotes = ['cmd', 'virus;"quoted argument"']
self.assertRaises(exception.SSHInjectionThreat,
utils.check_ssh_injection,
bad_before_quotes)
bad_after_quotes = ['echo', '"quoted argument";rm -rf']
self.assertRaises(exception.SSHInjectionThreat,
utils.check_ssh_injection,
bad_after_quotes)
bad_within_quotes = ['echo', "'quoted argument `rm -rf`'"]
self.assertRaises(exception.SSHInjectionThreat,
utils.check_ssh_injection,
bad_within_quotes)
with_multiple_quotes = ['echo', '"quoted";virus;"quoted"']
self.assertRaises(exception.SSHInjectionThreat,
utils.check_ssh_injection,
with_multiple_quotes)
with_multiple_quotes = ['echo', '"quoted";virus;\'quoted\'']
self.assertRaises(exception.SSHInjectionThreat,
utils.check_ssh_injection,
with_multiple_quotes)
@mock.patch('paramiko.SSHClient')
def test_create_channel(self, mock_client):
test_width = 600
test_height = 800
mock_channel = mock.Mock()
mock_client.invoke_shell.return_value = mock_channel
utils.create_channel(mock_client, test_width, test_height)
mock_client.invoke_shell.assert_called_once_with()
mock_channel.resize_pty.assert_called_once_with(test_width,
test_height)
@mock.patch('os.stat')
def test_get_file_mode(self, mock_stat):
class stat_result(object):
st_mode = 0o777
st_gid = 33333
test_file = '/var/tmp/made_up_file'
mock_stat.return_value = stat_result
mode = utils.get_file_mode(test_file)
self.assertEqual(0o777, mode)
mock_stat.assert_called_once_with(test_file)
@mock.patch('os.stat')
def test_get_file_gid(self, mock_stat):
class stat_result(object):
st_mode = 0o777
st_gid = 33333
test_file = '/var/tmp/made_up_file'
mock_stat.return_value = stat_result
gid = utils.get_file_gid(test_file)
self.assertEqual(33333, gid)
mock_stat.assert_called_once_with(test_file)
@mock.patch('cinder.utils.CONF')
def test_get_root_helper(self, mock_conf):
mock_conf.rootwrap_config = '/path/to/conf'
self.assertEqual('sudo cinder-rootwrap /path/to/conf',
utils.get_root_helper())
class TemporaryChownTestCase(test.TestCase):
@mock.patch('os.stat')
@mock.patch('os.getuid', return_value=1234)
@mock.patch('cinder.utils.execute')
def test_get_uid(self, mock_exec, mock_getuid, mock_stat):
mock_stat.return_value.st_uid = 5678
test_filename = 'a_file'
with utils.temporary_chown(test_filename):
mock_exec.assert_called_once_with('chown', 1234, test_filename,
run_as_root=True)
mock_getuid.asset_called_once_with()
mock_stat.assert_called_once_with(test_filename)
calls = [mock.call('chown', 1234, test_filename, run_as_root=True),
mock.call('chown', 5678, test_filename, run_as_root=True)]
mock_exec.assert_has_calls(calls)
@mock.patch('os.stat')
@mock.patch('os.getuid', return_value=1234)
@mock.patch('cinder.utils.execute')
def test_supplied_owner_uid(self, mock_exec, mock_getuid, mock_stat):
mock_stat.return_value.st_uid = 5678
test_filename = 'a_file'
with utils.temporary_chown(test_filename, owner_uid=9101):
mock_exec.assert_called_once_with('chown', 9101, test_filename,
run_as_root=True)
self.assertFalse(mock_getuid.called)
mock_stat.assert_called_once_with(test_filename)
calls = [mock.call('chown', 9101, test_filename, run_as_root=True),
mock.call('chown', 5678, test_filename, run_as_root=True)]
mock_exec.assert_has_calls(calls)
@mock.patch('os.stat')
@mock.patch('os.getuid', return_value=5678)
@mock.patch('cinder.utils.execute')
def test_matching_uid(self, mock_exec, mock_getuid, mock_stat):
mock_stat.return_value.st_uid = 5678
test_filename = 'a_file'
with utils.temporary_chown(test_filename):
pass
mock_getuid.asset_called_once_with()
mock_stat.assert_called_once_with(test_filename)
self.assertFalse(mock_exec.called)
class TempdirTestCase(test.TestCase):
@mock.patch('tempfile.mkdtemp')
@mock.patch('shutil.rmtree')
def test_tempdir(self, mock_rmtree, mock_mkdtemp):
with utils.tempdir(a='1', b=2) as td:
self.assertEqual(mock_mkdtemp.return_value, td)
self.assertFalse(mock_rmtree.called)
mock_mkdtemp.assert_called_once_with(a='1', b=2)
mock_rmtree.assert_called_once_with(mock_mkdtemp.return_value)
@mock.patch('tempfile.mkdtemp')
@mock.patch('shutil.rmtree', side_effect=OSError)
def test_tempdir_error(self, mock_rmtree, mock_mkdtemp):
with utils.tempdir(a='1', b=2) as td:
self.assertEqual(mock_mkdtemp.return_value, td)
self.assertFalse(mock_rmtree.called)
mock_mkdtemp.assert_called_once_with(a='1', b=2)
mock_rmtree.assert_called_once_with(mock_mkdtemp.return_value)
class WalkClassHierarchyTestCase(test.TestCase):
def test_walk_class_hierarchy(self):
class A(object):
pass
class B(A):
pass
class C(A):
pass
class D(B):
pass
class E(A):
pass
class_pairs = zip((D, B, E),
utils.walk_class_hierarchy(A, encountered=[C]))
for actual, expected in class_pairs:
self.assertEqual(expected, actual)
class_pairs = zip((D, B, C, E), utils.walk_class_hierarchy(A))
for actual, expected in class_pairs:
self.assertEqual(expected, actual)
class GetDiskOfPartitionTestCase(test.TestCase):
def test_devpath_is_diskpath(self):
devpath = '/some/path'
st_mock = mock.Mock()
output = utils._get_disk_of_partition(devpath, st_mock)
self.assertEqual('/some/path', output[0])
self.assertIs(st_mock, output[1])
with mock.patch('os.stat') as mock_stat:
devpath = '/some/path'
output = utils._get_disk_of_partition(devpath)
mock_stat.assert_called_once_with(devpath)
self.assertEqual(devpath, output[0])
self.assertIs(mock_stat.return_value, output[1])
@mock.patch('os.stat', side_effect=OSError)
def test_stat_oserror(self, mock_stat):
st_mock = mock.Mock()
devpath = '/some/path1'
output = utils._get_disk_of_partition(devpath, st_mock)
mock_stat.assert_called_once_with('/some/path')
self.assertEqual(devpath, output[0])
self.assertIs(st_mock, output[1])
@mock.patch('stat.S_ISBLK', return_value=True)
@mock.patch('os.stat')
def test_diskpath_is_block_device(self, mock_stat, mock_isblk):
st_mock = mock.Mock()
devpath = '/some/path1'
output = utils._get_disk_of_partition(devpath, st_mock)
self.assertEqual('/some/path', output[0])
self.assertEqual(mock_stat.return_value, output[1])
@mock.patch('stat.S_ISBLK', return_value=False)
@mock.patch('os.stat')
def test_diskpath_is_not_block_device(self, mock_stat, mock_isblk):
st_mock = mock.Mock()
devpath = '/some/path1'
output = utils._get_disk_of_partition(devpath, st_mock)
self.assertEqual(devpath, output[0])
self.assertEqual(st_mock, output[1])
class GetBlkdevMajorMinorTestCase(test.TestCase):
@mock.patch('os.stat')
def test_get_file_size(self, mock_stat):
class stat_result(object):
st_mode = 0o777
st_size = 1074253824
test_file = '/var/tmp/made_up_file'
mock_stat.return_value = stat_result
size = utils.get_file_size(test_file)
self.assertEqual(size, stat_result.st_size)
mock_stat.assert_called_once_with(test_file)
@mock.patch('os.stat')
def test_get_blkdev_major_minor(self, mock_stat):
class stat_result(object):
st_mode = 0o60660
st_rdev = os.makedev(253, 7)
test_device = '/dev/made_up_blkdev'
mock_stat.return_value = stat_result
dev = utils.get_blkdev_major_minor(test_device)
self.assertEqual('253:7', dev)
mock_stat.assert_called_once_with(test_device)
@mock.patch('os.stat')
@mock.patch.object(utils, 'execute')
def _test_get_blkdev_major_minor_file(self, test_partition,
mock_exec, mock_stat):
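        # Fakes the 'df' output for the file plus os.stat() results for the
        # file, its partition and the parent disk, then checks which
        # major:minor numbers get_blkdev_major_minor() resolves for the file.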
mock_exec.return_value = (
'Filesystem Size Used Avail Use%% Mounted on\n'
'%s 4096 2048 2048 50%% /tmp\n' % test_partition, None)
test_file = '/tmp/file'
test_disk = '/dev/made_up_disk'
class stat_result_file(object):
st_mode = 0o660
class stat_result_partition(object):
st_mode = 0o60660
st_rdev = os.makedev(8, 65)
class stat_result_disk(object):
st_mode = 0o60660
st_rdev = os.makedev(8, 64)
def fake_stat(path):
try:
return {test_file: stat_result_file,
test_partition: stat_result_partition,
test_disk: stat_result_disk}[path]
except KeyError:
raise OSError
mock_stat.side_effect = fake_stat
dev = utils.get_blkdev_major_minor(test_file)
mock_stat.assert_any_call(test_file)
mock_exec.assert_called_once_with('df', test_file)
if test_partition.startswith('/'):
mock_stat.assert_any_call(test_partition)
mock_stat.assert_any_call(test_disk)
return dev
def test_get_blkdev_major_minor_file(self):
dev = self._test_get_blkdev_major_minor_file('/dev/made_up_disk1')
self.assertEqual('8:64', dev)
def test_get_blkdev_major_minor_file_nfs(self):
dev = self._test_get_blkdev_major_minor_file('nfs-server:/export/path')
self.assertIsNone(dev)
@mock.patch('os.stat')
@mock.patch('stat.S_ISCHR', return_value=False)
@mock.patch('stat.S_ISBLK', return_value=False)
def test_get_blkdev_failure(self, mock_isblk, mock_ischr, mock_stat):
path = '/some/path'
self.assertRaises(exception.Error,
utils.get_blkdev_major_minor,
path, lookup_for_file=False)
mock_stat.assert_called_once_with(path)
mock_isblk.assert_called_once_with(mock_stat.return_value.st_mode)
mock_ischr.assert_called_once_with(mock_stat.return_value.st_mode)
@mock.patch('os.stat')
@mock.patch('stat.S_ISCHR', return_value=True)
@mock.patch('stat.S_ISBLK', return_value=False)
def test_get_blkdev_is_chr(self, mock_isblk, mock_ischr, mock_stat):
path = '/some/path'
output = utils.get_blkdev_major_minor(path, lookup_for_file=False)
mock_stat.assert_called_once_with(path)
mock_isblk.assert_called_once_with(mock_stat.return_value.st_mode)
mock_ischr.assert_called_once_with(mock_stat.return_value.st_mode)
self.assertIs(None, output)
class MonkeyPatchTestCase(test.TestCase):
"""Unit test for utils.monkey_patch()."""
def setUp(self):
super(MonkeyPatchTestCase, self).setUp()
self.example_package = 'cinder.tests.unit.monkey_patch_example.'
self.flags(
monkey_patch=True,
monkey_patch_modules=[self.example_package + 'example_a' + ':'
+ self.example_package
+ 'example_decorator'])
def test_monkey_patch(self):
utils.monkey_patch()
cinder.tests.unit.monkey_patch_example.CALLED_FUNCTION = []
from cinder.tests.unit.monkey_patch_example import example_a
from cinder.tests.unit.monkey_patch_example import example_b
self.assertEqual('Example function', example_a.example_function_a())
exampleA = example_a.ExampleClassA()
exampleA.example_method()
ret_a = exampleA.example_method_add(3, 5)
self.assertEqual(8, ret_a)
self.assertEqual('Example function', example_b.example_function_b())
exampleB = example_b.ExampleClassB()
exampleB.example_method()
ret_b = exampleB.example_method_add(3, 5)
self.assertEqual(8, ret_b)
package_a = self.example_package + 'example_a.'
self.assertTrue(
package_a + 'example_function_a'
in cinder.tests.unit.monkey_patch_example.CALLED_FUNCTION)
self.assertTrue(
package_a + 'ExampleClassA.example_method'
in cinder.tests.unit.monkey_patch_example.CALLED_FUNCTION)
self.assertTrue(
package_a + 'ExampleClassA.example_method_add'
in cinder.tests.unit.monkey_patch_example.CALLED_FUNCTION)
package_b = self.example_package + 'example_b.'
self.assertFalse(
package_b + 'example_function_b'
in cinder.tests.unit.monkey_patch_example.CALLED_FUNCTION)
self.assertFalse(
package_b + 'ExampleClassB.example_method'
in cinder.tests.unit.monkey_patch_example.CALLED_FUNCTION)
self.assertFalse(
package_b + 'ExampleClassB.example_method_add'
in cinder.tests.unit.monkey_patch_example.CALLED_FUNCTION)
class AuditPeriodTest(test.TestCase):
def setUp(self):
super(AuditPeriodTest, self).setUp()
test_time = datetime.datetime(second=23,
minute=12,
hour=8,
day=5,
month=3,
year=2012)
patcher = mock.patch.object(timeutils, 'utcnow')
self.addCleanup(patcher.stop)
self.mock_utcnow = patcher.start()
self.mock_utcnow.return_value = test_time
def test_hour(self):
begin, end = utils.last_completed_audit_period(unit='hour')
self.assertEqual(datetime.datetime(hour=7, day=5, month=3, year=2012),
begin)
self.assertEqual(datetime.datetime(hour=8, day=5, month=3, year=2012),
end)
def test_hour_with_offset_before_current(self):
begin, end = utils.last_completed_audit_period(unit='hour@10')
self.assertEqual(datetime.datetime(minute=10,
hour=7,
day=5,
month=3,
year=2012),
begin)
self.assertEqual(datetime.datetime(minute=10,
hour=8,
day=5,
month=3,
year=2012),
end)
def test_hour_with_offset_after_current(self):
begin, end = utils.last_completed_audit_period(unit='hour@30')
self.assertEqual(datetime.datetime(minute=30,
hour=6,
day=5,
month=3,
year=2012),
begin)
self.assertEqual(datetime.datetime(minute=30,
hour=7,
day=5,
month=3,
year=2012),
end)
def test_day(self):
begin, end = utils.last_completed_audit_period(unit='day')
self.assertEqual(datetime.datetime(day=4, month=3, year=2012), begin)
self.assertEqual(datetime.datetime(day=5, month=3, year=2012), end)
def test_day_with_offset_before_current(self):
begin, end = utils.last_completed_audit_period(unit='day@6')
self.assertEqual(datetime.datetime(hour=6, day=4, month=3, year=2012),
begin)
self.assertEqual(datetime.datetime(hour=6, day=5, month=3, year=2012),
end)
def test_day_with_offset_after_current(self):
begin, end = utils.last_completed_audit_period(unit='day@10')
self.assertEqual(datetime.datetime(hour=10, day=3, month=3, year=2012),
begin)
self.assertEqual(datetime.datetime(hour=10, day=4, month=3, year=2012),
end)
def test_month(self):
begin, end = utils.last_completed_audit_period(unit='month')
self.assertEqual(datetime.datetime(day=1, month=2, year=2012), begin)
self.assertEqual(datetime.datetime(day=1, month=3, year=2012), end)
def test_month_with_offset_before_current(self):
begin, end = utils.last_completed_audit_period(unit='month@2')
self.assertEqual(datetime.datetime(day=2, month=2, year=2012), begin)
self.assertEqual(datetime.datetime(day=2, month=3, year=2012), end)
def test_month_with_offset_after_current(self):
begin, end = utils.last_completed_audit_period(unit='month@15')
self.assertEqual(datetime.datetime(day=15, month=1, year=2012), begin)
self.assertEqual(datetime.datetime(day=15, month=2, year=2012), end)
@mock.patch('oslo_utils.timeutils.utcnow',
return_value=datetime.datetime(day=1,
month=1,
year=2012))
def test_month_jan_day_first(self, mock_utcnow):
begin, end = utils.last_completed_audit_period(unit='month')
self.assertEqual(datetime.datetime(day=1, month=11, year=2011), begin)
self.assertEqual(datetime.datetime(day=1, month=12, year=2011), end)
@mock.patch('oslo_utils.timeutils.utcnow',
return_value=datetime.datetime(day=2,
month=1,
year=2012))
def test_month_jan_day_not_first(self, mock_utcnow):
begin, end = utils.last_completed_audit_period(unit='month')
self.assertEqual(datetime.datetime(day=1, month=12, year=2011), begin)
self.assertEqual(datetime.datetime(day=1, month=1, year=2012), end)
def test_year(self):
begin, end = utils.last_completed_audit_period(unit='year')
self.assertEqual(datetime.datetime(day=1, month=1, year=2011), begin)
self.assertEqual(datetime.datetime(day=1, month=1, year=2012), end)
def test_year_with_offset_before_current(self):
begin, end = utils.last_completed_audit_period(unit='year@2')
self.assertEqual(datetime.datetime(day=1, month=2, year=2011), begin)
self.assertEqual(datetime.datetime(day=1, month=2, year=2012), end)
def test_year_with_offset_after_current(self):
begin, end = utils.last_completed_audit_period(unit='year@6')
self.assertEqual(datetime.datetime(day=1, month=6, year=2010), begin)
self.assertEqual(datetime.datetime(day=1, month=6, year=2011), end)
def test_invalid_unit(self):
self.assertRaises(ValueError,
utils.last_completed_audit_period,
unit='invalid_unit')
@mock.patch('cinder.utils.CONF')
def test_uses_conf_unit(self, mock_conf):
mock_conf.volume_usage_audit_period = 'hour'
begin1, end1 = utils.last_completed_audit_period()
self.assertEqual(60.0 * 60, (end1 - begin1).total_seconds())
mock_conf.volume_usage_audit_period = 'day'
begin2, end2 = utils.last_completed_audit_period()
self.assertEqual(60.0 * 60 * 24, (end2 - begin2).total_seconds())
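# Minimal stand-ins for paramiko's SSHClient and Transport used by the SSH
# pool tests below; each fake client gets a unique id so the tests can tell
# whether a pooled connection was reused or a new one was created.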
class FakeSSHClient(object):
def __init__(self):
self.id = uuid.uuid4()
self.transport = FakeTransport()
def set_missing_host_key_policy(self, policy):
self.policy = policy
def load_system_host_keys(self):
self.system_host_keys = 'system_host_keys'
def load_host_keys(self, hosts_key_file):
self.hosts_key_file = hosts_key_file
def connect(self, ip, port=22, username=None, password=None,
pkey=None, timeout=10):
pass
def get_transport(self):
return self.transport
def get_policy(self):
return self.policy
def get_host_keys(self):
return '127.0.0.1 ssh-rsa deadbeef'
def close(self):
pass
def __call__(self, *args, **kwargs):
pass
class FakeSock(object):
def settimeout(self, timeout):
pass
class FakeTransport(object):
def __init__(self):
self.active = True
self.sock = FakeSock()
def set_keepalive(self, timeout):
pass
def is_active(self):
return self.active
class SSHPoolTestCase(test.TestCase):
"""Unit test for SSH Connection Pool."""
@mock.patch('cinder.ssh_utils.CONF')
@mock.patch('six.moves.builtins.open')
@mock.patch('paramiko.SSHClient')
@mock.patch('os.path.isfile', return_value=True)
def test_ssh_default_hosts_key_file(self, mock_isfile, mock_sshclient,
mock_open, mock_conf):
mock_ssh = mock.MagicMock()
mock_sshclient.return_value = mock_ssh
mock_conf.ssh_hosts_key_file = '/var/lib/cinder/ssh_known_hosts'
# create with customized setting
sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10,
"test",
password="test",
min_size=1,
max_size=1)
host_key_files = sshpool.hosts_key_file
self.assertEqual('/var/lib/cinder/ssh_known_hosts', host_key_files)
mock_ssh.load_host_keys.assert_called_once_with(
'/var/lib/cinder/ssh_known_hosts')
@mock.patch('cinder.ssh_utils.CONF')
@mock.patch('six.moves.builtins.open')
@mock.patch('paramiko.SSHClient')
@mock.patch('os.path.isfile', return_value=True)
def test_ssh_host_key_file_kwargs(self, mock_isfile, mock_sshclient,
mock_open, mock_conf):
mock_ssh = mock.MagicMock()
mock_sshclient.return_value = mock_ssh
mock_conf.ssh_hosts_key_file = '/var/lib/cinder/ssh_known_hosts'
# create with customized setting
sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10,
"test",
password="test",
min_size=1,
max_size=1,
hosts_key_file='dummy_host_keyfile')
host_key_files = sshpool.hosts_key_file
self.assertIn('dummy_host_keyfile', host_key_files)
self.assertIn('/var/lib/cinder/ssh_known_hosts', host_key_files)
expected = [
mock.call.load_host_keys('dummy_host_keyfile'),
mock.call.load_host_keys('/var/lib/cinder/ssh_known_hosts')]
mock_ssh.assert_has_calls(expected, any_order=True)
@mock.patch('cinder.ssh_utils.CONF')
@mock.patch('six.moves.builtins.open')
@mock.patch('os.path.isfile', return_value=True)
@mock.patch('paramiko.RSAKey.from_private_key_file')
@mock.patch('paramiko.SSHClient')
def test_single_ssh_connect(self, mock_sshclient, mock_pkey, mock_isfile,
mock_open, mock_conf):
mock_conf.ssh_hosts_key_file = '/var/lib/cinder/ssh_known_hosts'
# create with password
sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10,
"test",
password="test",
min_size=1,
max_size=1)
with sshpool.item() as ssh:
first_id = ssh.id
with sshpool.item() as ssh:
second_id = ssh.id
self.assertEqual(first_id, second_id)
self.assertEqual(1, mock_sshclient.return_value.connect.call_count)
# create with private key
sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10,
"test",
privatekey="test",
min_size=1,
max_size=1)
self.assertEqual(2, mock_sshclient.return_value.connect.call_count)
# attempt to create with no password or private key
self.assertRaises(paramiko.SSHException,
ssh_utils.SSHPool,
"127.0.0.1", 22, 10,
"test",
min_size=1,
max_size=1)
@mock.patch('six.moves.builtins.open')
@mock.patch('paramiko.SSHClient')
def test_closed_reopened_ssh_connections(self, mock_sshclient, mock_open):
        mock_sshclient.return_value = FakeSSHClient()
sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10,
"test",
password="test",
min_size=1,
max_size=4)
with sshpool.item() as ssh:
mock_sshclient.reset_mock()
first_id = ssh.id
with sshpool.item() as ssh:
second_id = ssh.id
ssh.get_transport().active = False
sshpool.remove(ssh)
self.assertEqual(first_id, second_id)
# create a new client
mock_sshclient.return_value = FakeSSHClient()
with sshpool.item() as ssh:
third_id = ssh.id
self.assertNotEqual(first_id, third_id)
@mock.patch('cinder.ssh_utils.CONF')
@mock.patch('six.moves.builtins.open')
@mock.patch('paramiko.SSHClient')
def test_missing_ssh_hosts_key_config(self, mock_sshclient, mock_open,
mock_conf):
mock_sshclient.return_value = FakeSSHClient()
mock_conf.ssh_hosts_key_file = None
# create with password
self.assertRaises(exception.ParameterNotFound,
ssh_utils.SSHPool,
"127.0.0.1", 22, 10,
"test",
password="test",
min_size=1,
max_size=1)
@mock.patch('six.moves.builtins.open')
@mock.patch('paramiko.SSHClient')
def test_create_default_known_hosts_file(self, mock_sshclient,
mock_open):
mock_sshclient.return_value = FakeSSHClient()
CONF.state_path = '/var/lib/cinder'
CONF.ssh_hosts_key_file = '/var/lib/cinder/ssh_known_hosts'
default_file = '/var/lib/cinder/ssh_known_hosts'
ssh_pool = ssh_utils.SSHPool("127.0.0.1", 22, 10,
"test",
password="test",
min_size=1,
max_size=1)
with ssh_pool.item() as ssh:
mock_open.assert_called_once_with(default_file, 'a')
ssh_pool.remove(ssh)
@mock.patch('os.path.isfile', return_value=False)
@mock.patch('six.moves.builtins.open')
@mock.patch('paramiko.SSHClient')
def test_ssh_missing_hosts_key_file(self, mock_sshclient, mock_open,
mock_isfile):
mock_sshclient.return_value = FakeSSHClient()
CONF.ssh_hosts_key_file = '/tmp/blah'
self.assertNotIn(CONF.state_path, CONF.ssh_hosts_key_file)
self.assertRaises(exception.InvalidInput,
ssh_utils.SSHPool,
"127.0.0.1", 22, 10,
"test",
password="test",
min_size=1,
max_size=1)
@mock.patch.multiple('cinder.ssh_utils.CONF',
strict_ssh_host_key_policy=True,
ssh_hosts_key_file='/var/lib/cinder/ssh_known_hosts')
@mock.patch('six.moves.builtins.open')
@mock.patch('paramiko.SSHClient')
@mock.patch('os.path.isfile', return_value=True)
def test_ssh_strict_host_key_policy(self, mock_isfile, mock_sshclient,
mock_open):
mock_sshclient.return_value = FakeSSHClient()
# create with customized setting
sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10,
"test",
password="test",
min_size=1,
max_size=1)
with sshpool.item() as ssh:
self.assertTrue(isinstance(ssh.get_policy(),
paramiko.RejectPolicy))
@mock.patch('six.moves.builtins.open')
@mock.patch('paramiko.SSHClient')
@mock.patch('os.path.isfile', return_value=True)
def test_ssh_not_strict_host_key_policy(self, mock_isfile, mock_sshclient,
mock_open):
mock_sshclient.return_value = FakeSSHClient()
CONF.strict_ssh_host_key_policy = False
# create with customized setting
sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10,
"test",
password="test",
min_size=1,
max_size=1)
with sshpool.item() as ssh:
self.assertTrue(isinstance(ssh.get_policy(),
paramiko.AutoAddPolicy))
class BrickUtils(test.TestCase):
"""Unit test to test the brick utility wrapper functions."""
@mock.patch('cinder.utils.CONF')
@mock.patch('os_brick.initiator.connector.get_connector_properties')
@mock.patch('cinder.utils.get_root_helper')
def test_brick_get_connector_properties(self, mock_helper, mock_get,
mock_conf):
mock_conf.my_ip = '1.2.3.4'
output = utils.brick_get_connector_properties()
mock_helper.assert_called_once_with()
mock_get.assert_called_once_with(mock_helper.return_value, '1.2.3.4',
False, False)
self.assertEqual(mock_get.return_value, output)
@mock.patch('os_brick.initiator.connector.InitiatorConnector.factory')
@mock.patch('cinder.utils.get_root_helper')
def test_brick_get_connector(self, mock_helper, mock_factory):
output = utils.brick_get_connector('protocol')
mock_helper.assert_called_once_with()
self.assertEqual(mock_factory.return_value, output)
mock_factory.assert_called_once_with(
'protocol', mock_helper.return_value, driver=None,
execute=putils.execute, use_multipath=False,
device_scan_attempts=3)
class StringLengthTestCase(test.TestCase):
def test_check_string_length(self):
self.assertIsNone(utils.check_string_length(
'test', 'name', max_length=255))
self.assertRaises(exception.InvalidInput,
utils.check_string_length,
11, 'name', max_length=255)
self.assertRaises(exception.InvalidInput,
utils.check_string_length,
'', 'name', min_length=1)
self.assertRaises(exception.InvalidInput,
utils.check_string_length,
'a' * 256, 'name', max_length=255)
class AddVisibleAdminMetadataTestCase(test.TestCase):
def test_add_visible_admin_metadata_visible_key_only(self):
admin_metadata = [{"key": "invisible_key", "value": "invisible_value"},
{"key": "readonly", "value": "visible"},
{"key": "attached_mode", "value": "visible"}]
metadata = [{"key": "key", "value": "value"},
{"key": "readonly", "value": "existing"}]
volume = {'volume_admin_metadata': admin_metadata,
'volume_metadata': metadata}
utils.add_visible_admin_metadata(volume)
self.assertEqual([{"key": "key", "value": "value"},
{"key": "readonly", "value": "visible"},
{"key": "attached_mode", "value": "visible"}],
volume['volume_metadata'])
admin_metadata = {"invisible_key": "invisible_value",
"readonly": "visible",
"attached_mode": "visible"}
metadata = {"key": "value", "readonly": "existing"}
volume = {'admin_metadata': admin_metadata,
'metadata': metadata}
utils.add_visible_admin_metadata(volume)
self.assertEqual({'key': 'value',
'attached_mode': 'visible',
'readonly': 'visible'},
volume['metadata'])
def test_add_visible_admin_metadata_no_visible_keys(self):
admin_metadata = [
{"key": "invisible_key1", "value": "invisible_value1"},
{"key": "invisible_key2", "value": "invisible_value2"},
{"key": "invisible_key3", "value": "invisible_value3"}]
metadata = [{"key": "key", "value": "value"}]
volume = {'volume_admin_metadata': admin_metadata,
'volume_metadata': metadata}
utils.add_visible_admin_metadata(volume)
self.assertEqual([{"key": "key", "value": "value"}],
volume['volume_metadata'])
admin_metadata = {"invisible_key1": "invisible_value1",
"invisible_key2": "invisible_value2",
"invisible_key3": "invisible_value3"}
metadata = {"key": "value"}
volume = {'admin_metadata': admin_metadata,
'metadata': metadata}
utils.add_visible_admin_metadata(volume)
self.assertEqual({'key': 'value'}, volume['metadata'])
def test_add_visible_admin_metadata_no_existing_metadata(self):
admin_metadata = [{"key": "invisible_key", "value": "invisible_value"},
{"key": "readonly", "value": "visible"},
{"key": "attached_mode", "value": "visible"}]
volume = {'volume_admin_metadata': admin_metadata}
utils.add_visible_admin_metadata(volume)
self.assertEqual({'attached_mode': 'visible', 'readonly': 'visible'},
volume['metadata'])
admin_metadata = {"invisible_key": "invisible_value",
"readonly": "visible",
"attached_mode": "visible"}
volume = {'admin_metadata': admin_metadata}
utils.add_visible_admin_metadata(volume)
self.assertEqual({'attached_mode': 'visible', 'readonly': 'visible'},
volume['metadata'])
class InvalidFilterTestCase(test.TestCase):
def test_admin_allows_all_options(self):
ctxt = mock.Mock(name='context')
ctxt.is_admin = True
filters = {'allowed1': None, 'allowed2': None, 'not_allowed1': None}
fltrs_orig = {'allowed1': None, 'allowed2': None, 'not_allowed1': None}
allowed_search_options = ('allowed1', 'allowed2')
allowed_orig = ('allowed1', 'allowed2')
utils.remove_invalid_filter_options(ctxt, filters,
allowed_search_options)
self.assertEqual(allowed_orig, allowed_search_options)
self.assertEqual(fltrs_orig, filters)
def test_admin_allows_some_options(self):
ctxt = mock.Mock(name='context')
ctxt.is_admin = False
filters = {'allowed1': None, 'allowed2': None, 'not_allowed1': None}
fltrs_orig = {'allowed1': None, 'allowed2': None, 'not_allowed1': None}
allowed_search_options = ('allowed1', 'allowed2')
allowed_orig = ('allowed1', 'allowed2')
utils.remove_invalid_filter_options(ctxt, filters,
allowed_search_options)
self.assertEqual(allowed_orig, allowed_search_options)
self.assertNotEqual(fltrs_orig, filters)
self.assertEqual(allowed_search_options, tuple(sorted(filters.keys())))
class IsBlkDeviceTestCase(test.TestCase):
@mock.patch('stat.S_ISBLK', return_value=True)
@mock.patch('os.stat')
def test_is_blk_device(self, mock_os_stat, mock_S_ISBLK):
dev = 'some_device'
self.assertTrue(utils.is_blk_device(dev))
@mock.patch('stat.S_ISBLK', return_value=False)
@mock.patch('os.stat')
def test_not_is_blk_device(self, mock_os_stat, mock_S_ISBLK):
dev = 'not_some_device'
self.assertFalse(utils.is_blk_device(dev))
@mock.patch('stat.S_ISBLK', side_effect=Exception)
@mock.patch('os.stat')
def test_fail_is_blk_device(self, mock_os_stat, mock_S_ISBLK):
dev = 'device_exception'
self.assertFalse(utils.is_blk_device(dev))
class WrongException(Exception):
pass
class TestRetryDecorator(test.TestCase):
def setUp(self):
super(TestRetryDecorator, self).setUp()
def test_no_retry_required(self):
self.counter = 0
with mock.patch.object(time, 'sleep') as mock_sleep:
@utils.retry(exception.VolumeBackendAPIException,
interval=2,
retries=3,
backoff_rate=2)
def succeeds():
self.counter += 1
return 'success'
ret = succeeds()
self.assertFalse(mock_sleep.called)
self.assertEqual('success', ret)
self.assertEqual(1, self.counter)
def test_retries_once(self):
self.counter = 0
interval = 2
backoff_rate = 2
retries = 3
with mock.patch.object(time, 'sleep') as mock_sleep:
@utils.retry(exception.VolumeBackendAPIException,
interval,
retries,
backoff_rate)
def fails_once():
self.counter += 1
if self.counter < 2:
raise exception.VolumeBackendAPIException(data='fake')
else:
return 'success'
ret = fails_once()
self.assertEqual('success', ret)
self.assertEqual(2, self.counter)
self.assertEqual(1, mock_sleep.call_count)
mock_sleep.assert_called_with(interval * backoff_rate)
def test_limit_is_reached(self):
self.counter = 0
retries = 3
interval = 2
backoff_rate = 4
with mock.patch.object(time, 'sleep') as mock_sleep:
@utils.retry(exception.VolumeBackendAPIException,
interval,
retries,
backoff_rate)
def always_fails():
self.counter += 1
raise exception.VolumeBackendAPIException(data='fake')
self.assertRaises(exception.VolumeBackendAPIException,
always_fails)
self.assertEqual(retries, self.counter)
expected_sleep_arg = []
for i in range(retries):
if i > 0:
interval *= backoff_rate
expected_sleep_arg.append(float(interval))
mock_sleep.assert_has_calls(map(mock.call, expected_sleep_arg))
def test_wrong_exception_no_retry(self):
with mock.patch.object(time, 'sleep') as mock_sleep:
@utils.retry(exception.VolumeBackendAPIException)
def raise_unexpected_error():
raise WrongException("wrong exception")
self.assertRaises(WrongException, raise_unexpected_error)
self.assertFalse(mock_sleep.called)
class VersionTestCase(test.TestCase):
def test_convert_version_to_int(self):
self.assertEqual(6002000, utils.convert_version_to_int('6.2.0'))
self.assertEqual(6004003, utils.convert_version_to_int((6, 4, 3)))
self.assertEqual(5, utils.convert_version_to_int((5, )))
self.assertRaises(exception.CinderException,
utils.convert_version_to_int, '5a.6b')
def test_convert_version_to_string(self):
self.assertEqual('6.7.0', utils.convert_version_to_str(6007000))
self.assertEqual('4', utils.convert_version_to_str(4))
def test_convert_version_to_tuple(self):
self.assertEqual((6, 7, 0), utils.convert_version_to_tuple('6.7.0'))
class LogTracingTestCase(test.TestCase):
def test_utils_setup_tracing(self):
self.mock_object(utils, 'LOG')
utils.setup_tracing(None)
self.assertFalse(utils.TRACE_API)
self.assertFalse(utils.TRACE_METHOD)
self.assertEqual(0, utils.LOG.warning.call_count)
utils.setup_tracing(['method'])
self.assertFalse(utils.TRACE_API)
self.assertTrue(utils.TRACE_METHOD)
self.assertEqual(0, utils.LOG.warning.call_count)
utils.setup_tracing(['method', 'api'])
self.assertTrue(utils.TRACE_API)
self.assertTrue(utils.TRACE_METHOD)
self.assertEqual(0, utils.LOG.warning.call_count)
def test_utils_setup_tracing_invalid_key(self):
self.mock_object(utils, 'LOG')
utils.setup_tracing(['fake'])
self.assertFalse(utils.TRACE_API)
self.assertFalse(utils.TRACE_METHOD)
self.assertEqual(1, utils.LOG.warning.call_count)
def test_utils_setup_tracing_valid_and_invalid_key(self):
self.mock_object(utils, 'LOG')
utils.setup_tracing(['method', 'fake'])
self.assertFalse(utils.TRACE_API)
self.assertTrue(utils.TRACE_METHOD)
self.assertEqual(1, utils.LOG.warning.call_count)
def test_trace_no_tracing(self):
self.mock_object(utils, 'LOG')
@utils.trace_method
def _trace_test_method(*args, **kwargs):
return 'OK'
utils.setup_tracing(None)
result = _trace_test_method()
self.assertEqual('OK', result)
self.assertEqual(0, utils.LOG.debug.call_count)
def test_utils_trace_method(self):
self.mock_object(utils, 'LOG')
@utils.trace_method
def _trace_test_method(*args, **kwargs):
return 'OK'
utils.setup_tracing(['method'])
result = _trace_test_method()
self.assertEqual('OK', result)
self.assertEqual(2, utils.LOG.debug.call_count)
def test_utils_trace_api(self):
self.mock_object(utils, 'LOG')
@utils.trace_api
def _trace_test_api(*args, **kwargs):
return 'OK'
utils.setup_tracing(['api'])
result = _trace_test_api()
self.assertEqual('OK', result)
self.assertEqual(2, utils.LOG.debug.call_count)
def test_utils_trace_method_default_logger(self):
mock_log = self.mock_object(utils, 'LOG')
@utils.trace_method
def _trace_test_method_custom_logger(*args, **kwargs):
return 'OK'
utils.setup_tracing(['method'])
result = _trace_test_method_custom_logger()
self.assertEqual('OK', result)
self.assertEqual(2, mock_log.debug.call_count)
def test_utils_trace_method_inner_decorator(self):
mock_logging = self.mock_object(utils, 'logging')
mock_log = mock.Mock()
mock_log.isEnabledFor = lambda x: True
mock_logging.getLogger = mock.Mock(return_value=mock_log)
def _test_decorator(f):
def blah(*args, **kwargs):
return f(*args, **kwargs)
return blah
@_test_decorator
@utils.trace_method
def _trace_test_method(*args, **kwargs):
return 'OK'
utils.setup_tracing(['method'])
result = _trace_test_method(self)
self.assertEqual('OK', result)
self.assertEqual(2, mock_log.debug.call_count)
# Ensure the correct function name was logged
for call in mock_log.debug.call_args_list:
self.assertTrue('_trace_test_method' in str(call))
self.assertFalse('blah' in str(call))
def test_utils_trace_method_outer_decorator(self):
mock_logging = self.mock_object(utils, 'logging')
mock_log = mock.Mock()
mock_log.isEnabledFor = lambda x: True
mock_logging.getLogger = mock.Mock(return_value=mock_log)
def _test_decorator(f):
def blah(*args, **kwargs):
return f(*args, **kwargs)
return blah
@utils.trace_method
@_test_decorator
def _trace_test_method(*args, **kwargs):
return 'OK'
utils.setup_tracing(['method'])
result = _trace_test_method(self)
self.assertEqual('OK', result)
self.assertEqual(2, mock_log.debug.call_count)
# Ensure the incorrect function name was logged
for call in mock_log.debug.call_args_list:
self.assertFalse('_trace_test_method' in str(call))
self.assertTrue('blah' in str(call))
def test_utils_trace_method_outer_decorator_with_functools(self):
mock_log = mock.Mock()
mock_log.isEnabledFor = lambda x: True
self.mock_object(utils.logging, 'getLogger', mock_log)
mock_log = self.mock_object(utils, 'LOG')
def _test_decorator(f):
@functools.wraps(f)
def wraps(*args, **kwargs):
return f(*args, **kwargs)
return wraps
@utils.trace_method
@_test_decorator
def _trace_test_method(*args, **kwargs):
return 'OK'
utils.setup_tracing(['method'])
result = _trace_test_method()
self.assertEqual('OK', result)
self.assertEqual(2, mock_log.debug.call_count)
        # Ensure the correct function name was logged
for call in mock_log.debug.call_args_list:
self.assertTrue('_trace_test_method' in str(call))
self.assertFalse('wraps' in str(call))
def test_utils_trace_method_with_exception(self):
self.LOG = self.mock_object(utils, 'LOG')
@utils.trace_method
def _trace_test_method(*args, **kwargs):
raise exception.APITimeout('test message')
utils.setup_tracing(['method'])
self.assertRaises(exception.APITimeout, _trace_test_method)
exception_log = self.LOG.debug.call_args_list[1]
self.assertTrue('exception' in str(exception_log))
self.assertTrue('test message' in str(exception_log))
def test_utils_trace_method_with_time(self):
mock_logging = self.mock_object(utils, 'logging')
mock_log = mock.Mock()
mock_log.isEnabledFor = lambda x: True
mock_logging.getLogger = mock.Mock(return_value=mock_log)
mock_time = mock.Mock(side_effect=[3.1, 6])
self.mock_object(time, 'time', mock_time)
@utils.trace_method
def _trace_test_method(*args, **kwargs):
return 'OK'
utils.setup_tracing(['method'])
result = _trace_test_method(self)
self.assertEqual('OK', result)
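        # time.time() is mocked to return 3.1 and then 6, so the traced call
        # is logged with a duration of 2900 ms.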
return_log = mock_log.debug.call_args_list[1]
self.assertTrue('2900' in str(return_log))
def test_utils_trace_wrapper_class(self):
mock_logging = self.mock_object(utils, 'logging')
mock_log = mock.Mock()
mock_log.isEnabledFor = lambda x: True
mock_logging.getLogger = mock.Mock(return_value=mock_log)
utils.setup_tracing(['method'])
@six.add_metaclass(utils.TraceWrapperMetaclass)
class MyClass(object):
def trace_test_method(self):
return 'OK'
test_class = MyClass()
result = test_class.trace_test_method()
self.assertEqual('OK', result)
self.assertEqual(2, mock_log.debug.call_count)
| {
"content_hash": "9266846a4b29a3416d83d9bb865cd6a7",
"timestamp": "",
"source": "github",
"line_count": 1684,
"max_line_length": 79,
"avg_line_length": 39.13479809976247,
"alnum_prop": 0.5548609319757826,
"repo_name": "petrutlucian94/cinder",
"id": "7d985f817ffad06764f7149e5809bfdfca119b1f",
"size": "66520",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "cinder/tests/unit/test_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "12246766"
},
{
"name": "Shell",
"bytes": "8111"
}
],
"symlink_target": ""
} |
"File (a.k.a document) pages; uploaded files."
import logging
import os.path
import tornado.web
import orderportal
from orderportal import constants
from orderportal import saver
from orderportal import settings
from orderportal import utils
from orderportal.requesthandler import RequestHandler
class FileSaver(saver.Saver):
doctype = constants.FILE
def check_name(self, value):
if not constants.NAME_RX.match(value):
raise tornado.web.HTTPError(400, reason="invalid file name")
try:
doc = self.rqh.get_entity_view("file", "name", value)
except tornado.web.HTTPError:
pass
else:
# Error if same name as file of another doc
if doc["_id"] != self.doc.get("_id"):
raise ValueError("file name already exists")
def set_file(self, infile, name=None):
self.file = infile
if name:
self["name"] = name
self["size"] = len(self.file.body)
self["content_type"] = infile.content_type or "application/octet-stream"
def post_process(self):
"Save the file as an attachment to the document."
# No new file uploaded, just skip out.
if self.file is None:
return
self.db.put_attachment(
self.doc,
self.file.body,
filename=self["name"],
content_type=self["content_type"],
)
class Files(RequestHandler):
"List of files page."
def get(self):
files = [r.doc for r in self.db.view("file", "name", include_docs=True)]
files.sort(key=lambda i: i["modified"], reverse=True)
self.render("files.html", files=files)
class File(RequestHandler):
"Return the file data."
def get(self, name):
self.doc = self.get_entity_view("file", "name", name)
filename = list(self.doc["_attachments"].keys())[0]
outfile = self.db.get_attachment(self.doc, filename)
if outfile is None:
self.write("")
else:
self.write(outfile.read())
outfile.close()
self.set_header("Content-Type", self.doc["content_type"])
class FileMeta(RequestHandler):
"Display meta page for a file with buttons."
def get(self, name):
file = self.get_entity_view("file", "name", name)
self.render("file_meta.html", file=file)
class FileCreate(RequestHandler):
"Create a new file page."
@tornado.web.authenticated
def get(self):
self.check_admin()
self.render("file_create.html")
@tornado.web.authenticated
def post(self):
self.check_admin()
try:
with FileSaver(rqh=self) as saver:
try:
infile = self.request.files["file"][0]
except (KeyError, IndexError):
raise ValueError("No file uploaded.")
name = (
self.get_argument("name", None)
or os.path.splitext(infile.filename)[0]
)
saver.check_name(name)
saver.set_file(infile, name)
saver["title"] = self.get_argument("title", None)
saver["hidden"] = utils.to_bool(self.get_argument("hidden", False))
saver["description"] = self.get_argument("description", None)
except ValueError as msg:
self.see_other("files", error=str(msg))
else:
self.see_other("files")
class FileEdit(RequestHandler):
"Edit or delete a file."
@tornado.web.authenticated
def get(self, name):
self.check_admin()
file = self.get_entity_view("file", "name", name)
self.render("file_edit.html", file=file)
@tornado.web.authenticated
def post(self, name):
self.check_admin()
if self.get_argument("_http_method", None) == "delete":
self.delete(name)
return
file = self.get_entity_view("file", "name", name)
with FileSaver(doc=file, rqh=self) as saver:
try:
infile = self.request.files["file"][0]
except (KeyError, IndexError):
# No new file upload, just leave it alone.
saver.file = None
else:
saver.set_file(infile)
saver["title"] = self.get_argument("title", None)
saver["hidden"] = utils.to_bool(self.get_argument("hidden", False))
saver["description"] = self.get_argument("description", None)
self.see_other("files")
@tornado.web.authenticated
def delete(self, name):
self.check_admin()
file = self.get_entity_view("file", "name", name)
self.delete_logs(file["_id"])
self.db.delete(file)
self.see_other("files")
class FileEditApiV1(FileEdit):
"API for editing a file."
def check_xsrf_cookie(self):
"Do not check for XSRF cookie when script is calling."
pass
class FileDownload(File):
"Download the file."
def get(self, name):
super(FileDownload, self).get(name)
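        # File.get has already written the body and Content-Type; add a file
        # extension and a Content-Disposition header so browsers download the
        # payload under a sensible filename instead of rendering it inline.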
ext = utils.get_filename_extension(self.doc["content_type"])
if ext:
name += ext
self.set_header(
"Content-Disposition", 'attachment; filename="{0}"'.format(name)
)
class FileLogs(RequestHandler):
"File log entries page."
def get(self, iuid):
file = self.get_entity(iuid, doctype=constants.FILE)
self.render(
"logs.html",
title="Logs for document '%s'" % (file.get("title") or file["name"]),
entity=file,
logs=self.get_logs(file["_id"]),
)
| {
"content_hash": "a63fc093489958a582c1d2379ec52c69",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 83,
"avg_line_length": 30.91304347826087,
"alnum_prop": 0.5738396624472574,
"repo_name": "pekrau/OrderPortal",
"id": "1471bdc9d0b64951d69792708599b6ccf862d347",
"size": "5688",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "orderportal/file.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1276"
},
{
"name": "HTML",
"bytes": "260614"
},
{
"name": "JavaScript",
"bytes": "4214"
},
{
"name": "Python",
"bytes": "305263"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class TextValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name="text",
parent_name="scattergeo.marker.colorbar.title",
**kwargs
):
super(TextValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "info"),
**kwargs
)
| {
"content_hash": "63e51412a51ddcbfd0e3e77836020558",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 66,
"avg_line_length": 29.11764705882353,
"alnum_prop": 0.5676767676767677,
"repo_name": "plotly/python-api",
"id": "7439cbadc267f126bbd2ccc9512f0050060e747f",
"size": "495",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/scattergeo/marker/colorbar/title/_text.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
import ast
import json
import uuid
from collections import OrderedDict
from django.db import models, IntegrityError
from django.db.models.signals import post_save
from django.contrib.auth.models import User
from django.contrib.postgres.fields import JSONField
from django.core.exceptions import ValidationError
from django.core.files.storage import FileSystemStorage
from django.utils import timezone
from oauth_provider.consts import MAX_URL_LENGTH
from .exceptions import BadRequest
from .utils import get_lang
AGENT_PROFILE_UPLOAD_TO = "agent_profile"
ACTIVITY_STATE_UPLOAD_TO = "activity_state"
ACTIVITY_PROFILE_UPLOAD_TO = "activity_profile"
STATEMENT_ATTACHMENT_UPLOAD_TO = "attachment_payloads"
# Called when a user is created, saved, or logging in
def attach_user(sender, **kwargs):
user = kwargs["instance"]
if kwargs["created"]:
agent = Agent.objects.retrieve_or_create(
**{'name': user.username, 'mbox': 'mailto:%s' % user.email, 'objectType': 'Agent'})[0]
agent.user = user
agent.save()
post_save.connect(attach_user, sender=User)
class Verb(models.Model):
verb_id = models.CharField(
max_length=MAX_URL_LENGTH, db_index=True, unique=True)
canonical_data = JSONField(default=dict)
def return_verb_with_lang(self, lang=None, ids_only=False):
if ids_only:
return {'id': self.verb_id}
ret = OrderedDict(self.canonical_data)
if 'display' in ret and ret['display'].items():
ret['display'] = get_lang(self.canonical_data['display'], lang)
return ret
def get_a_name(self):
if 'display' in self.canonical_data:
return self.canonical_data['display'].get('en-US', self.verb_id)
return self.verb_id
def __unicode__(self):
return json.dumps(self.canonical_data, sort_keys=False)
class AgentManager(models.Manager):
def retrieve(self, **kwargs):
agent_ifps_can_only_be_one = [
'mbox', 'mbox_sha1sum', 'account', 'openid']
ifp_sent = [
a for a in agent_ifps_can_only_be_one if kwargs.get(a, None) is not None]
if ifp_sent:
# Get IFP
ifp = ifp_sent[0]
ifp_dict = {}
# If IFP is account, have to set the kwargs keys differently since they have different
# field names
if not 'account' == ifp:
ifp_dict[ifp] = kwargs[ifp]
else:
# Set ifp_dict and kwargs
ifp_dict['account_homePage'] = kwargs['account']['homePage']
ifp_dict['account_name'] = kwargs['account']['name']
try:
# Try getting agent by IFP in ifp_dict
agent = Agent.objects.filter(**ifp_dict)[0]
return agent
except IndexError:
return None
else:
return None
def retrieve_or_create(self, **kwargs):
agent_ifps_can_only_be_one = [
'mbox', 'mbox_sha1sum', 'account', 'openid']
ifp_sent = [
a for a in agent_ifps_can_only_be_one if kwargs.get(a, None) is not None]
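        # ifp_sent holds whichever inverse functional properties (mbox,
        # mbox_sha1sum, account, openid) were supplied; only the first one
        # found is used to look the agent up.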
is_group = kwargs.get('objectType', None) == "Group"
has_member = False
# Set member if incoming group
if is_group:
member = kwargs.pop('member', None)
if member:
has_member = True
# Create agent based on IFP
if ifp_sent:
# Get IFP
ifp = ifp_sent[0]
ifp_dict = {}
# If IFP is account, have to set the kwargs keys differently since they have different
# field names
if not 'account' == ifp:
ifp_dict[ifp] = kwargs[ifp]
else:
# Set ifp_dict and kwargs
ifp_dict['account_homePage'] = kwargs['account']['homePage']
kwargs['account_homePage'] = kwargs['account']['homePage']
ifp_dict['account_name'] = kwargs['account']['name']
kwargs['account_name'] = kwargs['account']['name']
del kwargs['account']
try:
# Try getting agent by IFP in ifp_dict
agent = Agent.objects.filter(**ifp_dict)[0]
created = False
except IndexError:
# If DNE create the agent based off of kwargs (kwargs now
# includes account_homePage and account_name fields)
try:
agent = Agent.objects.create(**kwargs)
created = True
                except (IntegrityError, ValidationError):
# Try getting agent by IFP in ifp_dict
agent = Agent.objects.filter(**ifp_dict)[0]
created = False
# For identified groups with members
if is_group and has_member:
# If newly created identified group add all of the incoming
# members
if created:
members = [self.retrieve_or_create(**a) for a in member]
agent.member.add(*(a for a, c in members))
agent.save()
# Only way it doesn't have IFP is if anonymous group
else:
agent, created = self.retrieve_or_create_anonymous_group(
member, kwargs)
return agent, created
def retrieve_or_create_anonymous_group(self, member, kwargs):
# Narrow oauth down to 2 members and one member having an account
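        # The identifier "anongroup:<account name>-<mbox>" keys the pair, so the
        # same OAuth consumer/user combination always maps to the same group.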
if len(member) == 2 and ('account' in member[0] or 'account' in member[1]):
# If oauth account is in first member
if 'account' in member[0] and 'OAuth' in member[0]['account']['homePage']:
created_oauth_identifier = "anongroup:%s-%s" % (
member[0]['account']['name'], member[1]['mbox'])
try:
agent = Agent.objects.get(
oauth_identifier=created_oauth_identifier)
created = False
except Agent.DoesNotExist:
try:
agent = Agent.objects.create(**kwargs)
created = True
                    except (IntegrityError, ValidationError):
agent = Agent.objects.get(
oauth_identifier=created_oauth_identifier)
created = False
# If oauth account is in second member
elif 'account' in member[1] and 'OAuth' in member[1]['account']['homePage']:
created_oauth_identifier = "anongroup:%s-%s" % (
member[1]['account']['name'], member[0]['mbox'])
try:
agent = Agent.objects.get(
oauth_identifier=created_oauth_identifier)
created = False
except Agent.DoesNotExist:
try:
agent = Agent.objects.create(**kwargs)
created = True
                    except (IntegrityError, ValidationError):
agent = Agent.objects.get(
oauth_identifier=created_oauth_identifier)
created = False
# Non-oauth anonymous group that has 2 members, one having an
# account
else:
agent = Agent.objects.create(**kwargs)
created = True
# Normal non-oauth anonymous group
else:
agent = Agent.objects.create(**kwargs)
created = True
# If it is a newly created anonymous group, add the members
if created:
members = [self.retrieve_or_create(**a) for a in member]
agent.member.add(*(a for a, c in members))
return agent, created
def oauth_group(self, **kwargs):
try:
g = Agent.objects.get(oauth_identifier=kwargs['oauth_identifier'])
return g, False
except Agent.DoesNotExist:
return Agent.objects.retrieve_or_create(**kwargs)
class Agent(models.Model):
objectType = models.CharField(max_length=6, blank=True, default="Agent")
name = models.CharField(max_length=100, blank=True)
mbox = models.CharField(
max_length=128, db_index=True, null=True, unique=True)
mbox_sha1sum = models.CharField(
max_length=40, db_index=True, null=True, unique=True)
openid = models.CharField(
max_length=MAX_URL_LENGTH, db_index=True, null=True, unique=True)
oauth_identifier = models.CharField(
max_length=192, db_index=True, null=True, unique=True)
member = models.ManyToManyField('self', related_name="agents")
account_homePage = models.CharField(max_length=MAX_URL_LENGTH, null=True)
account_name = models.CharField(max_length=50, null=True)
user = models.OneToOneField(User, on_delete=models.SET_NULL, null=True)
objects = AgentManager()
class Meta:
unique_together = ("account_homePage", "account_name")
def to_dict(self, ids_only=False):
ret = OrderedDict()
if self.mbox:
ret['mbox'] = self.mbox
if self.mbox_sha1sum:
ret['mbox_sha1sum'] = self.mbox_sha1sum
if self.openid:
ret['openid'] = self.openid
if self.account_name:
ret['account'] = OrderedDict()
ret['account']['name'] = self.account_name
ret['account']['homePage'] = self.account_homePage
if self.objectType == 'Group':
ret['objectType'] = self.objectType
# show members for groups if ids_only is false
# show members' ids for anon groups if ids_only is true
if not ids_only or not (set(['mbox', 'mbox_sha1sum', 'openid', 'account']) & set(ret.keys())):
if self.member.all():
ret['member'] = [a.to_dict(ids_only)
for a in self.member.all()]
ret['objectType'] = self.objectType
if self.name and not ids_only:
ret['name'] = self.name
return ret
# Used only for /agent GET endpoint (check spec)
def to_dict_person(self):
ret = OrderedDict()
ret['objectType'] = "Person"
if self.name:
ret['name'] = [self.name]
if self.mbox:
ret['mbox'] = [self.mbox]
if self.mbox_sha1sum:
ret['mbox_sha1sum'] = [self.mbox_sha1sum]
if self.openid:
ret['openid'] = [self.openid]
if self.account_name:
ret['account'] = []
acc = OrderedDict()
acc['name'] = self.account_name
acc['homePage'] = self.account_homePage
ret['account'].append(acc)
return ret
def get_a_name(self):
if self.name:
return self.name
if self.mbox:
return self.mbox
if self.mbox_sha1sum:
return self.mbox_sha1sum
if self.openid:
return self.openid
if self.account_name:
return self.account_name
if self.objectType == 'Agent':
return "unknown"
else:
return "anonymous group"
def get_user_from_oauth_group(self):
if self.oauth_identifier:
if self.member.all()[0].account_homePage:
return self.member.all()[1]
else:
return self.member.all()[0]
return None
def __unicode__(self):
return json.dumps(self.to_dict(), sort_keys=False)
class Activity(models.Model):
activity_id = models.CharField(
max_length=MAX_URL_LENGTH, db_index=True, unique=True)
canonical_data = JSONField(default=dict)
authority = models.ForeignKey(Agent, null=True)
def return_activity_with_lang_format(self, lang=None, ids_only=False):
if ids_only:
return {'id': self.activity_id}
ret = self.canonical_data
if 'objectType' not in self.canonical_data:
ret['objectType'] = 'Activity'
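        # Reduce every language map inside the definition (name, description
        # and the interaction components) to the best match for the requested
        # language.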
if 'definition' in self.canonical_data:
if 'name' in ret['definition'] and ret['definition']['name'].items():
ret['definition']['name'] = get_lang(
ret['definition']['name'], lang)
if 'description' in ret['definition'] and ret['definition']['description'].items():
ret['definition']['description'] = get_lang(
ret['definition']['description'], lang)
if 'scale' in ret['definition']:
for s in ret['definition']['scale']:
if s.items():
s['description'] = get_lang(s['description'], lang)
if 'choices' in ret['definition']:
for c in ret['definition']['choices']:
if c.items():
c['description'] = get_lang(c['description'], lang)
if 'steps' in ret['definition']:
for st in ret['definition']['steps']:
                    if st.items():
st['description'] = get_lang(st['description'], lang)
if 'source' in ret['definition']:
for so in ret['definition']['source']:
                    if so.items():
so['description'] = get_lang(so['description'], lang)
for t in ret['definition']['target']:
if t.items():
t['description'] = get_lang(t['description'], lang)
return ret
def get_a_name(self):
if 'definition' in self.canonical_data:
            return self.canonical_data['definition'].get('name', {}).get('en-US', self.activity_id)
else:
return self.activity_id
def __unicode__(self):
return json.dumps(self.canonical_data, sort_keys=False)
class SubStatement(models.Model):
object_agent = models.ForeignKey(
Agent, related_name="object_of_substatement", on_delete=models.SET_NULL, null=True, db_index=True)
object_activity = models.ForeignKey(
Activity, related_name="object_of_substatement", on_delete=models.SET_NULL, null=True, db_index=True)
object_statementref = models.UUIDField(
null=True, editable=False, db_index=True)
actor = models.ForeignKey(
Agent, related_name="actor_of_substatement", null=True, on_delete=models.SET_NULL)
verb = models.ForeignKey(Verb, null=True, on_delete=models.SET_NULL)
result_success = models.NullBooleanField()
result_completion = models.NullBooleanField()
result_response = models.TextField(blank=True)
result_duration = models.CharField(max_length=40, blank=True)
result_score_scaled = models.FloatField(blank=True, null=True)
result_score_raw = models.FloatField(blank=True, null=True)
result_score_min = models.FloatField(blank=True, null=True)
result_score_max = models.FloatField(blank=True, null=True)
result_extensions = JSONField(default=dict, blank=True)
timestamp = models.DateTimeField(null=True)
context_registration = models.CharField(
max_length=40, blank=True, db_index=True)
context_instructor = models.ForeignKey(Agent, blank=True, null=True, on_delete=models.SET_NULL,
db_index=True, related_name='substatement_context_instructor')
context_team = models.ForeignKey(Agent, blank=True, null=True, on_delete=models.SET_NULL,
related_name="substatement_context_team")
context_revision = models.TextField(blank=True)
context_platform = models.CharField(max_length=50, blank=True)
context_language = models.CharField(max_length=50, blank=True)
context_extensions = JSONField(default=dict, blank=True)
context_ca_parent = models.ManyToManyField(
Activity, related_name="sub_context_ca_parent")
context_ca_grouping = models.ManyToManyField(
Activity, related_name="sub_context_ca_grouping")
context_ca_category = models.ManyToManyField(
Activity, related_name="sub_context_ca_category")
context_ca_other = models.ManyToManyField(
Activity, related_name="sub_context_ca_other")
# context also has a stmt field which is a statementref
context_statement = models.CharField(max_length=40, blank=True)
def to_dict(self, lang=None, ids_only=False):
ret = OrderedDict()
ret['actor'] = self.actor.to_dict(ids_only)
ret['verb'] = self.verb.return_verb_with_lang(lang, ids_only)
if self.object_agent:
ret['object'] = self.object_agent.to_dict(ids_only)
elif self.object_activity:
ret['object'] = self.object_activity.return_activity_with_lang_format(
lang, ids_only)
else:
ret['object'] = {
'id': str(self.object_statementref), 'objectType': 'StatementRef'}
ret['result'] = OrderedDict()
if self.result_success is not None:
ret['result']['success'] = self.result_success
if self.result_completion is not None:
ret['result']['completion'] = self.result_completion
if self.result_response:
ret['result']['response'] = self.result_response
if self.result_duration:
ret['result']['duration'] = self.result_duration
ret['result']['score'] = OrderedDict()
if self.result_score_scaled is not None:
ret['result']['score']['scaled'] = self.result_score_scaled
if self.result_score_raw is not None:
ret['result']['score']['raw'] = self.result_score_raw
if self.result_score_min is not None:
ret['result']['score']['min'] = self.result_score_min
if self.result_score_max is not None:
ret['result']['score']['max'] = self.result_score_max
# If there is no score, delete from dict
if not ret['result']['score']:
del ret['result']['score']
if self.result_extensions:
ret['result']['extensions'] = self.result_extensions
# If no result, delete from dict
if not ret['result']:
del ret['result']
ret['context'] = OrderedDict()
if self.context_registration:
ret['context']['registration'] = self.context_registration
if self.context_instructor:
ret['context'][
'instructor'] = self.context_instructor.to_dict(ids_only)
if self.context_team:
ret['context']['team'] = self.context_team.to_dict(ids_only)
if self.context_revision:
ret['context']['revision'] = self.context_revision
if self.context_platform:
ret['context']['platform'] = self.context_platform
if self.context_language:
ret['context']['language'] = self.context_language
if self.context_statement:
ret['context']['statement'] = {
'id': self.context_statement, 'objectType': 'StatementRef'}
ret['context']['contextActivities'] = OrderedDict()
if self.context_ca_parent.all():
ret['context']['contextActivities']['parent'] = [cap.return_activity_with_lang_format(
lang, ids_only) for cap in self.context_ca_parent.all()]
if self.context_ca_grouping.all():
ret['context']['contextActivities']['grouping'] = [cag.return_activity_with_lang_format(
lang, ids_only) for cag in self.context_ca_grouping.all()]
if self.context_ca_category.all():
ret['context']['contextActivities']['category'] = [cac.return_activity_with_lang_format(
lang, ids_only) for cac in self.context_ca_category.all()]
if self.context_ca_other.all():
ret['context']['contextActivities']['other'] = [cao.return_activity_with_lang_format(
lang, ids_only) for cao in self.context_ca_other.all()]
if self.context_extensions:
ret['context']['extensions'] = self.context_extensions
if not ret['context']['contextActivities']:
del ret['context']['contextActivities']
if not ret['context']:
del ret['context']
if self.timestamp:
ret['timestamp'] = self.timestamp.isoformat()
ret['objectType'] = "SubStatement"
return ret
def get_a_name(self):
if self.object_activity:
return self.object_activity.get_a_name()
elif self.object_agent:
return self.object_agent.get_a_name()
else:
return str(self.object_statementref)
def get_object(self):
if self.object_activity:
stmt_object = self.object_activity
elif self.object_agent:
stmt_object = self.object_agent
else:
stmt_object = {
'id': str(self.object_statementref), 'objectType': 'StatementRef'}
return stmt_object
def __unicode__(self):
return json.dumps(self.to_dict(), sort_keys=False)
class Statement(models.Model):
# If no statement_id is given, will create one automatically
statement_id = models.UUIDField(
default=uuid.uuid4, db_index=True, editable=False)
object_agent = models.ForeignKey(
Agent, related_name="object_of_statement", null=True, on_delete=models.SET_NULL, db_index=True)
object_activity = models.ForeignKey(
Activity, related_name="object_of_statement", null=True, on_delete=models.SET_NULL, db_index=True)
object_substatement = models.ForeignKey(
SubStatement, related_name="object_of_statement", null=True, on_delete=models.SET_NULL)
object_statementref = models.UUIDField(
null=True, editable=False, db_index=True)
actor = models.ForeignKey(Agent, related_name="actor_statement", db_index=True, null=True,
on_delete=models.SET_NULL)
verb = models.ForeignKey(Verb, null=True, on_delete=models.SET_NULL)
result_success = models.NullBooleanField()
result_completion = models.NullBooleanField()
result_response = models.TextField(blank=True)
result_duration = models.CharField(max_length=40, blank=True)
result_score_scaled = models.FloatField(blank=True, null=True)
result_score_raw = models.FloatField(blank=True, null=True)
result_score_min = models.FloatField(blank=True, null=True)
result_score_max = models.FloatField(blank=True, null=True)
result_extensions = JSONField(default=dict, blank=True)
stored = models.DateTimeField(default=timezone.now, db_index=True)
timestamp = models.DateTimeField(db_index=True)
authority = models.ForeignKey(Agent, blank=True, null=True, related_name="authority_statement", db_index=True,
on_delete=models.SET_NULL)
voided = models.NullBooleanField(default=False)
context_registration = models.CharField(
max_length=40, blank=True, db_index=True)
context_instructor = models.ForeignKey(Agent, blank=True, null=True, on_delete=models.SET_NULL,
db_index=True, related_name='statement_context_instructor')
context_team = models.ForeignKey(Agent, blank=True, null=True, on_delete=models.SET_NULL,
related_name="statement_context_team")
context_revision = models.TextField(blank=True)
context_platform = models.CharField(max_length=50, blank=True)
context_language = models.CharField(max_length=50, blank=True)
context_extensions = JSONField(default=dict, blank=True)
context_ca_parent = models.ManyToManyField(
Activity, related_name="stmt_context_ca_parent")
context_ca_grouping = models.ManyToManyField(
Activity, related_name="stmt_context_ca_grouping")
context_ca_category = models.ManyToManyField(
Activity, related_name="stmt_context_ca_category")
context_ca_other = models.ManyToManyField(
Activity, related_name="stmt_context_ca_other")
# context also has a stmt field which is a statementref
context_statement = models.CharField(max_length=40, blank=True)
version = models.CharField(max_length=7)
# Used in views
user = models.ForeignKey(User, null=True, blank=True,
db_index=True, on_delete=models.SET_NULL)
full_statement = JSONField()
def to_dict(self, lang=None, ret_format='exact'):
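        # ret_format 'exact' returns the stored statement verbatim, 'ids'
        # collapses agents/activities to their identifiers, and anything else
        # builds the canonical representation assembled below.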
ret = OrderedDict()
if ret_format == 'exact':
return self.full_statement
ids_only = True if ret_format == 'ids' else False
ret['id'] = str(self.statement_id)
ret['actor'] = self.actor.to_dict(ids_only)
ret['verb'] = self.verb.return_verb_with_lang(lang, ids_only)
if self.object_agent:
ret['object'] = self.object_agent.to_dict(ids_only)
elif self.object_activity:
ret['object'] = self.object_activity.return_activity_with_lang_format(
lang, ids_only)
elif self.object_substatement:
ret['object'] = self.object_substatement.to_dict(lang, ids_only)
else:
ret['object'] = {
'id': str(self.object_statementref), 'objectType': 'StatementRef'}
ret['result'] = OrderedDict()
if self.result_success is not None:
ret['result']['success'] = self.result_success
if self.result_completion is not None:
ret['result']['completion'] = self.result_completion
if self.result_response:
ret['result']['response'] = self.result_response
if self.result_duration:
ret['result']['duration'] = self.result_duration
ret['result']['score'] = OrderedDict()
if self.result_score_scaled is not None:
ret['result']['score']['scaled'] = self.result_score_scaled
if self.result_score_raw is not None:
ret['result']['score']['raw'] = self.result_score_raw
if self.result_score_min is not None:
ret['result']['score']['min'] = self.result_score_min
if self.result_score_max is not None:
ret['result']['score']['max'] = self.result_score_max
# If there is no score, delete from dict
if not ret['result']['score']:
del ret['result']['score']
if self.result_extensions:
ret['result']['extensions'] = self.result_extensions
if not ret['result']:
del ret['result']
ret['context'] = OrderedDict()
if self.context_registration:
ret['context']['registration'] = self.context_registration
if self.context_instructor:
ret['context'][
'instructor'] = self.context_instructor.to_dict(ids_only)
if self.context_team:
ret['context']['team'] = self.context_team.to_dict(ids_only)
if self.context_revision:
ret['context']['revision'] = self.context_revision
if self.context_platform:
ret['context']['platform'] = self.context_platform
if self.context_language:
ret['context']['language'] = self.context_language
if self.context_statement:
ret['context']['statement'] = {
'id': self.context_statement, 'objectType': 'StatementRef'}
ret['context']['contextActivities'] = OrderedDict()
if self.context_ca_parent.all():
ret['context']['contextActivities']['parent'] = [cap.return_activity_with_lang_format(
lang, ids_only) for cap in self.context_ca_parent.all()]
if self.context_ca_grouping.all():
ret['context']['contextActivities']['grouping'] = [cag.return_activity_with_lang_format(
lang, ids_only) for cag in self.context_ca_grouping.all()]
if self.context_ca_category.all():
ret['context']['contextActivities']['category'] = [cac.return_activity_with_lang_format(
lang, ids_only) for cac in self.context_ca_category.all()]
if self.context_ca_other.all():
ret['context']['contextActivities']['other'] = [cao.return_activity_with_lang_format(
lang, ids_only) for cao in self.context_ca_other.all()]
if self.context_extensions:
ret['context']['extensions'] = self.context_extensions
if not ret['context']['contextActivities']:
del ret['context']['contextActivities']
if not ret['context']:
del ret['context']
ret['timestamp'] = self.timestamp.isoformat()
ret['stored'] = self.stored.isoformat()
if self.authority is not None:
ret['authority'] = self.authority.to_dict(ids_only)
ret['version'] = self.version
if self.stmt_attachments.all():
ret['attachments'] = [a.return_attachment_with_lang(
lang) for a in self.stmt_attachments.all()]
return ret
def get_a_name(self):
return self.statement_id
def get_object(self):
if self.object_activity:
stmt_object = self.object_activity
elif self.object_agent:
stmt_object = self.object_agent
elif self.object_substatement:
stmt_object = self.object_substatement
else:
stmt_object = {
'id': str(self.object_statementref), 'objectType': 'StatementRef'}
return stmt_object
def __unicode__(self):
return json.dumps(self.to_dict(), sort_keys=False)
class AttachmentFileSystemStorage(FileSystemStorage):
def get_available_name(self, name, max_length=None):
return name
def _save(self, name, content, max_length=None):
if self.exists(name):
# if the file exists, do not call the superclasses _save method
return name
# if the file is new, DO call it
return super(AttachmentFileSystemStorage, self)._save(name, content)
class StatementAttachment(models.Model):
canonical_data = JSONField(default=dict)
payload = models.FileField(max_length=150, upload_to=STATEMENT_ATTACHMENT_UPLOAD_TO,
storage=AttachmentFileSystemStorage(), null=True)
statement = models.ForeignKey(
Statement, related_name="stmt_attachments", null=True)
def return_attachment_with_lang(self, lang=None):
ret = OrderedDict(self.canonical_data)
if 'display' in ret and ret['display'].items():
ret['display'] = get_lang(self.canonical_data['display'], lang)
if 'description' in ret and ret['description'].items():
ret['description'] = get_lang(
self.canonical_data['description'], lang)
return ret
def __unicode__(self):
return json.dumps(self.canonical_data, sort_keys=False)
class ActivityState(models.Model):
state_id = models.CharField(max_length=MAX_URL_LENGTH)
updated = models.DateTimeField(
auto_now_add=True, blank=True, db_index=True)
activity_id = models.CharField(max_length=MAX_URL_LENGTH, db_index=True)
registration_id = models.CharField(max_length=40, db_index=True)
content_type = models.CharField(max_length=255, blank=True)
etag = models.CharField(max_length=50, blank=True)
agent = models.ForeignKey(Agent)
json_state = JSONField(default=dict)
state = models.FileField(upload_to=ACTIVITY_STATE_UPLOAD_TO, null=True)
def delete(self, *args, **kwargs):
if self.state:
self.state.delete()
super(ActivityState, self).delete(*args, **kwargs)
def save(self, *args, **kwargs):
if self.json_state and isinstance(self.json_state, basestring):
try:
json.loads(self.json_state)
except Exception:
try:
ast.literal_eval(self.json_state)
except Exception:
raise BadRequest("The Activity State body is not valid JSON")
elif self.json_state and not isinstance(self.json_state, basestring):
raise BadRequest("The Activity State body is not valid JSON")
super(ActivityState, self).save(*args, **kwargs)
class ActivityProfile(models.Model):
profile_id = models.CharField(max_length=MAX_URL_LENGTH, db_index=True)
updated = models.DateTimeField(
auto_now_add=True, blank=True, db_index=True)
activity_id = models.CharField(max_length=MAX_URL_LENGTH, db_index=True)
content_type = models.CharField(max_length=255, blank=True)
etag = models.CharField(max_length=50, blank=True)
json_profile = JSONField(default=dict)
profile = models.FileField(upload_to=ACTIVITY_PROFILE_UPLOAD_TO, null=True)
def delete(self, *args, **kwargs):
if self.profile:
self.profile.delete()
super(ActivityProfile, self).delete(*args, **kwargs)
def save(self, *args, **kwargs):
if self.json_profile and isinstance(self.json_profile, basestring):
try:
json.loads(self.json_profile)
except Exception:
try:
ast.literal_eval(self.json_profile)
except Exception:
raise BadRequest("The Activity Profile body is not valid JSON")
elif self.json_profile and not isinstance(self.json_profile, basestring):
raise BadRequest("The Activity Profile body is not valid JSON")
super(ActivityProfile, self).save(*args, **kwargs)
class AgentProfile(models.Model):
profile_id = models.CharField(max_length=MAX_URL_LENGTH, db_index=True)
updated = models.DateTimeField(
auto_now_add=True, blank=True, db_index=True)
content_type = models.CharField(max_length=255, blank=True)
etag = models.CharField(max_length=50, blank=True)
agent = models.ForeignKey(Agent, db_index=True)
json_profile = JSONField(default=dict)
profile = models.FileField(upload_to=AGENT_PROFILE_UPLOAD_TO, null=True)
def delete(self, *args, **kwargs):
if self.profile:
self.profile.delete()
super(AgentProfile, self).delete(*args, **kwargs)
def save(self, *args, **kwargs):
if self.json_profile and isinstance(self.json_profile, basestring):
try:
json.loads(self.json_profile)
except Exception:
try:
ast.literal_eval(self.json_profile)
except Exception:
raise BadRequest("The Agent Profile body is not valid JSON")
elif self.json_profile and not isinstance(self.json_profile, basestring):
raise BadRequest("The Agent Profile body is not valid JSON")
        super(AgentProfile, self).save(*args, **kwargs)
| {
"content_hash": "bc7b7ee23f69a6df927c3bc910bf996e",
"timestamp": "",
"source": "github",
"line_count": 780,
"max_line_length": 114,
"avg_line_length": 44.47179487179487,
"alnum_prop": 0.5970364391143912,
"repo_name": "creighton/ADL_LRS",
"id": "beeb5c0ea8cfe5ff11e683db2dff08f75299a10e",
"size": "34688",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lrs/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "11916"
},
{
"name": "HTML",
"bytes": "53586"
},
{
"name": "JavaScript",
"bytes": "6977"
},
{
"name": "Python",
"bytes": "1298137"
}
],
"symlink_target": ""
} |
from JumpScale import j
from JumpScale.baselib.atyourservice81.Service import Service
import capnp
import msgpack
from JumpScale.baselib.atyourservice81 import model_capnp as ModelCapnp
class Actor():
def __init__(self, aysrepo, template=None, model=None, name=None):
"""
init from a template or from a model
"""
self.aysrepo = aysrepo
self.logger = j.atyourservice.logger
self._schema = None
self.model = None
if template is not None:
self._initFromTemplate(template)
elif model is not None:
self.model = model
elif name is not None:
self.loadFromFS(name)
else:
raise j.exceptions.Input(
message="template or model or name needs to be specified when creating an actor", level=1, source="", tags="", msgpub="")
@property
def path(self):
return j.sal.fs.joinPaths(self.aysrepo.path, "actors", self.model.name)
def loadFromFS(self, name):
"""
get content from fs and load in object
"""
if self.model is None:
self.model = self.aysrepo.db.actors.new()
actor_path = j.sal.fs.joinPaths(self.aysrepo.path, "actors", name)
self.logger.debug("load actor from FS: %s" % actor_path)
json = j.data.serializer.json.load(j.sal.fs.joinPaths(actor_path, "actor.json"))
# for now we don't reload the actions codes.
# when using distributed DB, the actions code could still be available
del json['actions']
self.model.dbobj = ModelCapnp.Actor.new_message(**json)
# need to save already here cause processActionFile is doing a find
# and it need to be able to find this new actor model we are creating
self.model.save()
# recreate the actions code from the action.py file from the file system
self._processActionsFile(j.sal.fs.joinPaths(actor_path, "actions.py"))
self.saveAll()
def saveToFS(self):
j.sal.fs.createDir(self.path)
path = j.sal.fs.joinPaths(self.path, "actor.json")
j.sal.fs.writeFile(filename=path, contents=str(self.model.dictJson), append=False)
actionspath = j.sal.fs.joinPaths(self.path, "actions.py")
j.sal.fs.writeFile(actionspath, self.model.actionsSourceCode)
# path3 = j.sal.fs.joinPaths(self.path, "config.json")
# if self.model.data != {}:
# j.sal.fs.writeFile(path3, self.model.dataJSON)
path4 = j.sal.fs.joinPaths(self.path, "schema.capnp")
if self.model.dbobj.serviceDataSchema.strip() != "":
j.sal.fs.writeFile(path4, self.model.dbobj.serviceDataSchema)
def saveAll(self):
self.model.save()
self.saveToFS()
def _initFromTemplate(self, template):
if self.model is None:
self.model = self.aysrepo.db.actors.new()
self.model.dbobj.name = template.name
self.model.dbobj.state = "new"
# git location of actor
self.model.dbobj.gitRepo.url = self.aysrepo.git.remoteUrl
actorpath = j.sal.fs.joinPaths(self.aysrepo.path, "actors", self.model.name)
self.model.dbobj.gitRepo.path = j.sal.fs.pathRemoveDirPart(self.path, actorpath)
# process origin,where does the template come from
# TODO: *1 need to check if template can come from other aysrepo than the one we work on right now
self.model.dbobj.origin.gitUrl = template.remoteUrl
self.model.dbobj.origin.path = template.pathRelative
self._initParent(template)
self._initProducers(template)
self._initFlists(template)
self._processActionsFile(j.sal.fs.joinPaths(template.path, "actions.py"))
self._initRecurringActions(template)
# hrd schema to capnp
if self.model.dbobj.serviceDataSchema != template.schemaCapnpText:
self.model.dbobj.serviceDataSchema = template.schemaCapnpText
self.processChange("dataschema")
if self.model.dbobj.dataUI != template.dataUI:
self.model.dbobj.dataUI = template.dataUI
self.processChange("ui")
# if self.model.dataJSON != template.configJSON:
# self.model.dbobj.data = msgpack.dumps(template.configDict)
# self.processChange("config")
self.saveToFS()
self.model.save()
def _initParent(self, template):
parent = template.schemaHrd.parentSchemaItemGet()
if parent is not None:
parent_name = parent.parent
parent_role = parent_name.split('.')[0]
self.model.parentSet(role=parent_role, auto=bool(parent.auto), optional=bool(parent.optional), argKey=parent.name)
def _initProducers(self, template):
consumed_actors = template.schemaHrd.consumeSchemaItemsGet()
self.model.dbobj.init('producers', len(consumed_actors))
for i, consume_info in enumerate(consumed_actors):
actor_name = consume_info.consume_link
actor_role = actor_name.split('.')[0]
producer = self.model.dbobj.producers[i]
producer.actorRole = actor_role
producer.minServices = int(consume_info.consume_nr_min)
producer.maxServices = int(consume_info.consume_nr_max)
producer.auto = bool(consume_info.auto)
producer.argKey = consume_info.name
def _initRecurringActions(self, template):
for action, reccuring_info in template.recurringDict.items():
action_model = self.model.actions[action]
action_model.period = j.data.types.duration.convertToSeconds(reccuring_info['period'])
action_model.log = j.data.types.bool.fromString(reccuring_info['log'])
def _initFlists(self, template):
self.model.dbobj.init('flists', len(template.flists))
for i, name in enumerate(template.flists):
info = template.flists[name]
flistObj = self.model.dbobj.flists[i]
flistObj.name = name
flistObj.mountpoint = info['mountpoint']
flistObj.namespace = info['namespace']
flistObj.mode = info['mode'].lower()
flistObj.storeUrl = info['store_url']
flistObj.content = info['content']
def _processActionsFile(self, path):
def string_has_triple_quotes(s):
return "'''" in s or '"""' in s
self._out = ""
actionmethodsRequired = ["input", "init", "install", "stop", "start", "monitor", "halt", "check_up", "check_down",
"check_requirements", "cleanup", "data_export", "data_import", "uninstall", "removedata",
"consume", "action_pre_", "action_post_", "init_actions_"]
actorMethods = ["input", "build"]
parsedActorMethods = actionmethodsRequired[:]
if j.sal.fs.exists(path):
content = j.sal.fs.fileGetContents(path)
else:
content = "class Actions():\n\n"
if content.find("class action(ActionMethodDecorator)") != -1:
raise j.exceptions.Input("There should be no decorator specified in %s" % self.path_actions)
content = content.replace("from JumpScale import j", "")
content = "from JumpScale import j\n\n%s" % content
state = "INIT"
amSource = ""
actionName = ""
amDoc = ""
amDecorator = ""
amMethodArgs = {}
# DO NOT CHANGE TO USE PYTHON PARSING UTILS
lines = content.splitlines()
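        # Simple line-based state machine: INIT (before any code), MAIN
        # (between methods), DEF (inside a method body) and DEFDOC (inside a
        # docstring); every completed method is registered via _addAction().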
for line in lines:
linestrip = line.strip()
if linestrip.startswith("#"): # general guard for comments in the beginning of the line
continue
if linestrip.startswith('"""') and len(linestrip.split('"""')) > 2:
continue
# if state == "INIT" and linestrip.startswith("class Actions"):
if state == "INIT" and linestrip != '':
state = "MAIN"
continue
if state in ["MAIN", "INIT"]:
if linestrip == "" or linestrip[0] == "#":
continue
if state == "DEF" and line[:7] != ' def' and (linestrip.startswith("@") or linestrip.startswith("def")):
# means we are at end of def to new one
parsedActorMethods.append(actionName)
self._addAction(actionName, amSource, amDecorator, amMethodArgs, amDoc)
amSource = ""
actionName = ""
amDoc = ""
amDecorator = ""
amMethodArgs = {}
state = 'MAIN'
if state in ["MAIN", "DEF"] and linestrip.startswith("@"):
amDecorator = linestrip
continue
if state == "MAIN" and linestrip.startswith("def"):
definition, args = linestrip.split("(", 1)
amDoc = ""
amSource = ""
amMethodArgs = args.rstrip('):')
actionName = definition[4:].strip()
if amDecorator == "":
if actionName in actorMethods:
amDecorator = "@actor"
else:
amDecorator = "@service"
state = "DEF"
canbeInDocString = True
continue
if state == "DEF" and line.strip() == "":
continue
if state == "DEF" and string_has_triple_quotes(line[4:8]) and canbeInDocString:
state = "DEFDOC"
amDoc = ""
continue
if state == "DEFDOC" and string_has_triple_quotes(line[4:8]):
state = "DEF"
canbeInDocString = False
continue
if state == "DEFDOC":
amDoc += "%s\n" % line[4:]
continue
if state == "DEF":
if not string_has_triple_quotes(linestrip):
canbeInDocString = False
if linestrip != line[4:].strip():
# means we were not rightfully intented
raise j.exceptions.Input(message="error in source of action from %s (indentation):\nline:%s\n%s" % (
self, line, content), level=1, source="", tags="", msgpub="")
amSource += "%s\n" % line[4:]
# process the last one
if actionName != "":
parsedActorMethods.append(actionName)
self._addAction(actionName, amSource, amDecorator, amMethodArgs, amDoc)
# check for removed actions in the actor
self._checkRemovedActions(parsedActorMethods)
for actionname in actionmethodsRequired:
if actionname not in self.model.actionsSortedList:
# not found
# check if we find the action in our default actions, if yes use that one
if actionname in j.atyourservice.baseActions:
actionobj, actionmethod = j.atyourservice.baseActions[actionname]
self._addAction2(actionname, actionobj)
else:
if actionname == "input":
amSource = "return None"
self._addAction(actionName="input", amSource=amSource,
amDecorator="actor", amMethodArgs="job", amDoc="")
else:
self._addAction(actionName=actionname, amSource="",
amDecorator="service", amMethodArgs="job", amDoc="")
def _checkRemovedActions(self, parsedMethods):
for action in self.model.actionsSortedList:
if action not in parsedMethods:
self.processChange('action_del_%s' % action)
def _addAction(self, actionName, amSource, amDecorator, amMethodArgs, amDoc):
if amSource == "":
amSource = "pass"
amDoc = amDoc.strip()
# THIS COULD BE DANGEROUS !!! (despiegk)
amSource = amSource.strip(" \n")
ac = j.core.jobcontroller.db.actions.new()
ac.dbobj.code = amSource
ac.dbobj.actorName = self.model.name
ac.dbobj.doc = amDoc
ac.dbobj.name = actionName
ac.dbobj.args = amMethodArgs
ac.dbobj.lastModDate = j.data.time.epoch
ac.dbobj.origin = "actoraction:%s:%s" % (self.model.dbobj.name, actionName)
if not j.core.jobcontroller.db.actions.exists(ac.key):
# will save in DB
ac.save()
else:
ac = j.core.jobcontroller.db.actions.get(key=ac.key)
self._addAction2(actionName, ac)
def _addAction2(self, actionName, action):
"""
@param actionName = actionName
@param action is the action object
"""
actionObj = self.model.actionAdd(name=actionName, key=action.key)
if actionObj.state == "new":
self.processChange("action_new_%s" % actionName)
else:
self.processChange("action_mod_%s" % actionName)
def processChange(self, changeCategory):
"""
template action change
categories :
- dataschema
- ui
- config
- action_new_actionname
- action_mod_actionname
- action_del_actionname
"""
# self.logger.debug('process change for %s (%s)' % (self, changeCategory))
if changeCategory == 'dataschema':
# TODO
pass
elif changeCategory == 'ui':
# TODO
pass
elif changeCategory == 'config':
# TODO
pass
elif changeCategory.find('action_new') != -1:
# TODO
pass
elif changeCategory.find('action_mod') != -1:
# TODO
pass
elif changeCategory.find('action_del') != -1:
action_name = changeCategory.split('action_del_')[1]
self.model.actionDelete(action_name)
self.saveAll()
for service in self.aysrepo.servicesFind(actor=self.model.name):
service.processChange(actor=self, changeCategory=changeCategory)
# SERVICE
def serviceCreate(self, instance="main", args={}):
instance = instance
service = self.aysrepo.serviceGet(role=self.model.role, instance=instance, die=False)
if service is not None:
service._check_args(self, args)
return service
# checking if we have the service on the file system
target = "%s!%s" % (self.model.name, instance)
services_dir = j.sal.fs.joinPaths(self.aysrepo.path, 'services')
results = j.sal.fs.walkExtended(services_dir, files=False, dirPattern=target)
if len(results) > 1:
raise j.exceptions.RuntimeError("found more then one service directory for %s" % target)
elif len(results) == 1:
service = Service(aysrepo=self.aysrepo, path=results[0])
else:
service = Service(aysrepo=self.aysrepo, actor=self, name=instance, args=args)
return service
@property
def services(self):
"""
return a list of instance name for this template
"""
return self.aysrepo.servicesFind(actor=self.model.dbobj.name)
# GENERIC
def __repr__(self):
return "actor: %-15s" % (self.model.name)
| {
"content_hash": "1a8146f98165249d63b884500b7278a9",
"timestamp": "",
"source": "github",
"line_count": 403,
"max_line_length": 137,
"avg_line_length": 38.36724565756824,
"alnum_prop": 0.5746992627085759,
"repo_name": "Jumpscale/jumpscale_core8",
"id": "d320e2c5f327c20cc034e2e8d4a16a5b27616b31",
"size": "15462",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/JumpScale/baselib/atyourservice81/Actor.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1113"
},
{
"name": "Cap'n Proto",
"bytes": "9033"
},
{
"name": "Lua",
"bytes": "12538"
},
{
"name": "Python",
"bytes": "4343122"
},
{
"name": "Shell",
"bytes": "7091"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('server', '0054_auto_20170705_1603'),
]
operations = [
migrations.AlterField(
model_name='machine',
name='os_family',
field=models.CharField(choices=[(b'Darwin', b'macOS'), (b'Windows', b'Windows'), (
b'Linux', b'Linux')], db_index=True, default=b'Darwin', max_length=256, verbose_name=b'OS Family'),
),
]
| {
"content_hash": "95ea0df6b764940338aa1774003c3672",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 115,
"avg_line_length": 29.41176470588235,
"alnum_prop": 0.576,
"repo_name": "salopensource/sal",
"id": "3e4dc548e288385371ae9c5898e61c2976b62cb4",
"size": "549",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "server/migrations/0055_auto_20170822_1155.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "119817"
},
{
"name": "Dockerfile",
"bytes": "2228"
},
{
"name": "HTML",
"bytes": "152173"
},
{
"name": "JavaScript",
"bytes": "279963"
},
{
"name": "Less",
"bytes": "67048"
},
{
"name": "Makefile",
"bytes": "2208"
},
{
"name": "Procfile",
"bytes": "23"
},
{
"name": "Python",
"bytes": "613680"
},
{
"name": "SCSS",
"bytes": "51035"
},
{
"name": "Shell",
"bytes": "4535"
}
],
"symlink_target": ""
} |
def get_model_desc(model):
"""
Generates a small description of a Keras model. Suitable for generating footer descriptions for charts.
:param model:
:return:
"""
desc = []
conf = model.get_config()
for layer in (conf["layers"] if "layers" in conf else conf):
if "layer" in layer["config"]:
name = "_".join([layer['class_name'], layer["config"]['layer']['class_name']])
config = layer["config"]['layer']["config"]
else:
name = layer['class_name']
config = layer["config"]
params = []
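        # Probe a few common config keys (p, sigma, output_dim, activation,
        # l2); layers that lack a given key are skipped silently.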
try:
params.append(config["p"])
except:
pass
try:
params.append(config["sigma"])
except:
pass
try:
params.append(config["output_dim"])
except:
pass
try:
params.append(config["activation"])
except:
pass
try:
params.append(config['l2'])
except:
pass
desc.append(name + "({})".format(",".join([str(p) for p in params])))
description = " -> ".join(desc)
try:
description += " : [optimizer= {}, clipnorm={} - batch_size={}]".format(model.optimizer.__class__.__name__,
model.optimizer.clipnorm,
model.model.history.params[
'batch_size'])
except:
pass
return description
| {
"content_hash": "490f20395a2ffb82910e143faa64d4f7",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 115,
"avg_line_length": 32.15686274509804,
"alnum_prop": 0.4378048780487805,
"repo_name": "cbaziotis/keras-utilities",
"id": "e8f9ce235cd7d947a5e2d0878581eecbd85e7782",
"size": "1640",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kutilities/helpers/generic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29851"
},
{
"name": "Shell",
"bytes": "419"
}
],
"symlink_target": ""
} |
"""Open Vswitch Mac address module"""
import logging
from compass.hdsdiscovery import utils
from compass.hdsdiscovery import base
CLASS_NAME = "Mac"
class Mac(base.BasePlugin):
"""Open Vswitch MAC address module"""
def __init__(self, host, credential):
self.host = host
self.credential = credential
def process_data(self, oper="SCAN"):
"""Dynamically call the function according 'oper'
:param oper: operation of data processing
"""
func_name = oper.lower()
return getattr(self, func_name)()
def scan(self):
"""
Implemnets the scan method in BasePlugin class. In this module,
mac addesses were retrieved by ssh
"""
try:
user = self.credential['username']
pwd = self.credential['password']
except KeyError:
logging.error("Cannot find username and password in credential")
return None
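        # Shell pipeline: for every OVS bridge, list its ports, dump the
        # forwarding database and print "port vlan mac" triples for parsing.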
cmd = ("BRIDGES=$(ovs-vsctl show |grep Bridge |cut -f 2 -d '\"');"
"for br in $BRIDGES; do"
"PORTS=$(ovs-ofctl show $br |grep addr |cut -f 1 -d ':' "
"|egrep -v 'eth|wlan|LOCAL'|awk -F '(' '{print $1}');"
"for port in $PORTS; do"
"RESULT=$(ovs-appctl fdb/show $br |"
"awk '$1 == '$port' {print $1" "$2" "$3}');"
"echo '$RESULT'"
"done;"
"done;")
output = None
try:
output = utils.ssh_remote_execute(self.host, user, pwd, cmd)
except:
return None
logging.debug("[scan][output] output is %s", output)
if not output:
return None
fields_arr = ['port', 'vlan', 'mac']
result = []
for line in output:
if not line or line == '\n':
continue
values_arr = line.split()
temp = {}
for field, value in zip(fields_arr, values_arr):
temp[field] = value
result.append(temp.copy())
return result
| {
"content_hash": "98b5fca19361f24202aa7e1cf4bb316f",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 76,
"avg_line_length": 30.676470588235293,
"alnum_prop": 0.5191754554170661,
"repo_name": "huawei-cloud/compass-core",
"id": "ef143e6144d9d10bf33593dc3cfbb30db9821989",
"size": "2086",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "compass/hdsdiscovery/vendors/ovswitch/plugins/mac.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "396968"
},
{
"name": "Shell",
"bytes": "28585"
}
],
"symlink_target": ""
} |
import numpy as np
from common.realtime import sec_since_boot, DT_MDL
from common.numpy_fast import interp
from system.swaglog import cloudlog
from selfdrive.controls.lib.lateral_mpc_lib.lat_mpc import LateralMpc
from selfdrive.controls.lib.lateral_mpc_lib.lat_mpc import N as LAT_MPC_N
from selfdrive.controls.lib.drive_helpers import CONTROL_N, MIN_SPEED
from selfdrive.controls.lib.desire_helper import DesireHelper
import cereal.messaging as messaging
from cereal import log
TRAJECTORY_SIZE = 33
CAMERA_OFFSET = 0.04
PATH_COST = 1.0
LATERAL_MOTION_COST = 0.11
LATERAL_ACCEL_COST = 0.0
LATERAL_JERK_COST = 0.05
# Extreme steering rate is unpleasant, even
# when it does not cause bad jerk.
# TODO this cost should be lowered when low
# speed lateral control is stable on all cars
STEERING_RATE_COST = 800.0
class LateralPlanner:
def __init__(self, CP):
self.DH = DesireHelper()
# Vehicle model parameters used to calculate lateral movement of car
self.factor1 = CP.wheelbase - CP.centerToFront
self.factor2 = (CP.centerToFront * CP.mass) / (CP.wheelbase * CP.tireStiffnessRear)
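    # factor1 is the CG-to-rear-axle distance (wheelbase - centerToFront);
    # update() subtracts the speed-dependent factor2 * v_ego**2 term from it
    # to form the lateral factor passed to the MPC.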
self.last_cloudlog_t = 0
self.solution_invalid_cnt = 0
self.path_xyz = np.zeros((TRAJECTORY_SIZE, 3))
self.plan_yaw = np.zeros((TRAJECTORY_SIZE,))
self.plan_yaw_rate = np.zeros((TRAJECTORY_SIZE,))
self.t_idxs = np.arange(TRAJECTORY_SIZE)
self.y_pts = np.zeros(TRAJECTORY_SIZE)
self.lat_mpc = LateralMpc()
self.reset_mpc(np.zeros(4))
def reset_mpc(self, x0=np.zeros(4)):
self.x0 = x0
self.lat_mpc.reset(x0=self.x0)
def update(self, sm):
    # clip speed; lateral planning is not possible at 0 speed
self.v_ego = max(MIN_SPEED, sm['carState'].vEgo)
measured_curvature = sm['controlsState'].curvature
# Parse model predictions
md = sm['modelV2']
if len(md.position.x) == TRAJECTORY_SIZE and len(md.orientation.x) == TRAJECTORY_SIZE:
self.path_xyz = np.column_stack([md.position.x, md.position.y, md.position.z])
self.t_idxs = np.array(md.position.t)
self.plan_yaw = np.array(md.orientation.z)
self.plan_yaw_rate = np.array(md.orientationRate.z)
# Lane change logic
desire_state = md.meta.desireState
if len(desire_state):
self.l_lane_change_prob = desire_state[log.LateralPlan.Desire.laneChangeLeft]
self.r_lane_change_prob = desire_state[log.LateralPlan.Desire.laneChangeRight]
lane_change_prob = self.l_lane_change_prob + self.r_lane_change_prob
self.DH.update(sm['carState'], sm['carControl'].latActive, lane_change_prob)
d_path_xyz = self.path_xyz
self.lat_mpc.set_weights(PATH_COST, LATERAL_MOTION_COST,
LATERAL_ACCEL_COST, LATERAL_JERK_COST,
STEERING_RATE_COST)
y_pts = np.interp(self.v_ego * self.t_idxs[:LAT_MPC_N + 1], np.linalg.norm(d_path_xyz, axis=1), d_path_xyz[:, 1])
heading_pts = np.interp(self.v_ego * self.t_idxs[:LAT_MPC_N + 1], np.linalg.norm(self.path_xyz, axis=1), self.plan_yaw)
yaw_rate_pts = np.interp(self.v_ego * self.t_idxs[:LAT_MPC_N + 1], np.linalg.norm(self.path_xyz, axis=1), self.plan_yaw_rate)
self.y_pts = y_pts
assert len(y_pts) == LAT_MPC_N + 1
assert len(heading_pts) == LAT_MPC_N + 1
assert len(yaw_rate_pts) == LAT_MPC_N + 1
lateral_factor = max(0, self.factor1 - (self.factor2 * self.v_ego**2))
p = np.array([self.v_ego, lateral_factor])
self.lat_mpc.run(self.x0,
p,
y_pts,
heading_pts,
yaw_rate_pts)
# init state for next iteration
# mpc.u_sol is the desired second derivative of psi given x0 curv state.
# with x0[3] = measured_yaw_rate, this would be the actual desired yaw rate.
# instead, interpolate x_sol so that x0[3] is the desired yaw rate for lat_control.
self.x0[3] = interp(DT_MDL, self.t_idxs[:LAT_MPC_N + 1], self.lat_mpc.x_sol[:, 3])
# Check for infeasible MPC solution
mpc_nans = np.isnan(self.lat_mpc.x_sol[:, 3]).any()
t = sec_since_boot()
if mpc_nans or self.lat_mpc.solution_status != 0:
self.reset_mpc()
self.x0[3] = measured_curvature * self.v_ego
if t > self.last_cloudlog_t + 5.0:
self.last_cloudlog_t = t
cloudlog.warning("Lateral mpc - nan: True")
if self.lat_mpc.cost > 20000. or mpc_nans:
self.solution_invalid_cnt += 1
else:
self.solution_invalid_cnt = 0
def publish(self, sm, pm):
plan_solution_valid = self.solution_invalid_cnt < 2
plan_send = messaging.new_message('lateralPlan')
plan_send.valid = sm.all_checks(service_list=['carState', 'controlsState', 'modelV2'])
lateralPlan = plan_send.lateralPlan
lateralPlan.modelMonoTime = sm.logMonoTime['modelV2']
lateralPlan.dPathPoints = self.y_pts.tolist()
lateralPlan.psis = self.lat_mpc.x_sol[0:CONTROL_N, 2].tolist()
lateralPlan.curvatures = (self.lat_mpc.x_sol[0:CONTROL_N, 3]/self.v_ego).tolist()
lateralPlan.curvatureRates = [float(x/self.v_ego) for x in self.lat_mpc.u_sol[0:CONTROL_N - 1]] + [0.0]
lateralPlan.mpcSolutionValid = bool(plan_solution_valid)
lateralPlan.solverExecutionTime = self.lat_mpc.solve_time
lateralPlan.desire = self.DH.desire
lateralPlan.useLaneLines = False
lateralPlan.laneChangeState = self.DH.lane_change_state
lateralPlan.laneChangeDirection = self.DH.lane_change_direction
pm.send('lateralPlan', plan_send)
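if __name__ == '__main__':
  # Minimal sketch (not part of the planner) of how the MPC setpoints above are
  # built: np.interp maps the distance the car will have travelled at each MPC
  # node time onto the model path, using each path point's distance from the
  # origin as the interpolation axis. All numbers below are made up.
  t_nodes = np.arange(LAT_MPC_N + 1) * 0.1  # hypothetical node times [s]
  v_ego = 10.0                              # hypothetical speed [m/s]
  path_xyz = np.column_stack([np.linspace(0., 30., TRAJECTORY_SIZE),
                              np.linspace(0., 1.5, TRAJECTORY_SIZE),
                              np.zeros(TRAJECTORY_SIZE)])
  y_setpoints = np.interp(v_ego * t_nodes, np.linalg.norm(path_xyz, axis=1), path_xyz[:, 1])
  print(y_setpoints)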
| {
"content_hash": "06e40bfecb006e121fae182af80a41ff",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 129,
"avg_line_length": 41.015037593984964,
"alnum_prop": 0.6731439046746105,
"repo_name": "commaai/openpilot",
"id": "932ad49535370d6ec8e443790221bbdac59d1dc0",
"size": "5455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "selfdrive/controls/lib/lateral_planner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "604924"
},
{
"name": "C++",
"bytes": "1125553"
},
{
"name": "Cython",
"bytes": "50503"
},
{
"name": "Dockerfile",
"bytes": "1239"
},
{
"name": "Emacs Lisp",
"bytes": "124"
},
{
"name": "HTML",
"bytes": "11493"
},
{
"name": "Kaitai Struct",
"bytes": "8093"
},
{
"name": "MATLAB",
"bytes": "35190"
},
{
"name": "Makefile",
"bytes": "14018"
},
{
"name": "Python",
"bytes": "2386885"
},
{
"name": "QML",
"bytes": "1132"
},
{
"name": "Shell",
"bytes": "32876"
}
],
"symlink_target": ""
} |
import json
import types
import pytest
from indy.did import create_and_store_my_did
from indy.ledger import build_nym_request
from plenum.common.constants import NYM, STEWARD, ROLE, VERKEY
from plenum.common.exceptions import UnauthorizedClientRequest, RequestNackedException, CouldNotAuthenticate
from plenum.common.txn_util import get_request_data
from plenum.common.util import randomString
from plenum.server.request_handlers.utils import get_nym_details
from plenum.test.helper import sdk_get_and_check_replies
from plenum.test.pool_transactions.helper import sdk_sign_and_send_prepared_request
NEW_ROLE = None
@pytest.fixture(scope='module')
def patch_nym_validation(txnPoolNodeSet):
    # Disable dynamic validation, keeping only the steward-threshold check
def patched_dynamic_validation(self, request, req_pp_time):
self._validate_request_type(request)
identifier, req_id, operation = get_request_data(request)
error = None
if operation.get(ROLE) == STEWARD:
if self._steward_threshold_exceeded(self.config):
error = "New stewards cannot be added by other stewards " \
"as there are already {} stewards in the system". \
format(self.config.stewardThreshold)
if error:
raise UnauthorizedClientRequest(identifier,
req_id,
error)
for n in txnPoolNodeSet:
n.write_manager.request_handlers[NYM][0].dynamic_validation = types.MethodType(patched_dynamic_validation,
n.write_manager.request_handlers[
NYM][0])
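def _method_type_patch_sketch():
    """Minimal sketch (not used by the tests) of the types.MethodType pattern
    applied in the fixture above: rebinding a plain function as a bound method
    of an existing object so that `self` refers to that object. All names here
    are made up for illustration."""
    class Handler:
        def dynamic_validation(self, request, req_pp_time):
            return 'original'
    def patched(self, request, req_pp_time):
        return 'patched on {}'.format(type(self).__name__)
    handler = Handler()
    handler.dynamic_validation = types.MethodType(patched, handler)
    assert handler.dynamic_validation(None, None) == 'patched on Handler'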
@pytest.fixture(scope='function')
def nym_txn_data(looper, sdk_wallet_client):
seed = randomString(32)
wh, _ = sdk_wallet_client
sender_did, sender_verkey = \
looper.loop.run_until_complete(create_and_store_my_did(wh, json.dumps({'seed': seed})))
return wh, randomString(5), sender_did, sender_verkey
def test_create_did_without_endorser(looper, txnPoolNodeSet, nym_txn_data, sdk_pool_handle, patch_nym_validation):
wh, alias, sender_did, sender_verkey = nym_txn_data
nym_request = looper.loop.run_until_complete(
build_nym_request(sender_did, sender_did, sender_verkey, alias, NEW_ROLE))
request_couple = sdk_sign_and_send_prepared_request(looper, (wh, sender_did), sdk_pool_handle, nym_request)
sdk_get_and_check_replies(looper, [request_couple])
details = get_nym_details(txnPoolNodeSet[0].states[1], sender_did, is_committed=True)
assert details[ROLE] == NEW_ROLE
assert details[VERKEY] == sender_verkey
def test_create_did_without_endorser_empty_verkey(looper, nym_txn_data, sdk_wallet_client, sdk_pool_handle,
patch_nym_validation):
wh, alias, sender_did, sender_verkey = nym_txn_data
nym_request = looper.loop.run_until_complete(build_nym_request(sender_did, sender_did, None, alias, NEW_ROLE))
request_couple = sdk_sign_and_send_prepared_request(looper, (wh, sender_did), sdk_pool_handle, nym_request)
with pytest.raises(RequestNackedException, match=CouldNotAuthenticate.reason.format(sender_did)):
sdk_get_and_check_replies(looper, [request_couple])
def test_create_did_without_endorser_different_dest(looper, nym_txn_data, sdk_wallet_client, sdk_pool_handle,
patch_nym_validation):
wh, alias, sender_did, sender_verkey = nym_txn_data
nym_request = looper.loop.run_until_complete(
build_nym_request(sender_did, sdk_wallet_client[1], sender_verkey, alias, NEW_ROLE))
request_couple = sdk_sign_and_send_prepared_request(looper, (wh, sender_did), sdk_pool_handle, nym_request)
with pytest.raises(RequestNackedException, match=CouldNotAuthenticate.reason.format(sender_did)):
sdk_get_and_check_replies(looper, [request_couple])
| {
"content_hash": "865fdb7857a9f15dfdfe63d24658c9d2",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 120,
"avg_line_length": 46.60919540229885,
"alnum_prop": 0.6626387176325524,
"repo_name": "evernym/zeno",
"id": "5e4ac88ce6fd61e5ccd745359c5ac72133b54914",
"size": "4055",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "plenum/test/signing/test_create_did_without_endorser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "531061"
}
],
"symlink_target": ""
} |
"""Test AsyncIOMotorCursor."""
import asyncio
import sys
import traceback
import unittest
import warnings
from functools import partial
from unittest import SkipTest
import bson
from pymongo import CursorType
from pymongo.errors import InvalidOperation, ExecutionTimeout
from pymongo.errors import OperationFailure
from motor import motor_asyncio
from test.utils import one, safe_get, get_primary_pool, TestListener
from test.asyncio_tests import (asyncio_test,
AsyncIOTestCase,
AsyncIOMockServerTestCase,
server_is_mongos,
get_command_line)
class TestAsyncIOCursor(AsyncIOMockServerTestCase):
def test_cursor(self):
cursor = self.collection.find()
self.assertTrue(isinstance(cursor, motor_asyncio.AsyncIOMotorCursor))
self.assertFalse(cursor.started, "Cursor shouldn't start immediately")
@asyncio_test
def test_count(self):
yield from self.make_test_data()
coll = self.collection
self.assertEqual(
100,
(yield from coll.count_documents({'_id': {'$gt': 99}})))
@asyncio_test
def test_fetch_next(self):
yield from self.make_test_data()
coll = self.collection
# 200 results, only including _id field, sorted by _id.
cursor = coll.find({}, {'_id': 1}).sort('_id').batch_size(75)
self.assertEqual(None, cursor.cursor_id)
self.assertEqual(None, cursor.next_object()) # Haven't fetched yet.
i = 0
while (yield from cursor.fetch_next):
self.assertEqual({'_id': i}, cursor.next_object())
i += 1
# With batch_size 75 and 200 results, cursor should be exhausted on
# the server by third fetch.
if i <= 150:
self.assertNotEqual(0, cursor.cursor_id)
else:
self.assertEqual(0, cursor.cursor_id)
self.assertEqual(False, (yield from cursor.fetch_next))
self.assertEqual(None, cursor.next_object())
self.assertEqual(0, cursor.cursor_id)
self.assertEqual(200, i)
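    # On Python 3.5+, application code would normally iterate the cursor with
    # ``async for`` instead of the fetch_next/next_object protocol exercised
    # above; that protocol is spelled out once here as a reusable sketch
    # (not used by the tests).
    @asyncio.coroutine
    def _fetch_all_sketch(self, cursor):
        docs = []
        while (yield from cursor.fetch_next):
            docs.append(cursor.next_object())
        return docs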
@unittest.skipUnless(sys.version_info >= (3, 4), "Python 3.4 required")
@unittest.skipIf('PyPy' in sys.version, "PyPy")
@asyncio_test
def test_fetch_next_delete(self):
client, server = self.client_server(auto_ismaster=True)
cursor = client.test.coll.find()
self.fetch_next(cursor)
request = yield from self.run_thread(server.receives, "find", "coll")
request.replies({"cursor": {
"id": 123,
"ns": "db.coll",
"firstBatch": [{"_id": 1}]}})
# Decref the cursor and clear from the event loop.
del cursor
yield
request = yield from self.run_thread(
server.receives, "killCursors", "coll")
request.ok()
@asyncio_test
def test_fetch_next_without_results(self):
coll = self.collection
# Nothing matches this query.
cursor = coll.find({'foo': 'bar'})
self.assertEqual(None, cursor.next_object())
self.assertEqual(False, (yield from cursor.fetch_next))
self.assertEqual(None, cursor.next_object())
# Now cursor knows it's exhausted.
self.assertEqual(0, cursor.cursor_id)
@asyncio_test
def test_fetch_next_is_idempotent(self):
# Subsequent calls to fetch_next don't do anything
yield from self.make_test_data()
coll = self.collection
cursor = coll.find()
self.assertEqual(None, cursor.cursor_id)
yield from cursor.fetch_next
self.assertTrue(cursor.cursor_id)
self.assertEqual(101, cursor._buffer_size())
yield from cursor.fetch_next # Does nothing
self.assertEqual(101, cursor._buffer_size())
yield from cursor.close()
@asyncio_test
def test_fetch_next_exception(self):
coll = self.collection
cursor = coll.find()
cursor.delegate._Cursor__id = 1234 # Not valid on server.
with self.assertRaises(OperationFailure):
yield from cursor.fetch_next
# Avoid the cursor trying to close itself when it goes out of scope.
cursor.delegate._Cursor__id = None
@asyncio_test(timeout=30)
def test_each(self):
yield from self.make_test_data()
cursor = self.collection.find({}, {'_id': 1}).sort('_id')
future = asyncio.Future(loop=self.loop)
results = []
def callback(result, error):
if error:
raise error
if result is not None:
results.append(result)
else:
# Done iterating.
future.set_result(True)
cursor.each(callback)
yield from future
expected = [{'_id': i} for i in range(200)]
self.assertEqual(expected, results)
@asyncio_test
def test_to_list_argument_checking(self):
# We need more than 10 documents so the cursor stays alive.
yield from self.make_test_data()
coll = self.collection
cursor = coll.find()
with self.assertRaises(ValueError):
yield from cursor.to_list(-1)
with self.assertRaises(TypeError):
yield from cursor.to_list('foo')
@asyncio_test
def test_to_list_with_length(self):
yield from self.make_test_data()
coll = self.collection
cursor = coll.find().sort('_id')
def expected(start, stop):
return [{'_id': i} for i in range(start, stop)]
self.assertEqual(expected(0, 10), (yield from cursor.to_list(10)))
self.assertEqual(expected(10, 100), (yield from cursor.to_list(90)))
# Test particularly rigorously around the 101-doc mark, since this is
# where the first batch ends
self.assertEqual(expected(100, 101), (yield from cursor.to_list(1)))
self.assertEqual(expected(101, 102), (yield from cursor.to_list(1)))
self.assertEqual(expected(102, 103), (yield from cursor.to_list(1)))
self.assertEqual([], (yield from cursor.to_list(0)))
self.assertEqual(expected(103, 105), (yield from cursor.to_list(2)))
# Only 95 docs left, make sure length=100 doesn't error or hang
self.assertEqual(expected(105, 200), (yield from cursor.to_list(100)))
self.assertEqual(0, cursor.cursor_id)
# Nothing left.
self.assertEqual([], (yield from cursor.to_list(100)))
yield from cursor.close()
@asyncio_test
def test_to_list_exc_info(self):
yield from self.make_test_data()
coll = self.collection
cursor = coll.find()
yield from cursor.to_list(length=10)
yield from self.collection.drop()
try:
yield from cursor.to_list(length=None)
except OperationFailure:
_, _, tb = sys.exc_info()
# The call tree should include PyMongo code we ran on a thread.
formatted = '\n'.join(traceback.format_tb(tb))
self.assertTrue('_unpack_response' in formatted
or '_check_command_response' in formatted)
@asyncio_test
def test_to_list_with_length_of_none(self):
yield from self.make_test_data()
collection = self.collection
cursor = collection.find()
docs = yield from cursor.to_list(None) # Unlimited.
count = yield from collection.count_documents({})
self.assertEqual(count, len(docs))
@asyncio_test
def test_to_list_tailable(self):
coll = self.collection
cursor = coll.find(cursor_type=CursorType.TAILABLE)
# Can't call to_list on tailable cursor.
with self.assertRaises(InvalidOperation):
yield from cursor.to_list(10)
@asyncio_test
def test_cursor_explicit_close(self):
client, server = self.client_server(auto_ismaster=True)
collection = client.test.coll
cursor = collection.find()
future = self.fetch_next(cursor)
self.assertTrue(cursor.alive)
request = yield from self.run_thread(server.receives, "find", "coll")
request.replies({"cursor": {
"id": 123,
"ns": "db.coll",
"firstBatch": [{"_id": 1}]}})
self.assertTrue((yield from future))
self.assertEqual(123, cursor.cursor_id)
future = self.ensure_future(cursor.close())
# No reply to OP_KILLCURSORS.
request = yield from self.run_thread(
server.receives, "killCursors", "coll")
request.ok()
yield from future
# Cursor reports it's alive because it has buffered data, even though
# it's killed on the server.
self.assertTrue(cursor.alive)
self.assertEqual({'_id': 1}, cursor.next_object())
self.assertFalse((yield from cursor.fetch_next))
self.assertFalse(cursor.alive)
@asyncio_test
def test_each_cancel(self):
yield from self.make_test_data()
loop = self.loop
collection = self.collection
results = []
future = asyncio.Future(loop=self.loop)
def cancel(result, error):
if error:
future.set_exception(error)
else:
results.append(result)
loop.call_soon(canceled)
return False # Cancel iteration.
def canceled():
try:
self.assertFalse(cursor.delegate._Cursor__killed)
self.assertTrue(cursor.alive)
# Resume iteration
cursor.each(each)
except Exception as e:
future.set_exception(e)
def each(result, error):
if error:
future.set_exception(error)
            elif result:
                results.append(result)
else:
# Complete
future.set_result(None)
cursor = collection.find()
cursor.each(cancel)
yield from future
self.assertEqual((yield from collection.count_documents({})),
len(results))
@asyncio_test
def test_rewind(self):
yield from self.collection.insert_many([{}, {}, {}])
cursor = self.collection.find().limit(2)
count = 0
while (yield from cursor.fetch_next):
cursor.next_object()
count += 1
self.assertEqual(2, count)
cursor.rewind()
count = 0
while (yield from cursor.fetch_next):
cursor.next_object()
count += 1
self.assertEqual(2, count)
cursor.rewind()
count = 0
while (yield from cursor.fetch_next):
cursor.next_object()
break
cursor.rewind()
while (yield from cursor.fetch_next):
cursor.next_object()
count += 1
self.assertEqual(2, count)
self.assertEqual(cursor, cursor.rewind())
@unittest.skipUnless(sys.version_info >= (3, 4), "Python 3.4 required")
@unittest.skipIf("PyPy" in sys.version, "PyPy")
@asyncio_test
def test_cursor_del(self):
client, server = self.client_server(auto_ismaster=True)
cursor = client.test.coll.find()
future = self.fetch_next(cursor)
request = yield from self.run_thread(server.receives, "find", "coll")
request.replies({"cursor": {
"id": 123,
"ns": "db.coll",
"firstBatch": [{"_id": 1}]}})
yield from future # Complete the first fetch.
# Dereference the cursor.
del cursor
# Let the event loop iterate once more to clear its references to
# callbacks, allowing the cursor to be freed.
yield from asyncio.sleep(0, loop=self.loop)
request = yield from self.run_thread(
server.receives, "killCursors", "coll")
request.ok()
@unittest.skipUnless(sys.version_info >= (3, 4), "Python 3.4 required")
@asyncio_test
def test_exhaust(self):
if (yield from server_is_mongos(self.cx)):
self.assertRaises(InvalidOperation,
self.db.test.find, cursor_type=CursorType.EXHAUST)
return
self.assertRaises(ValueError, self.db.test.find, cursor_type=5)
cur = self.db.test.find(cursor_type=CursorType.EXHAUST)
self.assertRaises(InvalidOperation, cur.limit, 5)
cur = self.db.test.find(limit=5)
self.assertRaises(InvalidOperation, cur.add_option, 64)
cur = self.db.test.find()
cur.add_option(64)
self.assertRaises(InvalidOperation, cur.limit, 5)
yield from self.db.drop_collection("test")
# Insert enough documents to require more than one batch.
yield from self.db.test.insert_many([{} for _ in range(150)])
client = self.asyncio_client(maxPoolSize=1)
# Ensure a pool.
yield from client.db.collection.find_one()
socks = get_primary_pool(client).sockets
# Make sure the socket is returned after exhaustion.
cur = client[self.db.name].test.find(cursor_type=CursorType.EXHAUST)
has_next = yield from cur.fetch_next
self.assertTrue(has_next)
self.assertEqual(0, len(socks))
while (yield from cur.fetch_next):
cur.next_object()
self.assertEqual(1, len(socks))
# Same as previous but with to_list instead of next_object.
docs = yield from client[self.db.name].test.find(
cursor_type=CursorType.EXHAUST).to_list(
None)
self.assertEqual(1, len(socks))
self.assertEqual(
(yield from self.db.test.count_documents({})),
len(docs))
# If the Cursor instance is discarded before being
# completely iterated we have to close and
# discard the socket.
sock = one(socks)
cur = client[self.db.name].test.find(
cursor_type=CursorType.EXHAUST).batch_size(1)
has_next = yield from cur.fetch_next
self.assertTrue(has_next)
self.assertEqual(0, len(socks))
if 'PyPy' in sys.version:
# Don't wait for GC or use gc.collect(), it's unreliable.
yield from cur.close()
del cur
yield from asyncio.sleep(0.1, loop=self.loop)
# The exhaust cursor's socket was discarded, although another may
# already have been opened to send OP_KILLCURSORS.
self.assertNotIn(sock, socks)
self.assertTrue(sock.closed)
@asyncio_test
def test_close_with_docs_in_batch(self):
# MOTOR-67 Killed cursor with docs batched is "alive", don't kill again.
yield from self.make_test_data() # Ensure multiple batches.
cursor = self.collection.find()
yield from cursor.fetch_next
yield from cursor.close() # Killed but still "alive": has a batch.
self.cx.close()
with warnings.catch_warnings(record=True) as w:
del cursor # No-op, no error.
self.assertEqual(0, len(w))
@asyncio_test
def test_aggregate_batch_size(self):
listener = TestListener()
cx = self.asyncio_client(event_listeners=[listener])
c = cx.motor_test.collection
yield from c.delete_many({})
yield from c.insert_many({'_id': i} for i in range(3))
# Two ways of setting batchSize.
cursor0 = c.aggregate([{'$sort': {'_id': 1}}]).batch_size(2)
cursor1 = c.aggregate([{'$sort': {'_id': 1}}], batchSize=2)
for cursor in cursor0, cursor1:
lst = []
while (yield from cursor.fetch_next):
lst.append(cursor.next_object())
self.assertEqual(lst, [{'_id': 0}, {'_id': 1}, {'_id': 2}])
aggregate = listener.first_command_started('aggregate')
self.assertEqual(aggregate.command['cursor']['batchSize'], 2)
getMore = listener.first_command_started('getMore')
self.assertEqual(getMore.command['batchSize'], 2)
@asyncio_test
def test_raw_batches(self):
c = self.collection
yield from c.delete_many({})
yield from c.insert_many({'_id': i} for i in range(4))
find = partial(c.find_raw_batches, {})
agg = partial(c.aggregate_raw_batches, [{'$sort': {'_id': 1}}])
for method in find, agg:
cursor = method().batch_size(2)
yield from cursor.fetch_next
batch = cursor.next_object()
self.assertEqual([{'_id': 0}, {'_id': 1}], bson.decode_all(batch))
lst = yield from method().batch_size(2).to_list(length=1)
self.assertEqual([{'_id': 0}, {'_id': 1}], bson.decode_all(lst[0]))
class TestAsyncIOCursorMaxTimeMS(AsyncIOTestCase):
def setUp(self):
super(TestAsyncIOCursorMaxTimeMS, self).setUp()
self.loop.run_until_complete(self.maybe_skip())
def tearDown(self):
self.loop.run_until_complete(self.disable_timeout())
super(TestAsyncIOCursorMaxTimeMS, self).tearDown()
@asyncio.coroutine
def maybe_skip(self):
if (yield from server_is_mongos(self.cx)):
raise SkipTest("mongos has no maxTimeAlwaysTimeOut fail point")
cmdline = yield from get_command_line(self.cx)
if '1' != safe_get(cmdline, 'parsed.setParameter.enableTestCommands'):
if 'enableTestCommands=1' not in cmdline['argv']:
raise SkipTest("testing maxTimeMS requires failpoints")
@asyncio.coroutine
def enable_timeout(self):
yield from self.cx.admin.command("configureFailPoint",
"maxTimeAlwaysTimeOut",
mode="alwaysOn")
@asyncio.coroutine
def disable_timeout(self):
yield from self.cx.admin.command("configureFailPoint",
"maxTimeAlwaysTimeOut",
mode="off")
@asyncio_test
def test_max_time_ms_query(self):
# Cursor parses server timeout error in response to initial query.
yield from self.enable_timeout()
cursor = self.collection.find().max_time_ms(100000)
with self.assertRaises(ExecutionTimeout):
yield from cursor.fetch_next
cursor = self.collection.find().max_time_ms(100000)
with self.assertRaises(ExecutionTimeout):
yield from cursor.to_list(10)
with self.assertRaises(ExecutionTimeout):
yield from self.collection.find_one(max_time_ms=100000)
@asyncio_test(timeout=60)
def test_max_time_ms_getmore(self):
# Cursor handles server timeout during getmore, also.
yield from self.collection.insert_many({} for _ in range(200))
try:
# Send initial query.
cursor = self.collection.find().max_time_ms(100000)
yield from cursor.fetch_next
cursor.next_object()
# Test getmore timeout.
yield from self.enable_timeout()
with self.assertRaises(ExecutionTimeout):
while (yield from cursor.fetch_next):
cursor.next_object()
yield from cursor.close()
# Send another initial query.
yield from self.disable_timeout()
cursor = self.collection.find().max_time_ms(100000)
yield from cursor.fetch_next
cursor.next_object()
# Test getmore timeout.
yield from self.enable_timeout()
with self.assertRaises(ExecutionTimeout):
yield from cursor.to_list(None)
# Avoid 'IOLoop is closing' warning.
yield from cursor.close()
finally:
# Cleanup.
yield from self.disable_timeout()
yield from self.collection.delete_many({})
@asyncio_test
def test_max_time_ms_each_query(self):
# Cursor.each() handles server timeout during initial query.
yield from self.enable_timeout()
cursor = self.collection.find().max_time_ms(100000)
future = asyncio.Future(loop=self.loop)
def callback(result, error):
if error:
future.set_exception(error)
elif not result:
# Done.
future.set_result(None)
with self.assertRaises(ExecutionTimeout):
cursor.each(callback)
yield from future
@asyncio_test(timeout=30)
def test_max_time_ms_each_getmore(self):
# Cursor.each() handles server timeout during getmore.
yield from self.collection.insert_many({} for _ in range(200))
try:
# Send initial query.
cursor = self.collection.find().max_time_ms(100000)
yield from cursor.fetch_next
cursor.next_object()
future = asyncio.Future(loop=self.loop)
def callback(result, error):
if error:
future.set_exception(error)
elif not result:
# Done.
future.set_result(None)
yield from self.enable_timeout()
with self.assertRaises(ExecutionTimeout):
cursor.each(callback)
yield from future
yield from cursor.close()
finally:
# Cleanup.
yield from self.disable_timeout()
yield from self.collection.delete_many({})
def test_iter(self):
# Iteration should be prohibited.
with self.assertRaises(TypeError):
for _ in self.db.test.find():
pass
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "091ffbe3cc3d68dd21ddc5e63e410157",
"timestamp": "",
"source": "github",
"line_count": 619,
"max_line_length": 80,
"avg_line_length": 35.26332794830372,
"alnum_prop": 0.5875939160711013,
"repo_name": "wujuguang/motor",
"id": "de1c5862c7c991b5f7eec3c2f61822b8838a2fa5",
"size": "22402",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/asyncio_tests/test_asyncio_cursor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "519031"
},
{
"name": "Shell",
"bytes": "2713"
}
],
"symlink_target": ""
} |
import json
import os
import sys
from unittest import mock
import unittest
import datetime
import dateutil
import dateutil.parser
from azure.cli.testsdk.scenario_tests import AllowLargeResponse
from azure.cli.testsdk.scenario_tests.const import MOCKED_TENANT_ID
from azure.cli.testsdk import ScenarioTest, MSGraphNameReplacer, MOCKED_USER_NAME
from knack.util import CLIError
from azure.cli.testsdk import ScenarioTest, LiveScenarioTest, ResourceGroupPreparer, KeyVaultPreparer
# This test example is from
# https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-add-app-roles-in-azure-ad-apps#example-user-app-role
TEST_APP_ROLES = '''[
{
"allowedMemberTypes": [
"User"
],
"displayName": "Writer",
"id": "d1c2ade8-0000-0000-0000-6d06b947c66f",
"isEnabled": true,
"description": "Writers Have the ability to create tasks.",
"value": "Writer"
},
{
"allowedMemberTypes": [
"Application"
],
"displayName": "ConsumerApps",
"id": "47fbb575-0000-0000-0000-0f7a6c30beac",
"isEnabled": true,
"description": "Consumer apps have access to the consumer data.",
"value": "Consumer"
}
]
'''
# This test example is from
# https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-optional-claims#configuring-optional-claims
TEST_OPTIONAL_CLAIMS = '''{
"idToken": [
{
"name": "auth_time",
"essential": false
}
],
"accessToken": [
{
"name": "ipaddr",
"essential": false
}
],
"saml2Token": [
{
"name": "upn",
"essential": false
},
{
"name": "extension_ab603c56068041afb2f6832e2a17e237_skypeId",
"source": "user",
"essential": false
}
]
}
'''
TEST_REQUIRED_RESOURCE_ACCESS = '''[
{
"resourceAccess": [
{
"id": "41094075-9dad-400e-a0bd-54e686782033",
"type": "Scope"
}
],
"resourceAppId": "797f4846-ba00-4fd7-ba43-dac1f8f63013"
},
{
"resourceAccess": [
{
"id": "c79f8feb-a9db-4090-85f9-90d820caa0eb",
"type": "Scope"
},
{
"id": "18a4783c-866b-4cc7-a460-3d5e5662c884",
"type": "Role"
}
],
"resourceAppId": "00000003-0000-0000-c000-000000000000"
}
]
'''
# This test example is from
# https://docs.microsoft.com/en-us/azure/active-directory/develop/workload-identity-federation-create-trust-github?tabs=microsoft-graph
TEST_FEDERATED_IDENTITY_CREDENTIAL = '''{
"name": "Testing",
"issuer": "https://token.actions.githubusercontent.com/",
"subject": "repo:octo-org/octo-repo:environment:Production",
"description": "Testing",
"audiences": [
"api://AzureADTokenExchange"
]
}
'''
# TODO: https://github.com/Azure/azure-cli/pull/13769 fails to work
# Cert created with
# openssl req -newkey rsa:2048 -nodes -keyout key.pem -x509 -days 10000 -out certificate.pem
TEST_CERTIFICATE = """
MIIDazCCAlOgAwIBAgIUIp5vybhHfKN+ZKL28AntYKhlKXkwDQYJKoZIhvcNAQEL
BQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMDA3MjIwNzE3NDdaFw00NzEy
MDgwNzE3NDdaMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw
HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEB
AQUAA4IBDwAwggEKAoIBAQDMa00H+/p4RP4Eo//1J81Wowo4y1SKOJHbJ6T/lZ73
5FzFX52gdQ/7HalJOwQdbha78RPGA7bXxEmyEo+q3w+IMYzrqboX5S9yf0v1DZvj
a/VEMtUsq79d7NUUEd+smkuqDxDHFIkMeMM8cXy6tc+TPbc28BkQQiKbzOEZDwy4
HPd7FCqCwwcZtgxfxFQx5A2DkAXtT53zQD8k1zY4UQWhkKDcgvINzQfYxJmUbXqH
27MuJuejhpWLjmwEFCQtMJMrEv44YmlDzmL64iN5HFckO65ikV9fe9g9EcR5acSY
2bsO8WyFYzTffVXFpFF011Vi4d/U0h4wSwj5KLMYMHkfAgMBAAGjUzBRMB0GA1Ud
DgQWBBQxgpSKG7fwIHEopaRA10GB8Z8SOTAfBgNVHSMEGDAWgBQxgpSKG7fwIHEo
paRA10GB8Z8SOTAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQAt
I5vbHGxVV3qRtd9PEFe9dUb9Yv9YIa5RUd5l795cgr6qyELfg3xTPZbNf1oUHpGX
NCfm1uqNTorIKOIEoTpA+STVwST/xcqzB6VjS31I/5IIrdK2NQenM+0DVJa+yGhX
+zI3+X3cO2YbyLSKBYqdMsqgnMS/ZC0NnrvigHgq2SC4Vzg8yz5rorjvLJ6ndeht
oWOtdCJKUTPihNh4e+GM2A7UNKdt5WKCiS/n/lShvm+8JEG2lXQmmxR6DOjdDyC4
/6tf7Ln7YoZZ0q6ICp04oMF6bvgGosdOkQATW4X97EmcfIBfHPX2w/Xn47np2rZr
lBMWCjI8gO6W8YQMu7AH""".replace('\n', '')
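def _inspect_test_certificate_sketch():
    """Minimal sketch (not used by the tests) of how a header-less base64 blob
    like TEST_CERTIFICATE can be inspected locally. It assumes a reasonably
    recent 'cryptography' package is available; illustrative only."""
    import textwrap
    from cryptography import x509
    pem = ('-----BEGIN CERTIFICATE-----\n'
           + '\n'.join(textwrap.wrap(TEST_CERTIFICATE, 64))
           + '\n-----END CERTIFICATE-----\n').encode()
    cert = x509.load_pem_x509_certificate(pem)
    return cert.subject.rfc4514_string(), cert.not_valid_after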
class GraphScenarioTestBase(ScenarioTest):
def tearDown(self):
# If self.kwargs contains appId, try best to delete the app.
for k, v in self.kwargs.items():
if k.startswith('app_id'):
try:
object_id = self.cmd("ad app show --id " + v).get_output_in_json()['id']
self.cmd("ad app delete --id " + v)
# Permanently delete item
# TODO: Add native commands for deleted items
self.cmd("az rest --method DELETE "
"--url https://graph.microsoft.com/v1.0/directory/deletedItems/" + object_id)
except:
pass
super().tearDown()
def _create_app(self):
self.kwargs['display_name'] = self.create_random_name(prefix='azure-cli-test', length=30)
result = self.cmd('ad app create --display-name {display_name}').get_output_in_json()
self.kwargs['app_id'] = result['appId']
def _create_sp(self):
self.kwargs['display_name'] = self.create_random_name(prefix='azure-cli-test', length=30)
result = self.cmd('ad app create --display-name {display_name}').get_output_in_json()
self.kwargs['app_id'] = result['appId']
self.cmd('ad sp create --id {app_id}').get_output_in_json()
def _get_signed_in_user(self):
account_info = self.cmd('account show').get_output_in_json()
if account_info['user']['type'] == 'user':
return account_info['user']['name']
return None
def _test_credential(self, object_type):
"""Test app/sp credential commands. Make sure app_id has been configured in self.kwargs."""
self.kwargs['object_type'] = object_type
# Test password
self.cmd('ad {object_type} credential reset --id {app_id} --append --years 2 --display-name key1',
checks=self.check('appId', '{app_id}'))
result = self.cmd('ad {object_type} credential list --id {app_id}',
checks=self.check('length([*])', 1)).get_output_in_json()
key_id = result[0]['keyId']
self.cmd('ad {object_type} credential reset --id {app_id} --append --display-name key2')
self.cmd('ad {object_type} credential list --id {app_id}', checks=[
self.check('length([*])', 2),
# Graph API reverses the order of insertion
self.check('[0].displayName', 'key2'),
self.check('[1].displayName', 'key1')
])
self.cmd('ad {object_type} credential delete --id {app_id} --key-id ' + key_id)
self.cmd('ad {object_type} credential list --id {app_id}', checks=self.check('length([*])', 1))
# try use --end-date
self.cmd('ad {object_type} credential reset --id {app_id} --end-date "2100-12-31T11:59:59+00:00"')
self.cmd('ad {object_type} credential list --id {app_id}',
checks=self.check('[0].endDateTime', '2100-12-31T11:59:59Z'))
self.cmd('ad {object_type} credential reset --id {app_id} --end-date "2100-12-31"')
self.cmd('ad {object_type} credential list --id {app_id}',
checks=self.check('[0].endDateTime', '2100-12-31T00:00:00Z'))
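def _iso8601_normalization_sketch():
    """Minimal sketch (not used by the tests) of the end-date handling checked
    in _test_credential above: an offset-aware ISO-8601 string and the 'Z'
    form returned by Graph refer to the same instant."""
    supplied = dateutil.parser.parse('2100-12-31T11:59:59+00:00')
    returned = dateutil.parser.parse('2100-12-31T11:59:59Z')
    assert supplied == returned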
class ApplicationScenarioTest(GraphScenarioTestBase):
def test_app_scenario(self):
"""
- Test creating application with its properties.
- Test creating application first and update its properties.
"""
display_name = self.create_random_name(prefix='azure-cli-test', length=30)
# identifierUris must be on verified domain
# https://docs.microsoft.com/en-us/azure/active-directory/develop/security-best-practices-for-app-registration#appid-uri-configuration
self.kwargs.update({
'display_name': display_name,
'identifier_uri': f'api://{display_name}',
'homepage': 'https://myapp.com/',
'web_redirect_uri_1': 'http://localhost/webtest1',
'web_redirect_uri_2': 'http://localhost/webtest2',
'public_client_redirect_uri_1': 'http://localhost/publicclienttest1',
'public_client_redirect_uri_2': 'http://localhost/publicclienttest2',
'key_value': TEST_CERTIFICATE,
'app_roles': TEST_APP_ROLES,
'optional_claims': TEST_OPTIONAL_CLAIMS,
'required_resource_accesses': TEST_REQUIRED_RESOURCE_ACCESS,
})
# Create
result = self.cmd(
'ad app create --display-name {display_name} '
'--identifier-uris {identifier_uri} '
'--is-fallback-public-client True '
'--sign-in-audience AzureADMultipleOrgs '
# web
'--web-home-page-url {homepage} '
'--web-redirect-uris {web_redirect_uri_1} {web_redirect_uri_2} '
'--enable-access-token-issuance true --enable-id-token-issuance true '
# publicClient
'--public-client-redirect-uris {public_client_redirect_uri_1} {public_client_redirect_uri_2} '
# keyCredentials
'--key-value {key_value} '
# JSON properties
"--app-roles '{app_roles}' "
"--optional-claims '{optional_claims}' "
"--required-resource-accesses '{required_resource_accesses}'",
checks=[
self.check('displayName', '{display_name}'),
self.check('identifierUris[0]', '{identifier_uri}'),
self.check('isFallbackPublicClient', True),
self.check('signInAudience', 'AzureADMultipleOrgs'),
self.check('web.homePageUrl', '{homepage}'),
self.check('web.redirectUris[0]', '{web_redirect_uri_1}'),
self.check('web.redirectUris[1]', '{web_redirect_uri_2}'),
self.check('web.implicitGrantSettings.enableIdTokenIssuance', True),
self.check('web.implicitGrantSettings.enableAccessTokenIssuance', True),
self.check('publicClient.redirectUris[0]', '{public_client_redirect_uri_1}'),
self.check('publicClient.redirectUris[1]', '{public_client_redirect_uri_2}'),
self.check('length(keyCredentials)', 1),
self.check('length(appRoles)', 2),
self.check('length(optionalClaims)', 3),
self.check('length(requiredResourceAccess)', 2)
]).get_output_in_json()
self.kwargs['app_id'] = result['appId']
self.cmd('ad app delete --id {app_id}')
self.cmd('ad app show --id {app_id}', expect_failure=True)
# Create, then update
display_name_2 = self.create_random_name(prefix='azure-cli-test', length=30)
display_name_3 = self.create_random_name(prefix='azure-cli-test', length=30)
self.kwargs.update({
'display_name_2': display_name_2,
'display_name_3': display_name_3,
'identifier_uri_3': f'api://{display_name_3}',
})
# Graph cannot create app with same identifierUris even after deleting the previous one. Still confirming with
# service team.
result = self.cmd('ad app create --display-name {display_name_2}').get_output_in_json()
self.kwargs['app_id'] = result['appId']
self.cmd(
'ad app update --id {app_id} --display-name {display_name_3} '
'--identifier-uris {identifier_uri_3} '
'--is-fallback-public-client True '
# signInAudience can't be PATCHed currently due to service issue. PATCH first fails with 404, then 500
# '--sign-in-audience AzureADMultipleOrgs '
# web
'--web-home-page-url {homepage} '
'--web-redirect-uris {web_redirect_uri_1} {web_redirect_uri_2} '
'--enable-access-token-issuance true --enable-id-token-issuance true '
# keyCredentials
'--key-value {key_value} '
# publicClient
'--public-client-redirect-uris {public_client_redirect_uri_1} {public_client_redirect_uri_2} '
"--app-roles '{app_roles}' "
"--optional-claims '{optional_claims}' "
"--required-resource-accesses '{required_resource_accesses}'"
)
self.cmd(
'ad app show --id {app_id}',
checks=[
self.check('displayName', '{display_name_3}'),
self.check('identifierUris[0]', '{identifier_uri_3}'),
self.check('isFallbackPublicClient', True),
# self.check('signInAudience', 'AzureADMultipleOrgs'),
self.check('web.homePageUrl', '{homepage}'),
# redirectUris doesn't preserve item order.
# self.check('web.redirectUris[0]', '{web_redirect_uri_1}'),
# self.check('web.redirectUris[1]', '{web_redirect_uri_2}'),
self.check('length(web.redirectUris)', 2),
self.check('web.implicitGrantSettings.enableIdTokenIssuance', True),
self.check('web.implicitGrantSettings.enableAccessTokenIssuance', True),
# self.check('publicClient.redirectUris[0]', '{public_client_redirect_uri_1}'),
# self.check('publicClient.redirectUris[1]', '{public_client_redirect_uri_2}'),
self.check('length(publicClient.redirectUris)', 2),
self.check('length(keyCredentials)', 1),
self.check('length(appRoles)', 2),
self.check('length(optionalClaims)', 3),
self.check('length(requiredResourceAccess)', 2)
]).get_output_in_json()
# Update with generic update
self.cmd('ad app update --id {app_id} --set isDeviceOnlyAuthSupported=true')
self.cmd('ad app show --id {app_id}', checks=self.check('isDeviceOnlyAuthSupported', True))
self.cmd('ad app delete --id {app_id}')
self.cmd('ad app show --id {app_id}', expect_failure=True)
def test_app_create_idempotent(self):
self.kwargs = {
'display_name': self.create_random_name('app', 20)
}
result = self.cmd("ad app create --display-name {display_name} --is-fallback-public-client true").get_output_in_json()
self.kwargs['app_id'] = result['appId']
self.cmd("ad app create --display-name {display_name} --is-fallback-public-client false",
checks=[self.check('isFallbackPublicClient', False),
self.check('appId', '{app_id}')])
def test_app_resolution(self):
"""Test application can be resolved with identifierUris, appId, or id."""
display_name = self.create_random_name(prefix='azure-cli-test', length=30)
self.kwargs.update({
'display_name': display_name,
'identifier_uri': f'api://{display_name}'
})
app = self.cmd('ad app create --display-name {display_name} '
'--identifier-uris {identifier_uri}').get_output_in_json()
self.kwargs['app_id'] = app['appId']
self.kwargs['id'] = app['id']
# Show with appId
self.cmd('ad app show --id {app_id}', checks=[self.check('displayName', '{display_name}')])
# Show with id
self.cmd('ad app show --id {id}', checks=[self.check('displayName', '{display_name}')])
# Show with identifierUris
self.cmd('ad app show --id {identifier_uri}', checks=[self.check('displayName', '{display_name}')])
self.cmd('ad app delete --id {app_id}')
def test_app_show_exit_code(self):
with self.assertRaises(SystemExit):
self.assertEqual(self.cmd('ad app show --id non-exist-identifierUris').exit_code, 3)
self.assertEqual(self.cmd('ad app show --id 00000000-0000-0000-0000-000000000000').exit_code, 3)
def test_app_credential(self):
self._create_app()
self._test_credential('app')
def test_app_owner(self):
owner = self._get_signed_in_user()
if not owner:
            return  # this test deletes users, which is beyond a SP's capacity, so quit.
self.kwargs = {
'owner': owner,
'display_name': self.create_random_name('azure-cli-test', 30)
}
self.recording_processors.append(MSGraphNameReplacer(owner, 'example@example.com'))
self.kwargs['owner_object_id'] = self.cmd('ad user show --id {owner}').get_output_in_json()['id']
self.kwargs['app_id'] = self.cmd('ad app create --display-name {display_name}').get_output_in_json()['appId']
self.cmd('ad app owner add --owner-object-id {owner_object_id} --id {app_id}')
self.cmd('ad app owner add --owner-object-id {owner_object_id} --id {app_id}') # test idempotence
self.cmd('ad app owner list --id {app_id}', checks=self.check('[0].userPrincipalName', owner))
self.cmd('ad app owner remove --owner-object-id {owner_object_id} --id {app_id}')
self.cmd('ad app owner list --id {app_id}', checks=self.check('length([*])', 0))
@AllowLargeResponse()
def test_app_permission(self):
if not self._get_signed_in_user():
return
self.kwargs = {
'display_name': self.create_random_name('cli-app-', 15),
# Microsoft Graph
'microsoft_graph_api': '00000003-0000-0000-c000-000000000000',
            # Azure Service Management API
'azure_service_management_api': '797f4846-ba00-4fd7-ba43-dac1f8f63013',
}
# Look up for permission IDs
graph_sp = self.cmd('ad sp show --id {microsoft_graph_api}').get_output_in_json()
        # Delegated permission Application.Read.All
self.kwargs['microsoft_graph_permission1'] = _get_id_from_value(
graph_sp['oauth2PermissionScopes'], 'Application.Read.All')
# Application permission Application.ReadWrite.OwnedBy
self.kwargs['microsoft_graph_permission2'] = _get_id_from_value(
graph_sp['appRoles'], 'Application.ReadWrite.OwnedBy')
arm_sp = self.cmd('ad sp show --id {azure_service_management_api}').get_output_in_json()
# Delegated permission user_impersonation
self.kwargs['azure_service_management_permission'] = _get_id_from_value(
arm_sp['oauth2PermissionScopes'],'user_impersonation')
result = self.cmd('ad sp create-for-rbac --name {display_name}').get_output_in_json()
self.kwargs['app_id'] = result['appId']
# Test add permissions using a list
self.cmd('ad app permission add --id {app_id} '
'--api {microsoft_graph_api} '
'--api-permissions {microsoft_graph_permission1}=Scope {microsoft_graph_permission2}=Role')
self.cmd('ad app permission add --id {app_id} --api {azure_service_management_api} '
'--api-permissions {azure_service_management_permission}=Scope')
permissions = self.cmd(
'ad app permission list --id {app_id}', checks=[self.check('length([*])', 2)]).get_output_in_json()
# Sample result (required_resource_access):
# "requiredResourceAccess": [
# {
# "resourceAccess": [
# {
# "id": "41094075-9dad-400e-a0bd-54e686782033",
# "type": "Scope"
# }
# ],
# "resourceAppId": "797f4846-ba00-4fd7-ba43-dac1f8f63013"
# },
# {
# "resourceAccess": [
# {
# "id": "c79f8feb-a9db-4090-85f9-90d820caa0eb",
# "type": "Scope"
# },
# {
# "id": "18a4783c-866b-4cc7-a460-3d5e5662c884",
# "type": "Role"
# }
# ],
# "resourceAppId": "00000003-0000-0000-c000-000000000000"
# }
# ],
microsoft_graph_permission1_object = {
"id": self.kwargs['microsoft_graph_permission1'],
"type": "Scope"}
microsoft_graph_permission2_object = {
"id": self.kwargs['microsoft_graph_permission2'],
"type": "Role"}
azure_service_management_permission_object = {
"id": self.kwargs['azure_service_management_permission'],
"type": "Scope"}
def get_required_resource_access(required_resource_access_list, resource_app_id):
"""Search for the RequiredResourceAccess from required_resource_access(list) by resourceAppId."""
return next(
filter(lambda a: a['resourceAppId'] == resource_app_id, required_resource_access_list), None)
microsoft_graph_api_object = get_required_resource_access(permissions, self.kwargs['microsoft_graph_api'])
azure_service_management_api_object = get_required_resource_access(
permissions, self.kwargs['azure_service_management_api'])
# Check initial `permission add` is correct
self.assertEqual(microsoft_graph_api_object['resourceAccess'],
[microsoft_graph_permission1_object, microsoft_graph_permission2_object])
self.assertEqual(azure_service_management_api_object['resourceAccess'],
[azure_service_management_permission_object])
# Test delete first permission (ResourceAccess) from microsoft_graph_api.
self.cmd('ad app permission delete --id {app_id} '
'--api {microsoft_graph_api} --api-permissions {microsoft_graph_permission1}')
permissions = self.cmd('ad app permission list --id {app_id}').get_output_in_json()
microsoft_graph_api_object = get_required_resource_access(permissions, self.kwargs['microsoft_graph_api'])
# microsoft_graph_permission1 (ResourceAccess) is deleted and
# microsoft_graph_permission2 (ResourceAccess) remains
self.assertEqual(microsoft_graph_api_object['resourceAccess'], [microsoft_graph_permission2_object])
# Test delete remaining permission (ResourceAccess) from microsoft_graph_api.
self.cmd('ad app permission delete --id {app_id} '
'--api {microsoft_graph_api} --api-permissions {microsoft_graph_permission2}')
permissions = self.cmd('ad app permission list --id {app_id}').get_output_in_json()
microsoft_graph_api_object = get_required_resource_access(permissions, self.kwargs['microsoft_graph_api'])
# microsoft_graph_api (RequiredResourceAccess) is removed automatically
self.assertIsNone(microsoft_graph_api_object)
# Add back microsoft_graph_permission1 and microsoft_graph_permission2
self.cmd('ad app permission add --id {app_id} '
'--api {microsoft_graph_api} '
'--api-permissions {microsoft_graph_permission1}=Scope {microsoft_graph_permission2}=Role')
# Delete both microsoft_graph_permission1 and microsoft_graph_permission2 at the same time
self.cmd('ad app permission delete --id {app_id} '
'--api {microsoft_graph_api} '
'--api-permissions {microsoft_graph_permission1} {microsoft_graph_permission2}')
permissions = self.cmd('ad app permission list --id {app_id}').get_output_in_json()
microsoft_graph_api_object = get_required_resource_access(permissions, self.kwargs['microsoft_graph_api'])
# microsoft_graph_api (RequiredResourceAccess) is removed automatically
self.assertIsNone(microsoft_graph_api_object)
# Test delete 1 api azure_service_management_api (RequiredResourceAccess)
self.cmd('ad app permission delete --id {app_id} --api {azure_service_management_api}')
permissions = self.cmd('ad app permission list --id {app_id}').get_output_in_json()
azure_service_management_api_object = get_required_resource_access(permissions, self.kwargs['azure_service_management_api'])
self.assertIsNone(azure_service_management_api_object)
# Test delete non-existing api
self.cmd('ad app permission delete --id {app_id} --api 11111111-0000-0000-c000-000000000000')
permissions = self.cmd('ad app permission list --id {app_id}').get_output_in_json()
self.assertEqual(permissions, [])
# Test delete api permission from non-existing api
self.cmd('ad app permission delete --id {app_id} '
'--api 11111111-0000-0000-c000-000000000000 '
'--api-permissions {microsoft_graph_permission1} {microsoft_graph_permission2}')
permissions = self.cmd('ad app permission list --id {app_id}').get_output_in_json()
self.assertEqual(permissions, [])
# Test delete non-existing api permission from existing api
self.cmd('ad app permission add --id {app_id} '
'--api {microsoft_graph_api} '
'--api-permissions {microsoft_graph_permission1}=Scope {microsoft_graph_permission2}=Role')
self.cmd('ad app permission delete --id {app_id} '
'--api {microsoft_graph_api} --api-permissions 22222222-0000-0000-c000-000000000000')
permissions = self.cmd('ad app permission list --id {app_id}').get_output_in_json()
microsoft_graph_api_object = get_required_resource_access(permissions, self.kwargs['microsoft_graph_api'])
self.assertEqual(microsoft_graph_api_object['resourceAccess'],
[microsoft_graph_permission1_object, microsoft_graph_permission2_object])
# Test permission type '=Scope' is missing
from azure.cli.core.azclierror import ArgumentUsageError
with self.assertRaisesRegex(ArgumentUsageError, 'both permission id and type'):
self.cmd('ad app permission add --id {app_id} '
'--api {microsoft_graph_api} '
'--api-permissions {microsoft_graph_permission1}')
@AllowLargeResponse()
def test_app_permission_grant(self):
if not self._get_signed_in_user():
            return  # this test deletes users, which is beyond a SP's capacity, so quit...
self.kwargs = {
'display_name': self.create_random_name('cli-app-', 15),
'microsoft_graph_api': '00000003-0000-0000-c000-000000000000',
'microsoft_graph_permission_value1': 'Directory.Read.All', # Delegated permission
'microsoft_graph_permission_value2': 'Application.Read.All', # Delegated permission
# 'microsoft_graph_permission_value3': 'Application.ReadWrite.OwnedBy' # Application permission
}
# Look up for permission IDs
graph_sp = self.cmd('ad sp show --id {microsoft_graph_api}').get_output_in_json()
self.kwargs['microsoft_graph_sp_id'] = graph_sp['id']
self.kwargs['microsoft_graph_permission1'] = _get_id_from_value(
graph_sp['oauth2PermissionScopes'], self.kwargs['microsoft_graph_permission_value1'])
self.kwargs['microsoft_graph_permission2'] = _get_id_from_value(
graph_sp['oauth2PermissionScopes'], self.kwargs['microsoft_graph_permission_value2'])
# TODO: support and test application permissions
# self.kwargs['microsoft_graph_permission3'] = _get_id_from_value(
# graph_sp['appRoles'], self.kwargs['microsoft_graph_permission_value2'])
result = self.cmd('ad sp create-for-rbac --name {display_name}').get_output_in_json()
self.kwargs['app_id'] = result['appId']
self.cmd('ad app permission add --id {app_id} '
'--api {microsoft_graph_api} --api-permissions {microsoft_graph_permission1}=Scope')
# Add permissions
permissions = self.cmd('ad app permission list --id {app_id}', checks=[
self.check('length([*])', 1)
]).get_output_in_json()
assert permissions[0]['resourceAppId'] == '00000003-0000-0000-c000-000000000000'
assert permissions[0]['resourceAccess'][0]['id'] == self.kwargs['microsoft_graph_permission1']
assert permissions[0]['resourceAccess'][0]['type'] == 'Scope'
# Grant permissions
self.cmd('ad app permission grant --id {app_id} --api {microsoft_graph_api} '
'--scope {microsoft_graph_permission_value1}')
grants = self.cmd('ad app permission list-grants --id {app_id} --show-resource-name').get_output_in_json()
assert len(grants) == 1
assert grants[0]['resourceId'] == self.kwargs['microsoft_graph_sp_id']
assert grants[0]['resourceDisplayName'] == "Microsoft Graph"
assert grants[0]['scope'] == self.kwargs['microsoft_graph_permission_value1']
# Add a second permission
self.cmd('ad app permission add --id {app_id} '
'--api {microsoft_graph_api} --api-permissions {microsoft_graph_permission2}=Scope')
# Grant permissions
self.cmd('ad app permission grant --id {app_id} --api {microsoft_graph_api} '
'--scope {microsoft_graph_permission_value1} {microsoft_graph_permission_value2}')
grants = self.cmd('ad app permission list-grants --id {app_id} --show-resource-name').get_output_in_json()
assert len(grants) == 1
assert grants[0]['scope'] == (self.kwargs['microsoft_graph_permission_value1'] + " " +
self.kwargs['microsoft_graph_permission_value2'])
self.cmd('ad app permission delete --id {app_id} --api {microsoft_graph_api}')
self.cmd('ad app permission list --id {app_id}', checks=self.check('length([*])', 0))
def test_app_federated_credential(self):
self._create_app()
self.kwargs['parameters'] = TEST_FEDERATED_IDENTITY_CREDENTIAL
self.kwargs['name'] = 'Testing'
# Create credential
result = self.cmd("ad app federated-credential create --id {app_id} --parameters '{parameters}'",
checks=[self.check('name', '{name}')]).get_output_in_json()
self.kwargs['credential_id'] = result['id']
# List credential
self.cmd("ad app federated-credential list --id {app_id}",
checks=[self.check('length(@)', 1)])
# Show credential with credential ID
self.cmd("ad app federated-credential show --id {app_id} --federated-credential-id {credential_id}",
checks=[self.check('name', '{name}')])
# Show with credential name
self.cmd("ad app federated-credential show --id {app_id} --federated-credential-id {name}",
checks=[self.check('name', '{name}')])
# Update credential's subject
update_subject = "repo:octo-org/octo-repo:environment:Staging"
self.kwargs['update_json'] = json.dumps({'subject': update_subject})
self.cmd("ad app federated-credential update --id {app_id} --federated-credential-id {credential_id} "
"--parameters '{update_json}'")
self.cmd("ad app federated-credential show --id {app_id} --federated-credential-id {credential_id}",
checks=self.check('subject', update_subject))
# Delete credential
self.cmd("ad app federated-credential delete --id {app_id} --federated-credential-id {credential_id}")
self.cmd("ad app federated-credential list --id {app_id}",
checks=[self.check('length(@)', 0)])
class ServicePrincipalScenarioTest(GraphScenarioTestBase):
def test_service_principal_scenario(self):
"""
- Test service principal creation.
- Test service principal can be resolved with servicePrincipalNames (appId and identifierUris) or id.
"""
display_name = self.create_random_name(prefix='azure-cli-test', length=30)
self.kwargs.update({
'display_name': display_name,
'identifier_uri': f'api://{display_name}'
})
# Create
app = self.cmd('ad app create --display-name {display_name} '
'--identifier-uris {identifier_uri}').get_output_in_json()
self.kwargs['app_id'] = app['appId']
sp = self.cmd('ad sp create --id {app_id}',
checks=[
self.check('appId', app['appId']),
self.check('appDisplayName', app['displayName']),
self.check('servicePrincipalNames[0]', '{app_id}')
]).get_output_in_json()
self.kwargs['id'] = sp['id']
# Show with appId as one of servicePrincipalNames
self.cmd('ad sp show --id {app_id}')
# Show with identifierUri as one of servicePrincipalNames
self.cmd('ad sp show --id {identifier_uri}')
# Show with id
self.cmd('ad sp show --id {id}')
# Update with generic update
self.cmd('ad sp update --id {id} --set appRoleAssignmentRequired=true')
self.cmd('ad sp show --id {id}', checks=self.check('appRoleAssignmentRequired', True))
self.cmd('ad sp delete --id {app_id}')
self.cmd('ad app delete --id {app_id}')
self.cmd('ad sp show --id {app_id}', expect_failure=True)
self.cmd('ad app show --id {app_id}', expect_failure=True)
def test_sp_show_exit_code(self):
with self.assertRaises(SystemExit):
self.assertEqual(self.cmd('ad sp show --id non-exist-sp-name').exit_code, 3)
self.assertEqual(self.cmd('ad sp show --id 00000000-0000-0000-0000-000000000000').exit_code, 3)
def test_sp_owner(self):
display_name = self.create_random_name(prefix='azure-cli-test', length=30)
self.kwargs.update({
'display_name': display_name,
'identifier_uri': f'api://{display_name}'
})
app = self.cmd('ad app create --display-name {display_name}').get_output_in_json()
self.kwargs['app_id'] = app['appId']
self.cmd('ad sp create --id {app_id}').get_output_in_json()
# We don't support create, remove yet
self.cmd('ad sp owner list --id {app_id}', checks=self.check('length(@)', 0))
def test_sp_credential(self):
self._create_sp()
self._test_credential('sp')
@unittest.skip("It seems sp doesn't work with federatedIdentityCredentials yet.")
def test_sp_federated_credential(self):
self._create_sp()
self._test_federated_credential('sp')
class UserScenarioTest(GraphScenarioTestBase):
def test_user_scenario(self):
self.kwargs = {
'user1': self.create_random_name(prefix='graphusertest', length=20),
'user2': self.create_random_name(prefix='graphusertest', length=20),
'domain': 'AzureSDKTeam.onmicrosoft.com',
'mail_nickname': 'graphusertest',
'new_mail_nick_name': 'graphusertestupdate',
'group': 'graphusertest_g',
'password': self.create_random_name(prefix='password-', length=40),
'force_change_password_next_login': True,
}
# create
user1_result = self.cmd(
'ad user create --display-name {user1} '
'--mail-nickname {mail_nickname} '
'--password {password} '
'--force-change-password-next-sign-in {force_change_password_next_login} '
'--user-principal-name {user1}@{domain} ',
checks=[
self.check("displayName","{user1}"),
self.check("userPrincipalName", "{user1}@{domain}")
]
).get_output_in_json()
self.kwargs['user1_id'] = user1_result['id']
self.kwargs['user1_newName'] = self.create_random_name(prefix='graphusertest', length=20)
# update
self.cmd(
'ad user update --display-name {user1_newName} '
'--account-enabled false '
'--id {user1_id} '
'--mail-nickname {new_mail_nick_name} '
'--password {password} '
'--force-change-password-next-sign-in true '
)
# show
self.cmd('ad user show --id {user1}@{domain}',
checks=[
self.check("displayName", '{user1_newName}')
])
# create group
group_result = self.cmd(
'ad group create --display-name {group} --mail-nickname {group} --description {group}').get_output_in_json()
self.kwargs['group_id'] = group_result['id']
# add user1 into group
self.cmd('ad group member add -g {group} --member-id {user1_id}',
checks=self.is_empty())
# show user's group memberships
self.cmd('ad user get-member-groups --id {user1_id}',
checks=self.check('[0].displayName', self.kwargs['group']))
# list
self.cmd('ad user list')
# delete
self.cmd('ad user delete --id {user1_id}')
class GroupScenarioTest(GraphScenarioTestBase):
def clean_resource(self, resource, type='group'):
try:
if type == 'user':
self.cmd('ad user delete --id {}'.format(resource))
elif type == 'group':
self.cmd('ad group delete -g {}'.format(resource))
elif type == 'app':
self.cmd('ad app delete --id {}'.format(resource))
except Exception:
pass
def test_group_scenario(self):
if not self._get_signed_in_user():
            return  # this test deletes users, which is beyond an SP's capacity, so quit...
domain = 'AzureSDKTeam.onmicrosoft.com'
self.kwargs = {
'group': self.create_random_name(prefix='testgroup', length=24),
'mail_nick_name': 'deleteme11',
'child_group': self.create_random_name(prefix='testchildgroup', length=24),
'leaf_group': self.create_random_name(prefix='testleafgroup', length=24),
'user1': self.create_random_name(prefix='testgroupuser1', length=24),
'user2': self.create_random_name(prefix='testgroupuser2', length=24),
'pass': 'Test1234!!',
'domain': domain,
'app_name': self.create_random_name(prefix='testgroupapp', length=24)
}
self.recording_processors.append(MSGraphNameReplacer('@' + domain, '@example.com'))
try:
# create group
group_result = self.cmd(
'ad group create --display-name {group} --mail-nickname {mail_nick_name} --description {group}',
checks=[self.check('displayName', '{group}'),
self.check('mailNickname', '{mail_nick_name}'),
self.check('description', '{group}')]
).get_output_in_json()
self.kwargs['group_id'] = group_result['id']
# create again to test idempotency
self.cmd('ad group create --display-name {group} --mail-nickname {mail_nick_name}')
# list groups
self.cmd('ad group list --display-name {group}', checks=self.check('length([])', 1))
# show group
self.cmd('ad group show -g {group}', checks=[
self.check('id', '{group_id}'),
self.check('displayName', '{group}'),
self.check('mailNickname', '{mail_nick_name}'),
self.check('description', '{group}')
])
# create other groups to test membership transitivity
group_result = self.cmd('ad group create --display-name {child_group} --mail-nickname {mail_nick_name}').get_output_in_json()
self.kwargs['child_group_id'] = group_result['id']
group_result = self.cmd('ad group create --display-name {leaf_group} --mail-nickname {mail_nick_name}').get_output_in_json()
self.kwargs['leaf_group_id'] = group_result['id']
# add child_group as member of group
self.cmd('ad group member add -g {group_id} --member-id {child_group_id}')
# add leaf_group as member of child_group
self.cmd('ad group member add -g {child_group_id} --member-id {leaf_group_id}')
# check member transitivity
self.cmd('ad group member check -g {group_id} --member-id {child_group_id}',
checks=self.check('value', True))
self.cmd('ad group member check -g {child_group_id} --member-id {leaf_group_id}',
checks=self.check('value', True))
self.cmd('ad group member check -g {group_id} --member-id {leaf_group_id}',
checks=self.check('value', True))
# list members (intransitive)
self.cmd('ad group member list -g {group_id}', checks=self.check('length([])', 1))
self.cmd('ad group member list -g {child_group_id}', checks=self.check('length([])', 1))
self.cmd('ad group member list -g {leaf_group_id}', checks=self.check('length([])', 0))
# get-member-groups transitivity
self.cmd('ad group get-member-groups -g {group_id}', checks=self.check('length([])', 0))
self.cmd('ad group get-member-groups -g {child_group_id}', checks=self.check('length([])', 1))
self.cmd('ad group get-member-groups -g {leaf_group_id}', checks=self.check('length([])', 2))
# remove member
self.cmd('ad group member remove -g {child_group_id} --member-id {leaf_group_id}')
self.cmd('ad group member check -g {child_group_id} --member-id {leaf_group_id}',
checks=self.check('value', False))
# create user to add group member
user_result = self.cmd('ad user create --display-name {user1} --password {pass} --user-principal-name {user1}@{domain}').get_output_in_json()
self.kwargs['user1_id'] = user_result['id']
# add user as group member
self.cmd('ad group member add -g {leaf_group_id} --member-id {user1_id}')
# check user as group member
self.cmd('ad group member check -g {leaf_group_id} --member-id {user1_id}',
checks=self.check('value', True))
            # list members (user is expected)
self.cmd('ad group member list -g {leaf_group_id}', checks=self.check('length([])', 1))
# remove user as member
self.cmd('ad group member remove -g {leaf_group_id} --member-id {user1_id}')
self.cmd('ad group member check -g {leaf_group_id} --member-id {user1_id}',
checks=self.check('value', False))
# Create service principal to add group member
with mock.patch('azure.cli.command_modules.role.custom._gen_guid', side_effect=self.create_guid):
result = self.cmd('ad sp create-for-rbac -n {app_name}').get_output_in_json()
self.kwargs['app_id'] = result['appId']
sp = self.cmd('ad sp show --id {app_id}').get_output_in_json()
self.kwargs['sp_id'] = sp['id']
# add service principal as group member
self.cmd('ad group member add -g {leaf_group_id} --member-id {sp_id}')
# check service principal as group member
self.cmd('ad group member check -g {leaf_group_id} --member-id {sp_id}',
checks=self.check('value', True))
# TODO: check list sp as member after staged roll-out of service principals on MS Graph
            # list members (service principal is expected)
# self.cmd('ad group member list -g {leaf_group_id}', checks=self.check('length([])', 1))
# remove service principal as member
self.cmd('ad group member remove -g {leaf_group_id} --member-id {sp_id}')
self.cmd('ad group member check -g {leaf_group_id} --member-id {sp_id}',
checks=self.check('value', False))
# list owners
self.cmd('ad group owner list -g {group_id}', checks=self.check('length([])', 0))
# create user to add group owner
user_result = self.cmd('ad user create --display-name {user2} --password {pass} --user-principal-name {user2}@{domain}').get_output_in_json()
self.kwargs['user2_id'] = user_result['id']
# add owner
self.cmd('ad group owner add -g {group_id} --owner-object-id {user1_id}')
self.cmd('ad group owner add -g {group_id} --owner-object-id {user2_id}')
self.cmd('ad group owner list -g {group_id}', checks=self.check('length([])', 2))
# remove owner
self.cmd('ad group owner remove -g {group_id} --owner-object-id {user1_id}')
self.cmd('ad group owner list -g {group_id}', checks=self.check('length([])', 1))
# delete group
self.cmd('ad group delete -g {group}')
self.cmd('ad group show -g {group_id}', expect_failure=True)
finally:
self.clean_resource(self.kwargs['group'])
self.clean_resource(self.kwargs['child_group'])
self.clean_resource(self.kwargs['leaf_group'])
self.clean_resource('{}@{}'.format(self.kwargs['user1'], self.kwargs['domain']), type='user')
self.clean_resource('{}@{}'.format(self.kwargs['user2'], self.kwargs['domain']), type='user')
if self.kwargs.get('app_id'):
self.clean_resource(self.kwargs['app_id'], type='app')
class MiscellaneousScenarioTest(GraphScenarioTestBase):
def test_special_characters(self):
# Test special characters in object names. Ensure these characters are correctly percent-encoded.
# For example, displayName with +(%2B), /(%2F)
from azure.cli.testsdk.scenario_tests.utilities import create_random_name
prefix = 'azure-cli-test-group+/'
mock_name = prefix + '000001'
if self.in_recording:
display_name = create_random_name(prefix=prefix, length=32)
self.recording_processors.append(MSGraphNameReplacer(display_name, mock_name))
else:
display_name = mock_name
self.kwargs = {
'display_name': display_name,
'mail_nick_name': 'deleteme11'
}
self.cmd('ad group create --display-name {display_name} --mail-nickname {mail_nick_name}',
checks=self.check('displayName', '{display_name}'))
self.cmd('ad group show --group {display_name}',
checks=self.check('displayName', '{display_name}'))
self.cmd('ad group list --display-name {display_name}',
checks=[self.check('length(@)', 1),
self.check('[0].displayName', '{display_name}')])
self.cmd('ad group delete --group {display_name}')
self.cmd('ad group list --display-name {display_name}',
checks=self.check('length(@)', 0))
def _get_id_from_value(permissions, value):
"""Get id from value for appRoles or oauth2PermissionScopes."""
# https://docs.microsoft.com/en-us/graph/api/resources/serviceprincipal?view=graph-rest-1.0#properties
return next(p['id'] for p in permissions if p['value'] == value)
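# Illustrative use of _get_id_from_value (hypothetical permission objects, not real tenant data):
#     permissions = [{'id': '11111111-1111-1111-1111-111111111111', 'value': 'User.Read'}]
#     _get_id_from_value(permissions, 'User.Read')  # -> '11111111-1111-1111-1111-111111111111'
# Note that next() without a default raises StopIteration when no permission matches the value.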
| {
"content_hash": "d16692d5a927161b06777c35d51e7c15",
"timestamp": "",
"source": "github",
"line_count": 966,
"max_line_length": 153,
"avg_line_length": 49.15734989648033,
"alnum_prop": 0.5988923051004507,
"repo_name": "yugangw-msft/azure-cli",
"id": "96b59cb9a5cfa2d295cb689ec956a83565e908a1",
"size": "47831",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "src/azure-cli/azure/cli/command_modules/role/tests/latest/test_graph.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ANTLR",
"bytes": "5355"
},
{
"name": "Batchfile",
"bytes": "14110"
},
{
"name": "Bicep",
"bytes": "1679"
},
{
"name": "C#",
"bytes": "1971"
},
{
"name": "C++",
"bytes": "275"
},
{
"name": "Dockerfile",
"bytes": "8427"
},
{
"name": "HTML",
"bytes": "794"
},
{
"name": "JavaScript",
"bytes": "1404"
},
{
"name": "Jupyter Notebook",
"bytes": "389"
},
{
"name": "PowerShell",
"bytes": "1781"
},
{
"name": "Python",
"bytes": "24270340"
},
{
"name": "Rich Text Format",
"bytes": "12032"
},
{
"name": "Roff",
"bytes": "1036959"
},
{
"name": "Shell",
"bytes": "56023"
},
{
"name": "TSQL",
"bytes": "1145"
}
],
"symlink_target": ""
} |
"""
Salt Edge Account Information API
API Reference for services # noqa: E501
OpenAPI spec version: 5.0.0
Contact: support@saltedge.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class TransactionsResponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'data': 'list[Transaction]',
'meta': 'MetaObject'
}
attribute_map = {
'data': 'data',
'meta': 'meta'
}
def __init__(self, data=None, meta=None): # noqa: E501
"""TransactionsResponse - a model defined in Swagger""" # noqa: E501
self._data = None
self._meta = None
self.discriminator = None
if data is not None:
self.data = data
if meta is not None:
self.meta = meta
@property
def data(self):
"""Gets the data of this TransactionsResponse. # noqa: E501
:return: The data of this TransactionsResponse. # noqa: E501
:rtype: list[Transaction]
"""
return self._data
@data.setter
def data(self, data):
"""Sets the data of this TransactionsResponse.
:param data: The data of this TransactionsResponse. # noqa: E501
:type: list[Transaction]
"""
self._data = data
@property
def meta(self):
"""Gets the meta of this TransactionsResponse. # noqa: E501
:return: The meta of this TransactionsResponse. # noqa: E501
:rtype: MetaObject
"""
return self._meta
@meta.setter
def meta(self, meta):
"""Sets the meta of this TransactionsResponse.
:param meta: The meta of this TransactionsResponse. # noqa: E501
:type: MetaObject
"""
self._meta = meta
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(TransactionsResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TransactionsResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
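# Minimal usage sketch (values are illustrative, not a real Salt Edge API response):
#     response = TransactionsResponse(data=[], meta=None)
#     response.to_dict()                                     # -> {'data': [], 'meta': None}
#     response == TransactionsResponse(data=[], meta=None)   # -> True
# A real response would carry a list of generated Transaction models in `data`.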
| {
"content_hash": "797122d68f953b3e6318fc94cdbeda71",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 80,
"avg_line_length": 27.69402985074627,
"alnum_prop": 0.5451360819186203,
"repo_name": "ltowarek/budget-supervisor",
"id": "830acf1d8a24d12da79229d0fb66e8a5b8d5cca4",
"size": "3728",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/saltedge/swagger_client/models/transactions_response.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7960"
},
{
"name": "JavaScript",
"bytes": "79489"
}
],
"symlink_target": ""
} |
# [START datacatalog_v1_generated_PolicyTagManager_CreateTaxonomy_async]
from google.cloud import datacatalog_v1
async def sample_create_taxonomy():
# Create a client
client = datacatalog_v1.PolicyTagManagerAsyncClient()
# Initialize request argument(s)
request = datacatalog_v1.CreateTaxonomyRequest(
parent="parent_value",
)
# Make the request
response = await client.create_taxonomy(request=request)
# Handle the response
print(response)
# [END datacatalog_v1_generated_PolicyTagManager_CreateTaxonomy_async]
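# To run this coroutine directly (assumes Application Default Credentials are configured and
# "parent_value" is replaced with a real "projects/<project>/locations/<location>" resource):
#     import asyncio
#     asyncio.run(sample_create_taxonomy())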
| {
"content_hash": "8a600bc12230a699fc34dc03e7c6a57c",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 70,
"avg_line_length": 25.736842105263158,
"alnum_prop": 0.7280163599182005,
"repo_name": "googleapis/python-datacatalog",
"id": "654a4340fc57e4c37bcb7c6b67a622bdce2d3a26",
"size": "1888",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/datacatalog_v1_generated_policy_tag_manager_create_taxonomy_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "3073442"
},
{
"name": "Shell",
"bytes": "30675"
}
],
"symlink_target": ""
} |