# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Smoke test to see if basic Kubernetes manifest operations (deploy, patch, scale,
rollback, delete) are working in the Spinnaker Kubernetes V2 integration.
"""
# Standard python modules.
import sys
# citest modules.
import citest.kube_testing as kube
import citest.json_contract as jc
import citest.json_predicate as jp
import citest.service_testing as st
# Spinnaker modules.
import spinnaker_testing as sk
import spinnaker_testing.gate as gate
import spinnaker_testing.frigga as frigga
import citest.base
ov_factory = jc.ObservationPredicateFactory()
class KubeV2SmokeTestScenario(sk.SpinnakerTestScenario):
"""Defines the scenario for the kube v2 smoke test.
"""
@classmethod
def new_agent(cls, bindings):
"""Implements citest.service_testing.AgentTestScenario.new_agent."""
agent = gate.new_agent(bindings)
agent.default_max_wait_secs = 180
return agent
@classmethod
def initArgumentParser(cls, parser, defaults=None):
"""Initialize command line argument parser.
Args:
parser: argparse.ArgumentParser
"""
super(KubeV2SmokeTestScenario, cls).initArgumentParser(
parser, defaults=defaults)
defaults = defaults or {}
parser.add_argument(
'--test_namespace', default='default',
help='The namespace to manage within the tests.')
def __init__(self, bindings, agent=None):
"""Constructor.
Args:
bindings: [dict] The data bindings to use to configure the scenario.
agent: [GateAgent] The agent for invoking the test operations on Gate.
"""
super(KubeV2SmokeTestScenario, self).__init__(bindings, agent)
bindings = self.bindings
# We'll call out the app name because it is widely used;
# it scopes the context of our activities.
# pylint: disable=invalid-name
self.TEST_APP = bindings['TEST_APP']
self.TEST_USER = bindings['TEST_USER']
# Take just the first if there are multiple
# because some uses below assume just one.
self.TEST_NAMESPACE = bindings['TEST_NAMESPACE'].split(',')[0]
self.pipeline_id = None
self.mf = sk.KubernetesManifestFactory(self)
self.mp = sk.KubernetesManifestPredicateFactory()
self.ps = sk.PipelineSupport(self)
def create_app(self):
"""Creates OperationContract that creates a new Spinnaker Application."""
contract = jc.Contract()
return st.OperationContract(
self.agent.make_create_app_operation(
bindings=self.bindings, application=self.TEST_APP,
account_name=self.bindings['SPINNAKER_KUBERNETES_V2_ACCOUNT']),
contract=contract)
def delete_app(self):
"""Creates OperationContract that deletes a new Spinnaker Application."""
contract = jc.Contract()
return st.OperationContract(
self.agent.make_delete_app_operation(
application=self.TEST_APP,
account_name=self.bindings['SPINNAKER_KUBERNETES_V2_ACCOUNT']),
contract=contract)
def deploy_manifest(self, image):
"""Creates OperationContract for deployManifest.
To verify the operation, we just check that the deployment was created with
the requested image.
"""
bindings = self.bindings
name = self.TEST_APP + '-deployment'
payload = self.agent.make_json_payload_from_kwargs(
job=[{
'cloudProvider': 'kubernetes',
'moniker': {
'app': self.TEST_APP
},
'account': bindings['SPINNAKER_KUBERNETES_V2_ACCOUNT'],
'source': 'text',
'type': 'deployManifest',
'user': '[anonymous]',
'manifests': [self.mf.deployment(name, image)],
}],
description='Deploy manifest',
application=self.TEST_APP)
builder = kube.KubeContractBuilder(self.kube_v2_observer)
(builder.new_clause_builder('Deployment created',
retryable_for_secs=15)
.get_resources(
'deploy',
extra_args=[name, '--namespace', self.TEST_NAMESPACE])
.EXPECT(self.mp.deployment_image_predicate(image)))
return st.OperationContract(
self.new_post_operation(
title='deploy_manifest', data=payload, path='tasks'),
contract=builder.build())
def patch_manifest(self):
"""Creates OperationContract for patchManifest.
To verify the operation, we just check that the deployment was patched with
the expected label.
"""
bindings = self.bindings
name = self.TEST_APP + '-deployment'
test_label = 'patchedLabel'
payload = self.agent.make_json_payload_from_kwargs(
job=[{
'account': bindings['SPINNAKER_KUBERNETES_V2_ACCOUNT'],
'cloudProvider': 'kubernetes',
'kind': 'deployment',
'location': self.TEST_NAMESPACE,
'manifestName': 'deployment ' + name,
'type': 'patchManifest',
'user': '[anonymous]',
'source': 'text',
'patchBody': {
'metadata': {
'labels': {
'testLabel': test_label,
}
}
},
'options': {
'mergeStrategy': 'strategic',
'record': True
}
}],
description='Patch manifest',
application=self.TEST_APP)
builder = kube.KubeContractBuilder(self.kube_v2_observer)
(builder.new_clause_builder('Deployment patched',
retryable_for_secs=15)
.get_resources(
'deploy',
extra_args=[name, '--namespace', self.TEST_NAMESPACE])
.EXPECT(ov_factory.value_list_contains(jp.DICT_MATCHES({
'metadata': jp.DICT_MATCHES({
'labels': jp.DICT_MATCHES({
'testLabel': jp.STR_EQ(test_label)
})
})
}))))
return st.OperationContract(
self.new_post_operation(
title='patch_manifest', data=payload, path='tasks'),
contract=builder.build())
def undo_rollout_manifest(self, image):
"""Creates OperationContract for undoRolloutManifest.
To verify the operation, we just check that the deployment's image was rolled
back to the previous one.
"""
bindings = self.bindings
name = self.TEST_APP + '-deployment'
payload = self.agent.make_json_payload_from_kwargs(
job=[{
'cloudProvider': 'kubernetes',
'account': bindings['SPINNAKER_KUBERNETES_V2_ACCOUNT'],
'manifestName': 'deployment ' + name,
'location': self.TEST_NAMESPACE,
'type': 'undoRolloutManifest',
'user': '[anonymous]',
'numRevisionsBack': 1
}],
description='Undo rollout manifest',
application=self.TEST_APP)
builder = kube.KubeContractBuilder(self.kube_v2_observer)
(builder.new_clause_builder('Deployment rolled back',
retryable_for_secs=15)
.get_resources(
'deploy',
extra_args=[name, '--namespace', self.TEST_NAMESPACE])
.EXPECT(self.mp.deployment_image_predicate(image)))
return st.OperationContract(
self.new_post_operation(
title='undo_rollout_manifest', data=payload, path='tasks'),
contract=builder.build())
def scale_manifest(self):
"""Creates OperationContract for scaleManifest.
To verify the operation, we just check that the deployment was scaled to the
requested number of replicas.
"""
bindings = self.bindings
name = self.TEST_APP + '-deployment'
payload = self.agent.make_json_payload_from_kwargs(
job=[{
'cloudProvider': 'kubernetes',
'account': bindings['SPINNAKER_KUBERNETES_V2_ACCOUNT'],
'manifestName': 'deployment ' + name,
'location': self.TEST_NAMESPACE,
'type': 'scaleManifest',
'user': '[anonymous]',
'replicas': 2
}],
description='Scale manifest',
application=self.TEST_APP)
builder = kube.KubeContractBuilder(self.kube_v2_observer)
(builder.new_clause_builder('Deployment scaled',
retryable_for_secs=15)
.get_resources(
'deploy',
extra_args=[name, '--namespace', self.TEST_NAMESPACE])
.EXPECT(ov_factory.value_list_contains(jp.DICT_MATCHES(
{ 'spec': jp.DICT_MATCHES({ 'replicas': jp.NUM_EQ(2) }) }))))
return st.OperationContract(
self.new_post_operation(
title='scale_manifest', data=payload, path='tasks'),
contract=builder.build())
def save_deploy_manifest_pipeline(self, image):
name = self.TEST_APP + '-deployment'
stage = {
'type': 'deployManifest',
'cloudProvider': 'kubernetes',
'moniker': {
'app': self.TEST_APP
},
'account': self.bindings['SPINNAKER_KUBERNETES_V2_ACCOUNT'],
'source': 'text',
'manifests': [self.mf.deployment(name, image)],
}
return self.ps.submit_pipeline_contract('deploy-manifest-pipeline',
[stage],
user=self.TEST_USER)
def execute_deploy_manifest_pipeline(self, image):
name = self.TEST_APP + '-deployment'
bindings = self.bindings
payload = self.agent.make_json_payload_from_kwargs(
job=[{
'type': 'manual',
'user': '[anonymous]'
}],
description='Deploy manifest in ' + self.TEST_APP,
application=self.TEST_APP)
builder = kube.KubeContractBuilder(self.kube_v2_observer)
(builder.new_clause_builder('Deployment deployed',
retryable_for_secs=15)
.get_resources(
'deploy',
extra_args=[name, '--namespace', self.TEST_NAMESPACE])
.EXPECT(self.mp.deployment_image_predicate(image)))
return st.OperationContract(
self.new_post_operation(
title='Deploy manifest', data=payload,
path='pipelines/' + self.TEST_APP + '/deploy-manifest-pipeline'),
contract=builder.build())
def delete_manifest(self):
"""Creates OperationContract for deleteManifest
To verify the operation, we just check that the Kubernetes deployment
is no longer visible (or is in the process of terminating).
"""
bindings = self.bindings
name = self.TEST_APP + '-deployment'
payload = self.agent.make_json_payload_from_kwargs(
job=[{
'cloudProvider': 'kubernetes',
'type': 'deleteManifest',
'account': bindings['SPINNAKER_KUBERNETES_V2_ACCOUNT'],
'user': '[anonymous]',
'kinds': [ 'deployment' ],
'location': self.TEST_NAMESPACE,
'options': { },
'labelSelectors': {
'selectors': [{
'kind': 'EQUALS',
'key': 'app',
'values': [ self.TEST_APP ]
}]
}
}],
application=self.TEST_APP,
description='Destroy Manifest')
builder = kube.KubeContractBuilder(self.kube_v2_observer)
(builder.new_clause_builder('Deployment Removed')
.get_resources(
'deployment',
extra_args=[name, '--namespace', self.TEST_NAMESPACE])
.EXPECT(self.mp.not_found_observation_predicate()))
return st.OperationContract(
self.new_post_operation(
title='delete_manifest', data=payload, path='tasks'),
contract=builder.build())
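# Hedged illustration (not part of the original scenario): every operation method
# above follows the same shape -- build a Gate payload, describe the expected
# kubectl observation with a KubeContractBuilder clause, and pair the two in an
# OperationContract. The helper below restates that pattern generically; the
# argument names are illustrative, and it only reuses calls already made above.
def make_task_contract(scenario, payload, clause_title, deployment_name, predicate):
  """Pairs a POST to Gate's /tasks endpoint with a contract that observes
  `deployment_name` in the test namespace and checks it against `predicate`."""
  builder = kube.KubeContractBuilder(scenario.kube_v2_observer)
  (builder.new_clause_builder(clause_title, retryable_for_secs=15)
   .get_resources(
       'deploy',
       extra_args=[deployment_name, '--namespace', scenario.TEST_NAMESPACE])
   .EXPECT(predicate))
  return st.OperationContract(
      scenario.new_post_operation(
          title=clause_title, data=payload, path='tasks'),
      contract=builder.build())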
class KubeV2SmokeTest(st.AgentTestCase):
"""The test fixture for the KubeV2SmokeTest.
This is implemented using citest OperationContract instances that are
created by the KubeV2SmokeTestScenario.
"""
# pylint: disable=missing-docstring
@property
def scenario(self):
return citest.base.TestRunner.global_runner().get_shared_data(
KubeV2SmokeTestScenario)
def test_a_create_app(self):
self.run_test_case(self.scenario.create_app())
def test_b1_deploy_manifest(self):
self.run_test_case(self.scenario.deploy_manifest('library/nginx'),
max_retries=1,
timeout_ok=True)
def test_b2_update_manifest(self):
self.run_test_case(self.scenario.deploy_manifest('library/redis'),
max_retries=1,
timeout_ok=True)
def test_b3_undo_rollout_manifest(self):
self.run_test_case(self.scenario.undo_rollout_manifest('library/nginx'),
max_retries=1)
def test_b4_scale_manifest(self):
self.run_test_case(self.scenario.scale_manifest(), max_retries=1)
def test_b5_patch_manifest(self):
self.run_test_case(self.scenario.patch_manifest(), max_retries=1)
def test_b6_delete_manifest(self):
self.run_test_case(self.scenario.delete_manifest(), max_retries=2)
def test_c1_save_deploy_manifest_pipeline(self):
self.run_test_case(self.scenario.save_deploy_manifest_pipeline('library/nginx'))
def test_c2_execute_deploy_manifest_pipeline(self):
self.run_test_case(self.scenario.execute_deploy_manifest_pipeline('library/nginx'))
def test_c3_delete_manifest(self):
self.run_test_case(self.scenario.delete_manifest(), max_retries=2)
def test_z_delete_app(self):
# Give a total of 2 minutes because it might also need
# an internal cache update
self.run_test_case(self.scenario.delete_app(),
retry_interval_secs=8, max_retries=15)
def main():
"""Implements the main method running this smoke test."""
defaults = {
'TEST_STACK': 'tst',
'TEST_APP': 'kubv2smok' + KubeV2SmokeTestScenario.DEFAULT_TEST_ID
}
return citest.base.TestRunner.main(
parser_inits=[KubeV2SmokeTestScenario.initArgumentParser],
default_binding_overrides=defaults,
test_case_list=[KubeV2SmokeTest])
if __name__ == '__main__':
sys.exit(main())
import maya.OpenMaya as om
import mtm_attribute as attribute
reload(attribute)
filterNames = ["Unit Box Filter","Gauss", "Bartlett", "Catmull Rom", "Hanning", "Blackman", "Sinc (sharpening)", "Closest Sample Filtering", "Farthest Samples Filtering", "Disable edge Antialiasing", "Object with most coverage", "Object with most-nofilter"]
realFilterNames = ["box", "gauss", "bartlett", "catrom", "hanning", "blackman", "sinc", "minmax min", "minmax max", "minmax edge", "minmax ocover", "minmax idcover"]
textureFilterNames = ["box", "gauss", "bartlett", "catrom", "hanning", "blackman", "mitchell"]
bitDepthNames = ['8 bit', '16 bit', '16 bit float', '32 bit float']
realBitDepthNames = ['8', '16', '', 'float']
mbtypes = ["Leading Blur","Center Blur","Trailing Blur"]
renderEngines = ["Micropolygon Renderer","Raytracing","Micropolygon PBR","PBR","Photon Map Generation"]
outputTypes = ['Render To MPlay', 'Render To File']
accelTypes = ["KD Tree", "BVH"]
lightModelNames = ["phong", "blinn", "cone"]
lightTypes = ['Point', 'Line', 'Grid', 'Disk', 'Sphere', 'Geometry', 'Distant', 'Sun', 'Environment']
areaLightTypes = ['Grid', 'Line', 'Disk', 'Sphere', 'Geometry']
envMapSpace = ["Object Space", "Camera Space", "World Space", "Environment Null"]
attenuationType = ["No Attenuation", "Half Distance Attenuation", "Physically correct"]
colorSpaces = ["Linear", "Gamma 2.2"]
rayLimitBehaviours = ["Black Background", "Direct Lighting BG"]
shadowTypes = ["None", "Raytraced Shadows", "DepthMap Shadows"]
geoFileTypes = ["Binary", "Ascii"]
translatorVerbosities = ["None", "Error", "Warning", "Info", "Progress", "Debug"]
kColor = attribute.kColor
kEnum = attribute.kEnum
kMessage= attribute.kMessage
MAPTO_IGNORE = attribute.MAPTO_IGNORE
MAPTO_NONE = attribute.MAPTO_NONE
MAP_API_ATTR = attribute.MAP_API_ATTR
mantraTranslatorATList = attribute.AttributeCollection()
mantraTranslatorATList.addAttr(an='geoFileType', dn='Geometry Type', tp=kEnum, values=geoFileTypes, default=1, kat="Translation")
mantraTranslatorATList.addAttr(an='translatorVerbosity', dn='Translator Verbosity', tp=kEnum, values=translatorVerbosities,default=2, kat="Translation")
mantraGlobalsATList = attribute.AttributeCollection()
# Settings
mantraGlobalsATList.addAttr(an='output', dn='Output Type', tp=kEnum, values=outputTypes, default=0, kat="Settings")
mantraGlobalsATList.addAttr(an='singleFileOutput', dn='SingleFileAnim', tp=om.MFnNumericData.kBoolean, default=False, kat="Settings")
mantraGlobalsATList.addAttr(an='scaleFactor', dn='Scale Factor', tp=om.MFnNumericData.kFloat, default=0.1, kat="Settings")
# Motion Blur
mantraGlobalsATList.addAttr(an='motionblur', dn='Enable Motionblur', tp=om.MFnNumericData.kBoolean, default=False, kat="Motionblur", affectAtt=["imagemotionblur", "xftimesamples", "geotimesamples", "mbtype", 'motionfactor'])
mantraGlobalsATList.addAttr(an='imagemotionblur', dn='Allow Image Mb', tp=om.MFnNumericData.kBoolean, default=True, kat="Motionblur")
mantraGlobalsATList.addAttr(an='xftimesamples', dn='XForm Time Samples', tp=om.MFnNumericData.kInt, default=2, kat="Motionblur")
mantraGlobalsATList.addAttr(an='geotimesamples', dn='Geometry Time Samples', tp=om.MFnNumericData.kInt, default=1, kat="Motionblur")
mantraGlobalsATList.addAttr(an='motionfactor', dn='Motion Factor', tp=om.MFnNumericData.kFloat, default=0, kat="Motionblur")
mantraGlobalsATList.addAttr(an='mbtype', dn='Motionblur Type', tp=kEnum, values=mbtypes, default=1, kat="Motionblur")
# Sampling
mantraGlobalsATList.addAttr(an='dof', dn='Enable Depth of Field', tp=om.MFnNumericData.kBoolean, default=False, kat="Sampling")
mantraGlobalsATList.addAttr(an='samples', dn='Samples', tp=om.MFnNumericData.k2Int, default=[3,3], kat="Sampling")
mantraGlobalsATList.addAttr(an='samplelock', dn='Sample Lock', tp=om.MFnNumericData.kBoolean, default=True, kat="Sampling")
mantraGlobalsATList.addAttr(an='jitter', dn='Jitter', tp=om.MFnNumericData.kFloat, default=1.0, kat="Sampling")
mantraGlobalsATList.addAttr(an='noiselevel', dn='Noise Level', tp=om.MFnNumericData.kFloat, default=.05, kat="Sampling")
mantraGlobalsATList.addAttr(an='randomseed', dn='RandomSeed', tp=om.MFnNumericData.kInt, default=0, kat="Sampling")
mantraGlobalsATList.addAttr(an='rayvaryance', dn='Ray Variance Antialiasing', tp=om.MFnNumericData.kBoolean, default=False, kat="Sampling")
mantraGlobalsATList.addAttr(an='minraysamples', dn='Min Ray Samples', tp=om.MFnNumericData.kInt, default=1, kat="Sampling")
mantraGlobalsATList.addAttr(an='maxraysamples', dn='Max Ray Samples', tp=om.MFnNumericData.kInt, default=1, kat="Sampling")
mantraGlobalsATList.addAttr(an='volumestepsize', dn='Volume Step Size', tp=om.MFnNumericData.kFloat, default=.1, kat="Sampling")
mantraGlobalsATList.addAttr(an='decoupleshadowstep', dn='Decouple Shadow Step Size', tp=om.MFnNumericData.kBoolean, default=False, kat="Sampling")
mantraGlobalsATList.addAttr(an='shadowstepsize', dn='Shadow Step Size', tp=om.MFnNumericData.kFloat, default=1.0, kat="Sampling")
# Filter
mantraGlobalsATList.addAttr(an='filtertype', dn='Pixel Filter Type', tp=kEnum, values=filterNames, default=0, kat="Filter")
mantraGlobalsATList.addAttr(an='filtersizeuv', dn='FilterSize', tp=om.MFnNumericData.k2Int, default=[3,3], kat="Filter")
# Frame Buffer
mantraGlobalsATList.addAttr(an='bitdepth', dn='BitDepth', tp=kEnum, values=bitDepthNames, default=0, kat="FrameBuffer")
# Render Engine
mantraGlobalsATList.addAttr(an='renderengine', dn='Render Engine', tp=kEnum, values=renderEngines, default=0, kat="Render Engine")
mantraGlobalsATList.addAttr(an='tilesize', dn='Tile Size', tp=om.MFnNumericData.kInt, default=16, kat="Render Engine")
mantraGlobalsATList.addAttr(an='opacitylimit', dn='Opacity Limit', tp=om.MFnNumericData.kFloat, default=.98, kat="Render Engine")
mantraGlobalsATList.addAttr(an='verbosity', dn='Verbosity', tp=om.MFnNumericData.kString, default="2a", kat="Render Engine")
mantraGlobalsATList.addAttr(an='usemaxproc', dn='Use Max Processors', tp=om.MFnNumericData.kBoolean, default=True, kat="Render Engine")
mantraGlobalsATList.addAttr(an='threadcount', dn='Number of Threads', tp=om.MFnNumericData.kInt, default=1, kat="Render Engine")
mantraGlobalsATList.addAttr(an='accerationtype', dn='Acceleration Type', tp=kEnum, values=accelTypes, default=0, kat="Render Engine")
mantraGlobalsATList.addAttr(an='kdmemfac', dn='KD Memory Factor', tp=om.MFnNumericData.kFloat, default=1.0, kat="Render Engine")
mantraGlobalsATList.addAttr(an='geocachesize', dn='Geometry Cache Size (MB)', tp=om.MFnNumericData.kInt, default=256, kat="Render Engine")
mantraGlobalsATList.addAttr(an='texcachesize', dn='Texture Cache Size (MB)', tp=om.MFnNumericData.kInt, default=256, kat="Render Engine")
mantraGlobalsATList.addAttr(an='renderviewcam', dn='Render View Cam', tp=om.MFnNumericData.kBoolean, default=True, kat="Render Engine")
mantraGlobalsATList.addAttr(an='autogenenvmap', dn='Auto generate EnvMap', tp=om.MFnNumericData.kBoolean, default=True, kat="Render Engine")
mantraGlobalsATList.addAttr(an='autogensmap', dn='Auto generate SMap', tp=om.MFnNumericData.kBoolean, default=True, kat="Render Engine")
# Shading
mantraGlobalsATList.addAttr(an='reflectlimit', dn='Reflect Limit', tp=om.MFnNumericData.kInt, default=10, kat="Shading")
mantraGlobalsATList.addAttr(an='refractlimit', dn='Refract Limit', tp=om.MFnNumericData.kInt, default=10, kat="Shading")
mantraGlobalsATList.addAttr(an='glossylimit', dn='Glossy Limit', tp=om.MFnNumericData.kInt, default=1, kat="Shading")
mantraGlobalsATList.addAttr(an='diffuselimit', dn='Diffuse Limit', tp=om.MFnNumericData.kInt, default=0, kat="Shading")
mantraGlobalsATList.addAttr(an='volumelimit', dn='Volume Limit', tp=om.MFnNumericData.kInt, default=0, kat="Shading")
mantraGlobalsATList.addAttr(an='raytracebias', dn='Raytracing Bias', tp=om.MFnNumericData.kFloat, default=.01, kat="Shading")
mantraGlobalsATList.addAttr(an='biasalongnormal', dn='Bias along Normal', tp=om.MFnNumericData.kBoolean, default=False, kat="Shading")
mantraGlobalsATList.addAttr(an='colorspace', dn='Color Space', tp=kEnum, values=colorSpaces, default=0, kat="Shading")
mantraGlobalsATList.addAttr(an='raylimit', dn='At Ray Limit Use', tp=kEnum, values=rayLimitBehaviours, default=0, kat="Shading")
mantraGlobalsATList.addAttr(an='smoothgrid', dn='Smooth Grid Colors', tp=om.MFnNumericData.kBoolean, default=True, kat="Shading")
# pbr
mantraGlobalsATList.addAttr(an='colorlimit', dn='Color Limit', tp=om.MFnNumericData.kFloat, default=10.0, kat="PBR")
mantraGlobalsATList.addAttr(an='minreflectratio', dn='Min Reflection Ratio', tp=om.MFnNumericData.kFloat, default=10.0, kat="PBR")
# paths
mantraGlobalsATList.addAttr(an='baseName', dn='Base Name', tp=om.MFnNumericData.kString, default="", kat="Paths")
mantraGlobalsATList.addAttr(an='imageDir', dn='Image Dir', tp=om.MFnNumericData.kString, default="", kat="Paths")
mantraGlobalsATList.addAttr(an='imageName', dn='Image Name', tp=om.MFnNumericData.kString, default="", kat="Paths")
# common light attributeList
mantraCommonLightATList = attribute.AttributeCollection()
mantraCommonLightATList.addAttr(an='light_type', dn='Type', tp=kEnum, mapto=MAPTO_IGNORE, values=lightTypes, default=0, kat=None)
mantraCommonLightATList.addAttr(an='lightcolor', dn='Color', tp=kColor, mapto='color', values=[], default=[1.0, 1.0, 1.0], kat=None)
mantraCommonLightATList.addAttr(an='lightintensity', dn='Intensity', tp=om.MFnNumericData.kFloat, mapto='intensity', default=1.0, kat=None)
mantraCommonLightATList.addAttr(an='__nondiffuse', dn='Emit Diffuse', tp=om.MFnNumericData.kBoolean, mapto='emitDiffuse', default=False, kat=None)
mantraCommonLightATList.addAttr(an='__nonspecular', dn='Emit Specular', tp=om.MFnNumericData.kBoolean, mapto='emitSpecular', default=False, kat=None)
mantraCommonLightATList.addAttr(an='samplingquality', dn='Sampling Quality', tp=om.MFnNumericData.kFloat, default=1.0, kat=None)
mantraCommonLightATList.addAttr(an='surfaceshaders', dn='Use Surface Shaders', tp=om.MFnNumericData.kBoolean, default=True, kat=None)
# Attenuation
mantraCommonLightATList.addAttr(an='attentype', dn='Attenuation Type', tp=kEnum, values=attenuationType, default=0, kat='Attenuation')
mantraCommonLightATList.addAttr(an='attendist', dn='Half Distance', tp=om.MFnNumericData.kFloat,default=10.0, kat='Attenuation')
mantraCommonLightATList.addAttr(an='attenstart', dn='Attenuation Start', tp=om.MFnNumericData.kFloat,default=10.0, kat='Attenuation')
mantraCommonLightATList.addAttr(an='activeradius', dn='Active Radius', tp=om.MFnNumericData.kFloat,default=0.0, kat='Attenuation')
# Shadow
mantraLightShadowATList = attribute.AttributeCollection()
mantraLightShadowATList.addAttr(an='shadow_type', dn='Shadow Type', tp=kEnum, values=shadowTypes, default=1, kat='Shadow')
mantraLightShadowATList.addAttr(an='shadowI', dn='Shadow Intensity', tp=om.MFnNumericData.kFloat, default=1.0, kat='Shadow')
# pointLight AttributeList
mantraPointLightATList = attribute.AttributeCollection()
# spotLight AttributeList
mantraSpotLightATList = attribute.AttributeCollection()
mantraSpotLightATList.addAttr(an='light_fov', dn='Cone Angle', tp=om.MFnNumericData.kFloat, mapto='coneAngle', default=45.0)
mantraSpotLightATList.addAttr(an='projmap', dn='Projection Map', tp=om.MFnData.kString, default="")
mantraSpotLightATList.addAttr(an='near', dn='Clip Near', tp=om.MFnNumericData.kFloat, default=.1)
mantraSpotLightATList.addAttr(an='far', dn='Clip Far', tp=om.MFnNumericData.kFloat, default=32025.0)
# directionalLight AttributeList
mantraDirectionalLightATList = attribute.AttributeCollection()
mantraDirectionalLightATList.addAttr(an='orthowidth', dn='Orthographic Width', tp=om.MFnNumericData.kFloat, default=1.0)
# areaLight AttributeList
mantraAreaLightATList = attribute.AttributeCollection()
mantraAreaLightATList.addAttr(an='area', dn='Area', tp=om.MFnNumericData.k2Float, default=[1.0, 1.0])
mantraAreaLightATList.addAttr(an='areashape', dn='Area Shape', values=areaLightTypes, tp=kEnum, default=0)
mantraAreaLightATList.addAttr(an='normalizearea', dn='Normalize Intensity', tp=om.MFnNumericData.kBoolean, default=True)
mantraAreaLightATList.addAttr(an='singlesided', dn='Single Sided', tp=om.MFnNumericData.kBoolean, default=False)
mantraAreaLightATList.addAttr(an='reverse', dn='Reverse Direction',tp=om.MFnNumericData.kBoolean, default=False)
mantraAreaLightATList.addAttr(an='areamap', dn='Environment Map', tp=om.MFnData.kString, default="")
mantraAreaLightATList.addAttr(an='areamapspace', dn='Env Space', tp=kEnum, values=envMapSpace, default=0)
mantraAreaLightATList.addAttr(an='light_texture', dn='Texture Map', tp=om.MFnData.kString, default="", kat='LightTexture')
mantraAreaLightATList.addAttr(an='edgeenable', dn='Enable Texture Falloff',tp=om.MFnNumericData.kBoolean, default=False, kat='LightTexture')
mantraAreaLightATList.addAttr(an='edgewidth', dn='Edge Width', tp=om.MFnNumericData.kFloat, default=.1, kat='LightTexture')
mantraAreaLightATList.addAttr(an='edgerolloff', dn='Edge Rolloff', tp=om.MFnNumericData.kFloat, default=1.0, kat='LightTexture')
mantraAreaLightATList.addAttr(an='areageometry', dn='Object', tp=kMessage, default="", kat='GeometryLight')
mantraAreaLightATList.addAttr(an='intothisobject', dn='Transform into this obj',tp=om.MFnNumericData.kBoolean, default=False, kat='GeometryLight')
mantraLightTypeAttrList = {'areaLight':mantraAreaLightATList,
'pointLight':mantraPointLightATList,
'spotLight':mantraSpotLightATList,
'directionalLight':mantraDirectionalLightATList}
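# Hedged usage sketch (not part of the original module): the dict above maps a Maya
# light type to its Mantra attribute collection, and collections are extended with
# the same addAttr() keyword style used throughout this file. The attribute name
# and the 'penumbraAngle' mapping below are illustrative assumptions only.
def _exampleExtendSpotLight():
    spotAttrs = mantraLightTypeAttrList['spotLight']
    spotAttrs.addAttr(an='light_softness', dn='Cone Softness',
                      tp=om.MFnNumericData.kFloat, mapto='penumbraAngle', default=0.0)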
# ray_property object displace opdef:/Shop/c_mantraSurface diff_int 1 sss_pcname "c_mantraSurface1_SSS.pc" ior_in 1.25300002098 ior_out 1.01800000668 refr_enable 1 refr_int 0.985000014305 refr_min 0.0120000001043 refr_clr 1 0.998799979687 0.927999973297 refr_angle 0.019999999553 refr_aniso 0.0289999991655 refr_qual 1.02999997139 refr_thin 1 atten_enable 1 atten_den 0.503000020981 atten_clr 0.931999981403 0.937666654587 1 enableDispMap 1 displacementMap "D:/mudtmp/Textures/vectorDisplacement_1_vdm.exr" ogl_alpha 1 ogl_diff 1 1 1 ogl_spec 1 1 1 ogl_specmap "" ogl_rough 0 ogl_opacitymap "" ogl_bumpmap "" ogl_normalmap "" ogl_envmap ""
# ray_property object surface opdef:/Shop/c_mantraSurface diff_int 1 sss_pcname "c_mantraSurface1_SSS.pc" ior_in 1.25300002098 ior_out 1.01800000668 refr_enable 1 refr_int 0.985000014305 refr_min 0.0120000001043 refr_clr 1 0.998799979687 0.927999973297 refr_angle 0.019999999553 refr_aniso 0.0289999991655 refr_qual 1.02999997139 refr_thin 1 atten_enable 1 atten_den 0.503000020981 atten_clr 0.931999981403 0.937666654587 1 enableDispMap 1 displacementMap "D:/mudtmp/Textures/vectorDisplacement_1_vdm.exr" ogl_alpha 1 ogl_diff 1 1 1 ogl_spec 1 1 1 ogl_specmap "" ogl_rough 0 ogl_opacitymap "" ogl_bumpmap "" ogl_normalmap "" ogl_envmap ""
mantraMantraSurfaceShaderATList = attribute.AttributeCollection()
mantraMantraSurfaceShaderATList.addAttr(an='diff_enable',dn='Diffuse Enable',default=True,tp=om.MFnNumericData.kBoolean,kat='Diffuse')
mantraMantraSurfaceShaderATList.addAttr(an='diff_int',dn='Diffuse Intensity',default=0.5,tp=om.MFnNumericData.kFloat,kat='Diffuse')
mantraMantraSurfaceShaderATList.addAttr(an='diff_rough',dn='Diffuse Roughness',default=0.0,tp=om.MFnNumericData.kFloat,kat='Diffuse')
mantraMantraSurfaceShaderATList.addAttr(an='baseColor',dn='Base Color',default=[0.5, 0.5, 0.5],tp=kColor,kat='Diffuse')
mantraMantraSurfaceShaderATList.addAttr(an='useColorMap',dn='Use Color Map',default=False,tp=om.MFnNumericData.kBoolean,kat='Diffuse|ColorMap')
mantraMantraSurfaceShaderATList.addAttr(an='baseColorMap',dn='Base Color Map',default="",tp=om.MFnData.kString,kat='Diffuse|ColorMap')
mantraMantraSurfaceShaderATList.addAttr(an='colorMapIntensity',dn='Color Map Intensity',default=1.0,tp=om.MFnNumericData.kFloat,kat='Diffuse|ColorMap')
mantraMantraSurfaceShaderATList.addAttr(an='colorMapfilter',dn='Color Map Filter',values=textureFilterNames,default=0,tp=kEnum,kat='Diffuse|ColorMap')
mantraMantraSurfaceShaderATList.addAttr(an='colorMapWidth',dn='Color Map Width',default=1.0,tp=om.MFnNumericData.kFloat,kat='Diffuse|ColorMap')
# Subsurfacescatter
mantraMantraSurfaceShaderATList.addAttr(an='sss_enable',dn='Subsurface Scatter Enable',default=False,tp=om.MFnNumericData.kBoolean,kat='Subsurfacescatter')
mantraMantraSurfaceShaderATList.addAttr(an='sss_int',dn='Subsurface Scatter Intensity',default=1.0,tp=om.MFnNumericData.kFloat,kat='Subsurfacescatter')
mantraMantraSurfaceShaderATList.addAttr(an='baseSSSColor',dn='Subsurface Scatter BaseColor',default=[1.0, 1.0, 1.0],tp=kColor,kat='Subsurfacescatter')
# Specular
mantraMantraSurfaceShaderATList.addAttr(an='refl_lights',dn='Reflect Lights',default=False,tp=om.MFnNumericData.kBoolean,kat='Specular')
mantraMantraSurfaceShaderATList.addAttr(an='spec_model',dn='Specular Model',values=lightModelNames,default=0,tp=kEnum,kat='Specular')
mantraMantraSurfaceShaderATList.addAttr(an='spec_int',dn='Specular Intensity',default=1.0,tp=om.MFnNumericData.kFloat,kat='Specular')
mantraMantraSurfaceShaderATList.addAttr(an='specColor1',dn='Specular Color',default=[1.0, 1.0, 1.0],tp=kColor,kat='Specular')
mantraMantraSurfaceShaderATList.addAttr(an='useSpecMap1',dn='Use Specular Map',default=False,tp=om.MFnNumericData.kBoolean,kat='Specular|SpecularMap')
mantraMantraSurfaceShaderATList.addAttr(an='specMap1',dn='Specular Map',default="",tp=om.MFnData.kString,kat='Specular|SpecularMap')
mantraMantraSurfaceShaderATList.addAttr(an='reflectMapIntensity',dn='Specular Map Intensity',default=1.0,tp=om.MFnNumericData.kFloat,kat='Specular|SpecularMap')
mantraMantraSurfaceShaderATList.addAttr(an='specMapFilter1',dn='Spec Map Filter',values=textureFilterNames,default=0,tp=kEnum,kat='Specular|SpecularMap')
mantraMantraSurfaceShaderATList.addAttr(an='reflectMapWidth1',dn='Specular Map Width',default=1.0,tp=om.MFnNumericData.kFloat,kat='Specular|SpecularMap')
mantraMantraSurfaceShaderATList.addAttr(an='spec_angle',dn='Specular Angle',default=10.0,tp=om.MFnNumericData.kFloat,kat='Specular')
mantraMantraSurfaceShaderATList.addAttr(an='spec_aniso',dn='Specular Anisotropy',default=0.0,tp=om.MFnNumericData.kFloat,kat='Specular')
# Reflections
mantraMantraSurfaceShaderATList.addAttr(an='refl_objs',dn='Reflect Objects',default=True,tp=om.MFnNumericData.kBoolean,kat='Reflections')
mantraMantraSurfaceShaderATList.addAttr(an='refl_qual',dn='Reflection Quality',default=1.0,tp=om.MFnNumericData.kFloat,kat='Reflections')
mantraMantraSurfaceShaderATList.addAttr(an='refl_sep',dn='Separate Objects Reflection',default=False,tp=om.MFnNumericData.kBoolean,kat='Reflections')
mantraMantraSurfaceShaderATList.addAttr(an='refl_int',dn='Reflection Intensity',default=0.5,tp=om.MFnNumericData.kFloat,kat='Reflections')
mantraMantraSurfaceShaderATList.addAttr(an='refl_clr',dn='Reflection Color',default=[1.0, 1.0, 1.0],tp=kColor,kat='Reflections')
mantraMantraSurfaceShaderATList.addAttr(an='refl_angle',dn='Reflection Angle',default=0.0,tp=om.MFnNumericData.kFloat,kat='Reflections')
# Refractions
mantraMantraSurfaceShaderATList.addAttr(an='refr_enable',dn='Refractions Enable',default=False,tp=om.MFnNumericData.kBoolean,kat='Refractions')
mantraMantraSurfaceShaderATList.addAttr(an='ior_in',dn='IOR In',default=1.2,tp=om.MFnNumericData.kFloat,kat='Refractions')
mantraMantraSurfaceShaderATList.addAttr(an='ior_out',dn='IOR Out',default=1.0,tp=om.MFnNumericData.kFloat,kat='Refractions')
mantraMantraSurfaceShaderATList.addAttr(an='refr_model',dn='Refraction Model',values=lightModelNames,default=0,tp=kEnum,kat='Refractions')
mantraMantraSurfaceShaderATList.addAttr(an='refr_int',dn='Refraction Intensity',default=1.0,tp=om.MFnNumericData.kFloat,kat='Refractions')
mantraMantraSurfaceShaderATList.addAttr(an='refr_min',dn='Refraction Minimum',default=0.0,tp=om.MFnNumericData.kFloat,kat='Refractions')
mantraMantraSurfaceShaderATList.addAttr(an='refr_clr',dn='Refraction Color',default=[1.0, 1.0, 1.0],tp=kColor,kat='Refractions')
mantraMantraSurfaceShaderATList.addAttr(an='refr_angle',dn='Refraction Angle',default=0.0,tp=om.MFnNumericData.kFloat,kat='Refractions')
mantraMantraSurfaceShaderATList.addAttr(an='refr_aniso',dn='Refraction Anisotropy',default=0.0,tp=om.MFnNumericData.kFloat,kat='Refractions')
mantraMantraSurfaceShaderATList.addAttr(an='refr_qual',dn='Refraction Quality',default=1.0,tp=om.MFnNumericData.kFloat,kat='Refractions')
mantraMantraSurfaceShaderATList.addAttr(an='refr_lights',dn='Refract Lights',default=True,tp=om.MFnNumericData.kBoolean,kat='Refractions')
mantraMantraSurfaceShaderATList.addAttr(an='refr_objs',dn='Refract Objects',default=True,tp=om.MFnNumericData.kBoolean,kat='Refractions')
mantraMantraSurfaceShaderATList.addAttr(an='refr_thin',dn='Thin Film Refractions',default=False,tp=om.MFnNumericData.kBoolean,kat='Refractions')
mantraMantraSurfaceShaderATList.addAttr(an='atten_enable',dn='Attenuation Enable',default=False,tp=om.MFnNumericData.kBoolean,kat='Refractions')
mantraMantraSurfaceShaderATList.addAttr(an='atten_den',dn='Attenuation Density',default=0.5,tp=om.MFnNumericData.kFloat,kat='Refractions')
mantraMantraSurfaceShaderATList.addAttr(an='atten_clr',dn='Attenuation Color',default=[1.0, 1.0, 1.0],tp=kColor,kat='Refractions')
# Emission
mantraMantraSurfaceShaderATList.addAttr(an='emit_enable',dn='Emission Enable',default=False,tp=om.MFnNumericData.kBoolean,kat='Emission')
mantraMantraSurfaceShaderATList.addAttr(an='emit_int',dn='Emission Intensity',default=1.0,tp=om.MFnNumericData.kFloat,kat='Emission')
mantraMantraSurfaceShaderATList.addAttr(an='emit_clr',dn='Emission Color',default=[1.0, 1.0, 1.0],tp=kColor,kat='Emission')
mantraMantraSurfaceShaderATList.addAttr(an='emit_illum',dn='Emission Illuminates Objects',default=False,tp=om.MFnNumericData.kBoolean,kat='Emission')
# Opacity
mantraMantraSurfaceShaderATList.addAttr(an='opac_int',dn='Opacity Intensity',default=1.0,tp=om.MFnNumericData.kFloat,kat='Opacity')
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
class TestBatch(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.datastore.batch import Batch
return Batch
def _makeOne(self, dataset_id=None, connection=None):
return self._getTargetClass()(dataset_id=dataset_id,
connection=connection)
def test_ctor_missing_required(self):
from gcloud.datastore._testing import _monkey_defaults
with _monkey_defaults():
self.assertRaises(ValueError, self._makeOne)
self.assertRaises(ValueError, self._makeOne, dataset_id=object())
self.assertRaises(ValueError, self._makeOne, connection=object())
def test_ctor_explicit(self):
from gcloud.datastore._datastore_v1_pb2 import Mutation
_DATASET = 'DATASET'
connection = _Connection()
batch = self._makeOne(dataset_id=_DATASET, connection=connection)
self.assertEqual(batch.dataset_id, _DATASET)
self.assertEqual(batch.connection, connection)
self.assertTrue(batch._id is None)
self.assertTrue(isinstance(batch.mutation, Mutation))
self.assertEqual(batch._auto_id_entities, [])
def test_ctor_implicit(self):
from gcloud.datastore._testing import _monkey_defaults
from gcloud.datastore._datastore_v1_pb2 import Mutation
_DATASET = 'DATASET'
CONNECTION = _Connection()
with _monkey_defaults(connection=CONNECTION, dataset_id=_DATASET):
batch = self._makeOne()
self.assertEqual(batch.dataset_id, _DATASET)
self.assertEqual(batch.connection, CONNECTION)
self.assertTrue(batch._id is None)
self.assertTrue(isinstance(batch.mutation, Mutation))
self.assertEqual(batch._auto_id_entities, [])
def test_current(self):
_DATASET = 'DATASET'
connection = _Connection()
batch1 = self._makeOne(_DATASET, connection)
batch2 = self._makeOne(_DATASET, connection)
self.assertTrue(batch1.current() is None)
self.assertTrue(batch2.current() is None)
with batch1:
self.assertTrue(batch1.current() is batch1)
self.assertTrue(batch2.current() is batch1)
with batch2:
self.assertTrue(batch1.current() is batch2)
self.assertTrue(batch2.current() is batch2)
self.assertTrue(batch1.current() is batch1)
self.assertTrue(batch2.current() is batch1)
self.assertTrue(batch1.current() is None)
self.assertTrue(batch2.current() is None)
def test_add_auto_id_entity_w_partial_key(self):
_DATASET = 'DATASET'
connection = _Connection()
batch = self._makeOne(dataset_id=_DATASET, connection=connection)
entity = _Entity()
key = entity.key = _Key(_DATASET)
key._id = None
batch.add_auto_id_entity(entity)
self.assertEqual(batch._auto_id_entities, [entity])
def test_add_auto_id_entity_w_completed_key(self):
_DATASET = 'DATASET'
connection = _Connection()
batch = self._makeOne(dataset_id=_DATASET, connection=connection)
entity = _Entity()
entity.key = _Key(_DATASET)
self.assertRaises(ValueError, batch.add_auto_id_entity, entity)
def test_put_entity_wo_key(self):
_DATASET = 'DATASET'
connection = _Connection()
batch = self._makeOne(dataset_id=_DATASET, connection=connection)
self.assertRaises(ValueError, batch.put, _Entity())
def test_put_entity_w_key_wrong_dataset_id(self):
_DATASET = 'DATASET'
connection = _Connection()
batch = self._makeOne(dataset_id=_DATASET, connection=connection)
entity = _Entity()
entity.key = _Key('OTHER')
self.assertRaises(ValueError, batch.put, entity)
def test_put_entity_w_partial_key(self):
_DATASET = 'DATASET'
_PROPERTIES = {'foo': 'bar'}
connection = _Connection()
batch = self._makeOne(dataset_id=_DATASET, connection=connection)
entity = _Entity(_PROPERTIES)
key = entity.key = _Key(_DATASET)
key._id = None
batch.put(entity)
insert_auto_ids = list(batch.mutation.insert_auto_id)
self.assertEqual(len(insert_auto_ids), 1)
self.assertEqual(insert_auto_ids[0].key, key._key)
upserts = list(batch.mutation.upsert)
self.assertEqual(len(upserts), 0)
deletes = list(batch.mutation.delete)
self.assertEqual(len(deletes), 0)
self.assertEqual(batch._auto_id_entities, [entity])
def test_put_entity_w_completed_key(self):
_DATASET = 'DATASET'
_PROPERTIES = {
'foo': 'bar',
'baz': 'qux',
'spam': [1, 2, 3],
'frotz': [], # will be ignored
}
connection = _Connection()
batch = self._makeOne(dataset_id=_DATASET, connection=connection)
entity = _Entity(_PROPERTIES)
entity.exclude_from_indexes = ('baz', 'spam')
key = entity.key = _Key(_DATASET)
batch.put(entity)
insert_auto_ids = list(batch.mutation.insert_auto_id)
self.assertEqual(len(insert_auto_ids), 0)
upserts = list(batch.mutation.upsert)
self.assertEqual(len(upserts), 1)
upsert = upserts[0]
self.assertEqual(upsert.key, key._key)
props = dict([(prop.name, prop.value) for prop in upsert.property])
self.assertTrue(props['foo'].indexed)
self.assertFalse(props['baz'].indexed)
self.assertTrue(props['spam'].indexed)
self.assertFalse(props['spam'].list_value[0].indexed)
self.assertFalse(props['spam'].list_value[1].indexed)
self.assertFalse(props['spam'].list_value[2].indexed)
self.assertFalse('frotz' in props)
deletes = list(batch.mutation.delete)
self.assertEqual(len(deletes), 0)
def test_put_entity_w_completed_key_prefixed_dataset_id(self):
_DATASET = 'DATASET'
_PROPERTIES = {
'foo': 'bar',
'baz': 'qux',
'spam': [1, 2, 3],
'frotz': [], # will be ignored
}
connection = _Connection()
batch = self._makeOne(dataset_id=_DATASET, connection=connection)
entity = _Entity(_PROPERTIES)
entity.exclude_from_indexes = ('baz', 'spam')
key = entity.key = _Key('s~' + _DATASET)
batch.put(entity)
insert_auto_ids = list(batch.mutation.insert_auto_id)
self.assertEqual(len(insert_auto_ids), 0)
upserts = list(batch.mutation.upsert)
self.assertEqual(len(upserts), 1)
upsert = upserts[0]
self.assertEqual(upsert.key, key._key)
props = dict([(prop.name, prop.value) for prop in upsert.property])
self.assertTrue(props['foo'].indexed)
self.assertFalse(props['baz'].indexed)
self.assertTrue(props['spam'].indexed)
self.assertFalse(props['spam'].list_value[0].indexed)
self.assertFalse(props['spam'].list_value[1].indexed)
self.assertFalse(props['spam'].list_value[2].indexed)
self.assertFalse('frotz' in props)
deletes = list(batch.mutation.delete)
self.assertEqual(len(deletes), 0)
def test_delete_w_partial_key(self):
_DATASET = 'DATASET'
connection = _Connection()
batch = self._makeOne(dataset_id=_DATASET, connection=connection)
key = _Key(_DATASET)
key._id = None
self.assertRaises(ValueError, batch.delete, key)
def test_delete_w_key_wrong_dataset_id(self):
_DATASET = 'DATASET'
connection = _Connection()
batch = self._makeOne(dataset_id=_DATASET, connection=connection)
key = _Key('OTHER')
self.assertRaises(ValueError, batch.delete, key)
def test_delete_w_completed_key(self):
_DATASET = 'DATASET'
connection = _Connection()
batch = self._makeOne(dataset_id=_DATASET, connection=connection)
key = _Key(_DATASET)
batch.delete(key)
insert_auto_ids = list(batch.mutation.insert_auto_id)
self.assertEqual(len(insert_auto_ids), 0)
upserts = list(batch.mutation.upsert)
self.assertEqual(len(upserts), 0)
deletes = list(batch.mutation.delete)
self.assertEqual(len(deletes), 1)
self.assertEqual(deletes[0], key._key)
def test_delete_w_completed_key_w_prefixed_dataset_id(self):
_DATASET = 'DATASET'
connection = _Connection()
batch = self._makeOne(dataset_id=_DATASET, connection=connection)
key = _Key('s~' + _DATASET)
batch.delete(key)
insert_auto_ids = list(batch.mutation.insert_auto_id)
self.assertEqual(len(insert_auto_ids), 0)
upserts = list(batch.mutation.upsert)
self.assertEqual(len(upserts), 0)
deletes = list(batch.mutation.delete)
self.assertEqual(len(deletes), 1)
self.assertEqual(deletes[0], key._key)
def test_commit(self):
_DATASET = 'DATASET'
connection = _Connection()
batch = self._makeOne(dataset_id=_DATASET, connection=connection)
batch.commit()
self.assertEqual(connection._committed,
[(_DATASET, batch.mutation, None)])
def test_commit_w_auto_id_entities(self):
_DATASET = 'DATASET'
_NEW_ID = 1234
connection = _Connection(_NEW_ID)
batch = self._makeOne(dataset_id=_DATASET, connection=connection)
entity = _Entity({})
key = entity.key = _Key(_DATASET)
key._id = None
batch._auto_id_entities.append(entity)
batch.commit()
self.assertEqual(connection._committed,
[(_DATASET, batch.mutation, None)])
self.assertFalse(key.is_partial)
self.assertEqual(key._id, _NEW_ID)
def test_as_context_mgr_wo_error(self):
from gcloud.datastore.batch import _BATCHES
_DATASET = 'DATASET'
_PROPERTIES = {'foo': 'bar'}
connection = _Connection()
entity = _Entity(_PROPERTIES)
key = entity.key = _Key(_DATASET)
self.assertEqual(list(_BATCHES), [])
with self._makeOne(dataset_id=_DATASET,
connection=connection) as batch:
self.assertEqual(list(_BATCHES), [batch])
batch.put(entity)
self.assertEqual(list(_BATCHES), [])
insert_auto_ids = list(batch.mutation.insert_auto_id)
self.assertEqual(len(insert_auto_ids), 0)
upserts = list(batch.mutation.upsert)
self.assertEqual(len(upserts), 1)
self.assertEqual(upserts[0].key, key._key)
deletes = list(batch.mutation.delete)
self.assertEqual(len(deletes), 0)
self.assertEqual(connection._committed,
[(_DATASET, batch.mutation, None)])
def test_as_context_mgr_nested(self):
from gcloud.datastore.batch import _BATCHES
_DATASET = 'DATASET'
_PROPERTIES = {'foo': 'bar'}
connection = _Connection()
entity1 = _Entity(_PROPERTIES)
key1 = entity1.key = _Key(_DATASET)
entity2 = _Entity(_PROPERTIES)
key2 = entity2.key = _Key(_DATASET)
self.assertEqual(list(_BATCHES), [])
with self._makeOne(dataset_id=_DATASET,
connection=connection) as batch1:
self.assertEqual(list(_BATCHES), [batch1])
batch1.put(entity1)
with self._makeOne(dataset_id=_DATASET,
connection=connection) as batch2:
self.assertEqual(list(_BATCHES), [batch2, batch1])
batch2.put(entity2)
self.assertEqual(list(_BATCHES), [batch1])
self.assertEqual(list(_BATCHES), [])
insert_auto_ids = list(batch1.mutation.insert_auto_id)
self.assertEqual(len(insert_auto_ids), 0)
upserts = list(batch1.mutation.upsert)
self.assertEqual(len(upserts), 1)
self.assertEqual(upserts[0].key, key1._key)
deletes = list(batch1.mutation.delete)
self.assertEqual(len(deletes), 0)
insert_auto_ids = list(batch2.mutation.insert_auto_id)
self.assertEqual(len(insert_auto_ids), 0)
upserts = list(batch2.mutation.upsert)
self.assertEqual(len(upserts), 1)
self.assertEqual(upserts[0].key, key2._key)
deletes = list(batch2.mutation.delete)
self.assertEqual(len(deletes), 0)
self.assertEqual(connection._committed,
[(_DATASET, batch2.mutation, None),
(_DATASET, batch1.mutation, None)])
def test_as_context_mgr_w_error(self):
from gcloud.datastore.batch import _BATCHES
_DATASET = 'DATASET'
_PROPERTIES = {'foo': 'bar'}
connection = _Connection()
entity = _Entity(_PROPERTIES)
key = entity.key = _Key(_DATASET)
self.assertEqual(list(_BATCHES), [])
try:
with self._makeOne(dataset_id=_DATASET,
connection=connection) as batch:
self.assertEqual(list(_BATCHES), [batch])
batch.put(entity)
raise ValueError("testing")
except ValueError:
pass
self.assertEqual(list(_BATCHES), [])
insert_auto_ids = list(batch.mutation.insert_auto_id)
self.assertEqual(len(insert_auto_ids), 0)
upserts = list(batch.mutation.upsert)
self.assertEqual(len(upserts), 1)
self.assertEqual(upserts[0].key, key._key)
deletes = list(batch.mutation.delete)
self.assertEqual(len(deletes), 0)
self.assertEqual(connection._committed, [])
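# Hedged illustration (not part of the original test suite) of the usage pattern the
# context-manager tests above exercise: entering a Batch makes it the current batch,
# put() stages an upsert on batch.mutation, and leaving the block commits through the
# connection. It reuses the _Connection/_Entity/_Key stubs defined below.
def _example_batch_usage():
    from gcloud.datastore.batch import Batch
    connection = _Connection()
    entity = _Entity({'foo': 'bar'})
    entity.key = _Key('DATASET')
    with Batch(dataset_id='DATASET', connection=connection) as batch:
        batch.put(entity)  # staged as an upsert until the block exits
    return connection._committed  # [('DATASET', batch.mutation, None)]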
class _CommitResult(object):
def __init__(self, *new_keys):
self.insert_auto_id_key = [_KeyPB(key) for key in new_keys]
class _PathElementPB(object):
def __init__(self, id):
self.id = id
class _KeyPB(object):
def __init__(self, id):
self.path_element = [_PathElementPB(id)]
class _Connection(object):
_marker = object()
_save_result = (False, None)
def __init__(self, *new_keys):
self._commit_result = _CommitResult(*new_keys)
self._committed = []
def commit(self, dataset_id, mutation, transaction_id):
self._committed.append((dataset_id, mutation, transaction_id))
return self._commit_result
class _Entity(dict):
key = None
exclude_from_indexes = ()
class _Key(object):
_MARKER = object()
_kind = 'KIND'
_key = 'KEY'
_path = None
_id = 1234
_stored = None
def __init__(self, dataset_id):
self.dataset_id = dataset_id
@property
def is_partial(self):
return self._id is None
def to_protobuf(self):
from gcloud.datastore import _datastore_v1_pb2
key = self._key = _datastore_v1_pb2.Key()
# Don't assign it, because it will just get ripped out
# key.partition_id.dataset_id = self.dataset_id
element = key.path_element.add()
element.kind = self._kind
if self._id is not None:
element.id = self._id
return key
def completed_key(self, new_id):
assert self.is_partial
self._id = new_id
# Copyright (C) 2007 - 2009 The MITRE Corporation. See the toplevel
# file LICENSE for license terms.
# This file contains all the basic pieces to do bootstrapping:
# build a corpus, build a model, run a test. The experiment engine
# is an extension of this now.
import sys, os, shutil, random, glob, time
# I don't think this is necessary, but it doesn't hurt.
random.seed()
# We actually need to create a better abstraction of document
# sets - we may end up using them in multiple places, but the first
# use is going to be for the experiment engine and cross-validation.
# These document sets should support a bunch of things:
# - randomly split into multiple sets
# - consolidate to a single directory, with disambiguated file names
# - instantiate from a number of different types of sources, such as
# a list of files, a file which contains a list of files, or pattern + prefix
class DocumentSetError(Exception):
pass
#
# The document set object.
#
# filePats: a list of glob expressions which pick out
# a candidate set of training docs.
# fileFile: a file which contains filenames, one per line.
# fileList: a list of the files directly.
# Three ways of providing the list of files: filePats (with an
# optional prefix), a file of filenames, or a list of the files directly.
class DocumentSet:
def __init__(self, corpusName, partitionFile = None, filePats = None,
fileFile = None, fileList = None,
prefix = None,
partitions = None, partitionIsFixed = False,
flushedStdout = sys.stdout):
self.corpusName = corpusName
# Can't use sets, because I really need the order to be maintained.
self._initVars()
if partitionFile is not None:
self.loadPartition(partitionFile)
else:
self._populate(filePats, fileFile, fileList, prefix, flushedStdout)
self._partition(partitions, partitionIsFixed)
def _initVars(self):
self.files = []
self._filesSeen = {}
self.partitionDict = None
self.deconflictions = None
def _populate(self, filePats, fileFile, fileList, prefix, flushedStdout):
# In the general case, who cares how many of these you use.
if (fileList is None) and (filePats is None) and (fileFile is None):
raise DocumentSetError, "neither fileFile nor filePats nor fileList is specified"
if prefix is not None:
if not os.path.isabs(prefix):
raise DocumentSetError, "prefix must be absolute"
if fileList:
for f in fileList:
if os.path.isabs(f):
self._addFile(f)
elif prefix is None:
raise DocumentSetError, "fileList contains relative pathnames, but there's no prefix"
else:
self._addFile(os.path.join(prefix, f))
elif fileFile:
fp = open(fileFile, "r")
for f in [line.strip() for line in fp.readlines()]:
if os.path.isabs(f):
self._addFile(f)
else:
# The justification for this is that a file which contains
# pathnames will have been archived by the save() method.
raise DocumentSetError, "fileFile must contain absolute pathnames"
fp.close()
else:
files = []
if type(filePats) is type(""):
filePats = [filePats]
# Remove duplicates
for p in filePats:
i = 0
if not os.path.isabs(p):
if prefix is None:
raise DocumentSetError, "filePats contains relative pathnames, but there's no prefix"
p = os.path.join(prefix, p)
for f in glob.glob(p):
if not os.path.isfile(f):
raise DocumentSetError, ("file %s specified by pattern %s is not a regular file" % (f, p))
self._addFile(f)
i += 1
if i == 0:
print >> flushedStdout, "Warning: pattern %s did not pick out any files" % p
if not self.files:
raise DocumentSetError, "document set is empty"
def _addFile(self, f):
if not self._filesSeen.has_key(f):
self.files.append(f)
self._filesSeen[f] = True
# What does a partition look like? It's a list of pairs, where
# the first element is the name of the partition, and the second is
# the share. The share is a number. All the numbers are normalized to
# a share of 1, and then we randomize the list of files, and
# select slices of the randomized list.
# If partitionIsFixed is true, the number is an actual number of
# files, rather than a share. -1 means "everything else".
FIXED_PARTITION_REMAINDER = -1
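# Illustrative partition specs (assumed values, not from the original code):
#   share-based:  partitions = [("train", 4.0), ("test", 1.0)]
#       -> roughly 80% of the shuffled files land in "train", the rest in "test".
#   fixed-count:  partitions = [("test", 20), ("train", FIXED_PARTITION_REMAINDER)]
#       with partitionIsFixed = True -> exactly 20 files in "test",
#       everything else in "train".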
def _partition(self, partitions, partitionIsFixed):
# Save the partition list in NORMALIZED form.
if partitions is not None:
self.partitionDict = {}
files = self.files[:]
random.shuffle(files)
trueP = []
if partitionIsFixed:
remainder = None
start = 0
for pName, total in partitions:
if total == self.FIXED_PARTITION_REMAINDER:
if remainder is not None:
raise DocumentSetError, ("multiple remainder pairs specified: %s, %s" % (pName, remainder))
remainder = pName
elif start + total > len(files):
raise DocumentSetError, ("number of requested files for fixed partition '%s' exceeds the number of available files" % pName)
else:
trueP.append([pName, start, start + total])
start += total
# Not all the files might be used.
if remainder is not None:
trueP.append([remainder, start, len(files)])
else:
total = float(sum([x[1] for x in partitions]))
# Now, we convert the numbers in the partitions
# into shares of the size of the number of files.
# The last one has to grab the remainder. I don't
# want to make this too complicated.
i = 0.0
for pName, share in partitions:
j = i + (len(files) * (share / total))
trueP.append([pName, int(round(i)), int(round(j))])
i = j
trueP[-1][-1] = len(files)
for pName, start, end in trueP:
self.partitionDict[pName] = files[start:end]
else:
# Make sure to randomize the order.
self.files = self.files[:]
random.shuffle(self.files)
def getFiles(self, partition = None):
if partition is not None:
if self.partitionDict is None:
raise KeyError, partition
else:
return self.partitionDict[partition]
else:
return self.files
def savePaths(self, outPath):
fp = open(outPath, "w")
for f in self.files:
fp.write(f + "\n")
fp.close()
def savePartition(self, outPath):
fp = open(outPath, "w")
if self.partitionDict is not None:
for name, files in self.partitionDict.items():
for f in files:
fp.write("%s,%s\n" % (name, f))
fp.close()
def loadPartition(self, path):
self.partitionDict = {}
fp = open(path, "r")
for line in fp.readlines():
[name, f] = line.strip().split(",", 1)
try:
self.partitionDict[name].append(f)
except KeyError:
self.partitionDict[name] = [f]
self._addFile(f)
fp.close()
def _deconflict(self):
if self.deconflictions is not None:
return
self.deconflictions = {}
duplicateBasenameDict = {}
basenamesToMap = []
for f in self.files:
# This will be overwritten in some cases later when we figure
# out what needs to be deconflicted.
b = os.path.basename(f)
self.deconflictions[f] = b
if duplicateBasenameDict.has_key(b):
if len(duplicateBasenameDict[b]) == 1:
basenamesToMap.append(b)
duplicateBasenameDict[b].append(f)
else:
duplicateBasenameDict[b] = [f]
if basenamesToMap:
for b in basenamesToMap:
# There will be no duplicates in the list, because
# document sets have duplicates removed.
dirPrefix = os.path.dirname(os.path.commonprefix(duplicateBasenameDict[b]))
for f in duplicateBasenameDict[b]:
suff = f[len(dirPrefix):]
# The new filename is this suffix with all the dir
# components replaced.
stack = []
split = os.path.split(suff)
while split[0] != suff:
stack[0:0] = [split[1]]
suff = split[0]
split = os.path.split(suff)
newBasename = "_".join(stack)
# And replace it in the list of files in the document set.
self.deconflictions[f] = newBasename
# Consolidate a set of documents into a directory.
# Use deconfliction.
def consolidate(self, dir, fn):
self._deconflict()
for f in self.files:
fn(f, os.path.join(dir, self.deconflictions[f]))
# In the default case, these two are no-ops. They're required for the
# interaction with a potential parent bootstrapping or experiment engine.
def setContext(self, engine):
pass
def prepare(self, *args, **kw):
pass
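# Hedged usage sketch (not part of the original module): builds a DocumentSet from a
# glob pattern, splits it 80/20 into named partitions, and saves the partition so a
# later run can reload exactly the same split. The corpus name, pattern and paths
# below are made up for illustration.
def _exampleDocumentSetUsage():
    dset = DocumentSet("demoCorpus",
                       filePats=["*.json"], prefix="/data/demo",
                       partitions=[("train", 4.0), ("test", 1.0)])
    trainFiles = dset.getFiles("train")
    dset.savePartition("/data/demo/demo_partition.csv")
    # A later run can reproduce the same split with:
    #   DocumentSet("demoCorpus", partitionFile="/data/demo/demo_partition.csv")
    return trainFiles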
#
# And now, something to wrap around the ModelBuilder to be used in
# bootstrapping systems. This is simplified from the original version in
# CarafeTrain.py, which does all sorts of frightening caching of settings, etc.
#
class TrainingRunError(Exception):
pass
# If we're going to be able to specify a model builder
# directly in the bootstrapper, we're going to have to override some of the default
# model builder creation.
import MAT.PluginMgr
class ModelInfo(MAT.PluginMgr.ModelInfo):
def __init__(self, task, mBClass):
self.task = task
self.modelClass = mBClass
self.configName = ""
self.modelBuildSettings = {}
class TrainingRunInstance:
def __init__(self, engine, template, modelSubdir, trainingCorpus,
engineSettings = None, corpusSettings = None, builderClass = None):
self.modelName = template.modelName
self.modelSubdir = modelSubdir
self.configName = template.configName
self.builderClass = builderClass
# I think these will be actual training corpora.
self.trainingSet = trainingCorpus
self.engineSettings = engineSettings
self.corpusSettings = corpusSettings
self.flushedStdout = template.flushedStdout
self.template = template
self.setContext(engine)
# In this case, there really does need to be a context set,
# and the engine needs to have a task, etc.
def setContext(self, engine):
self._updateFromEngine(engine)
if not os.path.exists(self.modelDir):
print >> self.flushedStdout, "Creating model directory", self.modelDir, "..."
os.makedirs(self.modelDir)
self._configureBuilder()
# The trainer requires all the files in a single directory, so we
# take the files and put them somewhere random.
# The format of the model output dir is a file containing
# the file list at the toplevel, and a subdir for each increment.
# I'm going to use the modelOutputDir also as the temp location,
# because that's the safest thing to do - that location will have
# to be appropriately protected for sensitive data.
def train(self, interleave = False, collectCorpusStatistics = False):
import MAT.ExecutionContext
with MAT.ExecutionContext.Tmpdir(specifiedParent = self.modelDir) as tmpDir:
mName = self.modelName
print >> self.flushedStdout, "Building model ..."
t = time.time()
self.builder.run(os.path.join(self.modelDir, "model"),
self.trainingSet.getFiles(),
tmpDir = tmpDir,
collectCorpusStatistics = collectCorpusStatistics,
oStream = self.flushedStdout)
print >> self.flushedStdout, "Build completed in %.2f seconds." % (time.time() - t,)
if interleave:
for rTemplate in self.engine.runList:
if rTemplate.modelName == mName:
for r in rTemplate.yieldInstances(self.engine):
r.setContext(self.engine, self)
r.prepareTest()
rName = r.runName
print >> self.flushedStdout, "\n### Performing run", rName, "in directory", r.runSubdir
r.test()
print >> self.flushedStdout, "### Done."
r.finishTest()
#
# Internal methods which can be consumed.
#
def _updateFromEngine(self, engine):
self.engine = engine
self.modelDir = engine.getModelDir(self)
def _configureBuilder(self):
task = self.engine.task
# Use a local class.
if self.builderClass:
mBClass = self.builderClass
buildInfo = ModelInfo(task, mBClass)
else:
# Next, we add the properties.
buildInfo = task.getModelInfo(configName = self.configName)
mBClass = buildInfo.getModelClass()
if mBClass is None:
raise TrainingRunError, ("model %s has no engine for the task" % self.modelName)
builderSettings = self.engineSettings or {}
self.builder = buildInfo.buildModelBuilder(**builderSettings)
# The challenge here is keeping enough information about the
# corpora to be able to ensure that (a) corpus increments are guaranteed to
# be successively larger slices of the SAME ORDER, and (b) corpus balances
# can be preserved. And then there's the issue about restart - but that's
# only in CarafeTrain, and I think I have to address it there.
# So I think I'll collect them as pairs of dsetname/partition and file,
# THEN shuffle them. Then that will be what I'll use when I select from it.
class TrainingRun:
def __init__(self, mName, trainingCorpora = None,
engineSettings = None, corpusSettings = None,
configName = None,
builderClass = None,
iterators = None,
flushedStdout = sys.stdout,
instanceClass = TrainingRunInstance):
self.modelName = mName
self.iterators = iterators
if iterators:
for iterator in iterators[:-1]:
if iterator.mustBeInnermost:
raise TrainingRunError, ("Iterator %s must be innermost, but is not last" % iterator)
self.configName = configName
self.builderClass = builderClass
self.trainingCorpora = trainingCorpora
self.engineSettings = engineSettings
self.corpusSettings = corpusSettings
self.flushedStdout = flushedStdout
self.instanceClass = instanceClass
# We collect the instances. If the run
# asks for them and you can use the cache,
# then use the cache.
self._cached = False
self.allInstances = []
# Note that because _createTrainingCorpus is called repeatedly,
# if there's iteration, on the output of _collectTrainingSet, there's
# no need to guarantee an order in _collectTrainingSet.
def _collectTrainingSet(self, engine):
elts = []
for dSetName, partition in self.trainingCorpora:
elts += [((dSetName, partition), f) for f in engine.getCorpus(dSetName).getFiles(partition)]
random.shuffle(elts)
return elts
def _createTrainingCorpus(self, tSetElements, size = None):
# VERY IMPORTANT to trim BEFORE DocumentSet is created,
# since DocumentSet does a random shuffle.
if size is not None:
tSetElements = tSetElements[:size]
tFiles = [p[1] for p in tSetElements]
return DocumentSet(None, fileList = tFiles, flushedStdout = self.flushedStdout)
def _satisfyIterators(self, engine, iterators, subdirName, **kw):
if not iterators:
# We've reached the end.
yield self._createInstance(engine, subdirName, **kw)
else:
for newSubdirName, newKw in iterators[0](subdirName, **kw):
for inst in self._satisfyIterators(engine, iterators[1:], newSubdirName, **newKw):
yield inst
# both tSetElements and engineSettings are keywords. The former is a list
# of pairs ((dSetName, partition), f), and the second is guaranteed to have a dictionary value.
def _createInstance(self, engine, modelSubdir, tSetElements = None, engineSettings = None,
corpusSettings = None, builderClass = None,
# Soak up the extra context that was passed in to the iterators
**kw):
return self.instanceClass(engine, self, modelSubdir,
self._createTrainingCorpus(tSetElements, **corpusSettings),
engineSettings = engineSettings, builderClass = builderClass)
#
# The sole public method
#
# We collect the objects and record the cache as done, so if we ever
# loop over the runs instead of the models, it'll work fine.
def yieldInstances(self, engine, **kw):
if self._cached:
for i in self.allInstances:
yield i
else:
tSetElements = self._collectTrainingSet(engine)
for mInst in self._satisfyIterators(engine, self.iterators or [], self.modelName,
tSetElements = tSetElements,
engineSettings = self.engineSettings or {},
corpusSettings = self.corpusSettings or {},
builderClass = self.builderClass,
**kw):
self.allInstances.append(mInst)
yield mInst
self._cached = True
#
# Next, the TestRun object.
#
import MAT.DocumentIO, MAT.ToolChain
_jsonIO = MAT.DocumentIO.getDocumentIO('mat-json')
_rawUTF8 = MAT.DocumentIO.getDocumentIO('raw', encoding = 'utf-8')
class TestRunError(Exception):
pass
class TestRunInstance:
def __init__(self, engine, template, runSubdir, engineOptions = None):
self.runName = template.runName
self.runSubdir = runSubdir
self.engineOptions = engineOptions
self.enginePrepOptions = template.enginePrepOptions
self.flushedStdout = template.flushedStdout
self.template = template
self.testSet = template.testSet
# I was hoping that this could be called from __init__, but
# it has to wait until the model is created, which is sometimes inside
# the run creation loop.
def setContext(self, engine, mInstance):
self.model = mInstance
self.engine = engine
self.runDir = os.path.join(self.engine.getRunDir(self), self.model.modelSubdir)
def prepareTest(self, **kw):
print >> self.flushedStdout, "\n### Preparing run", self.runName
runInputTestDir = os.path.join(self.runDir, "run_input")
if os.path.isdir(runInputTestDir):
shutil.rmtree(runInputTestDir)
self._prepareTestCorpus()
def test(self):
modelDir = os.path.join(self.model.modelDir)
modelFile = os.path.join(modelDir, "model")
hypDir = os.path.join(self.runDir, "hyp")
os.makedirs(hypDir)
self._runTest(modelFile, hypDir, "Creating hypothesis...")
def finishTest(self):
pass
#
# Internal functions available for reuse.
#
def _runTest(self, modelFile, hypDir, msg):
matEngineOptions = self.engineOptions
runInputTestDir = os.path.join(self.runDir, "run_input")
task = self.engine.task
for key in ["tagger_local", "tagger_model", "input_file_type", "output_encoding",
"output_file_type", "input_encoding", "input_dir", "input_file",
"output_dir", "output_file", "output_fsuff", "input_file_re"]:
if matEngineOptions.has_key(key):
raise TestRunError, ("%s not permitted in run settings" % key)
# Now, pull over the input file information from the prep options,
# if applicable.
inputEncoding = "utf-8"
inputFileType = "raw"
if self.enginePrepOptions is not None:
if self.enginePrepOptions.has_key("output_file_type"):
inputFileType = self.enginePrepOptions["output_file_type"]
if self.enginePrepOptions.has_key("output_encoding"):
inputEncoding = self.enginePrepOptions["output_encoding"]
print >> self.flushedStdout, msg
print >> self.flushedStdout, "Invoking MATEngine:", " ".join(['%s: "%s"' % pair for pair in matEngineOptions.items()])
# The workflow had better be there. This will raise an error if it
# isn't, most likely.
e = MAT.ToolChain.MATEngine(taskObj = task, workflow = matEngineOptions.get("workflow"))
e.Run(tagger_local = True, tagger_model = modelFile,
input_file_type = inputFileType, input_encoding = inputEncoding,
output_file_type = "mat-json", input_dir = runInputTestDir,
output_dir = hypDir, output_fsuff = ".tag.json", **matEngineOptions)
def _prepareTestCorpus(self):
expEngine = self.engine
task = expEngine.task
testSet = self.testSet
# Create preprocessed versions of the test files.
# If there's no enginePrepOptions, just make them raw.
print >> self.flushedStdout, "Preparing test files..."
runInputTestDir = os.path.join(self.runDir, "run_input")
os.makedirs(runInputTestDir)
if self.enginePrepOptions is None:
# Let's use UTF-8, since we know that it works.
testSet.consolidate(runInputTestDir,
lambda inf, outf: _rawUTF8.writeToTarget(_jsonIO.readFromSource(inf, taskSeed = task), outf + ".prepped"))
else:
testSet.consolidate(runInputTestDir,
lambda inf, outf: shutil.copy(inf, outf + ".prepinput"))
# Invoke the MATEngine. output_file_type is required. That's pretty much it.
for key in ["tagger_local", "tagger_model", "input_file_type",
"input_encoding", "input_dir", "input_file",
"output_dir", "output_file", "output_fsuff", "input_file_re"]:
if self.enginePrepOptions.has_key(key):
raise TestRunError, ("%s not permitted in run prep settings" % key)
if not self.enginePrepOptions.has_key("output_file_type"):
raise TestRunError, "output_file_type attribute required in run prep settings"
if self.enginePrepOptions["output_file_type"] not in ["raw", "mat-json"]:
raise TestRunError, "output_file_type attribute in run prep settings must be either raw or mat-json"
print >> self.flushedStdout, "Invoking MATEngine:", " ".join(['%s: "%s"' % pair for pair in self.enginePrepOptions.items()])
# The workflow had better be there. This will raise an error if it
# isn't, most likely.
e = MAT.ToolChain.MATEngine(taskObj = task, workflow = self.enginePrepOptions.get("workflow"))
# Use the directory temporarily.
e.Run(input_file_type = "mat-json",
input_dir = runInputTestDir,
output_dir = runInputTestDir,
input_file_re = "^.*\.prepinput$",
output_fsuff = ".postprocessed", **self.enginePrepOptions)
# Done. Remove all files which end with .prepinput, strip
# .postprocessed.
for b in testSet.deconflictions.values():
os.remove(os.path.join(runInputTestDir, b + ".prepinput"))
os.rename(os.path.join(runInputTestDir, b + ".prepinput.postprocessed"),
os.path.join(runInputTestDir, b + ".prepped"))
# Iterators can't apply to the test set. So that should be prepared
# when the test run is asked to yield instances.
class TestRun:
def __init__(self, rName, model = None, testCorpora = None,
engineOptions = None,
iterators = None,
enginePrepOptions = None, flushedStdout = sys.stdout,
instanceClass = TestRunInstance):
self.runName = rName
self.modelName = model
self.testCorpora = testCorpora
self.engineOptions = engineOptions
self.enginePrepOptions = enginePrepOptions
self.testSet = None
self.flushedStdout = flushedStdout
self.iterators = iterators
if iterators:
for iterator in iterators[:-1]:
if iterator.mustBeInnermost:
raise TrainingRunError, ("Iterator %s must be innermost, but is not last" % iterator)
self.allInstances = []
self.instanceClass = instanceClass
def _satisfyIterators(self, engine, iterators, subdirName, **kw):
if not iterators:
# We've reached the end.
yield self._createInstance(engine, subdirName, **kw)
else:
for newSubdirName, newKw in iterators[0](subdirName, **kw):
for inst in self._satisfyIterators(engine, iterators[1:], newSubdirName, **newKw):
yield inst
# engineOptions is a keyword, guaranteed to have a dictionary value.
def _createInstance(self, engine, runSubdir, engineOptions = None,
# Soak up the extra context that was passed in to the iterators
**kw):
return self.instanceClass(engine, self, runSubdir,
engineOptions = engineOptions)
def _configureTestCorpus(self, engine):
tFiles = []
for corpusName, partition in self.testCorpora:
corpus = engine.getCorpus(corpusName)
tFiles += corpus.getFiles(partition = partition)
self.testSet = DocumentSet(None, fileList = tFiles, flushedStdout = self.flushedStdout)
#
# The sole public method
#
def yieldInstances(self, engine, **kw):
# We should prepare the test set here, but I'm not
# sure where to put it yet. Let's just suppress that until the
# instance is created. That will be a bunch of extra work
# in the iterative cases, but no more than what's currently
# being done. I don't think we should allow the option
# of iterating on the test prep. For now, we just prepare the
# test set.
self._configureTestCorpus(engine)
for rInst in self._satisfyIterators(engine, self.iterators or [], self.runName,
engineOptions = self.engineOptions,
**kw):
self.allInstances.append(rInst)
yield rInst
#
# Some basic iterators
#
# Each iterator has a __call__() method, which should be a
# generator.
import re
class BootstrapIterator:
mustBeInnermost = False
def _newDirName(self, curDirName, attr, val):
return "%s_%s_%s" % (curDirName, re.sub("\W", "_", str(attr)),
re.sub("\W", "_", str(val)))
class IncrementIterator(BootstrapIterator):
def __init__(self, optionKey, attribute, startVal, endVal, increment, forceLast = False):
self.optionKey = optionKey
self.attribute = attribute
self.startVal = startVal
self.endVal = endVal
self.increment = increment
self.forceLast = forceLast
def __call__(self, curSubdirName, **kw):
v = self.startVal
if not kw.has_key(self.optionKey):
return
else:
while v <= self.endVal:
# Copy the arguments, and the value of the option key.
newKw = kw.copy()
newKw[self.optionKey] = newKw[self.optionKey].copy()
newKw[self.optionKey][self.attribute] = v
yield self._newDirName(curSubdirName, self.attribute, v), newKw
if self.forceLast and (v < self.endVal) and ((v + self.increment) > self.endVal):
v = self.endVal
else:
v += self.increment
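# Illustrative sketch (the option key and attribute are hypothetical): an
# IncrementIterator("engineSettings", "max_iterations", 5, 15, 5) applied to
# a subdir named "myModel" would yield the subdir names
# myModel_max_iterations_5, myModel_max_iterations_10, myModel_max_iterations_15,
# each paired with a copy of the keywords whose engineSettings dict has
# max_iterations set to that value.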
# ONLY used with the corpus.
class CorpusSizeIterator(IncrementIterator):
def __init__(self, increment, startVal = None, endVal = None, forceLast = False):
IncrementIterator.__init__(self, "corpusSettings", "size", startVal or increment, endVal, increment, forceLast = forceLast)
def __call__(self, curSubdirName, tSetElements = None, **kw):
if self.endVal is None:
self.endVal = len(tSetElements)
for d, newKw in IncrementIterator.__call__(self, curSubdirName, tSetElements = tSetElements, **kw):
yield d, newKw
class ValueIterator(BootstrapIterator):
def __init__(self, optionKey, attribute, valueList):
self.optionKey = optionKey
self.attribute = attribute
self.valueList = valueList
def __call__(self, curSubdirName, **kw):
if not kw.has_key(self.optionKey):
return
else:
for v in self.valueList:
# Copy the arguments, and the value of the option key.
newKw = kw.copy()
newKw[self.optionKey] = newKw[self.optionKey].copy()
newKw[self.optionKey][self.attribute] = v
yield self._newDirName(curSubdirName, self.attribute, v), newKw
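# A minimal wiring sketch, assuming "engine" is a Bootstrapper and the corpus
# and model names are hypothetical:
#
#   run = TrainingRun("myModel",
#                     trainingCorpora = [("myCorpus", "train")],
#                     iterators = [CorpusSizeIterator(50)],
#                     flushedStdout = sys.stdout)
#   for mInst in run.yieldInstances(engine):
#       mInst.train()
#
# Each yielded TrainingRunInstance builds its model in a subdir named by the
# iterators (e.g. myModel_size_50, myModel_size_100, ...).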
#
# And finally, the bootstrapper itself.
#
class BootstrapError(Exception):
pass
class Bootstrapper:
def __init__(self, dir = None, task = None,
corpora = None, models = None, runs = None,
corpusDirs = None, modelDirs = None, runDirs = None,
flushedStdout = sys.stdout):
self.flushedStdout = flushedStdout
self.dir = dir
if dir is None:
raise BootstrapError, "no dir specified"
self.task = task
if task is None:
raise BootstrapError, "no task specified"
# A table of the mapping from names to
# corpora.
self.corporaTable = {}
# A list of corpora.
self.corporaList = []
# A table of the current mapping from names
# to model set templates.
self.modelSetTable = {}
# A list of model set templates.
self.modelSetList = []
# A table of the current mapping from names
# to training run templates.
self.runTable = {}
# A list of run templates.
self.runList = []
self.corpusDirs = corpusDirs or {}
self.modelDirs = modelDirs or {}
self.runDirs = runDirs or {}
# Postpone setting the context until the run() method.
if corpora is not None:
for corpus in corpora:
cName = corpus.corpusName
if self.corporaTable.has_key(cName):
raise BootstrapError, ("duplicate corpus name '%s'" % cName)
self.corporaTable[cName] = corpus
self.corporaList.append(corpus)
if models is not None:
for model in models:
mName = model.modelName
if self.modelSetTable.has_key(mName):
raise BootstrapError, ("duplicate model set name '%s'" % mName)
for cName, mPart in model.trainingCorpora:
if not self.corporaTable.has_key(cName):
raise BootstrapError, ("model '%s' requires unknown corpus '%s'" % (mName, cName))
self.modelSetTable[mName] = model
self.modelSetList.append(model)
if runs is not None:
for run in runs:
rName = run.runName
if self.runTable.has_key(rName):
raise BootstrapError, ("duplicate run name '%s'" % rName)
if not self.modelSetTable.has_key(run.modelName):
raise BootstrapError, ("run %s requires unknown model set %s" % (rName, run.modelName))
for rCorpus, rPart in run.testCorpora:
if not self.corporaTable.has_key(rCorpus):
raise BootstrapError, ("run %s requires unknown corpus %s" % (rName, rCorpus))
self.runTable[rName] = run
self.runList.append(run)
# c is a DocumentSet.
def getCorpusDir(self, c):
cName = c.corpusName
cSubdir = cName
# Impose directory defaults. If dir is absent, use
# "corpora". If dir isn't absolute, prepend the experiment engine dir.
try:
cDir = self.corpusDirs[cName]
except KeyError:
cDir = "corpora"
if not os.path.isabs(cDir):
cDir = os.path.join(self.dir, cDir)
return os.path.join(cDir, cSubdir)
# m is a TrainingRunInstance.
def getModelDir(self, m):
mName = m.modelName
mSubdir = m.modelSubdir
try:
mDir = self.modelDirs[mName]
except KeyError:
mDir = "model_sets"
if not os.path.isabs(mDir):
mDir = os.path.join(self.dir, mDir)
return os.path.join(mDir, mSubdir)
# mTemplate is a TrainingRun.
def getModelDirPrefix(self, mTemplate):
mName = mTemplate.modelName
try:
mDir = self.modelDirs[mName]
except KeyError:
mDir = "model_sets"
if not os.path.isabs(mDir):
mDir = os.path.join(self.dir, mDir)
return os.path.join(mDir, mName)
# r is a TestRunInstance.
def getRunDir(self, r):
rName = r.runName
rSubdir = r.runSubdir
try:
rDir = self.runDirs[rName]
except KeyError:
rDir = "runs"
if not os.path.isabs(rDir):
rDir = os.path.join(self.dir, rDir)
return os.path.join(rDir, rSubdir)
# rTemplate is a TestRun.
def getRunDirPrefix(self, rTemplate):
rName = rTemplate.runName
try:
rDir = self.runDirs[rName]
except KeyError:
rDir = "runs"
if not os.path.isabs(rDir):
rDir = os.path.join(self.dir, rDir)
return os.path.join(rDir, rName)
def getCorpus(self, cName):
return self.corporaTable[cName]
def getModel(self, mName):
return self.modelSetTable[mName]
# So the new workflow is this.
def run(self, interleave = True):
# To run it, we need to prepare each corpus, build each model,
# execute each run.
origT = time.time()
print >> self.flushedStdout, "Run began at", time.ctime(origT)
hitError = False
from MAT.ExecutionContext import _DEBUG
try:
try:
# So the idea is that we start with the runs,
# iterating through the corpora and then models.
# Prepare each corpus.
for c in self.corporaList:
c.setContext(self)
cDir = self.getCorpusDir(c)
print >> self.flushedStdout, "\n### Preparing corpus", c.corpusName
c.prepare()
print >> self.flushedStdout, "### Done."
# We can't do interleaved scoring runs if some of the models are
# already done. So I need to check first.
for mTemplate in self.modelSetList:
for m in mTemplate.yieldInstances(self):
print >> self.flushedStdout, "\n### Building model set", m.modelName, "in directory", m.modelSubdir
# If interleave is True, we'll do the runs.
m.train(interleave = interleave)
print >> self.flushedStdout, "### Done."
if not interleave:
# Perform each run.
for rTemplate in self.runList:
mTemplate = self.getModel(rTemplate.modelName)
for r in rTemplate.yieldInstances(self):
for m in mTemplate.yieldInstances(self):
r.setContext(self, m)
r.prepareTest()
print >> self.flushedStdout, "\n### Performing run", r.runName, "in directory", r.runSubdir
r.test()
print >> self.flushedStdout, "### Done."
except Exception, e:
if _DEBUG:
hitError = True
raise
finally:
if not (_DEBUG and hitError):
self.finish(origT)
def finish(self, origT):
pass
|
|
import core.modules
import core.modules.module_registry
from core.modules.vistrails_module import Module, ModuleError
from Array import *
import copy
import scipy
import numpy
from scipy import fftpack
import time
class EnsembleModule(object):
my_namespace = 'scipy|signals|ensembles'
class ComputeDistance(EnsembleModule, Module):
def compute(self):
vol = self.get_input("Signals").get_array()
num_im = vol.shape[0]
out_ar = numpy.zeros((num_im, num_im))
for i in range(num_im):
im_i = vol[i].squeeze().flatten()
for j in range(i+1, num_im, 1):
im_j = vol[j].squeeze().flatten()
d = (im_i - im_j)
d = d * d
d = numpy.sqrt(d.sum())
out_ar[i,j] = d
out_ar[j,i] = d
out = NDArray()
out.set_array(out_ar)
self.set_output("Output", out)
@classmethod
def register(cls, reg, basic):
reg.add_module(cls, namespace=cls.my_namespace)
reg.add_input_port(cls, "Signals", (NDArray, 'Input Signal Planes'))
reg.add_output_port(cls, "Output", (NDArray, 'Output Distance Matrix'))
class OrderByIndexes(EnsembleModule, Module):
""" Order the inputs using an array containing the
indexes they should appear in """
def compute(self):
vol = self.get_input("Signals")
inds = self.get_input("Indexes")
sh = vol.get_shape()
vol = vol.get_array()
inds = inds.get_array()
out_ar = [vol[inds[0]]]
for i in xrange(1, sh[0]):
try:
out_ar = numpy.vstack((out_ar, [vol[inds[i]]]))
except:
pass
out = NDArray()
out.set_array(out_ar)
self.set_output("Output", out)
@classmethod
def register(cls, reg, basic):
reg.add_module(cls, namespace=cls.my_namespace)
reg.add_input_port(cls, "Signals", (NDArray, 'Input Signal Set'))
reg.add_input_port(cls, "Indexes", (NDArray, 'Sorted Index Set'))
reg.add_output_port(cls, "Output", (NDArray, 'Sorted Signal Set'))
class OrderByCorrelation(EnsembleModule, Module):
""" Order the inputs using the correlations between a given
input index and all other slices in the volume """
def append_slice(self, vol, sl):
vol = numpy.vstack((vol, [sl]))
return vol
def append_cor(self, cor, sl_cor):
cor.append(sl_cor)
return cor
def prepend_slice(self, vol, sl):
vol = numpy.vstack(([sl], vol))
return vol
def prepend_cor(self, cor, sl_cor):
cor.insert(0, sl_cor)
return cor
def find_max(self, a):
f = a.max()
sh = a.shape
b = a.flatten()
ind = b.argmax()
row = int(ind/sh[1])
col = ind - row * sh[1]
return (row, col, f)
def compute(self):
ts = time.time()
vol = self.get_input("Signals")
ind = self.get_input("Key Slice")
if self.has_input("Normalize"):
self.normalize = self.get_input("Normalize")
else:
self.normalize = False
vol_ar = vol.get_array()
if self.normalize:
for i in range(vol_ar.shape[0]):
sl = vol_ar[i]
sl = sl - sl.min()
sl = sl / sl.max()
vol_ar[i] = sl
pos = self.force_get_input("Key Position")
key_slice = vol_ar[ind]
(r,c) = key_slice.shape
key_fft = fftpack.fftn(key_slice)
key_sq = key_slice * key_slice
norm = key_sq.sum()
norm = numpy.sqrt(norm)
num_slices = vol.get_shape()[0]
num_elements = key_slice.size
cor = []
for i in xrange(num_slices):
cur_slice = vol_ar[i]
cur_sq = cur_slice * cur_slice
cur_norm = cur_sq.sum()
cur_norm = numpy.sqrt(cur_norm)
cur_fft = fftpack.fftn(cur_slice)
cur_fft = cur_fft.conjugate()
cur_max = cur_slice.max()
prod_slice = key_fft * cur_fft
prod_slice = prod_slice / (norm * cur_norm)
cor_slice = fftpack.ifftn(prod_slice)
(row,col,val) = self.find_max(cor_slice.real)
cor.append((val,i,row,col))
cor.sort(lambda x,y:cmp(y[0],x[0]))
vol = [key_slice]
key_slice_out = key_slice
out_cor_ar = []
if pos is None:
app = True
for i in range(len(cor)):
sl_cor = cor[i]
if sl_cor[1] == ind:
continue
sl = vol_ar[sl_cor[1]]
if app:
vol = self.append_slice(vol, sl)
out_cor_ar = self.append_cor(out_cor_ar, cor[i][0])
else:
vol = self.prepend_slice(vol, sl)
out_cor_ar = self.prepend_cor(out_cor_ar, cor[i][0])
app = (app != True)
else:
for i in range(len(cor)):
sl_cor = cor[i]
sl = vol_ar[sl_cor[1]]
vol = self.append_slice(vol, sl)
out_cor_ar = self.append_cor(out_cor_ar, cor[i][0])
elapsed = time.time() - ts
# elapsed *= 1000000.
print "took: ", elapsed
out_vol = NDArray()
out_vol.set_array(vol/vol.max())
out_cor = NDArray()
out_cor_ar = numpy.array(out_cor_ar)
out_cor.set_array(out_cor_ar / out_cor_ar.max())
out_key = NDArray()
out_key.set_array(key_slice_out)
self.set_output("Output Key Slice", out_key)
self.set_output("Output Volume", out_vol)
self.set_output("Output Correlation", out_cor)
@classmethod
def register(cls, reg, basic):
reg.add_module(cls, namespace=cls.my_namespace)
reg.add_input_port(cls, "Signals", (NDArray, 'Input Signal Volume'))
reg.add_input_port(cls, "Key Slice", (basic.Integer, 'Key Slice Index'))
reg.add_input_port(cls, "Normalize", (basic.Boolean, 'Normalize Slice Power'))
reg.add_input_port(cls, "Key Position", (basic.Integer, 'Key Slice Position'))
reg.add_output_port(cls, "Output Volume", (NDArray, 'Sorted Signal Volume'))
reg.add_output_port(cls, "Output Correlation", (NDArray, 'Sorted Correlation Array'))
reg.add_output_port(cls, "Output Key Slice", (NDArray, 'Key Slice'))
class OrderByProgressiveCorrelation(EnsembleModule, Module):
def find_max(self, a):
f = a.max()
sh = a.shape
b = a.flatten()
ind = b.argmax()
row = int(ind/sh[1])
col = ind - row * sh[1]
return (row, col, f)
def correlate(self, plane, ref_im):
(r,c) = ref_im.shape
key_fft = fftpack.fftn(ref_im)
key_sq = ref_im * ref_im
norm = key_sq.sum()
norm = numpy.sqrt(norm)
cur_slice = plane
cur_sq = cur_slice * cur_slice
cur_norm = cur_sq.sum()
cur_norm = numpy.sqrt(cur_norm)
cur_fft = fftpack.fftn(cur_slice)
cur_fft = cur_fft.conjugate()
cur_max = cur_slice.max()
prod_slice = key_fft * cur_fft
prod_slice = prod_slice / (norm * cur_norm)
cor_slice = fftpack.ifftn(prod_slice)
(row,col,val) = self.find_max(cor_slice.real)
return val
def compute(self):
vol = self.get_input("Signals").get_array()
ind = self.get_input("Key Slice")
normalize = self.force_get_input("Normalize")
if normalize:
for i in range(vol.shape[0]):
sl = vol[i]
sl = sl - sl.min()
sl = sl / sl.max()
vol[i] = sl
tmp_vol = copy.copy(vol)
key_slice = vol[ind]
vol_ind = numpy.arange(vol.shape[0]).tolist()
out_ar = numpy.zeros(vol.shape)
out_ar[0,:,:] = key_slice
tmp_vol[ind,:,:] = 0.0
tmp_size = 1
cors_out = [self.correlate(key_slice,key_slice)]
print "key cor = ", cors_out[0]
while tmp_size < tmp_vol.shape[0]:
ts = time.time()
cors = []
print "output size is currently: ", tmp_size,
for i in range(tmp_vol.shape[0]):
plane = tmp_vol[i]
if plane.min() == 0. and plane.max() == 0.:
continue
cor = self.correlate(plane,out_ar[tmp_size-1,:,:])
cors.append((cor,i))
cors.sort(lambda x,y:cmp(y[0],x[0]))
(max_cor,ind) = cors[0]
print "\tcor = ", max_cor, ind
cors_out.append(max_cor)
out_ar[tmp_size,:,:] = vol[ind]
tmp_vol[ind,:,:] = 0.
tmp_size += 1
elapsed = time.time() - ts
# elapsed *= 1000000.
print "\tCorrelation took: ", elapsed
cor_ar = numpy.array(cors_out)
cor_ar /= cor_ar.max()
out = NDArray()
out.set_array(out_ar)
out_cor = NDArray()
out_cor.set_array(cor_ar)
self.set_output("Output Signals", out)
self.set_output("Output Correlations", out_cor)
@classmethod
def register(cls, reg, basic):
reg.add_module(cls, namespace=cls.my_namespace)
reg.add_input_port(cls, "Signals", (NDArray, 'Input Time Frequency Planes'))
reg.add_input_port(cls, "Key Slice", (basic.Integer, 'Key Slice'))
reg.add_input_port(cls, "Normalize", (basic.Boolean, 'Normalize each plane'), True)
reg.add_output_port(cls, "Output Signals", (NDArray, 'Output Time Frequency Planes'))
reg.add_output_port(cls, "Output Correlations", (NDArray, 'Output Correlations'))
|
|
# Copyright (c) 2012, Claudio "nex" Guarnieri
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from sqlalchemy import create_engine, Column, Integer, String, Boolean, DateTime, Enum, Text, ForeignKey, Table, Index, and_
from sqlalchemy.orm import relationship, backref, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.exc import SQLAlchemyError, IntegrityError
from sqlalchemy.pool import NullPool
from objects import File, Config
from datetime import datetime
Base = declarative_base()
association_table = Table('association', Base.metadata,
Column('tag_id', Integer, ForeignKey('tag.id')),
Column('malware_id', Integer, ForeignKey('malware.id'))
)
class Malware(Base):
__tablename__ = "malware"
id = Column(Integer(), primary_key=True)
name = Column(String(255), nullable=True)
size = Column(Integer(), nullable=False)
type = Column(Text(), nullable=True)
md5 = Column(String(32), nullable=False, index=True)
crc32 = Column(String(8), nullable=False)
sha1 = Column(String(40), nullable=False)
sha256 = Column(String(64), nullable=False, index=True)
sha512 = Column(String(128), nullable=False)
ssdeep = Column(String(255), nullable=True)
created_at = Column(DateTime(timezone=False), default=datetime.now, nullable=False)  # pass the callable so the default is evaluated per insert, not at import time
tag = relationship("Tag",
secondary=association_table,
backref="malware")
__table_args__ = (Index("hash_index",
"md5",
"crc32",
"sha1",
"sha256",
"sha512",
unique=True),)
def to_dict(self):
row_dict = {}
for column in self.__table__.columns:
value = getattr(self, column.name)
row_dict[column.name] = value
return row_dict
def __repr__(self):
return "<Malware('%s','%s')>" % (self.id, self.md5)
def __init__(self,
md5,
crc32,
sha1,
sha256,
sha512,
size,
type=None,
ssdeep=None,
name=None):
self.md5 = md5
self.sha1 = sha1
self.crc32 = crc32
self.sha256 = sha256
self.sha512 = sha512
self.size = size
self.type = type
self.ssdeep = ssdeep
self.name = name
class Tag(Base):
__tablename__ = "tag"
id = Column(Integer(), primary_key=True)
tag = Column(String(255), nullable=False, unique=True, index=True)
def to_dict(self):
row_dict = {}
for column in self.__table__.columns:
value = getattr(self, column.name)
row_dict[column.name] = value
return row_dict
def __repr__(self):
return "<Tag ('%s','%s'>" % (self.id, self.tag)
def __init__(self, tag):
self.tag = tag
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
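# Note: with this metaclass, repeated constructor calls return the same
# object, e.g. (illustrative) Database() is Database() evaluates to True,
# so the engine and session factory below are only created once.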
class Database:
__metaclass__ = Singleton
def __init__(self):
self.engine = create_engine(Config().api.database, poolclass=NullPool)
self.engine.echo = False
self.engine.pool_timeout = 60
Base.metadata.create_all(self.engine)
self.Session = sessionmaker(bind=self.engine)
def __del__(self):
self.engine.dispose()
def add(self, obj, name, tags=None):
session = self.Session()
if isinstance(obj, File):
try:
malware_entry = Malware(md5=obj.get_md5(),
crc32=obj.get_crc32(),
sha1=obj.get_sha1(),
sha256=obj.get_sha256(),
sha512=obj.get_sha512(),
size=obj.get_size(),
type=obj.get_type(),
ssdeep=obj.get_ssdeep(),
name=name)
session.add(malware_entry)
session.commit()
except IntegrityError:
session.rollback()
malware_entry = session.query(Malware).filter(Malware.md5 == obj.get_md5()).first()
except SQLAlchemyError:
session.rollback()
return False
if tags:
tags = tags.strip()
if "," in tags:
tags = tags.split(",")
else:
tags = tags.split(" ")
for tag in tags:
tag = tag.strip().lower()
if tag == "":
continue
try:
malware_entry.tag.append(Tag(tag))
session.commit()
except IntegrityError as e:
session.rollback()
try:
malware_entry.tag.append(session.query(Tag).filter(Tag.tag == tag).first())
session.commit()
except SQLAlchemyError:
session.rollback()
return True
def find_md5(self, md5):
session = self.Session()
row = session.query(Malware).filter(Malware.md5 == md5).first()
return row
def find_sha256(self, sha256):
session = self.Session()
row = session.query(Malware).filter(Malware.sha256 == sha256).first()
return row
def find_tag(self, tag):
session = self.Session()
rows = session.query(Malware).filter(Malware.tag.any(Tag.tag == tag.lower())).all()
return rows
def find_ssdeep(self, ssdeep):
session = self.Session()
rows = session.query(Malware).filter(Malware.ssdeep.like("%" + str(ssdeep) + "%")).all()
return rows
def find_date(self, date):
session = self.Session()
date_min = datetime.strptime(date, "%Y-%m-%d")
date_max = date_min.replace(hour=23, minute=59, second=59)
rows = session.query(Malware).filter(and_(Malware.created_at >= date_min, Malware.created_at <= date_max)).all()
return rows
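# Illustrative query (the date is hypothetical): find_date("2012-08-01")
# returns the samples whose created_at falls anywhere on that calendar day,
# i.e. between 2012-08-01 00:00:00 and 2012-08-01 23:59:59.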
def list_tags(self):
session = self.Session()
rows = session.query(Tag).all()
return rows
|
|
# -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from nailgun import consts
from nailgun.test import base
from nailgun.orchestrator.plugins_serializers import \
BasePluginDeploymentHooksSerializer
from nailgun.orchestrator.plugins_serializers import \
PluginsPreDeploymentHooksSerializer
class TestBasePluginDeploymentHooksSerializer(base.BaseTestCase):
def setUp(self):
super(TestBasePluginDeploymentHooksSerializer, self).setUp()
cluster_mock = mock.Mock()
self.cluster = cluster_mock
self.nodes = [
{'id': 1, 'role': 'controller'},
{'id': 2, 'role': 'compute'}
]
self.hook = BasePluginDeploymentHooksSerializer(
self.nodes,
self.cluster)
@mock.patch('nailgun.orchestrator.plugins_serializers.get_uids_for_roles')
def test_original_order_of_deployment_tasks(self, get_uids_for_roles_mock):
stage = 'pre_deployment'
role = 'controller'
plugin = mock.Mock()
plugin.full_name = 'plugin_name'
plugin.tasks = [
{'type': 'shell', 'role': role, 'id': '1', 'stage': stage,
'parameters': {'cmd': 'test1', 'cwd': '/', 'timeout': 15}},
{'type': 'puppet', 'role': role, 'id': '2', 'stage': stage,
'parameters': {
'puppet_manifest': 'manifests/site.pp',
'puppet_modules': 'modules',
'cwd': '/etc/puppet/plugins/plugin_name',
'timeout': 150}},
{'type': 'shell', 'role': role, 'id': '3', 'stage': stage,
'parameters': {'cmd': 'test2', 'cwd': '/', 'timeout': 15}}
]
get_uids_for_roles_mock.return_value = [1, 2]
raw_result = self.hook.deployment_tasks([plugin], stage)
result = [r['type'] for r in raw_result]
self.assertEqual(result, ['shell', 'puppet', 'shell'])
self.assertEqual(raw_result[0]['parameters']['cmd'], 'test1')
self.assertEqual(
raw_result[1]['parameters']['puppet_modules'],
'modules')
self.assertEqual(raw_result[2]['parameters']['cmd'], 'test2')
@mock.patch('nailgun.orchestrator.plugins_serializers.get_uids_for_roles')
def test_support_reboot_type_task(self, get_uids_for_roles_mock):
stage = 'pre_deployment'
plugin = mock.Mock()
plugin.full_name = 'plugin_name'
plugin.slaves_scripts_path = 'plugin_path'
plugin.tasks = [{
'type': 'reboot',
'role': 'controller',
'stage': stage,
'parameters': {'timeout': 15}}]
get_uids_for_roles_mock.return_value = [1, 2]
result = self.hook.deployment_tasks([plugin], stage)
expecting_format = {
'diagnostic_name': 'plugin_name',
'fail_on_error': True,
'parameters': {'timeout': 15},
'type': 'reboot',
'uids': [1, 2]}
self.assertEqual(result, [expecting_format])
@mock.patch('nailgun.orchestrator.plugins_serializers.get_uids_for_roles',
return_value=[1, 2])
def test_generates_scripts_path_in_case_of_several_plugins(self, _):
stage = 'pre_deployment'
plugins = []
names = ['plugin_name1', 'plugin_name2']
for name in names:
plugin = mock.Mock()
plugin.full_name = name
plugin.slaves_scripts_path = name
plugin.tasks = [{
'type': 'shell',
'role': 'controller',
'stage': stage,
'parameters': {'timeout': 15, 'cmd': 'cmd'}}]
plugins.append(plugin)
result = self.hook.deployment_tasks(plugins, stage)
script_paths = sorted(map(lambda p: p['parameters']['cwd'], result))
self.assertEqual(script_paths, names)
@mock.patch('nailgun.orchestrator.plugins_serializers.get_uids_for_roles',
return_value=[1, 2])
class TestTasksDeploymentOrder(base.BaseTestCase):
def setUp(self):
super(TestTasksDeploymentOrder, self).setUp()
self.cluster = mock.Mock()
self.nodes = [
{'id': 1, 'role': 'controller'},
{'id': 2, 'role': 'compute'}]
self.hook = BasePluginDeploymentHooksSerializer(
self.nodes,
self.cluster)
def make_plugin_mock_with_stages(self, plugin_name, stages):
common_attrs = {
'type': 'shell',
'role': '*',
'parameters': {'cmd': 'cmd', 'timeout': 100}}
tasks = []
for stage in stages:
task = copy.deepcopy(common_attrs)
task['stage'] = stage
task['parameters']['cmd'] = stage
tasks.append(task)
plugin = mock.Mock()
plugin.tasks = tasks
plugin.plugin.name = plugin_name
return plugin
def test_sorts_plugins_by_numerical_postfixes(self, _):
plugin1 = self.make_plugin_mock_with_stages('name1', [
'pre_deployment/-100',
'pre_deployment/100.0',
'pre_deployment/+100',
'pre_deployment'])
plugin2 = self.make_plugin_mock_with_stages('name2', [
'pre_deployment/-99',
'pre_deployment/100',
'pre_deployment'])
tasks = self.hook.deployment_tasks(
# Pass plugins in reverse alphabetic order, to make
# sure that plugin name sorting works
[plugin2, plugin1],
consts.STAGES.pre_deployment)
commands = map(lambda t: t['parameters']['cmd'], tasks)
self.assertEqual(
commands,
['pre_deployment/-100',
'pre_deployment/-99',
'pre_deployment',
'pre_deployment',
'pre_deployment/100.0',
'pre_deployment/+100',
'pre_deployment/100'])
class TestPluginsPreDeploymentHooksSerializer(base.BaseTestCase):
def setUp(self):
super(TestPluginsPreDeploymentHooksSerializer, self).setUp()
self.cluster = mock.Mock()
self.cluster.release.operating_system = 'ubuntu'
self.nodes = [
{'id': 1, 'role': 'controller'},
{'id': 2, 'role': 'compute'}
]
self.hook = PluginsPreDeploymentHooksSerializer(
self.cluster,
self.nodes)
self.plugins = [mock.Mock()]
@mock.patch(
'nailgun.orchestrator.plugins_serializers.get_uids_for_tasks',
return_value=[1, 2])
@mock.patch(
'nailgun.orchestrator.plugins_serializers.'
'templates.make_ubuntu_sources_task',
return_value={'task_type': 'ubuntu_sources_task',
'parameters': {}})
@mock.patch(
'nailgun.orchestrator.plugins_serializers.'
'templates.make_ubuntu_preferences_task',
return_value=None)
@mock.patch(
'nailgun.orchestrator.plugins_serializers.'
'templates.make_apt_update_task',
return_value={'task_type': 'apt_update_task',
'parameters': {}})
def test_create_repositories_ubuntu_does_not_generate_preferences_if_none(
self, _, __, ___, ____):
self.cluster.release.operating_system = consts.RELEASE_OS.ubuntu
tasks = self.hook.create_repositories(self.plugins)
self.assertItemsEqual(
map(lambda t: t['task_type'], tasks),
['ubuntu_sources_task',
'apt_update_task'])
|
|
#!/usr/bin/python
#----------------------------------------------------------------------
# Copyright (c) 2013-2016 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
# Handle the client connection between the VMOC and
# each registered controller for each switch
import inspect
import select
import socket
import threading
import pdb
from pox.core import core
from pox.lib.util import makePinger
from pox.lib.packet.ethernet import ethernet
from pox.lib.addresses import EthAddr
from pox.lib.packet.vlan import vlan
from pox.openflow.util import make_type_to_unpacker_table
from pox.openflow.libopenflow_01 import *
from pox.openflow import libopenflow_01 as of
from pox.lib.packet.ethernet import ethernet
from pox.lib.packet.vlan import vlan
import VMOCSwitchControllerMap as scmap
from VMOCSliceRegistry import slice_registry_lookup_slices_by_url, slice_registry_dump
from VMOCUtils import *
import gram.am.gram.config as config
log = core.getLogger() # Use central logging service
# Thread to manage connection with a controller:
# Listen to and process all messages received asynchronously from controller
class VMOCControllerConnection(threading.Thread):
def __init__(self, url, switch_connection, vlan, open_on_create=True):
threading.Thread.__init__(self)
self._running = False
self._vlan = int(vlan)
self.ofp_msgs = make_type_to_unpacker_table()
self.ofp_handlers = {
# Reactive handlers
ofp_type_rev_map['OFPT_HELLO'] : self._receive_hello,
ofp_type_rev_map['OFPT_ECHO_REQUEST'] : self._receive_echo,
ofp_type_rev_map['OFPT_FEATURES_REQUEST'] : \
self._receive_features_request,
ofp_type_rev_map['OFPT_FLOW_MOD'] : self._receive_flow_mod,
ofp_type_rev_map['OFPT_PACKET_OUT'] : self._receive_packet_out,
ofp_type_rev_map['OFPT_BARRIER_REQUEST'] : \
self._receive_barrier_request,
ofp_type_rev_map['OFPT_GET_CONFIG_REQUEST'] : \
self._receive_get_config_request,
ofp_type_rev_map['OFPT_SET_CONFIG'] : self._receive_set_config,
ofp_type_rev_map['OFPT_STATS_REQUEST'] : \
self._receive_stats_request,
ofp_type_rev_map['OFPT_VENDOR'] : self._receive_vendor,
# Proactive responses
ofp_type_rev_map['OFPT_ECHO_REPLY'] : self._receive_echo_reply
# TODO: many more packet types to process
}
self._switch_connection = switch_connection
if not hasattr(switch_connection, '_dpid'): pdb.set_trace()
# The controller sees VMOC as a switch. And we want each
# connection to a controller to have a different DPID (or controllers
# get confused). So VMOC has a different connection to a controller
# by VLAN. And a controller may connect to multiple VMOCs.
# So we need to construct a DPID that is unique over the space
# of VLAN and switch_DPID.
#
# Here's what we do.
# DPID's are 64 bits, of which the lower 48 are MAC addresses
# and the top 16 are 'implementer defined' (Openflow spec).
# The MAC should be unique, and (for the controller to talk to VMOC)
# we don't need to maintain the switch implementation features.
# The vlan is 3 hex digits, or 12 bits.
# So we shift the VLAN up into the high-order bits of
# the switch's DPID to form the VMOC-Controller-VLAN-specific DPID
#
# VLAN shifted into bits 48-59
# Switch DPID masked to its lower 52 bits
controller_connection_dpid = \
((self._vlan & 0xfff) << 48) + \
(switch_connection._dpid & 0x000fffffffffffff)
self._dpid = controller_connection_dpid
log.info("DPID = %d %x %x"% (self._vlan, switch_connection._dpid, self._dpid))
self._url = url
(host, port) = VMOCControllerConnection.parseURL(url)
self._host = host;
self._port = port
self._sock = None
# Make it optional to not open on create (for debugging at least)
if open_on_create:
self.open()
def open(self):
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._sock.connect((self._host, self._port))
self.start()
# When we have a new controller, send it a 'hello' (acting like
# we are the switch it is talking to)
hello_msg = of.ofp_hello()
self.send(hello_msg)
def close(self):
self._delete_old_vlan_flows()
# print "CLOSING " + str(self)
self._sock.close()
# print "SOCKET CLOSED " + str(self)
self._running = False
# print "RUNNING = FALSE (from close)" + str(self)
def __str__(self):
return "[VMOCControllerConnection DPID %s URL %s VLAN %d]" % (self._dpid, self._url, self._vlan)
def getURL(self):
return self._url
def getVLAN(self):
return self._vlan
def getSwitchConnection(self):
return self._switch_connection
def _receive_hello(self, ofp):
log.debug("CC " + self._url + " recvd " + "'OFPT_HELLO")
# log.debug("CC " + str(ofp))
self._delete_old_vlan_flows()
def _receive_echo(self, ofp):
log.debug("CC " + self._url + " recvd " + "'OFPT_ECHO_REQUEST")
# log.debug("CC " + str(ofp))
echo_reply = ofp_echo_reply(xid=ofp.xid)
self.send(echo_reply)
def _receive_features_request(self, ofp):
log.debug("CC " + self._url + " recvd " + "'OFPT_FEATURES_REQUEST")
# log.debug("CC " + str(ofp))
switch = scmap.lookup_switch_for_controller_connection(self)
if not switch:
print "No switch for controller (FEAT_REQ), dropping %s " % self
return
# Take the features reply from switch, clone it and set the DPID
# to the unique DPID for this connection
# (based on switch DPID and VLAN)
features_reply = switch._features_reply
conn_features_reply = of.ofp_features_reply()
conn_features_reply.unpack(features_reply.pack())
conn_features_reply.datapath_id = self._dpid
# print "Features Request " + str(ofp.xid) + " " + str(features_reply.xid)
conn_features_reply.xid = ofp.xid
self.send(conn_features_reply)
def _receive_flow_mod(self, ofp):
log.debug("CC " + self._url + " recvd " + "'OFPT_FLOW_MOD")
# log.debug("CC " + str(ofp))
# Need to forward this back to the switch
self.forwardToAllAppropriateSwitches(ofp)
def _receive_packet_out(self, ofp):
log.debug("CC " + self._url + " recvd " + "'OFPT_PACKET_OUT")
self.forwardToAllAppropriateSwitches(ofp)
# log.debug("CC " + str(ofp))
def _receive_barrier_request(self, ofp):
log.debug("CC " + self._url + " recvd " + "'OFPT_BARRIER_REQUEST")
# log.debug("CC " + str(ofp))
# print "BARRIER ID " + str(ofp.xid)
barrier_reply = ofp_barrier_reply(xid = ofp.xid)
# print "BARRIER_REPLY = " + str(barrier_reply)
# print "BARRIER_REQUEST = " + str(ofp)
self.send(barrier_reply)
def _receive_get_config_request(self, ofp):
log.debug("CC " + self._url + " recvd " + \
"'OFPT_GET_CONFIG_REQUEST" + str(ofp))
config_reply = ofp_get_config_reply(xid = ofp.xid)
# log.debug("CC " + self._url + " recvd " + "'OFPT_GET_CONFIG_REQUEST ")
log.debug("CONFIG %s" % (config_reply))
self.send(config_reply)
def _receive_set_config(self, ofp):
log.debug("CC " + self._url + " recvd " + "'OFPT_SET_CONFIG")
# log.debug("CC " + str(ofp))
def _receive_stats_request(self, ofp):
log.debug("CC " + self._url + " recvd " + "'OFPT_STATS_REQUEST " + str(ofp.type))
# log.debug("CC " + str(ofp))
desc = ofp_desc_stats(mfr_desc="POX",
hw_desc=core._get_platform_info(),
sw_desc=core.version_string,
serial_num=str(self._dpid),
dp_desc=type(self).__name__)
stats_reply = ofp_stats_reply(xid=ofp.xid, body=desc)
self.send(stats_reply)
def _receive_vendor(self, ofp):
log.debug("CC " + self._url + " recvd " + "'OFPT_VENDOR")
# log.debug("CC " + str(ofp))
err = ofp_error(type=OFPET_BAD_REQUEST, code=OFPBRC_BAD_VENDOR)
if ofp:
err.xid = ofp.xid
err.data = ofp.pack()
else:
err.xid = 0
self.send(err)
# Delete all flows for given VLAN prior to starting new slice or deleting existing slice
def _delete_old_vlan_flows(self):
# print "*** DELETING OLD VLANS %s *** " % self._vlan
match = of.ofp_match()
match.dl_vlan = self._vlan
flow_mod = of.ofp_flow_mod(match=match, command=of.OFPFC_DELETE)
# print "FM = %s" % str(flow_mod)
self.forwardToAllAppropriateSwitches(flow_mod)
def _receive_echo_reply(self, ofp):
log.debug("CC " + self._url + " recvd " + "'OFPT_ECHO_REPLY")
# log.debug("CC " + str(ofp))
# For now, forward the ofp to all switches associated with VMOC
def forwardToAllAppropriateSwitches(self, ofp):
# print "Forwarding to switch : " + str(ofp)
# Send any message back to switch EXCEPT
# RECEIVE_FLOW_MOD : Need to add on VLAN tag, DROP if VLAN isn't consistent with slice
# : DROP if SRC and DST isn't consistent with slice
# RECEIVE_PACKET_OUT : Need to make sure it has VLAN tag and has MACs on that VLAN
# Get switch for this connection
switch = scmap.lookup_switch_for_controller_connection(self)
if not switch:
print "No switch for controller (FORWARD), dropping %s: %s" % \
(self, str(type(ofp)))
return
ofp_revised = self.validateMessage(ofp, switch)
if ofp_revised is None:
log.debug("Not forwarding controller message to switch " +
str(type(ofp)) + str(switch))
else:
log.debug("Forwarding controller message to switch " + \
str(type(ofp)) + " " + str(switch))
switch.send(ofp_revised)
# Determine if message should be forwarded to switch
# If it is a flow mod:
# - If the match has a vlan tag, make sure it fits the connection's VLAN (else drop)
# - If the match has no VLAN tag, add it in
# - If the action has a set-vlan or strip-vlan, drop it
# If it is a packet:
# - If it has a VLAN, make sure it fits in connection's VLAN (else drop)
# - If it doesn't have a VLAN, add the VLAN of the connection to the packet
def validateMessage(self, ofp, switch):
ofp_revised = None
if ofp.header_type == of.OFPT_FLOW_MOD:
# Need to narrow down between the sub-cases for FLOW MOD below
# Check that the match is okay
match = ofp.match
# print "COMMAND = " + str(ofp.command) + " " + str(of.OFPFC_DELETE)
# print "WC = " + str(match.wildcards) + " " + str(OFPFW_ALL)
if ofp.command == of.OFPFC_DELETE and not match.dl_vlan:
if not config.vmoc_accept_clear_all_flows_on_startup:
return None
# Weird case of getting a 'clear all entries' at startup
# print "OFP = " + str(ofp)
# print "*** CASE 0 ***"
return ofp
elif match.dl_vlan == of.OFP_VLAN_NONE or not match.dl_vlan:
if not config.vmoc_set_vlan_on_untagged_flow_mod:
return ofp
# add in the tag to the match if not set
# print "OFP = " + str(ofp)
# print "MATCH = " + str(match)
match.dl_vlan = self._vlan
# print "MATCH = " + str(match)
# print "MATCH.DL_VLAN = " + str(match.dl_vlan) + " " + str(of.OFP_VLAN_NONE)
# print "*** CASE 1 ***"
#
# pdb.set_trace()
return ofp
elif match.dl_vlan != self._vlan:
return ofp # ***
log.debug("Dropping FLOW MOD: match tagged with wrong VLAN : " + \
str(ofp) + " " + str(self))
# print "*** CASE 2 ***"
# pdb.set_trace()
return None
# Check that the actions are okay
actions = ofp.actions
for action in actions:
if isinstance(action, ofp_action_vlan_vid) and action.vlan_vid != self._vlan:
log.debug("Dropping FLOW MOD: action to set wrong VLAN : " + \
str(ofp) + " " + str(self))
# print "*** CASE 3 ***"
# pdb.set_trace()
return None
return ofp
elif ofp.header_type == of.OFPT_PACKET_OUT:
data = ofp.data
ethernet_packet = ethernet(raw=data)
# if packet has a VLAN, make sure it fits this connection
if ethernet_packet.type == ethernet.VLAN_TYPE:
vlan_packet = vlan(data[ethernet.MIN_LEN:])
if vlan_packet.id != self._vlan:
log.debug("Dropping PACKET OUT: wrong VLAN set : " +
str(vlan_packet) + " " + str(ofp) + " " + str(self))
# print "*** CASE 4 ***"
# pdb.set_trace()
return None
else:
return ofp
else:
if not config.vmoc_set_vlan_on_untagged_packet_out:
return ofp
# If not, set it
orig_in_port = ofp.in_port
new_ethernet_packet = add_vlan_to_packet(ethernet_packet, self._vlan)
# Create a new ofp from the new data
ofp_orig = ofp
ofp = of.ofp_packet_out(data=new_ethernet_packet.raw)
ofp.buffer_id = None
ofp.in_port = orig_in_port
ofp.actions = ofp_orig.actions
ofp.xid = ofp_orig.xid
log.debug("Adding vlan to PACKET_OUT : " + \
str(ethernet_packet) + " " + str(new_ethernet_packet) + " " + \
str(ofp) + " " + str(self))
# print str(ethernet_packet)
# ip_packet = ipv4(ethernet_packet.raw[ethernet.MIN_LEN:])
# print str(ip_packet)
# print str(ofp_orig)
# print str(ofp)
# print str(new_ethernet_packet)
# vlan_packet = vlan(new_ethernet_packet.raw[ethernet.MIN_LEN:])
# print str(vlan_packet)
# ip_packet = ipv4(new_ethernet_packet.raw[ethernet.MIN_LEN+vlan.MIN_LEN:])
# print str(ip_packet)
# pdb.set_trace()
return ofp
else: # Not a FLOW_MOD or PACKET_OUT
return ofp
# Determine if this vlan/src/dest tuple is valid for the slice
# managed by this controller
def belongsToSlice(self, vlan_id, src, dst):
return vlan_id == self._vlan
# slice_configs = slice_registry_lookup_slices_by_url(self._url)
# # slice_registry_dump(True)
# belongs = False
# for slice_config in slice_configs:
# if slice_config.belongsToSlice(vlan_id, src, dst):
# belongs = True
# break
# print "Belongs (Controller) = " + str(self) + " " + str(belongs) + " " + \
# str(vlan_id) + " " + str(src) + " " + str(dst)
# return belongs
# Parse URL of form http://host:port
@staticmethod
def parseURL(url):
pieces = url.replace("/", "").split(':');
host = pieces[1]
port = int(pieces[2])
return host, port
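# Illustrative example (hypothetical URL):
#   parseURL("http://10.0.0.1:6633") returns ("10.0.0.1", 6633)
# since stripping the slashes leaves "http:10.0.0.1:6633" and the host and
# port are the second and third colon-separated fields.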
def run(self):
_select_timeout = 5
_buf_size = 8192
self._running = True
buf = b''
buf_empty = True
while self._running:
# print "VMOCControllerConnection Loop " + \
# str(len(buf)) + " " + str(buf_empty)
# If there is no more message data in the buffer, read from socket
# blocking within this thread
if buf_empty:
# print "BEFORE SELECT " + str(self._sock)
input_ready, output_ready, except_ready = \
select.select([self._sock], [], [], _select_timeout)
# print "AFTER SELECT " + str(self._sock) + " " + str(len(input_ready))
if not input_ready:
continue
new_buf = ''
try:
new_buf = self._sock.recv(_buf_size)
except:
# if we get an empty buffer or exception, kill connection
pass
# print "AFTER RECV " + str(self._sock)
if len(new_buf) == 0:
self._sock.close()
self._running = False;
log.info("LEN(NEW_BUF) = 0 ... closing socket and setting self._running to False");
break
else:
buf = buf + new_buf
# log.debug("Received buffer : " + str(len(buf)))
if ord(buf[0]) != of.OFP_VERSION:
log.warning("Bad OpenFlow version (" + str(ord(buf[0])) +
") on connection " + str(self))
return
# OpenFlow parsing occurs here:
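# For reference, the fixed OpenFlow 1.0 header is 8 bytes:
#   version (1 byte) | type (1 byte) | length (2 bytes, big-endian) | xid (4 bytes)
# so buf[1] is the message type and (buf[2] << 8) | buf[3] is the total
# message length, used to decide whether a complete message has arrived.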
ofp_type = ord(buf[1])
packet_length = ord(buf[2]) << 8 | ord(buf[3])
buf_empty = packet_length > len(buf)
# log.debug("Parsed " + str(ofp_type) + " " + \
# str(packet_length) + " " + \
# str(len(buf)) + " " + str(buf_empty))
if not buf_empty:
new_offset, msg_obj = self.ofp_msgs[ofp_type](buf, 0)
msg_obj.unpack(buf)
# log.debug("Received msg " + str(ofp_type) + " " + \
# str(packet_length) + \
# str(type(msg_obj)))
buf = buf[packet_length:]
# need to at least have the packet_length
buf_empty = len(buf) < 4
try:
if ofp_type not in self.ofp_handlers:
msg = "No handler for ofp_type %s(%d)" % \
(ofp_type_map.get(ofp_type), ofp_type)
raise RuntimeError(msg)
h = self.ofp_handlers[ofp_type]
# print "Calling " + str(h) + " on msg " + str(msg_obj) + " " + str(msg_obj.xid)
if "connection" in inspect.getargspec(h)[0]:
h(msg_obj, connection=self)
else:
h(msg_obj)
except Exception as e:
# print "Exception " + str(e)
log.exception(e)
log.exception(str(e))
self._running = False
log.info("After Exception, setting self._running to False");
scmap.remove_controller_connection(self)
# If the controller connection died, we should try to restart
# it if or when the controller comes back
scmap.create_controller_connection(self._url, self._vlan)
log.debug("Exiting VMCControllerConnection.run")
# To be called synchronously when VMOC determines it should
# send a message to this client
def send(self, message):
log.debug("Sending to client: " + self._url + " " + str(type(message)))
data = message.pack()
bytes_sent = self._sock.send(data)
# log.debug("Sent " + str(bytes_sent) + " bytes")
|
|
#!/usr/bin/env python
from __future__ import print_function,division
from astropy.io import fits
import matplotlib.pyplot as plt
import numpy as np
import matplotlib
from pint.templates import lctemplate,lcprimitives,lcfitters
from pint.eventstats import z2m,sf_z2m, hm, sf_hm, sig2sigma
import sys
from astropy import log
import scipy.stats
def compute_fourier(phases,nh=10,pow_phase=False):
'''Compute Fourier amplitudes from an array of pulse phases
phases should be [0,1.0)
nh is the number of harmonics (1 = fundamental only)
Returns: cos and sin component arrays, unless pow_phase is True
then returns Fourier power (Leahy normalized) and phase arrays
DC bin is not computed or returned
'''
phis = 2.0*np.pi*phases # Convert phases to radians
n = len(phis)
c = np.asarray([(np.cos(k*phis)).sum() for k in range(1,nh+1)])/n
s = np.asarray([(np.sin(k*phis)).sum() for k in range(1,nh+1)])/n
c *= 2.0
s *= 2.0
if pow_phase:
# CHECK! There could be errors here!
# These should be Leahy normalized powers
fourier_pow = (n/2)*(c**2+s**2)
fourier_phases = np.arctan2(s,c)
return n,fourier_pow,fourier_phases
else:
return n,c,s
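# For reference, the powers returned with pow_phase=True follow the Leahy
# normalization as computed above:
#   P_k = (2/N) * [ (sum_i cos(k*phi_i))**2 + (sum_i sin(k*phi_i))**2 ]
# so a pure-noise harmonic is chi-squared distributed with 2 dof (mean 2).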
def evaluate_fourier(n,c,s,nbins,k=None):
# This should be updated to do a little integral over each bin.
# Currently evaluates the model at the center of each bin
model = np.zeros(nbins)+n/nbins
theta = 2.0*np.pi*np.arange(nbins,dtype=np.float)/nbins
theta += theta[1]/2.0
if k is not None:
model += (n/nbins)*(c[k]*np.cos((k+1)*theta) + s[k]*np.sin((k+1)*theta))
else:
for k in range(len(c)):
model += (n/nbins)*(c[k]*np.cos((k+1)*theta) + s[k]*np.sin((k+1)*theta))
return model
def evaluate_chi2(hist,model):
# Question here is whether error should be sqrt(data) or sqrt(model)
return ((hist-model)**2/model).sum()
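# Illustrative note (a sketch, not used by the script): with the model counts in
# the denominator this is the Pearson chi-square statistic, so for data actually
# drawn from the model the statistic should come out near the number of bins
# (minus any fitted parameters), e.g.:
#
#   >>> model = np.full(200, 50.0)                  # flat model, 50 counts per bin
#   >>> hist = np.random.poisson(50.0, size=200)    # fake data drawn from that model
#   >>> evaluate_chi2(hist, model) / 200.0          # expect a value near 1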
def compute_phist(phases,nbins=200):
h, edges = np.histogram(phases,bins=np.linspace(0.0,1.0,nbins+1,endpoint=True))
return edges[:-1], h
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description = "Fit a set of pulse phases to harmonics")
parser.add_argument("evname", help="Input event file (must have PULSE_PHASE column)")
parser.add_argument("--white",help = "Replace phases with white random numbers, for testing", action="store_true")
parser.add_argument("--txt",help = "Assume input file is .txt instead of FITS", action="store_true")
parser.add_argument("--showcomps",help = "Show individual components of harmonic fit on plot", action="store_true")
parser.add_argument("--noplot",help = "Don't show any plots", action="store_true")
parser.add_argument("--output",help = "Save figures with basename", default=None)
parser.add_argument("--numharm",help="Max harmonic to use in analysis (1=Fundamental only)",default=4,type=int)
parser.add_argument("--numbins",help="Number of bins for histograms",default=200,type=int)
parser.add_argument("--emin",help="Minimum energy to include (keV)",default=0.25,type=float)
parser.add_argument("--emax",help="Maximum energy to include (keV)",default=12.0,type=float)
args = parser.parse_args()
if args.txt:
exposure = None
ph,en = np.loadtxt(args.evname,unpack=True,usecols=(1,2),skiprows=3)
log.info("Read {0} phases from .txt file".format(len(ph)))
tstart = 0.0
else:
f = fits.open(args.evname)
en = f['events'].data.field('pi')
ph = f['events'].data.field('pulse_phase')
log.info("Read {0} phases from FITS file".format(len(ph)))
exposure = float(f['events'].header['EXPOSURE'])
tstart = float(f['events'].header['TSTART'])
log.info("Exposure = {0} s".format(exposure))
if args.white:
# Random phases uniform over [0,1)
ph = np.random.random_sample(len(en))
log.info("Replaced with {0} random phases".format(len(en)))
matplotlib.rcParams['font.family'] = "serif"
matplotlib.rcParams.update({'font.size': 13})
matplotlib.rc('axes', linewidth=1.5)
if args.output:
resultsfile = open("{0}_results.txt".format(args.output),"w")
print("{0:.6f}".format(tstart),file=resultsfile)
# Filter on energy
idx = np.where(np.logical_and(en > int(args.emin*100), en < int(args.emax*100) ))[0]
ph = ph[idx]
en = en[idx]
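# Note on the cut above (an assumption, stated for clarity): it treats the PI
# column as being in units of 10 eV (the NICER convention), so an energy in keV
# maps to PI = energy_keV * 100, e.g. emin = 0.25 keV -> PI channel 25.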
# Hack to manually split out a segment
#q = 3 # Use 0, 1, 2, 3
#qn = len(ph)//4
#ph = ph[q*qn:(q+1)*qn]
#en = en[q*qn:(q+1)*qn]
nbins = args.numbins
bins,phist = compute_phist(ph,nbins=nbins)
fig,axs = plt.subplots(nrows=2,ncols=1)
plt.subplots_adjust(left=0.15, bottom=0.1, right=0.97, top=0.94,hspace=0.001)
ax=axs[0]
ax.tick_params(direction='in', length=6, width=2, colors='k',top=True, right=True, labelbottom=False)
# ax.text(.5,.8,'PSR J0030+0451', horizontalalignment='center', transform=ax.transAxes)
# ax.text(.5,.8,'PSR J0437-4715', horizontalalignment='center', transform=ax.transAxes)
# ax.text(.2,.8,'PSR J1231-1411', horizontalalignment='center', transform=ax.transAxes)
# ax.text(.8,.8,'PSR J2124-3358', horizontalalignment='center', transform=ax.transAxes)
ax.step(np.concatenate((bins,np.ones(1))),np.concatenate((phist,phist[-1:])),color='k',where='post')
ax.set_xlim(0.0,1.0)
ax.set_ylabel('Counts per bin')
n,c,s = compute_fourier(ph,nh=args.numharm)
model = evaluate_fourier(n,c,s,nbins)
ax.plot(bins+bins[1]/2.0,model,color='r',lw=2)
if args.showcomps:
for k in range(len(c)):
ax.plot(np.linspace(0.0,1.0,nbins),evaluate_fourier(n,c,s,nbins,k=k),ls='--')
fn,fpow,fphase = compute_fourier(ph,nh=args.numharm,pow_phase=True)
i=1
log.info("Harm LeahyPower Phase(deg)")
for fp, fph in zip(fpow,fphase):
log.info("{0:2d} {1:12.3f} {2:9.3f} deg".format(i,fp,np.rad2deg(fph)))
if args.output:
print("{0:2d} {1:12.3f} {2:9.3f}".format(i,fp,np.rad2deg(fph)),file=resultsfile)
i+=1
pcounts = (model-model.min()).sum()
pcounts_err = np.sqrt(model.sum() + model.min()*len(model))
if exposure:
log.info("Pulsed counts = {0:.3f}, count rate = {1:.3f}+/-{2:.4f} c/s".format(pcounts, pcounts/exposure, pcounts_err/exposure))
log.info("Total rate = {0:.3f} c/s, Unpulsed rate = {1:.3f} c/s".format(n/exposure, n/exposure-pcounts/exposure))
ax = axs[1]
ax.tick_params(direction='in', length=6, width=2, colors='k',top=True, right=True)
ax.errorbar(np.linspace(0.0,1.0,nbins),phist-model,yerr=np.sqrt(phist),fmt='.',ecolor='k')
chisq = evaluate_chi2(phist,model)
nparams = 1 + 2*args.numharm # 1 for DC + 2 for each sinusoidal component
ax.set_xlim(0.0,1.0)
ax.set_xlabel('Pulse Phase')
ax.set_ylabel('Residuals (counts)')
ax.tick_params(direction='in', length=6, width=2, colors='k',top=True)
ndof = len(phist)-nparams
axs[0].set_title("NumHarm = {0}, Chisq = {1:.2f}, DOF = {2}".format(args.numharm,chisq,ndof))
ax.grid(1)
# ax.set_label("{0} Harmonic Fit to Profile".format(args.numharm))
plt.tight_layout()
if args.output:
fig.savefig("{0}_harmfit.pdf".format(args.output))
# Plot distribution of residuals to compare to a gaussian
fig,ax = plt.subplots()
ax.tick_params(direction='in', length=6, width=2, colors='k',top=True, right=True)
chi = (phist-model)/np.sqrt(model)
#x, y = np.histogram(chi,bins=np.linspace(-2.0,2.0,0.1))
x = np.linspace(-3.0,3.0,32,endpoint=True)
ax.hist(chi,bins=x,density=True)
ax.set_title('Histogram of residuals')
ax.plot(x,scipy.stats.norm.pdf(x))
plt.tight_layout()
# Plot histogram of phase differences to see if they are Poisson
fig,ax = plt.subplots()
ax.tick_params(direction='in', length=6, width=2, colors='k',top=True, right=True)
ph.sort()
pdiffs = (ph[1:]-ph[:-1])*1.0
x = np.linspace(0.0,50.0e-6,200,endpoint=True)
histn, histbins, histpatches = ax.hist(pdiffs,bins=x,density=True,log=True)
ax.set_title('Histogram of phase differences')
ax.set_xlabel('Phase diff')
ax.plot(x,np.exp(-len(pdiffs)*(x*1.0))*n)
plt.tight_layout()
# Compute number of significant harmonics
# First by plotting Leahy powers
fig,axs = plt.subplots(nrows=2,ncols=1)
ax = axs[0]
ax.tick_params(direction='in', length=6, width=2, colors='k',top=True, right=True)
n,pow,phases = compute_fourier(ph,nh=nbins//2,pow_phase=True)
ax.semilogy(np.arange(len(pow))+1,pow,marker='o')
# A Leahy power of 5.99 is the 95% point of a chi-square distribution with
# 2 degrees of freedom (scipy.stats.chi2.sf(5.99, 2) ~ 0.05), i.e. roughly a
# 2-sigma single-trial threshold
ax.axhline(5.99,color='r')
ax.axhline(2.0,color='b',ls='--')
#ax.xaxis.set_ticks(np.arange(1,len(pow)+1))
#ax.set_xlabel('Harmonic Number')
ax.set_ylabel('Leahy Power')
ax.set_title("Power Spectrum")
plt.tight_layout()
ax = axs[1]
ax.tick_params(direction='in', length=6, width=2, colors='k',top=True, right=True)
ax.plot(np.arange(len(pow))+1,pow,marker='o')
ax.axhline(5.99,color='r')
ax.axhline(2.0,color='b',ls='--')
#ax.xaxis.set_ticks(np.arange(1,len(pow)+1))
ax.set_ylim(0.0,10.0)
ax.text(1.0,7.0,'Mean power {0:.3f}'.format(pow.mean()))
ax.set_xlabel('Harmonic Number')
ax.set_ylabel('Leahy Power')
if args.output:
fig.savefig("{0}_leahy.pdf".format(args.output))
plt.tight_layout()
# Then by computing chisq as a function of number of harmonics in model
chisq = []
ndof = []
maxharms = np.arange(1,min(33,nbins//4+1))
n,c,s = compute_fourier(ph,nh=maxharms[-1])
for maxharm in maxharms:
model = evaluate_fourier(n,c[:maxharm],s[:maxharm],nbins)
chisq.append(evaluate_chi2(phist,model))
nparams = 1 + 2*maxharm # 1 for DC + 2 for each sinusoidal component
ndof.append(len(phist)-nparams)
chisq = np.asarray(chisq)
ndof = np.asarray(ndof)
fig,ax = plt.subplots()
ax.tick_params(direction='in', length=6, width=2, colors='k',top=True, right=True)
ax.plot(maxharms,chisq/ndof,'o',ls='-')
ax.set_ylim(0.5,3.0)
ax.axhline(1.0,color='r',ls='--')
ax.set_xlabel('Number of Harmonics')
ax.set_ylabel('Chisq')
ax.set_title("Chisq/DOF vs. Number of Harmonics")
#ax.xaxis.set_ticks(maxharms)
#ax.semilogy(maxharms,ndof)
plt.tight_layout()
if args.output:
fig.savefig("{0}_chisq.pdf".format(args.output))
# Then look at amplitudes and phases as a function of energy cuts
# Look at color oscillations
# Select photons above and below some energy cut and look at the ratio
ensplit = 55
softidx = np.where(en<ensplit)[0]
hardidx = np.where(en>=ensplit)[0]
colorbins = 32
softbins, softn = compute_phist(ph[softidx],nbins=colorbins)
hardbins, hardn = compute_phist(ph[hardidx],nbins=colorbins)
softn = np.asarray(softn,dtype=float)
hardn = np.asarray(hardn,dtype=float)
fig,ax = plt.subplots()
color = hardn/softn
# Propagate Poisson errors to get error in ratio
cerr = color*np.sqrt(1.0/softn + 1.0/hardn)
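# Worked form of the error propagation above: for independent Poisson counts
# h (hard) and s (soft), the ratio r = h/s has fractional variance
#   (sigma_r / r)**2 ~ 1/h + 1/s
# which is what cerr computes bin by bin, e.g. for h = 400, s = 100:
#   >>> 4.0 * np.sqrt(1.0/100 + 1.0/400)    # ~0.447 on a ratio of 4.0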
#ax.step(np.concatenate((softbins,np.ones(1))),np.concatenate((color,color[-1:])),color='C0',where='post')
ax.errorbar(softbins+0.5*softbins[1],color,yerr=cerr,color='k',fmt='.')
ax.set_xlim(0.0,1.0)
ax.set_xlabel('Pulse Phase')
ax.set_ylabel('Spectral Color')
if not args.noplot:
plt.show()
|
|
from sqlalchemy.testing import eq_, assert_raises, assert_raises_message
from sqlalchemy import testing
from sqlalchemy.testing import engines
from sqlalchemy.testing.schema import Table, Column
from test.orm import _fixtures
from sqlalchemy.testing import fixtures
from sqlalchemy import Integer, String, ForeignKey, func
from sqlalchemy.orm import mapper, relationship, backref, \
create_session, unitofwork, attributes,\
Session, class_mapper, sync, exc as orm_exc
from sqlalchemy.testing.assertsql import AllOf, CompiledSQL
class AssertsUOW(object):
def _get_test_uow(self, session):
uow = unitofwork.UOWTransaction(session)
deleted = set(session._deleted)
new = set(session._new)
dirty = set(session._dirty_states).difference(deleted)
for s in new.union(dirty):
uow.register_object(s)
for d in deleted:
uow.register_object(d, isdelete=True)
return uow
def _assert_uow_size(self, session, expected):
uow = self._get_test_uow(session)
postsort_actions = uow._generate_actions()
print(postsort_actions)
eq_(len(postsort_actions), expected, postsort_actions)
class UOWTest(_fixtures.FixtureTest,
testing.AssertsExecutionResults, AssertsUOW):
run_inserts = None
class RudimentaryFlushTest(UOWTest):
def test_one_to_many_save(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses':relationship(Address),
})
mapper(Address, addresses)
sess = create_session()
a1, a2 = Address(email_address='a1'), Address(email_address='a2')
u1 = User(name='u1', addresses=[a1, a2])
sess.add(u1)
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"INSERT INTO users (name) VALUES (:name)",
{'name': 'u1'}
),
CompiledSQL(
"INSERT INTO addresses (user_id, email_address) "
"VALUES (:user_id, :email_address)",
lambda ctx: {'email_address': 'a1', 'user_id':u1.id}
),
CompiledSQL(
"INSERT INTO addresses (user_id, email_address) "
"VALUES (:user_id, :email_address)",
lambda ctx: {'email_address': 'a2', 'user_id':u1.id}
),
)
def test_one_to_many_delete_all(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses':relationship(Address),
})
mapper(Address, addresses)
sess = create_session()
a1, a2 = Address(email_address='a1'), Address(email_address='a2')
u1 = User(name='u1', addresses=[a1, a2])
sess.add(u1)
sess.flush()
sess.delete(u1)
sess.delete(a1)
sess.delete(a2)
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"DELETE FROM addresses WHERE addresses.id = :id",
[{'id':a1.id},{'id':a2.id}]
),
CompiledSQL(
"DELETE FROM users WHERE users.id = :id",
{'id':u1.id}
),
)
def test_one_to_many_delete_parent(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses':relationship(Address),
})
mapper(Address, addresses)
sess = create_session()
a1, a2 = Address(email_address='a1'), Address(email_address='a2')
u1 = User(name='u1', addresses=[a1, a2])
sess.add(u1)
sess.flush()
sess.delete(u1)
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"UPDATE addresses SET user_id=:user_id WHERE "
"addresses.id = :addresses_id",
lambda ctx: [{'addresses_id': a1.id, 'user_id': None}]
),
CompiledSQL(
"UPDATE addresses SET user_id=:user_id WHERE "
"addresses.id = :addresses_id",
lambda ctx: [{'addresses_id': a2.id, 'user_id': None}]
),
CompiledSQL(
"DELETE FROM users WHERE users.id = :id",
{'id':u1.id}
),
)
def test_many_to_one_save(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users)
mapper(Address, addresses, properties={
'user':relationship(User)
})
sess = create_session()
u1 = User(name='u1')
a1, a2 = Address(email_address='a1', user=u1), \
Address(email_address='a2', user=u1)
sess.add_all([a1, a2])
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"INSERT INTO users (name) VALUES (:name)",
{'name': 'u1'}
),
CompiledSQL(
"INSERT INTO addresses (user_id, email_address) "
"VALUES (:user_id, :email_address)",
lambda ctx: {'email_address': 'a1', 'user_id':u1.id}
),
CompiledSQL(
"INSERT INTO addresses (user_id, email_address) "
"VALUES (:user_id, :email_address)",
lambda ctx: {'email_address': 'a2', 'user_id':u1.id}
),
)
def test_many_to_one_delete_all(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users)
mapper(Address, addresses, properties={
'user':relationship(User)
})
sess = create_session()
u1 = User(name='u1')
a1, a2 = Address(email_address='a1', user=u1), \
Address(email_address='a2', user=u1)
sess.add_all([a1, a2])
sess.flush()
sess.delete(u1)
sess.delete(a1)
sess.delete(a2)
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"DELETE FROM addresses WHERE addresses.id = :id",
[{'id':a1.id},{'id':a2.id}]
),
CompiledSQL(
"DELETE FROM users WHERE users.id = :id",
{'id':u1.id}
),
)
def test_many_to_one_delete_target(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users)
mapper(Address, addresses, properties={
'user':relationship(User)
})
sess = create_session()
u1 = User(name='u1')
a1, a2 = Address(email_address='a1', user=u1), \
Address(email_address='a2', user=u1)
sess.add_all([a1, a2])
sess.flush()
sess.delete(u1)
a1.user = a2.user = None
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"UPDATE addresses SET user_id=:user_id WHERE "
"addresses.id = :addresses_id",
lambda ctx: [{'addresses_id': a1.id, 'user_id': None}]
),
CompiledSQL(
"UPDATE addresses SET user_id=:user_id WHERE "
"addresses.id = :addresses_id",
lambda ctx: [{'addresses_id': a2.id, 'user_id': None}]
),
CompiledSQL(
"DELETE FROM users WHERE users.id = :id",
{'id':u1.id}
),
)
def test_many_to_one_delete_unloaded(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users)
mapper(Address, addresses, properties={
'parent':relationship(User)
})
parent = User(name='p1')
c1, c2 = Address(email_address='c1', parent=parent), \
Address(email_address='c2', parent=parent)
session = Session()
session.add_all([c1, c2])
session.add(parent)
session.flush()
pid = parent.id
c1id = c1.id
c2id = c2.id
session.expire(parent)
session.expire(c1)
session.expire(c2)
session.delete(c1)
session.delete(c2)
session.delete(parent)
# testing that relationships
# are loaded even if all ids/references are
# expired
self.assert_sql_execution(
testing.db,
session.flush,
AllOf(
# [ticket:2002] - ensure the m2os are loaded.
# the selects here are in fact unexpiring
# each row - the m2o comes from the identity map.
# the User row might be handled before or after the addresses
# are loaded, so we need to use AllOf
CompiledSQL(
"SELECT addresses.id AS addresses_id, addresses.user_id AS "
"addresses_user_id, addresses.email_address AS "
"addresses_email_address FROM addresses WHERE addresses.id = "
":param_1",
lambda ctx: {'param_1': c1id}
),
CompiledSQL(
"SELECT addresses.id AS addresses_id, addresses.user_id AS "
"addresses_user_id, addresses.email_address AS "
"addresses_email_address FROM addresses WHERE addresses.id = "
":param_1",
lambda ctx: {'param_1': c2id}
),
CompiledSQL(
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users WHERE users.id = :param_1",
lambda ctx: {'param_1': pid}
),
CompiledSQL(
"DELETE FROM addresses WHERE addresses.id = :id",
lambda ctx: [{'id': c1id}, {'id': c2id}]
),
CompiledSQL(
"DELETE FROM users WHERE users.id = :id",
lambda ctx: {'id': pid}
),
),
)
def test_many_to_one_delete_childonly_unloaded(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users)
mapper(Address, addresses, properties={
'parent':relationship(User)
})
parent = User(name='p1')
c1, c2 = Address(email_address='c1', parent=parent), \
Address(email_address='c2', parent=parent)
session = Session()
session.add_all([c1, c2])
session.add(parent)
session.flush()
pid = parent.id
c1id = c1.id
c2id = c2.id
session.expire(c1)
session.expire(c2)
session.delete(c1)
session.delete(c2)
self.assert_sql_execution(
testing.db,
session.flush,
AllOf(
# [ticket:2049] - we aren't deleting User,
# relationship is simple m2o, no SELECT should be emitted for it.
CompiledSQL(
"SELECT addresses.id AS addresses_id, addresses.user_id AS "
"addresses_user_id, addresses.email_address AS "
"addresses_email_address FROM addresses WHERE addresses.id = "
":param_1",
lambda ctx: {'param_1': c1id}
),
CompiledSQL(
"SELECT addresses.id AS addresses_id, addresses.user_id AS "
"addresses_user_id, addresses.email_address AS "
"addresses_email_address FROM addresses WHERE addresses.id = "
":param_1",
lambda ctx: {'param_1': c2id}
),
),
CompiledSQL(
"DELETE FROM addresses WHERE addresses.id = :id",
lambda ctx: [{'id': c1id}, {'id': c2id}]
),
)
def test_many_to_one_delete_childonly_unloaded_expired(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users)
mapper(Address, addresses, properties={
'parent':relationship(User)
})
parent = User(name='p1')
c1, c2 = Address(email_address='c1', parent=parent), \
Address(email_address='c2', parent=parent)
session = Session()
session.add_all([c1, c2])
session.add(parent)
session.flush()
pid = parent.id
c1id = c1.id
c2id = c2.id
session.expire(parent)
session.expire(c1)
session.expire(c2)
session.delete(c1)
session.delete(c2)
self.assert_sql_execution(
testing.db,
session.flush,
AllOf(
# the parent User is expired, so it gets loaded here.
CompiledSQL(
"SELECT addresses.id AS addresses_id, addresses.user_id AS "
"addresses_user_id, addresses.email_address AS "
"addresses_email_address FROM addresses WHERE addresses.id = "
":param_1",
lambda ctx: {'param_1': c1id}
),
CompiledSQL(
"SELECT addresses.id AS addresses_id, addresses.user_id AS "
"addresses_user_id, addresses.email_address AS "
"addresses_email_address FROM addresses WHERE addresses.id = "
":param_1",
lambda ctx: {'param_1': c2id}
),
),
CompiledSQL(
"DELETE FROM addresses WHERE addresses.id = :id",
lambda ctx: [{'id': c1id}, {'id': c2id}]
),
)
def test_natural_ordering(self):
"""test that unconnected items take relationship() into account regardless."""
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users)
mapper(Address, addresses, properties={
'parent':relationship(User)
})
sess = create_session()
u1 = User(id=1, name='u1')
a1 = Address(id=1, user_id=1, email_address='a2')
sess.add_all([u1, a1])
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"INSERT INTO users (id, name) VALUES (:id, :name)",
{'id':1, 'name':'u1'}),
CompiledSQL(
"INSERT INTO addresses (id, user_id, email_address) "
"VALUES (:id, :user_id, :email_address)",
{'email_address': 'a2', 'user_id': 1, 'id': 1}
)
)
sess.delete(u1)
sess.delete(a1)
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"DELETE FROM addresses WHERE addresses.id = :id",
[{'id': 1}]
),
CompiledSQL(
"DELETE FROM users WHERE users.id = :id",
[{'id': 1}]
)
)
def test_natural_selfref(self):
"""test that unconnected items take relationship() into account regardless."""
Node, nodes = self.classes.Node, self.tables.nodes
mapper(Node, nodes, properties={
'children':relationship(Node)
})
sess = create_session()
n1 = Node(id=1)
n2 = Node(id=2, parent_id=1)
n3 = Node(id=3, parent_id=2)
# insert order is determined from add order since they
# are the same class
sess.add_all([n1, n2, n3])
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"INSERT INTO nodes (id, parent_id, data) VALUES "
"(:id, :parent_id, :data)",
[{'parent_id': None, 'data': None, 'id': 1},
{'parent_id': 1, 'data': None, 'id': 2},
{'parent_id': 2, 'data': None, 'id': 3}]
),
)
def test_many_to_many(self):
keywords, items, item_keywords, Keyword, Item = (self.tables.keywords,
self.tables.items,
self.tables.item_keywords,
self.classes.Keyword,
self.classes.Item)
mapper(Item, items, properties={
'keywords':relationship(Keyword, secondary=item_keywords)
})
mapper(Keyword, keywords)
sess = create_session()
k1 = Keyword(name='k1')
i1 = Item(description='i1', keywords=[k1])
sess.add(i1)
self.assert_sql_execution(
testing.db,
sess.flush,
AllOf(
CompiledSQL(
"INSERT INTO keywords (name) VALUES (:name)",
{'name':'k1'}
),
CompiledSQL(
"INSERT INTO items (description) VALUES (:description)",
{'description':'i1'}
),
),
CompiledSQL(
"INSERT INTO item_keywords (item_id, keyword_id) "
"VALUES (:item_id, :keyword_id)",
lambda ctx:{'item_id':i1.id, 'keyword_id':k1.id}
)
)
# test that keywords collection isn't loaded
sess.expire(i1, ['keywords'])
i1.description = 'i2'
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL("UPDATE items SET description=:description "
"WHERE items.id = :items_id",
lambda ctx:{'description':'i2', 'items_id':i1.id})
)
def test_m2o_flush_size(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users)
mapper(Address, addresses, properties={
'user':relationship(User, passive_updates=True)
})
sess = create_session()
u1 = User(name='ed')
sess.add(u1)
self._assert_uow_size(sess, 2)
def test_o2m_flush_size(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses':relationship(Address),
})
mapper(Address, addresses)
sess = create_session()
u1 = User(name='ed')
sess.add(u1)
self._assert_uow_size(sess, 2)
sess.flush()
u1.name='jack'
self._assert_uow_size(sess, 2)
sess.flush()
a1 = Address(email_address='foo')
sess.add(a1)
sess.flush()
u1.addresses.append(a1)
self._assert_uow_size(sess, 6)
sess.flush()
sess = create_session()
u1 = sess.query(User).first()
u1.name='ed'
self._assert_uow_size(sess, 2)
u1.addresses
self._assert_uow_size(sess, 6)
class SingleCycleTest(UOWTest):
def teardown(self):
engines.testing_reaper.rollback_all()
# mysql can't handle delete from nodes
# since it doesn't deal with the FKs correctly,
# so wipe out the parent_id first
testing.db.execute(
self.tables.nodes.update().values(parent_id=None)
)
super(SingleCycleTest, self).teardown()
def test_one_to_many_save(self):
Node, nodes = self.classes.Node, self.tables.nodes
mapper(Node, nodes, properties={
'children':relationship(Node)
})
sess = create_session()
n2, n3 = Node(data='n2'), Node(data='n3')
n1 = Node(data='n1', children=[n2, n3])
sess.add(n1)
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
{'parent_id': None, 'data': 'n1'}
),
AllOf(
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
lambda ctx: {'parent_id': n1.id, 'data': 'n2'}
),
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
lambda ctx: {'parent_id': n1.id, 'data': 'n3'}
),
)
)
def test_one_to_many_delete_all(self):
Node, nodes = self.classes.Node, self.tables.nodes
mapper(Node, nodes, properties={
'children':relationship(Node)
})
sess = create_session()
n2, n3 = Node(data='n2', children=[]), Node(data='n3', children=[])
n1 = Node(data='n1', children=[n2, n3])
sess.add(n1)
sess.flush()
sess.delete(n1)
sess.delete(n2)
sess.delete(n3)
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL("DELETE FROM nodes WHERE nodes.id = :id",
lambda ctx:[{'id':n2.id}, {'id':n3.id}]),
CompiledSQL("DELETE FROM nodes WHERE nodes.id = :id",
lambda ctx: {'id':n1.id})
)
def test_one_to_many_delete_parent(self):
Node, nodes = self.classes.Node, self.tables.nodes
mapper(Node, nodes, properties={
'children':relationship(Node)
})
sess = create_session()
n2, n3 = Node(data='n2', children=[]), Node(data='n3', children=[])
n1 = Node(data='n1', children=[n2, n3])
sess.add(n1)
sess.flush()
sess.delete(n1)
self.assert_sql_execution(
testing.db,
sess.flush,
AllOf(
CompiledSQL("UPDATE nodes SET parent_id=:parent_id "
"WHERE nodes.id = :nodes_id",
lambda ctx: {'nodes_id':n3.id, 'parent_id':None}),
CompiledSQL("UPDATE nodes SET parent_id=:parent_id "
"WHERE nodes.id = :nodes_id",
lambda ctx: {'nodes_id':n2.id, 'parent_id':None}),
),
CompiledSQL("DELETE FROM nodes WHERE nodes.id = :id",
lambda ctx:{'id':n1.id})
)
def test_many_to_one_save(self):
Node, nodes = self.classes.Node, self.tables.nodes
mapper(Node, nodes, properties={
'parent':relationship(Node, remote_side=nodes.c.id)
})
sess = create_session()
n1 = Node(data='n1')
n2, n3 = Node(data='n2', parent=n1), Node(data='n3', parent=n1)
sess.add_all([n2, n3])
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
{'parent_id': None, 'data': 'n1'}
),
AllOf(
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
lambda ctx: {'parent_id': n1.id, 'data': 'n2'}
),
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
lambda ctx: {'parent_id': n1.id, 'data': 'n3'}
),
)
)
def test_many_to_one_delete_all(self):
Node, nodes = self.classes.Node, self.tables.nodes
mapper(Node, nodes, properties={
'parent':relationship(Node, remote_side=nodes.c.id)
})
sess = create_session()
n1 = Node(data='n1')
n2, n3 = Node(data='n2', parent=n1), Node(data='n3', parent=n1)
sess.add_all([n2, n3])
sess.flush()
sess.delete(n1)
sess.delete(n2)
sess.delete(n3)
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL("DELETE FROM nodes WHERE nodes.id = :id",
lambda ctx:[{'id':n2.id},{'id':n3.id}]),
CompiledSQL("DELETE FROM nodes WHERE nodes.id = :id",
lambda ctx: {'id':n1.id})
)
def test_many_to_one_set_null_unloaded(self):
Node, nodes = self.classes.Node, self.tables.nodes
mapper(Node, nodes, properties={
'parent':relationship(Node, remote_side=nodes.c.id)
})
sess = create_session()
n1 = Node(data='n1')
n2 = Node(data='n2', parent=n1)
sess.add_all([n1, n2])
sess.flush()
sess.close()
n2 = sess.query(Node).filter_by(data='n2').one()
n2.parent = None
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"UPDATE nodes SET parent_id=:parent_id WHERE "
"nodes.id = :nodes_id",
lambda ctx: {"parent_id":None, "nodes_id":n2.id}
)
)
def test_cycle_rowswitch(self):
Node, nodes = self.classes.Node, self.tables.nodes
mapper(Node, nodes, properties={
'children':relationship(Node)
})
sess = create_session()
n2, n3 = Node(data='n2', children=[]), Node(data='n3', children=[])
n1 = Node(data='n1', children=[n2])
sess.add(n1)
sess.flush()
sess.delete(n2)
n3.id = n2.id
n1.children.append(n3)
sess.flush()
def test_bidirectional_mutations_one(self):
Node, nodes = self.classes.Node, self.tables.nodes
mapper(Node, nodes, properties={
'children':relationship(Node,
backref=backref('parent',
remote_side=nodes.c.id))
})
sess = create_session()
n2, n3 = Node(data='n2', children=[]), Node(data='n3', children=[])
n1 = Node(data='n1', children=[n2])
sess.add(n1)
sess.flush()
sess.delete(n2)
n1.children.append(n3)
sess.flush()
sess.delete(n1)
sess.delete(n3)
sess.flush()
def test_bidirectional_multilevel_save(self):
Node, nodes = self.classes.Node, self.tables.nodes
mapper(Node, nodes, properties={
'children':relationship(Node,
backref=backref('parent', remote_side=nodes.c.id)
)
})
sess = create_session()
n1 = Node(data='n1')
n1.children.append(Node(data='n11'))
n12 = Node(data='n12')
n1.children.append(n12)
n1.children.append(Node(data='n13'))
n1.children[1].children.append(Node(data='n121'))
n1.children[1].children.append(Node(data='n122'))
n1.children[1].children.append(Node(data='n123'))
sess.add(n1)
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
lambda ctx:{'parent_id':None, 'data':'n1'}
),
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
lambda ctx:{'parent_id':n1.id, 'data':'n11'}
),
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
lambda ctx:{'parent_id':n1.id, 'data':'n12'}
),
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
lambda ctx:{'parent_id':n1.id, 'data':'n13'}
),
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
lambda ctx:{'parent_id':n12.id, 'data':'n121'}
),
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
lambda ctx:{'parent_id':n12.id, 'data':'n122'}
),
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
lambda ctx:{'parent_id':n12.id, 'data':'n123'}
),
)
def test_singlecycle_flush_size(self):
Node, nodes = self.classes.Node, self.tables.nodes
mapper(Node, nodes, properties={
'children':relationship(Node)
})
sess = create_session()
n1 = Node(data='ed')
sess.add(n1)
self._assert_uow_size(sess, 2)
sess.flush()
n1.data='jack'
self._assert_uow_size(sess, 2)
sess.flush()
n2 = Node(data='foo')
sess.add(n2)
sess.flush()
n1.children.append(n2)
self._assert_uow_size(sess, 3)
sess.flush()
sess = create_session()
n1 = sess.query(Node).first()
n1.data='ed'
self._assert_uow_size(sess, 2)
n1.children
self._assert_uow_size(sess, 2)
def test_delete_unloaded_m2o(self):
Node, nodes = self.classes.Node, self.tables.nodes
mapper(Node, nodes, properties={
'parent':relationship(Node, remote_side=nodes.c.id)
})
parent = Node()
c1, c2 = Node(parent=parent), Node(parent=parent)
session = Session()
session.add_all([c1, c2])
session.add(parent)
session.flush()
pid = parent.id
c1id = c1.id
c2id = c2.id
session.expire(parent)
session.expire(c1)
session.expire(c2)
session.delete(c1)
session.delete(c2)
session.delete(parent)
# testing that relationships
# are loaded even if all ids/references are
# expired
self.assert_sql_execution(
testing.db,
session.flush,
AllOf(
# ensure all three m2os are loaded.
# the selects here are in fact unexpiring
# each row - the m2o comes from the identity map.
CompiledSQL(
"SELECT nodes.id AS nodes_id, nodes.parent_id AS "
"nodes_parent_id, "
"nodes.data AS nodes_data FROM nodes "
"WHERE nodes.id = :param_1",
lambda ctx: {'param_1': pid}
),
CompiledSQL(
"SELECT nodes.id AS nodes_id, nodes.parent_id AS "
"nodes_parent_id, "
"nodes.data AS nodes_data FROM nodes "
"WHERE nodes.id = :param_1",
lambda ctx: {'param_1': c1id}
),
CompiledSQL(
"SELECT nodes.id AS nodes_id, nodes.parent_id AS "
"nodes_parent_id, "
"nodes.data AS nodes_data FROM nodes "
"WHERE nodes.id = :param_1",
lambda ctx: {'param_1': c2id}
),
CompiledSQL(
"DELETE FROM nodes WHERE nodes.id = :id",
lambda ctx: [{'id': c1id}, {'id': c2id}]
),
CompiledSQL(
"DELETE FROM nodes WHERE nodes.id = :id",
lambda ctx: {'id': pid}
),
),
)
class SingleCyclePlusAttributeTest(fixtures.MappedTest,
testing.AssertsExecutionResults, AssertsUOW):
@classmethod
def define_tables(cls, metadata):
Table('nodes', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('parent_id', Integer, ForeignKey('nodes.id')),
Column('data', String(30))
)
Table('foobars', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('parent_id', Integer, ForeignKey('nodes.id')),
)
def test_flush_size(self):
foobars, nodes = self.tables.foobars, self.tables.nodes
class Node(fixtures.ComparableEntity):
pass
class FooBar(fixtures.ComparableEntity):
pass
mapper(Node, nodes, properties={
'children':relationship(Node),
'foobars':relationship(FooBar)
})
mapper(FooBar, foobars)
sess = create_session()
n1 = Node(data='n1')
n2 = Node(data='n2')
n1.children.append(n2)
sess.add(n1)
# ensure "foobars" doesn't get yanked in here
self._assert_uow_size(sess, 3)
n1.foobars.append(FooBar())
# saveupdateall/deleteall for FooBar added here,
# plus processstate node.foobars
# currently the "all" procs stay in pairs
self._assert_uow_size(sess, 6)
sess.flush()
class SingleCycleM2MTest(fixtures.MappedTest,
testing.AssertsExecutionResults, AssertsUOW):
@classmethod
def define_tables(cls, metadata):
nodes = Table('nodes', metadata,
Column('id', Integer,
primary_key=True,
test_needs_autoincrement=True),
Column('data', String(30)),
Column('favorite_node_id', Integer, ForeignKey('nodes.id'))
)
node_to_nodes =Table('node_to_nodes', metadata,
Column('left_node_id', Integer,
ForeignKey('nodes.id'),primary_key=True),
Column('right_node_id', Integer,
ForeignKey('nodes.id'),primary_key=True),
)
def test_many_to_many_one(self):
nodes, node_to_nodes = self.tables.nodes, self.tables.node_to_nodes
class Node(fixtures.ComparableEntity):
pass
mapper(Node, nodes, properties={
'children':relationship(Node, secondary=node_to_nodes,
primaryjoin=nodes.c.id==node_to_nodes.c.left_node_id,
secondaryjoin=nodes.c.id==node_to_nodes.c.right_node_id,
backref='parents'
),
'favorite':relationship(Node, remote_side=nodes.c.id)
})
sess = create_session()
n1 = Node(data='n1')
n2 = Node(data='n2')
n3 = Node(data='n3')
n4 = Node(data='n4')
n5 = Node(data='n5')
n4.favorite = n3
n1.favorite = n5
n5.favorite = n2
n1.children = [n2, n3, n4]
n2.children = [n3, n5]
n3.children = [n5, n4]
sess.add_all([n1, n2, n3, n4, n5])
# can't really assert the SQL on this easily
# since there's too many ways to insert the rows.
# so check the end result
sess.flush()
eq_(
sess.query(node_to_nodes.c.left_node_id,
node_to_nodes.c.right_node_id).\
order_by(node_to_nodes.c.left_node_id,
node_to_nodes.c.right_node_id).\
all(),
sorted([
(n1.id, n2.id), (n1.id, n3.id), (n1.id, n4.id),
(n2.id, n3.id), (n2.id, n5.id),
(n3.id, n5.id), (n3.id, n4.id)
])
)
sess.delete(n1)
self.assert_sql_execution(
testing.db,
sess.flush,
# this is n1.parents firing off, as it should, since
# passive_deletes is False for n1.parents
CompiledSQL(
"SELECT nodes.id AS nodes_id, nodes.data AS nodes_data, "
"nodes.favorite_node_id AS nodes_favorite_node_id FROM "
"nodes, node_to_nodes WHERE :param_1 = "
"node_to_nodes.right_node_id AND nodes.id = "
"node_to_nodes.left_node_id" ,
lambda ctx:{'param_1': n1.id},
),
CompiledSQL(
"DELETE FROM node_to_nodes WHERE "
"node_to_nodes.left_node_id = :left_node_id AND "
"node_to_nodes.right_node_id = :right_node_id",
lambda ctx:[
{'right_node_id': n2.id, 'left_node_id': n1.id},
{'right_node_id': n3.id, 'left_node_id': n1.id},
{'right_node_id': n4.id, 'left_node_id': n1.id}
]
),
CompiledSQL(
"DELETE FROM nodes WHERE nodes.id = :id",
lambda ctx:{'id': n1.id}
),
)
for n in [n2, n3, n4, n5]:
sess.delete(n)
# load these collections
# outside of the flush() below
n4.children
n5.children
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"DELETE FROM node_to_nodes WHERE node_to_nodes.left_node_id "
"= :left_node_id AND node_to_nodes.right_node_id = "
":right_node_id",
lambda ctx:[
{'right_node_id': n5.id, 'left_node_id': n3.id},
{'right_node_id': n4.id, 'left_node_id': n3.id},
{'right_node_id': n3.id, 'left_node_id': n2.id},
{'right_node_id': n5.id, 'left_node_id': n2.id}
]
),
CompiledSQL(
"DELETE FROM nodes WHERE nodes.id = :id",
lambda ctx:[{'id': n4.id}, {'id': n5.id}]
),
CompiledSQL(
"DELETE FROM nodes WHERE nodes.id = :id",
lambda ctx:[{'id': n2.id}, {'id': n3.id}]
),
)
class RowswitchAccountingTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table('parent', metadata,
Column('id', Integer, primary_key=True)
)
Table('child', metadata,
Column('id', Integer, ForeignKey('parent.id'), primary_key=True)
)
def test_accounting_for_rowswitch(self):
parent, child = self.tables.parent, self.tables.child
class Parent(object):
def __init__(self, id):
self.id = id
self.child = Child()
class Child(object):
pass
mapper(Parent, parent, properties={
'child':relationship(Child, uselist=False,
cascade="all, delete-orphan",
backref="parent")
})
mapper(Child, child)
sess = create_session(autocommit=False)
p1 = Parent(1)
sess.add(p1)
sess.commit()
sess.close()
p2 = Parent(1)
p3 = sess.merge(p2)
old = attributes.get_history(p3, 'child')[2][0]
assert old in sess
sess.flush()
assert p3.child._sa_instance_state.session_id == sess.hash_key
assert p3.child in sess
p4 = Parent(1)
p5 = sess.merge(p4)
old = attributes.get_history(p5, 'child')[2][0]
assert old in sess
sess.flush()
class BatchInsertsTest(fixtures.MappedTest, testing.AssertsExecutionResults):
@classmethod
def define_tables(cls, metadata):
Table('t', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('data', String(50)),
Column('def_', String(50), server_default='def1')
)
def test_batch_interaction(self):
"""test batching groups same-structured, primary
key present statements together.
"""
t = self.tables.t
class T(fixtures.ComparableEntity):
pass
mapper(T, t)
sess = Session()
sess.add_all([
T(data='t1'),
T(data='t2'),
T(id=3, data='t3'),
T(id=4, data='t4'),
T(id=5, data='t5'),
T(id=6, data=func.lower('t6')),
T(id=7, data='t7'),
T(id=8, data='t8'),
T(id=9, data='t9', def_='def2'),
T(id=10, data='t10', def_='def3'),
T(id=11, data='t11'),
])
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"INSERT INTO t (data) VALUES (:data)",
{'data': 't1'}
),
CompiledSQL(
"INSERT INTO t (data) VALUES (:data)",
{'data': 't2'}
),
CompiledSQL(
"INSERT INTO t (id, data) VALUES (:id, :data)",
[{'data': 't3', 'id': 3},
{'data': 't4', 'id': 4},
{'data': 't5', 'id': 5}]
),
CompiledSQL(
"INSERT INTO t (id, data) VALUES (:id, lower(:lower_1))",
{'lower_1': 't6', 'id': 6}
),
CompiledSQL(
"INSERT INTO t (id, data) VALUES (:id, :data)",
[{'data': 't7', 'id': 7}, {'data': 't8', 'id': 8}]
),
CompiledSQL(
"INSERT INTO t (id, data, def_) VALUES (:id, :data, :def_)",
[{'data': 't9', 'id': 9, 'def_':'def2'},
{'data': 't10', 'id': 10, 'def_':'def3'}]
),
CompiledSQL(
"INSERT INTO t (id, data) VALUES (:id, :data)",
{'data': 't11', 'id': 11}
),
)
class LoadersUsingCommittedTest(UOWTest):
"""Test that events which occur within a flush()
get the same attribute loading behavior as on the outside
of the flush, and that the unit of work itself uses the
"committed" version of primary/foreign key attributes
when loading a collection for historical purposes (this typically
has importance for when primary key values change).
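    Illustrative sketch (not an actual test): the "committed" value is the one
    recorded as of the last flush/commit, and it is what the history API reports
    as "deleted" once an attribute changes in Python, e.g.::

        u1.id = 2                          # changed in memory, not yet flushed
        attributes.get_history(u1, 'id')   # added=[2], deleted=[1] <- committed value

    The unit of work consults that committed value (1) when it needs to locate
    dependent rows (such as addresses.user_id) during the flush.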
"""
def _mapper_setup(self, passive_updates=True):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses': relationship(Address,
order_by=addresses.c.email_address,
passive_updates=passive_updates,
backref='user')
})
mapper(Address, addresses)
return create_session(autocommit=False)
def test_before_update_m2o(self):
"""Expect normal many to one attribute load behavior
(should not get committed value)
from within public 'before_update' event"""
sess = self._mapper_setup()
Address, User = self.classes.Address, self.classes.User
def before_update(mapper, connection, target):
# if get committed is used to find target.user, then
# it will be still be u1 instead of u2
assert target.user.id == target.user_id == u2.id
from sqlalchemy import event
event.listen(Address, 'before_update', before_update)
a1 = Address(email_address='a1')
u1 = User(name='u1', addresses=[a1])
sess.add(u1)
u2 = User(name='u2')
sess.add(u2)
sess.commit()
sess.expunge_all()
# lookup an address and move it to the other user
a1 = sess.query(Address).get(a1.id)
# move address to another user's fk
assert a1.user_id == u1.id
a1.user_id = u2.id
sess.flush()
def test_before_update_o2m_passive(self):
"""Expect normal one to many attribute load behavior
(should not get committed value)
from within public 'before_update' event"""
self._test_before_update_o2m(True)
def test_before_update_o2m_notpassive(self):
"""Expect normal one to many attribute load behavior
(should not get committed value)
from within public 'before_update' event with
passive_updates=False
"""
self._test_before_update_o2m(False)
def _test_before_update_o2m(self, passive_updates):
sess = self._mapper_setup(passive_updates=passive_updates)
Address, User = self.classes.Address, self.classes.User
class AvoidReferencialError(Exception):
"""the test here would require ON UPDATE CASCADE on FKs
for the flush to fully succeed; this exception is used
to cancel the flush before we get that far.
"""
def before_update(mapper, connection, target):
if passive_updates:
# we shouldn't be using committed value.
# so, having switched target's primary key,
# we expect no related items in the collection
# since we are using passive_updates
# this is a behavior change since #2350
assert 'addresses' not in target.__dict__
eq_(target.addresses, [])
else:
# in contrast with passive_updates=True,
# here we expect the orm to have looked up the addresses
# with the committed value (it needs to in order to
# update the foreign keys). So we expect addresses
# collection to move with the user,
# (just like they will be after the update)
# collection is already loaded
assert 'addresses' in target.__dict__
eq_([a.id for a in target.addresses],
[a.id for a in [a1, a2]])
raise AvoidReferencialError()
from sqlalchemy import event
event.listen(User, 'before_update', before_update)
a1 = Address(email_address='jack1')
a2 = Address(email_address='jack2')
u1 = User(id=1, name='jack', addresses=[a1, a2])
sess.add(u1)
sess.commit()
sess.expunge_all()
u1 = sess.query(User).get(u1.id)
u1.id = 2
try:
sess.flush()
except AvoidReferencialError:
pass
|
|
from datetime import timedelta
import pendulum
from pendulum.utils._compat import PYPY
from .constants import SECONDS_PER_DAY
from .constants import SECONDS_PER_HOUR
from .constants import SECONDS_PER_MINUTE
from .constants import US_PER_SECOND
def _divide_and_round(a, b):
"""divide a by b and round result to the nearest integer
When the ratio is exactly half-way between two integers,
the even integer is returned.
"""
# Based on the reference implementation for divmod_near
# in Objects/longobject.c.
q, r = divmod(a, b)
# The output of divmod() is either a float or an int,
# but we always want it to be an int.
q = int(q)
# round up if either r / b > 0.5, or r / b == 0.5 and q is odd.
# The expression r / b > 0.5 is equivalent to 2 * r > b if b is
# positive, 2 * r < b if b negative.
r *= 2
greater_than_half = r > b if b > 0 else r < b
if greater_than_half or r == b and q % 2 == 1:
q += 1
return q
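# Illustrative examples of the round-half-to-even behaviour described above:
#
#   >>> _divide_and_round(3, 2)   # 1.5 rounds to the even integer 2
#   2
#   >>> _divide_and_round(5, 2)   # 2.5 also rounds to 2
#   2
#   >>> _divide_and_round(7, 2)   # 3.5 rounds up to 4
#   4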
class Duration(timedelta):
"""
Replacement for the standard timedelta class.
Provides several improvements over the base class.
"""
_y = None
_m = None
_w = None
_d = None
_h = None
_i = None
_s = None
_invert = None
def __new__(
cls,
days=0,
seconds=0,
microseconds=0,
milliseconds=0,
minutes=0,
hours=0,
weeks=0,
years=0,
months=0,
):
if not isinstance(years, int) or not isinstance(months, int):
raise ValueError("Float year and months are not supported")
self = timedelta.__new__(
cls,
days + years * 365 + months * 30,
seconds,
microseconds,
milliseconds,
minutes,
hours,
weeks,
)
# Intuitive normalization
total = self.total_seconds() - (years * 365 + months * 30) * SECONDS_PER_DAY
self._total = total
m = 1
if total < 0:
m = -1
self._microseconds = round(total % m * 1e6)
self._seconds = abs(int(total)) % SECONDS_PER_DAY * m
_days = abs(int(total)) // SECONDS_PER_DAY * m
self._days = _days
self._remaining_days = abs(_days) % 7 * m
self._weeks = abs(_days) // 7 * m
self._months = months
self._years = years
return self
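    # Illustrative sketch of the normalization performed in __new__ (values
    # follow from the arithmetic above; shown for clarity only):
    #
    #   >>> d = Duration(days=10, hours=25)      # 25 h carries over into the days
    #   >>> d.weeks, d.remaining_days, d.hours   # 11 days -> 1 week + 4 days, 1 hour left
    #   (1, 4, 1)
    #   >>> Duration(years=1, months=2).days     # years/months enter the base timedelta
    #   425                                      # as 365- and 30-day blocks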
def total_minutes(self):
return self.total_seconds() / SECONDS_PER_MINUTE
def total_hours(self):
return self.total_seconds() / SECONDS_PER_HOUR
def total_days(self):
return self.total_seconds() / SECONDS_PER_DAY
def total_weeks(self):
return self.total_days() / 7
if PYPY:
def total_seconds(self):
days = 0
if hasattr(self, "_years"):
days += self._years * 365
if hasattr(self, "_months"):
days += self._months * 30
if hasattr(self, "_remaining_days"):
days += self._weeks * 7 + self._remaining_days
else:
days += self._days
return (
(days * SECONDS_PER_DAY + self._seconds) * US_PER_SECOND
+ self._microseconds
) / US_PER_SECOND
@property
def years(self):
return self._years
@property
def months(self):
return self._months
@property
def weeks(self):
return self._weeks
if PYPY:
@property
def days(self):
return self._years * 365 + self._months * 30 + self._days
@property
def remaining_days(self):
return self._remaining_days
@property
def hours(self):
if self._h is None:
seconds = self._seconds
self._h = 0
if abs(seconds) >= 3600:
self._h = (abs(seconds) // 3600 % 24) * self._sign(seconds)
return self._h
@property
def minutes(self):
if self._i is None:
seconds = self._seconds
self._i = 0
if abs(seconds) >= 60:
self._i = (abs(seconds) // 60 % 60) * self._sign(seconds)
return self._i
@property
def seconds(self):
return self._seconds
@property
def remaining_seconds(self):
if self._s is None:
self._s = self._seconds
self._s = abs(self._s) % 60 * self._sign(self._s)
return self._s
@property
def microseconds(self):
return self._microseconds
@property
def invert(self):
if self._invert is None:
self._invert = self.total_seconds() < 0
return self._invert
def in_weeks(self):
return int(self.total_weeks())
def in_days(self):
return int(self.total_days())
def in_hours(self):
return int(self.total_hours())
def in_minutes(self):
return int(self.total_minutes())
def in_seconds(self):
return int(self.total_seconds())
def in_words(self, locale=None, separator=" "):
"""
Get the current interval in words in the current locale.
Ex (fr locale): 6 jours 23 heures 58 minutes
:param locale: The locale to use. Defaults to current locale.
:type locale: str
:param separator: The separator to use between each unit
:type separator: str
:rtype: str
"""
periods = [
("year", self.years),
("month", self.months),
("week", self.weeks),
("day", self.remaining_days),
("hour", self.hours),
("minute", self.minutes),
("second", self.remaining_seconds),
]
if locale is None:
locale = pendulum.get_locale()
locale = pendulum.locale(locale)
parts = []
for period in periods:
unit, count = period
if abs(count) > 0:
translation = locale.translation(
"units.{}.{}".format(unit, locale.plural(abs(count)))
)
parts.append(translation.format(count))
if not parts:
if abs(self.microseconds) > 0:
unit = "units.second.{}".format(locale.plural(1))
count = "{:.2f}".format(abs(self.microseconds) / 1e6)
else:
unit = "units.microsecond.{}".format(locale.plural(0))
count = 0
translation = locale.translation(unit)
parts.append(translation.format(count))
return separator.join(parts)
def _sign(self, value):
if value < 0:
return -1
return 1
def as_timedelta(self):
"""
Return the interval as a native timedelta.
:rtype: timedelta
"""
return timedelta(seconds=self.total_seconds())
def __str__(self):
return self.in_words()
def __repr__(self):
rep = f"{self.__class__.__name__}("
if self._years:
rep += f"years={self._years}, "
if self._months:
rep += f"months={self._months}, "
if self._weeks:
rep += f"weeks={self._weeks}, "
if self._days:
rep += f"days={self._remaining_days}, "
if self.hours:
rep += f"hours={self.hours}, "
if self.minutes:
rep += f"minutes={self.minutes}, "
if self.remaining_seconds:
rep += f"seconds={self.remaining_seconds}, "
if self.microseconds:
rep += f"microseconds={self.microseconds}, "
rep += ")"
return rep.replace(", )", ")")
def __add__(self, other):
if isinstance(other, timedelta):
return self.__class__(seconds=self.total_seconds() + other.total_seconds())
return NotImplemented
__radd__ = __add__
def __sub__(self, other):
if isinstance(other, timedelta):
return self.__class__(seconds=self.total_seconds() - other.total_seconds())
return NotImplemented
def __neg__(self):
return self.__class__(
years=-self._years,
months=-self._months,
weeks=-self._weeks,
days=-self._remaining_days,
seconds=-self._seconds,
microseconds=-self._microseconds,
)
def _to_microseconds(self):
return (self._days * (24 * 3600) + self._seconds) * 1000000 + self._microseconds
def __mul__(self, other):
if isinstance(other, int):
return self.__class__(
years=self._years * other,
months=self._months * other,
seconds=self._total * other,
)
if isinstance(other, float):
usec = self._to_microseconds()
a, b = other.as_integer_ratio()
return self.__class__(0, 0, _divide_and_round(usec * a, b))
return NotImplemented
__rmul__ = __mul__
def __floordiv__(self, other):
if not isinstance(other, (int, timedelta)):
return NotImplemented
usec = self._to_microseconds()
if isinstance(other, timedelta):
return usec // other._to_microseconds()
if isinstance(other, int):
return self.__class__(
0,
0,
usec // other,
years=self._years // other,
months=self._months // other,
)
def __truediv__(self, other):
if not isinstance(other, (int, float, timedelta)):
return NotImplemented
usec = self._to_microseconds()
if isinstance(other, timedelta):
return usec / other._to_microseconds()
if isinstance(other, int):
return self.__class__(
0,
0,
_divide_and_round(usec, other),
years=_divide_and_round(self._years, other),
months=_divide_and_round(self._months, other),
)
if isinstance(other, float):
a, b = other.as_integer_ratio()
return self.__class__(
0,
0,
_divide_and_round(b * usec, a),
years=_divide_and_round(self._years * b, a),
months=_divide_and_round(self._months * b, a),
)
__div__ = __floordiv__
def __mod__(self, other):
if isinstance(other, timedelta):
r = self._to_microseconds() % other._to_microseconds()
return self.__class__(0, 0, r)
return NotImplemented
def __divmod__(self, other):
if isinstance(other, timedelta):
q, r = divmod(self._to_microseconds(), other._to_microseconds())
return q, self.__class__(0, 0, r)
return NotImplemented
Duration.min = Duration(days=-999999999)
Duration.max = Duration(
days=999999999, hours=23, minutes=59, seconds=59, microseconds=999999
)
Duration.resolution = Duration(microseconds=1)
class AbsoluteDuration(Duration):
"""
Duration that expresses a time difference in absolute values.
"""
def __new__(
cls,
days=0,
seconds=0,
microseconds=0,
milliseconds=0,
minutes=0,
hours=0,
weeks=0,
years=0,
months=0,
):
if not isinstance(years, int) or not isinstance(months, int):
raise ValueError("Float year and months are not supported")
self = timedelta.__new__(
cls, days, seconds, microseconds, milliseconds, minutes, hours, weeks
)
# We need to compute the total_seconds() value
# on a native timedelta object
delta = timedelta(
days, seconds, microseconds, milliseconds, minutes, hours, weeks
)
# Intuitive normalization
self._total = delta.total_seconds()
total = abs(self._total)
self._microseconds = round(total % 1 * 1e6)
self._seconds = int(total) % SECONDS_PER_DAY
days = int(total) // SECONDS_PER_DAY
self._days = abs(days + years * 365 + months * 30)
self._remaining_days = days % 7
self._weeks = days // 7
self._months = abs(months)
self._years = abs(years)
return self
def total_seconds(self):
return abs(self._total)
@property
def invert(self):
if self._invert is None:
self._invert = self._total < 0
return self._invert
|
|
"""passlib.handlers.sha2_crypt - SHA256-Crypt / SHA512-Crypt"""
#=============================================================================
# imports
#=============================================================================
# core
import hashlib
import logging; log = logging.getLogger(__name__)
from warnings import warn
# site
# pkg
from passlib.utils import classproperty, h64, safe_crypt, test_crypt, \
repeat_string, to_unicode
from passlib.utils.compat import b, bytes, byte_elem_value, irange, u, \
uascii_to_str, unicode
import passlib.utils.handlers as uh
# local
__all__ = [
"sha512_crypt",
"sha256_crypt",
]
#=============================================================================
# pure-python backend, used by both sha256_crypt & sha512_crypt
# when crypt.crypt() backend is not available.
#=============================================================================
_BNULL = b('\x00')
# pre-calculated offsets used to speed up C digest stage (see notes below).
# sequence generated using the following:
##perms_order = "p,pp,ps,psp,sp,spp".split(",")
##def offset(i):
## key = (("p" if i % 2 else "") + ("s" if i % 3 else "") +
## ("p" if i % 7 else "") + ("" if i % 2 else "p"))
## return perms_order.index(key)
##_c_digest_offsets = [(offset(i), offset(i+1)) for i in range(0,42,2)]
_c_digest_offsets = (
(0, 3), (5, 1), (5, 3), (1, 2), (5, 1), (5, 3), (1, 3),
(4, 1), (5, 3), (1, 3), (5, 0), (5, 3), (1, 3), (5, 1),
(4, 3), (1, 3), (5, 1), (5, 2), (1, 3), (5, 1), (5, 3),
)
# map used to transpose bytes when encoding final sha256_crypt digest
_256_transpose_map = (
20, 10, 0, 11, 1, 21, 2, 22, 12, 23, 13, 3, 14, 4, 24, 5,
25, 15, 26, 16, 6, 17, 7, 27, 8, 28, 18, 29, 19, 9, 30, 31,
)
# map used to transpose bytes when encoding final sha512_crypt digest
_512_transpose_map = (
42, 21, 0, 1, 43, 22, 23, 2, 44, 45, 24, 3, 4, 46, 25, 26,
5, 47, 48, 27, 6, 7, 49, 28, 29, 8, 50, 51, 30, 9, 10, 52,
31, 32, 11, 53, 54, 33, 12, 13, 55, 34, 35, 14, 56, 57, 36, 15,
16, 58, 37, 38, 17, 59, 60, 39, 18, 19, 61, 40, 41, 20, 62, 63,
)
def _raw_sha2_crypt(pwd, salt, rounds, use_512=False):
"""perform raw sha256-crypt / sha512-crypt
this function provides a pure-python implementation of the internals
for the SHA256-Crypt and SHA512-Crypt algorithms; it doesn't
handle any of the parsing/validation of the hash strings themselves.
:arg pwd: password chars/bytes to encrypt
:arg salt: salt chars to use
:arg rounds: linear rounds cost
:arg use_512: use sha512-crypt instead of sha256-crypt mode
:returns:
encoded checksum chars
"""
#===================================================================
# init & validate inputs
#===================================================================
# validate secret
if isinstance(pwd, unicode):
# XXX: not sure what official unicode policy is, using this as default
pwd = pwd.encode("utf-8")
assert isinstance(pwd, bytes)
if _BNULL in pwd:
raise uh.exc.NullPasswordError(sha512_crypt if use_512 else sha256_crypt)
pwd_len = len(pwd)
# validate rounds
assert 1000 <= rounds <= 999999999, "invalid rounds"
# NOTE: spec says out-of-range rounds should be clipped, instead of
# causing an error. this function assumes that's been taken care of
# by the handler class.
# validate salt
assert isinstance(salt, unicode), "salt not unicode"
salt = salt.encode("ascii")
salt_len = len(salt)
assert salt_len < 17, "salt too large"
# NOTE: spec says salts larger than 16 bytes should be truncated,
# instead of causing an error. this function assumes that's been
# taken care of by the handler class.
# load sha256/512 specific constants
if use_512:
hash_const = hashlib.sha512
hash_len = 64
transpose_map = _512_transpose_map
else:
hash_const = hashlib.sha256
hash_len = 32
transpose_map = _256_transpose_map
#===================================================================
# digest B - used as subinput to digest A
#===================================================================
db = hash_const(pwd + salt + pwd).digest()
#===================================================================
# digest A - used to initialize first round of digest C
#===================================================================
# start out with pwd + salt
a_ctx = hash_const(pwd + salt)
a_ctx_update = a_ctx.update
# add pwd_len bytes of b, repeating b as many times as needed.
a_ctx_update(repeat_string(db, pwd_len))
# for each bit in pwd_len: add b if it's 1, or pwd if it's 0
i = pwd_len
while i:
a_ctx_update(db if i & 1 else pwd)
i >>= 1
# finish A
da = a_ctx.digest()
#===================================================================
# digest P from password - used instead of password itself
# when calculating digest C.
#===================================================================
if pwd_len < 64:
# this method is faster under python, but uses O(pwd_len**2) memory
# so we don't use it for larger passwords, to avoid a potential DOS.
dp = repeat_string(hash_const(pwd * pwd_len).digest(), pwd_len)
else:
tmp_ctx = hash_const(pwd)
tmp_ctx_update = tmp_ctx.update
i = pwd_len-1
while i:
tmp_ctx_update(pwd)
i -= 1
dp = repeat_string(tmp_ctx.digest(), pwd_len)
assert len(dp) == pwd_len
#===================================================================
# digest S - used instead of salt itself when calculating digest C
#===================================================================
ds = hash_const(salt * (16 + byte_elem_value(da[0]))).digest()[:salt_len]
assert len(ds) == salt_len, "salt_len somehow > hash_len!"
#===================================================================
# digest C - for a variable number of rounds, combine A, S, and P
# digests in various ways; in order to burn CPU time.
#===================================================================
# NOTE: the original SHA256/512-Crypt specification performs the C digest
# calculation using the following loop:
#
##dc = da
##i = 0
##while i < rounds:
## tmp_ctx = hash_const(dp if i & 1 else dc)
## if i % 3:
## tmp_ctx.update(ds)
## if i % 7:
## tmp_ctx.update(dp)
## tmp_ctx.update(dc if i & 1 else dp)
## dc = tmp_ctx.digest()
## i += 1
#
# The code Passlib uses (below) implements an equivalent algorithm,
# it's just been heavily optimized to pre-calculate a large number
# of things beforehand. It works off of a couple of observations
# about the original algorithm:
#
# 1. each round is a combination of 'dc', 'ds', and 'dp'; determined
# by whether 'i' is a multiple of 2, 3, and/or 7.
# 2. since lcm(2,3,7)==42, the series of combinations will repeat
# every 42 rounds.
# 3. even rounds 0-40 consist of 'hash(dc + round-specific-constant)';
# while odd rounds 1-41 consist of hash(round-specific-constant + dc)
#
# Using these observations, the following code...
# * calculates the round-specific combination of ds & dp for each round 0-41
# * runs through as many 42-round blocks as possible
# * runs through as many pairs of rounds as possible for remaining rounds
# * performs one last round if the total number of rounds is odd.
#
# this cuts out a lot of the control overhead incurred when running the
# original loop 40,000+ times in python, resulting in ~20% increase in
# speed under CPython (though still 2x slower than glibc crypt)
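# As a quick worked check of the decomposition above (numbers purely
# illustrative): for rounds=5000, divmod(5000, 42) gives blocks=119 and
# tail=2, so the code below runs 119 full 42-round blocks, then pairs=1
# leftover pair of rounds, and skips the final odd round since tail & 1 == 0.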
# prepare the 6 combinations of ds & dp which are needed
# (order of 'perms' must match how _c_digest_offsets was generated)
dp_dp = dp+dp
dp_ds = dp+ds
perms = [dp, dp_dp, dp_ds, dp_ds+dp, ds+dp, ds+dp_dp]
# build up list of even-round & odd-round constants,
# and store in 21-element list as (even,odd) pairs.
data = [ (perms[even], perms[odd]) for even, odd in _c_digest_offsets]
# perform as many full 42-round blocks as possible
dc = da
blocks, tail = divmod(rounds, 42)
while blocks:
for even, odd in data:
dc = hash_const(odd + hash_const(dc + even).digest()).digest()
blocks -= 1
# perform any leftover rounds
if tail:
# perform any pairs of rounds
pairs = tail>>1
for even, odd in data[:pairs]:
dc = hash_const(odd + hash_const(dc + even).digest()).digest()
# if rounds was odd, do one last round (since we started at 0,
# last round will be an even-numbered round)
if tail & 1:
dc = hash_const(dc + data[pairs][0]).digest()
#===================================================================
# encode digest using appropriate transpose map
#===================================================================
return h64.encode_transposed_bytes(dc, transpose_map).decode("ascii")
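# Illustrative sketch only (kept as a comment so nothing runs at import time);
# assuming the helpers above are importable, the raw routine can be exercised
# directly, e.g.:
#   chk = _raw_sha2_crypt(b"password", u("saltstring"), 5000)
#   assert len(chk) == 43   # sha256-crypt checksums are 43 hash64 chars
#   chk512 = _raw_sha2_crypt(b"password", u("saltstring"), 5000, use_512=True)
#   assert len(chk512) == 86  # sha512-crypt checksums are 86 hash64 chars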
#=============================================================================
# handlers
#=============================================================================
_UROUNDS = u("rounds=")
_UDOLLAR = u("$")
_UZERO = u("0")
class _SHA2_Common(uh.HasManyBackends, uh.HasRounds, uh.HasSalt,
uh.GenericHandler):
"class containing common code shared by sha256_crypt & sha512_crypt"
#===================================================================
# class attrs
#===================================================================
# name - set by subclass
setting_kwds = ("salt", "rounds", "implicit_rounds", "salt_size")
# ident - set by subclass
checksum_chars = uh.HASH64_CHARS
# checksum_size - set by subclass
min_salt_size = 0
max_salt_size = 16
salt_chars = uh.HASH64_CHARS
min_rounds = 1000 # bounds set by spec
max_rounds = 999999999 # bounds set by spec
rounds_cost = "linear"
_cdb_use_512 = False # flag for _calc_digest_builtin()
_rounds_prefix = None # ident + _UROUNDS
#===================================================================
# methods
#===================================================================
implicit_rounds = False
def __init__(self, implicit_rounds=None, **kwds):
super(_SHA2_Common, self).__init__(**kwds)
# if user calls encrypt() w/ 5000 rounds, default to compact form.
if implicit_rounds is None:
implicit_rounds = (self.use_defaults and self.rounds == 5000)
self.implicit_rounds = implicit_rounds
@classmethod
def from_string(cls, hash):
# basic format this parses -
# $5$[rounds=<rounds>$]<salt>[$<checksum>]
# TODO: this *could* use uh.parse_mc3(), except that the rounds
# portion has a slightly different grammar.
# convert to unicode, check for ident prefix, split on dollar signs.
hash = to_unicode(hash, "ascii", "hash")
ident = cls.ident
if not hash.startswith(ident):
raise uh.exc.InvalidHashError(cls)
assert len(ident) == 3
parts = hash[3:].split(_UDOLLAR)
# extract rounds value
if parts[0].startswith(_UROUNDS):
assert len(_UROUNDS) == 7
rounds = parts.pop(0)[7:]
if rounds.startswith(_UZERO) and rounds != _UZERO:
raise uh.exc.ZeroPaddedRoundsError(cls)
rounds = int(rounds)
implicit_rounds = False
else:
rounds = 5000
implicit_rounds = True
# rest should be salt and checksum
if len(parts) == 2:
salt, chk = parts
elif len(parts) == 1:
salt = parts[0]
chk = None
else:
raise uh.exc.MalformedHashError(cls)
# return new object
return cls(
rounds=rounds,
salt=salt,
checksum=chk or None,
implicit_rounds=implicit_rounds,
relaxed=not chk, # NOTE: relaxing parsing for config strings
# so that out-of-range rounds are clipped,
# since SHA2-Crypt spec treats them this way.
)
def to_string(self):
if self.rounds == 5000 and self.implicit_rounds:
hash = u("%s%s$%s") % (self.ident, self.salt,
self.checksum or u(''))
else:
hash = u("%srounds=%d$%s$%s") % (self.ident, self.rounds,
self.salt, self.checksum or u(''))
return uascii_to_str(hash)
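# For reference, the two encodings handled by from_string()/to_string() look
# like the following (salt and checksum values are placeholders):
#   "$5$rounds=12345$saltsalt$<43-char checksum>"   - explicit rounds
#   "$5$saltsalt$<43-char checksum>"                - implicit rounds == 5000
# from_string() maps the second form to rounds=5000 with implicit_rounds=True.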
#===================================================================
# backends
#===================================================================
backends = ("os_crypt", "builtin")
_has_backend_builtin = True
# _has_backend_os_crypt - provided by subclass
def _calc_checksum_builtin(self, secret):
return _raw_sha2_crypt(secret, self.salt, self.rounds,
self._cdb_use_512)
def _calc_checksum_os_crypt(self, secret):
hash = safe_crypt(secret, self.to_string())
if hash:
# NOTE: avoiding full parsing routine via from_string().checksum,
# and just extracting the bit we need.
cs = self.checksum_size
assert hash.startswith(self.ident) and hash[-cs-1] == _UDOLLAR
return hash[-cs:]
else:
return self._calc_checksum_builtin(secret)
#===================================================================
# eoc
#===================================================================
class sha256_crypt(_SHA2_Common):
"""This class implements the SHA256-Crypt password hash, and follows the :ref:`password-hash-api`.
It supports a variable-length salt, and a variable number of rounds.
The :meth:`~passlib.ifc.PasswordHash.encrypt` and :meth:`~passlib.ifc.PasswordHash.genconfig` methods accept the following optional keywords:
:type salt: str
:param salt:
Optional salt string.
If not specified, one will be autogenerated (this is recommended).
If specified, it must be 0-16 characters, drawn from the regexp range ``[./0-9A-Za-z]``.
:type rounds: int
:param rounds:
Optional number of rounds to use.
Defaults to 80000, must be between 1000 and 999999999, inclusive.
:type implicit_rounds: bool
:param implicit_rounds:
this is an internal option which generally doesn't need to be touched.
this flag determines whether the hash should omit the rounds parameter
when encoding it to a string; this is only permitted by the spec for rounds=5000,
and the flag is ignored otherwise. the spec requires the two different
encodings be preserved as they are, instead of normalizing them.
:type relaxed: bool
:param relaxed:
By default, providing an invalid value for one of the other
keywords will result in a :exc:`ValueError`. If ``relaxed=True``,
and the error can be corrected, a :exc:`~passlib.exc.PasslibHashWarning`
will be issued instead. Correctable errors include ``rounds``
that are too small or too large, and ``salt`` strings that are too long.
.. versionadded:: 1.6
"""
#===================================================================
# class attrs
#===================================================================
name = "sha256_crypt"
ident = u("$5$")
checksum_size = 43
default_rounds = 80000 # current passlib default
#===================================================================
# backends
#===================================================================
@classproperty
def _has_backend_os_crypt(cls):
return test_crypt("test", "$5$rounds=1000$test$QmQADEXMG8POI5W"
"Dsaeho0P36yK3Tcrgboabng6bkb/")
#===================================================================
# eoc
#===================================================================
#=============================================================================
# sha 512 crypt
#=============================================================================
class sha512_crypt(_SHA2_Common):
"""This class implements the SHA512-Crypt password hash, and follows the :ref:`password-hash-api`.
It supports a variable-length salt, and a variable number of rounds.
The :meth:`~passlib.ifc.PasswordHash.encrypt` and :meth:`~passlib.ifc.PasswordHash.genconfig` methods accept the following optional keywords:
:type salt: str
:param salt:
Optional salt string.
If not specified, one will be autogenerated (this is recommended).
If specified, it must be 0-16 characters, drawn from the regexp range ``[./0-9A-Za-z]``.
:type rounds: int
:param rounds:
Optional number of rounds to use.
Defaults to 60000, must be between 1000 and 999999999, inclusive.
:type implicit_rounds: bool
:param implicit_rounds:
this is an internal option which generally doesn't need to be touched.
this flag determines whether the hash should omit the rounds parameter
when encoding it to a string; this is only permitted by the spec for rounds=5000,
and the flag is ignored otherwise. the spec requires the two different
encodings be preserved as they are, instead of normalizing them.
:type relaxed: bool
:param relaxed:
By default, providing an invalid value for one of the other
keywords will result in a :exc:`ValueError`. If ``relaxed=True``,
and the error can be corrected, a :exc:`~passlib.exc.PasslibHashWarning`
will be issued instead. Correctable errors include ``rounds``
that are too small or too large, and ``salt`` strings that are too long.
.. versionadded:: 1.6
"""
#===================================================================
# class attrs
#===================================================================
name = "sha512_crypt"
ident = u("$6$")
checksum_size = 86
_cdb_use_512 = True
default_rounds = 60000 # current passlib default
#===================================================================
# backend
#===================================================================
@classproperty
def _has_backend_os_crypt(cls):
return test_crypt("test", "$6$rounds=1000$test$2M/Lx6Mtobqj"
"Ljobw0Wmo4Q5OFx5nVLJvmgseatA6oMn"
"yWeBdRDx4DU.1H3eGmse6pgsOgDisWBG"
"I5c7TZauS0")
#===================================================================
# eoc
#===================================================================
#=============================================================================
# eof
#=============================================================================
|
|
#/u/GoldenSights
import praw # simple interface to the reddit API, also handles rate limiting of requests
import time
import sqlite3
import json
import urllib.request
from bs4 import BeautifulSoup
'''USER CONFIGURATION'''
USERNAME = ""
#This is the bot's Username. In order to send mail, he must have some amount of Karma.
PASSWORD = ""
#This is the bot's Password.
USERAGENT = ""
#This is a short description of what the bot does. For example "/u/GoldenSights' Newsletter bot"
#This bot gets its subreddits from subreddit.txt
MAXPERTHREAD = 4
#This is the maximum number of individuals to be fetched in a single thread
COMMENTHEADER = ""
#At the top of the comment
COMMENTALSOSEE = "**Also see:**"
#After the first individual's full rundown
COMMENTFOOTER = "This comment was automatically generated. [Source Code](https://github.com/voussoir/reddit/tree/master/Politician)"
#At the bottom of the comment
#Newline characters will be managed automatically in all three
MAXPOSTS = 100
#This is how many posts you want to retrieve all at once. PRAW can download 100 at a time.
WAIT = 20
#This is how many seconds you will wait between cycles. The bot is completely inactive during this time.
'''All done!'''
WAITS = str(WAIT)
with open('full.txt', 'r') as f:
FULL = json.loads(f.read())
with open('nick.txt', 'r') as f:
NICK = json.loads(f.read())
print('Loaded name tables.')
try:
import bot #This is a file in my python library which contains my Bot's username and password. I can push code to Git without showing credentials
USERNAME = bot.uG
PASSWORD = bot.pG
USERAGENT = bot.aG
except ImportError:
pass
sql = sqlite3.connect('sql.db')
print('Loaded SQL Database')
cur = sql.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS oldposts(ID TEXT)')
cur.execute('CREATE TABLE IF NOT EXISTS oldthreads(ID TEXT, COUNT INT, NAMES TEXT)')
print('Loaded Completed table')
sql.commit()
r = praw.Reddit(USERAGENT)
r.login(USERNAME, PASSWORD)
def generatepolitician(iid, terminate=False):
print('Fetching ' + iid)
url = 'https://www.opensecrets.org/politicians/summary.php?cycle=2014&cid=%s&type=I' % iid
filea = BeautifulSoup(urllib.request.urlopen(url))
comment = ''
comment += '[**' + filea.find_all('h1')[1].string
comment += ', ' + filea.find(id='title').string + '**](' + url + ')\n\n'
if terminate:
return comment
elecinfo = filea.find(id='elec').contents
comment += elecinfo[1].string + ', '
comment += elecinfo[2].strip() + '\n\n.\n\n'
f = filea.find(id='summaryData').find_all('td')
l = []
for i in f:
for c in i.contents:
c = str(c).strip().replace('\n', '')
if '$' in c:
l.append(c.split()[-1])
h = filea.find(id='profileLeftColumn')
h = h.h2.contents[-1]
comment += '**' + h + '**\n\n'
table = 'Raised | ' + l[0] + '\n:- | -:\n'
table += 'Spent | ' + l[1] + '\n'
table += 'Cash on hand | ' + l[2] + '\n'
table += 'Debts | ' + l[3] + '\n'
comment += table
comment += '\n\n.\n\n'
h2s = filea.find_all('h2')
h = h2s[2].string
comment += '**' + h + '**\n\n'
table = 'Industry | Total | Indivs | PACs\n:- | -: | -: | -:\n'
industries = filea.find(id='topIndus').contents
item = industries[1]
contents = item.contents
for i in contents:
for x in i:
table += x.string + ' | '
table += '\n'
comment += table
return comment
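#For illustration, the markdown assembled above takes roughly this shape
#(every name and figure below is a placeholder, not real data):
#
# [**Jane Doe, (D-XX)**](https://www.opensecrets.org/politicians/summary.php?...)
#
# **Campaign finance heading**
#
# Raised | $1,234,567
# :- | -:
# Spent | $1,000,000
# Cash on hand | $200,000
# Debts | $0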
def dropfrom(filename, content):
print('Dropping ' + content + ' from ' + filename)
f = open(filename, 'r')
l = [line.strip() for line in f.readlines()]
f.close()
for i in range(len(l)):
item = l[i]
if content.lower() == item.lower():
l[i] = ''
while '' in l:
l.remove('')
while '\n' in l:
l.remove('\n')
f = open(filename, 'w')
for item in l:
print(item, file=f)
f.close()
def scan():
print('\nChecking blacklist')
blackfile = open('blacklist.txt', 'r')
blacklist = [line.strip() for line in blackfile.readlines()]
blackfile.close()
print('Checking subreddits\n')
subfile = open('subreddit.txt', 'r')
sublist = [line.strip() for line in subfile.readlines()]
while '' in sublist:
sublist.remove('')
subfile.close()
if sublist == []:
print('Subreddit list is empty')
return
SUBREDDIT = '+'.join(sublist)
print('Scanning ' + SUBREDDIT)
subreddit = r.get_subreddit(SUBREDDIT)
print('Getting submissions...')
posts = list(subreddit.get_new(limit=MAXPOSTS))
print('Getting comments...')
posts += list(subreddit.get_comments(limit=MAXPOSTS))
for post in posts:
cur.execute('SELECT * FROM oldposts WHERE ID=?', [post.fullname])
if not cur.fetchone():
print(post.fullname + ': New')
ids = []
try:
pauthor = post.author.name.lower()
if not any(pauthor == blacklisted.lower() for blacklisted in blacklist):
if type(post) == praw.objects.Comment:
submissionid = post.link_id
if type(post) == praw.objects.Submission:
submissionid = post.fullname
print(post.fullname + ': Passed blacklist check')
cur.execute('SELECT * FROM oldthreads WHERE ID=?', [submissionid])
fetched = cur.fetchone()
if not fetched:
cur.execute('INSERT INTO oldthreads VALUES(?, ?, ?)', [submissionid, 0, ''])
fetched = [submissionid, 0, '']
if fetched[1] <= MAXPERTHREAD:
if type(post) == praw.objects.Submission:
pbody = post.title.lower() + '\n\n' + post.selftext.lower()
if type(post) == praw.objects.Comment:
pbody = post.body.lower()
#print(pbody)
for member in FULL:
if member.lower() in pbody:
idd = FULL[member]
if idd not in ids:
ids.append(idd)
for member in NICK:
if member.lower() in pbody:
idd = NICK[member]
if idd not in ids:
ids.append(idd)
else:
print(submissionid + ': Already reached limit for thread')
else:
print(post.fullname + ': Author ' + pauthor + ' is blacklisted.')
except AttributeError:
print(post.fullname + ': Author is deleted.')
ids = ids[:5]
if len(ids) > 0:
print(post.fullname + ': Produced ' + str(len(ids)) + ' items.')
print('\t', ids)
count = fetched[1]
print(submissionid + ': has', count)
alreadyseen = fetched[2].split()
print(submissionid + ': already seen:', alreadyseen)
for item in range(len(ids)):
if ids[item] in alreadyseen:
print('\t' + ids[item] + ' has already been seen in this thread')
ids[item] = ''
while '' in ids:
ids.remove('')
if len(ids) > 0:
newcomment = COMMENTHEADER + '\n\n'
newcomment += generatepolitician(ids[0])
if len(ids) > 1:
newcomment += '\n\n.\n\n' + COMMENTALSOSEE + '\n\n'
for member in ids[1:]:
newcomment += generatepolitician(member, terminate=True)
newcomment += '\n\n' + COMMENTFOOTER
print(post.fullname + ': Writing reply.')
try:
if type(post) == praw.objects.Submission:
post.add_comment(newcomment)
if type(post) == praw.objects.Comment:
post.reply(newcomment)
except praw.requests.exceptions.HTTPError:
print('HTTPError: Probably banned in this sub')
dropfrom('subreddit.txt', post.subreddit.display_name)
alreadyseen = ' '.join(alreadyseen) + ' ' + ' '.join(ids)
cur.execute('UPDATE oldthreads SET COUNT=?, NAMES=? WHERE ID=?', [count+len(ids), alreadyseen, submissionid])
cur.execute('INSERT INTO oldposts VALUES(?)', [post.fullname])
sql.commit()
while True:
scan()
print('Running again in ' + WAITS + ' seconds.')
time.sleep(WAIT)
|
|
# sqlite/pysqlite.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for the SQLite database via pysqlite.
Note that pysqlite is the same driver as the ``sqlite3``
module included with the Python distribution.
Driver
------
When using Python 2.5 and above, the built in ``sqlite3`` driver is
already installed and no additional installation is needed. Otherwise,
the ``pysqlite2`` driver needs to be present. This is the same driver as
``sqlite3``, just with a different name.
The ``pysqlite2`` driver will be loaded first, and if not found, ``sqlite3``
is loaded. This allows an explicitly installed pysqlite driver to take
precedence over the built in one. As with all dialects, a specific
DBAPI module may be provided to :func:`~sqlalchemy.create_engine()` to control
this explicitly::
from sqlite3 import dbapi2 as sqlite
e = create_engine('sqlite+pysqlite:///file.db', module=sqlite)
Full documentation on pysqlite is available at:
`<http://www.initd.org/pub/software/pysqlite/doc/usage-guide.html>`_
Connect Strings
---------------
The file specification for the SQLite database is taken as the "database" portion of
the URL. Note that the format of a url is::
driver://user:pass@host/database
This means that the actual filename to be used starts with the characters to the
**right** of the third slash. So connecting to a relative filepath looks like::
# relative path
e = create_engine('sqlite:///path/to/database.db')
An absolute path, which is denoted by starting with a slash, means you need **four**
slashes::
# absolute path
e = create_engine('sqlite:////path/to/database.db')
To use a Windows path, regular drive specifications and backslashes can be used.
Double backslashes are probably needed::
# absolute path on Windows
e = create_engine('sqlite:///C:\\\\path\\\\to\\\\database.db')
The sqlite ``:memory:`` identifier is the default if no filepath is present. Specify
``sqlite://`` and nothing else::
# in-memory database
e = create_engine('sqlite://')
Compatibility with sqlite3 "native" date and datetime types
-----------------------------------------------------------
The pysqlite driver includes the sqlite3.PARSE_DECLTYPES and
sqlite3.PARSE_COLNAMES options, which have the effect of any column
or expression explicitly cast as "date" or "timestamp" will be converted
to a Python date or datetime object. The date and datetime types provided
with the pysqlite dialect are not currently compatible with these options,
since they render the ISO date/datetime including microseconds, which
pysqlite's driver does not. Additionally, SQLAlchemy does not at
this time automatically render the "cast" syntax required for the
freestanding functions "current_timestamp" and "current_date" to return
datetime/date types natively. Unfortunately, pysqlite
does not provide the standard DBAPI types in ``cursor.description``,
leaving SQLAlchemy with no way to detect these types on the fly
without expensive per-row type checks.
Keeping in mind that pysqlite's parsing option is not recommended,
nor should be necessary, for use with SQLAlchemy, usage of PARSE_DECLTYPES
can be forced if one configures "native_datetime=True" on create_engine()::
engine = create_engine('sqlite://',
connect_args={'detect_types': sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES},
native_datetime=True
)
With this flag enabled, the DATE and TIMESTAMP types (but note - not the DATETIME
or TIME types...confused yet ?) will not perform any bind parameter or result
processing. Execution of "func.current_date()" will return a string.
"func.current_timestamp()" is registered as returning a DATETIME type in
SQLAlchemy, so this function still receives SQLAlchemy-level result processing.
Threading/Pooling Behavior
---------------------------
Pysqlite's default behavior is to prohibit the usage of a single connection
in more than one thread. This is controlled by the ``check_same_thread``
Pysqlite flag. This default is intended to work with older versions
of SQLite that did not support multithreaded operation under
various circumstances. In particular, older SQLite versions
did not allow a ``:memory:`` database to be used in multiple threads
under any circumstances.
SQLAlchemy sets up pooling to work with Pysqlite's default behavior:
* When a ``:memory:`` SQLite database is specified, the dialect by default will use
:class:`.SingletonThreadPool`. This pool maintains a single connection per
thread, so that all access to the engine within the current thread use the
same ``:memory:`` database - other threads would access a different
``:memory:`` database.
* When a file-based database is specified, the dialect will use :class:`.NullPool`
as the source of connections. This pool closes and discards connections
which are returned to the pool immediately. SQLite file-based connections
have extremely low overhead, so pooling is not necessary. The scheme also
prevents a connection from being used again in a different thread and works
best with SQLite's coarse-grained file locking.
.. note:: The default selection of :class:`.NullPool` for SQLite file-based databases
is new in SQLAlchemy 0.7. Previous versions
select :class:`.SingletonThreadPool` by
default for all SQLite databases.
Modern versions of SQLite no longer have the threading restrictions, and assuming
the sqlite3/pysqlite library was built with SQLite's default threading mode
of "Serialized", even ``:memory:`` databases can be shared among threads.
Using a Memory Database in Multiple Threads
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To use a ``:memory:`` database in a multithreaded scenario, the same connection
object must be shared among threads, since the database exists
only within the scope of that connection. The :class:`.StaticPool` implementation
will maintain a single connection globally, and the ``check_same_thread`` flag
can be passed to Pysqlite as ``False``::
from sqlalchemy.pool import StaticPool
engine = create_engine('sqlite://',
connect_args={'check_same_thread':False},
poolclass=StaticPool)
Note that using a ``:memory:`` database in multiple threads requires a recent
version of SQLite.
Using Temporary Tables with SQLite
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Due to the way SQLite deals with temporary tables, if you wish to use a temporary table
in a file-based SQLite database across multiple checkouts from the connection pool, such
as when using an ORM :class:`.Session` where the temporary table should continue to remain
after :meth:`.commit` or :meth:`.rollback` is called,
a pool which maintains a single connection must be used. Use :class:`.SingletonThreadPool`
if the scope is only needed within the current thread, or :class:`.StaticPool` if the scope is
needed within multiple threads for this case::
# maintain the same connection per thread
from sqlalchemy.pool import SingletonThreadPool
engine = create_engine('sqlite:///mydb.db',
poolclass=SingletonThreadPool)
# maintain the same connection across all threads
from sqlalchemy.pool import StaticPool
engine = create_engine('sqlite:///mydb.db',
poolclass=StaticPool)
Note that :class:`.SingletonThreadPool` should be configured for the number of threads
that are to be used; beyond that number, connections will be closed out in a non deterministic
way.
Unicode
-------
The pysqlite driver only returns Python ``unicode`` objects in result sets, never
plain strings, and accommodates ``unicode`` objects within bound parameter
values in all cases. Regardless of the SQLAlchemy string type in use,
string-based result values will be Python ``unicode`` in Python 2.
The :class:`.Unicode` type should still be used to indicate those columns that
require unicode, however, so that non-``unicode`` values passed inadvertently
will emit a warning. Pysqlite will emit an error if a non-``unicode`` string
is passed containing non-ASCII characters.
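For example, a column intended to hold non-ASCII text would be declared with
:class:`.Unicode` so that stray bytestrings are flagged early (the table and
column names below are illustrative only)::
    from sqlalchemy import Table, Column, MetaData, Unicode
    metadata = MetaData()
    users = Table('users', metadata,
        Column('name', Unicode(50))
    )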
.. _pysqlite_serializable:
Serializable Transaction Isolation
----------------------------------
The pysqlite DBAPI driver has a long-standing bug in which transactional
state is not begun until the first DML statement, that is INSERT, UPDATE
or DELETE, is emitted. A SELECT statement will not cause transactional
state to begin. While this mode of usage is fine for typical situations
and has the advantage that the SQLite database file is not prematurely
locked, it breaks serializable transaction isolation, which requires
that the database file be locked upon any SQL being emitted.
To work around this issue, the ``BEGIN`` keyword can be emitted
at the start of each transaction. The following recipe establishes
a :meth:`.ConnectionEvents.begin` handler to achieve this::
from sqlalchemy import create_engine, event
engine = create_engine("sqlite:///myfile.db", isolation_level='SERIALIZABLE')
@event.listens_for(engine, "begin")
def do_begin(conn):
conn.execute("BEGIN")
"""
from sqlalchemy.dialects.sqlite.base import SQLiteDialect, DATETIME, DATE
from sqlalchemy import exc, pool
from sqlalchemy import types as sqltypes
from sqlalchemy import util
import os
class _SQLite_pysqliteTimeStamp(DATETIME):
def bind_processor(self, dialect):
if dialect.native_datetime:
return None
else:
return DATETIME.bind_processor(self, dialect)
def result_processor(self, dialect, coltype):
if dialect.native_datetime:
return None
else:
return DATETIME.result_processor(self, dialect, coltype)
class _SQLite_pysqliteDate(DATE):
def bind_processor(self, dialect):
if dialect.native_datetime:
return None
else:
return DATE.bind_processor(self, dialect)
def result_processor(self, dialect, coltype):
if dialect.native_datetime:
return None
else:
return DATE.result_processor(self, dialect, coltype)
class SQLiteDialect_pysqlite(SQLiteDialect):
default_paramstyle = 'qmark'
colspecs = util.update_copy(
SQLiteDialect.colspecs,
{
sqltypes.Date:_SQLite_pysqliteDate,
sqltypes.TIMESTAMP:_SQLite_pysqliteTimeStamp,
}
)
# Py3K
#description_encoding = None
driver = 'pysqlite'
def __init__(self, **kwargs):
SQLiteDialect.__init__(self, **kwargs)
if self.dbapi is not None:
sqlite_ver = self.dbapi.version_info
if sqlite_ver < (2, 1, 3):
util.warn(
("The installed version of pysqlite2 (%s) is out-dated "
"and will cause errors in some cases. Version 2.1.3 "
"or greater is recommended.") %
'.'.join([str(subver) for subver in sqlite_ver]))
@classmethod
def dbapi(cls):
try:
from pysqlite2 import dbapi2 as sqlite
except ImportError, e:
try:
from sqlite3 import dbapi2 as sqlite #try the 2.5+ stdlib name.
except ImportError:
raise e
return sqlite
@classmethod
def get_pool_class(cls, url):
if url.database and url.database != ':memory:':
return pool.NullPool
else:
return pool.SingletonThreadPool
def _get_server_version_info(self, connection):
return self.dbapi.sqlite_version_info
def create_connect_args(self, url):
if url.username or url.password or url.host or url.port:
raise exc.ArgumentError(
"Invalid SQLite URL: %s\n"
"Valid SQLite URL forms are:\n"
" sqlite:///:memory: (or, sqlite://)\n"
" sqlite:///relative/path/to/file.db\n"
" sqlite:////absolute/path/to/file.db" % (url,))
filename = url.database or ':memory:'
if filename != ':memory:':
filename = os.path.abspath(filename)
opts = url.query.copy()
util.coerce_kw_type(opts, 'timeout', float)
util.coerce_kw_type(opts, 'isolation_level', str)
util.coerce_kw_type(opts, 'detect_types', int)
util.coerce_kw_type(opts, 'check_same_thread', bool)
util.coerce_kw_type(opts, 'cached_statements', int)
return ([filename], opts)
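# Note (illustrative): the URL query string feeds the coercions above, so a
# DSN such as 'sqlite:///data.db?timeout=30&check_same_thread=false' would
# arrive here roughly as opts={'timeout': 30.0, 'check_same_thread': False},
# which is then handed to the underlying sqlite3/pysqlite2 connect() call.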
def is_disconnect(self, e, connection, cursor):
return isinstance(e, self.dbapi.ProgrammingError) and \
"Cannot operate on a closed database." in str(e)
dialect = SQLiteDialect_pysqlite
|
|
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
'''
import sys
import logging
import copy
import aliyunOpenApiData
LOG = logging.getLogger(__name__)
class Completer(object):
def __init__(self):
self.openApiDataHandler = aliyunOpenApiData.aliyunOpenApiDataHandler()
self.driver = None
self.main_hc = None
self.main_options = ['output', 'AccessKeyId', 'AccessKeySecret', 'RegionId', 'profile', 'version']
self.cmdline = None
self.point = None
self.command_hc = None
self.subcommand_hc = None
self.command_name = None
self.operation = None
self.current_word = None
self.previous_word = None
self.non_options = None
self.version = None
self.aliyuncli = 'aliyuncli'
def _complete_option(self, option_name):
# if option_name == '--endpoint-url':
# return []
if option_name == '--output':
cli_data = ['text', 'table', 'json']
return cli_data
# if option_name == '--profile':
# return self.driver.session.available_profiles
return []
def _complete_provider(self):
retval = []
if self.current_word.startswith('-'):
cw = self.current_word.lstrip('-')
l = ['--' + n for n in self.main_options
if n.startswith(cw)]
retval = l
elif self.current_word == './testcli' or self.current_word == self.aliyuncli:
retval = self._documented(self.openApiDataHandler.getApiCmdsLower())
else:
# Otherwise, see if they have entered a partial command name
retval = self._documented(self.openApiDataHandler.getApiCmdsLower(),
startswith=self.current_word)
return retval
def _complete_command(self):
retval = []
if self.current_word == self.command_name: # only the cmd has been given, so operation is still None
_operations = set()
apiOperations = self.openApiDataHandler.getApiOperations(self.command_name, self.version)
import commandConfigure
_configure = commandConfigure.commandConfigure()
extensionOperations = _configure.getExtensionOperations(self.command_name)
for item in apiOperations:
_operations.add(item)
if extensionOperations is not None:
for item in extensionOperations:
_operations.add(item)
if self.openApiDataHandler.getApiOperations(self.command_name, self.version):
retval = self._documented(_operations)
# retval = self._documented(self.openApiDataHandler.getApiOperations(self.command_name, self.version))
elif self.current_word.startswith('-'): # complete the option keys and values
retval = self._find_possible_options()
else: # the cmd has been given; we need to complete the operation name
# See if they have entered a partial command name
_operations = set()
apiOperations = self.openApiDataHandler.getApiOperations(self.command_name, self.version)
import commandConfigure
_configure = commandConfigure.commandConfigure()
extensionOperations = _configure.getExtensionOperations(self.command_name)
for item in apiOperations:
_operations.add(item)
if extensionOperations is not None:
for item in extensionOperations:
_operations.add(item)
if self.openApiDataHandler.getApiOperations(self.command_name, self.version):
retval = self._documented(_operations, startswith=self.current_word)
# retval = self._documented(self.openApiDataHandler.getApiOperations(self.command_name, self.version),
# startswith=self.current_word)
return retval
def _documented(self, table, startswith=None):
names = []
for key in table:
# if getattr(command, '_UNDOCUMENTED', False):
# Don't tab complete undocumented commands/params
# continue
if startswith is not None and not key.startswith(startswith):
continue
# if getattr(command, 'positional_arg', False):
# continue
names.append(key)
return names
def _complete_subcommand(self):
retval = []
if self.current_word == self.operation:
retval = []
elif self.current_word.startswith('-'):
retval = self._find_possible_options()
return retval
def _find_possible_options(self):
all_options = copy.copy(self.main_options)
# build the full attribute list for this operation
# by the time execution reaches here, self.version should already be decided
# self.subcommand_name = self.operation
# cmdInstance = self.openApiDataHandler.getInstanceByCmd(self.command_name, self.operation, self.version)
cmdInstance, mclassname = self.openApiDataHandler.getInstanceByCmdOperation(self.command_name, self.operation, self.version)
# old_arg_list = self.openApiDataHandler.getAttrList(cmdInstance)
old_arg_list = list()
if cmdInstance is None:
import commandConfigure
_configure = commandConfigure.commandConfigure()
old_arg_list = _configure.getExtensionOptions(self.command_name, self.operation)
else:
old_arg_list = self.openApiDataHandler.getAttrList(mclassname)
new_arg_list = set()
if not old_arg_list is None:
for item in old_arg_list:
if not item.startswith('_'):
new_arg_list.add(item)
all_options = all_options + self._documented(new_arg_list)
for opt in self.options:
# Look thru list of options on cmdline. If there are
# options that have already been specified and they are
# not the current word, remove them from list of possibles.
if opt != self.current_word:
stripped_opt = opt.lstrip('-')
if stripped_opt in all_options:
all_options.remove(stripped_opt)
cw = self.current_word.lstrip('-')
possibles = ['--' + n for n in all_options if n.startswith(cw)]
if len(possibles) == 1 and possibles[0] == self.current_word:
return self._complete_option(possibles[0])
return possibles
def _help_to_show_instance_attribute(self, classname):
all_options = copy.copy(self.main_options)
# build the full attribute list for this operation
# by the time execution reaches here, self.version should already be decided
# self.subcommand_name = self.operation
old_arg_list = self.openApiDataHandler.getAttrList(classname)
new_arg_list = set()
if not old_arg_list is None:
for item in old_arg_list:
if not item.startswith('_'):
new_arg_list.add(item)
all_options = all_options + self._documented(new_arg_list)
# for opt in self.options:
# # Look thru list of options on cmdline. If there are
# # options that have already been specified and they are
# # not the current word, remove them from list of possibles.
# if opt != self.current_word:
# stripped_opt = opt.lstrip('-')
# if stripped_opt in all_options:
# all_options.remove(stripped_opt)
#cw = self.current_word.lstrip('-')
possibles = ['--' + n for n in all_options]
# if len(possibles) == 1 and possibles[0] == self.current_word:
# return self._complete_option(possibles[0])
return possibles
def _process_command_line(self):
# Process the command line and try to find:
# - command_name
# - subcommand_name
# - words
# - current_word
# - previous_word
# - non_options
# - options
self.command_name = None
self.operation = None
self.words = self.cmdline[0:self.point].split()
self.current_word = self.words[-1]
if len(self.words) >= 2:
self.previous_word = self.words[-2]
else:
self.previous_word = None
self.non_options = [w for w in self.words if not w.startswith('-')]
self.options = [w for w in self.words if w.startswith('-')]
# Look for a command name in the non_options
for w in self.non_options:
if w in self.openApiDataHandler.getApiCmdsLower() or w in self.openApiDataHandler.getApiCmds(): # cmd check
self.command_name = w # record the command_name
self.version = self.openApiDataHandler.getSdkVersion(self.command_name, None)
cmd_obj = self.openApiDataHandler.getApiOperations(self.command_name, self.version)
# self.command_hc = cmd_obj.create_help_command()
if not cmd_obj is None:
# Look for subcommand name
for w in self.non_options:
if w in cmd_obj:
self.operation = w
# cmd_obj = self.command_hc.command_table[self.subcommand_name]
# self.subcommand_hc = cmd_obj.create_help_command()
break
cmd_extension_obj = self.openApiDataHandler.getExtensionOperationsFromCmd(self.command_name)
if not cmd_extension_obj is None:
for w in self.non_options:
if w in cmd_extension_obj:
self.operation = w
# cmd_obj = self.command_hc.command_table[self.subcommand_name]
# self.subcommand_hc = cmd_obj.create_help_command()
break
break
def complete(self, cmdline, point):
self.cmdline = cmdline
self.command_name = None
if point is None:
point = len(cmdline)
self.point = point
self._process_command_line()
if not self.command_name: # such as 'ec'
# If we didn't find any command names in the cmdline
# lets try to complete provider options
return self._complete_provider()
if self.command_name and not self.operation: # such as 'ecs create-'
return self._complete_command()
return self._complete_subcommand()
def complete(cmdline, point):
choices = Completer().complete(cmdline, point)
print(' \n'.join(choices))
if __name__ == '__main__':
# if len(sys.argv) == 3:
# cmdline = sys.argv[1]
# point = int(sys.argv[2])
# elif len(sys.argv) == 2:
# cmdline = sys.argv[1]
# else:
# print('usage: %s <cmdline> <point>' % sys.argv[0])
# sys.exit(1)
cmdline = './testcli E'
point = len(cmdline)
print(complete(cmdline, point))
|
|
#! /usr/bin/env python3
import logging
import argparse
import re
import math
import datetime
import mechanicalsoup
import ics
import pytz
ADE_ROOT = 'http://chronos.epita.net'
PRODID = '-//Laboratoire Assistant et Charles Villard//chronos.py//EN'
ROOM_MAPPING = {}
CLASS_MAPPING = {}
BASE_TIME = datetime.datetime.strptime("2019-08-26","%Y-%m-%d") # Monday 26 August 2019 = week 0
def compute_date_base(html, date):
"""
Computes the Unix timestamp corresponding to the beginning of Chronos'
week 0 from the result of piano.jsp, assuming there are no missing weeks :)
"""
if not date:
return 0
dates = []
for tag in html.soup.find_all('img'):
dates.append(tag.get('alt'))
maps = []
for tag in html.soup.find_all('area'):
m = re.match("javascript:push\((\d+), 'true'\)", tag.get('href'))
if m and m.group(1):
maps.append(m.group(1))
for i in range(0, len(dates)):
if dates[i] == date:
return maps[i]
return None
def compute_week_number():
"""
Computes the current Chronos week number, relative to the BASE_TIME
reference week.
"""
d2 = datetime.datetime.now()
monday1 = BASE_TIME
monday2 = (d2 - datetime.timedelta(days=d2.weekday()))
return math.floor(((monday2 - monday1).days / 7)-1) # back up one week as a safety margin
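# Worked example (dates for illustration only): with BASE_TIME on Monday
# 2019-08-26, running during the week of Monday 2019-09-09 gives
# (monday2 - monday1).days == 14, so the return value is
# floor(14 / 7 - 1) == 1, i.e. Chronos week 1.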
def process_raw_data(items):
"""
Process a raw class data and make it usable : time parsing, use of room
and class name matching tables to make them readable and uniform
"""
# Start date
d1 = ' '.join(items[0:2])
result = {
'start': datetime.datetime.strptime(d1, '%d/%m/%Y %Hh%M'),
'groups': items[4].split(),
'prof': items[5],
}
# End date
m = re.match('(\d+)h(?:(\d+)min)?', items[2])
if m:
delta = datetime.timedelta(hours=int(m.group(1)))
if m.group(2):
delta += datetime.timedelta(minutes=int(m.group(2)))
result['end'] = result['start'] + delta
else:
m = re.match('(\d+)min', items[2])
if m:
delta = datetime.timedelta(minutes=int(m.group(1)))
result['end'] = result['start'] + delta
else:
raise Exception('Unhandled duration format')
# Class name
if items[3] in CLASS_MAPPING.keys():
result['name'] = CLASS_MAPPING[items[3]]
else:
result['name'] = items[3]
# Room
if items[6] in ROOM_MAPPING.keys():
result['room'] = ROOM_MAPPING[items[6]]
else:
result['room'] = items[6]
return result
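# Illustrative input/output for the parser above (all values are made up):
#   items = ['09/09/2019', '10h00', '1h30min', 'ALGO', 'GRP1 GRP2',
#            'J. Doe', 'Amphi 4']
# yields start=2019-09-09 10:00, end=start + 1h30, name='ALGO' (or its
# CLASS_MAPPING alias), groups=['GRP1', 'GRP2'], prof='J. Doe', and
# room='Amphi 4' (or its ROOM_MAPPING alias).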
def retrieve_class_list(html):
"""
Retrieve a list of classes from the output of info.jsp (lower pane of the
timetable display) It only retrieves the time, name of class and room,
since they are the only really useful ones.
"""
result = []
for tr in html.soup.table.find_all('tr'):
it = []
for td in tr.find_all('td'):
txt = td.string
if txt:
it.append(txt)
if it:
result.append(process_raw_data(it))
return result
def find_tree_url(soup):
"""
Find the tree pane URL
"""
for frame in soup.select('frame'):
if 'tree.jsp' in frame.get('src'):
return '{}{}'.format(ADE_ROOT, frame.get('src'))
return None
def search_tree(agent, tree, path):
"""
Walk the tree following the given path, and return the URL at the leaf
"""
#tree_frame = agent.get(tree)
#assert tree_frame.status_code == 200
agent.get("{}/ade/standard/gui/tree.jsp?selectCategory=trainee&scroll=0".format(ADE_ROOT))
agent.get("{}/ade/standard/gui/tree.jsp?selectBranchId=1&reset=true&forceLoad=false&scroll=0".format(ADE_ROOT))
agent.get("{}/ade/standard/gui/tree.jsp?selectBranchId=15&reset=true&forceLoad=false&scroll=0".format(ADE_ROOT))
if path:
r = "{}/ade/standard/gui/tree.jsp?".format(ADE_ROOT)
r += "{}={}".format("selectId", path)
r += "&reset=true&forceLoad=true&scroll=0"
return r
else:
raise Exception("Can't get calendar")
def connect_and_select(agent, date, path):
"""
Connect to Chronos and select a node (given by its path), retrieve the time
base and return it.
"""
main_page = agent.get("{}/".format(ADE_ROOT))
assert main_page.status_code == 200
# Find the tree
tree = find_tree_url(main_page.soup)
assert tree != None
# Find the leaf following the given path
leaf = search_tree(agent, tree, path)
assert leaf != None
# Access the leaf
leaf_page = agent.get(leaf)
assert leaf_page.status_code == 200
# Get the time bar
uri = "{}/ade/custom/modules/plannings/pianoWeeks.jsp".format(ADE_ROOT)
time_bar = agent.get(uri)
assert time_bar.status_code == 200
# Return the computed week origin
return compute_week_number()
def retrieve_week_classes(agent, first, numweeks):
"""
Retrieve the classes of a week given a Unix timestamp in this week.
"""
# Set the weeks
for i in range(0, numweeks):
uri = "{}/ade/custom/modules/plannings/bounds.jsp?".format(ADE_ROOT)
uri += "week={}".format(i + first)
if i == 0:
uri += "&reset=true"
page = agent.get(uri)
assert page.status_code == 200
# Retrieve the content and parse it
p = agent.get("{}/ade/custom/modules/plannings/info.jsp".format(ADE_ROOT))
assert p.status_code == 200
return retrieve_class_list(p)
# def ical_output__(promo, classes):
# cal = icalendar.Calendar()
# cal.add('VERSION', '2.0')
# cal.add('PRODID', PRODID)
# for c in classes:
# event = icalendar.Event()
# event_condensed_name = '{}-{}'.format(c.get('name'), c.get('prof'))
# event_condensed_name = re.sub(r"[^\w]", "_", event_condensed_name)
# event['UID'] = 'chronos-{}-{}-{}'.format(
# promo, c.get('start'), event_condensed_name).replace(' ', '_')
##date the event was created (reset to now)
# event['DTSTAMP'] = icalendar.vDatetime(datetime.datetime.now())
# summary = '{}'.format(c.get('name'))
# if c.get('prof') != '-':
# summary += ' - {}'.format(c.get('prof'))
# summary += ' ({})'.format(c.get('room'))
# event['SUMMARY;CHARSET=UTF-8'] = '{}'.format(summary)
# event['DESCRIPTION'] = '\n'.join({
# "Cours: {}".format(c.get('name')),
# "Prof: {}".format(c.get('prof')),
# "Salle: {}".format(c.get('room'),
# "Groupes: {}".format('-'.join(c.get('groups')))),
# }).replace(',', '\\,')
# event['DTSTART'] = icalendar.vDatetime(c.get('start'))
# event['DTEND'] = icalendar.vDatetime(c.get('end'))
# event['LOCATION'] = c.get('room')
# cal.add_component(event)
# return cal
def ical_output(promo, classes):
events = []
for c in classes:
name = '{}-{}'.format(c.get('name'), c.get('prof'))
name = re.sub(r"[^\w]", "_", name)
uid = 'chronos-{}-{}-{}'.format(promo, c.get('start'), name)
uid = uid.replace(' ', '_')
summary = '{}'.format(c.get('name'))
if c.get('prof') != '-':
summary += ' - {}'.format(c.get('prof'))
summary += ' ({})'.format(c.get('room'))
description = '\n'.join([
"Cours: {}".format(c.get('name')),
"Prof: {}".format(c.get('prof')),
"Salle: {}".format(c.get('room')),
"Groupes: {}".format('-'.join(c.get('groups'))),
]).replace(',', '\\,')
paris = pytz.timezone('Europe/Paris')
begin, end = map(paris.localize, [c.get('start'), c.get('end')])
events.append(ics.Event(
name=summary,
begin=begin,
end=end,
uid=uid,
description=description,
location=c.get('room').capitalize()
))
cal = ics.Calendar(creator=PRODID, events=events)
return cal
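# A minimal sketch of how the calendar might be saved afterwards (the file
# name is arbitrary, and this assumes the `ics` package serializes via str()):
#   with open('chronos.ics', 'w') as out:
#       out.write(str(chronos('promo2022', '<tree-node-id>', 4)))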
def chronos(promo, group, numweeks):
agent = mechanicalsoup.StatefulBrowser()
try:
path = group
except:
logging.fatal("Can't find path for this calendar: {}".format(group))
first = connect_and_select(agent, None, path)
classes = retrieve_week_classes(agent, first, numweeks)
cal = ical_output(promo, classes)
return cal
|
|
"""
# tinypath
Tinypath is a tiny file path module that provides only the most crucial and
commonly needed functionality, including turning files and folders into classes.
Designed as a companion module for projects that require handling arbitrary paths
from users without breaking, and easy navigation of local file systems.
By staying tiny in both size and functionality, the API is easy to learn so
you can start using it right away. Essentially, it provides object-oriented access
to files and folders with several convenient attributes such as checking file or
folder size, handling a lot of the intricacies of os.path behind
the scene so you do not have to.
## Platforms
Tested on Python version 2.x.
## Dependencies
Pure Python, no dependencies.
## Installing it
Tinypath is installed with pip from the commandline:
pip install tinypath
## More Information:
- [Home Page](http://github.com/karimbahgat/tinypath)
- [API Documentation](http://pythonhosted.org/tinypath)
## License:
This code is free to share, use, reuse,
and modify according to the MIT license, see license.txt
## Credits:
Karim Bahgat (2015)
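## Quick Example
A minimal sketch of typical use (the path below is just a placeholder):
    import tinypath
    folder = tinypath.Folder("C:/Users/example/projects")
    print(folder.size)                       # e.g. "Size: 12.345 mb"
    for file in folder.loop(filetypes=[".py"]):
        print(file.filename)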
"""
__version__ = "0.1.1"
# Imports
import sys,os,time
# Classes
class Size:
"""
The file size object, as returned by File.size.
Attributes:
- **bytes**: The actual size in bytes, to be used for calculations.
- **string**: The size as a more sensible human-readable string depending on size.
"""
def __init__(self, bytes):
self.bytes = bytes
# parse
size = bytes
kb,mb,gb = 1000, 1000*1000, 1000*1000*1000
if size < mb:
size = size/float(kb)
sizeunit = "kb"
elif size < gb:
size = size/float(mb)
sizeunit = "mb"
else:
size = size/float(gb)
sizeunit = "gb"
self.string = "%.3f %s" %(size, sizeunit)
def __str__(self):
return "Size: %s" %self.string
class Folder:
"""
A class that holds info about a folder, which can be accessed via attributes
Attributes:
- **path**: Full proper path of the folder.
- **name**: Just the name of the folder.
- **exists**: True or False if exists.
- **read_ok**: Read permission or not.
- **write_ok**: Write permission or not.
- **location**: The parent folder where the folder is located.
- **content**: A list of all child files and folders.
- **files**: A list of all child files.
- **folders**: A list of all child folders.
- **size**: A Size instance of the total size of the entire folder.
This is done by looping over and sizing all nested child files, so it can take a while
for a high level folder. The size is cached after the first time to ease
repeat calling.
- **total_files**: Total number of files within the entire folder.
"""
def __init__(self, *folderpath, **kwargs):
"""
Arguments:
- **folderpath**: can be a relative path, a full path incl drive letter and filetype extension, or a list of path name elements to be joined together
"""
#concatenate path elements if multiple given
folderpath = os.path.join(*folderpath)
#full normalized path (make into absolute path if relative)
folderpath = os.path.abspath(folderpath)
folderpath = os.path.normpath(folderpath)
self.path = folderpath
#access
self.exists = os.access(self.path, os.F_OK)
self.read_ok = os.access(self.path, os.R_OK)
self.write_ok = os.access(self.path, os.W_OK)
#split entire path into components
pathsplit = []
drive = ""
head,tail = os.path.split(self.path)
while head != "":
pathsplit.insert(0,tail)
if os.path.ismount(head):
drive = head
break
head,tail = os.path.split(head)
#parent folder path
if len(pathsplit) > 1:
oneup = os.path.join(*pathsplit[:-1])
else:
oneup = ""
if drive:
self._oneup = drive+oneup
else:
self._oneup = oneup
self.name = pathsplit[-1]
def __str__(self):
string = "Folder:\n"
for each in (self.location.name,self.name,self.size,self.total_files):
string += "\t%s\n" % each
return string
def __repr__(self):
return "Folder( %s )" % self.path
# Properties
@property
def location(self):
return Folder(self._oneup)
@property
def content(self):
content = []
content.extend(self.files)
content.extend(self.folders)
return content
@property
def files(self):
if not hasattr(self, "_files"):
self._files = []
try:
children = os.listdir(self.path)
for child in children:
childpath = os.path.join(self.path, child)
if os.path.isfile(childpath) or os.path.islink(childpath):
self._files.append( File(childpath) )
except OSError:
pass
return self._files
@property
def folders(self):
if not hasattr(self, "_folders"):
self._folders = []
try:
children = os.listdir(self.path)
for child in children:
childpath = os.path.join(self.path, child)
if os.path.isdir(childpath):
self._folders.append( Folder(childpath) )
except OSError:
pass
return self._folders
@property
def size(self):
# collect
if not hasattr(self, "_size"):
foldersize = 0
for file in self.loop():
if file.read_ok:
foldersize += file.size.bytes
self._size = Size(foldersize)
return self._size
@property
def total_files(self):
# collect
if not hasattr(self, "_filecount"):
self._filecount = 0
for file in self.loop():
self._filecount += 1
return self._filecount
# Methods
def up(self):
"""Changes this object's path up one level"""
newpath = self._oneup
self.__init__(newpath)
def down(self, foldername):
"""Changes this object's path down into the given subfolder name"""
for folder in self.folders:
if foldername == folder.name:
newpath = folder.path
self.__init__(newpath)
break
else: raise Exception("No such folder found")
def loop(self, filetypes=[], maxdepth=None):
"""
Loops files only
Arguments:
- **filetypes** (optional): If filetypes is a sequence then grabs all filetypes listed within it, otherwise grabs everything.
Each file type is specified as the file extension including the dot, eg ".py".
- **maxdepth** (optional): Max depth to look before continuing.
"""
return loop_folder(self, filetypes, maxdepth)
def overview(self, filetypes=[], maxdepth=None):
"""
Return a string representation of the folder structure and file members, as a snapshot of the folder's content.
Arguments:
- **filetypes** (optional): If filetypes is a sequence then grabs all filetypes listed within it, otherwise grabs everything.
Each file type is specified as the file extension including the dot, eg ".py".
- **maxdepth** (optional): Max depth to look before continuing.
"""
if not filetypes: filetypes = []
if not isinstance(filetypes, (list,tuple)): filetypes = [filetypes]
# setup
topfolder = self
depth = 0
spaces = " "
structstring = self.path+"\n"
def recurloop(parentfolder, structstring, depth, spaces):
depth += 1
if not maxdepth or depth <= maxdepth:
if not filetypes:
for file in parentfolder.files:
structstring += spaces*depth + file.name + file.type + "\n"
else:
for file in parentfolder.files:
if file.type in filetypes:
structstring += spaces*depth + file.name + file.type + "\n"
for folder in parentfolder.folders:
structstring += spaces*depth + folder.name + "\n"
folder, structstring, depth, spaces = recurloop(folder, structstring, depth, spaces)
depth -= 1
return parentfolder, structstring, depth, spaces
# begin
finalfolder, structstring, depth, spaces = recurloop(topfolder, structstring, depth, spaces)
return structstring
def overview_table(self, filetypes=[], maxdepth=None):
"""
Return a tab-delimited table string of the folder structure and file members, as a snapshot of the folder's content.
Arguments:
- **filetypes** (optional): If filetypes is a sequence then grabs all filetypes listed within it, otherwise grabs everything.
Each file type is specified as the file extension including the dot, eg ".py".
- **maxdepth** (optional): Max depth to look before continuing.
Warning: Not fully tested...
"""
# setup
topfolder = self
depth = 0
delimit = "\t"
structstring = delimit.join(["path","name","type","depth","size"])+"\n"
structstring += delimit.join([self.path,self.name,"",str(depth),str(self.size.bytes)])+"\n"
def recurloop(parentfolder, structstring, depth):
depth += 1
if not maxdepth or depth <= maxdepth:
if not filetypes:
for file in parentfolder.files:
structstring += delimit.join([file.path,file.name,file.type,str(depth),str(file.size.bytes)])+"\n"
else:
for file in parentfolder.files:
if file.type in filetypes:
structstring += delimit.join([file.path,file.name,file.type,str(depth),str(file.size.bytes)])+"\n"
for folder in parentfolder.folders:
structstring += delimit.join([folder.path,folder.name,"",str(depth),str(folder.size.bytes)])+"\n"
folder, structstring, depth = recurloop(folder, structstring, depth)
depth -= 1
return parentfolder, structstring, depth
# begin
finalfolder, structstring, depth = recurloop(topfolder, structstring, depth)
return structstring
class File:
"""
A class that holds info about a file, which can be accessed via attributes.
Attributes:
- **path**: Full proper path of the folder.
- **name**: Just the name of the folder.
- **type**: Type extension of the file.
- **filename**: Name with type extension.
- **exists**: True or False if exists.
- **read_ok**: Read permission or not.
- **write_ok**: Write permission or not.
- **size**: Size instance of the size of the file.
- **lastchanged**: The last time the file was modified as a timestamp object.
"""
def __init__(self, *filepath, **kwargs):
"""
Arguments:
- **filepath**: can be a relative path, a full path incl drive letter and filetype extension, or a list of path name elements to be joined together
"""
#concatenate path elements if multiple given
filepath = os.path.join(*filepath)
#full normalized path (make into absolute path if relative)
filepath = os.path.abspath(filepath)
filepath = os.path.normpath(filepath)
self.path = filepath
#access
self.exists = os.access(self.path, os.F_OK)
self.read_ok = os.access(self.path, os.R_OK)
self.write_ok = os.access(self.path, os.W_OK)
#split entire path into components
pathsplit = []
drive = ""
head,tail = os.path.split(filepath)
while head != "":
pathsplit.insert(0,tail)
if os.path.ismount(head):
drive = head
break
head,tail = os.path.split(head)
#folder path
if len(pathsplit) > 1:
oneup = os.path.join(*pathsplit[:-1])
else:
oneup = ""
if drive:
self.folder = Folder(drive+oneup)
else:
self.folder = Folder(oneup)
#filename and type
fullfilename = pathsplit[-1]
filename,filetype = os.path.splitext(fullfilename)
self.name = filename #".".join(fullfilename.split(".")[:-1])
self.type = filetype #"." + fullfilename.split(".")[-1]
self.filename = filename + filetype
def __str__(self):
string = "File:\n"
for each in (self.folder.name,self.name,self.type):
string += "\t%s\n" % each
if self.read_ok:
string += "\t%s\n" % self.size
string += "\t%s\n" % self.lastchanged
return string
def __repr__(self):
return "File( %s )" % self.path
@property
def size(self):
#filesize
if not hasattr(self, "_size"):
self._size = Size( os.path.getsize(self.path) )
return self._size
@property
def lastchanged(self):
#last changed
if not hasattr(self, "_lastchanged"):
self._lastchanged = time.ctime(os.path.getmtime(self.path))
return self._lastchanged
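# A small illustrative sketch of the File class (the path is hypothetical): build a
# File from a path string and read the derived attributes documented above.
def _example_file_attributes():
    f = File("C:/Temp/notes.txt")
    print("%s%s in %s" % (f.name, f.type, f.folder.path))
    if f.exists and f.read_ok:
        print("%s, last changed %s" % (f.size, f.lastchanged))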
# User Functions
def current_script():
    """
    Returns a File object for the currently running top-level script
    (falling back to this module's own file if that cannot be determined)
    """
    import sys
    mainfile = getattr(sys.modules.get("__main__"), "__file__", None)
    return File(mainfile or __file__)
def current_folder():
"""
Returns the folder object of the currently running script
"""
curfile = current_script()
return curfile.folder
def path2obj(path):
"""
    Returns a File or Folder object from the given path, or None if the path does not point to an existing file or folder.
"""
if os.path.isfile(path) or os.path.islink(path):
return File(path)
elif os.path.isdir(path):
return Folder(path)
def loop_folder(folder, filetypes=[], maxdepth=None):
"""
A generator that iterates through all files in a folder tree, either in a for loop or by using next() on it.
Arguments:
- **folder**: The folder path to loop. Can be a folder instance, or any string path accepted by Folder.
    - **filetypes** (optional): A file extension or sequence of extensions to include, each given with the leading dot, eg ".py". If omitted, every file type is yielded.
    - **maxdepth** (optional): Maximum folder depth to descend into; deeper folders are skipped.
    A short usage sketch follows this function.
"""
if not filetypes: filetypes = []
if not isinstance(filetypes, (list,tuple)): filetypes = [filetypes]
# setup
if not isinstance(folder, Folder):
topfolder = Folder(folder)
else:
topfolder = folder
depth = 0
def recurloop(parentfolder, depth):
depth += 1
if not maxdepth or depth <= maxdepth:
if not filetypes:
for file in parentfolder.files:
yield file
else:
for file in parentfolder.files:
if file.type in filetypes:
yield file
for folder in parentfolder.folders:
for file in recurloop(folder, depth):
yield file
# begin
return recurloop(topfolder, depth)
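# A minimal sketch of loop_folder in use (the path is hypothetical): lazily iterate
# every ".txt" file in a tree, at most three levels deep, without building a list first.
def _example_loop_folder():
    for txtfile in loop_folder("C:/Temp/docs", filetypes=".txt", maxdepth=3):
        print(txtfile.path)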
|
|
from skidl import SKIDL, TEMPLATE, Part, Pin, SchLib
SKIDL_lib_version = '0.0.1'
audio = SchLib(tool=SKIDL).add_parts(*[
Part(name='LL1587',dest=TEMPLATE,tool=SKIDL,do_erc=True),
Part(name='LM1875',dest=TEMPLATE,tool=SKIDL,keywords='LM1875 Amplifier 20W',description='20W Audio Power Amplifier, TO220-5',ref_prefix='U',num_units=1,fplist=['TO*'],do_erc=True,pins=[
Pin(num='1',name='IN+',do_erc=True),
Pin(num='2',name='IN-',do_erc=True),
Pin(num='3',name='VEE',func=Pin.PWRIN,do_erc=True),
Pin(num='4',name='OUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='5',name='VCC',func=Pin.PWRIN,do_erc=True)]),
Part(name='LM1876',dest=TEMPLATE,tool=SKIDL,keywords='LM1876 Overture Amplifier Dual 20W',description='Dual 20W Overture Series Audio Power Amplifier, with Mute and Standby Mode, TO220-15 (MultiWatt)',ref_prefix='U',num_units=2,fplist=['TO*'],do_erc=True,pins=[
Pin(num='2',name='VCC',func=Pin.PWRIN,do_erc=True),
Pin(num='3',name='OUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='4',name='VEE',func=Pin.PWRIN,do_erc=True),
Pin(num='5',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='6',name='MUTE',do_erc=True),
Pin(num='7',name='IN-',do_erc=True),
Pin(num='8',name='IN+',do_erc=True),
Pin(num='9',name='STB',do_erc=True),
Pin(num='1',name='OUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='4',name='VEE',func=Pin.PWRIN,do_erc=True),
Pin(num='10',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='11',name='MUTE',do_erc=True),
Pin(num='12',name='IN-',do_erc=True),
Pin(num='13',name='IN+',do_erc=True),
Pin(num='14',name='STB',do_erc=True),
Pin(num='15',name='VCC',func=Pin.PWRIN,do_erc=True)]),
Part(name='LM1877M',dest=TEMPLATE,tool=SKIDL,keywords='Dual Audio Amplifier 2W 24V',description='2W/8R Dual Audio Power Amplifier, Supply Voltage 6-24V, SO14L',ref_prefix='U',num_units=1,fplist=['SO*'],do_erc=True,pins=[
Pin(num='1',name='BIAS',func=Pin.PASSIVE,do_erc=True),
Pin(num='2',name='OUT1',func=Pin.OUTPUT,do_erc=True),
Pin(num='3',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='4',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='5',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='6',name='IN1',do_erc=True),
Pin(num='7',name='IN1',do_erc=True),
Pin(num='8',name='IN2',do_erc=True),
Pin(num='9',name='IN2',do_erc=True),
Pin(num='10',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='11',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='12',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='13',name='OUT2',func=Pin.OUTPUT,do_erc=True),
Pin(num='14',name='V+',func=Pin.PWRIN,do_erc=True)]),
Part(name='LM1877N',dest=TEMPLATE,tool=SKIDL,keywords='Dual Audio Amplifier 2W 24V',description='2W/8R Dual Audio Power Amplifier, Supply Voltage 6-24V, DIP14',ref_prefix='U',num_units=1,fplist=['DIP*', 'PDIP*'],do_erc=True,pins=[
Pin(num='1',name='BIAS',func=Pin.PASSIVE,do_erc=True),
Pin(num='2',name='OUT1',func=Pin.OUTPUT,do_erc=True),
Pin(num='3',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='4',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='5',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='6',name='IN1',do_erc=True),
Pin(num='7',name='IN1',do_erc=True),
Pin(num='8',name='IN2',do_erc=True),
Pin(num='9',name='IN2',do_erc=True),
Pin(num='10',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='11',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='12',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='13',name='OUT2',func=Pin.OUTPUT,do_erc=True),
Pin(num='14',name='V+',func=Pin.PWRIN,do_erc=True)]),
Part(name='LM3886T',dest=TEMPLATE,tool=SKIDL,keywords='LM3886 Overture Amplifier Single 68W',description='Single 68W Overture Series Audio Power Amplifier, with Mute Mode, PFM-11 (Plastic MultiWatt)',ref_prefix='U',num_units=1,fplist=['TO*'],do_erc=True,aliases=['LM3886TF'],pins=[
Pin(num='1',name='V+',func=Pin.PWRIN,do_erc=True),
Pin(num='3',name='OUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='4',name='V-',func=Pin.PWRIN,do_erc=True),
Pin(num='5',name='V+',func=Pin.PWRIN,do_erc=True),
Pin(num='7',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='8',name='MUTE',do_erc=True),
Pin(num='9',name='IN-',do_erc=True),
Pin(num='10',name='IN+',do_erc=True)]),
Part(name='LM4950TA',dest=TEMPLATE,tool=SKIDL,keywords='LM4950 Stereo Power Amplifier 3.1W',description='Audio 3.1W Stereo Power Amplifier, TO220-9',ref_prefix='U',num_units=1,fplist=['TO220*'],do_erc=True,pins=[
Pin(num='1',name='INA',do_erc=True),
Pin(num='2',name='~SHD~',do_erc=True),
Pin(num='3',name='OUTA',func=Pin.OUTPUT,do_erc=True),
Pin(num='4',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='5',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='6',name='VDD',func=Pin.PWRIN,do_erc=True),
Pin(num='7',name='OUTB',func=Pin.OUTPUT,do_erc=True),
Pin(num='8',name='BP',func=Pin.OUTPUT,do_erc=True),
Pin(num='9',name='INB',do_erc=True),
Pin(num='10',name='TAP',do_erc=True)]),
Part(name='LM4950TS',dest=TEMPLATE,tool=SKIDL,keywords='LM4950 Stereo Power Amplifier 3.1W',description='Audio 3.1W Stereo Power Amplifier, TO263-9',ref_prefix='U',num_units=1,fplist=['TO263*'],do_erc=True,pins=[
Pin(num='1',name='INA',do_erc=True),
Pin(num='2',name='~SHD~',do_erc=True),
Pin(num='3',name='OUTA',func=Pin.OUTPUT,do_erc=True),
Pin(num='4',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='5',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='6',name='VDD',func=Pin.PWRIN,do_erc=True),
Pin(num='7',name='OUTB',func=Pin.OUTPUT,do_erc=True),
Pin(num='8',name='BP',func=Pin.OUTPUT,do_erc=True),
Pin(num='9',name='INB',do_erc=True),
Pin(num='10',name='TAP',do_erc=True)]),
Part(name='LM4990',dest=TEMPLATE,tool=SKIDL,keywords='Audio Boomer BTL Mono Amplifier',description='Audio 1.25W Mono Power Amplifier, VSSOP',ref_prefix='U',num_units=1,fplist=['MSOP-*_3x3mm_Pitch0.65mm*'],do_erc=True,pins=[
Pin(num='1',name='~SHTD',do_erc=True),
Pin(num='2',name='BYPS',func=Pin.PASSIVE,do_erc=True),
Pin(num='3',name='+IN',do_erc=True),
Pin(num='4',name='-IN',do_erc=True),
Pin(num='5',name='VO1',func=Pin.OUTPUT,do_erc=True),
Pin(num='6',name='VDD',func=Pin.PWRIN,do_erc=True),
Pin(num='7',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='8',name='VO2',func=Pin.OUTPUT,do_erc=True)]),
Part(name='MSGEQ7',dest=TEMPLATE,tool=SKIDL,keywords='equalizer filter',description='Graphic Equalizer Display Filter',ref_prefix='U',num_units=1,fplist=['SOIC*', 'DIP*'],do_erc=True,pins=[
Pin(num='1',name='VDD',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='VSS',func=Pin.PWRIN,do_erc=True),
Pin(num='3',name='OUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='4',name='STROBE',do_erc=True),
Pin(num='5',name='IN',do_erc=True),
Pin(num='6',name='GND',func=Pin.PWROUT,do_erc=True),
Pin(num='7',name='RESET',do_erc=True),
Pin(num='8',name='CKIN',do_erc=True)]),
Part(name='NSL-32',dest=TEMPLATE,tool=SKIDL,keywords='OPTO',description='Opto resistor',ref_prefix='U',num_units=1,do_erc=True,pins=[
Pin(num='1',name='A',func=Pin.PASSIVE,do_erc=True),
Pin(num='2',name='K',func=Pin.PASSIVE,do_erc=True),
Pin(num='3',name='R',func=Pin.PASSIVE,do_erc=True),
Pin(num='4',name='R',func=Pin.PASSIVE,do_erc=True)]),
Part(name='PAM8301',dest=TEMPLATE,tool=SKIDL,keywords='Audio Mono Filterless Class-D Amplifier',description='Audio 1.5W Filterless Class-D Mono Amplifier, TSOT-23-6',ref_prefix='U',num_units=1,fplist=['SOT-23*'],do_erc=True,pins=[
Pin(num='1',name='OUT-',func=Pin.OUTPUT,do_erc=True),
Pin(num='2',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='3',name='IN',do_erc=True),
Pin(num='4',name='~SD',do_erc=True),
Pin(num='5',name='VDD',func=Pin.PWRIN,do_erc=True),
Pin(num='6',name='OUT+',func=Pin.OUTPUT,do_erc=True)]),
Part(name='PGA4311',dest=TEMPLATE,tool=SKIDL,keywords='AUDIO',description='4 channels Audio Volume Control',ref_prefix='U',num_units=1,do_erc=True,pins=[
Pin(num='1',name='MUTE',do_erc=True),
Pin(num='2',name='AGND-1',do_erc=True),
Pin(num='3',name='Ain-1',do_erc=True),
Pin(num='4',name='AGND-1',do_erc=True),
Pin(num='5',name='Ain-1',func=Pin.OUTPUT,do_erc=True),
Pin(num='6',name='VA-',func=Pin.PWRIN,do_erc=True),
Pin(num='7',name='VA+',func=Pin.PWRIN,do_erc=True),
Pin(num='8',name='Aout-3',func=Pin.OUTPUT,do_erc=True),
Pin(num='9',name='AGND-3',do_erc=True),
Pin(num='10',name='Ain-3',do_erc=True),
Pin(num='20',name='AGND_4',do_erc=True),
Pin(num='11',name='AGND-3',do_erc=True),
Pin(num='21',name='Aout-4',func=Pin.OUTPUT,do_erc=True),
Pin(num='12',name='VD+',func=Pin.PWRIN,do_erc=True),
Pin(num='22',name='VA+',func=Pin.PWRIN,do_erc=True),
Pin(num='13',name='SDI',do_erc=True),
Pin(num='23',name='VA-',func=Pin.PWRIN,do_erc=True),
Pin(num='14',name='CS',do_erc=True),
Pin(num='24',name='Aout-2',func=Pin.OUTPUT,do_erc=True),
Pin(num='15',name='SCLK',do_erc=True),
Pin(num='25',name='AGND-2',do_erc=True),
Pin(num='16',name='SDO',func=Pin.OUTPUT,do_erc=True),
Pin(num='26',name='Ain-2',do_erc=True),
Pin(num='17',name='DGND',func=Pin.PWRIN,do_erc=True),
Pin(num='27',name='AGND-2',do_erc=True),
Pin(num='18',name='AGND_4',do_erc=True),
Pin(num='28',name='ZCEN',do_erc=True),
Pin(num='19',name='Ain-4',do_erc=True)]),
Part(name='SSM-2017P',dest=TEMPLATE,tool=SKIDL,keywords='Audio PREAMP',description='Audio low noise preamp',ref_prefix='U',num_units=1,do_erc=True,pins=[
Pin(num='1',name='Gain',do_erc=True),
Pin(num='2',name='-',do_erc=True),
Pin(num='3',name='+',do_erc=True),
Pin(num='4',name='V-',func=Pin.PWRIN,do_erc=True),
Pin(num='5',name='Ref',do_erc=True),
Pin(num='6',name='~',func=Pin.OUTPUT,do_erc=True),
Pin(num='7',name='V+',func=Pin.PWRIN,do_erc=True),
Pin(num='8',name='Gain',do_erc=True)]),
Part(name='SSM-2018T',dest=TEMPLATE,tool=SKIDL,keywords='AUDIO VCA',description='audio VCA',ref_prefix='U',num_units=1,do_erc=True,pins=[
Pin(num='9',name='Comp3',do_erc=True),
Pin(num='1',name='+I1-G',func=Pin.OUTPUT,do_erc=True),
Pin(num='2',name='V+',func=Pin.PWRIN,do_erc=True),
Pin(num='3',name='-Ig',func=Pin.OUTPUT,do_erc=True),
Pin(num='4',name='-I1-G',func=Pin.OUTPUT,do_erc=True),
Pin(num='5',name='Comp1',do_erc=True),
Pin(num='6',name='+',do_erc=True),
Pin(num='7',name='-',do_erc=True),
Pin(num='8',name='Comp2',do_erc=True),
Pin(num='10',name='V-',func=Pin.PWRIN,do_erc=True),
Pin(num='11',name='Vctrl',do_erc=True),
Pin(num='12',name='MODE',do_erc=True),
Pin(num='13',name='GND',do_erc=True),
Pin(num='14',name='Vg',func=Pin.OUTPUT,do_erc=True),
Pin(num='15',name='BAL',do_erc=True),
Pin(num='16',name='V1-g',func=Pin.OUTPUT,do_erc=True)]),
Part(name='SSM2120P',dest=TEMPLATE,tool=SKIDL,keywords='AUDIO VCA',description='Dual VCA & level detectors',ref_prefix='U',num_units=2,do_erc=True,pins=[
Pin(num='10',name='Iref',do_erc=True),
Pin(num='11',name='V-',func=Pin.PWRIN,do_erc=True),
Pin(num='21',name='V+',func=Pin.PWRIN,do_erc=True),
Pin(num='22',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='1',name='Thresh',do_erc=True),
Pin(num='2',name='LOGav',do_erc=True),
Pin(num='3',name='CTR',do_erc=True),
Pin(num='4',name='~',func=Pin.OUTPUT,do_erc=True),
Pin(num='5',name='+Vc',do_erc=True),
Pin(num='6',name='CFT',do_erc=True),
Pin(num='7',name='-Vc',do_erc=True),
Pin(num='8',name='E',do_erc=True),
Pin(num='9',name='RecIn',do_erc=True),
Pin(num='20',name='~',func=Pin.OUTPUT,do_erc=True),
Pin(num='12',name='Thresh',do_erc=True),
Pin(num='13',name='LOGav',do_erc=True),
Pin(num='14',name='CTR',do_erc=True),
Pin(num='15',name='RecIn',do_erc=True),
Pin(num='16',name='E',do_erc=True),
Pin(num='17',name='-Vc',do_erc=True),
Pin(num='18',name='CFT',do_erc=True),
Pin(num='19',name='+Vc',do_erc=True)]),
Part(name='SSM2122P',dest=TEMPLATE,tool=SKIDL,keywords='AUDIO VCA',description='Dual VCA',ref_prefix='U',num_units=2,do_erc=True,pins=[
Pin(num='7',name='Iref',do_erc=True),
Pin(num='8',name='V-',func=Pin.PWRIN,do_erc=True),
Pin(num='9',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='15',name='V+',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='~',func=Pin.OUTPUT,do_erc=True),
Pin(num='3',name='+Vc',do_erc=True),
Pin(num='4',name='CFT',do_erc=True),
Pin(num='5',name='-Vc',do_erc=True),
Pin(num='6',name='E',do_erc=True),
Pin(num='10',name='E',do_erc=True),
Pin(num='11',name='-Vc',do_erc=True),
Pin(num='12',name='CFT',do_erc=True),
Pin(num='13',name='+Vc',do_erc=True),
Pin(num='14',name='~',func=Pin.OUTPUT,do_erc=True)]),
Part(name='SSM2165',dest=TEMPLATE,tool=SKIDL,keywords='AUDIO',description='8 pins microphone preamp and compressor',ref_prefix='U',num_units=1,do_erc=True,pins=[
Pin(num='1',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='VCAIN',do_erc=True),
Pin(num='3',name='BUFOUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='4',name='IN+',do_erc=True),
Pin(num='5',name='AVG',do_erc=True),
Pin(num='6',name='COMP',do_erc=True),
Pin(num='7',name='VOUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='8',name='V+',func=Pin.PWRIN,do_erc=True)]),
Part(name='SSM2210',dest=TEMPLATE,tool=SKIDL,description='Audio dual matched NPN transistor',ref_prefix='U',num_units=2,do_erc=True,pins=[
Pin(num='1',name='C',func=Pin.PASSIVE,do_erc=True),
Pin(num='2',name='B',do_erc=True),
Pin(num='3',name='E',func=Pin.PASSIVE,do_erc=True),
Pin(num='6',name='E',func=Pin.PASSIVE,do_erc=True),
Pin(num='7',name='B',do_erc=True),
Pin(num='8',name='C',func=Pin.PASSIVE,do_erc=True)]),
Part(name='SSM2220',dest=TEMPLATE,tool=SKIDL,keywords='AUDIO',description='Audio dual matched PNP transistor (low noise)',ref_prefix='U',num_units=2,do_erc=True,pins=[
Pin(num='1',name='C',func=Pin.PASSIVE,do_erc=True),
Pin(num='2',name='B',do_erc=True),
Pin(num='3',name='E',func=Pin.PASSIVE,do_erc=True),
Pin(num='6',name='E',func=Pin.PASSIVE,do_erc=True),
Pin(num='7',name='B',do_erc=True),
Pin(num='8',name='C',func=Pin.PASSIVE,do_erc=True)]),
Part(name='STK435',dest=TEMPLATE,tool=SKIDL,keywords='STK443 Dual 25W Amplifier Audio',description='Dual 25W Audio Power Amplifier, 4010',ref_prefix='U',num_units=2,do_erc=True,aliases=['STK433', 'STK436', 'STK437', 'STK439', 'STK441', 'STK443'],pins=[
Pin(num='1',name='IN+',do_erc=True),
Pin(num='2',name='IN-',do_erc=True),
Pin(num='3',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='4',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='5',name='OUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='6',name='BST',do_erc=True),
Pin(num='7',name='VCC',do_erc=True),
Pin(num='8',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='9',name='VCC2',func=Pin.PWRIN,do_erc=True),
Pin(num='7',name='VCC',do_erc=True),
Pin(num='8',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='9',name='VCC2',func=Pin.PWRIN,do_erc=True),
Pin(num='10',name='BST',do_erc=True),
Pin(num='11',name='OUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='12',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='13',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='14',name='IN-',do_erc=True),
Pin(num='15',name='IN+',do_erc=True)]),
Part(name='TDA2003H',dest=TEMPLATE,tool=SKIDL,keywords='TDA2003H Amplifier 10W Pentawatt',description='10W Car Radio Audio Amplifier, TO220-5 (PentaWatt5H)',ref_prefix='U',num_units=1,fplist=['TO*'],do_erc=True,pins=[
Pin(num='1',name='IN+',do_erc=True),
Pin(num='2',name='IN-',do_erc=True),
Pin(num='3',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='4',name='OUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='5',name='VCC',func=Pin.PWRIN,do_erc=True)]),
Part(name='TDA2003V',dest=TEMPLATE,tool=SKIDL,keywords='TDA2003V Amplifier 10W Pentawatt',description='10W Car Radio Audio Amplifier, PentaWatt5V',ref_prefix='U',num_units=1,fplist=['TO*'],do_erc=True,pins=[
Pin(num='1',name='IN+',do_erc=True),
Pin(num='2',name='IN-',do_erc=True),
Pin(num='3',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='4',name='OUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='5',name='VCC',func=Pin.PWRIN,do_erc=True)]),
Part(name='TDA2005R',dest=TEMPLATE,tool=SKIDL,keywords='TDA2005 Amplifier 20W Multiwatt',description='20W Bridge/Stereo Audio Amplifier for Car Radio, TO220-11 (MultiWatt11)',ref_prefix='U',num_units=2,fplist=['TO*'],do_erc=True,pins=[
Pin(num='3',name='SVRR',func=Pin.PASSIVE,do_erc=True),
Pin(num='6',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='9',name='VCC',func=Pin.PWRIN,do_erc=True),
Pin(num='1',name='IN+',do_erc=True),
Pin(num='2',name='IN-',do_erc=True),
Pin(num='10',name='OUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='11',name='BST',func=Pin.PASSIVE,do_erc=True),
Pin(num='4',name='IN-',do_erc=True),
Pin(num='5',name='IN+',do_erc=True),
Pin(num='7',name='BST',func=Pin.PASSIVE,do_erc=True),
Pin(num='8',name='OUT',func=Pin.OUTPUT,do_erc=True)]),
Part(name='TDA2030',dest=TEMPLATE,tool=SKIDL,keywords='TDA2030 Amplifier 14W Pentawatt',description='14W Hi-Fi Audio Amplifier, TO220-5 (PentaWatt)',ref_prefix='U',num_units=1,fplist=['TO*'],do_erc=True,pins=[
Pin(num='1',name='IN+',do_erc=True),
Pin(num='2',name='IN-',do_erc=True),
Pin(num='3',name='VEE',func=Pin.PWRIN,do_erc=True),
Pin(num='4',name='OUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='5',name='VCC',func=Pin.PWRIN,do_erc=True)]),
Part(name='TDA2050',dest=TEMPLATE,tool=SKIDL,keywords='TDA2050 Amplifier 32W Pentawatt',description='32W Hi-Fi Audio Amplifier, TO220-5 (PentaWatt)',ref_prefix='U',num_units=1,fplist=['TO*'],do_erc=True,pins=[
Pin(num='1',name='IN+',do_erc=True),
Pin(num='2',name='IN-',do_erc=True),
Pin(num='3',name='VEE',func=Pin.PWRIN,do_erc=True),
Pin(num='4',name='OUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='5',name='VCC',func=Pin.PWRIN,do_erc=True)]),
Part(name='TDA7294HS',dest=TEMPLATE,tool=SKIDL,keywords='TDA7294 Amplifier Single 100W',description='Single 100W Audio Power Amplifier, with Mute and Standby Mode, MultiWatt15H',ref_prefix='U',num_units=1,fplist=['TO*'],do_erc=True,pins=[
Pin(num='1',name='STBY-GND',func=Pin.PASSIVE,do_erc=True),
Pin(num='2',name='IN-',do_erc=True),
Pin(num='3',name='IN+',do_erc=True),
Pin(num='4',name='IN+MUTE',func=Pin.PASSIVE,do_erc=True),
Pin(num='6',name='BST',func=Pin.PASSIVE,do_erc=True),
Pin(num='7',name='Vs+',func=Pin.PWRIN,do_erc=True),
Pin(num='8',name='Vs-',func=Pin.PWRIN,do_erc=True),
Pin(num='9',name='STBY',do_erc=True),
Pin(num='10',name='MUTE',do_erc=True),
Pin(num='13',name='PWVs+',func=Pin.PWRIN,do_erc=True),
Pin(num='14',name='OUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='15',name='PWVs-',func=Pin.PWRIN,do_erc=True)]),
Part(name='TDA7294V',dest=TEMPLATE,tool=SKIDL,keywords='TDA7294 Amplifier Single 100W',description='Single 100W Audio Power Amplifier, with Mute and Standby Mode, TO220-15 (MultiWatt15V)',ref_prefix='U',num_units=1,fplist=['TO*'],do_erc=True,pins=[
Pin(num='1',name='STBY-GND',func=Pin.PASSIVE,do_erc=True),
Pin(num='2',name='IN-',do_erc=True),
Pin(num='3',name='IN+',do_erc=True),
Pin(num='4',name='IN+MUTE',func=Pin.PASSIVE,do_erc=True),
Pin(num='6',name='BST',func=Pin.PASSIVE,do_erc=True),
Pin(num='7',name='Vs+',func=Pin.PWRIN,do_erc=True),
Pin(num='8',name='Vs-',func=Pin.PWRIN,do_erc=True),
Pin(num='9',name='STBY',do_erc=True),
Pin(num='10',name='MUTE',do_erc=True),
Pin(num='13',name='PWVs+',func=Pin.PWRIN,do_erc=True),
Pin(num='14',name='OUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='15',name='PWVs-',func=Pin.PWRIN,do_erc=True)]),
Part(name='THAT2180',dest=TEMPLATE,tool=SKIDL,keywords='AUDIO VCA',description='VCA (THAT Corporation)',ref_prefix='U',num_units=1,do_erc=True,aliases=['THAT2181'],pins=[
Pin(num='5',name='V-',func=Pin.PWRIN,do_erc=True),
Pin(num='6',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='7',name='V+',func=Pin.PWRIN,do_erc=True),
Pin(num='1',name='E',do_erc=True),
Pin(num='2',name='+Vc',do_erc=True),
Pin(num='3',name='-Vc',do_erc=True),
Pin(num='4',name='Sym',do_erc=True),
Pin(num='8',name='~',func=Pin.OUTPUT,do_erc=True)]),
Part(name='TR-AUDIO-2P',dest=TEMPLATE,tool=SKIDL,keywords='TRANSFO',description='Microphone Input Transformer (2 P 1S)',ref_prefix='T',num_units=1,do_erc=True,pins=[
Pin(num='1',name='P1+',func=Pin.PASSIVE,do_erc=True),
Pin(num='2',name='P1-',func=Pin.PASSIVE,do_erc=True),
Pin(num='3',name='P2-',func=Pin.PASSIVE,do_erc=True),
Pin(num='4',name='P2+',func=Pin.PASSIVE,do_erc=True),
Pin(num='5',name='S-',func=Pin.PASSIVE,do_erc=True),
Pin(num='6',name='S+',func=Pin.PASSIVE,do_erc=True),
Pin(num='8',name='~',do_erc=True)])])
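# A minimal sketch, run only when this library file is executed directly (assumption:
# SchLib exposes the Part templates added above through its .parts attribute, as in
# current skidl releases). It simply lists the part names defined in this library.
if __name__ == '__main__':
    for template in audio.parts:
        print(template.name)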
|
|
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""ResourceFinder is a helper class for finding resources given their name."""
import os
from tvcm import module
from tvcm import style_sheet as style_sheet_module
from tvcm import resource as resource_module
from tvcm import html_module
from tvcm import strip_js_comments
class ResourceLoader(object):
"""Manges loading modules and their dependencies from files.
Modules handle parsing and the construction of their individual dependency
pointers. The loader deals with bookkeeping of what has been loaded, and
mapping names to file resources.
"""
def __init__(self, project):
self.project = project
self.stripped_js_by_filename = {}
self.loaded_modules = {}
self.loaded_raw_scripts = {}
self.loaded_style_sheets = {}
self.loaded_images = {}
@property
def source_paths(self):
"""A list of base directories to search for modules under."""
return self.project.source_paths
def FindResource(self, some_path):
"""Finds a Resource for the given path.
Args:
some_path: A relative or absolute path to a file.
Returns:
A Resource or None.
"""
if os.path.isabs(some_path):
return self.FindResourceGivenAbsolutePath(some_path)
else:
return self.FindResourceGivenRelativePath(some_path)
def FindResourceGivenAbsolutePath(self, absolute_path):
"""Returns a Resource for the given absolute path."""
candidate_paths = []
for source_path in self.source_paths:
if absolute_path.startswith(source_path):
candidate_paths.append(source_path)
if len(candidate_paths) == 0:
return None
# Sort by length. Longest match wins.
    candidate_paths.sort(key=len)
longest_candidate = candidate_paths[-1]
return resource_module.Resource(longest_candidate, absolute_path)
def FindResourceGivenRelativePath(self, relative_path):
"""Returns a Resource for the given relative path."""
absolute_path = None
for script_path in self.source_paths:
absolute_path = os.path.join(script_path, relative_path)
if os.path.exists(absolute_path):
return resource_module.Resource(script_path, absolute_path)
return None
def _FindResourceGivenNameAndSuffix(self, requested_name, extension, return_resource=False):
"""Searches for a file and reads its contents.
Args:
requested_name: The name of the resource that was requested.
extension: The extension for this requested resource.
Returns:
A (path, contents) pair.
"""
pathy_name = requested_name.replace('.', os.sep)
filename = pathy_name + extension
resource = self.FindResourceGivenRelativePath(filename)
if return_resource:
return resource
if not resource:
return None, None
return _read_file(resource.absolute_path)
def FindModuleResource(self, requested_module_name):
"""Finds a module javascript file and returns a Resource, or none."""
js_resource = self._FindResourceGivenNameAndSuffix(requested_module_name, '.js', return_resource=True)
html_resource = self._FindResourceGivenNameAndSuffix(requested_module_name, '.html', return_resource=True)
if js_resource and html_resource:
if html_module.IsHTMLResourceTheModuleGivenConflictingResourceNames(js_resource, html_resource):
return html_resource
return js_resource
elif js_resource:
return js_resource
return html_resource
def LoadModule(self, module_name=None, module_filename=None):
assert bool(module_name) ^ bool(module_filename), 'Must provide module_name or module_filename.'
if module_filename:
resource = self.FindResource(module_filename)
if not resource:
raise Exception('Could not find %s in %s' % (
module_filename, repr(self.source_paths)))
module_name = resource.name
else:
resource = None # Will be set if we end up needing to load.
if module_name in self.loaded_modules:
assert self.loaded_modules[module_name].contents
return self.loaded_modules[module_name]
if not resource: # happens when module_name was given
resource = self.FindModuleResource(module_name)
if not resource:
raise module.DepsException('No resource for module "%s"' % module_name)
if resource.absolute_path.endswith('.js'):
raise Exception(".js modules are deprecated")
m = html_module.HTMLModule(self, module_name, resource)
m.Parse()
self.loaded_modules[module_name] = m
m.Load()
return m
def LoadRawScript(self, relative_raw_script_path):
resource = None
for source_path in self.source_paths:
possible_absolute_path = os.path.join(source_path, relative_raw_script_path)
if os.path.exists(possible_absolute_path):
resource = resource_module.Resource(source_path, possible_absolute_path)
break
if not resource:
      raise module.DepsException('Could not find a file for raw script %s in %s' % (
          relative_raw_script_path, self.source_paths))
assert relative_raw_script_path == resource.unix_style_relative_path, \
'Expected %s == %s' % (relative_raw_script_path, resource.unix_style_relative_path)
if resource.absolute_path in self.loaded_raw_scripts:
return self.loaded_raw_scripts[resource.absolute_path]
raw_script = module.RawScript(resource)
self.loaded_raw_scripts[resource.absolute_path] = raw_script
return raw_script
def LoadStyleSheet(self, name):
if name in self.loaded_style_sheets:
return self.loaded_style_sheets[name]
resource = self._FindResourceGivenNameAndSuffix(name, '.css', return_resource=True)
if not resource:
raise module.DepsException('Could not find a file for stylesheet %s' % name)
style_sheet = style_sheet_module.StyleSheet(self, name, resource)
style_sheet.load()
self.loaded_style_sheets[name] = style_sheet
return style_sheet
def LoadImage(self, abs_path):
if abs_path in self.loaded_images:
return self.loaded_images[abs_path]
if not os.path.exists(abs_path):
raise module.DepsException(
"""url('%s') did not exist""" % abs_path)
res = self.FindResourceGivenAbsolutePath(abs_path)
    if res is None:
raise module.DepsException(
"""url('%s') was not in search path""" % abs_path)
image = style_sheet_module.Image(res)
self.loaded_images[abs_path] = image
return image
def GetStrippedJSForFilename(self, filename, early_out_if_no_tvcm):
if filename in self.stripped_js_by_filename:
return self.stripped_js_by_filename[filename]
with open(filename, 'r') as f:
contents = f.read(4096)
if early_out_if_no_tvcm and ('tvcm' not in contents):
return None
s = strip_js_comments.StripJSComments(contents)
self.stripped_js_by_filename[filename] = s
return s
def _read_file(absolute_path):
"""Reads a file and returns a (path, contents) pair.
Args:
absolute_path: Absolute path to a file.
Raises:
Exception: The given file doesn't exist.
IOError: There was a problem opening or reading the file.
"""
if not os.path.exists(absolute_path):
raise Exception('%s not found.' % absolute_path)
  with open(absolute_path, 'r') as f:
    contents = f.read()
  return absolute_path, contents
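# A minimal usage sketch, run only when this file is executed directly. _StubProject
# is a hypothetical stand-in for the real tvcm project object (only its source_paths
# attribute is needed here), and the relative path below is hypothetical too; the
# sketch exercises the path-to-Resource lookup described in the class docstring.
if __name__ == '__main__':
  class _StubProject(object):
    source_paths = [os.getcwd()]
  loader = ResourceLoader(_StubProject())
  res = loader.FindResource('tracing/base.html')  # hypothetical relative path
  print(res.absolute_path if res else 'resource not found')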
|
|
# -*- coding: utf-8 -*-
import base64
import json
import os
import os.path
import random
import shutil
import tempfile
import unittest
from py.test import ensuretemp
from pytest import mark
from docker import auth, errors
try:
from unittest import mock
except ImportError:
import mock
class RegressionTest(unittest.TestCase):
def test_803_urlsafe_encode(self):
auth_data = {
'username': 'root',
'password': 'GR?XGR?XGR?XGR?X'
}
encoded = auth.encode_header(auth_data)
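        # The password is chosen so that the JSON-encoded credentials contain bytes
        # that standard base64 would render as '/'; encode_header is expected to use
        # the URL-safe alphabet, which substitutes '_' (hence the asserts below).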
assert b'/' not in encoded
assert b'_' in encoded
class ResolveRepositoryNameTest(unittest.TestCase):
def test_resolve_repository_name_hub_library_image(self):
self.assertEqual(
auth.resolve_repository_name('image'),
('docker.io', 'image'),
)
def test_resolve_repository_name_dotted_hub_library_image(self):
self.assertEqual(
auth.resolve_repository_name('image.valid'),
('docker.io', 'image.valid')
)
def test_resolve_repository_name_hub_image(self):
self.assertEqual(
auth.resolve_repository_name('username/image'),
('docker.io', 'username/image'),
)
def test_explicit_hub_index_library_image(self):
self.assertEqual(
auth.resolve_repository_name('docker.io/image'),
('docker.io', 'image')
)
def test_explicit_legacy_hub_index_library_image(self):
self.assertEqual(
auth.resolve_repository_name('index.docker.io/image'),
('docker.io', 'image')
)
def test_resolve_repository_name_private_registry(self):
self.assertEqual(
auth.resolve_repository_name('my.registry.net/image'),
('my.registry.net', 'image'),
)
def test_resolve_repository_name_private_registry_with_port(self):
self.assertEqual(
auth.resolve_repository_name('my.registry.net:5000/image'),
('my.registry.net:5000', 'image'),
)
def test_resolve_repository_name_private_registry_with_username(self):
self.assertEqual(
auth.resolve_repository_name('my.registry.net/username/image'),
('my.registry.net', 'username/image'),
)
def test_resolve_repository_name_no_dots_but_port(self):
self.assertEqual(
auth.resolve_repository_name('hostname:5000/image'),
('hostname:5000', 'image'),
)
def test_resolve_repository_name_no_dots_but_port_and_username(self):
self.assertEqual(
auth.resolve_repository_name('hostname:5000/username/image'),
('hostname:5000', 'username/image'),
)
def test_resolve_repository_name_localhost(self):
self.assertEqual(
auth.resolve_repository_name('localhost/image'),
('localhost', 'image'),
)
def test_resolve_repository_name_localhost_with_username(self):
self.assertEqual(
auth.resolve_repository_name('localhost/username/image'),
('localhost', 'username/image'),
)
def test_invalid_index_name(self):
self.assertRaises(
errors.InvalidRepository,
lambda: auth.resolve_repository_name('-gecko.com/image')
)
def encode_auth(auth_info):
return base64.b64encode(
auth_info.get('username', '').encode('utf-8') + b':' +
auth_info.get('password', '').encode('utf-8'))
class ResolveAuthTest(unittest.TestCase):
index_config = {'auth': encode_auth({'username': 'indexuser'})}
private_config = {'auth': encode_auth({'username': 'privateuser'})}
legacy_config = {'auth': encode_auth({'username': 'legacyauth'})}
auth_config = auth.parse_auth({
'https://index.docker.io/v1/': index_config,
'my.registry.net': private_config,
'http://legacy.registry.url/v1/': legacy_config,
})
def test_resolve_authconfig_hostname_only(self):
self.assertEqual(
auth.resolve_authconfig(
self.auth_config, 'my.registry.net'
)['username'],
'privateuser'
)
def test_resolve_authconfig_no_protocol(self):
self.assertEqual(
auth.resolve_authconfig(
self.auth_config, 'my.registry.net/v1/'
)['username'],
'privateuser'
)
def test_resolve_authconfig_no_path(self):
self.assertEqual(
auth.resolve_authconfig(
self.auth_config, 'http://my.registry.net'
)['username'],
'privateuser'
)
def test_resolve_authconfig_no_path_trailing_slash(self):
self.assertEqual(
auth.resolve_authconfig(
self.auth_config, 'http://my.registry.net/'
)['username'],
'privateuser'
)
def test_resolve_authconfig_no_path_wrong_secure_proto(self):
self.assertEqual(
auth.resolve_authconfig(
self.auth_config, 'https://my.registry.net'
)['username'],
'privateuser'
)
def test_resolve_authconfig_no_path_wrong_insecure_proto(self):
self.assertEqual(
auth.resolve_authconfig(
self.auth_config, 'http://index.docker.io'
)['username'],
'indexuser'
)
def test_resolve_authconfig_path_wrong_proto(self):
self.assertEqual(
auth.resolve_authconfig(
self.auth_config, 'https://my.registry.net/v1/'
)['username'],
'privateuser'
)
def test_resolve_authconfig_default_registry(self):
self.assertEqual(
auth.resolve_authconfig(self.auth_config)['username'],
'indexuser'
)
def test_resolve_authconfig_default_explicit_none(self):
self.assertEqual(
auth.resolve_authconfig(self.auth_config, None)['username'],
'indexuser'
)
def test_resolve_authconfig_fully_explicit(self):
self.assertEqual(
auth.resolve_authconfig(
self.auth_config, 'http://my.registry.net/v1/'
)['username'],
'privateuser'
)
def test_resolve_authconfig_legacy_config(self):
self.assertEqual(
auth.resolve_authconfig(
self.auth_config, 'legacy.registry.url'
)['username'],
'legacyauth'
)
def test_resolve_authconfig_no_match(self):
self.assertTrue(
auth.resolve_authconfig(self.auth_config, 'does.not.exist') is None
)
def test_resolve_registry_and_auth_library_image(self):
image = 'image'
self.assertEqual(
auth.resolve_authconfig(
self.auth_config, auth.resolve_repository_name(image)[0]
)['username'],
'indexuser',
)
def test_resolve_registry_and_auth_hub_image(self):
image = 'username/image'
self.assertEqual(
auth.resolve_authconfig(
self.auth_config, auth.resolve_repository_name(image)[0]
)['username'],
'indexuser',
)
def test_resolve_registry_and_auth_explicit_hub(self):
image = 'docker.io/username/image'
self.assertEqual(
auth.resolve_authconfig(
self.auth_config, auth.resolve_repository_name(image)[0]
)['username'],
'indexuser',
)
def test_resolve_registry_and_auth_explicit_legacy_hub(self):
image = 'index.docker.io/username/image'
self.assertEqual(
auth.resolve_authconfig(
self.auth_config, auth.resolve_repository_name(image)[0]
)['username'],
'indexuser',
)
def test_resolve_registry_and_auth_private_registry(self):
image = 'my.registry.net/image'
self.assertEqual(
auth.resolve_authconfig(
self.auth_config, auth.resolve_repository_name(image)[0]
)['username'],
'privateuser',
)
def test_resolve_registry_and_auth_unauthenticated_registry(self):
image = 'other.registry.net/image'
self.assertEqual(
auth.resolve_authconfig(
self.auth_config, auth.resolve_repository_name(image)[0]
),
None,
)
class FindConfigFileTest(unittest.TestCase):
def tmpdir(self, name):
tmpdir = ensuretemp(name)
self.addCleanup(tmpdir.remove)
return tmpdir
def test_find_config_fallback(self):
tmpdir = self.tmpdir('test_find_config_fallback')
with mock.patch.dict(os.environ, {'HOME': str(tmpdir)}):
assert auth.find_config_file() is None
def test_find_config_from_explicit_path(self):
tmpdir = self.tmpdir('test_find_config_from_explicit_path')
config_path = tmpdir.ensure('my-config-file.json')
assert auth.find_config_file(str(config_path)) == str(config_path)
def test_find_config_from_environment(self):
tmpdir = self.tmpdir('test_find_config_from_environment')
config_path = tmpdir.ensure('config.json')
with mock.patch.dict(os.environ, {'DOCKER_CONFIG': str(tmpdir)}):
assert auth.find_config_file() == str(config_path)
@mark.skipif("sys.platform == 'win32'")
def test_find_config_from_home_posix(self):
tmpdir = self.tmpdir('test_find_config_from_home_posix')
config_path = tmpdir.ensure('.docker', 'config.json')
with mock.patch.dict(os.environ, {'HOME': str(tmpdir)}):
assert auth.find_config_file() == str(config_path)
@mark.skipif("sys.platform == 'win32'")
def test_find_config_from_home_legacy_name(self):
tmpdir = self.tmpdir('test_find_config_from_home_legacy_name')
config_path = tmpdir.ensure('.dockercfg')
with mock.patch.dict(os.environ, {'HOME': str(tmpdir)}):
assert auth.find_config_file() == str(config_path)
@mark.skipif("sys.platform != 'win32'")
def test_find_config_from_home_windows(self):
tmpdir = self.tmpdir('test_find_config_from_home_windows')
config_path = tmpdir.ensure('.docker', 'config.json')
with mock.patch.dict(os.environ, {'USERPROFILE': str(tmpdir)}):
assert auth.find_config_file() == str(config_path)
class LoadConfigTest(unittest.TestCase):
def test_load_config_no_file(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
cfg = auth.load_config(folder)
self.assertTrue(cfg is not None)
def test_load_config(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
dockercfg_path = os.path.join(folder, '.dockercfg')
with open(dockercfg_path, 'w') as f:
auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
f.write('auth = {0}\n'.format(auth_))
f.write('email = sakuya@scarlet.net')
cfg = auth.load_config(dockercfg_path)
assert auth.INDEX_NAME in cfg
self.assertNotEqual(cfg[auth.INDEX_NAME], None)
cfg = cfg[auth.INDEX_NAME]
self.assertEqual(cfg['username'], 'sakuya')
self.assertEqual(cfg['password'], 'izayoi')
self.assertEqual(cfg['email'], 'sakuya@scarlet.net')
self.assertEqual(cfg.get('auth'), None)
def test_load_config_with_random_name(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
dockercfg_path = os.path.join(folder,
'.{0}.dockercfg'.format(
random.randrange(100000)))
registry = 'https://your.private.registry.io'
auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
config = {
registry: {
'auth': '{0}'.format(auth_),
'email': 'sakuya@scarlet.net'
}
}
with open(dockercfg_path, 'w') as f:
json.dump(config, f)
cfg = auth.load_config(dockercfg_path)
assert registry in cfg
self.assertNotEqual(cfg[registry], None)
cfg = cfg[registry]
self.assertEqual(cfg['username'], 'sakuya')
self.assertEqual(cfg['password'], 'izayoi')
self.assertEqual(cfg['email'], 'sakuya@scarlet.net')
self.assertEqual(cfg.get('auth'), None)
def test_load_config_custom_config_env(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
dockercfg_path = os.path.join(folder, 'config.json')
registry = 'https://your.private.registry.io'
auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
config = {
registry: {
'auth': '{0}'.format(auth_),
'email': 'sakuya@scarlet.net'
}
}
with open(dockercfg_path, 'w') as f:
json.dump(config, f)
with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}):
cfg = auth.load_config(None)
assert registry in cfg
self.assertNotEqual(cfg[registry], None)
cfg = cfg[registry]
self.assertEqual(cfg['username'], 'sakuya')
self.assertEqual(cfg['password'], 'izayoi')
self.assertEqual(cfg['email'], 'sakuya@scarlet.net')
self.assertEqual(cfg.get('auth'), None)
def test_load_config_custom_config_env_with_auths(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
dockercfg_path = os.path.join(folder, 'config.json')
registry = 'https://your.private.registry.io'
auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
config = {
'auths': {
registry: {
'auth': '{0}'.format(auth_),
'email': 'sakuya@scarlet.net'
}
}
}
with open(dockercfg_path, 'w') as f:
json.dump(config, f)
with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}):
cfg = auth.load_config(None)
assert registry in cfg
self.assertNotEqual(cfg[registry], None)
cfg = cfg[registry]
self.assertEqual(cfg['username'], 'sakuya')
self.assertEqual(cfg['password'], 'izayoi')
self.assertEqual(cfg['email'], 'sakuya@scarlet.net')
self.assertEqual(cfg.get('auth'), None)
def test_load_config_custom_config_env_utf8(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
dockercfg_path = os.path.join(folder, 'config.json')
registry = 'https://your.private.registry.io'
auth_ = base64.b64encode(
b'sakuya\xc3\xa6:izayoi\xc3\xa6').decode('ascii')
config = {
'auths': {
registry: {
'auth': '{0}'.format(auth_),
'email': 'sakuya@scarlet.net'
}
}
}
with open(dockercfg_path, 'w') as f:
json.dump(config, f)
with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}):
cfg = auth.load_config(None)
assert registry in cfg
self.assertNotEqual(cfg[registry], None)
cfg = cfg[registry]
self.assertEqual(cfg['username'], b'sakuya\xc3\xa6'.decode('utf8'))
self.assertEqual(cfg['password'], b'izayoi\xc3\xa6'.decode('utf8'))
self.assertEqual(cfg['email'], 'sakuya@scarlet.net')
self.assertEqual(cfg.get('auth'), None)
def test_load_config_custom_config_env_with_headers(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
dockercfg_path = os.path.join(folder, 'config.json')
config = {
'HttpHeaders': {
'Name': 'Spike',
'Surname': 'Spiegel'
},
}
with open(dockercfg_path, 'w') as f:
json.dump(config, f)
with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}):
cfg = auth.load_config(None)
assert 'HttpHeaders' in cfg
self.assertNotEqual(cfg['HttpHeaders'], None)
cfg = cfg['HttpHeaders']
self.assertEqual(cfg['Name'], 'Spike')
self.assertEqual(cfg['Surname'], 'Spiegel')
def test_load_config_unknown_keys(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
dockercfg_path = os.path.join(folder, 'config.json')
config = {
'detachKeys': 'ctrl-q, ctrl-u, ctrl-i'
}
with open(dockercfg_path, 'w') as f:
json.dump(config, f)
cfg = auth.load_config(dockercfg_path)
assert cfg == {}
def test_load_config_invalid_auth_dict(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
dockercfg_path = os.path.join(folder, 'config.json')
config = {
'auths': {
'scarlet.net': {'sakuya': 'izayoi'}
}
}
with open(dockercfg_path, 'w') as f:
json.dump(config, f)
cfg = auth.load_config(dockercfg_path)
assert cfg == {'scarlet.net': {}}
def test_load_config_identity_token(self):
folder = tempfile.mkdtemp()
registry = 'scarlet.net'
token = '1ce1cebb-503e-7043-11aa-7feb8bd4a1ce'
self.addCleanup(shutil.rmtree, folder)
dockercfg_path = os.path.join(folder, 'config.json')
auth_entry = encode_auth({'username': 'sakuya'}).decode('ascii')
config = {
'auths': {
registry: {
'auth': auth_entry,
'identitytoken': token
}
}
}
with open(dockercfg_path, 'w') as f:
json.dump(config, f)
cfg = auth.load_config(dockercfg_path)
assert registry in cfg
cfg = cfg[registry]
assert 'IdentityToken' in cfg
assert cfg['IdentityToken'] == token
|
|
"""The tests for the Legacy Mqtt vacuum platform."""
from copy import deepcopy
import json
import pytest
from homeassistant.components import vacuum
from homeassistant.components.mqtt import CONF_COMMAND_TOPIC
from homeassistant.components.mqtt.vacuum import schema_legacy as mqttvacuum
from homeassistant.components.mqtt.vacuum.schema import services_to_strings
from homeassistant.components.mqtt.vacuum.schema_legacy import (
ALL_SERVICES,
SERVICE_TO_STRING,
)
from homeassistant.components.vacuum import (
ATTR_BATTERY_ICON,
ATTR_BATTERY_LEVEL,
ATTR_FAN_SPEED,
ATTR_FAN_SPEED_LIST,
ATTR_STATUS,
)
from homeassistant.const import CONF_NAME, CONF_PLATFORM, STATE_OFF, STATE_ON
from homeassistant.setup import async_setup_component
from .test_common import (
help_test_availability_when_connection_lost,
help_test_availability_without_topic,
help_test_custom_availability_payload,
help_test_default_availability_payload,
help_test_discovery_broken,
help_test_discovery_removal,
help_test_discovery_update,
help_test_discovery_update_attr,
help_test_entity_debug_info_message,
help_test_entity_device_info_remove,
help_test_entity_device_info_update,
help_test_entity_device_info_with_connection,
help_test_entity_device_info_with_identifier,
help_test_entity_id_update_discovery_update,
help_test_entity_id_update_subscriptions,
help_test_setting_attribute_via_mqtt_json_message,
help_test_setting_attribute_with_template,
help_test_unique_id,
help_test_update_with_json_attrs_bad_JSON,
help_test_update_with_json_attrs_not_dict,
)
from tests.common import async_fire_mqtt_message
from tests.components.vacuum import common
DEFAULT_CONFIG = {
CONF_PLATFORM: "mqtt",
CONF_NAME: "mqtttest",
CONF_COMMAND_TOPIC: "vacuum/command",
mqttvacuum.CONF_SEND_COMMAND_TOPIC: "vacuum/send_command",
mqttvacuum.CONF_BATTERY_LEVEL_TOPIC: "vacuum/state",
mqttvacuum.CONF_BATTERY_LEVEL_TEMPLATE: "{{ value_json.battery_level }}",
mqttvacuum.CONF_CHARGING_TOPIC: "vacuum/state",
mqttvacuum.CONF_CHARGING_TEMPLATE: "{{ value_json.charging }}",
mqttvacuum.CONF_CLEANING_TOPIC: "vacuum/state",
mqttvacuum.CONF_CLEANING_TEMPLATE: "{{ value_json.cleaning }}",
mqttvacuum.CONF_DOCKED_TOPIC: "vacuum/state",
mqttvacuum.CONF_DOCKED_TEMPLATE: "{{ value_json.docked }}",
mqttvacuum.CONF_ERROR_TOPIC: "vacuum/state",
mqttvacuum.CONF_ERROR_TEMPLATE: "{{ value_json.error }}",
mqttvacuum.CONF_FAN_SPEED_TOPIC: "vacuum/state",
mqttvacuum.CONF_FAN_SPEED_TEMPLATE: "{{ value_json.fan_speed }}",
mqttvacuum.CONF_SET_FAN_SPEED_TOPIC: "vacuum/set_fan_speed",
mqttvacuum.CONF_FAN_SPEED_LIST: ["min", "medium", "high", "max"],
}
DEFAULT_CONFIG_2 = {vacuum.DOMAIN: {"platform": "mqtt", "name": "test"}}
async def test_default_supported_features(hass, mqtt_mock):
"""Test that the correct supported features."""
assert await async_setup_component(
hass, vacuum.DOMAIN, {vacuum.DOMAIN: DEFAULT_CONFIG}
)
await hass.async_block_till_done()
entity = hass.states.get("vacuum.mqtttest")
entity_features = entity.attributes.get(mqttvacuum.CONF_SUPPORTED_FEATURES, 0)
assert sorted(services_to_strings(entity_features, SERVICE_TO_STRING)) == sorted(
[
"turn_on",
"turn_off",
"stop",
"return_home",
"battery",
"status",
"clean_spot",
]
)
async def test_all_commands(hass, mqtt_mock):
"""Test simple commands to the vacuum."""
config = deepcopy(DEFAULT_CONFIG)
config[mqttvacuum.CONF_SUPPORTED_FEATURES] = services_to_strings(
ALL_SERVICES, SERVICE_TO_STRING
)
assert await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})
await hass.async_block_till_done()
await common.async_turn_on(hass, "vacuum.mqtttest")
mqtt_mock.async_publish.assert_called_once_with(
"vacuum/command", "turn_on", 0, False
)
mqtt_mock.async_publish.reset_mock()
await common.async_turn_off(hass, "vacuum.mqtttest")
mqtt_mock.async_publish.assert_called_once_with(
"vacuum/command", "turn_off", 0, False
)
mqtt_mock.async_publish.reset_mock()
await common.async_stop(hass, "vacuum.mqtttest")
mqtt_mock.async_publish.assert_called_once_with("vacuum/command", "stop", 0, False)
mqtt_mock.async_publish.reset_mock()
await common.async_clean_spot(hass, "vacuum.mqtttest")
mqtt_mock.async_publish.assert_called_once_with(
"vacuum/command", "clean_spot", 0, False
)
mqtt_mock.async_publish.reset_mock()
await common.async_locate(hass, "vacuum.mqtttest")
mqtt_mock.async_publish.assert_called_once_with(
"vacuum/command", "locate", 0, False
)
mqtt_mock.async_publish.reset_mock()
await common.async_start_pause(hass, "vacuum.mqtttest")
mqtt_mock.async_publish.assert_called_once_with(
"vacuum/command", "start_pause", 0, False
)
mqtt_mock.async_publish.reset_mock()
await common.async_return_to_base(hass, "vacuum.mqtttest")
mqtt_mock.async_publish.assert_called_once_with(
"vacuum/command", "return_to_base", 0, False
)
mqtt_mock.async_publish.reset_mock()
await common.async_set_fan_speed(hass, "high", "vacuum.mqtttest")
mqtt_mock.async_publish.assert_called_once_with(
"vacuum/set_fan_speed", "high", 0, False
)
mqtt_mock.async_publish.reset_mock()
await common.async_send_command(hass, "44 FE 93", entity_id="vacuum.mqtttest")
mqtt_mock.async_publish.assert_called_once_with(
"vacuum/send_command", "44 FE 93", 0, False
)
mqtt_mock.async_publish.reset_mock()
await common.async_send_command(
hass, "44 FE 93", {"key": "value"}, entity_id="vacuum.mqtttest"
)
assert json.loads(mqtt_mock.async_publish.mock_calls[-1][1][1]) == {
"command": "44 FE 93",
"key": "value",
}
await common.async_send_command(
hass, "44 FE 93", {"key": "value"}, entity_id="vacuum.mqtttest"
)
assert json.loads(mqtt_mock.async_publish.mock_calls[-1][1][1]) == {
"command": "44 FE 93",
"key": "value",
}
async def test_commands_without_supported_features(hass, mqtt_mock):
"""Test commands which are not supported by the vacuum."""
config = deepcopy(DEFAULT_CONFIG)
services = mqttvacuum.STRING_TO_SERVICE["status"]
config[mqttvacuum.CONF_SUPPORTED_FEATURES] = services_to_strings(
services, SERVICE_TO_STRING
)
assert await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})
await hass.async_block_till_done()
await common.async_turn_on(hass, "vacuum.mqtttest")
mqtt_mock.async_publish.assert_not_called()
mqtt_mock.async_publish.reset_mock()
await common.async_turn_off(hass, "vacuum.mqtttest")
mqtt_mock.async_publish.assert_not_called()
mqtt_mock.async_publish.reset_mock()
await common.async_stop(hass, "vacuum.mqtttest")
mqtt_mock.async_publish.assert_not_called()
mqtt_mock.async_publish.reset_mock()
await common.async_clean_spot(hass, "vacuum.mqtttest")
mqtt_mock.async_publish.assert_not_called()
mqtt_mock.async_publish.reset_mock()
await common.async_locate(hass, "vacuum.mqtttest")
mqtt_mock.async_publish.assert_not_called()
mqtt_mock.async_publish.reset_mock()
await common.async_start_pause(hass, "vacuum.mqtttest")
mqtt_mock.async_publish.assert_not_called()
mqtt_mock.async_publish.reset_mock()
await common.async_return_to_base(hass, "vacuum.mqtttest")
mqtt_mock.async_publish.assert_not_called()
mqtt_mock.async_publish.reset_mock()
await common.async_set_fan_speed(hass, "high", "vacuum.mqtttest")
mqtt_mock.async_publish.assert_not_called()
mqtt_mock.async_publish.reset_mock()
await common.async_send_command(hass, "44 FE 93", entity_id="vacuum.mqtttest")
mqtt_mock.async_publish.assert_not_called()
mqtt_mock.async_publish.reset_mock()
async def test_attributes_without_supported_features(hass, mqtt_mock):
"""Test attributes which are not supported by the vacuum."""
config = deepcopy(DEFAULT_CONFIG)
services = mqttvacuum.STRING_TO_SERVICE["turn_on"]
config[mqttvacuum.CONF_SUPPORTED_FEATURES] = services_to_strings(
services, SERVICE_TO_STRING
)
assert await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})
await hass.async_block_till_done()
message = """{
"battery_level": 54,
"cleaning": true,
"docked": false,
"charging": false,
"fan_speed": "max"
}"""
async_fire_mqtt_message(hass, "vacuum/state", message)
state = hass.states.get("vacuum.mqtttest")
assert state.state == STATE_ON
assert state.attributes.get(ATTR_BATTERY_LEVEL) is None
assert state.attributes.get(ATTR_BATTERY_ICON) is None
assert state.attributes.get(ATTR_FAN_SPEED) is None
assert state.attributes.get(ATTR_FAN_SPEED_LIST) is None
async def test_status(hass, mqtt_mock):
"""Test status updates from the vacuum."""
config = deepcopy(DEFAULT_CONFIG)
config[mqttvacuum.CONF_SUPPORTED_FEATURES] = services_to_strings(
ALL_SERVICES, SERVICE_TO_STRING
)
assert await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})
await hass.async_block_till_done()
message = """{
"battery_level": 54,
"cleaning": true,
"docked": false,
"charging": false,
"fan_speed": "max"
}"""
async_fire_mqtt_message(hass, "vacuum/state", message)
state = hass.states.get("vacuum.mqtttest")
assert state.state == STATE_ON
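    # With 54% battery and not charging, the icon is expected to encode the level
    # rounded to the nearest ten percent, i.e. "mdi:battery-50".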
assert state.attributes.get(ATTR_BATTERY_ICON) == "mdi:battery-50"
assert state.attributes.get(ATTR_BATTERY_LEVEL) == 54
assert state.attributes.get(ATTR_FAN_SPEED) == "max"
message = """{
"battery_level": 61,
"docked": true,
"cleaning": false,
"charging": true,
"fan_speed": "min"
}"""
async_fire_mqtt_message(hass, "vacuum/state", message)
state = hass.states.get("vacuum.mqtttest")
assert state.state == STATE_OFF
assert state.attributes.get(ATTR_BATTERY_ICON) == "mdi:battery-charging-60"
assert state.attributes.get(ATTR_BATTERY_LEVEL) == 61
assert state.attributes.get(ATTR_FAN_SPEED) == "min"
async def test_status_battery(hass, mqtt_mock):
"""Test status updates from the vacuum."""
config = deepcopy(DEFAULT_CONFIG)
config[mqttvacuum.CONF_SUPPORTED_FEATURES] = services_to_strings(
ALL_SERVICES, SERVICE_TO_STRING
)
assert await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})
await hass.async_block_till_done()
message = """{
"battery_level": 54
}"""
async_fire_mqtt_message(hass, "vacuum/state", message)
state = hass.states.get("vacuum.mqtttest")
assert state.attributes.get(ATTR_BATTERY_ICON) == "mdi:battery-50"
async def test_status_cleaning(hass, mqtt_mock):
"""Test status updates from the vacuum."""
config = deepcopy(DEFAULT_CONFIG)
config[mqttvacuum.CONF_SUPPORTED_FEATURES] = services_to_strings(
ALL_SERVICES, SERVICE_TO_STRING
)
assert await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})
await hass.async_block_till_done()
message = """{
"cleaning": true
}"""
async_fire_mqtt_message(hass, "vacuum/state", message)
state = hass.states.get("vacuum.mqtttest")
assert state.state == STATE_ON
async def test_status_docked(hass, mqtt_mock):
"""Test status updates from the vacuum."""
config = deepcopy(DEFAULT_CONFIG)
config[mqttvacuum.CONF_SUPPORTED_FEATURES] = services_to_strings(
ALL_SERVICES, SERVICE_TO_STRING
)
assert await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})
await hass.async_block_till_done()
message = """{
"docked": true
}"""
async_fire_mqtt_message(hass, "vacuum/state", message)
state = hass.states.get("vacuum.mqtttest")
assert state.state == STATE_OFF
async def test_status_charging(hass, mqtt_mock):
"""Test status updates from the vacuum."""
config = deepcopy(DEFAULT_CONFIG)
config[mqttvacuum.CONF_SUPPORTED_FEATURES] = services_to_strings(
ALL_SERVICES, SERVICE_TO_STRING
)
assert await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})
await hass.async_block_till_done()
message = """{
"charging": true
}"""
async_fire_mqtt_message(hass, "vacuum/state", message)
state = hass.states.get("vacuum.mqtttest")
assert state.attributes.get(ATTR_BATTERY_ICON) == "mdi:battery-outline"
async def test_status_fan_speed(hass, mqtt_mock):
"""Test status updates from the vacuum."""
config = deepcopy(DEFAULT_CONFIG)
config[mqttvacuum.CONF_SUPPORTED_FEATURES] = services_to_strings(
ALL_SERVICES, SERVICE_TO_STRING
)
assert await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})
await hass.async_block_till_done()
message = """{
"fan_speed": "max"
}"""
async_fire_mqtt_message(hass, "vacuum/state", message)
state = hass.states.get("vacuum.mqtttest")
assert state.attributes.get(ATTR_FAN_SPEED) == "max"
async def test_status_fan_speed_list(hass, mqtt_mock):
"""Test status updates from the vacuum."""
config = deepcopy(DEFAULT_CONFIG)
config[mqttvacuum.CONF_SUPPORTED_FEATURES] = services_to_strings(
ALL_SERVICES, SERVICE_TO_STRING
)
assert await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})
await hass.async_block_till_done()
state = hass.states.get("vacuum.mqtttest")
assert state.attributes.get(ATTR_FAN_SPEED_LIST) == ["min", "medium", "high", "max"]
async def test_status_no_fan_speed_list(hass, mqtt_mock):
"""Test status updates from the vacuum.
If the vacuum doesn't support fan speed, fan speed list should be None.
"""
config = deepcopy(DEFAULT_CONFIG)
services = ALL_SERVICES - mqttvacuum.SUPPORT_FAN_SPEED
config[mqttvacuum.CONF_SUPPORTED_FEATURES] = services_to_strings(
services, SERVICE_TO_STRING
)
assert await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})
await hass.async_block_till_done()
state = hass.states.get("vacuum.mqtttest")
assert state.attributes.get(ATTR_FAN_SPEED_LIST) is None
async def test_status_error(hass, mqtt_mock):
"""Test status updates from the vacuum."""
config = deepcopy(DEFAULT_CONFIG)
config[mqttvacuum.CONF_SUPPORTED_FEATURES] = services_to_strings(
ALL_SERVICES, SERVICE_TO_STRING
)
assert await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})
await hass.async_block_till_done()
message = """{
"error": "Error1"
}"""
async_fire_mqtt_message(hass, "vacuum/state", message)
state = hass.states.get("vacuum.mqtttest")
assert state.attributes.get(ATTR_STATUS) == "Error: Error1"
message = """{
"error": ""
}"""
async_fire_mqtt_message(hass, "vacuum/state", message)
state = hass.states.get("vacuum.mqtttest")
assert state.attributes.get(ATTR_STATUS) == "Stopped"
async def test_battery_template(hass, mqtt_mock):
"""Test that you can use non-default templates for battery_level."""
config = deepcopy(DEFAULT_CONFIG)
config.update(
{
mqttvacuum.CONF_SUPPORTED_FEATURES: services_to_strings(
ALL_SERVICES, SERVICE_TO_STRING
),
mqttvacuum.CONF_BATTERY_LEVEL_TOPIC: "retroroomba/battery_level",
mqttvacuum.CONF_BATTERY_LEVEL_TEMPLATE: "{{ value }}",
}
)
assert await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "retroroomba/battery_level", "54")
state = hass.states.get("vacuum.mqtttest")
assert state.attributes.get(ATTR_BATTERY_LEVEL) == 54
assert state.attributes.get(ATTR_BATTERY_ICON) == "mdi:battery-50"
async def test_status_invalid_json(hass, mqtt_mock):
"""Test to make sure nothing breaks if the vacuum sends bad JSON."""
config = deepcopy(DEFAULT_CONFIG)
config[mqttvacuum.CONF_SUPPORTED_FEATURES] = services_to_strings(
ALL_SERVICES, SERVICE_TO_STRING
)
assert await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "vacuum/state", '{"asdfasas false}')
state = hass.states.get("vacuum.mqtttest")
assert state.state == STATE_OFF
assert state.attributes.get(ATTR_STATUS) == "Stopped"
async def test_missing_battery_template(hass, mqtt_mock):
"""Test to make sure missing template is not allowed."""
config = deepcopy(DEFAULT_CONFIG)
config.pop(mqttvacuum.CONF_BATTERY_LEVEL_TEMPLATE)
assert await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})
await hass.async_block_till_done()
state = hass.states.get("vacuum.mqtttest")
assert state is None
async def test_missing_charging_template(hass, mqtt_mock):
"""Test to make sure missing template is not allowed."""
config = deepcopy(DEFAULT_CONFIG)
config.pop(mqttvacuum.CONF_CHARGING_TEMPLATE)
assert await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})
await hass.async_block_till_done()
state = hass.states.get("vacuum.mqtttest")
assert state is None
async def test_missing_cleaning_template(hass, mqtt_mock):
"""Test to make sure missing template is not allowed."""
config = deepcopy(DEFAULT_CONFIG)
config.pop(mqttvacuum.CONF_CLEANING_TEMPLATE)
assert await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})
await hass.async_block_till_done()
state = hass.states.get("vacuum.mqtttest")
assert state is None
async def test_missing_docked_template(hass, mqtt_mock):
"""Test to make sure missing template is not allowed."""
config = deepcopy(DEFAULT_CONFIG)
config.pop(mqttvacuum.CONF_DOCKED_TEMPLATE)
assert await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})
await hass.async_block_till_done()
state = hass.states.get("vacuum.mqtttest")
assert state is None
async def test_missing_error_template(hass, mqtt_mock):
"""Test to make sure missing template is not allowed."""
config = deepcopy(DEFAULT_CONFIG)
config.pop(mqttvacuum.CONF_ERROR_TEMPLATE)
assert await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})
await hass.async_block_till_done()
state = hass.states.get("vacuum.mqtttest")
assert state is None
async def test_missing_fan_speed_template(hass, mqtt_mock):
"""Test to make sure missing template is not allowed."""
config = deepcopy(DEFAULT_CONFIG)
config.pop(mqttvacuum.CONF_FAN_SPEED_TEMPLATE)
assert await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})
await hass.async_block_till_done()
state = hass.states.get("vacuum.mqtttest")
assert state is None
async def test_availability_when_connection_lost(hass, mqtt_mock):
"""Test availability after MQTT disconnection."""
await help_test_availability_when_connection_lost(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_availability_without_topic(hass, mqtt_mock):
"""Test availability without defined availability topic."""
await help_test_availability_without_topic(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_default_availability_payload(hass, mqtt_mock):
"""Test availability by default payload with defined topic."""
await help_test_default_availability_payload(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_custom_availability_payload(hass, mqtt_mock):
"""Test availability by custom payload with defined topic."""
await help_test_custom_availability_payload(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_setting_attribute_via_mqtt_json_message(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_attribute_via_mqtt_json_message(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_setting_attribute_with_template(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_attribute_with_template(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_update_with_json_attrs_not_dict(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
await help_test_update_with_json_attrs_not_dict(
hass, mqtt_mock, caplog, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_update_with_json_attrs_bad_JSON(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
await help_test_update_with_json_attrs_bad_JSON(
hass, mqtt_mock, caplog, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_discovery_update_attr(hass, mqtt_mock, caplog):
"""Test update of discovered MQTTAttributes."""
await help_test_discovery_update_attr(
hass, mqtt_mock, caplog, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_unique_id(hass, mqtt_mock):
"""Test unique id option only creates one vacuum per unique_id."""
config = {
vacuum.DOMAIN: [
{
"platform": "mqtt",
"name": "Test 1",
"command_topic": "test_topic",
"unique_id": "TOTALLY_UNIQUE",
},
{
"platform": "mqtt",
"name": "Test 2",
"command_topic": "test_topic",
"unique_id": "TOTALLY_UNIQUE",
},
]
}
await help_test_unique_id(hass, mqtt_mock, vacuum.DOMAIN, config)
async def test_discovery_removal_vacuum(hass, mqtt_mock, caplog):
"""Test removal of discovered vacuum."""
data = json.dumps(DEFAULT_CONFIG_2[vacuum.DOMAIN])
await help_test_discovery_removal(hass, mqtt_mock, caplog, vacuum.DOMAIN, data)
async def test_discovery_update_vacuum(hass, mqtt_mock, caplog):
"""Test update of discovered vacuum."""
data1 = '{ "name": "Beer",' ' "command_topic": "test_topic" }'
data2 = '{ "name": "Milk",' ' "command_topic": "test_topic" }'
await help_test_discovery_update(
hass, mqtt_mock, caplog, vacuum.DOMAIN, data1, data2
)
@pytest.mark.no_fail_on_log_exception
async def test_discovery_broken(hass, mqtt_mock, caplog):
"""Test handling of bad discovery message."""
data1 = '{ "name": "Beer",' ' "command_topic": "test_topic#" }'
data2 = '{ "name": "Milk",' ' "command_topic": "test_topic" }'
await help_test_discovery_broken(
hass, mqtt_mock, caplog, vacuum.DOMAIN, data1, data2
)
async def test_entity_device_info_with_connection(hass, mqtt_mock):
"""Test MQTT vacuum device registry integration."""
await help_test_entity_device_info_with_connection(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_entity_device_info_with_identifier(hass, mqtt_mock):
"""Test MQTT vacuum device registry integration."""
await help_test_entity_device_info_with_identifier(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_entity_device_info_update(hass, mqtt_mock):
"""Test device registry update."""
await help_test_entity_device_info_update(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_entity_device_info_remove(hass, mqtt_mock):
"""Test device registry remove."""
await help_test_entity_device_info_remove(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_entity_id_update_subscriptions(hass, mqtt_mock):
"""Test MQTT subscriptions are managed when entity_id is updated."""
config = {
vacuum.DOMAIN: {
"platform": "mqtt",
"name": "test",
"battery_level_topic": "test-topic",
"battery_level_template": "{{ value_json.battery_level }}",
"command_topic": "command-topic",
"availability_topic": "avty-topic",
}
}
await help_test_entity_id_update_subscriptions(
hass, mqtt_mock, vacuum.DOMAIN, config, ["test-topic", "avty-topic"]
)
async def test_entity_id_update_discovery_update(hass, mqtt_mock):
"""Test MQTT discovery update when entity_id is updated."""
await help_test_entity_id_update_discovery_update(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_entity_debug_info_message(hass, mqtt_mock):
"""Test MQTT debug info."""
config = {
vacuum.DOMAIN: {
"platform": "mqtt",
"name": "test",
"battery_level_topic": "test-topic",
"battery_level_template": "{{ value_json.battery_level }}",
"command_topic": "command-topic",
"availability_topic": "avty-topic",
}
}
await help_test_entity_debug_info_message(
hass, mqtt_mock, vacuum.DOMAIN, config, "test-topic"
)
|
|
#
# Copyright (c) 2014 Juniper Networks, Inc. All rights reserved.
#
import bottle
import cgitb
import gevent
from cfgm_common import jsonutils as json
import logging
from pprint import pformat
import requests
import sys
import string
import ConfigParser
from pysandesh.sandesh_base import *
from pysandesh.sandesh_logger import *
from vnc_api import vnc_api
from neutron_plugin_db import DBInterface
from cfgm_common.utils import CacheContainer
@bottle.error(400)
def error_400(err):
return err.body
@bottle.error(404)
def error_404(err):
return err.body
@bottle.error(409)
def error_409(err):
return err.body
class NeutronPluginInterface(object):
"""
    An instance of this class receives requests from the Contrail Neutron plugin.
"""
def __init__(self, api_server_ip, api_server_port, conf_sections, sandesh):
if api_server_ip == '0.0.0.0':
self._vnc_api_ip = '127.0.0.1'
else:
self._vnc_api_ip = api_server_ip
self._vnc_api_port = api_server_port
self._config_sections = conf_sections
self._auth_user = conf_sections.get('KEYSTONE', 'admin_user')
self._auth_passwd = conf_sections.get('KEYSTONE', 'admin_password')
self._auth_tenant = conf_sections.get('KEYSTONE', 'admin_tenant_name')
try:
exts_enabled = conf_sections.getboolean('NEUTRON',
'contrail_extensions_enabled')
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
exts_enabled = True
self._contrail_extensions_enabled = exts_enabled
try:
_vnc_connection_cache_size = int(
conf_sections.get("DEFAULTS", "vnc_connection_cache_size"))
except ConfigParser.NoOptionError:
_vnc_connection_cache_size = 0
try:
self._multi_tenancy = conf_sections.get('DEFAULTS', 'multi_tenancy')
except ConfigParser.NoOptionError:
self._multi_tenancy = False
try:
self._list_optimization_enabled = \
conf_sections.get('DEFAULTS', 'list_optimization_enabled')
except ConfigParser.NoOptionError:
self._list_optimization_enabled = False
try:
self._sn_host_route = conf_sections.get('DEFAULTS',
'apply_subnet_host_routes')
except ConfigParser.NoOptionError:
self._sn_host_route = False
self._cfgdb = None
self._cfgdb_map = CacheContainer(_vnc_connection_cache_size) \
if _vnc_connection_cache_size > 0 else dict()
global LOG
LOG = sandesh.logger()
self.logger = LOG
def _connect_to_db(self):
"""
        There may be many plugin instantiations (base + extensions), but they
        must share a single config DB connection (otherwise ifmap-server
        reports an error).
"""
if self._cfgdb is None:
# Initialize connection to DB and add default entries
exts_enabled = self._contrail_extensions_enabled
apply_sn_route = self._sn_host_route
self._cfgdb = DBInterface(self,
self._auth_user,
self._auth_passwd,
self._auth_tenant,
self._vnc_api_ip,
self._vnc_api_port,
contrail_extensions_enabled=exts_enabled,
list_optimization_enabled=\
self._list_optimization_enabled,
apply_subnet_host_routes=apply_sn_route)
#end _connect_to_db
def _get_user_cfgdb(self, context):
"""
        Send the admin token if multi-tenancy is disabled; otherwise forward
        the user token for RBAC when the Neutron plugin supplies one.
"""
self._connect_to_db()
user_token = bottle.request.headers.get('X_AUTH_TOKEN')
if self._multi_tenancy and user_token:
self._cfgdb._vnc_lib.set_auth_token(user_token)
return self._cfgdb
    def _get_requests_data(self):
        ctype = bottle.request.headers['content-type']
        try:
            if 'application/json' in ctype:
                req = bottle.request.json
                return req['context'], req['data']
        except Exception as e:
            bottle.abort(400, 'Unable to parse request data')
        # Non-JSON content types would otherwise fall through and return None.
        bottle.abort(400, 'Unsupported content type, expecting application/json')
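    # For reference, a hedged sketch of the request body this method expects
    # (field values are hypothetical; the exact keys inside "data" vary by
    # resource and operation, e.g. 'id', 'fields', 'filters', 'resource'):
    #
    #   {
    #       "context": {"operation": "READ", ...},
    #       "data": {"id": "<uuid>", "fields": null, "filters": {}, "resource": {}}
    #   }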
# Network API Handling
def plugin_get_network(self, context, network):
"""
Network get request
"""
fields = network['fields']
try:
cfgdb = self._get_user_cfgdb(context)
net_info = cfgdb.network_read(network['id'], fields)
return net_info
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_create_network(self, context, network):
"""
Network create request
"""
try:
cfgdb = self._get_user_cfgdb(context)
net_info = cfgdb.network_create(network['resource'])
return net_info
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_update_network(self, context, network):
"""
Network update request
"""
try:
cfgdb = self._get_user_cfgdb(context)
net_info = cfgdb.network_update(network['id'],
network['resource'])
return net_info
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_delete_network(self, context, network):
"""
Network delete request
"""
try:
cfgdb = self._get_user_cfgdb(context)
cfgdb.network_delete(network['id'])
LOG.debug("plugin_delete_network(): " + pformat(network['id']))
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_get_networks(self, context, network):
"""
Networks get request
"""
filters = network['filters']
try:
cfgdb = self._get_user_cfgdb(context)
nets_info = cfgdb.network_list(context, filters)
return json.dumps(nets_info)
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_get_networks_count(self, context, network):
"""
Networks count request
"""
filters = network['filters']
try:
cfgdb = self._get_user_cfgdb(context)
nets_count = cfgdb.network_count(filters)
LOG.debug("plugin_get_networks_count(): filters: "
+ pformat(filters) + " data: " + str(nets_count))
return {'count': nets_count}
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_http_post_network(self):
"""
Bottle callback for Network POST
"""
context, network = self._get_requests_data()
if context['operation'] == 'READ':
return self.plugin_get_network(context, network)
elif context['operation'] == 'CREATE':
return self.plugin_create_network(context, network)
elif context['operation'] == 'UPDATE':
return self.plugin_update_network(context, network)
elif context['operation'] == 'DELETE':
return self.plugin_delete_network(context, network)
elif context['operation'] == 'READALL':
return self.plugin_get_networks(context, network)
elif context['operation'] == 'READCOUNT':
return self.plugin_get_networks_count(context, network)
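    # Illustration only (hypothetical wiring; in practice the API server, not
    # this class, registers the routes): a plugin_http_post_* callback is
    # attached to a bottle route roughly like
    #
    #   bottle.route('/neutron/network', 'POST', plugin.plugin_http_post_network)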
def _make_subnet_dict(self, subnet):
res = {'id': subnet['id'],
'name': subnet['name'],
'tenant_id': subnet['tenant_id'],
'network_id': subnet['network_id'],
'ip_version': subnet['ip_version'],
'cidr': subnet['cidr'],
'allocation_pools': [{'start': pool['first_ip'],
'end': pool['last_ip']}
for pool in subnet['allocation_pools']],
'gateway_ip': subnet['gateway_ip'],
'enable_dhcp': subnet['enable_dhcp'],
'ipv6_ra_mode': subnet['ipv6_ra_mode'],
'ipv6_address_mode': subnet['ipv6_address_mode'],
'dns_nameservers': [dns['address']
for dns in subnet['dns_nameservers']],
'host_routes': [{'destination': route['destination'],
'nexthop': route['nexthop']}
for route in subnet['routes']],
'shared': subnet['shared']
}
return res
# Subnet API Handling
def plugin_get_subnet(self, context, subnet):
"""
Subnet get request
"""
try:
cfgdb = self._get_user_cfgdb(context)
subnet_info = cfgdb.subnet_read(subnet['id'])
return self._make_subnet_dict(subnet_info)
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_create_subnet(self, context, subnet):
"""
Subnet create request
"""
try:
cfgdb = self._get_user_cfgdb(context)
subnet_info = cfgdb.subnet_create(subnet['resource'])
return self._make_subnet_dict(subnet_info)
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_update_subnet(self, context, subnet):
"""
Subnet update request
"""
try:
cfgdb = self._get_user_cfgdb(context)
subnet_info = cfgdb.subnet_update(subnet['id'],
subnet['resource'])
return self._make_subnet_dict(subnet_info)
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_delete_subnet(self, context, subnet):
"""
Subnet delete request
"""
try:
cfgdb = self._get_user_cfgdb(context)
cfgdb.subnet_delete(subnet['id'])
LOG.debug("plugin_delete_subnet(): " + pformat(subnet['id']))
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_get_subnets(self, context, subnet):
"""
Subnets get request
"""
filters = subnet['filters']
try:
cfgdb = self._get_user_cfgdb(context)
subnets_info = cfgdb.subnets_list(context, filters)
return json.dumps([self._make_subnet_dict(i) for i in subnets_info])
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_get_subnets_count(self, context, subnet):
"""
Subnets count request
"""
filters = subnet['filters']
try:
cfgdb = self._get_user_cfgdb(context)
subnets_count = cfgdb.subnets_count(context, filters)
LOG.debug("plugin_get_subnets_count(): filters: "
+ pformat(filters) + " data: " + str(subnets_count))
return {'count': subnets_count}
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_http_post_subnet(self):
"""
Bottle callback for Subnet POST
"""
context, subnet = self._get_requests_data()
if context['operation'] == 'READ':
return self.plugin_get_subnet(context, subnet)
elif context['operation'] == 'CREATE':
return self.plugin_create_subnet(context, subnet)
elif context['operation'] == 'UPDATE':
return self.plugin_update_subnet(context, subnet)
elif context['operation'] == 'DELETE':
return self.plugin_delete_subnet(context, subnet)
elif context['operation'] == 'READALL':
return self.plugin_get_subnets(context, subnet)
elif context['operation'] == 'READCOUNT':
return self.plugin_get_subnets_count(context, subnet)
# Port API Handling
def plugin_get_port(self, context, port):
"""
Port get request
"""
try:
cfgdb = self._get_user_cfgdb(context)
port_info = cfgdb.port_read(port['id'])
return port_info
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_create_port(self, context, port):
"""
Port create request
"""
try:
cfgdb = self._get_user_cfgdb(context)
net_info = cfgdb.port_create(context, port['resource'])
return net_info
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_update_port(self, context, port):
"""
Port update request
"""
try:
cfgdb = self._get_user_cfgdb(context)
net_info = cfgdb.port_update(port['id'],
port['resource'])
return net_info
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_delete_port(self, context, port):
"""
Port delete request
"""
try:
cfgdb = self._get_user_cfgdb(context)
cfgdb.port_delete(port['id'])
LOG.debug("plugin_delete_port(): " + pformat(port['id']))
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_get_ports(self, context, port):
"""
Ports get request
"""
filters = port['filters']
try:
cfgdb = self._get_user_cfgdb(context)
ports_info = cfgdb.port_list(context, filters)
return json.dumps(ports_info)
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_get_ports_count(self, context, port):
"""
Ports count request
"""
filters = port['filters']
try:
cfgdb = self._get_user_cfgdb(context)
ports_count = cfgdb.port_count(filters)
LOG.debug("plugin_get_ports_count(): filters: "
+ pformat(filters) + " data: " + str(ports_count))
return {'count': ports_count}
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_http_post_port(self):
"""
Bottle callback for Port POST
"""
context, port = self._get_requests_data()
if context['operation'] == 'READ':
return self.plugin_get_port(context, port)
elif context['operation'] == 'CREATE':
return self.plugin_create_port(context, port)
elif context['operation'] == 'UPDATE':
return self.plugin_update_port(context, port)
elif context['operation'] == 'DELETE':
return self.plugin_delete_port(context, port)
elif context['operation'] == 'READALL':
return self.plugin_get_ports(context, port)
elif context['operation'] == 'READCOUNT':
return self.plugin_get_ports_count(context, port)
# Floating IP API Handling
def plugin_get_floatingip(self, context, floatingip):
"""
Floating IP get request
"""
try:
cfgdb = self._get_user_cfgdb(context)
fip_info = cfgdb.floatingip_read(floatingip['id'])
return fip_info
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_create_floatingip(self, context, floatingip):
"""
Floating IP create request
"""
try:
cfgdb = self._get_user_cfgdb(context)
net_info = cfgdb.floatingip_create(context, floatingip['resource'])
return net_info
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_update_floatingip(self, context, floatingip):
"""
Floating IP update request
"""
try:
cfgdb = self._get_user_cfgdb(context)
floatingip_info = cfgdb.floatingip_update(context, floatingip['id'],
floatingip['resource'])
return floatingip_info
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_delete_floatingip(self, context, floatingip):
"""
Floating IP delete request
"""
try:
cfgdb = self._get_user_cfgdb(context)
cfgdb.floatingip_delete(floatingip['id'])
LOG.debug("plugin_delete_floatingip(): " +
pformat(floatingip['id']))
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_get_floatingips(self, context, floatingip):
"""
Floating IPs get request
"""
filters = floatingip['filters']
try:
cfgdb = self._get_user_cfgdb(context)
floatingips_info = cfgdb.floatingip_list(context, filters)
return json.dumps(floatingips_info)
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_get_floatingips_count(self, context, floatingip):
"""
Floating IPs count request
"""
filters = floatingip['filters']
try:
cfgdb = self._get_user_cfgdb(context)
floatingips_count = cfgdb.floatingip_count(context, filters)
LOG.debug("plugin_get_floatingips_count(): filters: "
+ pformat(filters) + " data: " + str(floatingips_count))
return {'count': floatingips_count}
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_http_post_floatingip(self):
"""
Bottle callback for Floating IP POST
"""
context, floatingip = self._get_requests_data()
if context['operation'] == 'READ':
return self.plugin_get_floatingip(context, floatingip)
elif context['operation'] == 'CREATE':
return self.plugin_create_floatingip(context, floatingip)
elif context['operation'] == 'UPDATE':
return self.plugin_update_floatingip(context, floatingip)
elif context['operation'] == 'DELETE':
return self.plugin_delete_floatingip(context, floatingip)
elif context['operation'] == 'READALL':
return self.plugin_get_floatingips(context, floatingip)
elif context['operation'] == 'READCOUNT':
return self.plugin_get_floatingips_count(context, floatingip)
# Security Group API Handling
def plugin_get_sec_group(self, context, sg):
"""
Security group get request
"""
try:
cfgdb = self._get_user_cfgdb(context)
sg_info = cfgdb.security_group_read(sg['id'])
return sg_info
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_create_sec_group(self, context, sg):
"""
Security group create request
"""
try:
cfgdb = self._get_user_cfgdb(context)
sg_info = cfgdb.security_group_create(sg['resource'])
return sg_info
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_update_sec_group(self, context, sg):
"""
Security group update request
"""
try:
cfgdb = self._get_user_cfgdb(context)
sg_info = cfgdb.security_group_update(sg['id'],
sg['resource'])
return sg_info
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_delete_sec_group(self, context, sg):
"""
Security group delete request
"""
try:
cfgdb = self._get_user_cfgdb(context)
cfgdb.security_group_delete(context, sg['id'])
LOG.debug("plugin_delete_sec_group(): " + pformat(sg['id']))
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_get_sec_groups(self, context, sg):
"""
Security groups get request
"""
filters = sg['filters']
try:
cfgdb = self._get_user_cfgdb(context)
sgs_info = cfgdb.security_group_list(context, filters)
return json.dumps(sgs_info)
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_http_post_securitygroup(self):
"""
Bottle callback for Security Group POST
"""
context, sg = self._get_requests_data()
if context['operation'] == 'READ':
return self.plugin_get_sec_group(context, sg)
elif context['operation'] == 'CREATE':
return self.plugin_create_sec_group(context, sg)
elif context['operation'] == 'UPDATE':
return self.plugin_update_sec_group(context, sg)
elif context['operation'] == 'DELETE':
return self.plugin_delete_sec_group(context, sg)
elif context['operation'] == 'READALL':
return self.plugin_get_sec_groups(context, sg)
def plugin_get_sec_group_rule(self, context, sg_rule):
"""
Security group rule get request
"""
try:
cfgdb = self._get_user_cfgdb(context)
sg_rule_info = cfgdb.security_group_rule_read(context,
sg_rule['id'])
return sg_rule_info
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_create_sec_group_rule(self, context, sg_rule):
"""
Security group rule create request
"""
try:
cfgdb = self._get_user_cfgdb(context)
sg_rule_info = cfgdb.security_group_rule_create(sg_rule['resource'])
return sg_rule_info
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_delete_sec_group_rule(self, context, sg_rule):
"""
Security group rule delete request
"""
try:
cfgdb = self._get_user_cfgdb(context)
cfgdb.security_group_rule_delete(context, sg_rule['id'])
LOG.debug("plugin_delete_sec_group_rule(): " +
pformat(sg_rule['id']))
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_get_sec_group_rules(self, context, sg_rule):
"""
Security group rules get request
"""
filters = sg_rule['filters']
try:
cfgdb = self._get_user_cfgdb(context)
sg_rules_info = cfgdb.security_group_rule_list(context, filters)
return json.dumps(sg_rules_info)
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_http_post_securitygrouprule(self):
"""
Bottle callback for sec_group_rule POST
"""
context, sg_rule = self._get_requests_data()
if context['operation'] == 'READ':
return self.plugin_get_sec_group_rule(context, sg_rule)
elif context['operation'] == 'CREATE':
return self.plugin_create_sec_group_rule(context, sg_rule)
elif context['operation'] == 'UPDATE':
return self.plugin_update_sec_group_rule(context, sg_rule)
elif context['operation'] == 'DELETE':
return self.plugin_delete_sec_group_rule(context, sg_rule)
elif context['operation'] == 'READALL':
return self.plugin_get_sec_group_rules(context, sg_rule)
elif context['operation'] == 'READCOUNT':
return self.plugin_get_sec_group_rules_count(context, sg_rule)
# Router IP API Handling
def plugin_get_router(self, context, router):
"""
Router get request
"""
try:
cfgdb = self._get_user_cfgdb(context)
router_info = cfgdb.router_read(router['id'])
return router_info
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_create_router(self, context, router):
"""
Router create request
"""
try:
cfgdb = self._get_user_cfgdb(context)
router_info = cfgdb.router_create(router['resource'])
return router_info
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_update_router(self, context, router):
"""
Router update request
"""
try:
cfgdb = self._get_user_cfgdb(context)
router_info = cfgdb.router_update(router['id'],
router['resource'])
return router_info
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_delete_router(self, context, router):
"""
Router delete request
"""
try:
cfgdb = self._get_user_cfgdb(context)
cfgdb.router_delete(router['id'])
LOG.debug("plugin_delete_router(): " +
pformat(router['id']))
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_get_routers(self, context, router):
"""
Routers get request
"""
filters = router['filters']
try:
cfgdb = self._get_user_cfgdb(context)
routers_info = cfgdb.router_list(context, filters)
return json.dumps(routers_info)
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_get_routers_count(self, context, router):
"""
Routers count request
"""
filters = router['filters']
try:
cfgdb = self._get_user_cfgdb(context)
routers_count = cfgdb.router_count(filters)
LOG.debug("plugin_get_routers_count(): filters: "
+ pformat(filters) + " data: " + str(routers_count))
return {'count': routers_count}
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_add_router_interface(self, context, interface_info):
"""
Add interface to a router
"""
try:
cfgdb = self._get_user_cfgdb(context)
router_id = interface_info['id']
if 'port_id' in interface_info['resource']:
port_id = interface_info['resource']['port_id']
return cfgdb.add_router_interface(context, router_id, port_id=port_id)
elif 'subnet_id' in interface_info['resource']:
subnet_id = interface_info['resource']['subnet_id']
return cfgdb.add_router_interface(context, router_id,
subnet_id=subnet_id)
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_del_router_interface(self, context, interface_info):
"""
Delete interface from a router
"""
try:
cfgdb = self._get_user_cfgdb(context)
router_id = interface_info['id']
if 'port_id' in interface_info['resource']:
port_id = interface_info['resource']['port_id']
return cfgdb.remove_router_interface(router_id,
port_id=port_id)
elif 'subnet_id' in interface_info['resource']:
subnet_id = interface_info['resource']['subnet_id']
return cfgdb.remove_router_interface(router_id,
subnet_id=subnet_id)
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_http_post_router(self):
"""
Bottle callback for Router POST
"""
context, router = self._get_requests_data()
if context['operation'] == 'READ':
return self.plugin_get_router(context, router)
elif context['operation'] == 'CREATE':
return self.plugin_create_router(context, router)
elif context['operation'] == 'UPDATE':
return self.plugin_update_router(context, router)
elif context['operation'] == 'DELETE':
return self.plugin_delete_router(context, router)
elif context['operation'] == 'READALL':
return self.plugin_get_routers(context, router)
elif context['operation'] == 'READCOUNT':
return self.plugin_get_routers_count(context, router)
elif context['operation'] == 'ADDINTERFACE':
return self.plugin_add_router_interface(context, router)
elif context['operation'] == 'DELINTERFACE':
return self.plugin_del_router_interface(context, router)
# IPAM API Handling
def plugin_get_ipam(self, context, ipam):
"""
IPAM get request
"""
try:
cfgdb = self._get_user_cfgdb(context)
ipam_info = cfgdb.ipam_read(ipam['id'])
return ipam_info
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_create_ipam(self, context, ipam):
"""
IPAM create request
"""
try:
cfgdb = self._get_user_cfgdb(context)
ipam_info = cfgdb.ipam_create(ipam['resource'])
return ipam_info
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_update_ipam(self, context, ipam):
"""
IPAM update request
"""
try:
cfgdb = self._get_user_cfgdb(context)
ipam_info = cfgdb.ipam_update(ipam['id'],
ipam['resource'])
return ipam_info
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_delete_ipam(self, context, ipam):
"""
IPAM delete request
"""
try:
cfgdb = self._get_user_cfgdb(context)
cfgdb.ipam_delete(ipam['id'])
LOG.debug("plugin_delete_ipam(): " +
pformat(ipam['id']))
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_get_ipams(self, context, ipam):
"""
        IPAMs get request
"""
filters = ipam['filters']
try:
cfgdb = self._get_user_cfgdb(context)
ipams_info = cfgdb.ipam_list(context, filters)
return json.dumps(ipams_info)
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_get_ipams_count(self, context, ipam):
"""
IPAM count request
"""
filters = ipam['filters']
try:
cfgdb = self._get_user_cfgdb(context)
ipams_count = cfgdb.ipam_count(context, filters)
LOG.debug("plugin_get_ipams_count(): filters: "
+ pformat(filters) + " data: " + str(ipams_count))
return {'count': ipams_count}
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_http_post_ipam(self):
"""
Bottle callback for IPAM POST
"""
context, ipam = self._get_requests_data()
if context['operation'] == 'READ':
return self.plugin_get_ipam(context, ipam)
elif context['operation'] == 'CREATE':
return self.plugin_create_ipam(context, ipam)
elif context['operation'] == 'UPDATE':
return self.plugin_update_ipam(context, ipam)
elif context['operation'] == 'DELETE':
return self.plugin_delete_ipam(context, ipam)
elif context['operation'] == 'READALL':
return self.plugin_get_ipams(context, ipam)
elif context['operation'] == 'READCOUNT':
return self.plugin_get_ipams_count(context, ipam)
# Policy IP API Handling
def plugin_get_policy(self, context, policy):
"""
Policy get request
"""
try:
cfgdb = self._get_user_cfgdb(context)
pol_info = cfgdb.policy_read(policy['id'])
return pol_info
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_create_policy(self, context, policy):
"""
Policy create request
"""
try:
cfgdb = self._get_user_cfgdb(context)
pol_info = cfgdb.policy_create(policy['resource'])
return pol_info
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_update_policy(self, context, policy):
"""
Policy update request
"""
try:
cfgdb = self._get_user_cfgdb(context)
policy_info = cfgdb.policy_update(policy['id'],
policy['resource'])
return policy_info
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_delete_policy(self, context, policy):
"""
Policy delete request
"""
try:
cfgdb = self._get_user_cfgdb(context)
cfgdb.policy_delete(policy['id'])
LOG.debug("plugin_delete_policy(): " +
pformat(policy['id']))
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_get_policys(self, context, policy):
"""
        Policies get request
"""
filters = policy['filters']
try:
cfgdb = self._get_user_cfgdb(context)
policys_info = cfgdb.policy_list(context, filters)
return json.dumps(policys_info)
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_get_policys_count(self, context, policy):
"""
        Policies count request
"""
filters = policy['filters']
try:
cfgdb = self._get_user_cfgdb(context)
policys_count = cfgdb.policy_count(context, filters)
LOG.debug("plugin_get_policys_count(): filters: "
+ pformat(filters) + " data: " + str(policys_count))
return {'count': policys_count}
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_http_post_policy(self):
"""
Bottle callback for Policy POST
"""
context, policy = self._get_requests_data()
if context['operation'] == 'READ':
return self.plugin_get_policy(context, policy)
elif context['operation'] == 'CREATE':
return self.plugin_create_policy(context, policy)
elif context['operation'] == 'UPDATE':
return self.plugin_update_policy(context, policy)
elif context['operation'] == 'DELETE':
return self.plugin_delete_policy(context, policy)
elif context['operation'] == 'READALL':
return self.plugin_get_policys(context, policy)
elif context['operation'] == 'READCOUNT':
return self.plugin_get_policys_count(context, policy)
def plugin_get_route_table(self, context, route_table):
"""
Route table get request
"""
try:
cfgdb = self._get_user_cfgdb(context)
rt_info = cfgdb.route_table_read(route_table['id'])
return rt_info
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_create_route_table(self, context, route_table):
"""
Route table create request
"""
try:
cfgdb = self._get_user_cfgdb(context)
rt_info = cfgdb.route_table_create(route_table['resource'])
return rt_info
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_update_route_table(self, context, route_table):
"""
Route table update request
"""
try:
cfgdb = self._get_user_cfgdb(context)
rt_info = cfgdb.route_table_update(route_table['id'],
route_table['resource'])
return rt_info
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_delete_route_table(self, context, route_table):
"""
Route table delete request
"""
try:
cfgdb = self._get_user_cfgdb(context)
cfgdb.route_table_delete(route_table['id'])
LOG.debug("plugin_delete_route_table(): " +
pformat(route_table['id']))
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_get_route_tables(self, context, route_table):
"""
Route Tables get request
"""
filters = route_table['filters']
try:
cfgdb = self._get_user_cfgdb(context)
rts_info = cfgdb.route_table_list(context, filters)
return json.dumps(rts_info)
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_get_route_tables_count(self, context, route_table):
"""
Route Tables count request
"""
filters = route_table['filters']
try:
cfgdb = self._get_user_cfgdb(context)
rts_count = cfgdb.route_table_count(filters)
LOG.debug("plugin_get_route_tables_count(): filters: "
+ pformat(filters) + " data: " + str(rts_count))
return {'count': rts_count}
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_http_post_route_table(self):
"""
Bottle callback for Route-table POST
"""
context, route_table = self._get_requests_data()
if context['operation'] == 'READ':
return self.plugin_get_route_table(context, route_table)
elif context['operation'] == 'CREATE':
return self.plugin_create_route_table(context, route_table)
elif context['operation'] == 'UPDATE':
return self.plugin_update_route_table(context, route_table)
elif context['operation'] == 'DELETE':
return self.plugin_delete_route_table(context, route_table)
elif context['operation'] == 'READALL':
return self.plugin_get_route_tables(context, route_table)
elif context['operation'] == 'READCOUNT':
return self.plugin_get_route_tables_count(context, route_table)
def plugin_get_svc_instance(self, context, svc_instance):
"""
Service instance get request
"""
try:
cfgdb = self._get_user_cfgdb(context)
si_info = cfgdb.svc_instance_read(svc_instance['id'])
return si_info
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_create_svc_instance(self, context, svc_instance):
"""
Service instance create request
"""
try:
cfgdb = self._get_user_cfgdb(context)
si_info = cfgdb.svc_instance_create(svc_instance['resource'])
return si_info
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_delete_svc_instance(self, context, svc_instance):
"""
Service instance delete request
"""
try:
cfgdb = self._get_user_cfgdb(context)
cfgdb.svc_instance_delete(svc_instance['id'])
LOG.debug("plugin_delete_svc_instance(): " +
pformat(svc_instance['id']))
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_get_svc_instances(self, context, svc_instance):
"""
        Service instances get request
"""
filters = svc_instance['filters']
try:
cfgdb = self._get_user_cfgdb(context)
sis_info = cfgdb.svc_instance_list(context, filters)
return json.dumps(sis_info)
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise
def plugin_http_post_svc_instance(self):
"""
        Bottle callback for Service Instance POST
"""
context, svc_instance = self._get_requests_data()
if context['operation'] == 'READ':
return self.plugin_get_svc_instance(context, svc_instance)
elif context['operation'] == 'CREATE':
return self.plugin_create_svc_instance(context, svc_instance)
elif context['operation'] == 'DELETE':
return self.plugin_delete_svc_instance(context, svc_instance)
elif context['operation'] == 'READALL':
return self.plugin_get_svc_instances(context, svc_instance)
|
|
import json
import unittest
from libsaas import http, port
from libsaas.executors import test_executor
from libsaas.services import base, desk
class DeskTestCase(unittest.TestCase):
def setUp(self):
self.executor = test_executor.use()
self.executor.set_response(b'{}', 200, {})
self.service = desk.Desk('domain', 'key', 'secret',
'token', 'token_secret')
def expect(self, method=None, uri=None, params=None, headers=None):
if method is not None:
self.assertEqual(method, self.executor.request.method)
if uri is not None:
self.assertEqual(self.executor.request.uri,
'https://domain.desk.com/api/v2' + uri)
if params is not None:
params = (json.dumps(params)
if method not in http.URLENCODE_METHODS else params)
self.assertEqual(self.executor.request.params, params)
if headers is not None:
self.assertEqual(self.executor.request.headers, headers)
def test_cases(self):
paging = {'page': 1, 'per_page': 5}
self.service.cases().get()
self.expect('GET', '/cases')
self.service.cases().get(page=1, per_page=5)
self.expect('GET', '/cases', paging)
self.service.case(10).get()
self.expect('GET', '/cases/10')
self.service.case(10, is_external=True).get()
self.expect('GET', '/cases/e-10')
self.service.case(10).update({'foo': 'bar'})
self.expect('PATCH', '/cases/10', {'foo': 'bar'})
with port.assertRaises(base.MethodNotSupported):
self.service.cases().create()
self.service.cases().update()
self.service.cases().delete()
self.service.case(10).delete()
def test_companies(self):
obj = {'name': 'test'}
paging = {'page': 1, 'per_page': 5}
self.service.companies().get(per_page=5, page=1)
self.expect('GET', '/companies', paging)
self.service.company(4).get()
self.expect('GET', '/companies/4')
self.service.company(4).update(obj)
self.expect('PATCH', '/companies/4', obj)
self.service.companies().create(obj)
self.expect('POST', '/companies', obj)
self.service.companies().search('foo')
self.expect('GET', '/companies/search', {'q': 'foo'})
        self.assertRaises(base.MethodNotSupported,
                          self.service.company(4).delete)
def test_customers(self):
obj = {'email': 'test@test.com'}
paging = {'page': 1, 'per_page': 5}
self.service.customers().get(per_page=5, page=1)
self.expect('GET', '/customers', paging)
self.service.customer(4).get()
self.expect('GET', '/customers/4')
self.service.customer(4).update(obj)
self.expect('PATCH', '/customers/4', obj)
self.service.customers().create(obj)
self.expect('POST', '/customers', obj)
self.assertRaises(base.MethodNotSupported,
self.service.customer(4).delete)
def test_insights(self):
self.service.insights().meta()
self.expect('GET', '/insights/meta')
self.service.insights().report(metrics='1,2,3')
self.expect('POST', '/insights/reports', {'metrics': '1,2,3'})
def test_groups(self):
paging = {'page': 1, 'per_page': 5}
self.service.groups().get()
self.expect('GET', '/groups')
self.service.groups().get(page=1, per_page=5)
self.expect('GET', '/groups', paging)
self.service.group(10).get()
self.expect('GET', '/groups/10')
self.assertRaises(base.MethodNotSupported,
self.service.groups().create, {'foo': 'bar'})
self.assertRaises(base.MethodNotSupported,
self.service.group(10).delete)
self.assertRaises(base.MethodNotSupported,
self.service.group(10).update, {})
def test_users(self):
paging = {'page': 1, 'per_page': 5}
self.service.users().get()
self.expect('GET', '/users')
self.service.users().get(page=1, per_page=5)
self.expect('GET', '/users', paging)
self.service.user(10).get()
self.expect('GET', '/users/10')
self.assertRaises(base.MethodNotSupported,
self.service.users().create, {'foo': 'bar'})
self.assertRaises(base.MethodNotSupported,
self.service.user(10).delete)
self.assertRaises(base.MethodNotSupported,
self.service.user(10).update, {})
def test_topics(self):
obj = {'subject': 'test'}
paging = {'page': 1, 'per_page': 5}
self.service.topics().get(per_page=5, page=1)
self.expect('GET', '/topics', paging)
self.service.topic(4).get()
self.expect('GET', '/topics/4')
self.service.topic(4).update(obj)
self.expect('PATCH', '/topics/4', obj)
self.service.topics().create(obj)
self.expect('POST', '/topics', obj)
self.service.topic(4).delete()
self.expect('DELETE', '/topics/4')
self.service.topic(4).articles().get(per_page=5, page=1)
self.expect('GET', '/topics/4/articles', paging)
self.service.topic(4).articles().create(obj)
self.expect('POST', '/topics/4/articles', obj)
self.service.article(4).update(obj)
self.expect('PATCH', '/articles/4', obj)
self.service.article(4).get()
self.expect('GET', '/articles/4')
self.service.article(4).delete()
self.expect('DELETE', '/articles/4')
def test_macros(self):
obj = {'foo': 'bar'}
paging = {'page': 1, 'per_page': 5}
self.service.macros().get(per_page=5, page=1)
self.expect('GET', '/macros', paging)
self.service.macro(4).get()
self.expect('GET', '/macros/4')
self.service.macro(4).update(obj)
self.expect('PATCH', '/macros/4', obj)
self.service.macros().create(obj)
self.expect('POST', '/macros', obj)
self.service.macro(4).delete()
self.expect('DELETE', '/macros/4')
self.service.macro(4).actions().get(per_page=5, page=1)
self.expect('GET', '/macros/4/actions', paging)
self.service.macro(4).action(1).get()
self.expect('GET', '/macros/4/actions/1')
self.service.macro(4).action(1).update(obj)
self.expect('PATCH', '/macros/4/actions/1', obj)
self.assertRaises(base.MethodNotSupported,
self.service.macro(4).action(1).delete)
def test_full_domain(self):
service = desk.Desk('support.domain.com', 'key', 'secret',
'token', 'token_secret')
service.users().get()
self.assertEqual(self.executor.request.uri,
'https://support.domain.com/api/v2/users')
|
|
# Copyright (C) 2018 and later: Unicode, Inc. and others.
# License & terms of use: http://www.unicode.org/copyright.html
# Python 2/3 Compatibility (ICU-20299)
# TODO(ICU-20301): Remove this.
from __future__ import print_function
from abc import abstractmethod
import copy
import sys
from . import *
from . import utils
# TODO(ICU-20301): Remove arguments from all instances of super() in this file
# Note: for this to be a proper abstract class, it should extend abc.ABC.
# There is no nice way to do this that works in both Python 2 and 3.
# TODO(ICU-20301): Make this inherit from abc.ABC.
class AbstractRequest(object):
def __init__(self, **kwargs):
# Used for identification purposes
self.name = None
# The filter category that applies to this request
self.category = None
self._set_fields(kwargs)
def _set_fields(self, kwargs):
for key, value in list(kwargs.items()):
if hasattr(self, key):
if isinstance(value, list):
value = copy.copy(value)
elif isinstance(value, dict):
value = copy.deepcopy(value)
setattr(self, key, value)
else:
raise ValueError("Unknown argument: %s" % key)
def apply_file_filter(self, filter):
"""
Returns True if this request still has input files after filtering,
or False if the request is "empty" after filtering.
"""
return True
def flatten(self, config, all_requests, common_vars):
return [self]
def all_input_files(self):
return []
def all_output_files(self):
return []
class AbstractExecutionRequest(AbstractRequest):
def __init__(self, **kwargs):
# Names of targets (requests) or files that this request depends on.
# The entries of dep_targets may be any of the following types:
#
# 1. DepTarget, for the output of an execution request.
# 2. InFile, TmpFile, etc., for a specific file.
# 3. A list of InFile, TmpFile, etc., where the list is the same
# length as self.input_files and self.output_files.
#
# In cases 1 and 2, the dependency is added to all rules that the
# request generates. In case 3, the dependency is added only to the
# rule that generates the output file at the same array index.
self.dep_targets = []
# Computed during self.flatten(); don't edit directly.
self.common_dep_files = []
# Primary input files
self.input_files = []
# Output files; for some subclasses, this must be the same length
# as input_files
self.output_files = []
# What tool to execute
self.tool = None
# Argument string to pass to the tool with optional placeholders
self.args = ""
# Placeholders to substitute into the argument string; if any of these
# have a list type, the list must be equal in length to input_files
self.format_with = {}
super(AbstractExecutionRequest, self).__init__(**kwargs)
def apply_file_filter(self, filter):
i = 0
while i < len(self.input_files):
if filter.match(self.input_files[i]):
i += 1
continue
self._del_at(i)
return i > 0
def _del_at(self, i):
del self.input_files[i]
for _, v in self.format_with.items():
if isinstance(v, list):
assert len(v) == len(self.input_files) + 1
del v[i]
for v in self.dep_targets:
if isinstance(v, list):
assert len(v) == len(self.input_files) + 1
del v[i]
def flatten(self, config, all_requests, common_vars):
self._dep_targets_to_files(all_requests)
return super(AbstractExecutionRequest, self).flatten(config, all_requests, common_vars)
def _dep_targets_to_files(self, all_requests):
if not self.dep_targets:
return
for dep_target in self.dep_targets:
if isinstance(dep_target, list):
if hasattr(self, "specific_dep_files"):
assert len(dep_target) == len(self.specific_dep_files)
for file, out_list in zip(dep_target, self.specific_dep_files):
assert hasattr(file, "filename")
out_list.append(file)
else:
self.common_dep_files += dep_target
continue
if not isinstance(dep_target, DepTarget):
# Copy file entries directly to dep_files.
assert hasattr(dep_target, "filename")
self.common_dep_files.append(dep_target)
continue
# For DepTarget entries, search for the target.
for request in all_requests:
if request.name == dep_target.name:
self.common_dep_files += request.all_output_files()
break
else:
print("Warning: Unable to find target %s, a dependency of %s" % (
dep_target.name,
self.name
), file=sys.stderr)
def all_input_files(self):
return self.common_dep_files + self.input_files
def all_output_files(self):
return self.output_files
class SingleExecutionRequest(AbstractExecutionRequest):
def __init__(self, **kwargs):
super(SingleExecutionRequest, self).__init__(**kwargs)
class RepeatedExecutionRequest(AbstractExecutionRequest):
def __init__(self, **kwargs):
# Placeholders to substitute into the argument string unique to each
# iteration; all values must be lists equal in length to input_files
self.repeat_with = {}
# Lists for dep files that are specific to individual resource bundle files
self.specific_dep_files = [[] for _ in range(len(kwargs["input_files"]))]
super(RepeatedExecutionRequest, self).__init__(**kwargs)
def _del_at(self, i):
super(RepeatedExecutionRequest, self)._del_at(i)
del self.output_files[i]
for _, v in self.repeat_with.items():
if isinstance(v, list):
del v[i]
def all_input_files(self):
files = super(RepeatedExecutionRequest, self).all_input_files()
for specific_file_list in self.specific_dep_files:
files += specific_file_list
return files
class RepeatedOrSingleExecutionRequest(AbstractExecutionRequest):
def __init__(self, **kwargs):
self.repeat_with = {}
super(RepeatedOrSingleExecutionRequest, self).__init__(**kwargs)
def flatten(self, config, all_requests, common_vars):
if config.max_parallel:
new_request = RepeatedExecutionRequest(
name = self.name,
category = self.category,
dep_targets = self.dep_targets,
input_files = self.input_files,
output_files = self.output_files,
tool = self.tool,
args = self.args,
format_with = self.format_with,
repeat_with = self.repeat_with
)
else:
new_request = SingleExecutionRequest(
name = self.name,
category = self.category,
dep_targets = self.dep_targets,
input_files = self.input_files,
output_files = self.output_files,
tool = self.tool,
args = self.args,
format_with = utils.concat_dicts(self.format_with, self.repeat_with)
)
return new_request.flatten(config, all_requests, common_vars)
def _del_at(self, i):
super(RepeatedOrSingleExecutionRequest, self)._del_at(i)
del self.output_files[i]
for _, v in self.repeat_with.items():
if isinstance(v, list):
del v[i]
class PrintFileRequest(AbstractRequest):
def __init__(self, **kwargs):
self.output_file = None
self.content = None
super(PrintFileRequest, self).__init__(**kwargs)
def all_output_files(self):
return [self.output_file]
class CopyRequest(AbstractRequest):
def __init__(self, **kwargs):
self.input_file = None
self.output_file = None
super(CopyRequest, self).__init__(**kwargs)
def all_input_files(self):
return [self.input_file]
def all_output_files(self):
return [self.output_file]
class VariableRequest(AbstractRequest):
def __init__(self, **kwargs):
self.input_files = []
super(VariableRequest, self).__init__(**kwargs)
def all_input_files(self):
return self.input_files
class ListRequest(AbstractRequest):
def __init__(self, **kwargs):
self.variable_name = None
self.output_file = None
self.include_tmp = None
super(ListRequest, self).__init__(**kwargs)
def flatten(self, config, all_requests, common_vars):
list_files = list(sorted(utils.get_all_output_files(all_requests)))
if self.include_tmp:
variable_files = list(sorted(utils.get_all_output_files(all_requests, include_tmp=True)))
else:
# Always include the list file itself
variable_files = list_files + [self.output_file]
return PrintFileRequest(
name = self.name,
output_file = self.output_file,
content = "\n".join(file.filename for file in list_files)
).flatten(config, all_requests, common_vars) + VariableRequest(
name = self.variable_name,
input_files = variable_files
).flatten(config, all_requests, common_vars)
def all_output_files(self):
return [self.output_file]
class IndexRequest(AbstractRequest):
def __init__(self, **kwargs):
self.input_files = []
self.txt_file = None
self.output_file = None
self.cldr_version = ""
self.args = ""
self.format_with = {}
super(IndexRequest, self).__init__(**kwargs)
def apply_file_filter(self, filter):
i = 0
while i < len(self.input_files):
if filter.match(self.input_files[i]):
i += 1
continue
del self.input_files[i]
return i > 0
def flatten(self, config, all_requests, common_vars):
return PrintFileRequest(
name = self.name,
output_file = self.txt_file,
content = self._generate_index_file(common_vars)
).flatten(config, all_requests, common_vars) + SingleExecutionRequest(
name = "%s_res" % self.name,
category = self.category,
input_files = [self.txt_file],
output_files = [self.output_file],
tool = IcuTool("genrb"),
args = self.args,
format_with = self.format_with
).flatten(config, all_requests, common_vars)
def _generate_index_file(self, common_vars):
locales = [f.filename[f.filename.rfind("/")+1:-4] for f in self.input_files]
formatted_version = " CLDRVersion { \"%s\" }\n" % self.cldr_version if self.cldr_version else ""
formatted_locales = "\n".join([" %s {\"\"}" % v for v in locales])
# TODO: CLDRVersion is required only in the base file
return ("// Warning this file is automatically generated\n"
"{INDEX_NAME}:table(nofallback) {{\n"
"{FORMATTED_VERSION}"
" InstalledLocales {{\n"
"{FORMATTED_LOCALES}\n"
" }}\n"
"}}").format(
FORMATTED_VERSION = formatted_version,
FORMATTED_LOCALES = formatted_locales,
**common_vars
)
def all_input_files(self):
return self.input_files
def all_output_files(self):
return [self.output_file]
|
|
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import sys
from neutronclient.common import exceptions
from neutronclient.neutron.v2_0.vpn import ipsec_site_connection
from neutronclient.tests.unit import test_cli20
class CLITestV20IPsecSiteConnectionJSON(test_cli20.CLITestV20Base):
# TODO(pcm): Remove, once peer-cidr is deprecated completely
def test_create_ipsec_site_connection_all_params_using_peer_cidrs(self):
# ipsecsite-connection-create all params using peer CIDRs.
resource = 'ipsec_site_connection'
cmd = ipsec_site_connection.CreateIPsecSiteConnection(
test_cli20.MyApp(sys.stdout), None
)
tenant_id = 'mytenant_id'
name = 'connection1'
my_id = 'my_id'
peer_address = '192.168.2.10'
peer_id = '192.168.2.10'
psk = 'abcd'
mtu = '1500'
initiator = 'bi-directional'
vpnservice_id = 'vpnservice_id'
ikepolicy_id = 'ikepolicy_id'
ipsecpolicy_id = 'ipsecpolicy_id'
peer_cidrs = ['192.168.3.0/24', '192.168.2.0/24']
admin_state = True
description = 'my-vpn-connection'
dpd = 'action=restart,interval=30,timeout=120'
args = ['--tenant-id', tenant_id,
'--peer-address', peer_address, '--peer-id', peer_id,
'--psk', psk, '--initiator', initiator,
'--vpnservice-id', vpnservice_id,
'--ikepolicy-id', ikepolicy_id, '--name', name,
'--ipsecpolicy-id', ipsecpolicy_id, '--mtu', mtu,
'--description', description,
'--peer-cidr', '192.168.3.0/24',
'--peer-cidr', '192.168.2.0/24',
'--dpd', dpd]
position_names = ['name', 'tenant_id', 'admin_state_up',
'peer_address', 'peer_id', 'peer_cidrs',
'psk', 'mtu', 'initiator', 'description',
'vpnservice_id', 'ikepolicy_id',
'ipsecpolicy_id']
position_values = [name, tenant_id, admin_state, peer_address,
peer_id, peer_cidrs, psk, mtu,
initiator, description,
vpnservice_id, ikepolicy_id, ipsecpolicy_id]
extra_body = {
'dpd': {
'action': 'restart',
'interval': 30,
'timeout': 120,
},
}
self._test_create_resource(resource, cmd, name, my_id, args,
position_names, position_values,
extra_body=extra_body)
def test_create_ipsec_site_conn_all_params(self):
# ipsecsite-connection-create all params using endpoint groups.
resource = 'ipsec_site_connection'
cmd = ipsec_site_connection.CreateIPsecSiteConnection(
test_cli20.MyApp(sys.stdout), None
)
tenant_id = 'mytenant_id'
name = 'connection1'
my_id = 'my_id'
peer_address = '192.168.2.10'
peer_id = '192.168.2.10'
psk = 'abcd'
mtu = '1500'
initiator = 'bi-directional'
vpnservice_id = 'vpnservice_id'
ikepolicy_id = 'ikepolicy_id'
ipsecpolicy_id = 'ipsecpolicy_id'
local_ep_group = 'local-epg'
peer_ep_group = 'peer-epg'
admin_state = True
description = 'my-vpn-connection'
dpd = 'action=restart,interval=30,timeout=120'
args = ['--tenant-id', tenant_id,
'--peer-address', peer_address, '--peer-id', peer_id,
'--psk', psk, '--initiator', initiator,
'--vpnservice-id', vpnservice_id,
'--ikepolicy-id', ikepolicy_id, '--name', name,
'--ipsecpolicy-id', ipsecpolicy_id, '--mtu', mtu,
'--description', description,
'--local-ep-group', local_ep_group,
'--peer-ep-group', peer_ep_group,
'--dpd', dpd]
position_names = ['name', 'tenant_id', 'admin_state_up',
'peer_address', 'peer_id', 'psk', 'mtu',
'local_ep_group_id', 'peer_ep_group_id',
'initiator', 'description',
'vpnservice_id', 'ikepolicy_id',
'ipsecpolicy_id']
position_values = [name, tenant_id, admin_state, peer_address,
peer_id, psk, mtu, local_ep_group,
peer_ep_group, initiator, description,
vpnservice_id, ikepolicy_id, ipsecpolicy_id]
extra_body = {
'dpd': {
'action': 'restart',
'interval': 30,
'timeout': 120,
},
}
self._test_create_resource(resource, cmd, name, my_id, args,
position_names, position_values,
extra_body=extra_body)
def test_create_ipsec_site_connection_with_limited_params(self):
# ipsecsite-connection-create with limited params.
resource = 'ipsec_site_connection'
cmd = ipsec_site_connection.CreateIPsecSiteConnection(
test_cli20.MyApp(sys.stdout), None
)
tenant_id = 'mytenant_id'
my_id = 'my_id'
peer_address = '192.168.2.10'
peer_id = '192.168.2.10'
psk = 'abcd'
mtu = '1500'
initiator = 'bi-directional'
vpnservice_id = 'vpnservice_id'
ikepolicy_id = 'ikepolicy_id'
ipsecpolicy_id = 'ipsecpolicy_id'
local_ep_group = 'local-epg'
peer_ep_group = 'peer-epg'
admin_state = True
args = ['--tenant-id', tenant_id,
'--peer-address', peer_address,
'--peer-id', peer_id,
'--psk', psk,
'--vpnservice-id', vpnservice_id,
'--ikepolicy-id', ikepolicy_id,
'--ipsecpolicy-id', ipsecpolicy_id,
'--local-ep-group', local_ep_group,
'--peer-ep-group', peer_ep_group]
position_names = ['tenant_id', 'admin_state_up',
'peer_address', 'peer_id',
'local_ep_group_id', 'peer_ep_group_id',
'psk', 'mtu', 'initiator',
'vpnservice_id', 'ikepolicy_id',
'ipsecpolicy_id']
position_values = [tenant_id, admin_state, peer_address, peer_id,
local_ep_group, peer_ep_group, psk, mtu, initiator,
vpnservice_id, ikepolicy_id, ipsecpolicy_id]
self._test_create_resource(resource, cmd, None, my_id, args,
position_names, position_values)
def _test_create_failure(self, additional_args=None, expected_exc=None):
        # Helper to test failure of IPsec site-to-site connection creation.
resource = 'ipsec_site_connection'
cmd = ipsec_site_connection.CreateIPsecSiteConnection(
test_cli20.MyApp(sys.stdout), None
)
tenant_id = 'mytenant_id'
my_id = 'my_id'
peer_address = '192.168.2.10'
peer_id = '192.168.2.10'
psk = 'abcd'
mtu = '1500'
initiator = 'bi-directional'
vpnservice_id = 'vpnservice_id'
ikepolicy_id = 'ikepolicy_id'
ipsecpolicy_id = 'ipsecpolicy_id'
admin_state = True
args = ['--tenant-id', tenant_id,
'--peer-address', peer_address,
'--peer-id', peer_id,
'--psk', psk,
'--vpnservice-id', vpnservice_id,
'--ikepolicy-id', ikepolicy_id,
'--ipsecpolicy-id', ipsecpolicy_id]
if additional_args is not None:
args += additional_args
position_names = ['tenant_id', 'admin_state_up', 'peer_address',
'peer_id', 'psk', 'mtu', 'initiator',
'local_ep_group_id', 'peer_ep_group_id',
'vpnservice_id', 'ikepolicy_id', 'ipsecpolicy_id']
position_values = [tenant_id, admin_state, peer_address, peer_id, psk,
mtu, initiator, None, None, vpnservice_id,
ikepolicy_id, ipsecpolicy_id]
if not expected_exc:
expected_exc = exceptions.CommandError
self.assertRaises(expected_exc,
self._test_create_resource,
resource, cmd, None, my_id, args,
position_names, position_values)
def test_fail_create_with_invalid_mtu(self):
        # ipsecsite-connection-create with an invalid MTU value.
bad_mtu = ['--mtu', '67']
self._test_create_failure(bad_mtu)
def test_fail_create_with_invalid_dpd_keys(self):
bad_dpd_key = ['--dpd', 'act=restart,interval=30,time=120']
self._test_create_failure(bad_dpd_key, SystemExit)
def test_fail_create_with_invalid_dpd_values(self):
bad_dpd_values = ['--dpd', 'action=hold,interval=30,timeout=-1']
self._test_create_failure(bad_dpd_values)
def test_fail_create_missing_endpoint_groups_or_cidr(self):
# Must provide either endpoint groups or peer cidrs.
self._test_create_failure()
def test_fail_create_missing_peer_endpoint_group(self):
        # Fails if we don't have both endpoint groups - missing peer.
self._test_create_failure(['--local-ep-group', 'local-epg'])
def test_fail_create_missing_local_endpoint_group(self):
        # Fails if we don't have both endpoint groups - missing local.
self._test_create_failure(['--peer-ep-group', 'peer-epg'])
def test_fail_create_when_both_endpoints_and_peer_cidr(self):
# Cannot intermix endpoint groups and peer CIDRs for create.
additional_args = ['--local-ep-group', 'local-epg',
'--peer-ep-group', 'peer-epg',
'--peer-cidr', '10.2.0.0/24']
self._test_create_failure(additional_args)
def test_list_ipsec_site_connection(self):
# ipsecsite-connection-list.
resources = "ipsec_site_connections"
cmd = ipsec_site_connection.ListIPsecSiteConnection(
test_cli20.MyApp(sys.stdout), None
)
self._test_list_resources(resources, cmd, True)
def test_list_ipsec_site_connection_pagination(self):
# ipsecsite-connection-list.
resources = "ipsec_site_connections"
cmd = ipsec_site_connection.ListIPsecSiteConnection(
test_cli20.MyApp(sys.stdout), None
)
self._test_list_resources_with_pagination(resources, cmd)
def test_list_ipsec_site_connection_sort(self):
# ipsecsite-connection-list.
        # --sort-key name --sort-key id --sort-dir asc --sort-dir desc
resources = "ipsec_site_connections"
cmd = ipsec_site_connection.ListIPsecSiteConnection(
test_cli20.MyApp(sys.stdout), None
)
self._test_list_resources(resources, cmd,
sort_key=["name", "id"],
sort_dir=["asc", "desc"])
def test_list_ipsec_site_connection_limit(self):
# ipsecsite-connection-list -P.
resources = "ipsec_site_connections"
cmd = ipsec_site_connection.ListIPsecSiteConnection(
test_cli20.MyApp(sys.stdout), None
)
self._test_list_resources(resources, cmd, page_size=1000)
def test_delete_ipsec_site_connection(self):
# ipsecsite-connection-delete my-id.
resource = 'ipsec_site_connection'
cmd = ipsec_site_connection.DeleteIPsecSiteConnection(
test_cli20.MyApp(sys.stdout), None
)
my_id = 'my-id'
args = [my_id]
self._test_delete_resource(resource, cmd, my_id, args)
def test_update_ipsec_site_connection(self):
# ipsecsite-connection-update myid --name Branch-new --tags a b.
resource = 'ipsec_site_connection'
cmd = ipsec_site_connection.UpdateIPsecSiteConnection(
test_cli20.MyApp(sys.stdout), None
)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--name', 'Branch-new',
'--tags', 'a', 'b'],
{'name': 'Branch-new',
'tags': ['a', 'b'], })
# ipsecsite-connection-update myid --mtu 69 --initiator response-only
# --peer-id '192.168.2.11' --peer-ep-group 'update-grp'
self._test_update_resource(resource, cmd, 'myid',
['myid', '--mtu', '69',
'--initiator', 'response-only',
'--peer-id', '192.168.2.11',
'--peer-ep-group', 'update-grp'],
{'mtu': '69',
'initiator': 'response-only',
'peer_id': '192.168.2.11',
'peer_ep_group_id': 'update-grp', },)
def test_show_ipsec_site_connection_id(self):
        # ipsecsite-connection-show test_id.
resource = 'ipsec_site_connection'
cmd = ipsec_site_connection.ShowIPsecSiteConnection(
test_cli20.MyApp(sys.stdout), None
)
args = ['--fields', 'id', self.test_id]
self._test_show_resource(resource, cmd, self.test_id, args, ['id'])
def test_show_ipsec_site_connection_id_name(self):
# ipsecsite-connection-show."""
resource = 'ipsec_site_connection'
cmd = ipsec_site_connection.ShowIPsecSiteConnection(
test_cli20.MyApp(sys.stdout), None
)
args = ['--fields', 'id', '--fields', 'name', self.test_id]
self._test_show_resource(resource, cmd, self.test_id,
args, ['id', 'name'])
|
|
# Copyright 2012 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file will be used with PyPi in order to package and distribute the final
# product.
"""Discovers the mongo cluster and starts the connector.
"""
import logging
import logging.handlers
import oplog_manager
import optparse
import os
import pymongo
import re
import shutil
import sys
import threading
import time
import util
import imp
from locking_dict import LockingDict
try:
from pymongo import MongoClient as Connection
except ImportError:
from pymongo import Connection
try:
import simplejson as json
except ImportError:
import json
class Connector(threading.Thread):
"""Checks the cluster for shards to tail.
"""
def __init__(self, address, oplog_checkpoint, target_url, ns_set,
u_key, auth_key, doc_manager=None, auth_username=None):
if doc_manager is not None:
doc_manager = imp.load_source('DocManager', doc_manager)
else:
from doc_manager import DocManager
time.sleep(1)
super(Connector, self).__init__()
#can_run is set to false when we join the thread
self.can_run = True
#The name of the file that stores the progress of the OplogThreads
self.oplog_checkpoint = oplog_checkpoint
#main address - either mongos for sharded setups or a primary otherwise
self.address = address
#The URL of the target system
self.target_url = target_url
#The set of relevant namespaces to consider
self.ns_set = ns_set
#The key that is a unique document identifier for the target system.
#Not necessarily the mongo unique key.
self.u_key = u_key
#Password for authentication
self.auth_key = auth_key
#Username for authentication
self.auth_username = auth_username
#The set of OplogThreads created
self.shard_set = {}
#Dict of OplogThread/timestamp pairs to record progress
self.oplog_progress = LockingDict()
try:
if target_url is None:
if doc_manager is None: # imported using from... import
self.doc_manager = DocManager(unique_key=u_key)
else: # imported using load source
self.doc_manager = doc_manager.DocManager(unique_key=u_key)
else:
if doc_manager is None:
self.doc_manager = DocManager(self.target_url,
unique_key=u_key)
else:
self.doc_manager = doc_manager.DocManager(self.target_url,
unique_key=u_key)
except SystemError:
logging.critical("MongoConnector: Bad target system URL!")
self.can_run = False
return
if self.oplog_checkpoint is not None:
if not os.path.exists(self.oplog_checkpoint):
info_str = "MongoConnector: Can't find OplogProgress file!"
logging.critical(info_str)
self.doc_manager.stop()
self.can_run = False
else:
if (not os.access(self.oplog_checkpoint, os.W_OK)
and not os.access(self.oplog_checkpoint, os.R_OK )):
logging.critical("Invalid permissions on %s! Exiting" %
(self.oplog_checkpoint))
sys.exit(1)
def join(self):
""" Joins thread, stops it from running
"""
self.can_run = False
self.doc_manager.stop()
threading.Thread.join(self)
def write_oplog_progress(self):
""" Writes oplog progress to file provided by user
"""
if self.oplog_checkpoint is None:
return None
# write to temp file
backup_file = self.oplog_checkpoint + '.backup'
os.rename(self.oplog_checkpoint, backup_file)
# for each of the threads write to file
with open(self.oplog_checkpoint, 'w') as dest:
with self.oplog_progress as oplog_prog:
oplog_dict = oplog_prog.get_dict()
for oplog, time_stamp in oplog_dict.items():
oplog_str = str(oplog)
timestamp = util.bson_ts_to_long(time_stamp)
json_str = json.dumps([oplog_str, timestamp])
try:
dest.write(json_str)
except IOError:
# Basically wipe the file, copy from backup
dest.truncate()
with open(backup_file, 'r') as backup:
                            shutil.copyfileobj(backup, dest)
break
os.remove(self.oplog_checkpoint + '.backup')
def read_oplog_progress(self):
"""Reads oplog progress from file provided by user.
        This method is only called once, before any threads are spawned.
"""
if self.oplog_checkpoint is None:
return None
# Check for empty file
try:
if os.stat(self.oplog_checkpoint).st_size == 0:
logging.info("MongoConnector: Empty oplog progress file.")
return None
except OSError:
return None
source = open(self.oplog_checkpoint, 'r')
try:
data = json.load(source)
except ValueError: # empty file
reason = "It may be empty or corrupt."
logging.info("MongoConnector: Can't read oplog progress file. %s" %
(reason))
source.close()
return None
source.close()
count = 0
oplog_dict = self.oplog_progress.get_dict()
for count in range(0, len(data), 2):
oplog_str = data[count]
time_stamp = data[count + 1]
oplog_dict[oplog_str] = util.long_to_bson_ts(time_stamp)
#stored as bson_ts
def run(self):
"""Discovers the mongo cluster and creates a thread for each primary.
"""
main_conn = Connection(self.address)
if self.auth_key is not None:
main_conn['admin'].authenticate(self.auth_username, self.auth_key)
self.read_oplog_progress()
conn_type = None
try:
main_conn.admin.command("isdbgrid")
except pymongo.errors.OperationFailure:
conn_type = "REPLSET"
if conn_type == "REPLSET":
#non sharded configuration
oplog_coll = main_conn['local']['oplog.rs']
prim_admin = main_conn.admin
repl_set = prim_admin.command("replSetGetStatus")['set']
oplog = oplog_manager.OplogThread(main_conn,
(main_conn.host + ":" + str(main_conn.port)),
oplog_coll,
False, self.doc_manager,
self.oplog_progress,
self.ns_set, self.auth_key,
self.auth_username,
repl_set=repl_set)
self.shard_set[0] = oplog
logging.info('MongoConnector: Starting connection thread %s' %
main_conn)
oplog.start()
while self.can_run:
if not self.shard_set[0].running:
logging.error("MongoConnector: OplogThread"
" %s unexpectedly stopped! Shutting down" %
(str(self.shard_set[0])))
self.oplog_thread_join()
self.doc_manager.stop()
return
self.write_oplog_progress()
time.sleep(1)
else: # sharded cluster
while self.can_run is True:
for shard_doc in main_conn['config']['shards'].find():
shard_id = shard_doc['_id']
if shard_id in self.shard_set:
if not self.shard_set[shard_id].running:
logging.error("MongoConnector: OplogThread"
" %s unexpectedly stopped! Shutting down" %
(str(self.shard_set[shard_id])))
self.oplog_thread_join()
self.doc_manager.stop()
return
self.write_oplog_progress()
time.sleep(1)
continue
try:
repl_set, hosts = shard_doc['host'].split('/')
except ValueError:
cause = "The system only uses replica sets!"
logging.error("MongoConnector: %s", cause)
self.oplog_thread_join()
self.doc_manager.stop()
return
shard_conn = Connection(hosts, replicaset=repl_set)
oplog_coll = shard_conn['local']['oplog.rs']
oplog = oplog_manager.OplogThread(shard_conn, self.address,
oplog_coll, True,
self.doc_manager,
self.oplog_progress,
self.ns_set,
self.auth_key,
self.auth_username)
self.shard_set[shard_id] = oplog
msg = "Starting connection thread"
logging.info("MongoConnector: %s %s" % (msg, shard_conn))
oplog.start()
self.oplog_thread_join()
def oplog_thread_join(self):
"""Stops all the OplogThreads
"""
logging.info('MongoConnector: Stopping all OplogThreads')
for thread in self.shard_set.values():
thread.join()
def main():
""" Starts the mongo connector (assuming CLI)
"""
parser = optparse.OptionParser()
#-m is for the main address, which is a host:port pair, ideally of the
#mongos. For non sharded clusters, it can be the primary.
parser.add_option("-m", "--main", action="store", type="string",
dest="main_addr", default="localhost:27217",
help="""Specify the main address, which is a"""
""" host:port pair. For sharded clusters, this"""
""" should be the mongos address. For individual"""
""" replica sets, supply the address of the"""
""" primary. For example, `-m localhost:27217`"""
""" would be a valid argument to `-m`. Don't use"""
""" quotes around the address""")
#-o is to specify the oplog-config file. This file is used by the system
#to store the last timestamp read on a specific oplog. This allows for
#quick recovery from failure.
parser.add_option("-o", "--oplog-ts", action="store", type="string",
dest="oplog_config", default="config.txt",
help="""Specify the name of the file that stores the"""
"""oplog progress timestamps. """
"""This file is used by the system to store the last"""
"""timestamp read on a specific oplog. This allows"""
""" for quick recovery from failure. By default this"""
""" is `config.txt`, which starts off empty. An empty"""
""" file causes the system to go through all the mongo"""
""" oplog and sync all the documents. Whenever the """
"""cluster is restarted, it is essential that the """
"""oplog-timestamp config file be emptied - otherwise"""
""" the connector will miss some documents and behave"""
"""incorrectly.""")
#-t is to specify the URL to the target system being used.
parser.add_option("-t", "--target-url", action="store", type="string",
dest="url", default=None,
help="""Specify the URL to the target system being """
"""used. For example, if you were using Solr out of """
"""the box, you could use '-t """
""" http://localhost:8080/solr' with the """
""" SolrDocManager to establish a proper connection."""
""" Don't use quotes around address."""
"""If target system doesn't need URL, don't specify""")
#-n is to specify the namespaces we want to consider. The default
#considers all the namespaces
parser.add_option("-n", "--namespace-set", action="store", type="string",
dest="ns_set", default=None, help=
"""Used to specify the namespaces we want to """
""" consider. For example, if we wished to store all """
""" documents from the test.test and alpha.foo """
""" namespaces, we could use `-n test.test,alpha.foo`."""
""" The default is to consider all the namespaces, """
""" excluding the system and config databases, and """
""" also ignoring the "system.indexes" collection in """
"""any database.""")
#-u is to specify the mongoDB field that will serve as the unique key
#for the target system,
parser.add_option("-u", "--unique-key", action="store", type="string",
dest="u_key", default="_id", help=
"""Used to specify the mongoDB field that will serve"""
"""as the unique key for the target system"""
"""The default is "_id", which can be noted by """
""" '-u _id'""")
#-f is to specify the authentication key file. This file is used by mongos
#to authenticate connections to the shards, and we'll use it in the oplog
#threads.
parser.add_option("-f", "--password-file", action="store", type="string",
dest="auth_file", default=None, help=
""" Used to store the password for authentication."""
""" Use this option if you wish to specify a"""
""" username and password but don't want to"""
""" type in the password. The contents of this"""
""" file should be the password for the admin user.""")
#-p is to specify the password used for authentication.
parser.add_option("-p", "--password", action="store", type="string",
dest="password", default=None, help=
""" Used to specify the password."""
""" This is used by mongos to authenticate"""
""" connections to the shards, and in the"""
""" oplog threads. If authentication is not used, then"""
""" this field can be left empty as the default """)
#-a is to specify the username for authentication.
parser.add_option("-a", "--admin-username", action="store", type="string",
dest="admin_name", default="__system", help=
"""Used to specify the username of an admin user to"""
"""authenticate with. To use authentication, the user"""
"""must specify both an admin username and a keyFile."""
"""The default username is '__system'""")
#-d is to specify the doc manager file.
parser.add_option("-d", "--docManager", action="store", type="string",
dest="doc_manager", default=None, help=
"""Used to specify the doc manager file that"""
""" is going to be used. You should send the"""
""" path of the file you want to be used."""
""" By default, it will use the """
""" doc_manager_simulator.py file. It is"""
""" recommended that all doc manager files be"""
""" kept in the doc_managers folder in"""
""" mongo-connector. For more information"""
""" about making your own doc manager,"""
""" see Doc Manager section.""")
#-s is to enable syslog logging.
parser.add_option("-s", "--enable-syslog", action="store_true",
dest="enable_syslog", default=False, help=
"""Used to enable logging to syslog."""
""" Use -l to specify syslog host.""")
#--syslog-host is to specify the syslog host.
parser.add_option("--syslog-host", action="store", type="string",
dest="syslog_host", default="localhost:514", help=
"""Used to specify the syslog host."""
""" The default is 'localhost:514'""")
#--syslog-facility is to specify the syslog facility.
parser.add_option("--syslog-facility", action="store", type="string",
dest="syslog_facility", default="user", help=
"""Used to specify the syslog facility."""
""" The default is 'user'""")
(options, args) = parser.parse_args()
logger = logging.getLogger()
loglevel = logging.INFO
logger.setLevel(loglevel)
if options.enable_syslog:
syslog_info = options.syslog_host.split(":")
syslog_host = logging.handlers.SysLogHandler(address=(syslog_info[0],
int(syslog_info[1])),facility=options.syslog_facility)
syslog_host.setLevel(loglevel)
logger.addHandler(syslog_host)
else:
log_out = logging.StreamHandler()
log_out.setLevel(loglevel)
log_out.setFormatter(logging.Formatter(
'%(asctime)s - %(levelname)s - %(message)s'))
logger.addHandler(log_out)
logger.info('Beginning Mongo Connector')
if options.doc_manager is None:
logger.info('No doc manager specified, using simulator.')
if options.ns_set is None:
ns_set = []
else:
ns_set = options.ns_set.split(',')
key = None
if options.auth_file is not None:
try:
key = open(options.auth_file).read()
            key = re.sub(r'\s', '', key)
except IOError:
logger.error('Could not parse password authentication file!')
sys.exit(1)
if options.password is not None:
key = options.password
if key is None and options.admin_name != "__system":
logger.error("Admin username specified without password!")
sys.exit(1)
connector = Connector(options.main_addr, options.oplog_config, options.url,
ns_set, options.u_key, key, options.doc_manager,
auth_username=options.admin_name)
connector.start()
while True:
try:
time.sleep(3)
if not connector.is_alive():
break
except KeyboardInterrupt:
logging.info("Caught keyboard interrupt, exiting!")
connector.join()
break
if __name__ == '__main__':
main()
|
|
import matplotlib
import numpy as np
from scipy.misc import toimage
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import Grid
from library.datasets.dataset import Dataset
class CIFARBase:
def __init__(self,
num_images=1.0,
one_hot_encode=False,
train_validate_split=None,
preprocess='',
augment=False,
num_test_images=1.0,
endian='little',
make_image=True,
image_mode='rgb',
save_h5py=True,
verbose=False):
"""
:param num_images:
:param one_hot_encode:
:param train_validate_split:
:param preprocess:
:param augment:
:param num_test_images:
:param endian:
:param make_image:
:param image_mode:
:param save_h5py:
:param verbose:
"""
self.verbose = verbose
self.img_height = 32
self.img_width = 32
self.num_channels = 3
self.one_hot_encode = one_hot_encode
self.endian = endian
self.train_validate_split = train_validate_split
if num_images > 1.0 or num_images < 0.0:
self.num_images = int(50000)
else:
self.num_images = int(num_images*50000)
if self.train_validate_split is not None:
self.num_train_images = int(self.train_validate_split*self.num_images)
self.num_validate_images = self.num_images - self.num_train_images
else:
self.num_train_images = int(self.num_images)
if num_test_images > 1.0 or num_test_images < 0.0:
self.num_test_images = int(10000)
else:
self.num_test_images = int(num_test_images*10000)
self.train = Dataset()
self.validate = Dataset()
self.test = Dataset()
self.make_image = make_image
self.image_mode = image_mode
self.preprocess = preprocess
self.augment = augment
self.save_h5py = save_h5py
self.fine_classes = None
self.coarse_classes = None
self.num_fine_classes = None
self.num_coarse_classes = None
self.file_url = None
self.file_md5 = None
def get_image_width(self):
"""
:return:
"""
return self.img_width
def get_image_height(self):
"""
:return:
"""
return self.img_height
def get_num_image_channels(self):
"""
:return:
"""
        return self.num_channels
def convert_images(self, raw, type='rgb'):
"""
:param raw:
:param type:
:return:
"""
raw_float = np.array(raw, dtype=float)
if type == 'rgb_float':
# Convert the raw images from the data-files to floating-points.
raw_float = np.array(raw, dtype=float) / 255.0
# Reshape the array to 4-dimensions.
images = raw_float.reshape([-1, 3, 32, 32])
# Reorder the indices of the array.
images = images.transpose([0, 2, 3, 1])
images_n = images[0, :]
if type == 'grey':
images_n = ((images_n[:, :, 0] * 0.299) +
(images_n[:, :, 1] * 0.587) +
(images_n[:, :, 2] * 0.114))
elif type == 'grey_float':
images_n = ((images_n[:, :, 0] * 0.299) +
(images_n[:, :, 1] * 0.587) +
(images_n[:, :, 2] * 0.114))
images_n = images_n / 255.0
return images_n
def convert_one_hot_encoding(self, classes, data_type='train', class_type='fine'):
"""
:param classes:
:param data_type:
:param class_type:
:return:
"""
num_classes = np.max(classes) + 1
if data_type == 'train' and class_type == 'fine':
self.train.one_hot_fine_labels = np.zeros((classes.shape[0], num_classes))
elif data_type == 'train' and class_type == 'coarse':
self.train.one_hot_coarse_labels = np.zeros((classes.shape[0], num_classes))
if data_type == 'validate' and class_type == 'fine':
self.validate.one_hot_fine_labels = np.zeros((classes.shape[0], num_classes))
elif data_type == 'validate' and class_type == 'coarse':
self.validate.one_hot_coarse_labels = np.zeros((classes.shape[0], num_classes))
if data_type == 'test' and class_type == 'fine':
self.test.one_hot_fine_labels = np.zeros((classes.shape[0], num_classes))
elif data_type == 'test' and class_type == 'coarse':
self.test.one_hot_coarse_labels = np.zeros((classes.shape[0], num_classes))
for i in range(classes.shape[0]):
if self.endian == 'big' and class_type == 'fine':
if data_type == 'train':
self.train.one_hot_fine_labels[i, num_classes - 1 - classes[i]] = 1
if data_type == 'validate':
self.validate.one_hot_fine_labels[i, num_classes - 1 - classes[i]] = 1
if data_type == 'test':
self.test.one_hot_fine_labels[i, num_classes-1-classes[i]] = 1
elif self.endian == 'big' and class_type == 'coarse':
if data_type == 'train':
self.train.one_hot_coarse_labels[i, num_classes - 1 - classes[i]] = 1
if data_type == 'validate':
self.validate.one_hot_coarse_labels[i, num_classes - 1 - classes[i]] = 1
if data_type == 'test':
self.test.one_hot_coarse_labels[i, num_classes-1-classes[i]] = 1
if self.endian == 'little' and class_type == 'fine':
if data_type == 'train':
self.train.one_hot_fine_labels[i, classes[i]] = 1
if data_type == 'validate':
self.validate.one_hot_fine_labels[i, classes[i]] = 1
if data_type == 'test':
self.test.one_hot_fine_labels[i, classes[i]] = 1
elif self.endian == 'little' and class_type == 'coarse':
if data_type == 'train':
self.train.one_hot_coarse_labels[i, classes[i]] = 1
if data_type == 'validate':
self.validate.one_hot_coarse_labels[i, classes[i]] = 1
if data_type == 'test':
self.test.one_hot_coarse_labels[i, classes[i]] = 1
def train_images(self):
"""
:return:
"""
return self.train.data
def train_labels(self):
"""
:return:
"""
return self.train.one_hot_labels
def train_classes(self):
"""
:return:
"""
return self.train.fine_labels
def validate_images(self):
"""
:return:
"""
return self.validate.data
def validate_labels(self):
"""
:return:
"""
return self.validate.one_hot_labels
def validate_classes(self):
"""
:return:
"""
return self.validate.fine_labels
def test_images(self):
"""
:return:
"""
return self.test.data
def test_labels(self):
"""
:return:
"""
return self.test.one_hot_labels
def test_classes(self):
"""
:return:
"""
return self.test.fine_labels
def plot(self, grid, matrix, fontsize=10):
"""
:param grid:
:param matrix:
:param fontsize:
:return:
"""
k = 0
class_type = 0
for ax in grid:
ax.imshow(toimage(matrix[k, :]))
ax.title.set_visible(False)
# ax.axis('tight')
# ax.axis('off')
ax.set_frame_on(False)
ax.get_xaxis().set_ticklabels([])
ax.get_yaxis().set_ticklabels([])
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
ax.set_xlim([0, 32])
ax.set_ylim([32, 0])
if k % 10 == 0:
ax.set_ylabel(self.fine_classes[class_type], rotation=0, ha='right',
weight='bold', size=fontsize)
class_type += 1
k += 1
plt.tight_layout()
plt.show()
def plot_sample(self, plot_train=True, plot_test=False, verbose=False, fig_size=(7, 7), fontsize=12,
images_per_class=10):
"""
:param plot_train:
:param plot_test:
:param verbose:
:param fig_size:
:param fontsize:
:param images_per_class:
:return:
"""
num_images_per_class = images_per_class
if self.train.data is None and plot_test is False:
self.load_data(train=plot_train, test=plot_test)
elif plot_train is False and self.test.data is None:
self.load_data(train=plot_train, test=plot_test)
elif self.train.data is None and self.test.data is None:
self.load_data(train=plot_train, test=plot_test)
data_image_nos = []
test_image_nos = []
if plot_train is True:
for class_type in range(self.num_fine_classes):
data_fine_labels = np.where(self.train.fine_labels == class_type)
data_fine_labels = data_fine_labels[0][:num_images_per_class].tolist()
data_image_nos.extend(data_fine_labels)
example_data_images = self.train.data[data_image_nos, :]
example_data_image_matrix = []
k = 0
for i in range(len(data_image_nos)):
example_data_image_matrix.append(self.convert_images(example_data_images[k, :]))
k += 1
example_data_image_matrix = np.array(example_data_image_matrix)
if plot_test is True:
for class_type in range(self.num_fine_classes):
test_fine_labels = np.where(self.test.fine_labels == class_type)
test_fine_labels = test_fine_labels[0][:num_images_per_class]
test_fine_labels = test_fine_labels.tolist()
test_image_nos.extend(test_fine_labels)
example_test_images = self.test.data[test_image_nos, :]
example_test_image_matrix = []
k = 0
for i in range(len(test_image_nos)):
example_test_image_matrix.append(self.convert_images(example_test_images[k, :]))
k += 1
example_test_image_matrix = np.array(example_test_image_matrix)
num_rows = min(self.num_fine_classes, 20)
num_cols = num_images_per_class
if verbose is True:
print('Plot image matrix shape: ' + str(example_data_image_matrix.shape))
print('Number of rows: %d' % num_rows)
print('Number of cols: %d' % num_cols)
if plot_train is True:
data_fig = plt.figure()
data_fig.set_figheight(fig_size[0])
data_fig.set_figwidth(fig_size[1])
data_grid = Grid(data_fig, rect=111, nrows_ncols=(num_rows, num_cols),
axes_pad=0.0, label_mode='R',
)
self.plot(data_grid, example_data_image_matrix, fontsize=fontsize)
if plot_test is True:
test_fig = plt.figure()
test_fig.set_figheight(fig_size[0])
test_fig.set_figwidth(fig_size[1])
test_grid = Grid(test_fig, rect=111, nrows_ncols=(num_rows, num_cols),
axes_pad=0.0, label_mode='R',
)
self.plot(test_grid, example_test_image_matrix, fontsize=fontsize)
def plot_images(self, images, cls_true_fine, cls_true_coarse=None, cls_pred_fine=None, cls_pred_coarse=None,
nrows=3, ncols=3, fig_size=(7, 7), fontsize=15, convert=False, type='rgb'):
"""
:param images:
:param cls_true_fine:
:param cls_true_coarse:
:param cls_pred_fine:
:param cls_pred_coarse:
:param nrows:
:param ncols:
:param fig_size:
:param fontsize:
:param convert:
:param type:
:return:
"""
assert images.shape[0] == cls_true_fine.shape[0]
fig, axes = plt.subplots(nrows, ncols)
if fig_size is not None:
fig.set_figheight(fig_size[0])
fig.set_figwidth(fig_size[1])
fig.subplots_adjust(hspace=0.1, wspace=0.1)
for image_no, ax in enumerate(axes.flat):
# Plot image.
if convert is True:
image = self.convert_images(images[image_no, :])
else:
image = images[image_no, :]
if type == 'rgb':
ax.imshow(toimage(image), cmap='binary')
if type == 'grey':
ax.imshow(toimage(image), cmap=matplotlib.cm.Greys_r)
# Show true and predicted classes.
if cls_pred_fine is None and cls_pred_coarse is None:
if cls_true_coarse is None:
xlabel = "Fine: {0}".format(cls_true_fine[image_no])
else:
xlabel = "Fine: {0}\nCoarse: {1}".format(cls_true_fine[image_no], cls_true_coarse[image_no])
else:
if cls_true_coarse is None:
if cls_pred_fine is None:
xlabel = "Fine: {0}".format(cls_true_fine[image_no])
else:
xlabel = "Fine: {0}\nPred. Fine: {1}".format(cls_true_fine[image_no], cls_pred_fine[image_no])
else:
if cls_pred_coarse is None:
xlabel = "Coarse: {0}".format(cls_true_coarse[image_no])
else:
xlabel = "Coarse: {0}\nPred. Coarse: {1}".format(cls_true_coarse[image_no],
cls_pred_coarse[image_no])
ax.set_xlabel(xlabel, weight='bold', size=fontsize)
# Remove ticks from the plot.
ax.set_xticks([])
ax.set_yticks([])
plt.tight_layout()
plt.show()
return True
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018 Nicolai Buchwitz <nb@tipi-net.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: netcup_dns
notes: []
version_added: 2.7.0
short_description: manage Netcup DNS records
description:
- "Manages DNS records via the Netcup API, see the docs U(https://ccp.netcup.net/run/webservice/servers/endpoint.php)"
options:
api_key:
description:
            - API key for authentication, must be obtained via the netcup CCP (U(https://ccp.netcup.net))
required: True
api_password:
description:
            - API password for authentication, must be obtained via the netcup CCP (https://ccp.netcup.net)
required: True
customer_id:
description:
- Netcup customer id
required: True
domain:
description:
            - Domain name to which the records should be added or from which they should be removed
required: True
record:
description:
            - Record to add or delete, supports wildcard (*). Default is C(@) (i.e. the zone name)
default: "@"
aliases: [ name ]
type:
description:
- Record type
choices: ['A', 'AAAA', 'MX', 'CNAME', 'CAA', 'SRV', 'TXT', 'TLSA', 'NS', 'DS']
required: True
value:
description:
- Record value
required: true
solo:
type: bool
default: False
description:
- Whether the record should be the only one for that record type and record name. Only use with C(state=present)
- This will delete all other records with the same record name and type.
priority:
description:
- Record priority. Required for C(type=MX)
required: False
state:
description:
- Whether the record should exist or not
required: False
default: present
choices: [ 'present', 'absent' ]
requirements:
- "nc-dnsapi >= 0.1.3"
author: "Nicolai Buchwitz (@nbuchwitz)"
'''
EXAMPLES = '''
- name: Create a record of type A
netcup_dns:
api_key: "..."
api_password: "..."
customer_id: "..."
domain: "example.com"
name: "mail"
type: "A"
value: "127.0.0.1"
- name: Delete that record
netcup_dns:
api_key: "..."
api_password: "..."
customer_id: "..."
domain: "example.com"
name: "mail"
type: "A"
value: "127.0.0.1"
state: absent
- name: Create a wildcard record
netcup_dns:
api_key: "..."
api_password: "..."
customer_id: "..."
domain: "example.com"
name: "*"
type: "A"
value: "127.0.1.1"
- name: Set the MX record for example.com
netcup_dns:
api_key: "..."
api_password: "..."
customer_id: "..."
domain: "example.com"
type: "MX"
value: "mail.example.com"
- name: Set a record and ensure that this is the only one
netcup_dns:
api_key: "..."
api_password: "..."
customer_id: "..."
name: "demo"
domain: "example.com"
type: "AAAA"
value: "::1"
solo: true
'''
RETURN = '''
records:
description: list containing all records
returned: success
type: complex
contains:
name:
description: the record name
returned: success
type: string
sample: fancy-hostname
type:
description: the record type
            returned: success
type: string
sample: A
value:
description: the record destination
returned: success
type: string
sample: 127.0.0.1
priority:
description: the record priority (only relevant if type=MX)
returned: success
type: int
sample: 0
id:
description: internal id of the record
returned: success
type: int
sample: 12345
'''
from ansible.module_utils.basic import AnsibleModule
try:
import nc_dnsapi
from nc_dnsapi import DNSRecord
HAS_NCDNSAPI = True
except ImportError:
HAS_NCDNSAPI = False
def main():
module = AnsibleModule(
argument_spec=dict(
api_key=dict(required=True, no_log=True),
api_password=dict(required=True, no_log=True),
customer_id=dict(required=True, type='int'),
domain=dict(required=True),
record=dict(required=False, default='@', aliases=['name']),
type=dict(required=True, choices=['A', 'AAAA', 'MX', 'CNAME', 'CAA', 'SRV', 'TXT', 'TLSA', 'NS', 'DS']),
value=dict(required=True),
priority=dict(required=False, type='int'),
solo=dict(required=False, type='bool', default=False),
state=dict(required=False, choices=['present', 'absent'], default='present'),
),
supports_check_mode=True
)
if not HAS_NCDNSAPI:
module.fail_json(msg="nc-dnsapi is required for this module")
api_key = module.params.get('api_key')
api_password = module.params.get('api_password')
customer_id = module.params.get('customer_id')
domain = module.params.get('domain')
record_type = module.params.get('type')
record = module.params.get('record')
value = module.params.get('value')
priority = module.params.get('priority')
solo = module.params.get('solo')
state = module.params.get('state')
if record_type == 'MX' and not priority:
module.fail_json(msg="record type MX required the 'priority' argument")
has_changed = False
all_records = []
try:
with nc_dnsapi.Client(customer_id, api_key, api_password) as api:
all_records = api.dns_records(domain)
record = DNSRecord(record, record_type, value, priority=priority)
# try to get existing record
record_exists = False
for r in all_records:
if r == record:
record_exists = True
record = r
break
if state == 'present':
if solo:
obsolete_records = [r for r in all_records if
r.hostname == record.hostname
and r.type == record.type
and not r.destination == record.destination]
if obsolete_records:
if not module.check_mode:
all_records = api.delete_dns_records(domain, obsolete_records)
has_changed = True
if not record_exists:
if not module.check_mode:
all_records = api.add_dns_record(domain, record)
has_changed = True
elif state == 'absent' and record_exists:
if not module.check_mode:
all_records = api.delete_dns_record(domain, record)
has_changed = True
except Exception as ex:
        module.fail_json(msg=str(ex))
module.exit_json(changed=has_changed, result={"records": [record_data(r) for r in all_records]})
def record_data(r):
return {"name": r.hostname, "type": r.type, "value": r.destination, "priority": r.priority, "id": r.id}
if __name__ == '__main__':
main()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
import uuid
from .. import models
class ServicesOperations(object):
"""ServicesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: The API version to use for each request. The current version is 2015-08-19. Constant value: "2015-08-19".
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2015-08-19"
self.config = config
def create_or_update(
self, resource_group_name, search_service_name, service, search_management_request_options=None, custom_headers=None, raw=False, **operation_config):
"""Creates or updates a Search service in the given resource group. If the
Search service already exists, all properties will be updated with the
given values.
:param resource_group_name: The name of the resource group within the
current subscription. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param search_service_name: The name of the Azure Search service to
create or update. Search service names must only contain lowercase
         letters, digits or dashes, cannot use a dash as the first two
         characters or the last character, cannot contain consecutive
         dashes, and must be between
2 and 60 characters in length. Search service names must be globally
unique since they are part of the service URI
(https://<name>.search.windows.net). You cannot change the service
name after the service is created.
:type search_service_name: str
:param service: The definition of the Search service to create or
update.
:type service: :class:`SearchService
<azure.mgmt.search.models.SearchService>`
:param search_management_request_options: Additional parameters for
the operation
:type search_management_request_options:
:class:`SearchManagementRequestOptions
<azure.mgmt.search.models.SearchManagementRequestOptions>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`SearchService
<azure.mgmt.search.models.SearchService>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
client_request_id = None
if search_management_request_options is not None:
client_request_id = search_management_request_options.client_request_id
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Search/searchServices/{searchServiceName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'searchServiceName': self._serialize.url("search_service_name", search_service_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
if client_request_id is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
# Construct body
body_content = self._serialize.body(service, 'SearchService')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('SearchService', response)
if response.status_code == 201:
deserialized = self._deserialize('SearchService', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get(
self, resource_group_name, search_service_name, search_management_request_options=None, custom_headers=None, raw=False, **operation_config):
"""Gets the Search service with the given name in the given resource
group.
:param resource_group_name: The name of the resource group within the
current subscription. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param search_service_name: The name of the Azure Search service
associated with the specified resource group.
:type search_service_name: str
:param search_management_request_options: Additional parameters for
the operation
:type search_management_request_options:
:class:`SearchManagementRequestOptions
<azure.mgmt.search.models.SearchManagementRequestOptions>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`SearchService
<azure.mgmt.search.models.SearchService>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
client_request_id = None
if search_management_request_options is not None:
client_request_id = search_management_request_options.client_request_id
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Search/searchServices/{searchServiceName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'searchServiceName': self._serialize.url("search_service_name", search_service_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
if client_request_id is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('SearchService', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def delete(
self, resource_group_name, search_service_name, search_management_request_options=None, custom_headers=None, raw=False, **operation_config):
"""Deletes a Search service in the given resource group, along with its
associated resources.
:param resource_group_name: The name of the resource group within the
current subscription. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param search_service_name: The name of the Azure Search service
associated with the specified resource group.
:type search_service_name: str
:param search_management_request_options: Additional parameters for
the operation
:type search_management_request_options:
:class:`SearchManagementRequestOptions
<azure.mgmt.search.models.SearchManagementRequestOptions>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
client_request_id = None
if search_management_request_options is not None:
client_request_id = search_management_request_options.client_request_id
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Search/searchServices/{searchServiceName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'searchServiceName': self._serialize.url("search_service_name", search_service_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
if client_request_id is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200, 204, 404]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def list_by_resource_group(
self, resource_group_name, search_management_request_options=None, custom_headers=None, raw=False, **operation_config):
"""Gets a list of all Search services in the given resource group.
:param resource_group_name: The name of the resource group within the
current subscription. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param search_management_request_options: Additional parameters for
the operation
:type search_management_request_options:
:class:`SearchManagementRequestOptions
<azure.mgmt.search.models.SearchManagementRequestOptions>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`SearchServicePaged
<azure.mgmt.search.models.SearchServicePaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
client_request_id = None
if search_management_request_options is not None:
client_request_id = search_management_request_options.client_request_id
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Search/searchServices'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
if client_request_id is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.SearchServicePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.SearchServicePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def check_name_availability(
self, name, search_management_request_options=None, custom_headers=None, raw=False, **operation_config):
"""Checks whether or not the given Search service name is available for
use. Search service names must be globally unique since they are part
of the service URI (https://<name>.search.windows.net).
:param name: The Search service name to validate. Search service names
         must only contain lowercase letters, digits or dashes, cannot use a
         dash as the first two characters or the last character, cannot contain
         consecutive
dashes, and must be between 2 and 60 characters in length.
:type name: str
:param search_management_request_options: Additional parameters for
the operation
:type search_management_request_options:
:class:`SearchManagementRequestOptions
<azure.mgmt.search.models.SearchManagementRequestOptions>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`CheckNameAvailabilityOutput
<azure.mgmt.search.models.CheckNameAvailabilityOutput>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
client_request_id = None
if search_management_request_options is not None:
client_request_id = search_management_request_options.client_request_id
check_name_availability_input = models.CheckNameAvailabilityInput(name=name)
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Search/checkNameAvailability'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
if client_request_id is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
# Construct body
body_content = self._serialize.body(check_name_availability_input, 'CheckNameAvailabilityInput')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('CheckNameAvailabilityOutput', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
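# Illustrative usage sketch (not part of the generated client): callers normally
# reach this operation through an authenticated SearchManagementClient. The
# operations attribute (`services`) and the result fields below follow the SDK's
# usual conventions and are assumptions, not taken from this module.
#
#     client = SearchManagementClient(credentials, subscription_id)
#     availability = client.services.check_name_availability('my-search-svc')
#     if not availability.is_name_available:
#         print(availability.reason, availability.message)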
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
r"""This script crawls experiment directories for results and aggregates them.
Usage example:
MODELS_DIR="/tmp/models"
bazel run single_task:aggregate_experiment_results -- \
--models_dir="$MODELS_DIR" \
--max_npe="20M" \
--task_list="add echo" \
--model_types="[('topk', 'v0'), ('ga', 'v0')]" \
--csv_file=/tmp/results_table.csv
"""
import ast
from collections import namedtuple
import csv
import os
import re
import StringIO
import sys
from absl import app
from absl import flags
import numpy as np
import tensorflow as tf
from single_task import misc # brain coder
from single_task import results_lib # brain coder
DEFAULT_MODELS = [('pg', 'v0'), ('topk', 'v0'), ('ga', 'v0'), ('rand', 'v0')]
DEFAULT_TASKS = [
'reverse', 'remove-char', 'count-char', 'add', 'bool-logic', 'print-hello',
'echo-twice', 'echo-thrice', 'copy-reverse', 'zero-cascade', 'cascade',
'shift-left', 'shift-right', 'riffle', 'unriffle', 'middle-char',
'remove-last', 'remove-last-two', 'echo-alternating', 'echo-half', 'length',
'echo-second-seq', 'echo-nth-seq', 'substring', 'divide-2', 'dedup']
FLAGS = flags.FLAGS
flags.DEFINE_string(
'models_dir', '',
'Absolute path where results folders are found.')
flags.DEFINE_string(
'exp_prefix', 'bf_rl_iclr',
'Prefix for all experiment folders.')
flags.DEFINE_string(
'max_npe', '5M',
'String representation of max NPE of the experiments.')
flags.DEFINE_spaceseplist(
'task_list', DEFAULT_TASKS,
'List of task names separated by spaces. If empty string, defaults to '
'`DEFAULT_TASKS`. These are the rows of the results table.')
flags.DEFINE_string(
'model_types', str(DEFAULT_MODELS),
'String representation of a python list of 2-tuples, each a model_type + '
'job description pair. Descriptions allow you to choose among different '
'runs of the same experiment. These are the columns of the results table.')
flags.DEFINE_string(
'csv_file', '/tmp/results_table.csv',
'Where to write results table. Format is CSV.')
flags.DEFINE_enum(
'data', 'success_rates', ['success_rates', 'code'],
'What type of data to aggregate.')
def make_csv_string(table):
"""Convert 2D list to CSV string."""
s = StringIO.StringIO()
writer = csv.writer(s)
writer.writerows(table)
value = s.getvalue()
s.close()
return value
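# Example (illustrative): make_csv_string([['a', 'b'], [1, 2]]) produces the
# string 'a,b\r\n1,2\r\n' (the csv module's default excel dialect).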
def process_results(metrics):
"""Extract useful information from given metrics.
Args:
metrics: List of results dicts. These should have been written to disk by
training jobs.
Returns:
Dict mapping stats names to values.
Raises:
    ValueError: If max_npe or max_global_repetitions values are inconsistent
across dicts in the `metrics` list.
"""
count = len(metrics)
success_count = 0
total_npe = 0 # Counting NPE across all runs.
success_npe = 0 # Counting NPE in successful runs only.
max_npe = 0
max_repetitions = 0
for metric_dict in metrics:
if not max_npe:
max_npe = metric_dict['max_npe']
elif max_npe != metric_dict['max_npe']:
raise ValueError(
'Invalid experiment. Different reps have different max-NPE settings.')
if not max_repetitions:
max_repetitions = metric_dict['max_global_repetitions']
elif max_repetitions != metric_dict['max_global_repetitions']:
raise ValueError(
'Invalid experiment. Different reps have different num-repetition '
'settings.')
if metric_dict['found_solution']:
success_count += 1
success_npe += metric_dict['npe']
total_npe += metric_dict['npe']
stats = {}
stats['max_npe'] = max_npe
stats['max_repetitions'] = max_repetitions
stats['repetitions'] = count
stats['successes'] = success_count # successful reps
stats['failures'] = count - success_count # failed reps
stats['success_npe'] = success_npe
stats['total_npe'] = total_npe
if success_count:
# Only successful runs counted.
stats['avg_success_npe'] = stats['success_npe'] / float(success_count)
else:
stats['avg_success_npe'] = 0.0
if count:
stats['success_rate'] = success_count / float(count)
stats['avg_total_npe'] = stats['total_npe'] / float(count)
else:
stats['success_rate'] = 0.0
stats['avg_total_npe'] = 0.0
return stats
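# Illustrative example with made-up numbers: for two repetition dicts
#   {'max_npe': 5000000, 'max_global_repetitions': 25, 'found_solution': True, 'npe': 12000}
#   {'max_npe': 5000000, 'max_global_repetitions': 25, 'found_solution': False, 'npe': 5000000}
# process_results reports repetitions=2, successes=1, success_rate=0.5,
# avg_success_npe=12000.0, and avg_total_npe=2506000.0.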
ProcessedResults = namedtuple('ProcessedResults', ['metrics', 'processed'])
def get_results_for_experiment(
models_dir, task_name, model_type='pg', max_npe='5M', desc='v0',
name_prefix='bf_rl_paper', extra_desc=''):
"""Get and process results for a given experiment.
An experiment is a set of runs with the same hyperparameters and environment.
It is uniquely specified by a (task_name, model_type, max_npe) triple, as
well as an optional description.
We assume that each experiment has a folder with the same name as the job that
ran the experiment. The name is computed by
"%name_prefix%.%desc%-%max_npe%_%task_name%".
Args:
models_dir: Parent directory containing experiment folders.
task_name: String name of task (the coding env). See code_tasks.py or
run_eval_tasks.py
model_type: Name of the algorithm, such as 'pg', 'topk', 'ga', 'rand'.
max_npe: String SI unit representation of the maximum NPE threshold for the
experiment. For example, "5M" means 5 million.
desc: Description.
name_prefix: Prefix of job names. Normally leave this as default.
extra_desc: Optional extra description at the end of the job name.
Returns:
ProcessedResults namedtuple instance, containing
metrics: Raw dicts read from disk.
processed: Stats computed by `process_results`.
Raises:
ValueError: If max_npe in the metrics does not match NPE in the experiment
folder name.
"""
folder = name_prefix + '.{0}.{1}-{2}_{3}'.format(desc, model_type, max_npe,
task_name)
if extra_desc:
folder += '.' + extra_desc
results = results_lib.Results(os.path.join(models_dir, folder))
metrics, _ = results.read_all()
processed = process_results(metrics)
if (not np.isclose(processed['max_npe'], misc.si_to_int(max_npe))
and processed['repetitions']):
raise ValueError(
'Invalid experiment. Max-NPE setting does not match expected max-NPE '
'in experiment name.')
return ProcessedResults(metrics=metrics, processed=processed)
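# Example (hypothetical arguments): name_prefix='bf_rl_paper', desc='v0',
# model_type='topk', max_npe='5M', task_name='add' maps to the folder
# 'bf_rl_paper.v0.topk-5M_add'.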
BestCodeResults = namedtuple(
'BestCodeResults',
['code', 'reward', 'npe', 'folder', 'finished', 'error'])
class BestCodeResultError(object):
success = 0
no_solution_found = 1
experiment_does_not_exist = 2
def get_best_code_for_experiment(
    models_dir, task_name, model_type='pg', max_npe='5M', desc='v0',
name_prefix='bf_rl_paper', extra_desc=''):
"""Like `get_results_for_experiment`, but fetches the code solutions."""
folder = name_prefix + '.{0}.{1}-{2}_{3}'.format(desc, model_type, max_npe,
task_name)
if extra_desc:
folder += '.' + extra_desc
log_dir = os.path.join(models_dir, folder, 'logs')
search_regex = r'^solutions_([0-9])+\.txt$'
try:
all_children = tf.gfile.ListDirectory(log_dir)
except tf.errors.NotFoundError:
return BestCodeResults(
code=None, reward=0.0, npe=0, folder=folder, finished=False,
error=BestCodeResultError.experiment_does_not_exist)
solution_files = [
fname for fname in all_children if re.search(search_regex, fname)]
max_reward = 0.0
npe = 0
best_code = None
for fname in solution_files:
with tf.gfile.FastGFile(os.path.join(log_dir, fname), 'r') as reader:
results = [ast.literal_eval(entry) for entry in reader]
for res in results:
if res['reward'] > max_reward:
best_code = res['code']
max_reward = res['reward']
npe = res['npe']
error = (
BestCodeResultError.success if best_code
else BestCodeResultError.no_solution_found)
try:
# If there is a status.txt file, check if it contains the status of the job.
with tf.gfile.FastGFile(os.path.join(log_dir, 'status.txt'), 'r') as f:
# Job is done, so mark this experiment as finished.
finished = f.read().lower().strip() == 'done'
except tf.errors.NotFoundError:
# No status file has been written, so the experiment is not done. No need to
# report an error here, because we do not require that experiment jobs write
# out a status.txt file until they have finished.
finished = False
return BestCodeResults(
code=best_code, reward=max_reward, npe=npe, folder=folder,
finished=finished, error=error)
def make_results_table(
models=None,
tasks=None,
max_npe='5M',
name_prefix='bf_rl_paper',
extra_desc='',
models_dir='/tmp'):
"""Creates a table of results: algorithm + version by tasks.
Args:
models: The table columns. A list of (algorithm, desc) tuples.
tasks: The table rows. List of task names.
max_npe: String SI unit representation of the maximum NPE threshold for the
experiment. For example, "5M" means 5 million. All entries in the table
share the same max-NPE.
name_prefix: Name prefix used in logging directory for the experiment.
extra_desc: Extra description added to name of logging directory for the
experiment.
models_dir: Parent directory containing all experiment folders.
Returns:
A 2D list holding the table cells.
"""
if models is None:
models = DEFAULT_MODELS
if tasks is None:
tasks = DEFAULT_TASKS
model_results = {}
for model_type, desc in models:
model_results[model_type] = {
tname: get_results_for_experiment(
models_dir, tname, model_type, max_npe, desc,
name_prefix=name_prefix, extra_desc=extra_desc
).processed
for tname in tasks}
def info(stats):
return [str(stats['repetitions']),
'%.2f' % stats['success_rate'],
str(int(stats['avg_total_npe']))]
rows = [['max NPE: ' + max_npe]
+ misc.flatten([['{0} ({1})'.format(m, d), '', '']
for m, d in models])]
rows.append(
[''] + misc.flatten([['reps', 'success rate', 'avg NPE']
for _ in models]))
for tname in tasks:
rows.append(
[tname]
+ misc.flatten([info(model_results[model][tname])
for model, _ in models]))
return rows
def print_results_table(results_table):
"""Print human readable results table to stdout."""
print('')
print('=== Results Table ===')
print('Format: # reps [success rate, avg total NPE]')
def info_str(info_row):
# num_runs (success_rate, avg_total_npe)
if not info_row[0]:
return '0'
return '%s [%s, %s]' % (str(info_row[0]).ljust(2), info_row[1], info_row[2])
nc = len(results_table[0]) # num cols
out_table = [
[results_table[0][0]] + [results_table[0][i] for i in range(1, nc, 3)]]
for row in results_table[2:]:
out_table.append([row[0]] + [info_str(row[i:i+3]) for i in range(1, nc, 3)])
nc = len(out_table[0]) # num cols
col_widths = [max(len(row[col]) for row in out_table) for col in range(nc)]
table_string = ''
for row in out_table:
table_string += ''.join(
[row[c].ljust(col_widths[c] + 2) for c in range(nc)]) + '\n'
print(table_string)
def main(argv):
del argv # Unused.
name_prefix = FLAGS.exp_prefix
print('Experiments prefix: %s' % name_prefix)
model_types = ast.literal_eval(FLAGS.model_types)
if FLAGS.data == 'success_rates':
results_table = make_results_table(
models=model_types, tasks=FLAGS.task_list, max_npe=FLAGS.max_npe,
models_dir=FLAGS.models_dir,
name_prefix=name_prefix, extra_desc='')
with tf.gfile.FastGFile(FLAGS.csv_file, 'w') as f:
f.write(make_csv_string(results_table))
print_results_table(results_table)
else:
# Best code
print('* = experiment is still running')
print('')
print('=== Best Synthesized Code ===')
for model_type, desc in model_types:
print('%s (%s)' % (model_type, desc))
sys.stdout.flush()
for tname in FLAGS.task_list:
res = get_best_code_for_experiment(
FLAGS.models_dir, tname, model_type, FLAGS.max_npe, desc,
name_prefix=name_prefix, extra_desc='')
unfinished_mark = '' if res.finished else ' *'
tname += unfinished_mark
if res.error == BestCodeResultError.success:
print(' %s' % tname)
print(' %s' % res.code)
print(' R=%.6f, NPE=%s' % (res.reward, misc.int_to_si(res.npe)))
elif res.error == BestCodeResultError.experiment_does_not_exist:
print(' Experiment does not exist. Check arguments.')
print(' Experiment folder: %s' % res.folder)
break
else:
print(' %s' % tname)
print(' (none)')
sys.stdout.flush()
if __name__ == '__main__':
app.run(main)
|
|
import asyncio
import pytest
import aioredis
async def _reader(channel, output, waiter, conn):
await conn.execute('subscribe', channel)
ch = conn.pubsub_channels[channel]
waiter.set_result(conn)
while await ch.wait_message():
msg = await ch.get()
await output.put(msg)
@pytest.mark.run_loop
async def test_publish(create_connection, redis, server, loop):
out = asyncio.Queue(loop=loop)
fut = loop.create_future()
conn = await create_connection(
server.tcp_address, loop=loop)
sub = asyncio.ensure_future(_reader('chan:1', out, fut, conn), loop=loop)
await fut
await redis.publish('chan:1', 'Hello')
msg = await out.get()
assert msg == b'Hello'
sub.cancel()
@pytest.mark.run_loop
async def test_publish_json(create_connection, redis, server, loop):
out = asyncio.Queue(loop=loop)
fut = loop.create_future()
conn = await create_connection(
server.tcp_address, loop=loop)
sub = asyncio.ensure_future(_reader('chan:1', out, fut, conn), loop=loop)
await fut
res = await redis.publish_json('chan:1', {"Hello": "world"})
    assert res == 1  # receivers
msg = await out.get()
assert msg == b'{"Hello": "world"}'
sub.cancel()
@pytest.mark.run_loop
async def test_subscribe(redis):
res = await redis.subscribe('chan:1', 'chan:2')
assert redis.in_pubsub == 2
ch1 = redis.channels['chan:1']
ch2 = redis.channels['chan:2']
assert res == [ch1, ch2]
assert ch1.is_pattern is False
assert ch2.is_pattern is False
res = await redis.unsubscribe('chan:1', 'chan:2')
assert res == [[b'unsubscribe', b'chan:1', 1],
[b'unsubscribe', b'chan:2', 0]]
@pytest.mark.parametrize('create_redis', [
pytest.param(aioredis.create_redis_pool, id='pool'),
])
@pytest.mark.run_loop
async def test_subscribe_empty_pool(create_redis, server, loop, _closable):
redis = await create_redis(server.tcp_address, loop=loop)
_closable(redis)
await redis.connection.clear()
res = await redis.subscribe('chan:1', 'chan:2')
assert redis.in_pubsub == 2
ch1 = redis.channels['chan:1']
ch2 = redis.channels['chan:2']
assert res == [ch1, ch2]
assert ch1.is_pattern is False
assert ch2.is_pattern is False
res = await redis.unsubscribe('chan:1', 'chan:2')
assert res == [[b'unsubscribe', b'chan:1', 1],
[b'unsubscribe', b'chan:2', 0]]
@pytest.mark.run_loop
async def test_psubscribe(redis, create_redis, server, loop):
sub = redis
res = await sub.psubscribe('patt:*', 'chan:*')
assert sub.in_pubsub == 2
pat1 = sub.patterns['patt:*']
pat2 = sub.patterns['chan:*']
assert res == [pat1, pat2]
pub = await create_redis(
server.tcp_address, loop=loop)
await pub.publish_json('chan:123', {"Hello": "World"})
res = await pat2.get_json()
assert res == (b'chan:123', {"Hello": "World"})
res = await sub.punsubscribe('patt:*', 'patt:*', 'chan:*')
assert res == [[b'punsubscribe', b'patt:*', 1],
[b'punsubscribe', b'patt:*', 1],
[b'punsubscribe', b'chan:*', 0],
]
@pytest.mark.parametrize('create_redis', [
pytest.param(aioredis.create_redis_pool, id='pool'),
])
@pytest.mark.run_loop
async def test_psubscribe_empty_pool(create_redis, server, loop, _closable):
sub = await create_redis(server.tcp_address, loop=loop)
pub = await create_redis(server.tcp_address, loop=loop)
_closable(sub)
_closable(pub)
await sub.connection.clear()
res = await sub.psubscribe('patt:*', 'chan:*')
assert sub.in_pubsub == 2
pat1 = sub.patterns['patt:*']
pat2 = sub.patterns['chan:*']
assert res == [pat1, pat2]
await pub.publish_json('chan:123', {"Hello": "World"})
res = await pat2.get_json()
assert res == (b'chan:123', {"Hello": "World"})
res = await sub.punsubscribe('patt:*', 'patt:*', 'chan:*')
assert res == [[b'punsubscribe', b'patt:*', 1],
[b'punsubscribe', b'patt:*', 1],
[b'punsubscribe', b'chan:*', 0],
]
@pytest.redis_version(
2, 8, 0, reason='PUBSUB CHANNELS is available since redis>=2.8.0')
@pytest.mark.run_loop
async def test_pubsub_channels(create_redis, server, loop):
redis = await create_redis(
server.tcp_address, loop=loop)
res = await redis.pubsub_channels()
assert res == []
res = await redis.pubsub_channels('chan:*')
assert res == []
sub = await create_redis(
server.tcp_address, loop=loop)
await sub.subscribe('chan:1')
res = await redis.pubsub_channels()
assert res == [b'chan:1']
res = await redis.pubsub_channels('ch*')
assert res == [b'chan:1']
await sub.unsubscribe('chan:1')
await sub.psubscribe('chan:*')
res = await redis.pubsub_channels()
assert res == []
@pytest.redis_version(
2, 8, 0, reason='PUBSUB NUMSUB is available since redis>=2.8.0')
@pytest.mark.run_loop
async def test_pubsub_numsub(create_redis, server, loop):
redis = await create_redis(
server.tcp_address, loop=loop)
res = await redis.pubsub_numsub()
assert res == {}
res = await redis.pubsub_numsub('chan:1')
assert res == {b'chan:1': 0}
sub = await create_redis(
server.tcp_address, loop=loop)
await sub.subscribe('chan:1')
res = await redis.pubsub_numsub()
assert res == {}
res = await redis.pubsub_numsub('chan:1')
assert res == {b'chan:1': 1}
res = await redis.pubsub_numsub('chan:2')
assert res == {b'chan:2': 0}
res = await redis.pubsub_numsub('chan:1', 'chan:2')
assert res == {b'chan:1': 1, b'chan:2': 0}
await sub.unsubscribe('chan:1')
await sub.psubscribe('chan:*')
res = await redis.pubsub_numsub()
assert res == {}
@pytest.redis_version(
2, 8, 0, reason='PUBSUB NUMPAT is available since redis>=2.8.0')
@pytest.mark.run_loop
async def test_pubsub_numpat(create_redis, server, loop, redis):
sub = await create_redis(
server.tcp_address, loop=loop)
res = await redis.pubsub_numpat()
assert res == 0
await sub.subscribe('chan:1')
res = await redis.pubsub_numpat()
assert res == 0
await sub.psubscribe('chan:*')
res = await redis.pubsub_numpat()
assert res == 1
@pytest.mark.run_loop
async def test_close_pubsub_channels(redis, loop):
ch, = await redis.subscribe('chan:1')
async def waiter(ch):
assert not await ch.wait_message()
tsk = asyncio.ensure_future(waiter(ch), loop=loop)
redis.close()
await redis.wait_closed()
await tsk
@pytest.mark.run_loop
async def test_close_pubsub_patterns(redis, loop):
ch, = await redis.psubscribe('chan:*')
async def waiter(ch):
assert not await ch.wait_message()
tsk = asyncio.ensure_future(waiter(ch), loop=loop)
redis.close()
await redis.wait_closed()
await tsk
@pytest.mark.run_loop
async def test_close_cancelled_pubsub_channel(redis, loop):
ch, = await redis.subscribe('chan:1')
async def waiter(ch):
with pytest.raises(asyncio.CancelledError):
await ch.wait_message()
tsk = asyncio.ensure_future(waiter(ch), loop=loop)
await asyncio.sleep(0, loop=loop)
tsk.cancel()
@pytest.mark.run_loop
async def test_channel_get_after_close(create_redis, loop, server):
sub = await create_redis(
server.tcp_address, loop=loop)
pub = await create_redis(
server.tcp_address, loop=loop)
ch, = await sub.subscribe('chan:1')
await pub.publish('chan:1', 'message')
assert await ch.get() == b'message'
loop.call_soon(sub.close)
assert await ch.get() is None
with pytest.raises(aioredis.ChannelClosedError):
assert await ch.get()
@pytest.mark.run_loop
async def test_subscribe_concurrency(create_redis, server, loop):
sub = await create_redis(
server.tcp_address, loop=loop)
pub = await create_redis(
server.tcp_address, loop=loop)
async def subscribe(*args):
return await sub.subscribe(*args)
async def publish(*args):
await asyncio.sleep(0, loop=loop)
return await pub.publish(*args)
res = await asyncio.gather(
subscribe('channel:0'),
publish('channel:0', 'Hello'),
subscribe('channel:1'),
loop=loop)
(ch1,), subs, (ch2,) = res
assert ch1.name == b'channel:0'
assert subs == 1
assert ch2.name == b'channel:1'
@pytest.redis_version(
3, 2, 0, reason='PUBSUB PING is available since redis>=3.2.0')
@pytest.mark.run_loop
async def test_pubsub_ping(redis):
await redis.subscribe('chan:1', 'chan:2')
res = await redis.ping()
assert res == b'PONG'
res = await redis.ping('Hello')
assert res == b'Hello'
res = await redis.ping('Hello', encoding='utf-8')
assert res == 'Hello'
await redis.unsubscribe('chan:1', 'chan:2')
@pytest.mark.run_loop
async def test_pubsub_channel_iter(create_redis, server, loop):
sub = await create_redis(server.tcp_address, loop=loop)
pub = await create_redis(server.tcp_address, loop=loop)
ch, = await sub.subscribe('chan:1')
async def coro(ch):
lst = []
async for msg in ch.iter():
lst.append(msg)
return lst
tsk = asyncio.ensure_future(coro(ch), loop=loop)
await pub.publish_json('chan:1', {'Hello': 'World'})
await pub.publish_json('chan:1', ['message'])
await asyncio.sleep(0, loop=loop)
ch.close()
assert await tsk == [b'{"Hello": "World"}', b'["message"]']
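# Minimal sketch of the pub/sub pattern the tests above exercise (assumes a
# local Redis server and aioredis 1.x; address and payload are illustrative):
#
#     sub = await aioredis.create_redis('redis://localhost')
#     pub = await aioredis.create_redis('redis://localhost')
#     ch, = await sub.subscribe('chan:1')
#     await pub.publish('chan:1', 'payload')
#     assert await ch.get() == b'payload'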
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import pprint
import re
import socket
import sys
import types
import uuid
import eventlet
import greenlet
from oslo.config import cfg
from heat.openstack.common import excutils
from heat.openstack.common.gettextutils import _ # noqa
from heat.openstack.common import importutils
from heat.openstack.common import jsonutils
from heat.openstack.common.rpc import common as rpc_common
zmq = importutils.try_import('eventlet.green.zmq')
# Aliased here for convenience; these are not modified.
pformat = pprint.pformat
Timeout = eventlet.timeout.Timeout
LOG = rpc_common.LOG
RemoteError = rpc_common.RemoteError
RPCException = rpc_common.RPCException
zmq_opts = [
cfg.StrOpt('rpc_zmq_bind_address', default='*',
help='ZeroMQ bind address. Should be a wildcard (*), '
'an ethernet interface, or IP. '
'The "host" option should point or resolve to this '
'address.'),
# The module.Class to use for matchmaking.
cfg.StrOpt(
'rpc_zmq_matchmaker',
default=('heat.openstack.common.rpc.'
'matchmaker.MatchMakerLocalhost'),
help='MatchMaker driver',
),
# The following port is unassigned by IANA as of 2012-05-21
cfg.IntOpt('rpc_zmq_port', default=9501,
help='ZeroMQ receiver listening port'),
cfg.IntOpt('rpc_zmq_contexts', default=1,
help='Number of ZeroMQ contexts, defaults to 1'),
cfg.IntOpt('rpc_zmq_topic_backlog', default=None,
help='Maximum number of ingress messages to locally buffer '
'per topic. Default is unlimited.'),
cfg.StrOpt('rpc_zmq_ipc_dir', default='/var/run/openstack',
help='Directory for holding IPC sockets'),
cfg.StrOpt('rpc_zmq_host', default=socket.gethostname(),
help='Name of this node. Must be a valid hostname, FQDN, or '
'IP address. Must match "host" option, if running Nova.')
]
CONF = cfg.CONF
CONF.register_opts(zmq_opts)
ZMQ_CTX = None # ZeroMQ Context, must be global.
matchmaker = None # memoized matchmaker object
def _serialize(data):
"""Serialization wrapper.
We prefer using JSON, but it cannot encode all types.
Error if a developer passes us bad data.
"""
try:
return jsonutils.dumps(data, ensure_ascii=True)
except TypeError:
with excutils.save_and_reraise_exception():
LOG.error(_("JSON serialization failed."))
def _deserialize(data):
"""Deserialization wrapper."""
LOG.debug(_("Deserializing: %s"), data)
return jsonutils.loads(data)
class ZmqSocket(object):
"""A tiny wrapper around ZeroMQ.
Simplifies the send/recv protocol and connection management.
Can be used as a Context (supports the 'with' statement).
"""
def __init__(self, addr, zmq_type, bind=True, subscribe=None):
self.sock = _get_ctxt().socket(zmq_type)
self.addr = addr
self.type = zmq_type
self.subscriptions = []
# Support failures on sending/receiving on wrong socket type.
self.can_recv = zmq_type in (zmq.PULL, zmq.SUB)
self.can_send = zmq_type in (zmq.PUSH, zmq.PUB)
self.can_sub = zmq_type in (zmq.SUB, )
# Support list, str, & None for subscribe arg (cast to list)
do_sub = {
list: subscribe,
str: [subscribe],
type(None): []
}[type(subscribe)]
for f in do_sub:
self.subscribe(f)
str_data = {'addr': addr, 'type': self.socket_s(),
'subscribe': subscribe, 'bind': bind}
LOG.debug(_("Connecting to %(addr)s with %(type)s"), str_data)
LOG.debug(_("-> Subscribed to %(subscribe)s"), str_data)
LOG.debug(_("-> bind: %(bind)s"), str_data)
try:
if bind:
self.sock.bind(addr)
else:
self.sock.connect(addr)
except Exception:
raise RPCException(_("Could not open socket."))
def socket_s(self):
"""Get socket type as string."""
t_enum = ('PUSH', 'PULL', 'PUB', 'SUB', 'REP', 'REQ', 'ROUTER',
'DEALER')
return dict(map(lambda t: (getattr(zmq, t), t), t_enum))[self.type]
def subscribe(self, msg_filter):
"""Subscribe."""
if not self.can_sub:
raise RPCException("Cannot subscribe on this socket.")
LOG.debug(_("Subscribing to %s"), msg_filter)
try:
self.sock.setsockopt(zmq.SUBSCRIBE, msg_filter)
except Exception:
return
self.subscriptions.append(msg_filter)
def unsubscribe(self, msg_filter):
"""Unsubscribe."""
if msg_filter not in self.subscriptions:
return
self.sock.setsockopt(zmq.UNSUBSCRIBE, msg_filter)
self.subscriptions.remove(msg_filter)
def close(self):
if self.sock is None or self.sock.closed:
return
# We must unsubscribe, or we'll leak descriptors.
if self.subscriptions:
for f in self.subscriptions:
try:
self.sock.setsockopt(zmq.UNSUBSCRIBE, f)
except Exception:
pass
self.subscriptions = []
try:
# Default is to linger
self.sock.close()
except Exception:
# While this is a bad thing to happen,
# it would be much worse if some of the code calling this
            # were to fail. For now, let's log, and later evaluate
# if we can safely raise here.
LOG.error("ZeroMQ socket could not be closed.")
self.sock = None
def recv(self, **kwargs):
if not self.can_recv:
raise RPCException(_("You cannot recv on this socket."))
return self.sock.recv_multipart(**kwargs)
def send(self, data, **kwargs):
if not self.can_send:
raise RPCException(_("You cannot send on this socket."))
self.sock.send_multipart(data, **kwargs)
class ZmqClient(object):
"""Client for ZMQ sockets."""
def __init__(self, addr):
self.outq = ZmqSocket(addr, zmq.PUSH, bind=False)
def cast(self, msg_id, topic, data, envelope):
msg_id = msg_id or 0
if not envelope:
self.outq.send(map(bytes,
(msg_id, topic, 'cast', _serialize(data))))
return
rpc_envelope = rpc_common.serialize_msg(data[1], envelope)
zmq_msg = reduce(lambda x, y: x + y, rpc_envelope.items())
self.outq.send(map(bytes,
(msg_id, topic, 'impl_zmq_v2', data[0]) + zmq_msg))
def close(self):
self.outq.close()
class RpcContext(rpc_common.CommonRpcContext):
"""Context that supports replying to a rpc.call."""
def __init__(self, **kwargs):
self.replies = []
super(RpcContext, self).__init__(**kwargs)
def deepcopy(self):
values = self.to_dict()
values['replies'] = self.replies
return self.__class__(**values)
def reply(self, reply=None, failure=None, ending=False):
if ending:
return
self.replies.append(reply)
@classmethod
def marshal(self, ctx):
ctx_data = ctx.to_dict()
return _serialize(ctx_data)
@classmethod
def unmarshal(self, data):
return RpcContext.from_dict(_deserialize(data))
class InternalContext(object):
"""Used by ConsumerBase as a private context for - methods."""
def __init__(self, proxy):
self.proxy = proxy
self.msg_waiter = None
def _get_response(self, ctx, proxy, topic, data):
"""Process a curried message and cast the result to topic."""
LOG.debug(_("Running func with context: %s"), ctx.to_dict())
data.setdefault('version', None)
data.setdefault('args', {})
try:
result = proxy.dispatch(
ctx, data['version'], data['method'],
data.get('namespace'), **data['args'])
return ConsumerBase.normalize_reply(result, ctx.replies)
except greenlet.GreenletExit:
# ignore these since they are just from shutdowns
pass
except rpc_common.ClientException as e:
LOG.debug(_("Expected exception during message handling (%s)") %
e._exc_info[1])
return {'exc':
rpc_common.serialize_remote_exception(e._exc_info,
log_failure=False)}
except Exception:
LOG.error(_("Exception during message handling"))
return {'exc':
rpc_common.serialize_remote_exception(sys.exc_info())}
def reply(self, ctx, proxy,
msg_id=None, context=None, topic=None, msg=None):
"""Reply to a casted call."""
# NOTE(ewindisch): context kwarg exists for Grizzly compat.
# this may be able to be removed earlier than
# 'I' if ConsumerBase.process were refactored.
if type(msg) is list:
payload = msg[-1]
else:
payload = msg
response = ConsumerBase.normalize_reply(
self._get_response(ctx, proxy, topic, payload),
ctx.replies)
LOG.debug(_("Sending reply"))
_multi_send(_cast, ctx, topic, {
'method': '-process_reply',
'args': {
'msg_id': msg_id, # Include for Folsom compat.
'response': response
}
}, _msg_id=msg_id)
class ConsumerBase(object):
"""Base Consumer."""
def __init__(self):
self.private_ctx = InternalContext(None)
@classmethod
def normalize_reply(self, result, replies):
#TODO(ewindisch): re-evaluate and document this method.
if isinstance(result, types.GeneratorType):
return list(result)
elif replies:
return replies
else:
return [result]
def process(self, proxy, ctx, data):
data.setdefault('version', None)
data.setdefault('args', {})
        # Methods starting with '-' are
        # processed internally (not a valid public method name).
method = data.get('method')
if not method:
LOG.error(_("RPC message did not include method."))
return
# Internal method
# uses internal context for safety.
if method == '-reply':
self.private_ctx.reply(ctx, proxy, **data['args'])
return
proxy.dispatch(ctx, data['version'],
data['method'], data.get('namespace'), **data['args'])
class ZmqBaseReactor(ConsumerBase):
"""A consumer class implementing a centralized casting broker (PULL-PUSH).
Used for RoundRobin requests.
"""
def __init__(self, conf):
super(ZmqBaseReactor, self).__init__()
self.proxies = {}
self.threads = []
self.sockets = []
self.subscribe = {}
self.pool = eventlet.greenpool.GreenPool(conf.rpc_thread_pool_size)
def register(self, proxy, in_addr, zmq_type_in,
in_bind=True, subscribe=None):
LOG.info(_("Registering reactor"))
if zmq_type_in not in (zmq.PULL, zmq.SUB):
raise RPCException("Bad input socktype")
# Items push in.
inq = ZmqSocket(in_addr, zmq_type_in, bind=in_bind,
subscribe=subscribe)
self.proxies[inq] = proxy
self.sockets.append(inq)
LOG.info(_("In reactor registered"))
def consume_in_thread(self):
def _consume(sock):
LOG.info(_("Consuming socket"))
while True:
self.consume(sock)
for k in self.proxies.keys():
self.threads.append(
self.pool.spawn(_consume, k)
)
def wait(self):
for t in self.threads:
t.wait()
def close(self):
for s in self.sockets:
s.close()
for t in self.threads:
t.kill()
class ZmqProxy(ZmqBaseReactor):
"""A consumer class implementing a topic-based proxy.
Forwards to IPC sockets.
"""
def __init__(self, conf):
super(ZmqProxy, self).__init__(conf)
pathsep = set((os.path.sep or '', os.path.altsep or '', '/', '\\'))
self.badchars = re.compile(r'[%s]' % re.escape(''.join(pathsep)))
self.topic_proxy = {}
def consume(self, sock):
ipc_dir = CONF.rpc_zmq_ipc_dir
data = sock.recv(copy=False)
topic = data[1].bytes
if topic.startswith('fanout~'):
sock_type = zmq.PUB
topic = topic.split('.', 1)[0]
elif topic.startswith('zmq_replies'):
sock_type = zmq.PUB
else:
sock_type = zmq.PUSH
if topic not in self.topic_proxy:
def publisher(waiter):
LOG.info(_("Creating proxy for topic: %s"), topic)
try:
# The topic is received over the network,
# don't trust this input.
if self.badchars.search(topic) is not None:
emsg = _("Topic contained dangerous characters.")
LOG.warn(emsg)
raise RPCException(emsg)
out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" %
(ipc_dir, topic),
sock_type, bind=True)
except RPCException:
waiter.send_exception(*sys.exc_info())
return
self.topic_proxy[topic] = eventlet.queue.LightQueue(
CONF.rpc_zmq_topic_backlog)
self.sockets.append(out_sock)
# It takes some time for a pub socket to open,
# before we can have any faith in doing a send() to it.
if sock_type == zmq.PUB:
eventlet.sleep(.5)
waiter.send(True)
while(True):
data = self.topic_proxy[topic].get()
out_sock.send(data, copy=False)
wait_sock_creation = eventlet.event.Event()
eventlet.spawn(publisher, wait_sock_creation)
try:
wait_sock_creation.wait()
except RPCException:
LOG.error(_("Topic socket file creation failed."))
return
try:
self.topic_proxy[topic].put_nowait(data)
except eventlet.queue.Full:
LOG.error(_("Local per-topic backlog buffer full for topic "
"%(topic)s. Dropping message.") % {'topic': topic})
def consume_in_thread(self):
"""Runs the ZmqProxy service."""
ipc_dir = CONF.rpc_zmq_ipc_dir
consume_in = "tcp://%s:%s" % \
(CONF.rpc_zmq_bind_address,
CONF.rpc_zmq_port)
consumption_proxy = InternalContext(None)
try:
os.makedirs(ipc_dir)
except os.error:
if not os.path.isdir(ipc_dir):
with excutils.save_and_reraise_exception():
LOG.error(_("Required IPC directory does not exist at"
" %s") % (ipc_dir, ))
try:
self.register(consumption_proxy,
consume_in,
zmq.PULL)
except zmq.ZMQError:
if os.access(ipc_dir, os.X_OK):
with excutils.save_and_reraise_exception():
LOG.error(_("Permission denied to IPC directory at"
" %s") % (ipc_dir, ))
with excutils.save_and_reraise_exception():
LOG.error(_("Could not create ZeroMQ receiver daemon. "
"Socket may already be in use."))
super(ZmqProxy, self).consume_in_thread()
def unflatten_envelope(packenv):
"""Unflattens the RPC envelope.
Takes a list and returns a dictionary.
    e.g. [1, 2, 3, 4] => {1: 2, 3: 4}
"""
i = iter(packenv)
h = {}
try:
while True:
k = i.next()
h[k] = i.next()
except StopIteration:
return h
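# Illustrative example: unflatten_envelope(['oslo.version', '2.0',
# 'oslo.message', '{"method": "echo"}']) returns
# {'oslo.version': '2.0', 'oslo.message': '{"method": "echo"}'}.
# (The key names are only examples of a flattened envelope.)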
class ZmqReactor(ZmqBaseReactor):
"""A consumer class implementing a consumer for messages.
Can also be used as a 1:1 proxy
"""
def __init__(self, conf):
super(ZmqReactor, self).__init__(conf)
def consume(self, sock):
#TODO(ewindisch): use zero-copy (i.e. references, not copying)
data = sock.recv()
LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data)
proxy = self.proxies[sock]
if data[2] == 'cast': # Legacy protocol
packenv = data[3]
ctx, msg = _deserialize(packenv)
request = rpc_common.deserialize_msg(msg)
ctx = RpcContext.unmarshal(ctx)
elif data[2] == 'impl_zmq_v2':
packenv = data[4:]
msg = unflatten_envelope(packenv)
request = rpc_common.deserialize_msg(msg)
# Unmarshal only after verifying the message.
ctx = RpcContext.unmarshal(data[3])
else:
LOG.error(_("ZMQ Envelope version unsupported or unknown."))
return
self.pool.spawn_n(self.process, proxy, ctx, request)
class Connection(rpc_common.Connection):
"""Manages connections and threads."""
def __init__(self, conf):
self.topics = []
self.reactor = ZmqReactor(conf)
def create_consumer(self, topic, proxy, fanout=False):
# Register with matchmaker.
_get_matchmaker().register(topic, CONF.rpc_zmq_host)
# Subscription scenarios
if fanout:
sock_type = zmq.SUB
            subscribe = ('', fanout)[type(fanout) == str]  # subscribe to the fanout string when given, otherwise to everything ('')
topic = 'fanout~' + topic.split('.', 1)[0]
else:
sock_type = zmq.PULL
subscribe = None
topic = '.'.join((topic.split('.', 1)[0], CONF.rpc_zmq_host))
if topic in self.topics:
LOG.info(_("Skipping topic registration. Already registered."))
return
# Receive messages from (local) proxy
inaddr = "ipc://%s/zmq_topic_%s" % \
(CONF.rpc_zmq_ipc_dir, topic)
LOG.debug(_("Consumer is a zmq.%s"),
['PULL', 'SUB'][sock_type == zmq.SUB])
self.reactor.register(proxy, inaddr, sock_type,
subscribe=subscribe, in_bind=False)
self.topics.append(topic)
def close(self):
_get_matchmaker().stop_heartbeat()
for topic in self.topics:
_get_matchmaker().unregister(topic, CONF.rpc_zmq_host)
self.reactor.close()
self.topics = []
def wait(self):
self.reactor.wait()
def consume_in_thread(self):
_get_matchmaker().start_heartbeat()
self.reactor.consume_in_thread()
def _cast(addr, context, topic, msg, timeout=None, envelope=False,
_msg_id=None):
timeout_cast = timeout or CONF.rpc_cast_timeout
payload = [RpcContext.marshal(context), msg]
with Timeout(timeout_cast, exception=rpc_common.Timeout):
try:
conn = ZmqClient(addr)
# assumes cast can't return an exception
conn.cast(_msg_id, topic, payload, envelope)
except zmq.ZMQError:
raise RPCException("Cast failed. ZMQ Socket Exception")
finally:
if 'conn' in vars():
conn.close()
def _call(addr, context, topic, msg, timeout=None,
envelope=False):
# timeout_response is how long we wait for a response
timeout = timeout or CONF.rpc_response_timeout
# The msg_id is used to track replies.
msg_id = uuid.uuid4().hex
# Replies always come into the reply service.
reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host
LOG.debug(_("Creating payload"))
# Curry the original request into a reply method.
mcontext = RpcContext.marshal(context)
payload = {
'method': '-reply',
'args': {
'msg_id': msg_id,
'topic': reply_topic,
# TODO(ewindisch): safe to remove mcontext in I.
'msg': [mcontext, msg]
}
}
LOG.debug(_("Creating queue socket for reply waiter"))
# Messages arriving async.
# TODO(ewindisch): have reply consumer with dynamic subscription mgmt
with Timeout(timeout, exception=rpc_common.Timeout):
try:
msg_waiter = ZmqSocket(
"ipc://%s/zmq_topic_zmq_replies.%s" %
(CONF.rpc_zmq_ipc_dir,
CONF.rpc_zmq_host),
zmq.SUB, subscribe=msg_id, bind=False
)
LOG.debug(_("Sending cast"))
_cast(addr, context, topic, payload, envelope)
LOG.debug(_("Cast sent; Waiting reply"))
# Blocks until receives reply
msg = msg_waiter.recv()
LOG.debug(_("Received message: %s"), msg)
LOG.debug(_("Unpacking response"))
if msg[2] == 'cast': # Legacy version
raw_msg = _deserialize(msg[-1])[-1]
elif msg[2] == 'impl_zmq_v2':
rpc_envelope = unflatten_envelope(msg[4:])
raw_msg = rpc_common.deserialize_msg(rpc_envelope)
else:
raise rpc_common.UnsupportedRpcEnvelopeVersion(
_("Unsupported or unknown ZMQ envelope returned."))
responses = raw_msg['args']['response']
# ZMQError trumps the Timeout error.
except zmq.ZMQError:
raise RPCException("ZMQ Socket Error")
except (IndexError, KeyError):
raise RPCException(_("RPC Message Invalid."))
finally:
if 'msg_waiter' in vars():
msg_waiter.close()
# It seems we don't need to do all of the following,
# but perhaps it would be useful for multicall?
# One effect of this is that we're checking all
# responses for Exceptions.
for resp in responses:
if isinstance(resp, types.DictType) and 'exc' in resp:
raise rpc_common.deserialize_remote_exception(CONF, resp['exc'])
return responses[-1]
def _multi_send(method, context, topic, msg, timeout=None,
envelope=False, _msg_id=None):
"""Wraps the sending of messages.
Dispatches to the matchmaker and sends message to all relevant hosts.
"""
conf = CONF
LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))})
queues = _get_matchmaker().queues(topic)
LOG.debug(_("Sending message(s) to: %s"), queues)
# Don't stack if we have no matchmaker results
if not queues:
LOG.warn(_("No matchmaker results. Not casting."))
# While not strictly a timeout, callers know how to handle
# this exception and a timeout isn't too big a lie.
raise rpc_common.Timeout(_("No match from matchmaker."))
# This supports brokerless fanout (addresses > 1)
for queue in queues:
(_topic, ip_addr) = queue
_addr = "tcp://%s:%s" % (ip_addr, conf.rpc_zmq_port)
if method.__name__ == '_cast':
eventlet.spawn_n(method, _addr, context,
_topic, msg, timeout, envelope,
_msg_id)
return
return method(_addr, context, _topic, msg, timeout,
envelope)
def create_connection(conf, new=True):
return Connection(conf)
def multicall(conf, *args, **kwargs):
"""Multiple calls."""
return _multi_send(_call, *args, **kwargs)
def call(conf, *args, **kwargs):
"""Send a message, expect a response."""
data = _multi_send(_call, *args, **kwargs)
return data[-1]
def cast(conf, *args, **kwargs):
"""Send a message expecting no reply."""
_multi_send(_cast, *args, **kwargs)
def fanout_cast(conf, context, topic, msg, **kwargs):
"""Send a message to all listening and expect no reply."""
    # NOTE(ewindisch): fanout~ is used because it avoids splitting on '.'
# and acts as a non-subtle hint to the matchmaker and ZmqProxy.
_multi_send(_cast, context, 'fanout~' + str(topic), msg, **kwargs)
def notify(conf, context, topic, msg, envelope):
"""Send notification event.
Notifications are sent to topic-priority.
This differs from the AMQP drivers which send to topic.priority.
"""
# NOTE(ewindisch): dot-priority in rpc notifier does not
# work with our assumptions.
topic = topic.replace('.', '-')
cast(conf, context, topic, msg, envelope=envelope)
def cleanup():
"""Clean up resources in use by implementation."""
global ZMQ_CTX
if ZMQ_CTX:
ZMQ_CTX.term()
ZMQ_CTX = None
global matchmaker
matchmaker = None
def _get_ctxt():
if not zmq:
raise ImportError("Failed to import eventlet.green.zmq")
global ZMQ_CTX
if not ZMQ_CTX:
ZMQ_CTX = zmq.Context(CONF.rpc_zmq_contexts)
return ZMQ_CTX
def _get_matchmaker(*args, **kwargs):
global matchmaker
if not matchmaker:
mm = CONF.rpc_zmq_matchmaker
if mm.endswith('matchmaker.MatchMakerRing'):
            mm = mm.replace('matchmaker', 'matchmaker_ring')
LOG.warn(_('rpc_zmq_matchmaker = %(orig)s is deprecated; use'
' %(new)s instead') % dict(
orig=CONF.rpc_zmq_matchmaker, new=mm))
matchmaker = importutils.import_object(mm, *args, **kwargs)
return matchmaker
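# Rough usage sketch of this driver's public entry points (illustrative only;
# the proxy object, topic names and message bodies below are made up):
#
#     conn = create_connection(CONF)
#     conn.create_consumer('volume', proxy_object, fanout=False)
#     conn.consume_in_thread()
#
#     # In a client process:
#     cast(CONF, context, 'volume', {'method': 'do_work', 'args': {}})
#     result = call(CONF, context, 'volume', {'method': 'get_state', 'args': {}})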
|
|
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The hosts admin extension."""
from xml.parsers import expat
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
import webob.exc
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api import xmlutil
from cinder import db
from cinder import exception
from cinder.i18n import _, _LI
from cinder import objects
from cinder import utils
from cinder.volume import api as volume_api
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('volume', 'hosts')
class HostIndexTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('hosts')
elem = xmlutil.SubTemplateElement(root, 'host', selector='hosts')
elem.set('service-status')
elem.set('service')
elem.set('zone')
elem.set('service-state')
elem.set('host_name')
elem.set('last-update')
return xmlutil.MasterTemplate(root, 1)
class HostUpdateTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('host')
root.set('host')
root.set('status')
return xmlutil.MasterTemplate(root, 1)
class HostActionTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('host')
root.set('host')
return xmlutil.MasterTemplate(root, 1)
class HostShowTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('host')
elem = xmlutil.make_flat_dict('resource', selector='host',
subselector='resource')
root.append(elem)
return xmlutil.MasterTemplate(root, 1)
class HostDeserializer(wsgi.XMLDeserializer):
def default(self, string):
try:
node = utils.safe_minidom_parse_string(string)
except expat.ExpatError:
msg = _("cannot understand XML")
raise exception.MalformedRequestBody(reason=msg)
updates = {}
for child in node.childNodes[0].childNodes:
updates[child.tagName] = self.extract_text(child)
return dict(body=updates)
def _list_hosts(req, service=None):
"""Returns a summary list of hosts."""
curr_time = timeutils.utcnow()
context = req.environ['cinder.context']
services = db.service_get_all(context, False)
zone = ''
if 'zone' in req.GET:
zone = req.GET['zone']
if zone:
services = [s for s in services if s['availability_zone'] == zone]
hosts = []
for host in services:
delta = curr_time - (host['updated_at'] or host['created_at'])
alive = abs(delta.total_seconds()) <= CONF.service_down_time
status = (alive and "available") or "unavailable"
active = 'enabled'
if host['disabled']:
active = 'disabled'
LOG.debug('status, active and update: %s, %s, %s',
status, active, host['updated_at'])
hosts.append({'host_name': host['host'],
'service': host['topic'],
'zone': host['availability_zone'],
'service-status': status,
'service-state': active,
'last-update': host['updated_at']})
if service:
hosts = [host for host in hosts
if host["service"] == service]
return hosts
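# Each summary dict built above has the shape (values illustrative):
#   {'host_name': 'node-1', 'service': 'cinder-volume', 'zone': 'nova',
#    'service-status': 'available', 'service-state': 'enabled',
#    'last-update': <datetime or None>}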
def check_host(fn):
"""Makes sure that the host exists."""
def wrapped(self, req, id, service=None, *args, **kwargs):
listed_hosts = _list_hosts(req, service)
hosts = [h["host_name"] for h in listed_hosts]
if id in hosts:
return fn(self, req, id, *args, **kwargs)
else:
message = _("Host '%s' could not be found.") % id
raise webob.exc.HTTPNotFound(explanation=message)
return wrapped
class HostController(wsgi.Controller):
"""The Hosts API controller for the OpenStack API."""
def __init__(self):
self.api = volume_api.HostAPI()
super(HostController, self).__init__()
@wsgi.serializers(xml=HostIndexTemplate)
def index(self, req):
authorize(req.environ['cinder.context'])
return {'hosts': _list_hosts(req)}
@wsgi.serializers(xml=HostUpdateTemplate)
@wsgi.deserializers(xml=HostDeserializer)
@check_host
def update(self, req, id, body):
authorize(req.environ['cinder.context'])
update_values = {}
for raw_key, raw_val in body.items():
key = raw_key.lower().strip()
val = raw_val.lower().strip()
if key == "status":
if val in ("enable", "disable"):
update_values['status'] = val.startswith("enable")
else:
explanation = _("Invalid status: '%s'") % raw_val
raise webob.exc.HTTPBadRequest(explanation=explanation)
else:
explanation = _("Invalid update setting: '%s'") % raw_key
raise webob.exc.HTTPBadRequest(explanation=explanation)
update_setters = {'status': self._set_enabled_status}
result = {}
for key, value in update_values.items():
result.update(update_setters[key](req, id, value))
return result
def _set_enabled_status(self, req, host, enabled):
"""Sets the specified host's ability to accept new volumes."""
context = req.environ['cinder.context']
state = "enabled" if enabled else "disabled"
LOG.info(_LI("Setting host %(host)s to %(state)s."),
{'host': host, 'state': state})
result = self.api.set_host_enabled(context,
host=host,
enabled=enabled)
if result not in ("enabled", "disabled"):
# An error message was returned
raise webob.exc.HTTPBadRequest(explanation=result)
return {"host": host, "status": result}
@wsgi.serializers(xml=HostShowTemplate)
def show(self, req, id):
"""Shows the volume usage info given by hosts.
:param context: security context
:param host: hostname
:returns: expected to use HostShowTemplate.
ex.::
{'host': {'resource':D},..}
D: {'host': 'hostname','project': 'admin',
'volume_count': 1, 'total_volume_gb': 2048}
"""
host = id
context = req.environ['cinder.context']
if not context.is_admin:
msg = _("Describe-resource is admin only functionality")
raise webob.exc.HTTPForbidden(explanation=msg)
try:
host_ref = db.service_get_by_host_and_topic(context,
host,
CONF.volume_topic)
except exception.ServiceNotFound:
raise webob.exc.HTTPNotFound(explanation=_("Host not found"))
# Getting total available/used resource
# TODO(jdg): Add summary info for Snapshots
volume_refs = db.volume_get_all_by_host(context, host_ref['host'])
(count, sum) = db.volume_data_get_for_host(context,
host_ref['host'])
snap_count_total = 0
snap_sum_total = 0
resources = [{'resource': {'host': host, 'project': '(total)',
'volume_count': str(count),
'total_volume_gb': str(sum),
'snapshot_count': str(snap_count_total),
'total_snapshot_gb': str(snap_sum_total)}}]
project_ids = [v['project_id'] for v in volume_refs]
project_ids = list(set(project_ids))
for project_id in project_ids:
(count, sum) = db.volume_data_get_for_project(context, project_id)
(snap_count, snap_sum) = (
objects.Snapshot.snapshot_data_get_for_project(context,
project_id))
resources.append(
{'resource':
{'host': host,
'project': project_id,
'volume_count': str(count),
'total_volume_gb': str(sum),
'snapshot_count': str(snap_count),
'total_snapshot_gb': str(snap_sum)}})
snap_count_total += int(snap_count)
snap_sum_total += int(snap_sum)
resources[0]['resource']['snapshot_count'] = str(snap_count_total)
resources[0]['resource']['total_snapshot_gb'] = str(snap_sum_total)
return {"host": resources}
class Hosts(extensions.ExtensionDescriptor):
"""Admin-only host administration."""
name = "Hosts"
alias = "os-hosts"
namespace = "http://docs.openstack.org/volume/ext/hosts/api/v1.1"
updated = "2011-06-29T00:00:00+00:00"
def get_resources(self):
resources = [extensions.ResourceExtension('os-hosts',
HostController(),
collection_actions={
'update': 'PUT'},
member_actions={
'startup': 'GET',
'shutdown': 'GET',
'reboot': 'GET'})]
return resources
|
|
'''
Created on Nov 29, 2016
@author: jphelps
'''
import sys
import os
from os import listdir
from os.path import isfile, join
import csv
import re
import psycopg2
import time
clist = []
def insertValues(cur, conn, crsdict, mlist, slist):
#PREPARE COURSE INSERT
# print crsdict['cid']
# cur.execute("""INSERT INTO Course(cid,year,quarter,subject,crse,section,unitlow,unithigh) VALUES (%(cid)s,%(year)s,%(quarter)s,%(subject)s,%(crse)s,%(section)s,%(unitlow)s,%(unithigh)s);""", crsdict)
command = "INSERT INTO Course(cid,year,quarter,subject,crse,section,unitlow,unithigh) VALUES (%(cid)s,%(year)s,%(quarter)s,\'%(subject)s\',%(crse)s,%(section)s,%(unitlow)s,%(unithigh)s); " % crsdict
# print command
# print crsdict['cid']
#PREPARE MEETING INSERTS
for mdict in mlist:
#Parse instructor
if(mdict['instructor'] == ''):
inst = 'null'
else:
inst = mdict['instructor'].replace(',','')
if(inst.find('\'') != -1):
# print inst
inst = inst.replace('\'','')
# print inst
#Parse type
if(mdict['type'] == ''):
typ = 'null'
else:
typ = mdict['type']
#Parse days
if(mdict['days'] == ''):
m = 'False'
t = 'False'
w = 'False'
r = 'False'
f = 'False'
s = 'False'
else:
m = 'FALSE'
t = 'FALSE'
w = 'FALSE'
r = 'FALSE'
f = 'FALSE'
s = 'FALSE'
if(mdict['days'].find('M') != -1):
m = 'TRUE'
if(mdict['days'].find('T') != -1):
t = 'TRUE'
if(mdict['days'].find('W') != -1):
w = 'TRUE'
if(mdict['days'].find('R') != -1):
r = 'TRUE'
if(mdict['days'].find('F') != -1):
f = 'TRUE'
if(mdict['days'].find('S') != -1):
s = 'TRUE'
        #Parse start and end times. Input looks like "9:00 AM - 9:50 AM" or
        #"11:00 AM - 12:15 PM"; start/end are stored as "HMM"/"HHMM" strings in
        #24-hour time, or -1 when no meeting time is listed.
time = mdict['time']
if(time == ''):
start = -1
end = -1
else:
# print time
# print len(time)
if((time.find('AM') != -1) and (time.find('PM') != -1)):
if(len(time) == 17):
start = time[0]
start += time[2:4]
# print start
end = int(time[10])
if (end != 12):
end += 12
end = str(end)
end += time[12:14]
# print end
elif(len(time) == 19):
start = time[0:2]
start += time[3:5]
# print start
end = int(time[11:13])
if (end != 12):
end += 12
end = str(end)
end += time[14:16]
# print end
else:
if(time[1] == ':'):
start = time[0]
start += time[2:4]
# print start
end = int(time[10:12])
if(end != 12):
end += 12
end = str(end)
end += time[13:15]
# print end
else:
start = time[0:2]
start += time[3:5]
# print start
end = int(time[11])
if(end != 12):
end += 12
end = str(end)
end += time[13:15]
# print end
elif(time.find('PM') != -1):
if(len(time) == 17):
start = int(time[0])
if(start != 12):
start += 12
start = str(start)
start += time[2:4]
# print start
end = int(time[10])
if (end != 12):
end += 12
end = str(end)
end += time[12:14]
# print end
elif(len(time) == 19):
start = int(time[0:2])
if(start != 12):
start += 12
start = str(start)
start += time[3:5]
# print start
end = int(time[11:13])
if (end != 12):
end += 12
end = str(end)
end += time[14:16]
# print end
else:
if(time[1] == ':'):
start = int(time[0])
if(start != 12):
start += 12
start = str(start)
start += time[2:4]
# print start
end = int(time[10:12])
if(end != 12):
end += 12
end = str(end)
end += time[13:15]
# print end
else:
start = int(time[0:2])
if(start != 12):
start += 12
start = str(start)
start += time[3:5]
# print start
end = int(time[11])
if(end != 12):
end += 12
end = str(end)
end += time[13:15]
# print end
else:
if(len(time) == 17):
start = time[0]
start += time[2:4]
# print start
end = time[10]
end += time[12:14]
# print end
elif(len(time) == 19):
start = time[0:2]
start += time[3:5]
# print start
end = time[11:13]
end += time[14:16]
# print end
else:
if(time[1] == ':'):
start = time[0]
start += time[2:4]
# print start
end = time[10:12]
end += time[13:15]
# print end
else:
start = time[0:2]
start += time[3:5]
# print start
end = time[11]
end += time[13:15]
# print end
#Parse building
if(mdict['building'] == ''):
build = 'null'
else:
build = mdict['building']
#Parse room
if(mdict['room'] == ''):
room = -1
else:
room = mdict['room']
# cur.execute("""INSERT INTO Meeting(cid,instructor,type,mon,tues,wed,thur,fri,sat,starttime,endtime,building,room) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);""", (crsdict['cid'], inst, typ, m, t, w, r, f, s, start, end, build, room))
command += "INSERT INTO Meeting(cid,instructor,type,mon,tues,wed,thur,fri,sat,starttime,endtime,building,room) VALUES (%s,\'%s\',\'%s\',%s,%s,%s,%s,%s,%s,%s,%s,\'%s\',%s); " % (crsdict['cid'], inst, typ, m, t, w, r, f, s, start, end, build, room)
#PREPARE STUDENT INSERT
for sdict in slist:
#Parse name
name = sdict['surname']
if(name.find('\'') != -1):
# print name
name = name.replace('\'','')
# print name
#Parse grade
grad = sdict['grade']
grade = 'null'
if (grad == 'A+') or (grad == 'A'):
grade = 4.0
elif (len(grad) == 1 or len(grad) == 2):
            if (grad[0] == 'A'):
                # Only 'A-' reaches here; 'A' and 'A+' are handled above.
                grade = 4.0
            elif (grad[0] == 'B'):
                grade = 3.0
elif (grad[0] == 'C'):
grade = 2.0
elif (grad[0] == 'D'):
grade = 1.0
elif (grad == 'F'):
grade = 0.0
if (len(grad) == 2 and (grade != 'null') and (grad[1] == '+')):
grade += 0.3
if (len(grad) == 2 and (grade != 'null') and (grad[1] == '-')):
grade -= 0.3
#Parse units
if(sdict['units'] == ''):
unts = 0
else:
unts = sdict['units']
#Parse email
email = sdict['email']
if(email.find('\'') != -1):
# print email
email = email.replace('\'','')
# print email
# cur.execute("""INSERT INTO Student(cid,sid,surname,prefname,level,units,class,major,grade,status,email) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);""", (crsdict['cid'], sdict['sid'], sdict['surname'], sdict['prefname'], sdict['level'], unts, sdict['class'], sdict['major'], grade, sdict['status'], sdict['email']))
command += "INSERT INTO Student(cid,sid,surname,prefname,level,units,class,major,grade,status,email) VALUES (%s,%s,\'%s\',\'%s\',\'%s\',%s,\'%s\',\'%s\',%s,\'%s\',\'%s\'); " % (crsdict['cid'], sdict['sid'], name, sdict['prefname'], sdict['level'], unts, sdict['class'], sdict['major'], grade, sdict['status'], email)
#SEND COMMAND TO DATABASE
cur.execute(command)
# cur.executemany(command, ())
# conn.commit()
# print "EXECUTED"
# print command
# print '\n'
# print command
# print mdict['days']
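# A safer alternative to the hand-built SQL strings above is to let psycopg2
# bind the values itself, which removes the need to strip quotes manually.
# Illustrative sketch only (mirrors the commented-out execute above):
#
#     cur.execute(
#         "INSERT INTO Course(cid,year,quarter,subject,crse,section,unitlow,unithigh) "
#         "VALUES (%(cid)s,%(year)s,%(quarter)s,%(subject)s,%(crse)s,%(section)s,"
#         "%(unitlow)s,%(unithigh)s);", crsdict)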
def parse(cur, conn, filepath):
keys = (1,2,3,4,5,6,7,8,9,10,11)
data = False
valid = True
mlist = []
slist = []
# clist = []
with open(filepath) as csvfile:
reader = csv.DictReader(csvfile, keys, 'gweem', 'none')
for row in reader:
if((valid == False) and (row[1] != 'CID')):
# print ">>>>>>>>Invalid and not CID"
continue
if(row[2] != 'none'):
# print ">>>>>>>>Row is not just quotes"
#NEXT IS COURSE DATA
if(row[1] == 'CID'):
# print "COURSE"
Type = 1
valid = True
continue
#NEXT IS MEETING DATA
if(row[1] == 'INSTRUCTOR(S)'):
# print "MEETING"
Type = 2
continue
#NEXT IS STUDENT DATA
if(row[1] == 'SEAT'):
# print "STUDENT"
Type = 3
continue
# print "DATA"
data = True
#COURSE DATA
if(Type == 1):
cid = row[1]
if(len(row[6]) > 1):
if(len(row[6]) == 14):
coursedict = ({"cid":row[1],"year":row[2][:4],"quarter":row[2][4:], \
"subject":row[3],"crse":row[4],"section":row[5],"unitlow":row[6][0], \
"unithigh":row[6][8:10]})
continue
else:
coursedict = ({"cid":row[1],"year":row[2][:4],"quarter":row[2][4:], \
"subject":row[3],"crse":row[4],"section":row[5],"unitlow":row[6][0], \
"unithigh":row[6][8]})
continue
else:
coursedict = ({"cid":row[1],"year":row[2][:4],"quarter":row[2][4:], \
"subject":row[3],"crse":row[4],"section":row[5],"unitlow":row[6][0], \
"unithigh":row[6][0]})
continue
#MEETING DATA
if(Type == 2):
meetdict = ({"cid":cid,"instructor":row[1],"type":row[2], \
"days":row[3],"time":row[4],"building":row[5],"room":row[6]})
mlist.append(meetdict)
continue
#STUDENT DATA
if(Type == 3):
studDict = ({"cid":cid,"seat":row[1],"sid":row[2], \
"surname":row[3],"prefname":row[4],"level":row[5], \
"units":row[6],"class":row[7],"major":row[8],"grade":row[9], \
"status":row[10],"email":row[11]})
slist.append(studDict)
continue
#The line is just ""
else:
# print ">>>>>>>>Row is just quotes"
if(data == False):
# print ">>>>>>>>Row should be data: INVALID"
mlist = []
slist = []
valid = False
continue
data = False
if(Type == 3):
# print cid
clist.append(cid)
insertValues(cur, conn, coursedict, mlist, slist)
mlist = []
slist = []
# print len(clist)
def makeTables(cur):
cur.execute("BEGIN; CREATE TABLE Course(cid INT, year INT, quarter INT," \
" subject CHAR(3), crse INT, section INT, unitlow INT, unithigh INT);" \
"CREATE TABLE Meeting(cid INT, instructor CHAR(32), type CHAR(32),"\
" mon BOOLEAN, tues BOOLEAN, wed BOOLEAN, thur BOOLEAN, fri BOOLEAN,"\
" sat BOOLEAN, starttime INT, endtime INT, building CHAR(16), room INT);" \
"CREATE TABLE Student(cid INT, sid INT, surname CHAR(16),"\
" prefname CHAR(16), level CHAR(8), units FLOAT, class CHAR(8),"\
" major CHAR(4), grade DOUBLE PRECISION, status CHAR(8), email CHAR(64)); COMMIT;")
def connect():
# user = os.environ['USER']
user = "postgres"
try:
conn = psycopg2.connect(dbname="postgres", user=user)
return conn
except:
print "Failed to connect to the database"
def main(argv):
if (2 != len(sys.argv)):
print "Incorrect number of arguments"
return 0
start_time = time.time()
conn = connect()
cur = conn.cursor()
makeTables(cur)
path = str(sys.argv[1])
csvfiles = [f for f in listdir(path) if isfile(join(path, f))]
# print len(csvfiles)
slash = '/'
for csvfile in csvfiles:
if (csvfile == ''):
print "File not valid"
else:
# print csvfile
pathseq = (path, csvfile)
csvfilepath = slash.join(pathseq)
parse(cur, conn, csvfilepath)
# print len(clist)
# parse(cur, conn, 'Grades/1995_Q4.csv')
# print len(clist)
print("--- %s seconds ---" % (time.time() - start_time))
if __name__ == '__main__': main(sys.argv[1:])
|
|
from collections import Mapping, Sequence
from warnings import warn
from .common import string, SchemaDefinitionError, InputDataError,\
InputDataWarning
from .lib_interface import YAMLTaggedValue
__all__ = ['SchemaRepository', 'SchemaDefinitionError']
class AnonymousClass(Mapping):
def __contains__(self, key):
return key in self.__dict__
def __len__(self):
return len(self.__dict__)
def __iter__(self):
return iter(self.__dict__)
def __getitem__(self, item):
return getattr(self, item)
def _get_type_and_attrs(schema_def):
if schema_def is None:
return ('any', {})
elif isinstance(schema_def, string):
return (schema_def, {})
elif isinstance(schema_def, Mapping):
attrs = schema_def.copy()
if 'type' not in attrs:
raise SchemaDefinitionError("missing 'type' attribute in "
"schema definition %r" % schema_def)
del attrs['type']
return (schema_def['type'], attrs)
else:
raise SchemaDefinitionError("invalid schema definition %r"
% schema_def)
class ListLoader(object):
def __init__(self, repo, item_type, object_class=None, tag=None):
self.object_class = object_class
self.tag = tag
self.item_loader = repo.make_loader(item_type)
def __call__(self, name, data, context):
# params = {}
if isinstance(data, YAMLTaggedValue):
# Check proper subclass ...
tag = data.tag
data = data.value
if self.tag is not None and tag != self.tag:
repo = context['repo']
if repo.is_tag_sub_type(tag, self.tag):
return repo.load_tagged(name, data, context, tag)
else:
raise InputDataError(
"Expected %r object (or sub type), "
"got %r object" % (self.tag, tag))
elif not isinstance(data, Sequence):
raise InputDataError('Input data is not sequence type: %r' % data)
# Load parameters with multiple alternatives.
result = [self.item_loader(name, item, context) for item in data]
# Return constructed value.
if self.object_class is not None:
# Class of object is defined.
if hasattr(self.object_class, 'yaml_construct'):
# User provided a constructor.
try:
return self.object_class.yaml_construct(result, context)
except Exception as exc:
from sys import exc_info
from traceback import format_tb
raise InputDataError(
'%s.yaml_construct() reported error '
'"%s" with traceback:\n%s'
% (self.object_class.__name__, exc,
''.join(format_tb(exc_info()[2]))))
else:
return result
else:
return result
class ObjectLoader(object):
def __init__(
self, repo, members, object_class=None, tag=None,
has_open_namespace=False):
if not isinstance(members, Mapping):
raise InputDataError('Provided members %r is not a mapping type'
% members)
self.object_class = object_class
self.tag = tag
self.has_open_namespace = has_open_namespace
members = members.copy()
# Convert parameter names to unicode strings.
renames = []
for name in members:
if not isinstance(name, str):
old_name = name
renames.append((old_name, string(name)))
for (old_name, new_name) in renames:
entry = members[old_name]
del members[old_name]
members[new_name] = entry
# Validate schema attributes and collect conflicts and alternatives.
# conflicts = {} -- TODO
self.loaders = {}
self.descs = {}
self.requireds = set()
self.optionals = set()
self.defaults = {}
self.alt_sets = set()
alt_sets_index = {}
for name in members:
# Determine attributes of parameter.
schema_def = members[name]
typ, attrs = _get_type_and_attrs(schema_def)
# Extract description of parameter (if one is available).
self.descs[name] = attrs.get('desc')
# Look for special attributes: optional, default, or alts:
if attrs.get('optional'):
if 'default' in attrs or attrs.get('alts'):
raise SchemaDefinitionError("use of 'default' or 'alts'",
"attributes conflict with use",
"of 'optional' in %r"
% schema_def)
self.optionals.add(name)
elif 'default' in attrs:
if attrs.get('optional') or attrs.get('alts'):
raise SchemaDefinitionError("use of 'optional' or 'alts'",
"attributes conflicts with",
"use of 'optional' in %r"
% schema_def)
self.defaults[name] = attrs['default']
elif 'alts' in attrs:
if attrs.get('optional') or 'default' in attrs:
raise SchemaDefinitionError("use of 'optional' or",
"'default' attributes",
"conflicts with use of 'alts'",
"in %r" % schema_def)
# Get list of alternative parameter names:
alts = attrs['alts']
if isinstance(alts, str):
alts = [alts]
if not isinstance(alts, Sequence):
raise SchemaDefinitionError("'alts' attribute must be a",
"sequence instead of %r."
% alts)
# Compute the complete set of alternatives.
class hashable_set(set):
def __hash__(self):
return id(self)
def __eq__(self, other):
return id(self) == id(other)
alt_set = hashable_set(alts)
self.alt_sets.add(alt_set)
alt_set.add(name)
for alt in alts:
if alt in alt_sets_index:
if alt_sets_index[alt] in self.alt_sets:
self.alt_sets.remove(alt_sets_index[alt])
alt_set.update(alt_sets_index[alt]) # A = A U B
# Make sure all alt_sets_index entries refer to the newly
# created set.
for alt in alt_set:
alt_sets_index[alt] = alt_set
else:
self.requireds.add(name)
# Create new schema.
self.loaders[name] = repo.make_loader(schema_def)
def __call__(self, name, data, context):
params = {}
if isinstance(data, YAMLTaggedValue):
# Check proper subclass ...
tag = data.tag
data = data.value
if self.tag is not None and tag != self.tag:
repo = context['repo']
if repo.is_tag_sub_type(tag, self.tag):
return repo.load_tagged(name, data, context, tag)
else:
raise InputDataError(
"Expected %r object (or sub type), "
"got %r object" % (self.tag, tag))
# elif data is None:
# return None
elif not isinstance(data, Mapping):
raise InputDataError('Input data for %r is not mapping type: %r'
% (name, data))
# Check for parameters not defined in schema.
for name in data:
if name not in self.loaders and not self.has_open_namespace:
warn('Invalid parameter %r specified in YAML input'
% name, InputDataWarning, 3)
# raise InputDataError('Invalid parameter %r specified in %r'
# %(name, data))
# Load required parameters.
for name in self.requireds:
if name not in data:
raise InputDataError('Missing required parameter %r in %r'
% (name, data))
params[name] = self.loaders[name](name, data[name], context)
# Load optional parameters.
for name in self.optionals:
if name in data:
params[name] = self.loaders[name](name, data[name], context)
# Load parameters with defaults.
for name in self.defaults:
if name in data:
params[name] = self.loaders[name](name, data[name], context)
else:
params[name] = self.loaders[name](
name, self.defaults[name], context)
# Load parameters with multiple alternatives.
for alt_set in self.alt_sets:
alt_names = [name for name in alt_set if name in data]
if len(alt_names) != 1:
raise InputDataError("Number of alternatives %r specified in",
"%r does not equal 1" % (alt_set, data))
name = alt_names[0]
params[name] = self.loaders[name](name, data[name], context)
# Return constructed value.
if self.object_class is not None:
# Class of object is defined.
if hasattr(self.object_class, 'yaml_construct'):
# User provided a constructor.
params['name'] = name
try:
return self.object_class.yaml_construct(params, context)
except Exception as exc:
from sys import exc_info
from traceback import format_tb
raise InputDataError(
'%s.yaml_construct() reported error '
'"%s" with traceback:\n%s'
% (self.object_class.__name__, exc,
''.join(format_tb(exc_info()[2]))))
else:
# Default constructor.
obj = type(self.object_class.__name__, (object,), {})()
for name in params:
setattr(obj, name, params[name])
obj.__class__ = self.object_class
return obj
else:
# Class of object is undefined.
obj = AnonymousClass()
for name in params:
setattr(obj, name, params[name])
return obj
class SchemaRepository(object):
def __init__(self, builtins=True):
if builtins:
# Setup builtin schemata.
from .builtins import loaders
self._loaders = loaders
else:
# No builtin schemata.
self._loaders = {}
self._loaders['object'] = ObjectLoader
def make_loader(self, schema_def):
"""Return a function that accepts an abstract tree conforming to the
provided schema definition and constructs a Python object.
"""
type_name, attrs = _get_type_and_attrs(schema_def)
loader_class = self._loaders.get(type_name)
if loader_class is None:
raise SchemaDefinitionError('undefined data type %r' % type_name)
def del_key(attrs, key):
if key in attrs:
del attrs[key]
for key in ['optional', 'default', 'alts', 'type', 'desc']:
del_key(attrs, key)
try:
return loader_class(self, **attrs)
except TypeError:
import sys
from traceback import extract_tb
typ, value, tb = sys.exc_info()
mod_name, lineno, func_name, code = extract_tb(tb)[-1]
if(mod_name == __file__
and func_name == sys._getframe().f_code.co_name):
raise SchemaDefinitionError(
'Invalid attributes to type %r: %r' % (type_name, attrs))
else:
raise
class ObjectLoaderFactory(object):
def __init__(self, members, object_class=None, tag=None):
self.members = members
self.object_class = object_class
self.tag = tag
def __call__(self, repo, **attrs):
return ObjectLoader(repo, self.members, self.object_class,
self.tag, **attrs)
class ListLoaderFactory(object):
def __init__(self, item_type, object_class=None, tag=None):
self.item_type = item_type
self.object_class = object_class
self.tag = tag
def __call__(self, repo, **attrs):
return ListLoader(repo, self.item_type, self.object_class,
self.tag, **attrs)
def load_tagged(self, name, data, context, tag):
loader = self._loaders.get(tag[1:])
if loader is None:
raise InputDataError('Unknown tag: %r' % tag)
return loader(self)(name, data, context)
def is_tag_sub_type(self, tag, base_tag):
loader = self._loaders.get(tag[1:])
base_loader = self._loaders.get(base_tag[1:])
if loader is None:
raise InputDataError('Invalid tag: %r' % tag)
if base_loader is None:
raise InputDataError('Invalid tag: %r' % base_tag)
object_class = loader.object_class
base_object_class = base_loader.object_class
if object_class is not None and base_object_class is not None:
return issubclass(object_class, base_object_class)
def make_object_loader(self, members, object_class=None,
open_namespace=False):
"""Return a function that accepts an abstract tree conforming to a
schema for an object whose `members` are defined by a yaml mapping.
"""
return self.ObjectLoaderFactory(members, object_class)(
self, has_open_namespace=open_namespace)
def register_class(self, name, schema_def, class_):
"""Register a Python class to be used to construct objects having
specified YAML tag (`name`) with provided schema definition.
"""
self.register_type(name,
self.ObjectLoaderFactory(schema_def, class_,
tag='!' + name))
def register_list_type(self, name, item_type, class_):
"""Register a Python class to be used to construct objects having
specified YAML tag (`name`) in which the data is expected to be a list
containing items of specified `item_type`.
"""
self.register_type(name,
self.ListLoaderFactory(item_type,
class_, tag='!' + name))
def register_type(self, name, loader):
"""Register a function to be called (`loader`) to construct new
objects from YAML data tagged with `name`.
"""
self._loaders[name] = loader
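# A rough registration sketch (the Server class, the 'server' tag and the
# member type names 'string'/'integer' below are hypothetical; the builtin
# type names actually available come from .builtins):
#
#   repo = SchemaRepository()
#   class Server(object):
#       pass
#   repo.register_class('server', {
#       'host': 'string',
#       'port': {'type': 'integer', 'default': 8080},
#       'notes': {'type': 'string', 'optional': True},
#   }, Server)
#
# YAML data tagged '!server' is then loaded through the generated
# ObjectLoader, which enforces the required/optional/default rules declared
# in the schema definition.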
def unregister_type(self, name):
del self._loaders[name]
|
|
#!/usr/bin/env python3
import binascii
import logging
import time
import uuid
import socket
from pykms_Structure import Structure
from pykms_DB2Dict import kmsDB2Dict
from pykms_PidGenerator import epidGenerator
from pykms_Filetimes import filetime_to_dt
from pykms_Sql import sql_initialize, sql_update, sql_update_epid
from pykms_Format import justify, byterize, enco, deco, pretty_printer
#--------------------------------------------------------------------------------------------------------------------------------------------------------
loggersrv = logging.getLogger('logsrv')
class UUID(Structure):
commonHdr = ()
structure = (
('raw', '16s'),
)
def get(self):
return uuid.UUID(bytes_le = enco(str(self), 'latin-1'))
class kmsBase:
def __init__(self, data, srv_config):
self.data = data
self.srv_config = srv_config
class kmsRequestStruct(Structure):
commonHdr = ()
structure = (
('versionMinor', '<H'),
('versionMajor', '<H'),
('isClientVm', '<I'),
('licenseStatus', '<I'),
('graceTime', '<I'),
('applicationId', ':', UUID),
('skuId', ':', UUID),
('kmsCountedId' , ':', UUID),
('clientMachineId', ':', UUID),
('requiredClientCount', '<I'),
('requestTime', '<Q'),
('previousClientMachineId', ':', UUID),
('machineName', 'u'),
('_mnPad', '_-mnPad', '126-len(machineName)'),
('mnPad', ':'),
)
def getMachineName(self):
return self['machineName'].decode('utf-16le')
def getLicenseStatus(self):
return kmsBase.licenseStates.get(self['licenseStatus'], "Unknown")
class kmsResponseStruct(Structure):
commonHdr = ()
structure = (
('versionMinor', '<H'),
('versionMajor', '<H'),
('epidLen', '<I=len(kmsEpid)+2'),
('kmsEpid', 'u'),
('clientMachineId', ':', UUID),
('responseTime', '<Q'),
('currentClientCount', '<I'),
('vLActivationInterval', '<I'),
('vLRenewalInterval', '<I'),
)
class GenericRequestHeader(Structure):
commonHdr = ()
structure = (
('bodyLength1', '<I'),
('bodyLength2', '<I'),
('versionMinor', '<H'),
('versionMajor', '<H'),
('remainder', '_'),
)
licenseStates = {
0 : "Unlicensed",
1 : "Activated",
2 : "Grace Period",
3 : "Out-of-Tolerance Grace Period",
4 : "Non-Genuine Grace Period",
5 : "Notifications Mode",
6 : "Extended Grace Period",
}
licenseStatesEnum = {
'unlicensed' : 0,
'licensed' : 1,
'oobGrace' : 2,
'ootGrace' : 3,
'nonGenuineGrace' : 4,
'notification' : 5,
'extendedGrace' : 6
}
def getPadding(self, bodyLength):
## https://forums.mydigitallife.info/threads/71213-Source-C-KMS-Server-from-Microsoft-Toolkit?p=1277542&viewfull=1#post1277542
return 4 + (((~bodyLength & 3) + 1) & 3)
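# The expression above pads the body out to the next multiple of four bytes
# and always adds at least four; worked values (illustrative only):
#   bodyLength % 4 == 0  ->  padding 4
#   bodyLength % 4 == 1  ->  padding 7
#   bodyLength % 4 == 2  ->  padding 6
#   bodyLength % 4 == 3  ->  padding 5
# e.g. self.getPadding(253) == 7, and 253 + 7 is again a multiple of four.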
def serverLogic(self, kmsRequest):
pretty_printer(num_text = 15, where = "srv")
kmsRequest = byterize(kmsRequest)
loggersrv.debug("KMS Request Bytes: \n%s\n" % justify(deco(binascii.b2a_hex(enco(str(kmsRequest), 'latin-1')), 'latin-1')))
loggersrv.debug("KMS Request: \n%s\n" % justify(kmsRequest.dump(print_to_stdout = False)))
clientMachineId = kmsRequest['clientMachineId'].get()
applicationId = kmsRequest['applicationId'].get()
skuId = kmsRequest['skuId'].get()
requestDatetime = filetime_to_dt(kmsRequest['requestTime'])
# Localize the request time, if module "tzlocal" is available.
try:
from tzlocal import get_localzone
from pytz.exceptions import UnknownTimeZoneError
try:
tz = get_localzone()
local_dt = tz.localize(requestDatetime)
except UnknownTimeZoneError:
pretty_printer(log_obj = loggersrv.warning,
put_text = "{reverse}{yellow}{bold}Unknown time zone ! Request time not localized.{end}")
local_dt = requestDatetime
except ImportError:
pretty_printer(log_obj = loggersrv.warning,
put_text = "{reverse}{yellow}{bold}Module 'tzlocal' not available ! Request time not localized.{end}")
local_dt = requestDatetime
# Activation threshold.
# https://docs.microsoft.com/en-us/windows/deployment/volume-activation/activate-windows-10-clients-vamt
MinClients = kmsRequest['requiredClientCount']
RequiredClients = MinClients * 2
if self.srv_config["clientcount"] != None:
if 0 < self.srv_config["clientcount"] < MinClients:
# fixed to 6 (product server) or 26 (product desktop)
currentClientCount = MinClients + 1
pretty_printer(log_obj = loggersrv.warning,
put_text = "{reverse}{yellow}{bold}Not enough clients ! Fixed with %s, but activated client \
could be detected as not genuine !{end}" %currentClientCount)
elif MinClients <= self.srv_config["clientcount"] < RequiredClients:
currentClientCount = self.srv_config["clientcount"]
pretty_printer(log_obj = loggersrv.warning,
put_text = "{reverse}{yellow}{bold}With count = %s, activated client could be detected as not genuine !{end}" %currentClientCount)
elif self.srv_config["clientcount"] >= RequiredClients:
# fixed to 10 (product server) or 50 (product desktop)
currentClientCount = RequiredClients
if self.srv_config["clientcount"] > RequiredClients:
pretty_printer(log_obj = loggersrv.warning,
put_text = "{reverse}{yellow}{bold}Too many clients ! Fixed with %s{end}" %currentClientCount)
else:
# fixed to 10 (product server) or 50 (product desktop)
currentClientCount = RequiredClients
# Get a name for SkuId, AppId.
kmsdb = kmsDB2Dict()
appitems = kmsdb[2]
for appitem in appitems:
kmsitems = appitem['KmsItems']
for kmsitem in kmsitems:
skuitems = kmsitem['SkuItems']
for skuitem in skuitems:
try:
if uuid.UUID(skuitem['Id']) == skuId:
skuName = skuitem['DisplayName']
break
except:
skuName = skuId
pretty_printer(log_obj = loggersrv.warning,
put_text = "{reverse}{yellow}{bold}Can't find a name for this product !{end}")
try:
if uuid.UUID(appitem['Id']) == applicationId:
appName = appitem['DisplayName']
except:
appName = applicationId
pretty_printer(log_obj = loggersrv.warning,
put_text = "{reverse}{yellow}{bold}Can't find a name for this application group !{end}")
infoDict = {
"machineName" : kmsRequest.getMachineName(),
"clientMachineId" : str(clientMachineId),
"appId" : appName,
"skuId" : skuName,
"licenseStatus" : kmsRequest.getLicenseStatus(),
"requestTime" : int(time.time()),
"kmsEpid" : None
}
loggersrv.info("Machine Name: %s" % infoDict["machineName"])
loggersrv.info("Client Machine ID: %s" % infoDict["clientMachineId"])
loggersrv.info("Application ID: %s" % infoDict["appId"])
loggersrv.info("SKU ID: %s" % infoDict["skuId"])
loggersrv.info("License Status: %s" % infoDict["licenseStatus"])
loggersrv.info("Request Time: %s" % local_dt.strftime('%Y-%m-%d %H:%M:%S %Z (UTC%z)'))
if self.srv_config['loglevel'] == 'MININFO':
loggersrv.mininfo("", extra = {'host': str(self.srv_config['raddr']),
'status' : infoDict["licenseStatus"],
'product' : infoDict["skuId"]})
# Create database.
if self.srv_config['sqlite']:
sql_initialize(self.srv_config['sqlite'])
sql_update(self.srv_config['sqlite'], infoDict)
return self.createKmsResponse(kmsRequest, currentClientCount, appName)
def createKmsResponse(self, kmsRequest, currentClientCount, appName):
response = self.kmsResponseStruct()
response['versionMinor'] = kmsRequest['versionMinor']
response['versionMajor'] = kmsRequest['versionMajor']
if not self.srv_config["epid"]:
response["kmsEpid"] = epidGenerator(kmsRequest['kmsCountedId'].get(), kmsRequest['versionMajor'],
self.srv_config["lcid"]).encode('utf-16le')
else:
response["kmsEpid"] = self.srv_config["epid"].encode('utf-16le')
response['clientMachineId'] = kmsRequest['clientMachineId']
# rule: timeserver - 4h <= timeclient <= timeserver + 4h, check if is satisfied (TODO).
response['responseTime'] = kmsRequest['requestTime']
response['currentClientCount'] = currentClientCount
response['vLActivationInterval'] = self.srv_config["activation"]
response['vLRenewalInterval'] = self.srv_config["renewal"]
# Update database epid.
if self.srv_config['sqlite']:
sql_update_epid(self.srv_config['sqlite'], kmsRequest, response, appName)
loggersrv.info("Server ePID: %s" % response["kmsEpid"].decode('utf-16le'))
return response
import pykms_RequestV4, pykms_RequestV5, pykms_RequestV6, pykms_RequestUnknown
def generateKmsResponseData(data, srv_config):
version = kmsBase.GenericRequestHeader(data)['versionMajor']
currentDate = time.strftime("%a %b %d %H:%M:%S %Y")
if version == 4:
loggersrv.info("Received V%d request on %s." % (version, currentDate))
messagehandler = pykms_RequestV4.kmsRequestV4(data, srv_config)
elif version == 5:
loggersrv.info("Received V%d request on %s." % (version, currentDate))
messagehandler = pykms_RequestV5.kmsRequestV5(data, srv_config)
elif version == 6:
loggersrv.info("Received V%d request on %s." % (version, currentDate))
messagehandler = pykms_RequestV6.kmsRequestV6(data, srv_config)
else:
loggersrv.info("Unhandled KMS version V%d." % version)
messagehandler = pykms_RequestUnknown.kmsRequestUnknown(data, srv_config)
return messagehandler.executeRequestLogic()
|
|
from __future__ import unicode_literals
import keyword
import re
from optparse import make_option
from django.core.management.base import NoArgsCommand, CommandError
from django.db import connections, DEFAULT_DB_ALIAS
#from django.utils import six
class Command(NoArgsCommand):
help = "Introspects the database tables in the given database and outputs a Django model module."
option_list = NoArgsCommand.option_list + (
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database to '
'introspect. Defaults to using the "default" database.'),
)
requires_model_validation = False
db_module = 'django.db'
def handle_noargs(self, **options):
try:
for line in self.handle_modeling(options):
self.stdout.write("%s\n" % line)
except NotImplementedError:
raise CommandError("Database inspection (modeling) isn't supported for the currently selected database backend.")
def handle_modeling(self, options):
connection = connections[options.get('database')]
# 'table_name_filter' is a stealth option
table_name_filter = options.get('table_name_filter')
table2model = lambda table_name: table_name.title().replace('_', '').replace(' ', '').replace('-', '')
strip_prefix = lambda s: s.startswith("u'") and s[1:] or s
cursor = connection.cursor()
yield "# This is an auto-generated Django model module."
yield "# You'll have to do the following manually to clean this up:"
yield "# * Rearrange models' order"
yield "# * Make sure each model has one field with primary_key=True"
yield "# Feel free to rename the models, but don't rename db_table values or field names."
yield "#"
yield "# Also note: You'll have to insert the output of 'django-admin.py sqlcustom [appname]'"
yield "# into your database."
yield "from __future__ import unicode_literals"
yield ''
yield 'from %s import models' % self.db_module
yield ''
known_models = []
for table_name in connection.introspection.table_names(cursor):
if table_name_filter is not None and callable(table_name_filter):
if not table_name_filter(table_name):
continue
yield 'class %s(models.Model):' % table2model(table_name)
known_models.append(table2model(table_name))
try:
relations = connection.introspection.get_relations(cursor, table_name)
except NotImplementedError:
relations = {}
try:
indexes = connection.introspection.get_indexes(cursor, table_name)
except NotImplementedError:
indexes = {}
used_column_names = [] # Holds column names used in the table so far
for i, row in enumerate(connection.introspection.get_table_description(cursor, table_name)):
comment_notes = [] # Holds Field notes, to be displayed in a Python comment.
extra_params = {} # Holds Field parameters such as 'db_column'.
column_name = row[0]
is_relation = i in relations
att_name, params, notes = self.normalize_col_name(
column_name, used_column_names, is_relation)
extra_params.update(params)
comment_notes.extend(notes)
used_column_names.append(att_name)
# Add primary_key and unique, if necessary.
if column_name in indexes:
if indexes[column_name]['primary_key']:
extra_params['primary_key'] = True
elif indexes[column_name]['unique']:
extra_params['unique'] = True
if is_relation:
rel_to = relations[i][1] == table_name and "'self'" or table2model(relations[i][1])
if rel_to in known_models:
field_type = 'ForeignKey(%s' % rel_to
else:
field_type = "ForeignKey('%s'" % rel_to
else:
# Calling `get_field_type` to get the field type string and any
# additional parameters and notes.
field_type, field_params, field_notes = self.get_field_type(connection, table_name, row)
extra_params.update(field_params)
comment_notes.extend(field_notes)
field_type += '('
# Don't output 'id = meta.AutoField(primary_key=True)', because
# that's assumed if it doesn't exist.
if att_name == 'id' and field_type == 'AutoField(' and extra_params == {'primary_key': True}:
continue
# Add 'null' and 'blank', if the 'null_ok' flag was present in the
# table description.
if row[6]: # If it's NULL...
extra_params['blank'] = True
if not field_type in ('TextField(', 'CharField('):
extra_params['null'] = True
field_desc = '%s = models.%s' % (att_name, field_type)
if extra_params:
if not field_desc.endswith('('):
field_desc += ', '
field_desc += ', '.join([
'%s=%s' % (k, strip_prefix(repr(v)))
for k, v in extra_params.items()])
field_desc += ')'
if comment_notes:
field_desc += ' # ' + ' '.join(comment_notes)
yield ' %s' % field_desc
for meta_line in self.get_meta(table_name):
yield meta_line
def normalize_col_name(self, col_name, used_column_names, is_relation):
"""
Modify the column name to make it Python-compatible as a field name
"""
field_params = {}
field_notes = []
new_name = col_name.lower()
if new_name != col_name:
field_notes.append('Field name made lowercase.')
if is_relation:
if new_name.endswith('_id'):
new_name = new_name[:-3]
else:
field_params['db_column'] = col_name
new_name, num_repl = re.subn(r'\W', '_', new_name)
if num_repl > 0:
field_notes.append('Field renamed to remove unsuitable characters.')
if new_name.find('__') >= 0:
while new_name.find('__') >= 0:
new_name = new_name.replace('__', '_')
if col_name.lower().find('__') >= 0:
# Only add the comment if the double underscore was in the original name
field_notes.append("Field renamed because it contained more than one '_' in a row.")
if new_name.startswith('_'):
new_name = 'field%s' % new_name
field_notes.append("Field renamed because it started with '_'.")
if new_name.endswith('_'):
new_name = '%sfield' % new_name
field_notes.append("Field renamed because it ended with '_'.")
if keyword.iskeyword(new_name):
new_name += '_field'
field_notes.append('Field renamed because it was a Python reserved word.')
if new_name[0].isdigit():
new_name = 'number_%s' % new_name
field_notes.append("Field renamed because it wasn't a valid Python identifier.")
if new_name in used_column_names:
num = 0
while '%s_%d' % (new_name, num) in used_column_names:
num += 1
new_name = '%s_%d' % (new_name, num)
field_notes.append('Field renamed because of name conflict.')
if col_name != new_name and field_notes:
field_params['db_column'] = col_name
return new_name, field_params, field_notes
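# Illustration of the renaming rules above (values traced from the logic in
# this method, shown as comments only):
#   normalize_col_name('User ID', [], False)
#       -> ('user_id', {'db_column': 'User ID'},
#           ['Field name made lowercase.',
#            'Field renamed to remove unsuitable characters.'])
#   normalize_col_name('class', [], False)
#       -> ('class_field', {'db_column': 'class'},
#           ['Field renamed because it was a Python reserved word.'])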
def get_field_type(self, connection, table_name, row):
"""
Given the database connection, the table name, and the cursor row
description, this routine will return the given field type name, as
well as any additional keyword parameters and notes for the field.
"""
field_params = {}
field_notes = []
try:
field_type = connection.introspection.get_field_type(row[1], row)
except KeyError:
field_type = 'TextField'
field_notes.append('This field type is a guess.')
# This is a hook for DATA_TYPES_REVERSE to return a tuple of
# (field_type, field_params_dict).
if type(field_type) is tuple:
field_type, new_params = field_type
field_params.update(new_params)
# Add max_length for all CharFields.
if field_type == 'CharField' and row[3]:
field_params['max_length'] = row[3]
if field_type == 'DecimalField':
field_params['max_digits'] = row[4]
field_params['decimal_places'] = row[5]
return field_type, field_params, field_notes
def get_meta(self, table_name):
"""
Return a sequence comprising the lines of code necessary
to construct the inner Meta class for the model corresponding
to the given database table name.
"""
return [" class Meta:",
" db_table = '%s'" % table_name,
""]
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# smoke_zephyr/utilities.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import collections
import functools
import inspect
import ipaddress
import itertools
import logging
import os
import random
import re
import shutil
import string
import subprocess
import sys
import time
import unittest
import urllib.parse
import urllib.request
import weakref
EMAIL_REGEX = re.compile(r'^[a-z0-9._%+-]+@[a-z0-9.-]+\.[a-z]{2,6}$', flags=re.IGNORECASE)
class AttributeDict(dict):
"""
This class allows dictionary keys to be accessed as attributes. For
example: ``ad = AttributeDict(test=1); ad['test'] == ad.test``
"""
__getattr__ = dict.__getitem__
__setattr__ = dict.__setitem__
class BruteforceGenerator(object):
"""
This class allows iterating over sequences for bruteforcing.
"""
# requirements = itertools
def __init__(self, startlen, endlen=None, charset=None):
"""
:param int startlen: The minimum sequence size to generate.
:param int endlen: The maximum sequence size to generate.
:param charset: The characters to include in the resulting sequences.
"""
self.startlen = startlen
if endlen is None:
self.endlen = startlen
else:
self.endlen = endlen
if charset is None:
charset = list(map(chr, range(0, 256)))
elif isinstance(charset, str):
charset = list(charset)
elif isinstance(charset, bytes):
charset = list(map(chr, charset))
charset.sort()
self.charset = tuple(charset)
self.length = self.startlen
self._product = itertools.product(self.charset, repeat=self.length)
self._next = self.__next__
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
try:
value = next(self._product)
except StopIteration:
if self.length == self.endlen:
raise StopIteration
self.length += 1
self._product = itertools.product(self.charset, repeat=self.length)
value = next(self._product)
return ''.join(value)
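# A small illustration of the iteration order (charset and lengths are
# arbitrary):
# >>> list(BruteforceGenerator(1, 2, 'ab'))
# ['a', 'b', 'aa', 'ab', 'ba', 'bb']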
_ArgSpec = collections.namedtuple('_ArgSpec', ('args', 'varargs', 'keywords', 'defaults'))
class Cache(object):
"""
This class provides a simple to use cache object which can be applied
as a decorator.
"""
def __init__(self, timeout):
"""
:param timeout: The amount of time in seconds that a cached
result will be considered valid for.
:type timeout: int, str
"""
if isinstance(timeout, str):
timeout = parse_timespan(timeout)
self.cache_timeout = timeout
self._target_function = None
self._target_function_arg_spec = None
self.__cache = {}
self.__obj = None
def __get__(self, instance, _):
self.__obj = instance
return self
def __call__(self, *args, **kwargs):
if not getattr(self, '_target_function', False):
target_function = args[0]
if not inspect.isfunction(target_function) and not inspect.ismethod(target_function):
raise RuntimeError('the cached object must be a function or method')
arg_spec = inspect.getfullargspec(target_function) # pylint: disable=W1505
arg_spec = _ArgSpec(args=arg_spec.args, varargs=arg_spec.varargs, keywords=arg_spec.kwonlyargs, defaults=arg_spec.defaults)
if arg_spec.varargs or arg_spec.keywords:
raise RuntimeError('the cached function can not use dynamic args or kwargs')
self._target_function = target_function
self._target_function_arg_spec = arg_spec
return functools.wraps(target_function)(self)
self.cache_clean()
if self.__obj is not None:
args = (self.__obj,) + args
self.__obj = None
is_method = True
else:
is_method = False
args = self._flatten_args(args, kwargs)
if is_method:
inst = args.popleft()
args = tuple(args)
ref = weakref.ref(inst, functools.partial(self._ref_callback, args))
cache_args = (ref,) + args
args = (inst,) + args
else:
cache_args = tuple(args)
args = tuple(args)
result, expiration = self.__cache.get(cache_args, (None, 0))
if expiration > time.time():
return result
result = self._target_function(*args)
self.__cache[cache_args] = (result, time.time() + self.cache_timeout)
return result
def __repr__(self):
return "<cached function {0} at 0x{1:x}>".format(self._target_function.__name__, id(self._target_function))
def _flatten_args(self, args, kwargs):
flattened_args = collections.deque(args)
arg_spec = self._target_function_arg_spec
arg_spec_defaults = (arg_spec.defaults or [])
default_args = tuple(arg_spec.args[:-len(arg_spec_defaults)])
default_kwargs = dict(zip(arg_spec.args[-len(arg_spec_defaults):], arg_spec_defaults))
for arg_id in range(len(args), len(arg_spec.args)):
arg_name = arg_spec.args[arg_id]
if arg_name in default_args:
if not arg_name in kwargs:
raise TypeError("{0}() missing required argument '{1}'".format(self._target_function.__name__, arg_name))
flattened_args.append(kwargs.pop(arg_name))
else:
flattened_args.append(kwargs.pop(arg_name, default_kwargs[arg_name]))
if kwargs:
unexpected_kwargs = tuple("'{0}'".format(a) for a in kwargs.keys())
raise TypeError("{0}() got an unexpected keyword argument{1} {2}".format(self._target_function.__name__, ('' if len(unexpected_kwargs) == 1 else 's'), ', '.join(unexpected_kwargs)))
return flattened_args
def _ref_callback(self, args, ref):
args = (ref,) + args
self.__cache.pop(args, None)
def cache_clean(self):
"""
Remove expired items from the cache.
"""
now = time.time()
keys_for_removal = collections.deque()
for key, (_, expiration) in self.__cache.items():
if expiration < now:
keys_for_removal.append(key)
for key in keys_for_removal:
del self.__cache[key]
def cache_clear(self):
"""
Remove all items from the cache.
"""
self.__cache = {}
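# A minimal usage sketch of the decorator (the function and timeout below are
# illustrative):
# >>> @Cache('30s')
# ... def lookup(value):
# ...     return value * value  # stand-in for an expensive call
# Repeated calls with the same argument inside the 30 second window return
# the cached result; lookup.cache_clear() discards everything stored.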
class FileWalker(object):
"""
This class is used to easily iterate over files and subdirectories of a
specified parent directory.
"""
def __init__(self, filespath, absolute_path=False, skip_files=False, skip_dirs=False, filter_func=None, follow_links=False, max_depth=None):
"""
.. versionchanged:: 1.4.0
Added the *follow_links* and *max_depth* parameters.
:param str filespath: A path to either a file or a directory. If
a file is passed then that will be the only file returned
during the iteration. If a directory is passed, all files and
subdirectories will be recursively returned during the iteration.
:param bool absolute_path: Whether or not the absolute path or a
relative path should be returned.
:param bool skip_files: Whether or not to skip files.
:param bool skip_dirs: Whether or not to skip directories.
:param function filter_func: If defined, the filter_func function will
be called for each path (with the path as the one and only argument)
and if the function returns false the path will be skipped.
:param bool follow_links: Whether or not to follow directories pointed
to by symlinks.
:param max_depth: A maximum depth to recurse into.
"""
if not (os.path.isfile(filespath) or os.path.isdir(filespath)):
raise Exception(filespath + ' is neither a file nor a directory')
if absolute_path:
self.filespath = os.path.abspath(filespath)
else:
self.filespath = os.path.relpath(filespath)
self.skip_files = skip_files
self.skip_dirs = skip_dirs
self.filter_func = filter_func
self.follow_links = follow_links
self.max_depth = float('inf') if max_depth is None else max_depth
if os.path.isdir(self.filespath):
self._walk = None
self._next = self._next_dir
elif os.path.isfile(self.filespath):
self._next = self._next_file
def __iter__(self):
return self._next()
def _skip(self, cur_file):
if self.skip_files and os.path.isfile(cur_file):
return True
if self.skip_dirs and os.path.isdir(cur_file):
return True
if self.filter_func is not None:
if not self.filter_func(cur_file):
return True
return False
def _next_dir(self):
for root, dirs, files in os.walk(self.filespath, followlinks=self.follow_links):
if root == self.filespath:
depth = 0
else:
depth = os.path.relpath(root, start=self.filespath).count(os.path.sep) + 1
if depth >= self.max_depth:
continue
for entry in itertools.chain(dirs, files):
current_path = os.path.join(root, entry)
if not self._skip(current_path):
yield current_path
if self.max_depth >= 0 and not self._skip(self.filespath):
yield self.filespath
def _next_file(self):
if self.max_depth >= 0 and not self._skip(self.filespath):
yield self.filespath
class SectionConfigParser(object):
"""
Proxy access to a section of a ConfigParser object.
"""
__version__ = '0.2'
def __init__(self, section_name, config_parser):
"""
:param str section_name: Name of the section to proxy access for.
:param config_parser: ConfigParser object to proxy access for.
:type config_parser: :py:class:`ConfigParser.ConfigParser`
"""
self.section_name = section_name
self.config_parser = config_parser
def _get_raw(self, option, opt_type, default=None):
get_func = getattr(self.config_parser, 'get' + opt_type)
if default is None:
return get_func(self.section_name, option)
elif self.config_parser.has_option(self.section_name, option):
return get_func(self.section_name, option)
else:
return default
def get(self, option, default=None):
"""
Retrieve *option* from the config, returning *default* if it
is not present.
:param str option: The name of the value to return.
:param default: Default value to return if the option does not exist.
"""
return self._get_raw(option, '', default)
def getint(self, option, default=None):
"""
Retrieve *option* from the config, returning *default* if it
is not present.
:param str option: The name of the value to return.
:param default: Default value to return if the option does not exist.
:rtype: int
"""
return self._get_raw(option, 'int', default)
def getfloat(self, option, default=None):
"""
Retrieve *option* from the config, returning *default* if it
is not present.
:param str option: The name of the value to return.
:param default: Default value to return if the option does not exist.
:rtype: float
"""
return self._get_raw(option, 'float', default)
def getboolean(self, option, default=None):
"""
Retrieve *option* from the config, returning *default* if it
is not present.
:param str option: The name of the value to return.
:param default: Default value to return if the option does not exist.
:rtype: bool
"""
return self._get_raw(option, 'boolean', default)
def has_option(self, option):
"""
Check that *option* exists in the configuration file.
:param str option: The name of the option to check.
:rtype: bool
"""
return self.config_parser.has_option(self.section_name, option)
def options(self):
"""
Get a list of all options that are present in the section of the
configuration.
:return: A list of all set options.
:rtype: list
"""
return self.config_parser.options(self.section_name)
def items(self):
"""
Return all options and their values in the form of a list of tuples.
:return: A list of all values and options.
:rtype: list
"""
return self.config_parser.items(self.section_name)
def set(self, option, value):
"""
Set an option to an arbitrary value.
:param str option: The name of the option to set.
:param value: The value to set the option to.
"""
self.config_parser.set(self.section_name, option, value)
class TestCase(unittest.TestCase):
"""
This class provides additional functionality over the built in
:py:class:`unittest.TestCase` object, including better compatibility for
methods across Python 2.x and Python 3.x.
"""
def __init__(self, *args, **kwargs):
super(TestCase, self).__init__(*args, **kwargs)
if not hasattr(self, 'assertRegex') and hasattr(self, 'assertRegexpMatches'):
self.assertRegex = self.assertRegexpMatches
if not hasattr(self, 'assertNotRegex') and hasattr(self, 'assertNotRegexpMatches'):
self.assertNotRegex = self.assertNotRegexpMatches
if not hasattr(self, 'assertRaisesRegex') and hasattr(self, 'assertRaisesRegexp'):
self.assertRaisesRegex = self.assertRaisesRegexp
def configure_stream_logger(logger='', level=None, formatter='%(levelname)-8s %(message)s'):
"""
Configure the default stream handler for logging messages to the console,
remove other logging handlers, and enable capturing warnings.
.. versionadded:: 1.3.0
:param str logger: The logger to add the stream handler for.
:param level: The level to set the logger to, will default to WARNING if no level is specified.
:type level: None, int, str
:param formatter: The format to use for logging messages to the console.
:type formatter: str, :py:class:`logging.Formatter`
:return: The new configured stream handler.
:rtype: :py:class:`logging.StreamHandler`
"""
level = level or logging.WARNING
if isinstance(level, str):
level_name = level
level = getattr(logging, level_name, None)
if level is None:
raise ValueError('invalid log level: ' + level_name)
root_logger = logging.getLogger('')
for handler in root_logger.handlers:
root_logger.removeHandler(handler)
logging.getLogger(logger).setLevel(logging.DEBUG)
console_log_handler = logging.StreamHandler()
console_log_handler.setLevel(level)
if isinstance(formatter, str):
formatter = logging.Formatter(formatter)
elif not isinstance(formatter, logging.Formatter):
raise TypeError('formatter must be an instance of logging.Formatter')
console_log_handler.setFormatter(formatter)
logging.getLogger(logger).addHandler(console_log_handler)
logging.captureWarnings(True)
return console_log_handler
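# A minimal usage sketch (logger name, level and format are illustrative):
# >>> handler = configure_stream_logger(level='INFO', formatter='%(name)s: %(message)s')
# This removes any handlers already attached to the root logger, sets the
# selected logger to DEBUG, and attaches a console handler that filters at
# INFO.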
def download(url, filename=None):
"""
Download a file from a url and save it to disk.
:param str url: The URL to fetch the file from.
:param str filename: The destination file to write the data to.
"""
# requirements os, shutil, urllib.parse, urllib.request
if not filename:
url_parts = urllib.parse.urlparse(url)
filename = os.path.basename(url_parts.path)
url_h = urllib.request.urlopen(url)
with open(filename, 'wb') as file_h:
shutil.copyfileobj(url_h, file_h)
url_h.close()
return
def escape_single_quote(unescaped):
"""
Escape a string containing single quotes and backslashes with backslashes.
This is useful when a string is evaluated in some way.
:param str unescaped: The string to escape.
:return: The escaped string.
:rtype: str
"""
# requirements = re
return re.sub(r'(\'|\\)', r'\\\1', unescaped)
def format_bytes_size(val):
"""
Take a number of bytes and convert it to a human readable number.
:param int val: The number of bytes to format.
:return: The size in a human readable format.
:rtype: str
"""
if not val:
return '0 bytes'
for sz_name in ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB']:
if val < 1024.0:
return "{0:.2f} {1}".format(val, sz_name)
val /= 1024.0
raise OverflowError()
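# Illustrative values computed by the loop above:
# >>> format_bytes_size(0)
# '0 bytes'
# >>> format_bytes_size(1024)
# '1.00 KB'
# >>> format_bytes_size(2500000)
# '2.38 MB'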
def grep(expression, file, flags=0, invert=False):
"""
Search a file and return a list of all lines that match a regular expression.
:param str expression: The regex to search for.
:param file: The file to search in.
:type file: str, file
:param int flags: The regex flags to use when searching.
:param bool invert: Select non matching lines instead.
:return: All the matching lines.
:rtype: list
"""
# requirements = re
if isinstance(file, str):
file = open(file)
lines = []
for line in file:
if bool(re.search(expression, line, flags=flags)) ^ invert:
lines.append(line)
return lines
def is_valid_email_address(email_address):
"""
Check that the string specified appears to be a valid email address.
:param str email_address: The email address to validate.
:return: Whether the email address appears to be valid or not.
:rtype: bool
"""
# requirements = re
return EMAIL_REGEX.match(email_address) is not None
def get_ip_list(ip_network, mask=None):
"""
Quickly convert an IPv4 or IPv6 network (CIDR or Subnet) to a list
of individual IPs in their string representation.
:param str ip_network:
:param int mask:
:return: list
"""
if mask and '/' not in ip_network:
net = ipaddress.ip_network("{0}/{1}".format(ip_network, mask))
elif '/' not in ip_network:
return [str(ipaddress.ip_address(ip_network))]
else:
net = ipaddress.ip_network(ip_network)
hosts = net.hosts()
if net.netmask == ipaddress.IPv4Address('255.255.255.255') and sys.version_info > (3, 9):
# see: https://github.com/zeroSteiner/smoke-zephyr/issues/8
hosts = []
return [host.__str__() for host in hosts]
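# Illustrative conversions (addresses are examples only):
# >>> get_ip_list('192.168.1.0/30')
# ['192.168.1.1', '192.168.1.2']
# >>> get_ip_list('192.168.1.0', mask=30)
# ['192.168.1.1', '192.168.1.2']
# >>> get_ip_list('10.0.0.5')
# ['10.0.0.5']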
def sort_ipv4_list(ip_list, unique=True):
"""
Sorts a provided list of IPv4 addresses. Optionally removes duplicate values.
Supports IPv4 addresses with ports included (ex: [10.11.12.13:80, 10.11.12.13:8080])
:param ip_list: (list) iterable of IPv4 Addresses
:param unique: (bool) removes duplicate values if true
:return: sorted list of IP addresses
"""
if unique:
ip_list = list(set(ip_list))
ipv4_list = sorted([i.rstrip(':') for i in ip_list], key=lambda ip: (
int(ip.split(".")[0]),
int(ip.split(".")[1]),
int(ip.split(".")[2]),
int(ip.split(".")[3].split(':')[0]),
int(ip.split(":")[1]) if ":" in ip else 0
))
return ipv4_list
def open_uri(uri):
"""
Open a URI in a platform intelligent way. On Windows this will use
'cmd.exe /c start' and on Linux this will use gvfs-open or xdg-open
depending on which is available. If no suitable application can be
found to open the URI, a RuntimeError will be raised.
.. versionadded:: 1.3.0
:param str uri: The URI to open.
"""
close_fds = True
startupinfo = None
proc_args = []
if sys.platform.startswith('win'):
proc_args.append(which('cmd.exe'))
proc_args.append('/c')
proc_args.append('start')
uri = uri.replace('&', '^&')
close_fds = False
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = subprocess.SW_HIDE
elif which('gvfs-open'):
proc_args.append(which('gvfs-open'))
elif which('xdg-open'):
proc_args.append(which('xdg-open'))
else:
raise RuntimeError('could not find suitable application to open uri')
proc_args.append(uri)
proc_h = subprocess.Popen(proc_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=close_fds, startupinfo=startupinfo)
return proc_h.wait() == 0
def parse_case_camel_to_snake(camel):
"""
Convert a string from CamelCase to snake_case.
:param str camel: The CamelCase string to convert.
:return: The snake_case version of string.
:rtype: str
"""
# requirements = re
return re.sub('((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))', r'_\1', camel).lower()
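# Illustrative conversions:
# >>> parse_case_camel_to_snake('SmokeZephyr')
# 'smoke_zephyr'
# >>> parse_case_camel_to_snake('HTTPServer')
# 'http_server'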
def parse_case_snake_to_camel(snake, upper_first=True):
"""
Convert a string from snake_case to CamelCase.
:param str snake: The snake_case string to convert.
:param bool upper_first: Whether or not to capitalize the first
character of the string.
:return: The CamelCase version of string.
:rtype: str
"""
snake = snake.split('_')
first_part = snake[0]
if upper_first:
first_part = first_part.title()
return first_part + ''.join(word.title() for word in snake[1:])
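# Illustrative conversions:
# >>> parse_case_snake_to_camel('smoke_zephyr')
# 'SmokeZephyr'
# >>> parse_case_snake_to_camel('smoke_zephyr', upper_first=False)
# 'smokeZephyr'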
def parse_server(server, default_port):
"""
Convert a server string to a tuple suitable for passing to connect, for
example converting 'www.google.com:443' to ('www.google.com', 443).
:param str server: The server string to convert.
:param int default_port: The port to use in case one is not specified
in the server string.
:return: The parsed server information.
:rtype: tuple
"""
server = server.rsplit(':', 1)
host = server[0]
if host.startswith('[') and host.endswith(']'):
host = host[1:-1]
if len(server) == 1:
return (host, default_port)
port = server[1]
if not port:
port = default_port
else:
port = int(port)
return (host, port)
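# Illustrative conversions (hosts and ports are examples only):
# >>> parse_server('www.google.com:443', 80)
# ('www.google.com', 443)
# >>> parse_server('www.google.com', 80)
# ('www.google.com', 80)
# >>> parse_server('[2001:db8::1]:8080', 80)
# ('2001:db8::1', 8080)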
def parse_timespan(timedef):
"""
Convert a string timespan definition to seconds, for example converting
'1m30s' to 90. If *timedef* is already an int, the value will be returned
unmodified.
:param timedef: The timespan definition to convert to seconds.
:type timedef: int, str
:return: The converted value in seconds.
:rtype: int
"""
if isinstance(timedef, int):
return timedef
converter_order = ('w', 'd', 'h', 'm', 's')
converters = {
'w': 604800,
'd': 86400,
'h': 3600,
'm': 60,
's': 1
}
timedef = timedef.lower()
if timedef.isdigit():
return int(timedef)
elif len(timedef) == 0:
return 0
seconds = -1
for spec in converter_order:
timedef = timedef.split(spec)
if len(timedef) == 1:
timedef = timedef[0]
continue
elif len(timedef) > 2 or not timedef[0].isdigit():
seconds = -1
break
adjustment = converters[spec]
seconds = max(seconds, 0)
seconds += (int(timedef[0]) * adjustment)
timedef = timedef[1]
if not len(timedef):
break
if seconds < 0:
raise ValueError('invalid time format')
return seconds
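# Illustrative conversions:
# >>> parse_timespan('1m30s')
# 90
# >>> parse_timespan('2h')
# 7200
# >>> parse_timespan(45)
# 45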
def parse_to_slug(words, maxlen=24):
"""
Parse a string into a slug format suitable for use in URLs and other
character restricted applications. Only utf-8 strings are supported at this
time.
:param str words: The words to parse.
:param int maxlen: The maximum length of the slug.
:return: The parsed words as a slug.
:rtype: str
"""
slug = ''
maxlen = min(maxlen, len(words))
for c in words:
if len(slug) == maxlen:
break
c = ord(c)
if c == 0x27:
continue
elif c >= 0x30 and c <= 0x39:
slug += chr(c)
elif c >= 0x41 and c <= 0x5a:
slug += chr(c + 0x20)
elif c >= 0x61 and c <= 0x7a:
slug += chr(c)
elif len(slug) and slug[-1] != '-':
slug += '-'
if len(slug) and slug[-1] == '-':
slug = slug[:-1]
return slug
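# Illustrative conversion:
# >>> parse_to_slug('Hello, World!')
# 'hello-world'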
def random_string_alphanumeric(size):
"""
Generate a random string of *size* length consisting of mixed case letters
and numbers. This function is not meant for cryptographic purposes.
:param int size: The length of the string to return.
:return: A string consisting of random characters.
:rtype: str
"""
# requirements = random, string
return ''.join(random.choice(string.ascii_letters + string.digits) for x in range(size))
def random_string_lower_numeric(size):
"""
Generate a random string of *size* length consisting of lowercase letters
and numbers. This function is not meant for cryptographic purposes.
:param int size: The length of the string to return.
:return: A string consisting of random characters.
:rtype: str
"""
# requirements = random, string
return ''.join(random.choice(string.ascii_lowercase + string.digits) for x in range(size))
def selection_collision(selections, poolsize):
"""
Calculate the probability that two random values selected from an arbitrary
sized pool of unique values will be equal. This is commonly known as the
"Birthday Problem".
:param int selections: The number of random selections.
:param int poolsize: The number of unique random values in the pool to choose from.
:rtype: float
:return: The chance that a collision will occur as a percentage.
"""
# requirements = sys
probability = 100.0
poolsize = float(poolsize)
for i in range(selections):
probability = probability * (poolsize - i) / poolsize
probability = (100.0 - probability)
return probability
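# The classic birthday problem: 23 selections from a pool of 365 unique
# values collide a little over half the time:
# >>> round(selection_collision(23, 365), 1)
# 50.7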
def unescape_single_quote(escaped):
"""
Unescape a string which uses backslashes to escape single quotes.
:param str escaped: The string to unescape.
:return: The unescaped string.
:rtype: str
"""
escaped = escaped.replace('\\\\', '\\')
escaped = escaped.replace('\\\'', '\'')
return escaped
def unique(seq, key=None):
"""
Create a unique list or tuple from a provided list or tuple and preserve the
order.
:param seq: The list or tuple to preserve unique items from.
:type seq: list, tuple
:param key: If key is provided it will be called during the
comparison process.
:type key: function, None
"""
if key is None:
key = lambda x: x
preserved_type = type(seq)
if preserved_type not in (list, tuple):
raise TypeError("unique argument 1 must be list or tuple, not {0}".format(preserved_type.__name__))
seen = []
result = []
for item in seq:
marker = key(item)
if marker in seen:
continue
seen.append(marker)
result.append(item)
return preserved_type(result)
def weighted_choice(choices, weight):
"""
Make a random selection from the specified choices. Apply the *weight*
function to each choice to get a positive integer representing the share of
the selection pool that the choice should receive. The *weight* function is
passed a single argument of the choice from the *choices* iterable.
:param choices: The choices to select from.
:type choices: list, tuple
:param weight: The function used to gather weight information for choices.
:type weight: function
:return: A randomly selected choice from the provided *choices*.
"""
# requirements = random
weights = []
# get weight values for each of the choices
for choice in choices:
choice_weight = weight(choice)
if not (isinstance(choice_weight, int) and choice_weight > 0):
raise TypeError('weight results must be positive integers')
weights.append(choice_weight)
# make a selection within the acceptable range
selection = random.randint(0, sum(weights) - 1)
# find and return the corresponding choice
for idx, choice in enumerate(choices):
if selection < sum(weights[:idx + 1]):
return choice
raise RuntimeError('no selection could be made')
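# A minimal usage sketch (the choices and weights are illustrative):
# >>> weighted_choice(['a', 'b', 'c'], lambda c: {'a': 1, 'b': 2, 'c': 7}[c])
# 'c'
# With these weights 'c' is returned roughly 70% of the time, 'b' about 20%
# and 'a' about 10%.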
def which(program):
"""
Locate an executable binary's full path by its name.
:param str program: The executable's name.
:return: The full path to the executable.
:rtype: str
"""
# requirements = os
is_exe = lambda fpath: (os.path.isfile(fpath) and os.access(fpath, os.X_OK))
for path in os.environ['PATH'].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
if is_exe(program):
return os.path.abspath(program)
return None
def xfrange(start, stop=None, step=1):
"""
Iterate through an arithmetic progression.
:param start: Starting number.
:type start: float, int, long
:param stop: Stopping number.
:type stop: float, int, long
:param step: Stepping size.
:type step: float, int, long
"""
if stop is None:
stop = start
start = 0.0
start = float(start)
while start < stop:
yield start
start += step
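# Illustrative progressions:
# >>> list(xfrange(3))
# [0.0, 1.0, 2.0]
# >>> list(xfrange(0, 1, 0.25))
# [0.0, 0.25, 0.5, 0.75]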
|
|
from django import forms, template
from django.forms.formsets import all_valid
from django.forms.models import modelform_factory, modelformset_factory, inlineformset_factory
from django.forms.models import BaseInlineFormSet
from django.contrib.contenttypes.models import ContentType
from django.contrib.admin import widgets
from django.contrib.admin import helpers
from django.contrib.admin.util import unquote, flatten_fieldsets, get_deleted_objects, model_ngettext, model_format_dict
from django.contrib import messages
from django.views.decorators.csrf import csrf_protect
from django.core.exceptions import PermissionDenied, ValidationError
from django.db import models, transaction
from django.db.models.fields import BLANK_CHOICE_DASH
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render_to_response
from django.utils.decorators import method_decorator
from django.utils.datastructures import SortedDict
from django.utils.functional import update_wrapper
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.functional import curry
from django.utils.text import capfirst, get_text_list
from django.utils.translation import ugettext as _
from django.utils.translation import ungettext, ugettext_lazy
from django.utils.encoding import force_unicode
HORIZONTAL, VERTICAL = 1, 2
# returns the <ul> class for a given radio_admin field
get_ul_class = lambda x: 'radiolist%s' % ((x == HORIZONTAL) and ' inline' or '')
class IncorrectLookupParameters(Exception):
pass
# Defaults for formfield_overrides. ModelAdmin subclasses can change this
# by adding to ModelAdmin.formfield_overrides.
FORMFIELD_FOR_DBFIELD_DEFAULTS = {
models.DateTimeField: {
'form_class': forms.SplitDateTimeField,
'widget': widgets.AdminSplitDateTime
},
models.DateField: {'widget': widgets.AdminDateWidget},
models.TimeField: {'widget': widgets.AdminTimeWidget},
models.TextField: {'widget': widgets.AdminTextareaWidget},
models.URLField: {'widget': widgets.AdminURLFieldWidget},
models.IntegerField: {'widget': widgets.AdminIntegerFieldWidget},
models.BigIntegerField: {'widget': widgets.AdminIntegerFieldWidget},
models.CharField: {'widget': widgets.AdminTextInputWidget},
models.ImageField: {'widget': widgets.AdminFileWidget},
models.FileField: {'widget': widgets.AdminFileWidget},
}
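# Illustrative sketch (not part of Django): a hypothetical ModelAdmin subclass
# layering its own formfield_overrides on top of the defaults above. The class
# body is wrapped in a function only so that ModelAdmin, which is defined later
# in this module, is looked up at call time.
def _example_formfield_overrides():
    class ExampleAdmin(ModelAdmin):
        formfield_overrides = {
            models.TextField: {'widget': widgets.AdminTextareaWidget},
        }
    return ExampleAdmin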
csrf_protect_m = method_decorator(csrf_protect)
class BaseModelAdmin(object):
"""Functionality common to both ModelAdmin and InlineAdmin."""
__metaclass__ = forms.MediaDefiningClass
raw_id_fields = ()
fields = None
exclude = None
fieldsets = None
form = forms.ModelForm
filter_vertical = ()
filter_horizontal = ()
radio_fields = {}
prepopulated_fields = {}
formfield_overrides = {}
readonly_fields = ()
def __init__(self):
overrides = FORMFIELD_FOR_DBFIELD_DEFAULTS.copy()
overrides.update(self.formfield_overrides)
self.formfield_overrides = overrides
def formfield_for_dbfield(self, db_field, **kwargs):
"""
Hook for specifying the form Field instance for a given database Field
instance.
If kwargs are given, they're passed to the form Field's constructor.
"""
request = kwargs.pop("request", None)
# If the field specifies choices, we don't need to look for special
# admin widgets - we just need to use a select widget of some kind.
if db_field.choices:
return self.formfield_for_choice_field(db_field, request, **kwargs)
# ForeignKey or ManyToManyFields
if isinstance(db_field, (models.ForeignKey, models.ManyToManyField)):
# Combine the field kwargs with any options for formfield_overrides.
# Make sure the passed in **kwargs override anything in
# formfield_overrides because **kwargs is more specific, and should
# always win.
if db_field.__class__ in self.formfield_overrides:
kwargs = dict(self.formfield_overrides[db_field.__class__], **kwargs)
# Get the correct formfield.
if isinstance(db_field, models.ForeignKey):
formfield = self.formfield_for_foreignkey(db_field, request, **kwargs)
elif isinstance(db_field, models.ManyToManyField):
formfield = self.formfield_for_manytomany(db_field, request, **kwargs)
# For non-raw_id fields, wrap the widget with a wrapper that adds
# extra HTML -- the "add other" interface -- to the end of the
# rendered output. formfield can be None if it came from a
# OneToOneField with parent_link=True or a M2M intermediary.
if formfield and db_field.name not in self.raw_id_fields:
formfield.widget = widgets.RelatedFieldWidgetWrapper(formfield.widget, db_field.rel, self.admin_site)
return formfield
# If we've got overrides for the formfield defined, use 'em. **kwargs
# passed to formfield_for_dbfield override the defaults.
for klass in db_field.__class__.mro():
if klass in self.formfield_overrides:
kwargs = dict(self.formfield_overrides[klass], **kwargs)
return db_field.formfield(**kwargs)
# For any other type of field, just call its formfield() method.
return db_field.formfield(**kwargs)
def formfield_for_choice_field(self, db_field, request=None, **kwargs):
"""
Get a form Field for a database Field that has declared choices.
"""
# If the field is named as a radio_field, use a RadioSelect
if db_field.name in self.radio_fields:
# Avoid stomping on custom widget/choices arguments.
if 'widget' not in kwargs:
kwargs['widget'] = widgets.AdminRadioSelect(attrs={
'class': get_ul_class(self.radio_fields[db_field.name]),
})
if 'choices' not in kwargs:
kwargs['choices'] = db_field.get_choices(
include_blank = db_field.blank,
blank_choice=[('', _('None'))]
)
return db_field.formfield(**kwargs)
def formfield_for_foreignkey(self, db_field, request=None, **kwargs):
"""
Get a form Field for a ForeignKey.
"""
db = kwargs.get('using')
if db_field.name in self.raw_id_fields:
kwargs['widget'] = widgets.ForeignKeyRawIdWidget(db_field.rel, using=db)
elif db_field.name in self.radio_fields:
kwargs['widget'] = widgets.AdminRadioSelect(attrs={
'class': get_ul_class(self.radio_fields[db_field.name]),
})
kwargs['empty_label'] = db_field.blank and _('None') or None
return db_field.formfield(**kwargs)
def formfield_for_manytomany(self, db_field, request=None, **kwargs):
"""
Get a form Field for a ManyToManyField.
"""
# If it uses an intermediary model that isn't auto created, don't show
# a field in admin.
if not db_field.rel.through._meta.auto_created:
return None
db = kwargs.get('using')
if db_field.name in self.raw_id_fields:
kwargs['widget'] = widgets.ManyToManyRawIdWidget(db_field.rel, using=db)
kwargs['help_text'] = ''
elif db_field.name in (list(self.filter_vertical) + list(self.filter_horizontal)):
kwargs['widget'] = widgets.FilteredSelectMultiple(db_field.verbose_name, (db_field.name in self.filter_vertical))
return db_field.formfield(**kwargs)
def _declared_fieldsets(self):
if self.fieldsets:
return self.fieldsets
elif self.fields:
return [(None, {'fields': self.fields})]
return None
declared_fieldsets = property(_declared_fieldsets)
def get_readonly_fields(self, request, obj=None):
return self.readonly_fields
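# Illustrative sketch (not part of Django): the widget-selection hooks defined
# above in use -- raw_id_fields swaps in a raw-ID widget for the ForeignKey,
# while radio_fields renders a choice field as radio buttons. The field names
# ('author', 'status') are assumptions; ModelAdmin is defined just below and is
# resolved when the function is called.
def _example_widget_options():
    class ExampleAdmin(ModelAdmin):
        raw_id_fields = ('author',)
        radio_fields = {'status': HORIZONTAL}
    return ExampleAdmin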
class ModelAdmin(BaseModelAdmin):
"Encapsulates all admin options and functionality for a given model."
list_display = ('__str__',)
list_display_links = ()
list_filter = ()
list_select_related = False
list_per_page = 100
list_editable = ()
search_fields = ()
date_hierarchy = None
save_as = False
save_on_top = False
ordering = None
inlines = []
# Custom templates (designed to be over-ridden in subclasses)
add_form_template = None
change_form_template = None
change_list_template = None
delete_confirmation_template = None
delete_selected_confirmation_template = None
object_history_template = None
# Actions
actions = []
action_form = helpers.ActionForm
actions_on_top = True
actions_on_bottom = False
actions_selection_counter = True
def __init__(self, model, admin_site):
self.model = model
self.opts = model._meta
self.admin_site = admin_site
self.inline_instances = []
for inline_class in self.inlines:
inline_instance = inline_class(self.model, self.admin_site)
self.inline_instances.append(inline_instance)
if 'action_checkbox' not in self.list_display and self.actions is not None:
self.list_display = ['action_checkbox'] + list(self.list_display)
if not self.list_display_links:
for name in self.list_display:
if name != 'action_checkbox':
self.list_display_links = [name]
break
super(ModelAdmin, self).__init__()
def get_urls(self):
from django.conf.urls.defaults import patterns, url
def wrap(view):
def wrapper(*args, **kwargs):
return self.admin_site.admin_view(view)(*args, **kwargs)
return update_wrapper(wrapper, view)
info = self.model._meta.app_label, self.model._meta.module_name
urlpatterns = patterns('',
url(r'^$',
wrap(self.changelist_view),
name='%s_%s_changelist' % info),
url(r'^add/$',
wrap(self.add_view),
name='%s_%s_add' % info),
url(r'^(.+)/history/$',
wrap(self.history_view),
name='%s_%s_history' % info),
url(r'^(.+)/delete/$',
wrap(self.delete_view),
name='%s_%s_delete' % info),
url(r'^(.+)/$',
wrap(self.change_view),
name='%s_%s_change' % info),
)
return urlpatterns
def urls(self):
return self.get_urls()
urls = property(urls)
def _media(self):
from django.conf import settings
js = ['js/core.js', 'js/admin/RelatedObjectLookups.js',
'js/jquery.min.js', 'js/jquery.init.js']
if self.actions is not None:
js.extend(['js/actions.min.js'])
if self.prepopulated_fields:
js.append('js/urlify.js')
js.append('js/prepopulate.min.js')
if self.opts.get_ordered_objects():
js.extend(['js/getElementsBySelector.js', 'js/dom-drag.js' , 'js/admin/ordering.js'])
return forms.Media(js=['%s%s' % (settings.ADMIN_MEDIA_PREFIX, url) for url in js])
media = property(_media)
def has_add_permission(self, request):
"Returns True if the given request has permission to add an object."
opts = self.opts
return request.user.has_perm(opts.app_label + '.' + opts.get_add_permission())
def has_change_permission(self, request, obj=None):
"""
Returns True if the given request has permission to change the given
Django model instance.
If `obj` is None, this should return True if the given request has
permission to change *any* object of the given type.
"""
opts = self.opts
return request.user.has_perm(opts.app_label + '.' + opts.get_change_permission())
def has_delete_permission(self, request, obj=None):
"""
Returns True if the given request has permission to delete the given
Django model instance.
If `obj` is None, this should return True if the given request has
permission to delete *any* object of the given type.
"""
opts = self.opts
return request.user.has_perm(opts.app_label + '.' + opts.get_delete_permission())
def get_model_perms(self, request):
"""
Returns a dict of all perms for this model. This dict has the keys
``add``, ``change``, and ``delete`` mapping to True/False for each
of those actions.
"""
return {
'add': self.has_add_permission(request),
'change': self.has_change_permission(request),
'delete': self.has_delete_permission(request),
}
def queryset(self, request):
"""
Returns a QuerySet of all model instances that can be edited by the
admin site. This is used by changelist_view.
"""
qs = self.model._default_manager.get_query_set()
# TODO: this should be handled by some parameter to the ChangeList.
ordering = self.ordering or () # otherwise we might try to *None, which is bad ;)
if ordering:
qs = qs.order_by(*ordering)
return qs
def get_fieldsets(self, request, obj=None):
"Hook for specifying fieldsets for the add form."
if self.declared_fieldsets:
return self.declared_fieldsets
form = self.get_form(request, obj)
fields = form.base_fields.keys() + list(self.get_readonly_fields(request, obj))
return [(None, {'fields': fields})]
def get_form(self, request, obj=None, **kwargs):
"""
Returns a Form class for use in the admin add view. This is used by
add_view and change_view.
"""
if self.declared_fieldsets:
fields = flatten_fieldsets(self.declared_fieldsets)
else:
fields = None
if self.exclude is None:
exclude = []
else:
exclude = list(self.exclude)
exclude.extend(kwargs.get("exclude", []))
exclude.extend(self.get_readonly_fields(request, obj))
# if exclude is an empty list we pass None to be consistent with the
# default on modelform_factory
exclude = exclude or None
defaults = {
"form": self.form,
"fields": fields,
"exclude": exclude,
"formfield_callback": curry(self.formfield_for_dbfield, request=request),
}
defaults.update(kwargs)
return modelform_factory(self.model, **defaults)
def get_changelist(self, request, **kwargs):
"""
Returns the ChangeList class for use on the changelist page.
"""
from django.contrib.admin.views.main import ChangeList
return ChangeList
def get_object(self, request, object_id):
"""
Returns an instance matching the primary key provided. ``None`` is
returned if no match is found (or the object_id failed validation
against the primary key field).
"""
queryset = self.queryset(request)
model = queryset.model
try:
object_id = model._meta.pk.to_python(object_id)
return queryset.get(pk=object_id)
except (model.DoesNotExist, ValidationError):
return None
def get_changelist_form(self, request, **kwargs):
"""
Returns a Form class for use in the Formset on the changelist page.
"""
defaults = {
"formfield_callback": curry(self.formfield_for_dbfield, request=request),
}
defaults.update(kwargs)
return modelform_factory(self.model, **defaults)
def get_changelist_formset(self, request, **kwargs):
"""
Returns a FormSet class for use on the changelist page if list_editable
is used.
"""
defaults = {
"formfield_callback": curry(self.formfield_for_dbfield, request=request),
}
defaults.update(kwargs)
return modelformset_factory(self.model,
self.get_changelist_form(request), extra=0,
fields=self.list_editable, **defaults)
def get_formsets(self, request, obj=None):
for inline in self.inline_instances:
yield inline.get_formset(request, obj)
def log_addition(self, request, object):
"""
Log that an object has been successfully added.
The default implementation creates an admin LogEntry object.
"""
from django.contrib.admin.models import LogEntry, ADDITION
LogEntry.objects.log_action(
user_id = request.user.pk,
content_type_id = ContentType.objects.get_for_model(object).pk,
object_id = object.pk,
object_repr = force_unicode(object),
action_flag = ADDITION
)
def log_change(self, request, object, message):
"""
Log that an object has been successfully changed.
The default implementation creates an admin LogEntry object.
"""
from django.contrib.admin.models import LogEntry, CHANGE
LogEntry.objects.log_action(
user_id = request.user.pk,
content_type_id = ContentType.objects.get_for_model(object).pk,
object_id = object.pk,
object_repr = force_unicode(object),
action_flag = CHANGE,
change_message = message
)
def log_deletion(self, request, object, object_repr):
"""
Log that an object has been successfully deleted. Note that since the
object is deleted, it might no longer be safe to call *any* methods
on the object, hence this method getting object_repr.
The default implementation creates an admin LogEntry object.
"""
from django.contrib.admin.models import LogEntry, DELETION
LogEntry.objects.log_action(
user_id = request.user.id,
content_type_id = ContentType.objects.get_for_model(self.model).pk,
object_id = object.pk,
object_repr = object_repr,
action_flag = DELETION
)
def action_checkbox(self, obj):
"""
A list_display column containing a checkbox widget.
"""
return helpers.checkbox.render(helpers.ACTION_CHECKBOX_NAME, force_unicode(obj.pk))
action_checkbox.short_description = mark_safe('<input type="checkbox" id="action-toggle" />')
action_checkbox.allow_tags = True
def get_actions(self, request):
"""
Return a dictionary mapping the names of all actions for this
ModelAdmin to a tuple of (callable, name, description) for each action.
"""
# If self.actions is explicitly set to None that means that we don't
# want *any* actions enabled on this page.
if self.actions is None:
return []
actions = []
# Gather actions from the admin site first
for (name, func) in self.admin_site.actions:
description = getattr(func, 'short_description', name.replace('_', ' '))
actions.append((func, name, description))
# Then gather them from the model admin and all parent classes,
# starting with self and working back up.
for klass in self.__class__.mro()[::-1]:
class_actions = getattr(klass, 'actions', [])
# Avoid trying to iterate over None
if not class_actions:
continue
actions.extend([self.get_action(action) for action in class_actions])
# get_action might have returned None, so filter any of those out.
actions = filter(None, actions)
# Convert the actions into a SortedDict keyed by name
# and sorted by description.
actions.sort(lambda a,b: cmp(a[2].lower(), b[2].lower()))
actions = SortedDict([
(name, (func, name, desc))
for func, name, desc in actions
])
return actions
def get_action_choices(self, request, default_choices=BLANK_CHOICE_DASH):
"""
Return a list of choices for use in a form object. Each choice is a
tuple (name, description).
"""
choices = [] + default_choices
for func, name, description in self.get_actions(request).itervalues():
choice = (name, description % model_format_dict(self.opts))
choices.append(choice)
return choices
def get_action(self, action):
"""
Return a given action from a parameter, which can either be a callable,
or the name of a method on the ModelAdmin. Return is a tuple of
(callable, name, description).
"""
# If the action is a callable, just use it.
if callable(action):
func = action
action = action.__name__
# Next, look for a method. Grab it off self.__class__ to get an unbound
# method instead of a bound one; this ensures that the calling
# conventions are the same for functions and methods.
elif hasattr(self.__class__, action):
func = getattr(self.__class__, action)
# Finally, look for a named method on the admin site
else:
try:
func = self.admin_site.get_action(action)
except KeyError:
return None
if hasattr(func, 'short_description'):
description = func.short_description
else:
description = capfirst(action.replace('_', ' '))
return func, action, description
def construct_change_message(self, request, form, formsets):
"""
Construct a change message from a changed object.
"""
change_message = []
if form.changed_data:
change_message.append(_('Changed %s.') % get_text_list(form.changed_data, _('and')))
if formsets:
for formset in formsets:
for added_object in formset.new_objects:
change_message.append(_('Added %(name)s "%(object)s".')
% {'name': force_unicode(added_object._meta.verbose_name),
'object': force_unicode(added_object)})
for changed_object, changed_fields in formset.changed_objects:
change_message.append(_('Changed %(list)s for %(name)s "%(object)s".')
% {'list': get_text_list(changed_fields, _('and')),
'name': force_unicode(changed_object._meta.verbose_name),
'object': force_unicode(changed_object)})
for deleted_object in formset.deleted_objects:
change_message.append(_('Deleted %(name)s "%(object)s".')
% {'name': force_unicode(deleted_object._meta.verbose_name),
'object': force_unicode(deleted_object)})
change_message = ' '.join(change_message)
return change_message or _('No fields changed.')
def message_user(self, request, message):
"""
Send a message to the user. The default implementation
posts a message using the django.contrib.messages backend.
"""
messages.info(request, message)
def save_form(self, request, form, change):
"""
Given a ModelForm return an unsaved instance. ``change`` is True if
the object is being changed, and False if it's being added.
"""
return form.save(commit=False)
def save_model(self, request, obj, form, change):
"""
Given a model instance save it to the database.
"""
obj.save()
def save_formset(self, request, form, formset, change):
"""
Given an inline formset save it to the database.
"""
formset.save()
def render_change_form(self, request, context, add=False, change=False, form_url='', obj=None):
opts = self.model._meta
app_label = opts.app_label
ordered_objects = opts.get_ordered_objects()
context.update({
'add': add,
'change': change,
'has_add_permission': self.has_add_permission(request),
'has_change_permission': self.has_change_permission(request, obj),
'has_delete_permission': self.has_delete_permission(request, obj),
'has_file_field': True, # FIXME - this should check if form or formsets have a FileField,
'has_absolute_url': hasattr(self.model, 'get_absolute_url'),
'ordered_objects': ordered_objects,
'form_url': mark_safe(form_url),
'opts': opts,
'content_type_id': ContentType.objects.get_for_model(self.model).id,
'save_as': self.save_as,
'save_on_top': self.save_on_top,
'root_path': self.admin_site.root_path,
})
if add and self.add_form_template is not None:
form_template = self.add_form_template
else:
form_template = self.change_form_template
context_instance = template.RequestContext(request, current_app=self.admin_site.name)
return render_to_response(form_template or [
"admin/%s/%s/change_form.html" % (app_label, opts.object_name.lower()),
"admin/%s/change_form.html" % app_label,
"admin/change_form.html"
], context, context_instance=context_instance)
def response_add(self, request, obj, post_url_continue='../%s/'):
"""
Determines the HttpResponse for the add_view stage.
"""
opts = obj._meta
pk_value = obj._get_pk_val()
msg = _('The %(name)s "%(obj)s" was added successfully.') % {'name': force_unicode(opts.verbose_name), 'obj': force_unicode(obj)}
# Here, we distinguish between different save types by checking for
# the presence of keys in request.POST.
if request.POST.has_key("_continue"):
self.message_user(request, msg + ' ' + _("You may edit it again below."))
if request.POST.has_key("_popup"):
post_url_continue += "?_popup=1"
return HttpResponseRedirect(post_url_continue % pk_value)
if request.POST.has_key("_popup"):
return HttpResponse('<script type="text/javascript">opener.dismissAddAnotherPopup(window, "%s", "%s");</script>' % \
# escape() calls force_unicode.
(escape(pk_value), escape(obj)))
elif request.POST.has_key("_addanother"):
self.message_user(request, msg + ' ' + (_("You may add another %s below.") % force_unicode(opts.verbose_name)))
return HttpResponseRedirect(request.path)
else:
self.message_user(request, msg)
# Figure out where to redirect. If the user has change permission,
# redirect to the change-list page for this object. Otherwise,
# redirect to the admin index.
if self.has_change_permission(request, None):
post_url = '../'
else:
post_url = '../../../'
return HttpResponseRedirect(post_url)
def response_change(self, request, obj):
"""
Determines the HttpResponse for the change_view stage.
"""
opts = obj._meta
pk_value = obj._get_pk_val()
msg = _('The %(name)s "%(obj)s" was changed successfully.') % {'name': force_unicode(opts.verbose_name), 'obj': force_unicode(obj)}
if request.POST.has_key("_continue"):
self.message_user(request, msg + ' ' + _("You may edit it again below."))
if request.REQUEST.has_key('_popup'):
return HttpResponseRedirect(request.path + "?_popup=1")
else:
return HttpResponseRedirect(request.path)
elif request.POST.has_key("_saveasnew"):
msg = _('The %(name)s "%(obj)s" was added successfully. You may edit it again below.') % {'name': force_unicode(opts.verbose_name), 'obj': obj}
self.message_user(request, msg)
return HttpResponseRedirect("../%s/" % pk_value)
elif request.POST.has_key("_addanother"):
self.message_user(request, msg + ' ' + (_("You may add another %s below.") % force_unicode(opts.verbose_name)))
return HttpResponseRedirect("../add/")
else:
self.message_user(request, msg)
return HttpResponseRedirect("../")
def response_action(self, request, queryset):
"""
Handle an admin action. This is called if a request is POSTed to the
changelist; it returns an HttpResponse if the action was handled, and
None otherwise.
"""
# There can be multiple action forms on the page (at the top
# and bottom of the change list, for example). Get the action
# whose button was pushed.
try:
action_index = int(request.POST.get('index', 0))
except ValueError:
action_index = 0
# Construct the action form.
data = request.POST.copy()
data.pop(helpers.ACTION_CHECKBOX_NAME, None)
data.pop("index", None)
# Use the action whose button was pushed
try:
data.update({'action': data.getlist('action')[action_index]})
except IndexError:
# If we didn't get an action from the chosen form, the POST data is
# invalid; leaving 'action' out of the data will make the form fail the
# validation check below, so there is nothing more to do here.
pass
action_form = self.action_form(data, auto_id=None)
action_form.fields['action'].choices = self.get_action_choices(request)
# If the form's valid we can handle the action.
if action_form.is_valid():
action = action_form.cleaned_data['action']
select_across = action_form.cleaned_data['select_across']
func, name, description = self.get_actions(request)[action]
# Get the list of selected PKs. If nothing's selected, we can't
# perform an action on it, so bail -- unless the action was explicitly
# requested for all objects via select_across.
selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)
if not selected and not select_across:
# Reminder that something needs to be selected or nothing will happen
msg = _("Items must be selected in order to perform "
"actions on them. No items have been changed.")
self.message_user(request, msg)
return None
if not select_across:
# Perform the action only on the selected objects
queryset = queryset.filter(pk__in=selected)
response = func(self, request, queryset)
# Actions may return an HttpResponse, which will be used as the
# response from the POST. If not, we'll be a good little HTTP
# citizen and redirect back to the changelist page.
if isinstance(response, HttpResponse):
return response
else:
return HttpResponseRedirect(request.get_full_path())
else:
msg = _("No action selected.")
self.message_user(request, msg)
return None
@csrf_protect_m
@transaction.commit_on_success
def add_view(self, request, form_url='', extra_context=None):
"The 'add' admin view for this model."
model = self.model
opts = model._meta
if not self.has_add_permission(request):
raise PermissionDenied
ModelForm = self.get_form(request)
formsets = []
if request.method == 'POST':
form = ModelForm(request.POST, request.FILES)
if form.is_valid():
new_object = self.save_form(request, form, change=False)
form_validated = True
else:
form_validated = False
new_object = self.model()
prefixes = {}
for FormSet, inline in zip(self.get_formsets(request), self.inline_instances):
prefix = FormSet.get_default_prefix()
prefixes[prefix] = prefixes.get(prefix, 0) + 1
if prefixes[prefix] != 1:
prefix = "%s-%s" % (prefix, prefixes[prefix])
formset = FormSet(data=request.POST, files=request.FILES,
instance=new_object,
save_as_new=request.POST.has_key("_saveasnew"),
prefix=prefix, queryset=inline.queryset(request))
formsets.append(formset)
if all_valid(formsets) and form_validated:
self.save_model(request, new_object, form, change=False)
form.save_m2m()
for formset in formsets:
self.save_formset(request, form, formset, change=False)
self.log_addition(request, new_object)
return self.response_add(request, new_object)
else:
# Prepare the dict of initial data from the request.
# We have to special-case M2Ms as a list of comma-separated PKs.
initial = dict(request.GET.items())
for k in initial:
try:
f = opts.get_field(k)
except models.FieldDoesNotExist:
continue
if isinstance(f, models.ManyToManyField):
initial[k] = initial[k].split(",")
form = ModelForm(initial=initial)
prefixes = {}
for FormSet, inline in zip(self.get_formsets(request),
self.inline_instances):
prefix = FormSet.get_default_prefix()
prefixes[prefix] = prefixes.get(prefix, 0) + 1
if prefixes[prefix] != 1:
prefix = "%s-%s" % (prefix, prefixes[prefix])
formset = FormSet(instance=self.model(), prefix=prefix,
queryset=inline.queryset(request))
formsets.append(formset)
adminForm = helpers.AdminForm(form, list(self.get_fieldsets(request)),
self.prepopulated_fields, self.get_readonly_fields(request),
model_admin=self)
media = self.media + adminForm.media
inline_admin_formsets = []
for inline, formset in zip(self.inline_instances, formsets):
fieldsets = list(inline.get_fieldsets(request))
readonly = list(inline.get_readonly_fields(request))
inline_admin_formset = helpers.InlineAdminFormSet(inline, formset,
fieldsets, readonly, model_admin=self)
inline_admin_formsets.append(inline_admin_formset)
media = media + inline_admin_formset.media
context = {
'title': _('Add %s') % force_unicode(opts.verbose_name),
'adminform': adminForm,
'is_popup': request.REQUEST.has_key('_popup'),
'show_delete': False,
'media': mark_safe(media),
'inline_admin_formsets': inline_admin_formsets,
'errors': helpers.AdminErrorList(form, formsets),
'root_path': self.admin_site.root_path,
'app_label': opts.app_label,
}
context.update(extra_context or {})
return self.render_change_form(request, context, form_url=form_url, add=True)
@csrf_protect_m
@transaction.commit_on_success
def change_view(self, request, object_id, extra_context=None):
"The 'change' admin view for this model."
model = self.model
opts = model._meta
obj = self.get_object(request, unquote(object_id))
if not self.has_change_permission(request, obj):
raise PermissionDenied
if obj is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {'name': force_unicode(opts.verbose_name), 'key': escape(object_id)})
if request.method == 'POST' and request.POST.has_key("_saveasnew"):
return self.add_view(request, form_url='../add/')
ModelForm = self.get_form(request, obj)
formsets = []
if request.method == 'POST':
form = ModelForm(request.POST, request.FILES, instance=obj)
if form.is_valid():
form_validated = True
new_object = self.save_form(request, form, change=True)
else:
form_validated = False
new_object = obj
prefixes = {}
for FormSet, inline in zip(self.get_formsets(request, new_object),
self.inline_instances):
prefix = FormSet.get_default_prefix()
prefixes[prefix] = prefixes.get(prefix, 0) + 1
if prefixes[prefix] != 1:
prefix = "%s-%s" % (prefix, prefixes[prefix])
formset = FormSet(request.POST, request.FILES,
instance=new_object, prefix=prefix,
queryset=inline.queryset(request))
formsets.append(formset)
if all_valid(formsets) and form_validated:
self.save_model(request, new_object, form, change=True)
form.save_m2m()
for formset in formsets:
self.save_formset(request, form, formset, change=True)
change_message = self.construct_change_message(request, form, formsets)
self.log_change(request, new_object, change_message)
return self.response_change(request, new_object)
else:
form = ModelForm(instance=obj)
prefixes = {}
for FormSet, inline in zip(self.get_formsets(request, obj), self.inline_instances):
prefix = FormSet.get_default_prefix()
prefixes[prefix] = prefixes.get(prefix, 0) + 1
if prefixes[prefix] != 1:
prefix = "%s-%s" % (prefix, prefixes[prefix])
formset = FormSet(instance=obj, prefix=prefix,
queryset=inline.queryset(request))
formsets.append(formset)
adminForm = helpers.AdminForm(form, self.get_fieldsets(request, obj),
self.prepopulated_fields, self.get_readonly_fields(request, obj),
model_admin=self)
media = self.media + adminForm.media
inline_admin_formsets = []
for inline, formset in zip(self.inline_instances, formsets):
fieldsets = list(inline.get_fieldsets(request, obj))
readonly = list(inline.get_readonly_fields(request, obj))
inline_admin_formset = helpers.InlineAdminFormSet(inline, formset,
fieldsets, readonly, model_admin=self)
inline_admin_formsets.append(inline_admin_formset)
media = media + inline_admin_formset.media
context = {
'title': _('Change %s') % force_unicode(opts.verbose_name),
'adminform': adminForm,
'object_id': object_id,
'original': obj,
'is_popup': request.REQUEST.has_key('_popup'),
'media': mark_safe(media),
'inline_admin_formsets': inline_admin_formsets,
'errors': helpers.AdminErrorList(form, formsets),
'root_path': self.admin_site.root_path,
'app_label': opts.app_label,
}
context.update(extra_context or {})
return self.render_change_form(request, context, change=True, obj=obj)
@csrf_protect_m
def changelist_view(self, request, extra_context=None):
"The 'change list' admin view for this model."
from django.contrib.admin.views.main import ERROR_FLAG
opts = self.model._meta
app_label = opts.app_label
if not self.has_change_permission(request, None):
raise PermissionDenied
# Check actions to see if any are available on this changelist
actions = self.get_actions(request)
# Remove action checkboxes if there aren't any actions available.
list_display = list(self.list_display)
if not actions:
try:
list_display.remove('action_checkbox')
except ValueError:
pass
ChangeList = self.get_changelist(request)
try:
cl = ChangeList(request, self.model, list_display, self.list_display_links, self.list_filter,
self.date_hierarchy, self.search_fields, self.list_select_related, self.list_per_page, self.list_editable, self)
except IncorrectLookupParameters:
# Wacky lookup parameters were given, so redirect to the main
# changelist page, without parameters, and pass an 'invalid=1'
# parameter via the query string. If wacky parameters were given
# and the 'invalid=1' parameter was already in the query string,
# something is screwed up with the database, so display an error
# page.
if ERROR_FLAG in request.GET.keys():
return render_to_response('admin/invalid_setup.html', {'title': _('Database error')})
return HttpResponseRedirect(request.path + '?' + ERROR_FLAG + '=1')
# If the request was POSTed, this might be a bulk action or a bulk
# edit. Try to look up an action or confirmation first, but if this
# isn't an action the POST will fall through to the bulk edit check,
# below.
action_failed = False
selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)
# Actions with no confirmation
if (actions and request.method == 'POST' and
'index' in request.POST and '_save' not in request.POST):
if selected:
response = self.response_action(request, queryset=cl.get_query_set())
if response:
return response
else:
action_failed = True
else:
msg = _("Items must be selected in order to perform "
"actions on them. No items have been changed.")
self.message_user(request, msg)
action_failed = True
# Actions with confirmation
if (actions and request.method == 'POST' and
helpers.ACTION_CHECKBOX_NAME in request.POST and
'index' not in request.POST and '_save' not in request.POST):
if selected:
response = self.response_action(request, queryset=cl.get_query_set())
if response:
return response
else:
action_failed = True
# If we're allowing changelist editing, we need to construct a formset
# for the changelist given all the fields to be edited. Then we'll
# use the formset to validate/process POSTed data.
formset = cl.formset = None
# Handle POSTed bulk-edit data.
if (request.method == "POST" and self.list_editable and
'_save' in request.POST and not action_failed):
FormSet = self.get_changelist_formset(request)
formset = cl.formset = FormSet(request.POST, request.FILES, queryset=cl.result_list)
if formset.is_valid():
changecount = 0
for form in formset.forms:
if form.has_changed():
obj = self.save_form(request, form, change=True)
self.save_model(request, obj, form, change=True)
form.save_m2m()
change_msg = self.construct_change_message(request, form, None)
self.log_change(request, obj, change_msg)
changecount += 1
if changecount:
if changecount == 1:
name = force_unicode(opts.verbose_name)
else:
name = force_unicode(opts.verbose_name_plural)
msg = ungettext("%(count)s %(name)s was changed successfully.",
"%(count)s %(name)s were changed successfully.",
changecount) % {'count': changecount,
'name': name,
'obj': force_unicode(obj)}
self.message_user(request, msg)
return HttpResponseRedirect(request.get_full_path())
# Handle GET -- construct a formset for display.
elif self.list_editable:
FormSet = self.get_changelist_formset(request)
formset = cl.formset = FormSet(queryset=cl.result_list)
# Build the list of media to be used by the formset.
if formset:
media = self.media + formset.media
else:
media = self.media
# Build the action form and populate it with available actions.
if actions:
action_form = self.action_form(auto_id=None)
action_form.fields['action'].choices = self.get_action_choices(request)
else:
action_form = None
selection_note_all = ungettext('%(total_count)s selected',
'All %(total_count)s selected', cl.result_count)
context = {
'module_name': force_unicode(opts.verbose_name_plural),
'selection_note': _('0 of %(cnt)s selected') % {'cnt': len(cl.result_list)},
'selection_note_all': selection_note_all % {'total_count': cl.result_count},
'title': cl.title,
'is_popup': cl.is_popup,
'cl': cl,
'media': media,
'has_add_permission': self.has_add_permission(request),
'root_path': self.admin_site.root_path,
'app_label': app_label,
'action_form': action_form,
'actions_on_top': self.actions_on_top,
'actions_on_bottom': self.actions_on_bottom,
'actions_selection_counter': self.actions_selection_counter,
}
context.update(extra_context or {})
context_instance = template.RequestContext(request, current_app=self.admin_site.name)
return render_to_response(self.change_list_template or [
'admin/%s/%s/change_list.html' % (app_label, opts.object_name.lower()),
'admin/%s/change_list.html' % app_label,
'admin/change_list.html'
], context, context_instance=context_instance)
@csrf_protect_m
def delete_view(self, request, object_id, extra_context=None):
"The 'delete' admin view for this model."
opts = self.model._meta
app_label = opts.app_label
obj = self.get_object(request, unquote(object_id))
if not self.has_delete_permission(request, obj):
raise PermissionDenied
if obj is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {'name': force_unicode(opts.verbose_name), 'key': escape(object_id)})
# Populate deleted_objects, a data structure of all related objects that
# will also be deleted.
(deleted_objects, perms_needed) = get_deleted_objects((obj,), opts, request.user, self.admin_site)
if request.POST: # The user has already confirmed the deletion.
if perms_needed:
raise PermissionDenied
obj_display = force_unicode(obj)
self.log_deletion(request, obj, obj_display)
obj.delete()
self.message_user(request, _('The %(name)s "%(obj)s" was deleted successfully.') % {'name': force_unicode(opts.verbose_name), 'obj': force_unicode(obj_display)})
if not self.has_change_permission(request, None):
return HttpResponseRedirect("../../../../")
return HttpResponseRedirect("../../")
context = {
"title": _("Are you sure?"),
"object_name": force_unicode(opts.verbose_name),
"object": obj,
"deleted_objects": deleted_objects,
"perms_lacking": perms_needed,
"opts": opts,
"root_path": self.admin_site.root_path,
"app_label": app_label,
}
context.update(extra_context or {})
context_instance = template.RequestContext(request, current_app=self.admin_site.name)
return render_to_response(self.delete_confirmation_template or [
"admin/%s/%s/delete_confirmation.html" % (app_label, opts.object_name.lower()),
"admin/%s/delete_confirmation.html" % app_label,
"admin/delete_confirmation.html"
], context, context_instance=context_instance)
def history_view(self, request, object_id, extra_context=None):
"The 'history' admin view for this model."
from django.contrib.admin.models import LogEntry
model = self.model
opts = model._meta
app_label = opts.app_label
action_list = LogEntry.objects.filter(
object_id = object_id,
content_type__id__exact = ContentType.objects.get_for_model(model).id
).select_related().order_by('action_time')
# If no history was found, see whether this object even exists.
obj = get_object_or_404(model, pk=unquote(object_id))
context = {
'title': _('Change history: %s') % force_unicode(obj),
'action_list': action_list,
'module_name': capfirst(force_unicode(opts.verbose_name_plural)),
'object': obj,
'root_path': self.admin_site.root_path,
'app_label': app_label,
}
context.update(extra_context or {})
context_instance = template.RequestContext(request, current_app=self.admin_site.name)
return render_to_response(self.object_history_template or [
"admin/%s/%s/object_history.html" % (app_label, opts.object_name.lower()),
"admin/%s/object_history.html" % app_label,
"admin/object_history.html"
], context, context_instance=context_instance)
#
# DEPRECATED methods.
#
def __call__(self, request, url):
"""
DEPRECATED: this is the old way of URL resolution, replaced by
``get_urls()``. This is only called by AdminSite.root(), which is also
deprecated.
Again, remember that the following code only exists for
backwards-compatibility. Any new URLs, changes to existing URLs, or
whatever need to be done up in get_urls(), above!
This function still exists for backwards-compatibility; it will be
removed in Django 1.3.
"""
# Delegate to the appropriate method, based on the URL.
if url is None:
return self.changelist_view(request)
elif url == "add":
return self.add_view(request)
elif url.endswith('/history'):
return self.history_view(request, unquote(url[:-8]))
elif url.endswith('/delete'):
return self.delete_view(request, unquote(url[:-7]))
else:
return self.change_view(request, unquote(url))
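# Illustrative sketch (not part of Django): a hypothetical admin action in the
# shape get_actions() and response_action() expect -- a callable taking
# (modeladmin, request, queryset), optionally carrying a short_description.
# The 'status' field and 'p' value are assumed model details.
def _example_publish_action():
    def make_published(modeladmin, request, queryset):
        queryset.update(status='p')
    make_published.short_description = "Mark selected items as published"
    class ExampleAdmin(ModelAdmin):
        actions = [make_published]
    return ExampleAdmin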
class InlineModelAdmin(BaseModelAdmin):
"""
Options for inline editing of ``model`` instances.
Provide ``fk_name`` to specify the attribute name of the ``ForeignKey`` from
``model`` to its parent. This is required if ``model`` has more than one
``ForeignKey`` to its parent.
"""
model = None
fk_name = None
formset = BaseInlineFormSet
extra = 3
max_num = None
template = None
verbose_name = None
verbose_name_plural = None
can_delete = True
def __init__(self, parent_model, admin_site):
self.admin_site = admin_site
self.parent_model = parent_model
self.opts = self.model._meta
super(InlineModelAdmin, self).__init__()
if self.verbose_name is None:
self.verbose_name = self.model._meta.verbose_name
if self.verbose_name_plural is None:
self.verbose_name_plural = self.model._meta.verbose_name_plural
def _media(self):
from django.conf import settings
js = ['js/jquery.min.js', 'js/jquery.init.js', 'js/inlines.min.js']
if self.prepopulated_fields:
js.append('js/urlify.js')
js.append('js/prepopulate.min.js')
if self.filter_vertical or self.filter_horizontal:
js.extend(['js/SelectBox.js' , 'js/SelectFilter2.js'])
return forms.Media(js=['%s%s' % (settings.ADMIN_MEDIA_PREFIX, url) for url in js])
media = property(_media)
def get_formset(self, request, obj=None, **kwargs):
"""Returns a BaseInlineFormSet class for use in admin add/change views."""
if self.declared_fieldsets:
fields = flatten_fieldsets(self.declared_fieldsets)
else:
fields = None
if self.exclude is None:
exclude = []
else:
exclude = list(self.exclude)
exclude.extend(kwargs.get("exclude", []))
exclude.extend(self.get_readonly_fields(request, obj))
# if exclude is an empty list we use None, since that's the actual
# default
exclude = exclude or None
defaults = {
"form": self.form,
"formset": self.formset,
"fk_name": self.fk_name,
"fields": fields,
"exclude": exclude,
"formfield_callback": curry(self.formfield_for_dbfield, request=request),
"extra": self.extra,
"max_num": self.max_num,
"can_delete": self.can_delete,
}
defaults.update(kwargs)
return inlineformset_factory(self.parent_model, self.model, **defaults)
def get_fieldsets(self, request, obj=None):
if self.declared_fieldsets:
return self.declared_fieldsets
form = self.get_formset(request).form
fields = form.base_fields.keys() + list(self.get_readonly_fields(request, obj))
return [(None, {'fields': fields})]
def queryset(self, request):
return self.model._default_manager.all()
class StackedInline(InlineModelAdmin):
template = 'admin/edit_inline/stacked.html'
class TabularInline(InlineModelAdmin):
template = 'admin/edit_inline/tabular.html'
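# Illustrative sketch (not part of Django): a hypothetical TabularInline wired
# into a ModelAdmin via ``inlines``, as described by InlineModelAdmin's
# docstring. The model is passed in as a parameter and the fk_name value is an
# assumption used only for illustration.
def _example_inline_usage(book_model):
    class BookInline(TabularInline):
        model = book_model
        fk_name = 'author'  # only required if the model has several FKs to its parent
        extra = 1
    class AuthorAdmin(ModelAdmin):
        inlines = [BookInline]
    return AuthorAdmin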
|
|
# Copyright (c) The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Key manager implementation for Barbican
"""
from barbicanclient import client as barbican_client
from barbicanclient import exceptions as barbican_exception
from keystoneauth1 import identity
from keystoneauth1 import session
from oslo_log import log as logging
from tacker._i18n import _
from tacker.keymgr import exception
from tacker.keymgr import key_manager
LOG = logging.getLogger(__name__)
class BarbicanKeyManager(key_manager.KeyManager):
"""Key Manager Interface that wraps the Barbican client API."""
def __init__(self, auth_url):
self._barbican_client = None
self._base_url = None
self._auth_url = auth_url
def _get_barbican_client(self, context):
"""Creates a client to connect to the Barbican service.
:param context: the user context for authentication
:return: a Barbican Client object
:raises Forbidden: if the context is empty
:raises KeyManagerError: if the context is missing the tenant, the
tenant is None, or an error occurs while creating the client
"""
# Confirm context is provided, if not raise forbidden
if not context:
msg = _("User is not authorized to use key manager.")
LOG.error(msg)
raise exception.Forbidden(msg)
if self._barbican_client and self._current_context == context:
return self._barbican_client
try:
auth = self._get_keystone_auth(context)
sess = session.Session(auth=auth)
self._barbican_endpoint = self._get_barbican_endpoint(auth, sess)
if self._barbican_endpoint[-1] != '/':
self._barbican_endpoint += '/'
self._barbican_client = barbican_client.Client(
session=sess,
endpoint=self._barbican_endpoint)
self._current_context = context
except Exception as e:
LOG.error("Error creating Barbican client: %s", e)
raise exception.KeyManagerError(reason=e)
self._base_url = self._create_base_url(auth,
sess,
self._barbican_endpoint)
return self._barbican_client
def _get_keystone_auth(self, context):
if context.__class__.__name__ == 'KeystonePassword':
return identity.Password(
auth_url=self._auth_url,
username=context.username,
password=context.password,
user_id=context.user_id,
user_domain_id=context.user_domain_id,
user_domain_name=context.user_domain_name,
trust_id=context.trust_id,
domain_id=context.domain_id,
domain_name=context.domain_name,
project_id=context.project_id,
project_name=context.project_name,
project_domain_id=context.project_domain_id,
project_domain_name=context.project_domain_name,
reauthenticate=context.reauthenticate)
elif context.__class__.__name__ == 'KeystoneToken':
return identity.Token(
auth_url=self._auth_url,
token=context.token,
trust_id=context.trust_id,
domain_id=context.domain_id,
domain_name=context.domain_name,
project_id=context.project_id,
project_name=context.project_name,
project_domain_id=context.project_domain_id,
project_domain_name=context.project_domain_name,
reauthenticate=context.reauthenticate)
# this will be kept for oslo.context compatibility until
# projects begin to use utils.credential_factory
elif (context.__class__.__name__ == 'RequestContext' or
context.__class__.__name__ == 'Context'):
return identity.Token(
auth_url=self._auth_url,
token=context.auth_token,
project_id=context.tenant)
else:
msg = _("context must be of type KeystonePassword, "
"KeystoneToken, RequestContext, or Context, got type "
"%s") % context.__class__.__name__
LOG.error(msg)
raise exception.Forbidden(reason=msg)
def _get_barbican_endpoint(self, auth, sess):
service_parameters = {'service_type': 'key-manager',
'service_name': 'barbican',
'interface': 'internal'}
return auth.get_endpoint(sess, **service_parameters)
def _create_base_url(self, auth, sess, endpoint):
discovery = auth.get_discovery(sess, url=endpoint)
raw_data = discovery.raw_version_data()
if len(raw_data) == 0:
msg = _(
"Could not find discovery information for %s") % endpoint
LOG.error(msg)
raise exception.KeyManagerError(reason=msg)
latest_version = raw_data[-1]
api_version = latest_version.get('id')
base_url = "%s%s/" % (endpoint, api_version)
return base_url
def store(self, context, secret, expiration=None):
"""Stores a secret with the key manager.
:param context: contains information of the user and the environment
for the request
:param secret: a secret object with unencrypted payload.
Known as "secret" to the barbicanclient api
:param expiration: the expiration time of the secret in ISO 8601
format
:returns: the UUID of the stored object
:raises KeyManagerError: if object store fails
"""
barbican_client = self._get_barbican_client(context)
try:
secret = barbican_client.secrets.create(
payload=secret,
secret_type='opaque')
secret.expiration = expiration
secret_ref = secret.store()
return self._retrieve_secret_uuid(secret_ref)
except (barbican_exception.HTTPAuthError,
barbican_exception.HTTPClientError,
barbican_exception.HTTPServerError) as e:
LOG.error("Error storing object: %s", e)
raise exception.KeyManagerError(reason=e)
def _create_secret_ref(self, object_id):
"""Creates the URL required for accessing a secret.
:param object_id: the UUID of the key to copy
:return: the URL of the requested secret
"""
if not object_id:
msg = _("Key ID is None")
raise exception.KeyManagerError(reason=msg)
return "%ssecrets/%s" % (self._base_url, object_id)
def _retrieve_secret_uuid(self, secret_ref):
"""Retrieves the UUID of the secret from the secret_ref.
:param secret_ref: the href of the secret
:return: the UUID of the secret
"""
# The secret_ref is assumed to be of a form similar to
# http://host:9311/v1/secrets/d152fa13-2b41-42ca-a934-6c21566c0f40
# with the UUID at the end. This command retrieves everything
# after the last '/', which is the UUID.
return secret_ref.rpartition('/')[2]
def _is_secret_not_found_error(self, error):
if (isinstance(error, barbican_exception.HTTPClientError) and
error.status_code == 404):
return True
else:
return False
def get(self, context, managed_object_id, metadata_only=False):
"""Retrieves the specified managed object.
:param context: contains information of the user and the environment
for the request
:param managed_object_id: the UUID of the object to retrieve
:param metadata_only: if True, return only the object's metadata and
omit the secret data
:return: ManagedObject representation of the managed object
:raises KeyManagerError: if object retrieval fails
:raises ManagedObjectNotFoundError: if object not found
"""
barbican_client = self._get_barbican_client(context)
try:
secret_ref = self._create_secret_ref(managed_object_id)
return barbican_client.secrets.get(secret_ref)
except (barbican_exception.HTTPAuthError,
barbican_exception.HTTPClientError,
barbican_exception.HTTPServerError) as e:
LOG.error("Error retrieving object: %s", e)
if self._is_secret_not_found_error(e):
raise exception.ManagedObjectNotFoundError(
uuid=managed_object_id)
else:
raise exception.KeyManagerError(reason=e)
def delete(self, context, managed_object_id):
"""Deletes the specified managed object.
:param context: contains information of the user and the environment
for the request
:param managed_object_id: the UUID of the object to delete
:raises KeyManagerError: if object deletion fails
:raises ManagedObjectNotFoundError: if the object could not be found
"""
barbican_client = self._get_barbican_client(context)
try:
secret_ref = self._create_secret_ref(managed_object_id)
barbican_client.secrets.delete(secret_ref)
except (barbican_exception.HTTPAuthError,
barbican_exception.HTTPClientError,
barbican_exception.HTTPServerError) as e:
LOG.error("Error deleting object: %s", e)
if self._is_secret_not_found_error(e):
pass
else:
raise exception.KeyManagerError(reason=e)
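# Illustrative sketch (not part of the original module): the store/get/delete
# lifecycle described in the docstrings above. The auth_url value and the
# context object are assumptions; in practice they come from the service
# configuration and the authenticated request.
def _demo_barbican_key_manager(context, auth_url='http://keystone:5000/v3'):
    manager = BarbicanKeyManager(auth_url)
    secret_id = manager.store(context, 'opaque-example-payload')
    secret = manager.get(context, secret_id)
    manager.delete(context, secret_id)
    return secret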
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental API for controlling optimizations in `tf.data` pipelines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
from tensorflow.core.framework import dataset_options_pb2
from tensorflow.python.data.util import options
from tensorflow.python.util.tf_export import tf_export
class _AutotuneAlgorithm(enum.Enum):
"""Controls what algorithm is used in the autotune implementation."""
HILL_CLIMB = 0
GRADIENT_DESCENT = 1
@tf_export("data.experimental.OptimizationOptions")
class OptimizationOptions(options.OptionsBase):
"""Represents options for dataset optimizations.
You can set the optimization options of a dataset through the
`experimental_optimization` property of `tf.data.Options`; the property is
an instance of `tf.data.experimental.OptimizationOptions`.
```python
options = tf.data.Options()
options.experimental_optimization.noop_elimination = True
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
```
"""
apply_default_optimizations = options.create_option(
name="apply_default_optimizations",
ty=bool,
docstring=
"Whether to apply default graph optimizations. If False, only graph "
"optimizations that have been explicitly enabled will be applied.")
autotune = options.create_option(
name="autotune",
ty=bool,
docstring=
"Whether to automatically tune performance knobs. If None, defaults to "
"True.")
autotune_buffers = options.create_option(
name="autotune_buffers",
ty=bool,
docstring=
"When autotuning is enabled (through `autotune`), determines whether to "
"also autotune buffer sizes for datasets with parallelism. If None,"
" defaults to False.")
autotune_cpu_budget = options.create_option(
name="autotune_cpu_budget",
ty=int,
docstring=
"When autotuning is enabled (through `autotune`), determines the CPU "
"budget to use. Values greater than the number of schedulable CPU cores "
"are allowed but may result in CPU contention. If None, defaults to the "
"number of schedulable CPU cores.")
autotune_ram_budget = options.create_option(
name="autotune_ram_budget",
ty=int,
docstring=
"When autotuning is enabled (through `autotune`), determines the RAM "
"budget to use. Values greater than the available RAM in bytes may "
"result in OOM. If None, defaults to half of the available RAM in bytes.")
filter_fusion = options.create_option(
name="filter_fusion",
ty=bool,
docstring=
"Whether to fuse filter transformations. If None, defaults to False.")
map_and_batch_fusion = options.create_option(
name="map_and_batch_fusion",
ty=bool,
docstring=
"Whether to fuse map and batch transformations. If None, defaults to "
"True.")
map_and_filter_fusion = options.create_option(
name="map_and_filter_fusion",
ty=bool,
docstring=
"Whether to fuse map and filter transformations. If None, defaults to "
"False.")
map_fusion = options.create_option(
name="map_fusion",
ty=bool,
docstring="Whether to fuse map transformations. If None, defaults to "
"False.")
map_parallelization = options.create_option(
name="map_parallelization",
ty=bool,
docstring=
"Whether to parallelize stateless map transformations. If None, defaults "
"to True.")
noop_elimination = options.create_option(
name="noop_elimination",
ty=bool,
docstring=
"Whether to eliminate no-op transformations. If None, defaults to True.")
parallel_batch = options.create_option(
name="parallel_batch",
ty=bool,
docstring="Whether to parallelize copying of batch elements. This "
"optimization is highly experimental and can cause performance "
"degradation (e.g. when the parallelization overhead exceeds the "
"benefits of performing the data copies in parallel). You should only "
"enable this optimization if a) your input pipeline is bottlenecked on "
"batching and b) you have validated that this optimization improves "
"performance. If None, defaults to False.")
shuffle_and_repeat_fusion = options.create_option(
name="shuffle_and_repeat_fusion",
ty=bool,
docstring="Whether to fuse shuffle and repeat transformations. If None, "
"defaults to True.")
def _to_proto(self):
pb = dataset_options_pb2.OptimizationOptions()
if self.apply_default_optimizations is not None:
pb.apply_default_optimizations = self.apply_default_optimizations
if self.autotune is not None:
pb.autotune = self.autotune
if self.autotune_buffers is not None:
pb.autotune_buffers = self.autotune_buffers
if self.autotune_cpu_budget is not None:
pb.autotune_cpu_budget = self.autotune_cpu_budget
if self.autotune_ram_budget is not None:
pb.autotune_ram_budget = self.autotune_ram_budget
if self.filter_fusion is not None:
pb.filter_fusion = self.filter_fusion
if self.map_and_batch_fusion is not None:
pb.map_and_batch_fusion = self.map_and_batch_fusion
if self.map_and_filter_fusion is not None:
pb.map_and_filter_fusion = self.map_and_filter_fusion
if self.map_fusion is not None:
pb.map_fusion = self.map_fusion
if self.map_parallelization is not None:
pb.map_parallelization = self.map_parallelization
if self.noop_elimination is not None:
pb.noop_elimination = self.noop_elimination
if self.parallel_batch is not None:
pb.parallel_batch = self.parallel_batch
if self.shuffle_and_repeat_fusion is not None:
pb.shuffle_and_repeat_fusion = self.shuffle_and_repeat_fusion
return pb
def _from_proto(self, pb):
if pb.WhichOneof("optional_apply_default_optimizations") is not None:
self.apply_default_optimizations = pb.apply_default_optimizations
if pb.WhichOneof("optional_autotune") is not None:
self.autotune = pb.autotune
if pb.WhichOneof("optional_autotune_buffers") is not None:
self.autotune_buffers = pb.autotune_buffers
if pb.WhichOneof("optional_autotune_cpu_budget") is not None:
self.autotune_cpu_budget = pb.autotune_cpu_budget
if pb.WhichOneof("optional_autotune_ram_budget") is not None:
self.autotune_ram_budget = pb.autotune_ram_budget
if pb.WhichOneof("optional_filter_fusion") is not None:
self.filter_fusion = pb.filter_fusion
if pb.WhichOneof("optional_map_and_batch_fusion") is not None:
self.map_and_batch_fusion = pb.map_and_batch_fusion
if pb.WhichOneof("optional_map_and_filter_fusion") is not None:
self.map_and_filter_fusion = pb.map_and_filter_fusion
if pb.WhichOneof("optional_map_fusion") is not None:
self.map_fusion = pb.map_fusion
if pb.WhichOneof("optional_map_parallelization") is not None:
self.map_parallelization = pb.map_parallelization
if pb.WhichOneof("optional_noop_elimination") is not None:
self.noop_elimination = pb.noop_elimination
if pb.WhichOneof("optional_parallel_batch") is not None:
self.parallel_batch = pb.parallel_batch
if pb.WhichOneof("optional_shuffle_and_repeat_fusion") is not None:
self.shuffle_and_repeat_fusion = pb.shuffle_and_repeat_fusion
def _set_mutable(self, mutable):
"""Change the mutability value to `mutable` on this options and children."""
# pylint: disable=protected-access
object.__setattr__(self, "_mutable", mutable)
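# A minimal usage sketch, assuming the public API exposes this class as
# `tf.data.Options().experimental_optimization` (true for TF 2.x releases of
# this vintage); the helper name below is illustrative only.
def _example_optimization_options_usage():
  import tensorflow as tf
  opts = tf.data.Options()
  # Enable two of the rewrites declared above; leaving a field as None keeps
  # that optimization's documented default.
  opts.experimental_optimization.map_parallelization = True
  opts.experimental_optimization.noop_elimination = True
  ds = tf.data.Dataset.range(1000).map(lambda x: x * 2)
  # with_options() attaches the options; the rewrites run when the dataset is
  # iterated, not at construction time.
  return ds.with_options(opts)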
|
|
import sys
import numpy as np
import threading
import queue
import uuid
from .AudioHardware import *
from .mach_time import *
from .stream import IOStream, InArrayStream, OutArrayStream
defInBufSize = defOutBufSize = 2048
# all timestamps are compatible with time.monotonic(), which internally uses mach_absolute_time
class IOSession:
def __init__(self):
self.cv = threading.Condition()
with self.cv:
self.negotiated = {
kAudioObjectPropertyScopeInput.value: False,
kAudioObjectPropertyScopeOutput.value: False,
}
self.created = False
self.running = False
self.masterDeviceUID = None
self.deviceScopes = []
self.vfDesired = {} # indexed by device, scope
self.vfActual = {} # indexed by device, scope
self.notifiers = {} # indexed by device, scope
self.bufSize = {} # indexed by scope
self.ioProc_ptr = AudioDeviceIOProc(self.ioProc)
self.inStream = None
self.outStream = None
def __del__(self):
self.destroyAggregate()
def addDevice(self, device, scope):
with self.cv:
key = (device, scope.value)
if key in self.deviceScopes:
return
self.deviceScopes.append(key)
self.notifiers[key] = [device.notify(k, self.propListener) for k in [
(kAudioStreamPropertyVirtualFormat, scope),
(kAudioDeviceProcessorOverload, scope),
]]
def addDefaultInputDevice(self):
self.addDevice(AudioSystemObject[kAudioHardwarePropertyDefaultInputDevice], kAudioObjectPropertyScopeInput)
def addDefaultOutputDevice(self):
self.addDevice(AudioSystemObject[kAudioHardwarePropertyDefaultOutputDevice], kAudioObjectPropertyScopeOutput)
def negotiateFormat(self, scope,
minimumSampleRate=96000,
maximumSampleRate=96000,
minimumBitsPerChannel=32,
maximumBitsPerChannel=32,
inBufSize=defInBufSize,
outBufSize=defOutBufSize,
):
scope = getattr(scope, 'value', scope)
with self.cv:
if self.negotiated[scope]:
return
self.bufSize[scope] = outBufSize if scope == kAudioObjectPropertyScopeOutput else inBufSize
# Filter formats by sample rate range, bit depth range, format ID
formatRanges = {}
for d, s in self.deviceScopes:
if s != scope:
continue
vf = d[kAudioStreamPropertyAvailableVirtualFormats, scope]
vf_filtered = []
for f in vf:
if f.mSampleRateRange.mMinimum > maximumSampleRate or minimumSampleRate > f.mSampleRateRange.mMaximum:
continue
if not minimumBitsPerChannel <= f.mFormat.mBitsPerChannel <= maximumBitsPerChannel:
continue
if f.mFormat.mFormatID != kAudioFormatLinearPCM.value:
continue
if f.mFormat.mFormatFlags != kAudioFormatFlagIsFloat.value | kAudioFormatFlagIsPacked.value:
continue
vf_filtered.append(f)
if not vf_filtered:
raise ValueError('No candidate formats for %s' % d)
formatRanges[d] = vf_filtered
# Find the lowest candidate sample rate viable for every device
for sampleRate in np.sort(
[f.mSampleRateRange.mMinimum for d, vf in formatRanges.items() for f in vf] +
[f.mSampleRateRange.mMaximum for d, vf in formatRanges.items() for f in vf] +
[minimumSampleRate, maximumSampleRate]):
if not minimumSampleRate <= sampleRate <= maximumSampleRate:
continue
formats = {}
for d, vf in formatRanges.items():
formats[d] = []
for f in vf:
if f.mSampleRateRange.mMinimum <= sampleRate <= f.mSampleRateRange.mMaximum:
asbd = AudioStreamBasicDescription()
memmove(pointer(asbd), pointer(f.mFormat), sizeof(asbd))
asbd.mSampleRate = sampleRate
formats[d].append(asbd)
if not formats[d]:
break
else:
break # every device has a viable format
continue # some device has no viable format
else:
raise ValueError('No format is viable for all devices')
# Find the greatest number of channels for each device
for d in formats.keys():
channels = max(f.mChannelsPerFrame for f in formats[d])
formats[d] = [f for f in formats[d] if f.mChannelsPerFrame == channels]
# Find the least bit depth for each device
for d in formats.keys():
bitsPerChannel = min(f.mBitsPerChannel for f in formats[d])
formats[d] = [f for f in formats[d] if f.mBitsPerChannel == bitsPerChannel]
# Break ties and set format for each device
for d, vf in formats.items():
d[kAudioStreamPropertyVirtualFormat, scope] = self.vfDesired[d, scope] = vf[0]
self.vfActual[d, scope] = d[kAudioStreamPropertyVirtualFormat, scope]
d[kAudioDevicePropertyBufferFrameSize, scope] = self.bufSize[scope]
self.negotiated[scope] = True
def setMasterDevice(self, device):
self.masterDeviceUID = device[kAudioDevicePropertyDeviceUID]
def createAggregate(self):
with self.cv:
if self.created:
return
self.negotiateFormat(kAudioObjectPropertyScopeInput)
self.negotiateFormat(kAudioObjectPropertyScopeOutput)
uid = str(uuid.uuid4())
devices = [d for d, s in self.deviceScopes]
self.subDevices = sorted(set(devices), key=devices.index)
if self.masterDeviceUID is None:
self.masterDeviceUID = self.subDevices[0][kAudioDevicePropertyDeviceUID]
composition = {
kAudioAggregateDeviceUIDKey: uid,
kAudioAggregateDeviceNameKey: 'Python Audio HAL Aggregate Device',
kAudioAggregateDeviceIsPrivateKey: 1,
kAudioAggregateDeviceIsStackedKey: 0,
kAudioAggregateDeviceSubDeviceListKey: [{
kAudioSubDeviceUIDKey: d[kAudioDevicePropertyDeviceUID],
kAudioSubDeviceInputChannelsKey: self.vfActual[d,kAudioObjectPropertyScopeInput.value].mChannelsPerFrame if (d,kAudioObjectPropertyScopeInput.value) in self.vfActual else 0,
kAudioSubDeviceOutputChannelsKey: self.vfActual[d,kAudioObjectPropertyScopeOutput.value].mChannelsPerFrame if (d,kAudioObjectPropertyScopeOutput.value) in self.vfActual else 0,
kAudioSubDeviceExtraInputLatencyKey: 0,
kAudioSubDeviceExtraOutputLatencyKey: 0,
kAudioSubDeviceDriftCompensationKey: 1,
kAudioSubDeviceDriftCompensationQualityKey: kAudioSubDeviceDriftCompensationMaxQuality,
} for d in self.subDevices],
kAudioAggregateDeviceMasterSubDeviceKey: self.masterDeviceUID,
}
composition = CoreFoundation.CFDictionaryRef(composition)
objectID = AudioObjectID()
trap(AudioHardwareCreateAggregateDevice, composition.__c_void_p__(), byref(objectID))
self.device = AudioObject(objectID)
self.ioProcID = AudioDeviceIOProcID()
trap(AudioDeviceCreateIOProcID, self.device.objectID, self.ioProc_ptr, None, byref(self.ioProcID))
for scope in (kAudioObjectPropertyScopeInput, kAudioObjectPropertyScopeOutput):
self.device[kAudioDevicePropertyBufferFrameSize, scope] = self.bufSize[scope.value]
self.created = True
def destroyAggregate(self):
with self.cv:
self.stop()
if self.created:
trap(AudioDeviceDestroyIOProcID, self.device.objectID, self.ioProcID)
trap(AudioHardwareDestroyAggregateDevice, self.device.objectID)
self.created = False
def nChannelsPerFrame(self, scope):
scope = getattr(scope, 'value', scope)
with self.cv:
self.negotiateFormat(scope)
result = 0
for d, s in self.deviceScopes:
if s == scope:
result += self.vfActual[d, scope].mChannelsPerFrame
return result
def start(self, inStream=None, outStream=None, startTime=None):
with self.cv:
if self.running:
return
if startTime is None:
startTime = time.monotonic()
self.createAggregate()
self.ioProcException = None
self.running = True
self.ioStartHostTime = monotonicToHostTime(startTime)
if inStream is not None:
self.inStream = inStream
if outStream is not None:
self.outStream = outStream
trap(AudioDeviceStart, self.device.objectID, self.ioProcID)
def stop(self):
with self.cv:
if not self.running:
return
self.stopIO()
self.wait()
def wait(self):
with self.cv:
while self.running:
self.cv.wait()
if self.ioProcException:
raise self.ioProcException
return self.inStream
def stopIO(self):
with self.cv:
trap(AudioDeviceStop, self.device.objectID, self.ioProcID)
self.running = False
self.cv.notifyAll()
def ioProc(self,
inDevice: AudioObjectID,
inNow: POINTER(AudioTimeStamp),
inInputData: POINTER(AudioBufferList),
inInputTime: POINTER(AudioTimeStamp),
outOutputData: POINTER(AudioBufferList),
inOutputTime: POINTER(AudioTimeStamp),
inClientData: c_void_p) -> OSStatus:
try:
inNow = inNow.contents
if not (inNow.mFlags & kAudioTimeStampHostTimeValid.value):
raise Exception('No host timestamps')
if inInputData and inInputData.contents.mNumberBuffers:
inInputTime = inInputTime.contents
inputBuffers = cast(inInputData.contents.mBuffers, POINTER(AudioBuffer))
asbd = self.device[kAudioStreamPropertyVirtualFormat, kAudioObjectPropertyScopeInput]
if not (inInputTime.mFlags & kAudioTimeStampHostTimeValid.value):
raise Exception('No host timestamps')
self.inLatency = (inNow.mHostTime - inInputTime.mHostTime) * d_monotonic_d_hostTime
samples = np.concatenate([arrayFromBuffer(inputBuffers[i], asbd) for i in range(inInputData.contents.mNumberBuffers)], 1)
nFrames = samples.shape[0]
ticksPerFrame = d_hostTime_d_monotonic / asbd.mSampleRate
                # Frame offsets must be integers; the host-time arithmetic yields a float.
                firstGoodSample = int(max(min((self.ioStartHostTime - inInputTime.mHostTime) / ticksPerFrame, nFrames), 0))
                if firstGoodSample:
                    samples = samples[firstGoodSample:]
self.inStream.write(samples, hostTimeToMonotonic(inInputTime.mHostTime), hostTimeToMonotonic(inNow.mHostTime))
if outOutputData and outOutputData.contents.mNumberBuffers:
inOutputTime = inOutputTime.contents
outputBuffers = cast(outOutputData.contents.mBuffers, POINTER(AudioBuffer))
asbd = self.device[kAudioStreamPropertyVirtualFormat, kAudioObjectPropertyScopeOutput]
if not (inOutputTime.mFlags & kAudioTimeStampHostTimeValid.value):
raise Exception('No host timestamps')
self.outLatency = (inOutputTime.mHostTime - inNow.mHostTime) * d_monotonic_d_hostTime
b = outputBuffers[0]
nFrames = b.mDataByteSize // asbd.mBytesPerFrame
ticksPerFrame = d_hostTime_d_monotonic / asbd.mSampleRate
                firstGoodSample = int(max(min((self.ioStartHostTime - inOutputTime.mHostTime) / ticksPerFrame, nFrames), 0))
                y = self.outStream.read(nFrames - firstGoodSample, hostTimeToMonotonic(inOutputTime.mHostTime), hostTimeToMonotonic(inNow.mHostTime))
nFramesRead = y.shape[0]
nextChannel = 0
for i in range(outOutputData.contents.mNumberBuffers):
samples = arrayFromBuffer(outputBuffers[i], asbd)
if firstGoodSample:
samples[:firstGoodSample] = 0
samples = samples[firstGoodSample:]
mNumberChannels = outputBuffers[i].mNumberChannels
samples[:nFramesRead] = y[:,nextChannel:nextChannel+mNumberChannels]
samples[nFramesRead:] = 0
nextChannel += mNumberChannels
inDone = not self.inStream or self.inStream.inDone()
outDone = not self.outStream or self.outStream.outDone()
if inDone and outDone:
self.stopIO()
except Exception as e:
with self.cv:
import traceback
traceback.print_exc()
self.ioProcException = e
self.stopIO()
return 0
def propListener(self,
objectID: AudioObjectID,
inNumberAddresses: UInt32,
inAddresses: POINTER(AudioObjectPropertyAddress),
inClientData: c_void_p):
obj = AudioObject(objectID)
for i in range(inNumberAddresses):
scope = inAddresses[i].mScope
key = (obj, scope)
if key in self.deviceScopes:
if inAddresses[i].mSelector == kAudioStreamPropertyVirtualFormat.value:
self.vfActual[key] = obj[kAudioStreamPropertyVirtualFormat, scope]
elif inAddresses[i].mSelector == kAudioDeviceProcessorOverload.value:
print('Overrun', file=sys.stderr)
return 0
def __enter__(self):
self.createAggregate()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.destroyAggregate()
return exc_type is KeyboardInterrupt
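# A minimal duplex sketch driving IOSession directly, assuming the default
# input and output devices accept a 48 kHz float format; the one-second tone
# and capture length are arbitrary example values, and InArrayStream /
# OutArrayStream are the helpers imported from .stream above.
def _example_duplex_session():
    Fs = 48000
    t = np.arange(Fs) / Fs
    tone = (0.1 * np.sin(2 * np.pi * 440 * t)).astype(np.float32)[:, None]
    capture = InArrayStream(np.empty((Fs, 1), np.float32))
    ios = IOSession()
    ios.addDefaultInputDevice()
    ios.addDefaultOutputDevice()
    for scope in (kAudioObjectPropertyScopeInput, kAudioObjectPropertyScopeOutput):
        ios.negotiateFormat(scope, minimumSampleRate=Fs, maximumSampleRate=Fs)
    with ios:  # __enter__ builds the aggregate device; __exit__ tears it down
        ios.start(inStream=capture, outStream=OutArrayStream(tone))
        ios.wait()
    return capture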
def coerceInputStream(count_or_stream):
    # Accept an existing IOStream, a scalar frame count, or an array-like buffer.
    if isinstance(count_or_stream, IOStream):
        inStream = count_or_stream
    elif np.ndim(count_or_stream) == 0:
        inStream = InArrayStream(np.empty((int(count_or_stream), 1), np.float32))
    else:
        inStream = InArrayStream(np.asarray(count_or_stream))
    return inStream
def coerceOutputStream(outStream):
if not isinstance(outStream, IOStream):
outStream = OutArrayStream(np.asarray(outStream))
return outStream
def prepareSession(Fs, outDevice, inDevice):
    ios = IOSession()
    try:
        iter(outDevice)
        outDevices = outDevice
    except TypeError:
        outDevices = (outDevice,) if outDevice else ()
    try:
        iter(inDevice)
        inDevices = inDevice
    except TypeError:
        inDevices = (inDevice,) if inDevice else ()
    # Register devices before negotiating so the format search can see them.
    if outDevices:
        for device in outDevices:
            ios.addDevice(device, kAudioObjectPropertyScopeOutput)
        ios.negotiateFormat(kAudioObjectPropertyScopeOutput, minimumSampleRate=Fs, maximumSampleRate=Fs)
    if inDevices:
        for device in inDevices:
            ios.addDevice(device, kAudioObjectPropertyScopeInput)
        ios.negotiateFormat(kAudioObjectPropertyScopeInput, minimumSampleRate=Fs, maximumSampleRate=Fs)
    return ios
def play(outStream, Fs, outDevice=None):
outStream = coerceOutputStream(outStream)
with prepareSession(Fs, outDevice, None) as ios:
ios.start(outStream=outStream)
return ios.wait()
def record(count_or_stream, Fs, inDevice=None):
inStream = coerceInputStream(count_or_stream)
with prepareSession(Fs, None, inDevice) as ios:
ios.start(inStream=inStream)
return ios.wait()
def play_and_record(stream, Fs, outDevice=None, inDevice=None):
inStream = coerceInputStream(stream)
outStream = coerceOutputStream(stream)
with prepareSession(Fs, outDevice, inDevice) as ios:
ios.start(inStream=inStream, outStream=outStream)
return ios.wait()
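# A minimal sketch of the module-level helpers above, assuming a working
# CoreAudio setup; the default-device lookups, the 440 Hz tone, and the
# one-second 48 kHz capture are arbitrary example choices.
def _example_play_then_record():
    Fs = 48000
    t = np.arange(Fs) / Fs
    tone = (0.1 * np.sin(2 * np.pi * 440 * t)).astype(np.float32)[:, None]
    out_dev = AudioSystemObject[kAudioHardwarePropertyDefaultOutputDevice]
    in_dev = AudioSystemObject[kAudioHardwarePropertyDefaultInputDevice]
    # Devices are wrapped in 1-tuples so prepareSession treats them as iterables.
    play(tone, Fs, outDevice=(out_dev,))  # blocks until playback completes
    # Blocks until Fs frames (one second) are captured; returns the filled InArrayStream.
    return record(Fs, Fs, inDevice=(in_dev,))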
|
|
"""Unit tests for Superset"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import csv
import doctest
import json
import logging
import io
import random
import unittest
from flask import escape
from superset import db, utils, appbuilder, sm, jinja_context, sql_lab
from superset.models import core as models
from superset.models.sql_lab import Query
from superset.views.core import DatabaseView
from superset.connectors.sqla.models import SqlaTable
from .base_tests import SupersetTestCase
class CoreTests(SupersetTestCase):
requires_examples = True
def __init__(self, *args, **kwargs):
super(CoreTests, self).__init__(*args, **kwargs)
@classmethod
def setUpClass(cls):
cls.table_ids = {tbl.table_name: tbl.id for tbl in (
db.session
.query(SqlaTable)
.all()
)}
def setUp(self):
db.session.query(Query).delete()
db.session.query(models.DatasourceAccessRequest).delete()
def tearDown(self):
db.session.query(Query).delete()
def test_login(self):
resp = self.get_resp(
'/login/',
data=dict(username='admin', password='general'))
self.assertIn('Welcome', resp)
resp = self.get_resp('/logout/', follow_redirects=True)
self.assertIn('User confirmation needed', resp)
resp = self.get_resp(
'/login/',
data=dict(username='admin', password='wrongPassword'))
self.assertNotIn('Welcome', resp)
self.assertIn('User confirmation needed', resp)
def test_welcome(self):
self.login()
resp = self.client.get('/superset/welcome')
assert 'Welcome' in resp.data.decode('utf-8')
def test_slice_endpoint(self):
self.login(username='admin')
slc = self.get_slice("Girls", db.session)
resp = self.get_resp('/superset/slice/{}/'.format(slc.id))
assert 'Time Column' in resp
assert 'List Roles' in resp
# Testing overrides
resp = self.get_resp(
'/superset/slice/{}/?standalone=true'.format(slc.id))
assert 'List Roles' not in resp
def test_slice_json_endpoint(self):
self.login(username='admin')
slc = self.get_slice("Girls", db.session)
json_endpoint = (
'/superset/explore_json/{}/{}?form_data={}'
.format(slc.datasource_type, slc.datasource_id, json.dumps(slc.viz.form_data))
)
resp = self.get_resp(json_endpoint)
assert '"Jennifer"' in resp
def test_slice_csv_endpoint(self):
self.login(username='admin')
slc = self.get_slice("Girls", db.session)
csv_endpoint = (
'/superset/explore_json/{}/{}?csv=true&form_data={}'
.format(slc.datasource_type, slc.datasource_id, json.dumps(slc.viz.form_data))
)
resp = self.get_resp(csv_endpoint)
assert 'Jennifer,' in resp
def test_admin_only_permissions(self):
def assert_admin_permission_in(role_name, assert_func):
role = sm.find_role(role_name)
permissions = [p.permission.name for p in role.permissions]
assert_func('can_sync_druid_source', permissions)
assert_func('can_approve', permissions)
assert_admin_permission_in('Admin', self.assertIn)
assert_admin_permission_in('Alpha', self.assertNotIn)
assert_admin_permission_in('Gamma', self.assertNotIn)
def test_admin_only_menu_views(self):
def assert_admin_view_menus_in(role_name, assert_func):
role = sm.find_role(role_name)
view_menus = [p.view_menu.name for p in role.permissions]
assert_func('ResetPasswordView', view_menus)
assert_func('RoleModelView', view_menus)
assert_func('Security', view_menus)
assert_func('UserDBModelView', view_menus)
assert_func('SQL Lab',
view_menus)
assert_func('AccessRequestsModelView', view_menus)
assert_admin_view_menus_in('Admin', self.assertIn)
assert_admin_view_menus_in('Alpha', self.assertNotIn)
assert_admin_view_menus_in('Gamma', self.assertNotIn)
def test_save_slice(self):
self.login(username='admin')
slice_name = "Energy Sankey"
slice_id = self.get_slice(slice_name, db.session).id
db.session.commit()
copy_name = "Test Sankey Save"
tbl_id = self.table_ids.get('energy_usage')
new_slice_name = "Test Sankey Overwirte"
url = (
"/superset/explore/table/{}/?slice_name={}&"
"action={}&datasource_name=energy_usage&form_data={}")
        form_data = {
            'viz_type': 'sankey',
            'groupby': ['source', 'target'],
            'metric': 'sum__value',
            'row_limit': 5000,
            'slice_id': slice_id,
        }
# Changing name and save as a new slice
resp = self.get_resp(
url.format(
tbl_id,
copy_name,
'saveas',
json.dumps(form_data)
)
)
slices = db.session.query(models.Slice) \
.filter_by(slice_name=copy_name).all()
assert len(slices) == 1
new_slice_id = slices[0].id
        form_data = {
            'viz_type': 'sankey',
            'groupby': ['source', 'target'],
            'metric': 'sum__value',
            'row_limit': 5000,
            'slice_id': new_slice_id,
        }
# Setting the name back to its original name by overwriting new slice
resp = self.get_resp(
url.format(
tbl_id,
new_slice_name,
'overwrite',
json.dumps(form_data)
)
)
slc = db.session.query(models.Slice).filter_by(id=new_slice_id).first()
assert slc.slice_name == new_slice_name
db.session.delete(slc)
def test_filter_endpoint(self):
self.login(username='admin')
slice_name = "Energy Sankey"
slice_id = self.get_slice(slice_name, db.session).id
db.session.commit()
tbl_id = self.table_ids.get('energy_usage')
        table = db.session.query(SqlaTable).filter(SqlaTable.id == tbl_id).first()
        table.filter_select_enabled = True
url = (
"/superset/filter/table/{}/target/?viz_type=sankey&groupby=source"
"&metric=sum__value&flt_col_0=source&flt_op_0=in&flt_eq_0=&"
"slice_id={}&datasource_name=energy_usage&"
"datasource_id=1&datasource_type=table")
# Changing name
resp = self.get_resp(url.format(tbl_id, slice_id))
assert len(resp) > 0
assert 'Carbon Dioxide' in resp
def test_slices(self):
# Testing by hitting the two supported end points for all slices
self.login(username='admin')
Slc = models.Slice
urls = []
for slc in db.session.query(Slc).all():
urls += [
(slc.slice_name, 'slice_url', slc.slice_url),
(slc.slice_name, 'slice_id_url', slc.slice_id_url),
]
for name, method, url in urls:
logging.info("[{name}]/[{method}]: {url}".format(**locals()))
self.client.get(url)
def test_tablemodelview_list(self):
self.login(username='admin')
url = '/tablemodelview/list/'
resp = self.get_resp(url)
# assert that a table is listed
table = db.session.query(SqlaTable).first()
assert table.name in resp
assert '/superset/explore/table/{}'.format(table.id) in resp
def test_add_slice(self):
self.login(username='admin')
# assert that /slicemodelview/add responds with 200
url = '/slicemodelview/add'
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
def test_slices_V2(self):
# Add explore-v2-beta role to admin user
# Test all slice urls as user with with explore-v2-beta role
sm.add_role('explore-v2-beta')
appbuilder.sm.add_user(
'explore_beta', 'explore_beta', ' user', 'explore_beta@airbnb.com',
appbuilder.sm.find_role('explore-v2-beta'),
password='general')
self.login(username='explore_beta', password='general')
Slc = models.Slice
urls = []
for slc in db.session.query(Slc).all():
urls += [
(slc.slice_name, 'slice_url', slc.slice_url),
]
for name, method, url in urls:
print("[{name}]/[{method}]: {url}".format(**locals()))
response = self.client.get(url)
def test_dashboard(self):
self.login(username='admin')
urls = {}
for dash in db.session.query(models.Dashboard).all():
urls[dash.dashboard_title] = dash.url
for title, url in urls.items():
assert escape(title) in self.client.get(url).data.decode('utf-8')
def test_doctests(self):
modules = [utils, models, sql_lab]
for mod in modules:
failed, tests = doctest.testmod(mod)
if failed:
raise Exception("Failed a doctest")
def test_misc(self):
assert self.get_resp('/health') == "OK"
assert self.get_resp('/ping') == "OK"
def test_testconn(self):
database = self.get_main_database(db.session)
# validate that the endpoint works with the password-masked sqlalchemy uri
data = json.dumps({
'uri': database.safe_sqlalchemy_uri(),
'name': 'main'
})
response = self.client.post('/superset/testconn', data=data, content_type='application/json')
assert response.status_code == 200
# validate that the endpoint works with the decrypted sqlalchemy uri
data = json.dumps({
'uri': database.sqlalchemy_uri_decrypted,
'name': 'main'
})
response = self.client.post('/superset/testconn', data=data, content_type='application/json')
assert response.status_code == 200
def test_databaseview_edit(self, username='admin'):
# validate that sending a password-masked uri does not over-write the decrypted uri
self.login(username=username)
database = self.get_main_database(db.session)
sqlalchemy_uri_decrypted = database.sqlalchemy_uri_decrypted
url = 'databaseview/edit/{}'.format(database.id)
data = {k: database.__getattribute__(k) for k in DatabaseView.add_columns}
data['sqlalchemy_uri'] = database.safe_sqlalchemy_uri()
self.client.post(url, data=data)
database = self.get_main_database(db.session)
self.assertEqual(sqlalchemy_uri_decrypted, database.sqlalchemy_uri_decrypted)
def test_warm_up_cache(self):
slc = self.get_slice("Girls", db.session)
data = self.get_json_resp(
'/superset/warm_up_cache?slice_id={}'.format(slc.id))
assert data == [{'slice_id': slc.id, 'slice_name': slc.slice_name}]
data = self.get_json_resp(
'/superset/warm_up_cache?table_name=energy_usage&db_name=main')
assert len(data) == 3
def test_shortner(self):
self.login(username='admin')
data = (
"//superset/explore/table/1/?viz_type=sankey&groupby=source&"
"groupby=target&metric=sum__value&row_limit=5000&where=&having=&"
"flt_col_0=source&flt_op_0=in&flt_eq_0=&slice_id=78&slice_name="
"Energy+Sankey&collapsed_fieldsets=&action=&datasource_name="
"energy_usage&datasource_id=1&datasource_type=table&"
"previous_viz_type=sankey"
)
resp = self.client.post('/r/shortner/', data=data)
assert '/r/' in resp.data.decode('utf-8')
def test_kv(self):
self.logout()
self.login(username='admin')
try:
resp = self.client.post('/kv/store/', data=dict())
except Exception as e:
self.assertRaises(TypeError)
value = json.dumps({'data': 'this is a test'})
resp = self.client.post('/kv/store/', data=dict(data=value))
self.assertEqual(resp.status_code, 200)
kv = db.session.query(models.KeyValue).first()
kv_value = kv.value
self.assertEqual(json.loads(value), json.loads(kv_value))
resp = self.client.get('/kv/{}/'.format(kv.id))
self.assertEqual(resp.status_code, 200)
self.assertEqual(json.loads(value),
json.loads(resp.data.decode('utf-8')))
try:
resp = self.client.get('/kv/10001/')
except Exception as e:
self.assertRaises(TypeError)
def test_save_dash(self, username='admin'):
self.login(username=username)
dash = db.session.query(models.Dashboard).filter_by(
slug="births").first()
positions = []
for i, slc in enumerate(dash.slices):
d = {
'col': 0,
'row': i * 4,
'size_x': 4,
'size_y': 4,
'slice_id': '{}'.format(slc.id)}
positions.append(d)
data = {
'css': '',
'expanded_slices': {},
'positions': positions,
'dashboard_title': dash.dashboard_title
}
url = '/superset/save_dash/{}/'.format(dash.id)
resp = self.get_resp(url, data=dict(data=json.dumps(data)))
self.assertIn("SUCCESS", resp)
def test_save_dash_with_dashboard_title(self, username='admin'):
self.login(username=username)
dash = (
db.session.query(models.Dashboard)
.filter_by(slug="births")
.first()
)
origin_title = dash.dashboard_title
positions = []
for i, slc in enumerate(dash.slices):
d = {
'col': 0,
'row': i * 4,
'size_x': 4,
'size_y': 4,
'slice_id': '{}'.format(slc.id)}
positions.append(d)
data = {
'css': '',
'expanded_slices': {},
'positions': positions,
'dashboard_title': 'new title'
}
url = '/superset/save_dash/{}/'.format(dash.id)
resp = self.get_resp(url, data=dict(data=json.dumps(data)))
updatedDash = (
db.session.query(models.Dashboard)
.filter_by(slug="births")
.first()
)
self.assertEqual(updatedDash.dashboard_title, 'new title')
# # bring back dashboard original title
data['dashboard_title'] = origin_title
self.get_resp(url, data=dict(data=json.dumps(data)))
def test_copy_dash(self, username='admin'):
self.login(username=username)
dash = db.session.query(models.Dashboard).filter_by(
slug="births").first()
positions = []
for i, slc in enumerate(dash.slices):
d = {
'col': 0,
'row': i * 4,
'size_x': 4,
'size_y': 4,
'slice_id': '{}'.format(slc.id)}
positions.append(d)
data = {
'css': '',
'expanded_slices': {},
'positions': positions,
'dashboard_title': 'Copy Of Births',
}
# Save changes to Births dashboard and retrieve updated dash
dash_id = dash.id
url = '/superset/save_dash/{}/'.format(dash_id)
self.client.post(url, data=dict(data=json.dumps(data)))
dash = db.session.query(models.Dashboard).filter_by(
id=dash_id).first()
orig_json_data = dash.data
# Verify that copy matches original
url = '/superset/copy_dash/{}/'.format(dash_id)
resp = self.get_json_resp(url, data=dict(data=json.dumps(data)))
self.assertEqual(resp['dashboard_title'], 'Copy Of Births')
self.assertEqual(resp['position_json'], orig_json_data['position_json'])
self.assertEqual(resp['metadata'], orig_json_data['metadata'])
self.assertEqual(resp['slices'], orig_json_data['slices'])
def test_add_slices(self, username='admin'):
self.login(username=username)
dash = db.session.query(models.Dashboard).filter_by(
slug="births").first()
new_slice = db.session.query(models.Slice).filter_by(
slice_name="Mapbox Long/Lat").first()
existing_slice = db.session.query(models.Slice).filter_by(
slice_name="Name Cloud").first()
data = {
"slice_ids": [new_slice.data["slice_id"],
existing_slice.data["slice_id"]]
}
url = '/superset/add_slices/{}/'.format(dash.id)
resp = self.client.post(url, data=dict(data=json.dumps(data)))
assert "SLICES ADDED" in resp.data.decode('utf-8')
dash = db.session.query(models.Dashboard).filter_by(
slug="births").first()
new_slice = db.session.query(models.Slice).filter_by(
slice_name="Mapbox Long/Lat").first()
assert new_slice in dash.slices
assert len(set(dash.slices)) == len(dash.slices)
# cleaning up
dash = db.session.query(models.Dashboard).filter_by(
slug="births").first()
dash.slices = [
o for o in dash.slices if o.slice_name != "Mapbox Long/Lat"]
db.session.commit()
def test_gamma(self):
self.login(username='gamma')
assert "List Slice" in self.get_resp('/slicemodelview/list/')
assert "List Dashboard" in self.get_resp('/dashboardmodelview/list/')
def test_csv_endpoint(self):
self.login('admin')
sql = """
SELECT first_name, last_name
FROM ab_user
WHERE first_name='admin'
"""
client_id = "{}".format(random.getrandbits(64))[:10]
self.run_sql(sql, client_id, raise_on_error=True)
resp = self.get_resp('/superset/csv/{}'.format(client_id))
data = csv.reader(io.StringIO(resp))
expected_data = csv.reader(
io.StringIO("first_name,last_name\nadmin, user\n"))
self.assertEqual(list(expected_data), list(data))
self.logout()
def test_public_user_dashboard_access(self):
table = (
db.session
.query(SqlaTable)
.filter_by(table_name='birth_names')
.one()
)
# Try access before adding appropriate permissions.
self.revoke_public_access_to_table(table)
self.logout()
resp = self.get_resp('/slicemodelview/list/')
self.assertNotIn('birth_names</a>', resp)
resp = self.get_resp('/dashboardmodelview/list/')
self.assertNotIn('/superset/dashboard/births/', resp)
self.grant_public_access_to_table(table)
# Try access after adding appropriate permissions.
self.assertIn('birth_names', self.get_resp('/slicemodelview/list/'))
resp = self.get_resp('/dashboardmodelview/list/')
self.assertIn("/superset/dashboard/births/", resp)
self.assertIn('Births', self.get_resp('/superset/dashboard/births/'))
# Confirm that public doesn't have access to other datasets.
resp = self.get_resp('/slicemodelview/list/')
self.assertNotIn('wb_health_population</a>', resp)
resp = self.get_resp('/dashboardmodelview/list/')
self.assertNotIn("/superset/dashboard/world_health/", resp)
def test_dashboard_with_created_by_can_be_accessed_by_public_users(self):
self.logout()
table = (
db.session
.query(SqlaTable)
.filter_by(table_name='birth_names')
.one()
)
self.grant_public_access_to_table(table)
dash = db.session.query(models.Dashboard).filter_by(
slug="births").first()
dash.owners = [appbuilder.sm.find_user('admin')]
dash.created_by = appbuilder.sm.find_user('admin')
db.session.merge(dash)
db.session.commit()
assert 'Births' in self.get_resp('/superset/dashboard/births/')
def test_only_owners_can_save(self):
dash = (
db.session
.query(models.Dashboard)
.filter_by(slug="births")
.first()
)
dash.owners = []
db.session.merge(dash)
db.session.commit()
self.test_save_dash('admin')
self.logout()
self.assertRaises(
Exception, self.test_save_dash, 'alpha')
alpha = appbuilder.sm.find_user('alpha')
dash = (
db.session
.query(models.Dashboard)
.filter_by(slug="births")
.first()
)
dash.owners = [alpha]
db.session.merge(dash)
db.session.commit()
self.test_save_dash('alpha')
def test_extra_table_metadata(self):
self.login('admin')
dbid = self.get_main_database(db.session).id
self.get_json_resp(
'/superset/extra_table_metadata/{dbid}/'
'ab_permission_view/panoramix/'.format(**locals()))
def test_process_template(self):
maindb = self.get_main_database(db.session)
sql = "SELECT '{{ datetime(2017, 1, 1).isoformat() }}'"
tp = jinja_context.get_template_processor(database=maindb)
rendered = tp.process_template(sql)
self.assertEqual("SELECT '2017-01-01T00:00:00'", rendered)
def test_get_template_kwarg(self):
maindb = self.get_main_database(db.session)
s = "{{ foo }}"
tp = jinja_context.get_template_processor(database=maindb, foo='bar')
rendered = tp.process_template(s)
self.assertEqual("bar", rendered)
def test_template_kwarg(self):
maindb = self.get_main_database(db.session)
s = "{{ foo }}"
tp = jinja_context.get_template_processor(database=maindb)
rendered = tp.process_template(s, foo='bar')
self.assertEqual("bar", rendered)
def test_templated_sql_json(self):
self.login('admin')
sql = "SELECT '{{ datetime(2017, 1, 1).isoformat() }}' as test"
data = self.run_sql(sql, "fdaklj3ws")
self.assertEqual(data['data'][0]['test'], "2017-01-01T00:00:00")
def test_table_metadata(self):
maindb = self.get_main_database(db.session)
backend = maindb.backend
data = self.get_json_resp(
"/superset/table/{}/ab_user/null/".format(maindb.id))
self.assertEqual(data['name'], 'ab_user')
assert len(data['columns']) > 5
assert data.get('selectStar').startswith('SELECT')
# Engine specific tests
if backend in ('mysql', 'postgresql'):
self.assertEqual(data.get('primaryKey').get('type'), 'pk')
self.assertEqual(
data.get('primaryKey').get('column_names')[0], 'id')
self.assertEqual(len(data.get('foreignKeys')), 2)
if backend == 'mysql':
self.assertEqual(len(data.get('indexes')), 7)
elif backend == 'postgresql':
self.assertEqual(len(data.get('indexes')), 5)
def test_fetch_datasource_metadata(self):
self.login(username='admin')
url = (
'/superset/fetch_datasource_metadata?'
+ 'datasourceKey=1__table'
)
resp = self.get_json_resp(url)
keys = [
'name', 'filterable_cols', 'gb_cols', 'type', 'all_cols',
'order_by_choices', 'metrics_combo', 'granularity_sqla',
'time_grain_sqla', 'id',
]
for k in keys:
self.assertIn(k, resp.keys())
def test_user_profile(self, username='admin'):
self.login(username=username)
slc = self.get_slice("Girls", db.session)
# Setting some faves
url = '/superset/favstar/Slice/{}/select/'.format(slc.id)
resp = self.get_json_resp(url)
self.assertEqual(resp['count'], 1)
dash = (
db.session
.query(models.Dashboard)
.filter_by(slug="births")
.first()
)
url = '/superset/favstar/Dashboard/{}/select/'.format(dash.id)
resp = self.get_json_resp(url)
self.assertEqual(resp['count'], 1)
userid = appbuilder.sm.find_user('admin').id
resp = self.get_resp('/superset/profile/admin/')
self.assertIn('"app"', resp)
data = self.get_json_resp('/superset/recent_activity/{}/'.format(userid))
self.assertNotIn('message', data)
data = self.get_json_resp('/superset/created_slices/{}/'.format(userid))
self.assertNotIn('message', data)
data = self.get_json_resp('/superset/created_dashboards/{}/'.format(userid))
self.assertNotIn('message', data)
data = self.get_json_resp('/superset/fave_slices/{}/'.format(userid))
self.assertNotIn('message', data)
data = self.get_json_resp('/superset/fave_dashboards/{}/'.format(userid))
self.assertNotIn('message', data)
data = self.get_json_resp('/superset/fave_dashboards_by_username/{}/'.format(username))
self.assertNotIn('message', data)
if __name__ == '__main__':
unittest.main()
|
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from os_brick import encryptors
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from nova import block_device
from nova import context
from nova import exception
from nova import objects
from nova.objects import fields
from nova import test
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_instance
from nova.tests.unit import matchers
from nova.virt import block_device as driver_block_device
from nova.virt import driver
from nova.virt import fake as fake_virt
from nova.volume import cinder
ATTACHMENT_ID = uuids.attachment_id
class TestDriverBlockDevice(test.NoDBTestCase):
# This is used to signal if we're dealing with a new style volume
# attachment (Cinder v3.44 flow).
attachment_id = None
driver_classes = {
'swap': driver_block_device.DriverSwapBlockDevice,
'ephemeral': driver_block_device.DriverEphemeralBlockDevice,
'volume': driver_block_device.DriverVolumeBlockDevice,
'volsnapshot': driver_block_device.DriverVolSnapshotBlockDevice,
'volimage': driver_block_device.DriverVolImageBlockDevice,
'volblank': driver_block_device.DriverVolBlankBlockDevice
}
swap_bdm_dict = block_device.BlockDeviceDict(
{'id': 1, 'instance_uuid': uuids.instance,
'device_name': '/dev/sdb1',
'source_type': 'blank',
'destination_type': 'local',
'delete_on_termination': True,
'guest_format': 'swap',
'disk_bus': 'scsi',
'volume_size': 2,
'boot_index': -1})
swap_driver_bdm = {
'device_name': '/dev/sdb1',
'swap_size': 2,
'disk_bus': 'scsi'}
swap_legacy_driver_bdm = {
'device_name': '/dev/sdb1',
'swap_size': 2}
ephemeral_bdm_dict = block_device.BlockDeviceDict(
{'id': 2, 'instance_uuid': uuids.instance,
'device_name': '/dev/sdc1',
'source_type': 'blank',
'destination_type': 'local',
'disk_bus': 'scsi',
'device_type': 'disk',
'volume_size': 4,
'guest_format': 'ext4',
'delete_on_termination': True,
'boot_index': -1})
ephemeral_driver_bdm = {
'device_name': '/dev/sdc1',
'size': 4,
'device_type': 'disk',
'guest_format': 'ext4',
'disk_bus': 'scsi'}
ephemeral_legacy_driver_bdm = {
'device_name': '/dev/sdc1',
'size': 4,
'virtual_name': 'ephemeral0',
'num': 0}
volume_bdm_dict = block_device.BlockDeviceDict(
{'id': 3, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda1',
'source_type': 'volume',
'disk_bus': 'scsi',
'device_type': 'disk',
'volume_size': 8,
'destination_type': 'volume',
'volume_id': 'fake-volume-id-1',
'guest_format': 'ext4',
'connection_info': '{"fake": "connection_info"}',
'delete_on_termination': False,
'boot_index': 0})
volume_driver_bdm = {
'mount_device': '/dev/sda1',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': False,
'disk_bus': 'scsi',
'device_type': 'disk',
'guest_format': 'ext4',
'boot_index': 0,
'volume_type': None}
volume_legacy_driver_bdm = {
'mount_device': '/dev/sda1',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': False}
volume_bdm_dict_without_conn_info = block_device.BlockDeviceDict(
{'id': 3, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda1',
'source_type': 'volume',
'disk_bus': 'scsi',
'device_type': 'disk',
'volume_size': 8,
'destination_type': 'volume',
'volume_id': 'fake-volume-id-1',
'guest_format': 'ext4',
'connection_info': None,
'delete_on_termination': False,
'boot_index': 0})
volume_driver_bdm_without_conn_info = {
'attachment_id': None,
'mount_device': '/dev/sda1',
'connection_info': {},
'delete_on_termination': False,
'disk_bus': 'scsi',
'device_type': 'disk',
'guest_format': 'ext4',
'boot_index': 0,
'volume_type': None}
volsnapshot_bdm_dict = block_device.BlockDeviceDict(
{'id': 4, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda2',
'delete_on_termination': True,
'volume_size': 3,
'disk_bus': 'scsi',
'device_type': 'disk',
'source_type': 'snapshot',
'destination_type': 'volume',
'connection_info': '{"fake": "connection_info"}',
'snapshot_id': 'fake-snapshot-id-1',
'volume_id': 'fake-volume-id-2',
'boot_index': -1,
'volume_type': None})
volsnapshot_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True,
'disk_bus': 'scsi',
'device_type': 'disk',
'guest_format': None,
'boot_index': -1,
'volume_type': None}
volsnapshot_legacy_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True}
volimage_bdm_dict = block_device.BlockDeviceDict(
{'id': 5, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda2',
'delete_on_termination': True,
'volume_size': 1,
'disk_bus': 'scsi',
'device_type': 'disk',
'source_type': 'image',
'destination_type': 'volume',
'connection_info': '{"fake": "connection_info"}',
'image_id': 'fake-image-id-1',
'volume_id': 'fake-volume-id-2',
'boot_index': -1,
'volume_type': None})
volimage_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True,
'disk_bus': 'scsi',
'device_type': 'disk',
'guest_format': None,
'boot_index': -1,
'volume_type': None}
volimage_legacy_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True}
volblank_bdm_dict = block_device.BlockDeviceDict(
{'id': 6, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda2',
'delete_on_termination': True,
'volume_size': 3,
'disk_bus': 'scsi',
'device_type': 'disk',
'source_type': 'blank',
'destination_type': 'volume',
'connection_info': '{"fake": "connection_info"}',
'snapshot_id': 'fake-snapshot-id-1',
'boot_index': -1,
'volume_type': None})
volblank_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True,
'disk_bus': 'scsi',
'device_type': 'disk',
'guest_format': None,
'boot_index': -1,
'volume_type': None}
volblank_legacy_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True}
def setUp(self):
super(TestDriverBlockDevice, self).setUp()
self.volume_api = mock.MagicMock(autospec=cinder.API)
self.virt_driver = mock.MagicMock(autospec=driver.ComputeDriver)
self.context = context.RequestContext('fake_user',
'fake_project')
# create bdm objects for testing
self.swap_bdm = fake_block_device.fake_bdm_object(
self.context, self.swap_bdm_dict)
self.ephemeral_bdm = fake_block_device.fake_bdm_object(
self.context, self.ephemeral_bdm_dict)
self.volume_bdm = fake_block_device.fake_bdm_object(
self.context, self.volume_bdm_dict)
self.volume_bdm_without_conn_info = fake_block_device.fake_bdm_object(
self.context, self.volume_bdm_dict_without_conn_info)
self.volsnapshot_bdm = fake_block_device.fake_bdm_object(
self.context, self.volsnapshot_bdm_dict)
self.volimage_bdm = fake_block_device.fake_bdm_object(
self.context, self.volimage_bdm_dict)
self.volblank_bdm = fake_block_device.fake_bdm_object(
self.context, self.volblank_bdm_dict)
# Set the attachment_id on our fake class variables which we have
# to do in setUp so that any attachment_id set by a subclass will
# be used properly.
for name in ('volume', 'volsnapshot', 'volimage', 'volblank'):
for attr in ('%s_bdm', '%s_driver_bdm'):
bdm = getattr(self, attr % name)
bdm['attachment_id'] = self.attachment_id
def stub_volume_create(self, volume):
# For any test that creates a volume (boot from volume where the source
# type is blank/image/snapshot), we'll also be creating an attachment
# so set the self.attachment_id value on the test and stub out the
# attachment_create method.
self.volume_api.create.return_value = volume
self.attachment_id = ATTACHMENT_ID
self.volume_api.attachment_create.return_value = {'id': ATTACHMENT_ID}
@mock.patch('nova.virt.block_device.LOG')
@mock.patch('os_brick.encryptors')
def test_driver_detach_passes_failed(self, enc, log):
virt = mock.MagicMock()
virt.detach_volume.side_effect = exception.DeviceDetachFailed(
device='sda', reason='because testing')
driver_bdm = self.driver_classes['volume'](self.volume_bdm)
        inst = mock.MagicMock()
vol_api = mock.MagicMock()
# Make sure we pass through DeviceDetachFailed,
# but don't log it as an exception, just a warning
self.assertRaises(exception.DeviceDetachFailed,
driver_bdm.driver_detach,
self.context, inst, vol_api, virt)
self.assertFalse(log.exception.called)
self.assertTrue(log.warning.called)
vol_api.roll_detaching.assert_called_once_with(self.context,
driver_bdm.volume_id)
def test_no_device_raises(self):
for name, cls in self.driver_classes.items():
bdm = fake_block_device.fake_bdm_object(
self.context, {'no_device': True})
self.assertRaises(driver_block_device._NotTransformable,
cls, bdm)
def _test_driver_device(self, name):
db_bdm = getattr(self, "%s_bdm" % name)
test_bdm = self.driver_classes[name](db_bdm)
expected = getattr(self, "%s_driver_bdm" % name)
self.assertThat(expected, matchers.DictMatches(test_bdm))
for k, v in db_bdm.items():
field_val = getattr(test_bdm._bdm_obj, k)
if isinstance(field_val, bool):
v = bool(v)
self.assertEqual(field_val, v)
for field, value in expected.items():
# Test that all driver bdm fields are available as both attrs and
# dict values
self.assertEqual(test_bdm[field], value)
self.assertEqual(getattr(test_bdm, field), value)
test_value = mock.sentinel.value
if field in test_bdm._proxy_as_attr:
# We can't set a versioned object field to a sentinel because
# it's an invalid type. It's not worth creating valid example
# values for all possible field types just for this, so we just
# test setting it to its current value. This at least
# exercises the code path without being a maintenance burden.
test_value = value
# Test that we can set values via either attribute or dict
test_bdm[field] = test_value
self.assertEqual(getattr(test_bdm, field), test_value)
setattr(test_bdm, field, value)
self.assertEqual(test_bdm[field], value)
# Reset the value
test_bdm[field] = value
expected = getattr(self, "%s_legacy_driver_bdm" % name)
self.assertThat(expected, matchers.DictMatches(test_bdm.legacy()))
# Test passthru attributes
for passthru in test_bdm._proxy_as_attr:
self.assertEqual(getattr(test_bdm, passthru),
getattr(test_bdm._bdm_obj, passthru))
# Make sure that all others raise _invalidType
for other_name, cls in self.driver_classes.items():
if other_name == name:
continue
self.assertRaises(driver_block_device._InvalidType,
cls,
getattr(self, '%s_bdm' % name))
# Test the save method
with mock.patch.object(test_bdm._bdm_obj, 'save') as save_mock:
for fld, alias in test_bdm._update_on_save.items():
# We can't set fake values on enums, like device_type,
# so skip those.
if not isinstance(test_bdm._bdm_obj.fields[fld],
fields.BaseEnumField):
field = alias or fld
if field == 'attachment_id':
# Must set UUID values on UUID fields.
fake_value = ATTACHMENT_ID
else:
fake_value = 'fake_changed_value'
test_bdm[field] = fake_value
test_bdm.save()
for fld, alias in test_bdm._update_on_save.items():
self.assertEqual(test_bdm[alias or fld],
getattr(test_bdm._bdm_obj, fld))
save_mock.assert_called_once_with()
def check_save():
self.assertEqual(set([]), test_bdm._bdm_obj.obj_what_changed())
# Test that nothing is set on the object if there are no actual changes
test_bdm._bdm_obj.obj_reset_changes()
with mock.patch.object(test_bdm._bdm_obj, 'save') as save_mock:
save_mock.side_effect = check_save
test_bdm.save()
def _test_driver_default_size(self, name):
size = 'swap_size' if name == 'swap' else 'size'
no_size_bdm = getattr(self, "%s_bdm_dict" % name).copy()
no_size_bdm['volume_size'] = None
driver_bdm = self.driver_classes[name](
fake_block_device.fake_bdm_object(self.context, no_size_bdm))
self.assertEqual(driver_bdm[size], 0)
del no_size_bdm['volume_size']
driver_bdm = self.driver_classes[name](
fake_block_device.fake_bdm_object(self.context, no_size_bdm))
self.assertEqual(driver_bdm[size], 0)
def test_driver_swap_block_device(self):
self._test_driver_device("swap")
def test_driver_swap_default_size(self):
self._test_driver_default_size('swap')
def test_driver_ephemeral_block_device(self):
self._test_driver_device("ephemeral")
def test_driver_ephemeral_default_size(self):
self._test_driver_default_size('ephemeral')
def test_driver_volume_block_device(self):
self._test_driver_device("volume")
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
self.assertEqual(test_bdm['connection_info'],
jsonutils.loads(test_bdm._bdm_obj.connection_info))
self.assertEqual(test_bdm._bdm_obj.id, 3)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-1')
self.assertEqual(test_bdm.volume_size, 8)
self.assertEqual('fake-volume-id-1', test_bdm.get('volume_id'))
def test_driver_snapshot_block_device(self):
self._test_driver_device("volsnapshot")
test_bdm = self.driver_classes['volsnapshot'](
self.volsnapshot_bdm)
self.assertEqual(test_bdm._bdm_obj.id, 4)
self.assertEqual(test_bdm.snapshot_id, 'fake-snapshot-id-1')
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
self.assertEqual(test_bdm.volume_size, 3)
self.assertEqual('fake-snapshot-id-1', test_bdm.get('snapshot_id'))
def test_driver_image_block_device(self):
self._test_driver_device('volimage')
test_bdm = self.driver_classes['volimage'](
self.volimage_bdm)
self.assertEqual(test_bdm._bdm_obj.id, 5)
self.assertEqual(test_bdm.image_id, 'fake-image-id-1')
self.assertEqual(test_bdm.volume_size, 1)
self.assertEqual('fake-image-id-1', test_bdm.get('image_id'))
def test_driver_image_block_device_destination_local(self):
self._test_driver_device('volimage')
bdm = self.volimage_bdm_dict.copy()
bdm['destination_type'] = 'local'
self.assertRaises(driver_block_device._InvalidType,
self.driver_classes['volimage'],
fake_block_device.fake_bdm_object(self.context, bdm))
def test_driver_blank_block_device(self):
self._test_driver_device('volblank')
test_bdm = self.driver_classes['volblank'](
self.volblank_bdm)
self.assertEqual(6, test_bdm._bdm_obj.id)
self.assertIsNone(test_bdm.volume_id)
self.assertEqual(3, test_bdm.volume_size)
self.assertIsNone(test_bdm.get('volume_id'))
def _test_call_wait_func(self, delete_on_termination, delete_fail=False):
test_bdm = self.driver_classes['volume'](self.volume_bdm)
test_bdm['delete_on_termination'] = delete_on_termination
with mock.patch.object(self.volume_api, 'delete') as vol_delete:
wait_func = mock.MagicMock()
mock_exception = exception.VolumeNotCreated(volume_id='fake-id',
seconds=1,
attempts=1,
volume_status='error')
wait_func.side_effect = mock_exception
if delete_on_termination and delete_fail:
vol_delete.side_effect = Exception()
self.assertRaises(exception.VolumeNotCreated,
test_bdm._call_wait_func,
context=self.context,
wait_func=wait_func,
volume_api=self.volume_api,
volume_id='fake-id')
self.assertEqual(delete_on_termination, vol_delete.called)
def test_call_wait_delete_volume(self):
self._test_call_wait_func(True)
def test_call_wait_delete_volume_fail(self):
self._test_call_wait_func(True, True)
def test_call_wait_no_delete_volume(self):
self._test_call_wait_func(False)
def test_volume_delete_attachment(self, include_shared_targets=False):
attachment_id = uuids.attachment
driver_bdm = self.driver_classes['volume'](self.volume_bdm)
driver_bdm['attachment_id'] = attachment_id
elevated_context = self.context.elevated()
instance_detail = {'id': '123', 'uuid': uuids.uuid,
'availability_zone': None}
instance = fake_instance.fake_instance_obj(self.context,
**instance_detail)
connector = {'ip': 'fake_ip', 'host': 'fake_host'}
volume = {'id': driver_bdm.volume_id,
'attach_status': 'attached',
'status': 'in-use'}
if include_shared_targets:
volume['shared_targets'] = True
volume['service_uuid'] = uuids.service_uuid
with test.nested(
mock.patch.object(driver_bdm, '_get_volume', return_value=volume),
mock.patch.object(self.virt_driver, 'get_volume_connector',
return_value=connector),
mock.patch('os_brick.initiator.utils.guard_connection'),
mock.patch.object(self.volume_api, 'attachment_delete'),
) as (mock_get_volume, mock_get_connector, mock_guard,
vapi_attach_del):
driver_bdm.detach(elevated_context, instance,
self.volume_api, self.virt_driver,
attachment_id=attachment_id)
mock_guard.assert_called_once_with(volume)
vapi_attach_del.assert_called_once_with(elevated_context,
attachment_id)
def test_volume_delete_attachment_with_shared_targets(self):
self.test_volume_delete_attachment(include_shared_targets=True)
@mock.patch.object(encryptors, 'get_encryption_metadata')
@mock.patch.object(objects.BlockDeviceMapping, 'save')
def _test_volume_attach(self, driver_bdm, bdm_dict,
fake_volume, mock_save, mock_get_encry,
fail_check_av_zone=False,
driver_attach=False, fail_driver_attach=False,
volume_attach=True, fail_volume_attach=False,
access_mode='rw', availability_zone=None,
multiattach=False, driver_multi_attach=False,
fail_with_virt_driver=False,
include_shared_targets=False,
wait_func=None):
expected_save_calls = []
if driver_multi_attach:
# 'supports_multiattach' is True
self.virt_driver.capabilities.get.return_value = True
else:
# 'supports_multiattach' is False
self.virt_driver.capabilities.get.return_value = False
elevated_context = self.context.elevated()
self.stub_out('nova.context.RequestContext.elevated',
lambda s: elevated_context)
instance_detail = {'id': '123', 'uuid': uuids.uuid,
'availability_zone': availability_zone}
instance = fake_instance.fake_instance_obj(self.context,
**instance_detail)
connector = {'ip': 'fake_ip', 'host': 'fake_host'}
connection_info = {'data': {'access_mode': access_mode}}
expected_conn_info = {'data': {'access_mode': access_mode},
'serial': fake_volume['id']}
if multiattach and driver_multi_attach:
expected_conn_info['multiattach'] = True
enc_data = {'fake': 'enc_data'}
if include_shared_targets:
fake_volume['shared_targets'] = True
fake_volume['service_uuid'] = uuids.service_uuid
self.volume_api.get.return_value = fake_volume
else:
# First call to get() fails because the API isn't new enough.
# So we fallback to the old call.
self.volume_api.get.side_effect = [
exception.CinderAPIVersionNotAvailable(version='3.48'),
fake_volume]
try:
if fail_check_av_zone:
self.volume_api.check_availability_zone.side_effect = (
test.TestingException())
# The @update_db decorator will save any changes.
expected_save_calls.append(mock.call())
# Exit setting mock
raise test.TestingException()
self.virt_driver.get_volume_connector.return_value = connector
if fail_with_virt_driver:
expected_save_calls.append(mock.call())
# Exit setting mock
raise test.TestingException()
if self.attachment_id is None:
self.volume_api.initialize_connection.return_value = (
connection_info)
else:
self.volume_api.attachment_update.return_value = {
'connection_info': connection_info}
if driver_attach:
mock_get_encry.return_value = enc_data
if fail_driver_attach:
self.virt_driver.attach_volume.side_effect = (
test.TestingException())
# The @update_db decorator will save any changes.
expected_save_calls.append(mock.call())
# Exit setting mock
raise test.TestingException()
if volume_attach:
# save updates before marking the volume as in-use
expected_save_calls.append(mock.call())
if fail_volume_attach:
if self.attachment_id is None:
self.volume_api.attach.side_effect = (
test.TestingException())
else:
self.volume_api.attachment_complete.side_effect = (
test.TestingException())
# The @update_db decorator will save any changes.
expected_save_calls.append(mock.call())
except test.TestingException:
pass
if multiattach and fail_with_virt_driver:
self.assertRaises(exception.MultiattachNotSupportedByVirtDriver,
driver_bdm.attach, self.context, instance,
self.volume_api, self.virt_driver)
elif fail_check_av_zone or fail_driver_attach or fail_volume_attach:
self.assertRaises(test.TestingException, driver_bdm.attach,
self.context, instance, self.volume_api,
self.virt_driver,
do_driver_attach=driver_attach)
else:
if wait_func:
driver_bdm.attach(self.context, instance,
self.volume_api, self.virt_driver,
wait_func)
else:
driver_bdm.attach(self.context, instance,
self.volume_api, self.virt_driver,
do_driver_attach=driver_attach)
self.assertThat(driver_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
if include_shared_targets:
self.volume_api.get.assert_called_once_with(
self.context, fake_volume['id'], microversion='3.48')
else:
# First call to get() fails because the API isn't new enough.
# So we fallback to the old call.
self.volume_api.get.assert_has_calls([
mock.call(self.context, fake_volume['id'],
microversion='3.48'),
mock.call(self.context, fake_volume['id'])])
try:
self.volume_api.check_availability_zone.assert_called_once_with(
self.context, fake_volume, instance=instance)
if fail_check_av_zone:
# Exit assert calls
raise test.TestingException()
self.virt_driver.get_volume_connector.assert_called_once_with(
instance)
if fail_with_virt_driver:
raise test.TestingException()
if self.attachment_id is None:
self.volume_api.initialize_connection.assert_called_once_with(
elevated_context, fake_volume['id'], connector)
else:
self.volume_api.attachment_update.assert_called_once_with(
elevated_context, self.attachment_id, connector,
bdm_dict['device_name'])
if driver_attach:
mock_get_encry.assert_called_once_with(
elevated_context, self.volume_api, fake_volume['id'],
connection_info)
self.virt_driver.attach_volume.assert_called_once_with(
elevated_context, expected_conn_info, instance,
bdm_dict['device_name'], disk_bus=bdm_dict['disk_bus'],
device_type=bdm_dict['device_type'], encryption=enc_data)
if fail_driver_attach:
if self.attachment_id is None:
mock_terminate = self.volume_api.terminate_connection
mock_terminate.assert_called_once_with(
elevated_context, fake_volume['id'], connector)
else:
mock_att_delete = self.volume_api.attachment_delete
mock_att_delete.assert_called_once_with(
elevated_context, self.attachment_id)
# Exit assert calls
raise test.TestingException()
if volume_attach:
if not fail_volume_attach:
if self.attachment_id is None:
self.volume_api.attach.assert_called_once_with(
elevated_context, fake_volume['id'], uuids.uuid,
bdm_dict['device_name'], mode=access_mode)
else:
mock_att_complete = self.volume_api.attachment_complete
mock_att_complete.assert_called_once_with(
elevated_context, self.attachment_id)
else:
if self.attachment_id is None:
self.volume_api.attach.assert_called_once_with(
elevated_context, fake_volume['id'], uuids.uuid,
bdm_dict['device_name'], mode=access_mode)
mock_terminate = self.volume_api.terminate_connection
mock_terminate.assert_called_once_with(
elevated_context, fake_volume['id'], connector)
self.volume_api.detach.assert_called_once_with(
elevated_context, fake_volume['id'])
else:
mock_att_complete = self.volume_api.attachment_complete
mock_att_complete.assert_called_once_with(
elevated_context, self.attachment_id)
mock_att_delete = self.volume_api.attachment_delete
mock_att_delete.assert_called_once_with(
elevated_context, self.attachment_id)
except test.TestingException:
pass
if expected_save_calls:
mock_save.assert_has_calls(expected_save_calls)
return instance.uuid
def test_volume_attach(self, include_shared_targets=False):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
self._test_volume_attach(test_bdm, self.volume_bdm, volume,
include_shared_targets=include_shared_targets)
def test_volume_attach_with_shared_targets(self):
self.test_volume_attach(include_shared_targets=True)
def test_volume_attach_ro(self):
test_bdm = self.driver_classes['volume'](self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
self._test_volume_attach(
test_bdm, self.volume_bdm, volume, access_mode='ro')
def test_volume_attach_update_size(self):
test_bdm = self.driver_classes['volume'](self.volume_bdm)
test_bdm.volume_size = None
volume = {'id': 'fake-volume-id-1',
'attach_status': 'detached',
'size': 42}
self._test_volume_attach(test_bdm, self.volume_bdm, volume)
self.assertEqual(42, test_bdm.volume_size)
def test_volume_attach_check_av_zone_fails(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1'}
self._test_volume_attach(test_bdm, self.volume_bdm, volume,
fail_check_av_zone=True)
def test_volume_no_volume_attach(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
self._test_volume_attach(test_bdm, self.volume_bdm, volume,
driver_attach=False)
def test_volume_attach_no_check_driver_attach(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
self._test_volume_attach(test_bdm, self.volume_bdm, volume,
driver_attach=True)
def test_volume_attach_driver_attach_fails(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1'}
self._test_volume_attach(test_bdm, self.volume_bdm, volume,
driver_attach=True, fail_driver_attach=True)
@mock.patch('nova.objects.BlockDeviceMapping.save')
@mock.patch('nova.volume.cinder.API')
@mock.patch('os_brick.encryptors.get_encryption_metadata',
return_value={})
def test_volume_attach_volume_attach_fails(self, mock_get_encryption,
mock_volume_api, mock_bdm_save):
"""Tests that attaching the volume fails and driver rollback occurs."""
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
mock_volume_api.get.return_value = volume
instance = fake_instance.fake_instance_obj(self.context)
virt_driver = fake_virt.SmallFakeDriver(virtapi=mock.MagicMock())
fake_conn_info = {
'serial': volume['id'],
'data': {
'foo': 'bar'
}
}
if self.attachment_id:
mock_volume_api.attachment_update.return_value = {
'connection_info': fake_conn_info
}
mock_volume_api.attachment_complete.side_effect = (
test.TestingException)
else:
# legacy flow, stub out the volume_api accordingly
mock_volume_api.attach.side_effect = test.TestingException
mock_volume_api.initialize_connection.return_value = fake_conn_info
with mock.patch.object(virt_driver, 'detach_volume') as drvr_detach:
with mock.patch.object(self.context, 'elevated',
return_value=self.context):
self.assertRaises(test.TestingException, test_bdm.attach,
self.context, instance, mock_volume_api,
virt_driver, do_driver_attach=True)
drvr_detach.assert_called_once_with(
self.context, fake_conn_info, instance,
self.volume_bdm.device_name,
encryption=mock_get_encryption.return_value)
if self.attachment_id:
mock_volume_api.attachment_delete.assert_called_once_with(
self.context, self.attachment_id)
else:
mock_volume_api.terminate_connection.assert_called_once_with(
self.context, volume['id'],
virt_driver.get_volume_connector(instance))
mock_volume_api.detach.assert_called_once_with(
self.context, volume['id'])
self.assertEqual(2, mock_bdm_save.call_count)
def test_volume_attach_no_driver_attach_volume_attach_fails(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
self._test_volume_attach(test_bdm, self.volume_bdm, volume,
fail_volume_attach=True)
@mock.patch.object(objects.BlockDeviceMapping, 'save')
def test_refresh_connection(self, mock_save):
test_bdm = self.driver_classes['volsnapshot'](
self.volsnapshot_bdm)
instance = {'id': 'fake_id', 'uuid': uuids.uuid}
connector = {'ip': 'fake_ip', 'host': 'fake_host'}
connection_info = {'data': {'multipath_id': 'fake_multipath_id'}}
expected_conn_info = {'data': {'multipath_id': 'fake_multipath_id'},
'serial': 'fake-volume-id-2'}
if self.attachment_id is None:
self.virt_driver.get_volume_connector.return_value = connector
self.volume_api.initialize_connection.return_value = (
connection_info)
else:
self.volume_api.attachment_get.return_value = {
'connection_info': connection_info}
test_bdm.refresh_connection_info(self.context, instance,
self.volume_api, self.virt_driver)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
if self.attachment_id is None:
self.virt_driver.get_volume_connector.assert_called_once_with(
instance)
self.volume_api.initialize_connection.assert_called_once_with(
self.context, test_bdm.volume_id, connector)
else:
self.volume_api.attachment_get.assert_called_once_with(
self.context, self.attachment_id)
mock_save.assert_called_once_with()
def test_snapshot_attach_no_volume(self):
no_volume_snapshot = self.volsnapshot_bdm_dict.copy()
no_volume_snapshot['volume_id'] = None
test_bdm = self.driver_classes['volsnapshot'](
fake_block_device.fake_bdm_object(
self.context, no_volume_snapshot))
# Make sure the test didn't already set up an attachment_id on the
# DriverVolSnapshotBlockDevice that we use to create a new volume.
self.assertIsNone(test_bdm.get('attachment_id'), test_bdm)
snapshot = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
volume = {'id': 'fake-volume-id-2',
'attach_status': 'detached'}
wait_func = mock.MagicMock()
self.volume_api.get_snapshot.return_value = snapshot
self.stub_volume_create(volume)
instance_uuid = self._test_volume_attach(
test_bdm, no_volume_snapshot, volume, wait_func=wait_func)
self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
self.volume_api.get_snapshot.assert_called_once_with(
self.context, 'fake-snapshot-id-1')
self.volume_api.create.assert_called_once_with(
self.context, 3, '', '', availability_zone=None,
snapshot=snapshot, volume_type=None)
wait_func.assert_called_once_with(self.context, 'fake-volume-id-2')
self.volume_api.attachment_create.assert_called_once_with(
self.context, volume['id'], instance_uuid)
self.assertEqual(ATTACHMENT_ID, test_bdm.get('attachment_id'))
def test_snapshot_attach_no_volume_cinder_cross_az_attach_false(self):
# Tests that the volume created from the snapshot has the same AZ as
# the instance.
self.flags(cross_az_attach=False, group='cinder')
no_volume_snapshot = self.volsnapshot_bdm_dict.copy()
no_volume_snapshot['volume_id'] = None
test_bdm = self.driver_classes['volsnapshot'](
fake_block_device.fake_bdm_object(
self.context, no_volume_snapshot))
snapshot = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
volume = {'id': 'fake-volume-id-2',
'attach_status': 'detached'}
wait_func = mock.MagicMock()
self.volume_api.get_snapshot.return_value = snapshot
self.stub_volume_create(volume)
self._test_volume_attach(test_bdm, no_volume_snapshot, volume,
availability_zone='test-az',
wait_func=wait_func)
self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
self.volume_api.get_snapshot.assert_called_once_with(
self.context, 'fake-snapshot-id-1')
self.volume_api.create.assert_called_once_with(
self.context, 3, '', '', availability_zone='test-az',
snapshot=snapshot, volume_type=None)
wait_func.assert_called_once_with(self.context, 'fake-volume-id-2')
def test_snapshot_attach_fail_volume(self):
fail_volume_snapshot = self.volsnapshot_bdm_dict.copy()
fail_volume_snapshot['volume_id'] = None
test_bdm = self.driver_classes['volsnapshot'](
fake_block_device.fake_bdm_object(
self.context, fail_volume_snapshot))
snapshot = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
volume = {'id': 'fake-volume-id-2',
'attach_status': 'detached'}
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
**{'uuid': uuids.uuid})
with test.nested(
mock.patch.object(self.volume_api, 'get_snapshot',
return_value=snapshot),
mock.patch.object(self.volume_api, 'create', return_value=volume),
mock.patch.object(self.volume_api, 'delete'),
) as (vol_get_snap, vol_create, vol_delete):
wait_func = mock.MagicMock()
mock_exception = exception.VolumeNotCreated(volume_id=volume['id'],
seconds=1,
attempts=1,
volume_status='error')
wait_func.side_effect = mock_exception
self.assertRaises(exception.VolumeNotCreated,
test_bdm.attach, context=self.context,
instance=instance,
volume_api=self.volume_api,
virt_driver=self.virt_driver,
wait_func=wait_func)
vol_get_snap.assert_called_once_with(
self.context, 'fake-snapshot-id-1')
vol_create.assert_called_once_with(
self.context, 3, '', '', availability_zone=None,
snapshot=snapshot, volume_type=None)
vol_delete.assert_called_once_with(self.context, volume['id'])
def test_snapshot_attach_volume(self):
test_bdm = self.driver_classes['volsnapshot'](
self.volsnapshot_bdm)
instance = {'id': 'fake_id', 'uuid': uuids.uuid}
with test.nested(
mock.patch.object(self.driver_classes['volume'], 'attach'),
mock.patch.object(self.volume_api, 'get_snapshot'),
mock.patch.object(self.volume_api, 'create'),
) as (mock_attach, mock_get_snapshot, mock_create):
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
mock_attach.assert_called_once_with(
self.context, instance, self.volume_api, self.virt_driver)
# Make sure these are not called
mock_get_snapshot.assert_not_called()
mock_create.assert_not_called()
def test_snapshot_attach_no_volume_and_no_volume_type(self):
bdm = self.driver_classes['volsnapshot'](self.volsnapshot_bdm)
instance = fake_instance.fake_instance_obj(self.context,
**{'uuid': uuids.uuid})
snapshot = {'volume_id': uuids.original_volume_id}
original_volume = {'id': uuids.original_volume_id,
'volume_type_id': 'original_volume_type'}
new_volume = {'id': uuids.new_volume_id}
with test.nested(
mock.patch.object(self.driver_classes['volume'], 'attach'),
mock.patch.object(self.volume_api, 'get_snapshot',
return_value=snapshot),
mock.patch.object(self.volume_api, 'get',
return_value=original_volume),
mock.patch.object(self.volume_api, 'create',
return_value=new_volume),
) as (mock_attach, mock_get_snapshot, mock_get, mock_create):
bdm.volume_id = None
bdm.volume_type = None
bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
# Assert that the original volume type is fetched, stored within
# the bdm and then used to create the new snapshot based volume.
mock_get.assert_called_once_with(self.context,
uuids.original_volume_id)
self.assertEqual('original_volume_type', bdm.volume_type)
mock_create.assert_called_once_with(self.context, bdm.volume_size,
'', '', volume_type='original_volume_type', snapshot=snapshot,
availability_zone=None)
def test_image_attach_no_volume(self):
no_volume_image = self.volimage_bdm_dict.copy()
no_volume_image['volume_id'] = None
test_bdm = self.driver_classes['volimage'](
fake_block_device.fake_bdm_object(
self.context, no_volume_image))
# Make sure the test didn't already set up an attachment_id on the
# DriverVolImageBlockDevice that we use to create a new volume.
self.assertIsNone(test_bdm.get('attachment_id'), test_bdm)
image = {'id': 'fake-image-id-1'}
volume = {'id': 'fake-volume-id-2',
'attach_status': 'detached'}
wait_func = mock.MagicMock()
self.stub_volume_create(volume)
instance_uuid = self._test_volume_attach(
test_bdm, no_volume_image, volume, wait_func=wait_func)
self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
self.volume_api.create.assert_called_once_with(
self.context, 1, '', '', image_id=image['id'],
availability_zone=None, volume_type=None)
wait_func.assert_called_once_with(self.context, 'fake-volume-id-2')
self.volume_api.attachment_create.assert_called_once_with(
self.context, volume['id'], instance_uuid)
self.assertEqual(ATTACHMENT_ID, test_bdm.get('attachment_id'))
def test_image_attach_no_volume_cinder_cross_az_attach_false(self):
# Tests that the volume created from the image has the same AZ as the
# instance.
self.flags(cross_az_attach=False, group='cinder')
no_volume_image = self.volimage_bdm_dict.copy()
no_volume_image['volume_id'] = None
test_bdm = self.driver_classes['volimage'](
fake_block_device.fake_bdm_object(
self.context, no_volume_image))
image = {'id': 'fake-image-id-1'}
volume = {'id': 'fake-volume-id-2',
'attach_status': 'detached'}
wait_func = mock.MagicMock()
self.stub_volume_create(volume)
self._test_volume_attach(test_bdm, no_volume_image, volume,
availability_zone='test-az',
wait_func=wait_func)
self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
self.volume_api.create.assert_called_once_with(
self.context, 1, '', '', image_id=image['id'],
availability_zone='test-az', volume_type=None)
wait_func.assert_called_once_with(self.context, 'fake-volume-id-2')
def test_image_attach_fail_volume(self):
fail_volume_image = self.volimage_bdm_dict.copy()
fail_volume_image['volume_id'] = None
test_bdm = self.driver_classes['volimage'](
fake_block_device.fake_bdm_object(
self.context, fail_volume_image))
image = {'id': 'fake-image-id-1'}
volume = {'id': 'fake-volume-id-2',
'attach_status': 'detached'}
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
**{'uuid': uuids.uuid})
with test.nested(
mock.patch.object(self.volume_api, 'create', return_value=volume),
mock.patch.object(self.volume_api, 'delete'),
) as (vol_create, vol_delete):
wait_func = mock.MagicMock()
mock_exception = exception.VolumeNotCreated(volume_id=volume['id'],
seconds=1,
attempts=1,
volume_status='error')
wait_func.side_effect = mock_exception
self.assertRaises(exception.VolumeNotCreated,
test_bdm.attach, context=self.context,
instance=instance,
volume_api=self.volume_api,
virt_driver=self.virt_driver,
wait_func=wait_func)
vol_create.assert_called_once_with(
self.context, 1, '', '', image_id=image['id'],
availability_zone=None, volume_type=None)
vol_delete.assert_called_once_with(self.context, volume['id'])
def test_image_attach_volume(self):
test_bdm = self.driver_classes['volimage'](
self.volimage_bdm)
instance = {'id': 'fake_id', 'uuid': uuids.uuid}
with test.nested(
mock.patch.object(self.driver_classes['volume'], 'attach'),
mock.patch.object(self.volume_api, 'get_snapshot'),
mock.patch.object(self.volume_api, 'create'),
) as (mock_attach, mock_get_snapshot, mock_create):
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
mock_attach.assert_called_once_with(
self.context, instance, self.volume_api, self.virt_driver)
# Make sure these are not called
mock_get_snapshot.assert_not_called()
mock_create.assert_not_called()
def test_blank_attach_fail_volume(self):
no_blank_volume = self.volblank_bdm_dict.copy()
no_blank_volume['volume_id'] = None
test_bdm = self.driver_classes['volblank'](
fake_block_device.fake_bdm_object(
self.context, no_blank_volume))
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
**{'uuid': uuids.uuid})
volume = {'id': 'fake-volume-id-2',
'display_name': '%s-blank-vol' % uuids.uuid}
with test.nested(
mock.patch.object(self.volume_api, 'create', return_value=volume),
mock.patch.object(self.volume_api, 'delete'),
) as (vol_create, vol_delete):
wait_func = mock.MagicMock()
mock_exception = exception.VolumeNotCreated(volume_id=volume['id'],
seconds=1,
attempts=1,
volume_status='error')
wait_func.side_effect = mock_exception
self.assertRaises(exception.VolumeNotCreated,
test_bdm.attach, context=self.context,
instance=instance,
volume_api=self.volume_api,
virt_driver=self.virt_driver,
wait_func=wait_func)
vol_create.assert_called_once_with(
self.context, test_bdm.volume_size,
'%s-blank-vol' % uuids.uuid,
'', volume_type=None, availability_zone=None)
vol_delete.assert_called_once_with(
self.context, volume['id'])
def test_blank_attach_volume(self):
no_blank_volume = self.volblank_bdm_dict.copy()
no_blank_volume['volume_id'] = None
test_bdm = self.driver_classes['volblank'](
fake_block_device.fake_bdm_object(
self.context, no_blank_volume))
# Make sure the test didn't already set up an attachment_id on the
# DriverVolBlankBlockDevice that we use to create a new volume.
self.assertIsNone(test_bdm.get('attachment_id'), test_bdm)
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
**{'uuid': uuids.uuid})
volume_class = self.driver_classes['volume']
volume = {'id': 'fake-volume-id-2',
'display_name': '%s-blank-vol' % uuids.uuid}
self.stub_volume_create(volume)
with mock.patch.object(volume_class, 'attach') as vol_attach:
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
self.volume_api.create.assert_called_once_with(
self.context, test_bdm.volume_size,
'%s-blank-vol' % uuids.uuid,
'', volume_type=None, availability_zone=None)
vol_attach.assert_called_once_with(self.context, instance,
self.volume_api,
self.virt_driver)
self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
self.volume_api.attachment_create.assert_called_once_with(
self.context, volume['id'], instance.uuid)
self.assertEqual(ATTACHMENT_ID, test_bdm.get('attachment_id'))
def test_blank_attach_volume_cinder_cross_az_attach_false(self):
# Tests that the blank volume created is in the same availability zone
# as the instance.
self.flags(cross_az_attach=False, group='cinder')
no_blank_volume = self.volblank_bdm_dict.copy()
no_blank_volume['volume_id'] = None
test_bdm = self.driver_classes['volblank'](
fake_block_device.fake_bdm_object(
self.context, no_blank_volume))
updates = {'uuid': uuids.uuid, 'availability_zone': 'test-az'}
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
**updates)
volume_class = self.driver_classes['volume']
volume = {'id': 'fake-volume-id-2',
'display_name': '%s-blank-vol' % uuids.uuid}
self.stub_volume_create(volume)
with mock.patch.object(volume_class, 'attach') as vol_attach:
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
self.volume_api.create.assert_called_once_with(
self.context, test_bdm.volume_size,
'%s-blank-vol' % uuids.uuid,
'', volume_type=None, availability_zone='test-az')
vol_attach.assert_called_once_with(self.context, instance,
self.volume_api,
self.virt_driver)
self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
def test_convert_block_devices(self):
bdms = objects.BlockDeviceMappingList(
objects=[self.volume_bdm, self.ephemeral_bdm])
converted = driver_block_device._convert_block_devices(
self.driver_classes['volume'], bdms)
self.assertEqual(converted, [self.volume_driver_bdm])
def test_convert_all_volumes(self):
converted = driver_block_device.convert_all_volumes()
self.assertEqual([], converted)
converted = driver_block_device.convert_all_volumes(
self.volume_bdm, self.ephemeral_bdm, self.volimage_bdm,
self.volblank_bdm, self.volsnapshot_bdm)
self.assertEqual(converted, [self.volume_driver_bdm,
self.volimage_driver_bdm,
self.volblank_driver_bdm,
self.volsnapshot_driver_bdm])
def test_convert_volume(self):
self.assertIsNone(driver_block_device.convert_volume(self.swap_bdm))
self.assertEqual(self.volume_driver_bdm,
driver_block_device.convert_volume(self.volume_bdm))
self.assertEqual(self.volsnapshot_driver_bdm,
driver_block_device.convert_volume(
self.volsnapshot_bdm))
def test_convert_volume_without_connection_info(self):
self.assertEqual(self.volume_driver_bdm_without_conn_info,
driver_block_device.convert_volume(
self.volume_bdm_without_conn_info))
def test_legacy_block_devices(self):
test_snapshot = self.driver_classes['volsnapshot'](
self.volsnapshot_bdm)
block_device_mapping = [test_snapshot, test_snapshot]
legacy_bdm = driver_block_device.legacy_block_devices(
block_device_mapping)
self.assertEqual(legacy_bdm, [self.volsnapshot_legacy_driver_bdm,
self.volsnapshot_legacy_driver_bdm])
# Test that the ephemerals work as expected
test_ephemerals = [self.driver_classes['ephemeral'](
self.ephemeral_bdm) for _ in range(2)]
expected = [self.ephemeral_legacy_driver_bdm.copy()
for _ in range(2)]
expected[0]['virtual_name'] = 'ephemeral0'
expected[0]['num'] = 0
expected[1]['virtual_name'] = 'ephemeral1'
expected[1]['num'] = 1
legacy_ephemerals = driver_block_device.legacy_block_devices(
test_ephemerals)
self.assertEqual(expected, legacy_ephemerals)
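# Grounded in the expected values above: legacy_block_devices() numbers
# ephemeral devices in order of appearance (ephemeral0, ephemeral1, ...) and
# records that index in the 'num' key.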
def test_get_swap(self):
swap = [self.swap_driver_bdm]
legacy_swap = [self.swap_legacy_driver_bdm]
no_swap = [self.volume_driver_bdm]
self.assertEqual(swap[0], driver_block_device.get_swap(swap))
self.assertEqual(legacy_swap[0],
driver_block_device.get_swap(legacy_swap))
self.assertIsNone(driver_block_device.get_swap(no_swap))
self.assertIsNone(driver_block_device.get_swap([]))
def test_is_implemented(self):
for bdm in (self.volimage_bdm, self.volume_bdm, self.swap_bdm,
self.ephemeral_bdm, self.volsnapshot_bdm):
self.assertTrue(driver_block_device.is_implemented(bdm))
local_image = self.volimage_bdm_dict.copy()
local_image['destination_type'] = 'local'
self.assertFalse(driver_block_device.is_implemented(
fake_block_device.fake_bdm_object(self.context, local_image)))
def test_is_block_device_mapping(self):
test_swap = self.driver_classes['swap'](self.swap_bdm)
test_ephemeral = self.driver_classes['ephemeral'](self.ephemeral_bdm)
test_image = self.driver_classes['volimage'](self.volimage_bdm)
test_snapshot = self.driver_classes['volsnapshot'](
self.volsnapshot_bdm)
test_volume = self.driver_classes['volume'](self.volume_bdm)
test_blank = self.driver_classes['volblank'](self.volblank_bdm)
for bdm in (test_image, test_snapshot, test_volume, test_blank):
self.assertTrue(driver_block_device.is_block_device_mapping(
bdm._bdm_obj))
for bdm in (test_swap, test_ephemeral):
self.assertFalse(driver_block_device.is_block_device_mapping(
bdm._bdm_obj))
def test_get_volume_create_az_cinder_cross_az_attach_true(self):
# Tests that we get None back if cinder.cross_az_attach=True even if
# the instance has an AZ assigned. Note that since cross_az_attach
# defaults to True we don't need to set a flag explicitly for the test.
updates = {'availability_zone': 'test-az'}
instance = fake_instance.fake_instance_obj(self.context, **updates)
self.assertIsNone(
driver_block_device._get_volume_create_az_value(instance))
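# A minimal sketch of the decision _get_volume_create_az_value is expected to
# make, assuming the option lives under the [cinder] group as the flags above
# suggest (illustrative only, not the actual implementation):
#
#     if CONF.cinder.cross_az_attach:
#         return None                        # let Cinder choose the AZ
#     return instance.availability_zone      # pin the volume to the instance AZ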
def test_refresh_conn_infos(self):
# Only DriverVolumeBlockDevice derived devices should refresh their
# connection_info during a refresh_conn_infos call.
test_volume = mock.MagicMock(
spec=driver_block_device.DriverVolumeBlockDevice)
test_image = mock.MagicMock(
spec=driver_block_device.DriverVolImageBlockDevice)
test_snapshot = mock.MagicMock(
spec=driver_block_device.DriverVolSnapshotBlockDevice)
test_blank = mock.MagicMock(
spec=driver_block_device.DriverVolBlankBlockDevice)
test_eph = mock.MagicMock(
spec=driver_block_device.DriverEphemeralBlockDevice)
test_swap = mock.MagicMock(
spec=driver_block_device.DriverSwapBlockDevice)
block_device_mapping = [test_volume, test_image, test_eph,
test_snapshot, test_swap, test_blank]
driver_block_device.refresh_conn_infos(block_device_mapping,
mock.sentinel.refresh_context,
mock.sentinel.refresh_instance,
mock.sentinel.refresh_vol_api,
mock.sentinel.refresh_virt_drv)
for test_mock in [test_volume, test_image, test_snapshot, test_blank]:
test_mock.refresh_connection_info.assert_called_once_with(
mock.sentinel.refresh_context,
mock.sentinel.refresh_instance,
mock.sentinel.refresh_vol_api,
mock.sentinel.refresh_virt_drv)
# NOTE(lyarwood): Can't think of a better way of testing this as we
# can't assert_not_called if the method isn't in the spec.
self.assertFalse(hasattr(test_eph, 'refresh_connection_info'))
self.assertFalse(hasattr(test_swap, 'refresh_connection_info'))
def test_proxy_as_attr(self):
class A(driver_block_device.DriverBlockDevice):
pass
def _transform(self):
pass
class B(A):
_proxy_as_attr_inherited = set('B')
class C(A):
_proxy_as_attr_inherited = set('C')
class D(B):
_proxy_as_attr_inherited = set('D')
class E(B, C):
_proxy_as_attr_inherited = set('E')
bdm = objects.BlockDeviceMapping(self.context, no_device=False)
self.assertEqual(set(['uuid', 'is_volume']), A(bdm)._proxy_as_attr)
self.assertEqual(set(['uuid', 'is_volume', 'B']),
B(bdm)._proxy_as_attr)
self.assertEqual(set(['uuid', 'is_volume', 'C']),
C(bdm)._proxy_as_attr)
self.assertEqual(set(['uuid', 'is_volume', 'B', 'D']),
D(bdm)._proxy_as_attr)
self.assertEqual(set(['uuid', 'is_volume', 'B', 'C', 'E']),
E(bdm)._proxy_as_attr)
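# The assertions above imply that _proxy_as_attr is the union of the base
# attributes ('uuid', 'is_volume') with every _proxy_as_attr_inherited set
# found along the class's MRO.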
def _test_boot_from_volume_source_blank_volume_type(
self, bdm, expected_volume_type):
self.flags(cross_az_attach=False, group='cinder')
test_bdm = self.driver_classes['volblank'](bdm)
updates = {'uuid': uuids.uuid, 'availability_zone': 'test-az'}
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
**updates)
volume_class = self.driver_classes['volume']
volume = {'id': 'fake-volume-id-2',
'display_name': '%s-blank-vol' % uuids.uuid}
self.stub_volume_create(volume)
with mock.patch.object(volume_class, 'attach') as vol_attach:
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
self.volume_api.create.assert_called_once_with(
self.context, test_bdm.volume_size,
'%s-blank-vol' % uuids.uuid, '',
volume_type=expected_volume_type,
availability_zone='test-az')
vol_attach.assert_called_once_with(
self.context, instance, self.volume_api, self.virt_driver)
self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
def test_boot_from_volume_source_blank_with_unset_volume_type(self):
"""Tests the scenario that the BlockDeviceMapping.volume_type field
is unset for RPC compatibility to an older compute.
"""
no_blank_volume = self.volblank_bdm_dict.copy()
no_blank_volume['volume_id'] = None
bdm = fake_block_device.fake_bdm_object(self.context, no_blank_volume)
delattr(bdm, 'volume_type')
self.assertNotIn('volume_type', bdm)
self._test_boot_from_volume_source_blank_volume_type(bdm, None)
def test_boot_from_volume_source_blank_with_volume_type(self):
# Tests that the blank volume created specifies the volume type.
no_blank_volume = self.volblank_bdm_dict.copy()
no_blank_volume['volume_id'] = None
no_blank_volume['volume_type'] = 'fake-lvm-1'
bdm = fake_block_device.fake_bdm_object(self.context, no_blank_volume)
self._test_boot_from_volume_source_blank_volume_type(bdm, 'fake-lvm-1')
def _test_boot_from_volume_source_image_volume_type(
self, bdm, expected_volume_type):
self.flags(cross_az_attach=False, group='cinder')
test_bdm = self.driver_classes['volimage'](bdm)
updates = {'uuid': uuids.uuid, 'availability_zone': 'test-az'}
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
**updates)
volume_class = self.driver_classes['volume']
image = {'id': 'fake-image-id-1'}
volume = {'id': 'fake-volume-id-2',
'display_name': 'fake-image-vol'}
self.stub_volume_create(volume)
with mock.patch.object(volume_class, 'attach') as vol_attach:
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
self.volume_api.create.assert_called_once_with(
self.context, test_bdm.volume_size,
'', '', image_id=image['id'],
volume_type=expected_volume_type,
availability_zone='test-az')
vol_attach.assert_called_once_with(
self.context, instance, self.volume_api, self.virt_driver)
self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
def test_boot_from_volume_source_image_with_unset_volume_type(self):
"""Tests the scenario that the BlockDeviceMapping.volume_type field
is unset for RPC compatibility to an older compute.
"""
no_volume_image = self.volimage_bdm_dict.copy()
no_volume_image['volume_id'] = None
bdm = fake_block_device.fake_bdm_object(self.context, no_volume_image)
delattr(bdm, 'volume_type')
self.assertNotIn('volume_type', bdm)
self._test_boot_from_volume_source_image_volume_type(bdm, None)
def test_boot_from_volume_source_image_with_volume_type(self):
# Tests that the volume created from the image specifies the volume
# type.
no_volume_image = self.volimage_bdm_dict.copy()
no_volume_image['volume_id'] = None
no_volume_image['volume_type'] = 'fake-lvm-1'
bdm = fake_block_device.fake_bdm_object(self.context, no_volume_image)
self._test_boot_from_volume_source_image_volume_type(bdm, 'fake-lvm-1')
def _test_boot_from_volume_source_snapshot_volume_type(
self, bdm, expected_volume_type):
self.flags(cross_az_attach=False, group='cinder')
test_bdm = self.driver_classes['volsnapshot'](bdm)
snapshot = {'id': 'fake-snapshot-id-1',
'attach_status': 'detached'}
updates = {'uuid': uuids.uuid, 'availability_zone': 'test-az'}
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
**updates)
volume_class = self.driver_classes['volume']
volume = {'id': 'fake-volume-id-2',
'display_name': 'fake-snapshot-vol'}
self.stub_volume_create(volume)
with test.nested(
mock.patch.object(self.volume_api, 'get_snapshot',
return_value=snapshot),
mock.patch.object(volume_class, 'attach')
) as (
vol_get_snap, vol_attach
):
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
self.volume_api.create.assert_called_once_with(
self.context, test_bdm.volume_size, '', '',
availability_zone='test-az', snapshot=snapshot,
volume_type=expected_volume_type)
vol_attach.assert_called_once_with(
self.context, instance, self.volume_api, self.virt_driver)
self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
def test_boot_from_volume_source_snapshot_with_unset_volume_type(self):
"""Tests the scenario that the BlockDeviceMapping.volume_type field
is unset for RPC compatibility to an older compute.
"""
no_volume_snapshot = self.volsnapshot_bdm_dict.copy()
no_volume_snapshot['volume_id'] = None
bdm = fake_block_device.fake_bdm_object(
self.context, no_volume_snapshot)
delattr(bdm, 'volume_type')
self.assertNotIn('volume_type', bdm)
self._test_boot_from_volume_source_snapshot_volume_type(bdm, None)
def test_boot_from_volume_source_snapshot_with_volume_type(self):
# Tests that the volume created from the snapshot specifies the volume
# type.
no_volume_snapshot = self.volsnapshot_bdm_dict.copy()
no_volume_snapshot['volume_id'] = None
no_volume_snapshot['volume_type'] = 'fake-lvm-1'
bdm = fake_block_device.fake_bdm_object(
self.context, no_volume_snapshot)
self._test_boot_from_volume_source_snapshot_volume_type(
bdm, 'fake-lvm-1')
class TestDriverBlockDeviceNewFlow(TestDriverBlockDevice):
"""Virt block_device tests for the Cinder 3.44 volume attach flow
where a volume BDM has an attachment_id.
"""
attachment_id = ATTACHMENT_ID
def test_volume_attach_multiattach(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'multiattach': True,
'attach_status': 'attached',
'status': 'in-use',
'attachments': {'fake_instance_2':
{'mountpoint': '/dev/vdc'}}}
self._test_volume_attach(test_bdm, self.volume_bdm, volume,
multiattach=True, driver_multi_attach=True)
def test_volume_attach_multiattach_no_virt_driver_support(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'multiattach': True,
'attach_status': 'attached',
'status': 'in-use',
'attachments': {'fake_instance_2':
{'mountpoint': '/dev/vdc'}}}
self._test_volume_attach(test_bdm, self.volume_bdm,
volume, multiattach=True,
fail_with_virt_driver=True)
@mock.patch('nova.objects.BlockDeviceMapping.save')
def test_refresh_connection_preserve_multiattach(self, mock_bdm_save):
"""Tests that we've already attached a multiattach-capable volume
and when refreshing the connection_info from the attachment record,
the multiattach flag in the bdm.connection_info is preserved.
"""
test_bdm = self.driver_classes['volume'](self.volume_bdm)
test_bdm['connection_info']['multiattach'] = True
volume_api = mock.Mock()
volume_api.attachment_get.return_value = {
'connection_info': {
'data': {
'some': 'goodies'
}
}
}
test_bdm.refresh_connection_info(
self.context, mock.sentinel.instance,
volume_api, mock.sentinel.virt_driver)
volume_api.attachment_get.assert_called_once_with(
self.context, self.attachment_id)
mock_bdm_save.assert_called_once_with()
expected_connection_info = {
'data': {
'some': 'goodies'
},
'serial': self.volume_bdm.volume_id,
'multiattach': True
}
self.assertDictEqual(expected_connection_info,
test_bdm['connection_info'])
class TestGetVolumeId(test.NoDBTestCase):
def test_get_volume_id_none_found(self):
self.assertIsNone(driver_block_device.get_volume_id(None))
self.assertIsNone(driver_block_device.get_volume_id({}))
self.assertIsNone(driver_block_device.get_volume_id({'data': {}}))
def test_get_volume_id_found_volume_id_no_serial(self):
self.assertEqual(uuids.volume_id,
driver_block_device.get_volume_id(
{'data': {'volume_id': uuids.volume_id}}))
def test_get_volume_id_found_no_volume_id_serial(self):
self.assertEqual(uuids.serial,
driver_block_device.get_volume_id(
{'serial': uuids.serial}))
def test_get_volume_id_found_both(self):
# volume_id takes precedence over serial
self.assertEqual(uuids.volume_id,
driver_block_device.get_volume_id(
{'serial': uuids.serial,
'data': {'volume_id': uuids.volume_id}}))
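# Grounded in the tests above: get_volume_id() prefers
# connection_info['data']['volume_id'] and falls back to the top-level
# 'serial' key only when no volume_id is present.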
|
|
import inspect
import logging
import os
import signal
import socket
import sys
import time
from optparse import OptionParser
import gevent
import locust
from . import events, runners, web
from .core import HttpLocust, Locust
from .inspectlocust import get_task_ratio_dict, print_task_ratio
from .log import console_logger, setup_logging
from .runners import LocalLocustRunner, MasterLocustRunner, SlaveLocustRunner
from .stats import (print_error_report, print_percentile_stats, print_stats,
stats_printer, stats_writer, write_stat_csvs)
_internals = [Locust, HttpLocust]
version = locust.__version__
def parse_options():
"""
Handle command-line options with optparse.OptionParser.
Return the option parser along with the parsed options and arguments.
"""
# Initialize
parser = OptionParser(usage="locust [options] [LocustClass [LocustClass2 ... ]]")
parser.add_option(
'-H', '--host',
dest="host",
default=None,
help="Host to load test in the following format: http://10.21.32.33"
)
parser.add_option(
'--web-host',
dest="web_host",
default="",
help="Host to bind the web interface to. Defaults to '' (all interfaces)"
)
parser.add_option(
'-P', '--port', '--web-port',
type="int",
dest="port",
default=8089,
help="Port on which to run web host"
)
parser.add_option(
'-f', '--locustfile',
dest='locustfile',
default='locustfile',
help="Python module file to import, e.g. '../other.py'. Default: locustfile"
)
# A file that contains the current request stats.
parser.add_option(
'--csv', '--csv-base-name',
action='store',
type='str',
dest='csvfilebase',
default=None,
help="Store current request stats to files in CSV format.",
)
# if locust should be run in distributed mode as master
parser.add_option(
'--master',
action='store_true',
dest='master',
default=False,
help="Set locust to run in distributed mode with this process as master"
)
# if locust should be run in distributed mode as slave
parser.add_option(
'--slave',
action='store_true',
dest='slave',
default=False,
help="Set locust to run in distributed mode with this process as slave"
)
# master host options
parser.add_option(
'--master-host',
action='store',
type='str',
dest='master_host',
default="127.0.0.1",
help="Host or IP address of locust master for distributed load testing. Only used when running with --slave. Defaults to 127.0.0.1."
)
parser.add_option(
'--master-port',
action='store',
type='int',
dest='master_port',
default=5557,
help="The port to connect to that is used by the locust master for distributed load testing. Only used when running with --slave. Defaults to 5557. Note that slaves will also connect to the master node on this port + 1."
)
parser.add_option(
'--master-bind-host',
action='store',
type='str',
dest='master_bind_host',
default="*",
help="Interfaces (hostname, ip) that locust master should bind to. Only used when running with --master. Defaults to * (all available interfaces)."
)
parser.add_option(
'--master-bind-port',
action='store',
type='int',
dest='master_bind_port',
default=5557,
help="Port that locust master should bind to. Only used when running with --master. Defaults to 5557. Note that Locust will also use this port + 1, so by default the master node will bind to 5557 and 5558."
)
parser.add_option(
'--expect-slaves',
action='store',
type='int',
dest='expect_slaves',
default=1,
help="How many slaves master should expect to connect before starting the test (only when --no-web used)."
)
# if we should print stats in the console
parser.add_option(
'--no-web',
action='store_true',
dest='no_web',
default=False,
help="Disable the web interface, and instead start running the test immediately. Requires -c and -r to be specified."
)
# Number of clients
parser.add_option(
'-c', '--clients',
action='store',
type='int',
dest='num_clients',
default=1,
help="Number of concurrent clients. Only used together with --no-web"
)
# Client hatch rate
parser.add_option(
'-r', '--hatch-rate',
action='store',
type='float',
dest='hatch_rate',
default=1,
help="The rate per second in which clients are spawned. Only used together with --no-web"
)
# Number of requests
parser.add_option(
'-n', '--num-request',
action='store',
type='int',
dest='num_requests',
default=None,
help="Number of requests to perform. Only used together with --no-web"
)
# log level
parser.add_option(
'--loglevel', '-L',
action='store',
type='str',
dest='loglevel',
default='INFO',
help="Choose between DEBUG/INFO/WARNING/ERROR/CRITICAL. Default is INFO.",
)
# log file
parser.add_option(
'--logfile',
action='store',
type='str',
dest='logfile',
default=None,
help="Path to log file. If not set, log will go to stdout/stderr",
)
# if we should print stats in the console
parser.add_option(
'--print-stats',
action='store_true',
dest='print_stats',
default=False,
help="Print stats in the console"
)
# only print summary stats
parser.add_option(
'--only-summary',
action='store_true',
dest='only_summary',
default=False,
help='Only print the summary stats'
)
parser.add_option(
'--no-reset-stats',
action='store_true',
dest='no_reset_stats',
default=False,
help="Do not reset statistics once hatching has been completed",
)
# List locust commands found in loaded locust files/source files
parser.add_option(
'-l', '--list',
action='store_true',
dest='list_commands',
default=False,
help="Show list of possible locust classes and exit"
)
# Display ratio table of all tasks
parser.add_option(
'--show-task-ratio',
action='store_true',
dest='show_task_ratio',
default=False,
help="print table of the locust classes' task execution ratio"
)
# Display ratio table of all tasks in JSON format
parser.add_option(
'--show-task-ratio-json',
action='store_true',
dest='show_task_ratio_json',
default=False,
help="print json data of the locust classes' task execution ratio"
)
# Version number (optparse gives you --version but we have to do it
# ourselves to get -V too. sigh)
parser.add_option(
'-V', '--version',
action='store_true',
dest='show_version',
default=False,
help="show program's version number and exit"
)
# Finalize
# Return three-tuple of parser + the output from parse_args (opt obj, args)
opts, args = parser.parse_args()
return parser, opts, args
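# Usage sketch (mirrors what main() does further below):
#
#     parser, options, arguments = parse_options()
#     if options.show_version:
#         print("Locust %s" % (version,))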
def _is_package(path):
"""
Is the given path a Python package?
"""
return (
os.path.isdir(path)
and os.path.exists(os.path.join(path, '__init__.py'))
)
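# Illustrative example (hypothetical layout): _is_package() returns True for a
# directory such as
#
#     mylocusts/
#         __init__.py
#         tasks.py
#
# but False for a plain directory that has no __init__.py file.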
def find_locustfile(locustfile):
"""
Attempt to locate a locustfile, either explicitly or by searching parent dirs.
"""
# Start from the supplied locustfile name
names = [locustfile]
# Create .py version if necessary
if not names[0].endswith('.py'):
names += [names[0] + '.py']
# Does the name contain path elements?
if os.path.dirname(names[0]):
# If so, expand home-directory markers and test for existence
for name in names:
expanded = os.path.expanduser(name)
if os.path.exists(expanded):
if name.endswith('.py') or _is_package(expanded):
return os.path.abspath(expanded)
else:
# Otherwise, start in cwd and work upwards towards the filesystem root
path = '.'
# Stop before falling off root of filesystem (should be platform
# agnostic)
while os.path.split(os.path.abspath(path))[1]:
for name in names:
joined = os.path.join(path, name)
if os.path.exists(joined):
if name.endswith('.py') or _is_package(joined):
return os.path.abspath(joined)
path = os.path.join('..', path)
# Implicit 'return None' if nothing was found
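# Hypothetical examples of the lookup behaviour above:
#
#     find_locustfile('locustfile')       # tries ./locustfile (as a package) and
#                                         # ./locustfile.py, then each parent dir
#     find_locustfile('~/load/test.py')   # expands '~' and returns the absolute
#                                         # path if the file exists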
def is_locust(tup):
"""
Takes (name, object) tuple, returns True if it's a public Locust subclass.
"""
name, item = tup
return bool(
inspect.isclass(item)
and issubclass(item, Locust)
and hasattr(item, "task_set")
and getattr(item, "task_set")
and not name.startswith('_')
)
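# Example (hypothetical class names) of something that passes is_locust();
# WebsiteUser and UserBehavior are placeholders, not part of this module:
#
#     class WebsiteUser(HttpLocust):
#         task_set = UserBehavior   # must be set to a truthy task set
#
# Classes whose names start with '_' or that leave task_set unset are
# filtered out.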
def load_locustfile(path):
"""
Import given locustfile path and return (docstring, callables).
Specifically, the locustfile's ``__doc__`` attribute (a string) and a
dictionary of ``{'name': callable}`` containing all callables which pass
the "is a Locust" test.
"""
# Get directory and locustfile name
directory, locustfile = os.path.split(path)
# If the directory isn't in the PYTHONPATH, add it so our import will work
added_to_path = False
index = None
if directory not in sys.path:
sys.path.insert(0, directory)
added_to_path = True
# If the directory IS in the PYTHONPATH, move it to the front temporarily,
# otherwise other locustfiles -- like Locust's own -- may scoop the intended
# one.
else:
i = sys.path.index(directory)
if i != 0:
# Store index for later restoration
index = i
# Add to front, then remove from original position
sys.path.insert(0, directory)
del sys.path[i + 1]
# Perform the import (trimming off the .py)
imported = __import__(os.path.splitext(locustfile)[0])
# Remove directory from path if we added it ourselves (just to be neat)
if added_to_path:
del sys.path[0]
# Put back in original index if we moved it
if index is not None:
sys.path.insert(index + 1, directory)
del sys.path[0]
# Return our two-tuple
locusts = dict(filter(is_locust, vars(imported).items()))
return imported.__doc__, locusts
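# Usage sketch (mirrors what main() does below):
#
#     docstring, locusts = load_locustfile('/path/to/locustfile.py')
#     # locusts is a dict such as {'WebsiteUser': <class 'locustfile.WebsiteUser'>}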
def main():
parser, options, arguments = parse_options()
# setup logging
setup_logging(options.loglevel, options.logfile)
logger = logging.getLogger(__name__)
if options.show_version:
print("Locust %s" % (version,))
sys.exit(0)
locustfile = find_locustfile(options.locustfile)
if not locustfile:
logger.error("Could not find any locustfile! Ensure file ends in '.py' and see --help for available options.")
sys.exit(1)
if locustfile == "locust.py":
logger.error("The locustfile must not be named `locust.py`. Please rename the file and try again.")
sys.exit(1)
docstring, locusts = load_locustfile(locustfile)
if options.list_commands:
console_logger.info("Available Locusts:")
for name in locusts:
console_logger.info(" " + name)
sys.exit(0)
if not locusts:
logger.error("No Locust class found!")
sys.exit(1)
# make sure specified Locust exists
if arguments:
missing = set(arguments) - set(locusts.keys())
if missing:
logger.error("Unknown Locust(s): %s\n" % (", ".join(missing)))
sys.exit(1)
else:
names = set(arguments) & set(locusts.keys())
locust_classes = [locusts[n] for n in names]
else:
# list() call is needed to consume the dict_view object in Python 3
locust_classes = list(locusts.values())
if options.show_task_ratio:
console_logger.info("\n Task ratio per locust class")
console_logger.info( "-" * 80)
print_task_ratio(locust_classes)
console_logger.info("\n Total task ratio")
console_logger.info("-" * 80)
print_task_ratio(locust_classes, total=True)
sys.exit(0)
if options.show_task_ratio_json:
from json import dumps
task_data = {
"per_class": get_task_ratio_dict(locust_classes),
"total": get_task_ratio_dict(locust_classes, total=True)
}
console_logger.info(dumps(task_data))
sys.exit(0)
if not options.no_web and not options.slave:
# spawn web greenlet
logger.info("Starting web monitor at %s:%s" % (options.web_host or "*", options.port))
main_greenlet = gevent.spawn(web.start, locust_classes, options)
if not options.master and not options.slave:
runners.locust_runner = LocalLocustRunner(locust_classes, options)
# spawn client spawning/hatching greenlet
if options.no_web:
runners.locust_runner.start_hatching(wait=True)
main_greenlet = runners.locust_runner.greenlet
elif options.master:
runners.locust_runner = MasterLocustRunner(locust_classes, options)
if options.no_web:
while len(runners.locust_runner.clients.ready) < options.expect_slaves:
logging.info("Waiting for slaves to be ready, %s of %s connected",
len(runners.locust_runner.clients.ready), options.expect_slaves)
time.sleep(1)
runners.locust_runner.start_hatching(options.num_clients, options.hatch_rate)
main_greenlet = runners.locust_runner.greenlet
elif options.slave:
try:
runners.locust_runner = SlaveLocustRunner(locust_classes, options)
main_greenlet = runners.locust_runner.greenlet
except socket.error as e:
logger.error("Failed to connect to the Locust master: %s", e)
sys.exit(-1)
if not options.only_summary and (options.print_stats or (options.no_web and not options.slave)):
# spawn stats printing greenlet
gevent.spawn(stats_printer)
if options.csvfilebase:
gevent.spawn(stats_writer, options.csvfilebase)
def shutdown(code=0):
"""
Shut down locust by firing quitting event, printing/writing stats and exiting
"""
logger.info("Shutting down (exit code %s), bye." % code)
events.quitting.fire()
print_stats(runners.locust_runner.request_stats)
print_percentile_stats(runners.locust_runner.request_stats)
if options.csvfilebase:
write_stat_csvs(options.csvfilebase)
print_error_report()
sys.exit(code)
# install SIGTERM handler
def sig_term_handler():
logger.info("Got SIGTERM signal")
shutdown(0)
gevent.signal(signal.SIGTERM, sig_term_handler)
try:
logger.info("Starting Locust %s" % version)
main_greenlet.join()
code = 0
if len(runners.locust_runner.errors):
code = 1
shutdown(code=code)
except KeyboardInterrupt as e:
shutdown(0)
if __name__ == '__main__':
main()
|
|
#!/usr/bin/python
# ----------------------------------------------------------------------------
# modify the runtime template for prebuilt engine
#
# Copyright 2014 (C) zhangbin
#
# License: MIT
# ----------------------------------------------------------------------------
'''
modify the runtime template for prebuilt engine
'''
import os
import sys
from MultiLanguage import MultiLanguage
from cocos import Logging
LUA_TEMPLATE_PATH = "templates/lua-template-runtime"
XCODE_LINK_CPP_LIBS = [
"libcocos2d"
]
XCODE_LINK_LUA_LIBS = [ "libluacocos2d", "libsimulator" ]
XCODE_LINK_JS_LIBS = [ "libjscocos2d", "libsimulator" ]
class TemplateModifier(object):
def __init__(self, engine_path, libs_path, version):
if os.path.isabs(engine_path):
self.engine_path = engine_path
else:
self.engine_path = os.path.abspath(engine_path)
if os.path.isabs(libs_path):
self.libs_path = libs_path
else:
self.libs_path = os.path.abspath(libs_path)
self.version = version
if getattr(sys, 'frozen', None):
self.cur_dir = os.path.realpath(os.path.dirname(sys.executable))
else:
self.cur_dir = os.path.realpath(os.path.dirname(__file__))
proj_modifier_path = os.path.join(self.cur_dir, 'proj_modifier')
sys.path.append(proj_modifier_path)
def modify_xcode_proj(self, proj_file_path):
if proj_file_path.find('cpp-template') >= 0:
language = 'cpp'
elif proj_file_path.find('lua-template') >= 0:
language = 'lua'
elif proj_file_path.find('js-template') >= 0:
language = 'js'
else:
Logging.warning(MultiLanguage.get_string('GEN_TEMP_UNKNOWN_LANGUAGE_FMT', proj_file_path))
return
import modify_pbxproj
pbx_proj = modify_pbxproj.XcodeProject.Load(proj_file_path)
replace_engine_strs = []
if language == "cpp":
targetName = "HelloCpp"
link_libs = XCODE_LINK_CPP_LIBS
replace_engine_strs.append("$(SRCROOT)/../cocos2d")
elif language == "lua":
targetName = "HelloLua"
link_libs = XCODE_LINK_CPP_LIBS + XCODE_LINK_LUA_LIBS
replace_engine_strs.append("$(SRCROOT)/../../cocos2d-x")
else:
targetName = "HelloJavascript"
link_libs = XCODE_LINK_CPP_LIBS + XCODE_LINK_JS_LIBS
replace_engine_strs.append("$(SRCROOT)/../../cocos2d-x")
replace_engine_strs.append("../../cocos2d-x")
ios_target_name = "%s-mobile" % targetName
mac_target_name = "%s-desktop" % targetName
# remove the target dependencies
pbx_proj.remove_proj_reference("cocos2d_libs.xcodeproj")
if language == "lua":
pbx_proj.remove_proj_reference("cocos2d_lua_bindings.xcodeproj")
pbx_proj.remove_proj_reference("libsimulator.xcodeproj")
if language == "js":
pbx_proj.remove_proj_reference("cocos2d_js_bindings.xcodeproj")
pbx_proj.remove_proj_reference("libsimulator.xcodeproj")
pbx_proj.remove_file_by_path("../../cocos2d-x/cocos/scripting/js-bindings/script")
common_group = pbx_proj.get_or_create_group("JS Common")
pbx_proj.add_file_if_doesnt_exist("../../../script", common_group, tree="<group>")
# pbx_proj.remove_group_by_name("JS Common")
# add libraries search path
libs_path = "/Applications/Cocos/frameworks/%s/prebuilt" % self.version
ios_template_prebuilt_path = os.path.join(libs_path, "ios")
pbx_proj.add_library_search_paths(ios_template_prebuilt_path, target_name=ios_target_name, recursive=False)
mac_template_prebuilt_path = os.path.join(libs_path, "mac")
pbx_proj.add_library_search_paths(mac_template_prebuilt_path, target_name=mac_target_name, recursive=False)
# add libraries for targets
ios_lib_group = pbx_proj.get_or_create_group("ios-libs")
mac_lib_group = pbx_proj.get_or_create_group("mac-libs")
for lib in link_libs:
ios_lib_name = "%s iOS.a" % lib
mac_lib_name = "%s Mac.a" % lib
ios_lib_path = os.path.join(ios_template_prebuilt_path, ios_lib_name)
pbx_proj.add_file_if_doesnt_exist(ios_lib_path, ios_lib_group, tree="<group>", target=ios_target_name)
mac_lib_path = os.path.join(mac_template_prebuilt_path, mac_lib_name)
pbx_proj.add_file_if_doesnt_exist(mac_lib_path, mac_lib_group, tree="<group>", target=mac_target_name)
# add studio resources to the xcode project of cpp template
if language == "cpp":
pbx_proj.remove_file_by_path("CloseNormal.png")
pbx_proj.remove_file_by_path("CloseSelected.png")
pbx_proj.remove_file_by_path("HelloWorld.png")
pbx_proj.remove_file_by_path("Marker Felt.ttf")
pbx_proj.remove_file_by_path("fonts")
pbx_proj.remove_file_by_path("res")
res_group = pbx_proj.get_or_create_group("Resources")
pbx_proj.add_file_if_doesnt_exist("../Resources/res", res_group, tree="<group>")
if pbx_proj.modified:
Logging.info(MultiLanguage.get_string('GEN_TEMP_SAVE_XCODE_PROJ_FMT', proj_file_path))
pbx_proj.save()
# modify the engine path
f = open(proj_file_path)
file_content = f.read()
f.close()
install_path = "/Applications/Cocos/frameworks/%s" % self.version
for old_engine_path in replace_engine_strs:
file_content = file_content.replace(old_engine_path, install_path)
f = open(proj_file_path, "w")
f.write(file_content)
f.close()
def modify_vs_proj(self, proj_file_path):
if proj_file_path.find('cpp-template') >= 0:
language = 'cpp'
elif proj_file_path.find('lua-template') >= 0:
language = 'lua'
elif proj_file_path.find('js-template') >= 0:
language = 'js'
else:
Logging.warning(MultiLanguage.get_string('GEN_TEMP_UNKNOWN_LANGUAGE_FMT', proj_file_path))
return
import modify_vcxproj
vcx_proj = modify_vcxproj.VCXProject(proj_file_path)
# remove the project references
vcx_proj.remove_proj_reference()
install_path = "$(COCOS_FRAMEWORKS)\\%s\\" % self.version
copy_libs_cmd = "if not exist \"$(OutDir)\" mkdir \"$(OutDir)\"\n" \
"xcopy /Y /Q \"$(EngineRoot)\\prebuilt\\win32\\*.*\" \"$(OutDir)\"\n"
vcx_proj.set_event_command('PreLinkEvent', copy_libs_cmd, 'debug')
vcx_proj.set_event_command('PreLinkEvent', copy_libs_cmd, 'release')
if language == "js":
custom_step_event = vcx_proj.get_event_command('CustomBuildStep')
custom_step_event.replace("$(ProjectDir)..\\..\\cocos2d-x\\cocos\\scripting\\js-bindings\\script",
"$(ProjectDir)..\\..\\..\\script")
vcx_proj.set_event_command("CustomBuildStep", custom_step_event, create_new=False)
vcx_proj.remove_predefine_macro("_DEBUG", 'debug')
Logging.info(MultiLanguage.get_string('GEN_TEMP_SAVE_VS_PROJ_FMT', proj_file_path))
vcx_proj.save()
replace_strs = []
replace_strs.append("$(EngineRoot)")
if language == "cpp":
# link_libs = WIN32_LINK_CPP_LIBS
replace_strs.append("$(ProjectDir)..\\cocos2d")
replace_strs.append("..\\cocos2d")
elif language == "lua":
# link_libs = WIN32_LINK_CPP_LIBS + WIN32_LINK_LUA_LIBS
replace_strs.append("$(ProjectDir)..\\..\\cocos2d-x")
replace_strs.append("..\\..\\cocos2d-x")
else:
# link_libs = WIN32_LINK_CPP_LIBS + WIN32_LINK_JS_LIBS
replace_strs.append("$(ProjectDir)..\\..\\cocos2d-x")
replace_strs.append("..\\..\\cocos2d-x")
# modify the Runtime.cpp
vcx_proj_path = os.path.dirname(proj_file_path)
cpp_path = os.path.join(vcx_proj_path, os.path.pardir, "Classes/runtime/Runtime.cpp")
if os.path.exists(cpp_path):
f = open(cpp_path)
file_content = f.read()
f.close()
file_content = file_content.replace("#ifndef _DEBUG", "#ifndef COCOS2D_DEBUG")
f = open(cpp_path, "w")
f.write(file_content)
f.close()
f = open(proj_file_path)
file_content = f.read()
f.close()
if language == "lua":
# replace the "lua\lua;" to "lua\luajit;"
file_content = file_content.replace("lua\\lua;", "lua\\luajit\\include;")
file_content = file_content.replace("MultiThreadedDebugDLL", "MultiThreadedDLL")
for replace_str in replace_strs:
file_content = file_content.replace(replace_str, install_path)
file_content = file_content.replace('%s\\' % install_path, install_path)
f = open(proj_file_path, "w")
f.write(file_content)
f.close()
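# Hypothetical usage sketch (paths and version are placeholders, not values
# used by this module):
#
#     modifier = TemplateModifier('/path/to/engine', '/path/to/prebuilt/libs', '3.10')
#     modifier.modify_xcode_proj('/path/to/template/proj.ios_mac/HelloLua.xcodeproj/project.pbxproj')
#     modifier.modify_vs_proj('/path/to/template/proj.win32/HelloLua.vcxproj')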
|
|
# ------------------------------------------------------------------------------
# Copyright (c) 2010-2013, EVEthing team
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
# ------------------------------------------------------------------------------
import datetime
from decimal import Decimal
from .apitask import APITask
from thing.models import Alliance, Character, Contract, Corporation, Event, Station, APIKey
class Contracts(APITask):
name = 'thing.contracts'
def run(self, url, taskstate_id, apikey_id, character_id):
if self.init(taskstate_id, apikey_id) is False:
return
# Make sure the character exists
try:
character = Character.objects.select_related('details').get(pk=character_id)
except Character.DoesNotExist:
self.log_warn('Character %s does not exist!', character_id)
return
now = datetime.datetime.now()
# Initialise for corporate query
if self.apikey.key_type == APIKey.CORPORATION_TYPE:
c_filter = Contract.objects.filter(corporation=self.apikey.corporation)
# Initialise for character query
else:
c_filter = Contract.objects.filter(character=character, corporation__isnull=True)
params = {'characterID': character_id}
if self.fetch_api(url, params) is False or self.root is None:
return
# Retrieve a list of this user's characters and corporations
# user_chars = list(Character.objects.filter(apikeys__user=self.apikey.user).values_list('id', flat=True))
# user_corps = list(APIKey.objects.filter(user=self.apikey.user).exclude(
# corpasdasd_character=None).values_list('corpasd_character__corporation__id', flat=True))
# First we need to get all of the acceptor and assignee IDs
contract_ids = set()
station_ids = set()
lookup_ids = set()
lookup_corp_ids = set()
contract_rows = []
# <row contractID="58108507" issuerID="2004011913" issuerCorpID="751993277" assigneeID="401273477"
# acceptorID="0" startStationID="60014917" endStationID="60003760" type="Courier" status="Outstanding"
# title="" forCorp="0" availability="Private" dateIssued="2012-08-02 06:50:29"
# dateExpired="2012-08-09 06:50:29" dateAccepted="" numDays="7" dateCompleted="" price="0.00"
# reward="3000000.00" collateral="0.00" buyout="0.00" volume="10000"/>
for row in self.root.findall('result/rowset/row'):
if self.apikey.key_type == APIKey.CORPORATION_TYPE:
# corp keys don't care about non-corp orders
if row.attrib['forCorp'] == '0':
continue
# corp keys don't care about orders they didn't issue - another fun
# bug where corp keys see alliance contracts they didn't make :ccp:
if self.apikey.corporation.id not in (
int(row.attrib['issuerCorpID']), int(row.attrib['assigneeID']), int(row.attrib['acceptorID'])
):
# logger.info('Skipping non-corp contract :ccp:')
continue
# non-corp keys don't care about corp orders
if self.apikey.key_type != APIKey.CORPORATION_TYPE and row.attrib['forCorp'] == '1':
continue
contract_ids.add(int(row.attrib['contractID']))
station_ids.add(int(row.attrib['startStationID']))
station_ids.add(int(row.attrib['endStationID']))
lookup_ids.add(int(row.attrib['issuerID']))
lookup_corp_ids.add(int(row.attrib['issuerCorpID']))
if row.attrib['assigneeID'] != '0':
lookup_ids.add(int(row.attrib['assigneeID']))
if row.attrib['acceptorID'] != '0':
lookup_ids.add(int(row.attrib['acceptorID']))
contract_rows.append(row)
# Fetch bulk data
char_map = Character.objects.in_bulk(lookup_ids)
corp_map = Corporation.objects.in_bulk(lookup_ids | lookup_corp_ids)
alliance_map = Alliance.objects.in_bulk(lookup_ids)
station_map = Station.objects.in_bulk(station_ids)
# Add missing IDs as *UNKNOWN* Characters for now
new = []
for new_id in lookup_ids.difference(char_map, corp_map, alliance_map, lookup_corp_ids):
char = Character(
id=new_id,
name="*UNKNOWN*",
)
new.append(char)
char_map[new_id] = char
if new:
Character.objects.bulk_create(new)
# Add missing Corporations too
new = []
for new_id in lookup_corp_ids.difference(corp_map):
corp = Corporation(
id=new_id,
name="*UNKNOWN*",
)
new.append(corp)
corp_map[new_id] = corp
if new:
Corporation.objects.bulk_create(new)
# Fetch all existing contracts
c_map = {}
for contract in c_filter.filter(contract_id__in=contract_ids):
c_map[contract.contract_id] = contract
# Finally we can process the contract rows themselves
new_contracts = []
new_events = []
# <row contractID="58108507" issuerID="2004011913" issuerCorpID="751993277" assigneeID="401273477"
# acceptorID="0" startStationID="60014917" endStationID="60003760" type="Courier" status="Outstanding"
# title="" forCorp="0" availability="Private" dateIssued="2012-08-02 06:50:29" dateExpired="2012-08-09 06:50:29"
# dateAccepted="" numDays="7" dateCompleted="" price="0.00" reward="3000000.00" collateral="0.00" buyout="0.00"
# volume="10000"/>
for row in contract_rows:
contractID = int(row.attrib['contractID'])
issuer_char = char_map.get(int(row.attrib['issuerID']))
if issuer_char is None:
self.log_warn('Invalid issuerID %s', row.attrib['issuerID'])
continue
issuer_corp = corp_map.get(int(row.attrib['issuerCorpID']))
if issuer_corp is None:
self.log_warn('Invalid issuerCorpID %s', row.attrib['issuerCorpID'])
continue
start_station = station_map.get(int(row.attrib['startStationID']))
if start_station is None:
self.log_warn('Invalid startStationID %s', row.attrib['startStationID'])
continue
end_station = station_map.get(int(row.attrib['endStationID']))
if end_station is None:
self.log_warn('Invalid endStationID %s', row.attrib['endStationID'])
continue
assigneeID = int(row.attrib['assigneeID'])
acceptorID = int(row.attrib['acceptorID'])
dateIssued = self.parse_api_date(row.attrib['dateIssued'])
dateExpired = self.parse_api_date(row.attrib['dateExpired'])
dateAccepted = row.attrib['dateAccepted']
if dateAccepted:
dateAccepted = self.parse_api_date(dateAccepted)
else:
dateAccepted = None
dateCompleted = row.attrib['dateCompleted']
if dateCompleted:
dateCompleted = self.parse_api_date(dateCompleted)
else:
dateCompleted = None
type = row.attrib['type']
if type == 'ItemExchange':
type = 'Item Exchange'
contract = c_map.get(contractID, None)
# Contract exists, maybe update stuff
if contract is not None:
if contract.status != row.attrib['status']:
text = "Contract %s changed status from '%s' to '%s'" % (
contract, contract.status, row.attrib['status'])
new_events.append(Event(
user_id=self.apikey.user.id,
issued=now,
text=text,
))
contract.status = row.attrib['status']
contract.date_accepted = dateAccepted
contract.date_completed = dateCompleted
contract.acceptor_id = acceptorID
contract.save()
# Contract does not exist, make a new one
else:
contract = Contract(
character=character,
contract_id=contractID,
issuer_char=issuer_char,
issuer_corp=issuer_corp,
assignee_id=assigneeID,
acceptor_id=acceptorID,
start_station=start_station,
end_station=end_station,
type=type,
status=row.attrib['status'],
title=row.attrib['title'],
for_corp=(row.attrib['forCorp'] == '1'),
public=(row.attrib['availability'].lower() == 'public'),
date_issued=dateIssued,
date_expired=dateExpired,
date_accepted=dateAccepted,
date_completed=dateCompleted,
num_days=int(row.attrib['numDays']),
price=Decimal(row.attrib['price']),
reward=Decimal(row.attrib['reward']),
collateral=Decimal(row.attrib['collateral']),
buyout=Decimal(row.attrib['buyout']),
volume=Decimal(row.attrib['volume']),
)
if self.apikey.key_type == APIKey.CORPORATION_TYPE:
contract.corporation = self.apikey.corporation
new_contracts.append(contract)
# If this contract is a new contract in a non-completed state, log an event
if contract.status in ('Outstanding', 'InProgress'):
# if assigneeID in user_chars or assigneeID in user_corps:
assignee = char_map.get(assigneeID, corp_map.get(assigneeID, alliance_map.get(assigneeID)))
if assignee is not None:
text = "Contract %s was created from '%s' to '%s' with status '%s'" % (
contract, contract.get_issuer_name(), assignee.name, contract.status)
new_events.append(Event(
user_id=self.apikey.user.id,
issued=now,
text=text,
))
# And save the damn things
Contract.objects.bulk_create(new_contracts)
Event.objects.bulk_create(new_events)
# Force the queryset to update
# c_filter.update()
# # Now go fetch items for each contract
# items_url = url.replace('Contracts', 'ContractItems')
# new = []
# seen_contracts = []
# # Apparently courier contracts don't have ContractItems support? :ccp:
# for contract in c_filter.filter(retrieved_items=False).exclude(type='Courier'):
# params['contractID'] = contract.contract_id
# if self.fetch_api(items_url, params) is False or self.root is None:
# continue
# for row in self.root.findall('result/rowset/row'):
# new.append(ContractItem(
# contract_id=contract.contract_id,
# item_id=row.attrib['typeID'],
# quantity=row.attrib['quantity'],
# raw_quantity=row.attrib.get('rawQuantity', 0),
# singleton=row.attrib['singleton'] == '1',
# included=row.attrib['included'] == '1',
# ))
# seen_contracts.append(contract.contract_id)
# if new:
# ContractItem.objects.bulk_create(new)
# c_filter.filter(contract_id__in=seen_contracts).update(retrieved_items=True)
return True
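# Hedged sketch (illustrative only, not part of the task above): shows how a raw
# API <row .../> element like the one quoted in the comments maps onto the
# attribute conversions performed in run(). The task itself gets rows from
# self.root (populated by fetch_api); xml.etree is used here only for the demo.
def _example_parse_contract_row():
    from xml.etree import ElementTree
    row = ElementTree.fromstring(
        '<row contractID="58108507" issuerID="2004011913" issuerCorpID="751993277" '
        'assigneeID="401273477" acceptorID="0" type="Courier" status="Outstanding" '
        'forCorp="0" price="0.00" reward="3000000.00" volume="10000"/>'
    )
    return {
        'contract_id': int(row.attrib['contractID']),
        'issuer_id': int(row.attrib['issuerID']),
        'for_corp': row.attrib['forCorp'] == '1',
        'reward': Decimal(row.attrib['reward']),
        'volume': Decimal(row.attrib['volume']),
    }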
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import logging
from math import ceil
import sys
import numpy as np
import tensorflow as tf
VGG_MEAN = [103.939, 116.779, 123.68]
class FCN16VGG:
def __init__(self, vgg16_npy_path=None):
if vgg16_npy_path is None:
path = sys.modules[self.__class__.__module__].__file__
# print path
path = os.path.abspath(os.path.join(path, os.pardir))
# print path
path = os.path.join(path, "vgg16.npy")
vgg16_npy_path = path
logging.info("Load npy file from '%s'.", vgg16_npy_path)
if not os.path.isfile(vgg16_npy_path):
logging.error(("File '%s' not found. Download it from "
"https://dl.dropboxusercontent.com/u/"
"50333326/vgg16.npy"), vgg16_npy_path)
sys.exit(1)
self.data_dict = np.load(vgg16_npy_path, encoding='latin1').item()
self.wd = 5e-4
print("npy file loaded")
def build(self, rgb, train=False, num_classes=20, random_init_fc8=False,
debug=False):
"""
Build the VGG model using loaded weights
Parameters
----------
rgb: image batch tensor
Image in RGB shape, scaled to the interval [0, 255]
train: bool
Whether to build train or inference graph
num_classes: int
How many classes should be predicted (by fc8)
random_init_fc8 : bool
Whether to initialize fc8 layer randomly.
Finetuning is required in this case.
debug: bool
Whether to print additional Debug Information.
"""
# Convert RGB to BGR
with tf.name_scope('Processing'):
red, green, blue = tf.split(3, 3, rgb)
# assert red.get_shape().as_list()[1:] == [224, 224, 1]
# assert green.get_shape().as_list()[1:] == [224, 224, 1]
# assert blue.get_shape().as_list()[1:] == [224, 224, 1]
bgr = tf.concat(3, [
blue - VGG_MEAN[0],
green - VGG_MEAN[1],
red - VGG_MEAN[2],
])
if debug:
bgr = tf.Print(bgr, [tf.shape(bgr)],
message='Shape of input image: ',
summarize=4, first_n=1)
self.conv1_1 = self._conv_layer(bgr, "conv1_1")
self.conv1_2 = self._conv_layer(self.conv1_1, "conv1_2")
self.pool1 = self._max_pool(self.conv1_2, 'pool1', debug)
self.conv2_1 = self._conv_layer(self.pool1, "conv2_1")
self.conv2_2 = self._conv_layer(self.conv2_1, "conv2_2")
self.pool2 = self._max_pool(self.conv2_2, 'pool2', debug)
self.conv3_1 = self._conv_layer(self.pool2, "conv3_1")
self.conv3_2 = self._conv_layer(self.conv3_1, "conv3_2")
self.conv3_3 = self._conv_layer(self.conv3_2, "conv3_3")
self.pool3 = self._max_pool(self.conv3_3, 'pool3', debug)
self.conv4_1 = self._conv_layer(self.pool3, "conv4_1")
self.conv4_2 = self._conv_layer(self.conv4_1, "conv4_2")
self.conv4_3 = self._conv_layer(self.conv4_2, "conv4_3")
self.pool4 = self._max_pool(self.conv4_3, 'pool4', debug)
self.conv5_1 = self._conv_layer(self.pool4, "conv5_1")
self.conv5_2 = self._conv_layer(self.conv5_1, "conv5_2")
self.conv5_3 = self._conv_layer(self.conv5_2, "conv5_3")
self.pool5 = self._max_pool(self.conv5_3, 'pool5', debug)
self.fc6 = self._fc_layer(self.pool5, "fc6")
if train:
self.fc6 = tf.nn.dropout(self.fc6, 0.5)
self.fc7 = self._fc_layer(self.fc6, "fc7")
if train:
self.fc7 = tf.nn.dropout(self.fc7, 0.5)
if random_init_fc8:
self.score_fr = self._score_layer(self.fc7, "score_fr",
num_classes)
else:
self.score_fr = self._fc_layer(self.fc7, "score_fr",
num_classes=num_classes,
relu=False)
self.pred = tf.argmax(self.score_fr, dimension=3)
self.upscore2 = self._upscore_layer(self.score_fr,
shape=tf.shape(self.pool4),
num_classes=num_classes,
debug=debug, name='upscore2',
ksize=4, stride=2)
self.score_pool4 = self._score_layer(self.pool4, "score_pool4",
num_classes=num_classes)
self.fuse_pool4 = tf.add(self.upscore2, self.score_pool4)
self.upscore32 = self._upscore_layer(self.fuse_pool4,
shape=tf.shape(bgr),
num_classes=num_classes,
debug=debug, name='upscore32',
ksize=32, stride=16)
self.pred_up = tf.argmax(self.upscore32, dimension=3)
def _max_pool(self, bottom, name, debug):
pool = tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='SAME', name=name)
if debug:
pool = tf.Print(pool, [tf.shape(pool)],
message='Shape of %s' % name,
summarize=4, first_n=1)
return pool
def _conv_layer(self, bottom, name):
with tf.variable_scope(name) as scope:
filt = self.get_conv_filter(name)
conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')
conv_biases = self.get_bias(name)
bias = tf.nn.bias_add(conv, conv_biases)
relu = tf.nn.relu(bias)
# Add summary to Tensorboard
_activation_summary(relu)
return relu
def _fc_layer(self, bottom, name, num_classes=None,
relu=True, debug=False):
with tf.variable_scope(name) as scope:
shape = bottom.get_shape().as_list()
if name == 'fc6':
filt = self.get_fc_weight_reshape(name, [7, 7, 512, 4096])
elif name == 'score_fr':
name = 'fc8' # Name of score_fr layer in VGG Model
filt = self.get_fc_weight_reshape(name, [1, 1, 4096, 1000],
num_classes=num_classes)
else:
filt = self.get_fc_weight_reshape(name, [1, 1, 4096, 4096])
conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')
conv_biases = self.get_bias(name, num_classes=num_classes)
bias = tf.nn.bias_add(conv, conv_biases)
if relu:
bias = tf.nn.relu(bias)
_activation_summary(bias)
if debug:
bias = tf.Print(bias, [tf.shape(bias)],
message='Shape of %s' % name,
summarize=4, first_n=1)
return bias
def _score_layer(self, bottom, name, num_classes):
with tf.variable_scope(name) as scope:
# get number of input channels
in_features = bottom.get_shape()[3].value
shape = [1, 1, in_features, num_classes]
# He initialization scheme
if name == "score_fr":
num_input = in_features
stddev = (2 / num_input)**0.5
elif name == "score_pool4":
stddev = 0.001
# Apply convolution
w_decay = self.wd
weights = self._variable_with_weight_decay(shape, stddev, w_decay)
conv = tf.nn.conv2d(bottom, weights, [1, 1, 1, 1], padding='SAME')
# Apply bias
conv_biases = self._bias_variable([num_classes], constant=0.0)
bias = tf.nn.bias_add(conv, conv_biases)
_activation_summary(bias)
return bias
def _upscore_layer(self, bottom, shape,
num_classes, name, debug,
ksize=4, stride=2):
strides = [1, stride, stride, 1]
with tf.variable_scope(name):
in_features = bottom.get_shape()[3].value
if shape is None:
# Compute shape out of Bottom
in_shape = tf.shape(bottom)
h = ((in_shape[1] - 1) * stride) + 1
w = ((in_shape[2] - 1) * stride) + 1
new_shape = [in_shape[0], h, w, num_classes]
else:
new_shape = [shape[0], shape[1], shape[2], num_classes]
output_shape = tf.pack(new_shape)
logging.debug("Layer: %s, Fan-in: %d" % (name, in_features))
f_shape = [ksize, ksize, num_classes, in_features]
# create
num_input = ksize * ksize * in_features / stride
stddev = (2 / num_input)**0.5
weights = self.get_deconv_filter(f_shape)
deconv = tf.nn.conv2d_transpose(bottom, weights, output_shape,
strides=strides, padding='SAME')
if debug:
deconv = tf.Print(deconv, [tf.shape(deconv)],
message='Shape of %s' % name,
summarize=4, first_n=1)
_activation_summary(deconv)
return deconv
def get_deconv_filter(self, f_shape):
width = f_shape[0]
height = f_shape[1]
f = ceil(width/2.0)
c = (2 * f - 1 - f % 2) / (2.0 * f)
bilinear = np.zeros([f_shape[0], f_shape[1]])
for x in range(width):
for y in range(height):
value = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
bilinear[x, y] = value
weights = np.zeros(f_shape)
for i in range(f_shape[2]):
weights[:, :, i, i] = bilinear
init = tf.constant_initializer(value=weights,
dtype=tf.float32)
return tf.get_variable(name="up_filter", initializer=init,
shape=weights.shape)
def get_conv_filter(self, name):
init = tf.constant_initializer(value=self.data_dict[name][0],
dtype=tf.float32)
shape = self.data_dict[name][0].shape
print('Layer name: %s' % name)
print('Layer shape: %s' % str(shape))
var = tf.get_variable(name="filter", initializer=init, shape=shape)
if not tf.get_variable_scope().reuse:
weight_decay = tf.mul(tf.nn.l2_loss(var), self.wd,
name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
def get_bias(self, name, num_classes=None):
bias_wights = self.data_dict[name][1]
shape = self.data_dict[name][1].shape
if name == 'fc8':
bias_wights = self._bias_reshape(bias_wights, shape[0],
num_classes)
shape = [num_classes]
init = tf.constant_initializer(value=bias_wights,
dtype=tf.float32)
return tf.get_variable(name="biases", initializer=init, shape=shape)
def get_fc_weight(self, name):
init = tf.constant_initializer(value=self.data_dict[name][0],
dtype=tf.float32)
shape = self.data_dict[name][0].shape
var = tf.get_variable(name="weights", initializer=init, shape=shape)
if not tf.get_variable_scope().reuse:
weight_decay = tf.mul(tf.nn.l2_loss(var), self.wd,
name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
def _bias_reshape(self, bweight, num_orig, num_new):
""" Build bias weights for filter produces with `_summary_reshape`
"""
n_averaged_elements = num_orig//num_new
avg_bweight = np.zeros(num_new)
for i in range(0, num_orig, n_averaged_elements):
start_idx = i
end_idx = start_idx + n_averaged_elements
avg_idx = start_idx//n_averaged_elements
if avg_idx == num_new:
break
avg_bweight[avg_idx] = np.mean(bweight[start_idx:end_idx])
return avg_bweight
def _summary_reshape(self, fweight, shape, num_new):
""" Produce weights for a reduced fully-connected layer.
FC8 of VGG produces 1000 classes. Most semantic segmentation
tasks require far fewer classes. This reshapes the original weights
to be used in a fully-convolutional layer which produces num_new
classes. To achieve this, the average (mean) of n adjacent classes is
taken.
Consider reordering fweight to preserve the semantic meaning of the
weights.
Args:
fweight: original weights
shape: shape of the desired fully-convolutional layer
num_new: number of new classes
Returns:
Filter weights for `num_new` classes.
"""
num_orig = shape[3]
shape[3] = num_new
assert(num_new < num_orig)
n_averaged_elements = num_orig//num_new
avg_fweight = np.zeros(shape)
for i in range(0, num_orig, n_averaged_elements):
start_idx = i
end_idx = start_idx + n_averaged_elements
avg_idx = start_idx//n_averaged_elements
if avg_idx == num_new:
break
avg_fweight[:, :, :, avg_idx] = np.mean(
fweight[:, :, :, start_idx:end_idx], axis=3)
return avg_fweight
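# Worked example of the averaging above (comment only): reducing 8 original
# classes to num_new=4 averages adjacent pairs, so class weights
# [w0, w1, w2, w3, w4, w5, w6, w7] become
# [(w0+w1)/2, (w2+w3)/2, (w4+w5)/2, (w6+w7)/2]; for VGG's 1000 classes and
# num_new=20, each new class is the mean of a block of 50 original classes.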
def _variable_with_weight_decay(self, shape, stddev, wd):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal
distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
Returns:
Variable Tensor
"""
initializer = tf.truncated_normal_initializer(stddev=stddev)
var = tf.get_variable('weights', shape=shape,
initializer=initializer)
if wd and (not tf.get_variable_scope().reuse):
weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
def _bias_variable(self, shape, constant=0.0):
initializer = tf.constant_initializer(constant)
return tf.get_variable(name='biases', shape=shape,
initializer=initializer)
def get_fc_weight_reshape(self, name, shape, num_classes=None):
print('Layer name: %s' % name)
print('Layer shape: %s' % shape)
weights = self.data_dict[name][0]
weights = weights.reshape(shape)
if num_classes is not None:
weights = self._summary_reshape(weights, shape,
num_new=num_classes)
init = tf.constant_initializer(value=weights,
dtype=tf.float32)
return tf.get_variable(name="weights", initializer=init, shape=shape)
def _activation_summary(x):
"""Helper to create summaries for activations.
Creates a summary that provides a histogram of activations.
Creates a summary that measures the sparsity of activations.
Args:
x: Tensor
Returns:
nothing
"""
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on tensorboard.
tensor_name = x.op.name
# tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
tf.histogram_summary(tensor_name + '/activations', x)
tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
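# Hedged, numpy-only sketch (not part of the original model): reproduces the
# bilinear kernel that FCN16VGG.get_deconv_filter() builds, so the weight values
# can be inspected without creating any TensorFlow variables.
def _example_bilinear_kernel(ksize=4):
    f = ceil(ksize / 2.0)
    c = (2 * f - 1 - f % 2) / (2.0 * f)
    kernel = np.zeros([ksize, ksize])
    for x in range(ksize):
        for y in range(ksize):
            kernel[x, y] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
    # For ksize=4 this is the outer product of the 1-D weights
    # [0.25, 0.75, 0.75, 0.25], the standard 2x bilinear upsampling kernel.
    return kernel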
|
|
import json
import os
import string
import uuid
from copy import copy
from datetime import datetime
from django.apps import apps
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
import commonware.log
import jinja2
from tower import ugettext as _
import mkt
from lib.crypto import generate_key
from lib.pay_server import client
from mkt.access.models import Group
from mkt.constants.payments import (ACCESS_SIMULATE, PROVIDER_BANGO,
PROVIDER_CHOICES)
from mkt.ratings.models import Review
from mkt.site.models import ManagerBase, ModelBase
from mkt.site.storage_utils import private_storage, storage_is_remote
from mkt.tags.models import Tag
from mkt.users.models import UserForeignKey, UserProfile
from mkt.versions.models import Version
from mkt.webapps.models import Webapp
from mkt.websites.models import Website
log = commonware.log.getLogger('z.devhub')
class CantCancel(Exception):
pass
class SolitudeSeller(ModelBase):
# TODO: When Solitude allows for it, this should be updated to be 1:1 with
# users.
user = UserForeignKey()
uuid = models.CharField(max_length=255, unique=True)
resource_uri = models.CharField(max_length=255)
class Meta:
db_table = 'payments_seller'
@classmethod
def create(cls, user):
uuid_ = str(uuid.uuid4())
res = client.api.generic.seller.post(data={'uuid': uuid_})
uri = res['resource_uri']
obj = cls.objects.create(user=user, uuid=uuid_, resource_uri=uri)
log.info('[User:%s] Created Solitude seller (uuid:%s)' %
(user, uuid_))
return obj
class PaymentAccount(ModelBase):
user = UserForeignKey()
name = models.CharField(max_length=64)
agreed_tos = models.BooleanField(default=False)
solitude_seller = models.ForeignKey(SolitudeSeller)
# These two fields can go away when we're not 1:1 with SolitudeSellers.
seller_uri = models.CharField(max_length=255, unique=True)
uri = models.CharField(max_length=255, unique=True)
# A soft-delete so we can talk to Solitude asynchronously.
inactive = models.BooleanField(default=False)
# The id for this account from the provider.
account_id = models.CharField(max_length=255)
# Each account will be for a particular provider.
provider = models.IntegerField(choices=PROVIDER_CHOICES,
default=PROVIDER_BANGO)
shared = models.BooleanField(default=False)
class Meta:
db_table = 'payment_accounts'
unique_together = ('user', 'uri')
def cancel(self, disable_refs=False):
"""Cancels the payment account.
If `disable_refs` is set, existing apps that use this payment account
will be set to STATUS_NULL.
"""
account_refs = AddonPaymentAccount.objects.filter(account_uri=self.uri)
if self.shared and account_refs:
# With sharing a payment account comes great responsibility. It
# would be really mean to create a payment account, share it
# and have lots of apps use it. Then one day you remove it and
# make a whole pile of apps in the marketplace get removed from
# the store, or have in-app payments fail.
#
# For the moment I'm just stopping this completely; if this ever
# happens, we'll have to go through a deprecation phase:
# - let all the apps that use it know
# - when they have all stopped sharing it
# - re-run this
log.error('Cannot cancel a shared payment account that has '
'apps using it.')
raise CantCancel('You cannot cancel a shared payment account.')
self.update(inactive=True)
log.info('Soft-deleted payment account (uri: %s)' % self.uri)
for acc_ref in account_refs:
if (disable_refs and
not acc_ref.addon.has_multiple_payment_accounts()):
log.info('Changing app status to NULL for app: {0} '
'because of payment account deletion'.format(
acc_ref.addon_id))
acc_ref.addon.update(status=mkt.STATUS_NULL)
log.info('Deleting AddonPaymentAccount for app: {0} because of '
'payment account deletion'.format(acc_ref.addon_id))
acc_ref.delete()
def get_provider(self):
"""Returns an instance of the payment provider for this account."""
# TODO: fix circular import. Providers imports models which imports
# forms which imports models.
from mkt.developers.providers import get_provider
return get_provider(id=self.provider)
def __unicode__(self):
date = self.created.strftime('%m/%y')
if not self.shared:
return u'%s - %s' % (date, self.name)
# L10n: {0} is the name of the account.
return _(u'Donate to {0}'.format(self.name))
def get_agreement_url(self):
return reverse('mkt.developers.provider.agreement', args=[self.pk])
class AddonPaymentAccount(ModelBase):
addon = models.ForeignKey(
'webapps.Webapp', related_name='app_payment_accounts')
payment_account = models.ForeignKey(PaymentAccount)
account_uri = models.CharField(max_length=255)
product_uri = models.CharField(max_length=255, unique=True)
class Meta:
db_table = 'addon_payment_account'
@property
def user(self):
return self.payment_account.user
class UserInappKey(ModelBase):
solitude_seller = models.ForeignKey(SolitudeSeller)
seller_product_pk = models.IntegerField(unique=True)
def secret(self):
return self._product().get()['secret']
def public_id(self):
return self._product().get()['public_id']
def reset(self):
self._product().patch(data={'secret': generate_key(48)})
@classmethod
def create(cls, user, public_id=None, secret=None, access_type=None):
if public_id is None:
public_id = str(uuid.uuid4())
if secret is None:
secret = generate_key(48)
if access_type is None:
access_type = ACCESS_SIMULATE
sel = SolitudeSeller.create(user)
prod = client.api.generic.product.post(data={
'seller': sel.resource_uri, 'secret': secret,
'external_id': str(uuid.uuid4()), 'public_id': public_id,
'access': access_type,
})
log.info(u'User %s created an in-app payments dev key product=%s '
u'with %s' % (unicode(user), prod['resource_pk'], sel))
return cls.objects.create(solitude_seller=sel,
seller_product_pk=prod['resource_pk'])
def _product(self):
return client.api.generic.product(self.seller_product_pk)
class Meta:
db_table = 'user_inapp_keys'
class PreloadTestPlan(ModelBase):
addon = models.ForeignKey('webapps.Webapp')
last_submission = models.DateTimeField(auto_now_add=True)
filename = models.CharField(max_length=60)
status = models.PositiveSmallIntegerField(default=mkt.STATUS_PUBLIC)
class Meta:
db_table = 'preload_test_plans'
ordering = ['-last_submission']
@property
def preload_test_plan_url(self):
if storage_is_remote():
return private_storage.url(self.preload_test_plan_path)
else:
host = (settings.PRIVATE_MIRROR_URL if self.addon.is_disabled
else settings.LOCAL_MIRROR_URL)
return os.path.join(host, str(self.addon.id), self.filename)
@property
def preload_test_plan_path(self):
return os.path.join(settings.ADDONS_PATH, str(self.addon_id),
self.filename)
# When an app is deleted we need to remove the preload test plan.
def preload_cleanup(*args, **kwargs):
instance = kwargs.get('instance')
PreloadTestPlan.objects.filter(addon=instance).delete()
models.signals.post_delete.connect(preload_cleanup, sender=Webapp,
dispatch_uid='webapps_preload_cleanup')
class AppLog(ModelBase):
"""
This table is for indexing the activity log by app.
"""
addon = models.ForeignKey('webapps.Webapp', db_constraint=False)
activity_log = models.ForeignKey('ActivityLog')
class Meta:
db_table = 'log_activity_app'
ordering = ('-created',)
class CommentLog(ModelBase):
"""
This table is for indexing the activity log by comment.
"""
activity_log = models.ForeignKey('ActivityLog')
comments = models.TextField()
class Meta:
db_table = 'log_activity_comment'
ordering = ('-created',)
class VersionLog(ModelBase):
"""
This table is for indexing the activity log by version.
"""
activity_log = models.ForeignKey('ActivityLog')
version = models.ForeignKey(Version)
class Meta:
db_table = 'log_activity_version'
ordering = ('-created',)
class UserLog(ModelBase):
"""
This table is for indexing the activity log by user.
Note: This includes activity performed unto the user.
"""
activity_log = models.ForeignKey('ActivityLog')
user = models.ForeignKey(UserProfile)
class Meta:
db_table = 'log_activity_user'
ordering = ('-created',)
class GroupLog(ModelBase):
"""
This table is for indexing the activity log by access group.
"""
activity_log = models.ForeignKey('ActivityLog')
group = models.ForeignKey(Group)
class Meta:
db_table = 'log_activity_group'
ordering = ('-created',)
class ActivityLogManager(ManagerBase):
def for_apps(self, apps):
vals = (AppLog.objects.filter(addon__in=apps)
.values_list('activity_log', flat=True))
if vals:
return self.filter(pk__in=list(vals))
else:
return self.none()
def for_version(self, version):
vals = (VersionLog.objects.filter(version=version)
.values_list('activity_log', flat=True))
return self.filter(pk__in=list(vals))
def for_group(self, group):
return self.filter(grouplog__group=group)
def for_user(self, user):
vals = (UserLog.objects.filter(user=user)
.values_list('activity_log', flat=True))
return self.filter(pk__in=list(vals))
def for_developer(self):
return self.exclude(action__in=mkt.LOG_ADMINS + mkt.LOG_HIDE_DEVELOPER)
def admin_events(self):
return self.filter(action__in=mkt.LOG_ADMINS)
def editor_events(self):
return self.filter(action__in=mkt.LOG_EDITORS)
def review_queue(self, webapp=False):
qs = self._by_type(webapp)
return (qs.filter(action__in=mkt.LOG_REVIEW_QUEUE)
.exclude(user__id=settings.TASK_USER_ID))
def total_reviews(self, webapp=False):
"""Return the top users, and their # of reviews."""
qs = self._by_type(webapp)
return (qs.values('user', 'user__display_name', 'user__email')
.filter(action__in=mkt.LOG_REVIEW_QUEUE)
.exclude(user__id=settings.TASK_USER_ID)
.annotate(approval_count=models.Count('id'))
.order_by('-approval_count'))
def monthly_reviews(self, webapp=False):
"""Return the top users for the month, and their # of reviews."""
qs = self._by_type(webapp)
now = datetime.now()
created_date = datetime(now.year, now.month, 1)
return (qs.values('user', 'user__display_name', 'user__email')
.filter(created__gte=created_date,
action__in=mkt.LOG_REVIEW_QUEUE)
.exclude(user__id=settings.TASK_USER_ID)
.annotate(approval_count=models.Count('id'))
.order_by('-approval_count'))
def user_position(self, values_qs, user):
try:
return next(i for (i, d) in enumerate(list(values_qs))
if d.get('user') == user.id) + 1
except StopIteration:
return None
def total_reviews_user_position(self, user, webapp=False):
return self.user_position(self.total_reviews(webapp), user)
def monthly_reviews_user_position(self, user, webapp=False):
return self.user_position(self.monthly_reviews(webapp), user)
def _by_type(self, webapp=False):
qs = super(ActivityLogManager, self).get_queryset()
return qs.extra(
tables=['log_activity_app'],
where=['log_activity_app.activity_log_id=log_activity.id'])
class SafeFormatter(string.Formatter):
"""A replacement for str.format that escapes interpolated values."""
def get_field(self, *args, **kw):
# obj is the value getting interpolated into the string.
obj, used_key = super(SafeFormatter, self).get_field(*args, **kw)
return jinja2.escape(obj), used_key
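# Hedged illustration (not part of the original models): only the interpolated
# values are escaped by SafeFormatter, the template's own markup is left intact,
# which is what ActivityLog.f below relies on when it builds links.
def _safe_formatter_example():
    return SafeFormatter().format(u'<b>{0}</b>', u'<script>')
    # -> u'<b>&lt;script&gt;</b>'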
class ActivityLog(ModelBase):
TYPES = sorted([(value.id, key) for key, value in mkt.LOG.items()])
user = models.ForeignKey('users.UserProfile', null=True)
action = models.SmallIntegerField(choices=TYPES, db_index=True)
_arguments = models.TextField(blank=True, db_column='arguments')
_details = models.TextField(blank=True, db_column='details')
objects = ActivityLogManager()
formatter = SafeFormatter()
class Meta:
db_table = 'log_activity'
ordering = ('-created',)
def f(self, *args, **kw):
"""Calls SafeFormatter.format and returns a Markup string."""
# SafeFormatter escapes everything so this is safe.
return jinja2.Markup(self.formatter.format(*args, **kw))
@property
def arguments(self):
try:
# d is a structure:
# ``d = [{'addons.addon':12}, {'addons.addon':1}, ... ]``
d = json.loads(self._arguments)
except:
log.debug('unserializing data from addon_log failed: %s' % self.id)
return None
objs = []
for item in d:
# item has only one element.
model_name, pk = item.items()[0]
if model_name in ('str', 'int', 'null'):
objs.append(pk)
else:
(app_label, model_name) = model_name.split('.')
model = apps.get_model(app_label, model_name)
# Cope with soft deleted models.
if hasattr(model, 'with_deleted'):
objs.extend(model.with_deleted.filter(pk=pk))
else:
objs.extend(model.objects.filter(pk=pk))
return objs
@arguments.setter
def arguments(self, args=[]):
"""
Takes an object or a tuple of objects, serializes them, and stores the
result in the db as a json string.
"""
if args is None:
args = []
if not isinstance(args, (list, tuple)):
args = (args,)
serialize_me = []
for arg in args:
if isinstance(arg, basestring):
serialize_me.append({'str': arg})
elif isinstance(arg, (int, long)):
serialize_me.append({'int': arg})
elif isinstance(arg, tuple):
# Instead of passing an addon instance you can pass a tuple:
# (Webapp, 3) for Webapp with pk=3
serialize_me.append(dict(((unicode(arg[0]._meta), arg[1]),)))
elif arg is not None:
serialize_me.append(dict(((unicode(arg._meta), arg.pk),)))
self._arguments = json.dumps(serialize_me)
@property
def details(self):
if self._details:
return json.loads(self._details)
@details.setter
def details(self, data):
self._details = json.dumps(data)
@property
def log(self):
return mkt.LOG_BY_ID[self.action]
def to_string(self, type_=None):
log_type = mkt.LOG_BY_ID[self.action]
if type_ and hasattr(log_type, '%s_format' % type_):
format = getattr(log_type, '%s_format' % type_)
else:
format = log_type.format
# We need to copy arguments so we can remove elements from it
# while we loop over self.arguments.
arguments = copy(self.arguments)
addon = None
review = None
version = None
collection = None
tag = None
group = None
website = None
for arg in self.arguments:
if isinstance(arg, Webapp) and not addon:
addon = self.f(u'<a href="{0}">{1}</a>',
arg.get_url_path(), arg.name)
arguments.remove(arg)
if isinstance(arg, Review) and not review:
review = self.f(u'<a href="{0}">{1}</a>',
arg.get_url_path(), _('Review'))
arguments.remove(arg)
if isinstance(arg, Version) and not version:
text = _('Version {0}')
version = self.f(text, arg.version)
arguments.remove(arg)
if isinstance(arg, Tag) and not tag:
if arg.can_reverse():
tag = self.f(u'<a href="{0}">{1}</a>',
arg.get_url_path(), arg.tag_text)
else:
tag = self.f('{0}', arg.tag_text)
if isinstance(arg, Group) and not group:
group = arg.name
arguments.remove(arg)
if isinstance(arg, Website) and not website:
website = self.f(u'<a href="{0}">{1}</a>',
arg.get_url_path(), arg.name)
arguments.remove(arg)
try:
kw = dict(addon=addon, review=review, version=version, group=group,
collection=collection, tag=tag,
user=self.user.display_name)
return self.f(format, *arguments, **kw)
except (AttributeError, KeyError, IndexError):
log.warning('%d contains garbage data' % (self.id or 0))
return 'Something magical happened.'
def __unicode__(self):
return self.to_string()
def __html__(self):
return self
|
|
from functools import reduce
from SDWLE.agents.trade.util import memoized
class FakeCard:
def __init__(self, card):
self.health = card.health
self.attack = card.base_attack
self.base_attack = card.base_attack
if hasattr(card, "taunt"):
self.taunt = card.taunt
class Trade:
def __init__(self, player, my_minion, opp_minion):
self.player = player
self.my_minion = my_minion
self.opp_minion = opp_minion
@memoized
def after_attack(self):
res = {}
res["my_minion"] = self.after_damage(self.my_minion, self.opp_minion)
res["opp_minion"] = self.after_damage(self.opp_minion, self.my_minion)
return res
def after_damage(self, target, attacker):
res = FakeCard(target)
res.health -= attacker.calculate_attack()
return res
def start_value(self):
me = self.minion_value(self.my_minion)
opp = self.minion_value(self.opp_minion)
return me - opp
def end_value(self):
me = self.minion_value(self.after_attack()['my_minion'])
opp = self.minion_value(self.after_attack()['opp_minion'])
return me - opp
@memoized
def value(self):
res = self.end_value() - self.start_value()
if self.after_attack()['my_minion'].health > 0 and \
self.after_attack()['opp_minion'].health <= 0:
res += 1.0
return round(res, 2)
def minion_desc(self, minion):
return "{} {}/{}".format(minion.try_name(), minion.base_attack,
minion.health)
def __str__(self):
s = "Trade {} for {} Value {}"
return s.format(self.minion_desc(self.my_minion),
self.minion_desc(self.opp_minion),
self.value())
def minion_value(self, minion):
if minion.health <= 0:
return 0
res = (minion.base_attack + 0.5) * minion.health ** 1.5
if minion.taunt:
res += 0.5
return res ** 0.4
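# Worked example (comment only): a 3/2 minion without taunt scores
# ((3 + 0.5) * 2 ** 1.5) ** 0.4 = 9.899... ** 0.4, about 2.50; with taunt the
# pre-exponent value gains 0.5, giving about 2.55.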
def is_opp_dead(self):
return self.after_attack()['opp_minion'].health <= 0
def needs_sequence(self):
return True
class TradeSequence:
def __init__(self, current_trades_obj, past_trades=[]):
self.past_trades = past_trades
self.current_trades_obj = current_trades_obj
def after_next_trade(self, next_trade):
past_trades = [t for t in self.past_trades]
past_trades.append(next_trade)
to = self.current_trades_obj
trades_obj = Trades(to.player, to.attack_minions,
to.opp_minions, to.opp_hero.copy(to.player))
trades_obj.attack_minions.remove(next_trade.my_minion)
if next_trade.is_opp_dead():
trades_obj.opp_minions.remove(next_trade.opp_minion)
res = TradeSequence(trades_obj, past_trades)
return res
def has_lethal(self):
return self.current_trades_obj.has_lethal()
def past_trade_value(self):
if self.has_lethal():
return 99999999
else:
return reduce(lambda s, t: s + t.value(), self.past_trades, 0.0)
@memoized
def future_trade_value(self):
if self.has_lethal():
return 9999999999
if len(self.current_trades_obj.attack_minions) == 0:
return 0.0
if len(self.past_trades) > 1:
return 0
next_trades = self.current_trades_obj.trades()
if len(next_trades) == 0:
return 0.0
if len(next_trades) > 1000000:
return 0.0
if self.current_trades_obj.opp_has_taunt():
best_value = -99999999999.0
for next_trade in next_trades:
next_seq = self.after_next_trade(next_trade)
full = next_trade.value() + next_seq.future_trade_value()
if full > best_value:
best_value = full
return best_value
else:
return next_trades[0].value()
@memoized
def trade_value(self):
return self.past_trade_value() + self.future_trade_value()
class FaceTrade(Trade):
def value(self):
if self.is_lethal():
return 9999999
return self.my_minion.base_attack * 0.2
def __str__(self):
return "Face {} Value {}".format(self.minion_desc(self.my_minion),
self.value())
def is_lethal(self):
return self.my_minion.base_attack >= self.opp_minion.health
def needs_sequence(self):
return False
class Trades:
def __init__(self, player, attack_minions, opp_minions, opp_hero):
self.player = player
self.attack_minions = attack_minions[0:99999]
self.opp_minions = opp_minions[0:99999]
self.opp_hero = opp_hero
def opp_has_taunt(self):
for minion in self.opp_minions:
if minion.taunt:
return True
return False
def total_attack(self):
return reduce(lambda s, i: s + i.base_attack, self.attack_minions, 0)
@memoized
def has_lethal(self):
return not self.opp_has_taunt() and \
self.total_attack() >= self.opp_hero.health
@memoized
def trade_value(self, trade):
if not trade.needs_sequence() or len(self.attack_minions) <= 1:
return trade.value()
seq = TradeSequence(self).after_next_trade(trade)
return seq.trade_value()
@memoized
def trades(self):
res = []
me = self.attack_minions
opp = self.targetable_minions(self.opp_minions)
if not self.has_lethal():
for my_minion in me:
for opp_minion in opp:
trade = Trade(self.player, my_minion, opp_minion)
res.append(trade)
if not self.opp_has_taunt():
for my_minion in me:
trade = FaceTrade(self.player, my_minion, self.opp_hero)
res.append(trade)
if self.opp_has_taunt():
if len(res) >= 12:
res = sorted(res, key=lambda t: t.value())[0:4]
elif len(res) >= 8:
res = sorted(res, key=lambda t: t.value())[0:3]
else:
res = sorted(res, key=self.trade_value)
else:
res = sorted(res, key=lambda t: t.value())
res.reverse()
return res
def targetable_minions(self, minions):
taunt = [m for m in minions if m.taunt]
if len(taunt) > 0:
return taunt
else:
return minions
def __str__(self):
res = ["TRADES:"]
for t in self.trades():
s = t.__str__()
s += " Root Value: {}".format(self.trade_value(t))
res.append(s)
return str.join("\n", res)
class TradeMixin:
def trades(self, player):
res = Trades(player, self.attack_minions(player),
player.opponent.minions, player.opponent.hero)
return [t for t in res.trades() if t.value() > -1]
class AttackMixin:
def attack_once(self, player):
trades = self.trades(player)
if len(trades) > 0:
self.current_trade = trades[0]
self.current_trade.my_minion.attack()
def attack(self, player):
if len(self.trades(player)) > 0:
self.attack_once(player)
self.attack(player)
def attack_minions(self, player):
res = [minion
for minion
in filter(lambda minion: minion.can_attack(), player.minions)]
if player.hero.can_attack() and False:
res.append(player.hero)
return res
|
|
#!/usr/bin/env python
"""
A memcached test server.
Copyright 2013 Zynga Inc.
Copyright (c) 2007 Dustin Sallings <dustin@spy.net>
"""
import asyncore
import random
import string
import socket
import struct
import time
import hmac
import heapq
import os
import sys
import memcacheConstants
from memcacheConstants import MIN_RECV_PACKET, REQ_PKT_FMT, RES_PKT_FMT
from memcacheConstants import INCRDECR_RES_FMT
from memcacheConstants import REQ_MAGIC_BYTE, RES_MAGIC_BYTE, EXTRA_HDR_FMTS
VERSION="1.0"
class BaseBackend(object):
"""Higher-level backend (processes commands and stuff)."""
# Command IDs to method names. This is used to build a dispatch dict on
# the fly.
CMDS={
memcacheConstants.CMD_GET: 'handle_get',
memcacheConstants.CMD_GETQ: 'handle_getq',
memcacheConstants.CMD_SET: 'handle_set',
memcacheConstants.CMD_ADD: 'handle_add',
memcacheConstants.CMD_REPLACE: 'handle_replace',
memcacheConstants.CMD_DELETE: 'handle_delete',
memcacheConstants.CMD_INCR: 'handle_incr',
memcacheConstants.CMD_DECR: 'handle_decr',
memcacheConstants.CMD_QUIT: 'handle_quit',
memcacheConstants.CMD_FLUSH: 'handle_flush',
memcacheConstants.CMD_NOOP: 'handle_noop',
memcacheConstants.CMD_VERSION: 'handle_version',
memcacheConstants.CMD_APPEND: 'handle_append',
memcacheConstants.CMD_PREPEND: 'handle_prepend',
memcacheConstants.CMD_SASL_LIST_MECHS: 'handle_sasl_mechs',
memcacheConstants.CMD_SASL_AUTH: 'handle_sasl_auth',
memcacheConstants.CMD_SASL_STEP: 'handle_sasl_step',
}
def __init__(self):
self.handlers={}
self.sched=[]
for id, method in self.CMDS.iteritems():
self.handlers[id]=getattr(self, method, self.handle_unknown)
def _splitKeys(self, fmt, keylen, data):
"""Split the given data into the headers as specified in the given
format, the key, and the data.
Return (hdrTuple, key, data)"""
hdrSize=struct.calcsize(fmt)
assert hdrSize <= len(data), "Data too short for " + fmt + ': ' + `data`
hdr=struct.unpack(fmt, data[:hdrSize])
assert len(data) >= hdrSize + keylen
key=data[hdrSize:keylen+hdrSize]
assert len(key) == keylen, "len(%s) == %d, expected %d" \
% (key, len(key), keylen)
val=data[keylen+hdrSize:]
return hdr, key, val
def _error(self, which, msg):
return which, 0, msg
def processCommand(self, cmd, keylen, vb, cas, data):
"""Entry point for command processing. Lower level protocol
implementations deliver values here."""
now=time.time()
while self.sched and self.sched[0][0] <= now:
print "Running delayed job."
heapq.heappop(self.sched)[1]()
hdrs, key, val=self._splitKeys(EXTRA_HDR_FMTS.get(cmd, ''),
keylen, data)
return self.handlers.get(cmd, self.handle_unknown)(cmd, hdrs, key,
cas, val)
def handle_noop(self, cmd, hdrs, key, cas, data):
"""Handle a noop"""
print "Noop"
return 0, 0, ''
def handle_unknown(self, cmd, hdrs, key, cas, data):
"""invoked for any unknown command."""
return self._error(memcacheConstants.ERR_UNKNOWN_CMD,
"The command %d is unknown" % cmd)
class DictBackend(BaseBackend):
"""Sample backend implementation with a non-expiring dict."""
def __init__(self):
super(DictBackend, self).__init__()
self.storage={}
self.held_keys={}
self.challenge = ''.join(random.sample(string.ascii_letters
+ string.digits, 32))
def __lookup(self, key):
rv=self.storage.get(key, None)
if rv:
now=time.time()
if now >= rv[1]:
print key, "expired"
del self.storage[key]
rv=None
else:
print "Miss looking up", key
return rv
def handle_get(self, cmd, hdrs, key, cas, data):
val=self.__lookup(key)
if val:
rv = 0, id(val), struct.pack(
memcacheConstants.GET_RES_FMT, val[0]) + str(val[2])
else:
rv=self._error(memcacheConstants.ERR_NOT_FOUND, 'Not found')
return rv
def handle_set(self, cmd, hdrs, key, cas, data):
print "Handling a set with", hdrs
val=self.__lookup(key)
flags, exp=hdrs  # SET extras are (flags, expiration)
def f(val):
return self.__handle_unconditional_set(cmd, hdrs, key, data)
return self._withCAS(key, cas, f)
def handle_getq(self, cmd, hdrs, key, cas, data):
rv=self.handle_get(cmd, hdrs, key, cas, data)
if rv[0] == memcacheConstants.ERR_NOT_FOUND:
print "Swallowing miss"
rv = None
return rv
def __handle_unconditional_set(self, cmd, hdrs, key, data):
exp=hdrs[1]
# An expiration of 0 means never expire; push the expiry far into the future.
if exp == 0:
exp=float(2 ** 31)
self.storage[key]=(hdrs[0], time.time() + exp, data)
print "Stored", self.storage[key], "in", key
if key in self.held_keys:
del self.held_keys[key]
return 0, id(self.storage[key]), ''
def __mutation(self, cmd, hdrs, key, data, multiplier):
amount, initial, expiration=hdrs
rv=self._error(memcacheConstants.ERR_NOT_FOUND, 'Not found')
val=self.storage.get(key, None)
print "Mutating %s, hdrs=%s, val=%s %s" % (key, `hdrs`, `val`,
multiplier)
if val:
val = (val[0], val[1], max(0, long(val[2]) + (multiplier * amount)))
self.storage[key]=val
rv=0, id(val), str(val[2])
else:
if expiration != memcacheConstants.INCRDECR_SPECIAL:
self.storage[key]=(0, time.time() + expiration, initial)
rv=0, id(self.storage[key]), str(initial)
if rv[0] == 0:
rv = rv[0], rv[1], struct.pack(
memcacheConstants.INCRDECR_RES_FMT, long(rv[2]))
print "Returning", rv
return rv
def handle_incr(self, cmd, hdrs, key, cas, data):
return self.__mutation(cmd, hdrs, key, data, 1)
def handle_decr(self, cmd, hdrs, key, cas, data):
return self.__mutation(cmd, hdrs, key, data, -1)
def __has_hold(self, key):
rv=False
now=time.time()
print "Looking for hold of", key, "in", self.held_keys, "as of", now
if key in self.held_keys:
if time.time() > self.held_keys[key]:
del self.held_keys[key]
else:
rv=True
return rv
def handle_add(self, cmd, hdrs, key, cas, data):
rv=self._error(memcacheConstants.ERR_EXISTS, 'Data exists for key')
if key not in self.storage and not self.__has_hold(key):
rv=self.__handle_unconditional_set(cmd, hdrs, key, data)
return rv
def handle_replace(self, cmd, hdrs, key, cas, data):
rv=self._error(memcacheConstants.ERR_NOT_FOUND, 'Not found')
if key in self.storage and not self.__has_hold(key):
rv=self.__handle_unconditional_set(cmd, hdrs, key, data)
return rv
def handle_flush(self, cmd, hdrs, key, cas, data):
timebomb_delay=hdrs[0]
def f():
self.storage.clear()
self.held_keys.clear()
print "Flushed"
if timebomb_delay:
heapq.heappush(self.sched, (time.time() + timebomb_delay, f))
else:
f()
return 0, 0, ''
def handle_delete(self, cmd, hdrs, key, cas, data):
def f(val):
rv=self._error(memcacheConstants.ERR_NOT_FOUND, 'Not found')
if val:
del self.storage[key]
rv = 0, 0, ''
print "Deleted", key, hdrs[0]
if hdrs[0] > 0:
self.held_keys[key] = time.time() + hdrs[0]
return rv
return self._withCAS(key, cas, f)
def handle_version(self, cmd, hdrs, key, cas, data):
return 0, 0, "Python test memcached server %s" % VERSION
def _withCAS(self, key, cas, f):
val=self.storage.get(key, None)
if cas == 0 or (val and cas == id(val)):
rv=f(val)
elif val:
rv = self._error(memcacheConstants.ERR_EXISTS, 'Exists')
else:
rv = self._error(memcacheConstants.ERR_NOT_FOUND, 'Not found')
return rv
def handle_prepend(self, cmd, hdrs, key, cas, data):
def f(val):
self.storage[key]=(val[0], val[1], data + val[2])
return 0, id(self.storage[key]), ''
return self._withCAS(key, cas, f)
def handle_append(self, cmd, hdrs, key, cas, data):
def f(val):
self.storage[key]=(val[0], val[1], val[2] + data)
return 0, id(self.storage[key]), ''
return self._withCAS(key, cas, f)
def handle_sasl_mechs(self, cmd, hdrs, key, cas, data):
return 0, 0, 'PLAIN CRAM-MD5'
def handle_sasl_step(self, cmd, hdrs, key, cas, data):
assert key == 'CRAM-MD5'
u, resp = data.split(' ', 1)
expected = hmac.HMAC('testpass', self.challenge).hexdigest()
if u == 'testuser' and resp == expected:
print "Successful CRAM-MD5 auth."
return 0, 0, 'OK'
else:
print "Errored a CRAM-MD5 auth."
return self._error(memcacheConstants.ERR_AUTH, 'Auth error.')
def _handle_sasl_auth_plain(self, data):
foruser, user, passwd = data.split("\0")
if user == 'testuser' and passwd == 'testpass':
print "Successful plain auth"
return 0, 0, "OK"
else:
print "Bad username/password: %s/%s" % (user, passwd)
return self._error(memcacheConstants.ERR_AUTH, 'Auth error.')
def _handle_sasl_auth_cram_md5(self, data):
assert data == ''
print "Issuing %s as a CRAM-MD5 challenge." % self.challenge
return memcacheConstants.ERR_AUTH_CONTINUE, 0, self.challenge
def handle_sasl_auth(self, cmd, hdrs, key, cas, data):
mech = key
if mech == 'PLAIN':
return self._handle_sasl_auth_plain(data)
elif mech == 'CRAM-MD5':
return self._handle_sasl_auth_cram_md5(data)
else:
print "Unhandled auth type: %s" % mech
return self._error(memcacheConstants.ERR_AUTH, 'Auth error.')
class MemcachedBinaryChannel(asyncore.dispatcher):
"""A channel implementing the binary protocol for memcached."""
# Receive buffer size
BUFFER_SIZE = 4096
def __init__(self, channel, backend, wbuf=""):
asyncore.dispatcher.__init__(self, channel)
self.log_info("New bin connection from %s" % str(self.addr))
self.backend=backend
self.wbuf=wbuf
self.rbuf=""
def __hasEnoughBytes(self):
rv=False
if len(self.rbuf) >= MIN_RECV_PACKET:
magic, cmd, keylen, extralen, datatype, vb, remaining, opaque, cas=\
struct.unpack(REQ_PKT_FMT, self.rbuf[:MIN_RECV_PACKET])
rv = len(self.rbuf) - MIN_RECV_PACKET >= remaining
return rv
def processCommand(self, cmd, keylen, vb, cas, data):
return self.backend.processCommand(cmd, keylen, vb, cas, data)
def handle_read(self):
try:
self.rbuf += self.recv(self.BUFFER_SIZE)
while self.__hasEnoughBytes():
magic, cmd, keylen, extralen, datatype, vb, remaining, opaque, cas=\
struct.unpack(REQ_PKT_FMT, self.rbuf[:MIN_RECV_PACKET])
assert magic == REQ_MAGIC_BYTE
assert keylen <= remaining, "Keylen is too big: %d > %d" \
% (keylen, remaining)
if cmd == memcacheConstants.CMD_TAP_MUTATION:
assert extralen == memcacheConstants.MUTATION_EXTRALEN_WITHOUT_QTIME or \
extralen == memcacheConstants.EXTRA_HDR_SIZES.get(cmd, 0), \
"Extralen is too large for cmd 0x%x: %d" % (cmd, extralen)
else:
assert extralen == memcacheConstants.EXTRA_HDR_SIZES.get(cmd, 0), \
"Extralen is too large for cmd 0x%x: %d" % (cmd, extralen)
# Grab the data section of this request
data=self.rbuf[MIN_RECV_PACKET:MIN_RECV_PACKET+remaining]
assert len(data) == remaining
# Remove this request from the read buffer
self.rbuf=self.rbuf[MIN_RECV_PACKET+remaining:]
# Process the command
cmdVal = self.processCommand(cmd, keylen, vb, cas, data)
# Queue the response to the client if applicable.
if cmdVal:
try:
status, cas, response = cmdVal
except ValueError:
print "Got", cmdVal
raise
dtype=0
extralen=memcacheConstants.EXTRA_HDR_SIZES.get(cmd, 0)
self.wbuf += struct.pack(RES_PKT_FMT,
RES_MAGIC_BYTE, cmd, keylen,
extralen, dtype, status,
len(response), opaque, cas) + response
except Exception, e:
print >>sys.__stderr__,"Error reading from source server..........Error is %s..........exiting" %(e)
os._exit(1)
def writable(self):
return self.wbuf
def handle_write(self):
sent = self.send(self.wbuf)
self.wbuf = self.wbuf[sent:]
def handle_close(self):
self.log_info("Disconnected from %s" % str(self.addr))
self.close()
class MemcachedServer(asyncore.dispatcher):
"""A memcached server."""
def __init__(self, backend, handler, port=11211):
asyncore.dispatcher.__init__(self)
self.handler=handler
self.backend=backend
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind(("", port))
self.listen(5)
self.log_info("Listening on %d" % port)
def handle_accept(self):
channel, addr = self.accept()
self.handler(channel, self.backend)
if __name__ == '__main__':
port = 11211
import sys
if len(sys.argv) > 1:
port = int(sys.argv[1])
server = MemcachedServer(DictBackend(), MemcachedBinaryChannel, port=port)
asyncore.loop()
|
|
# Copyright 2001-2004 Brad Chapman.
# Revisions copyright 2009-2013 by Peter Cock.
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""General mechanisms to access applications in Biopython.
This module is not intended for direct use. It provides the basic objects which
are subclassed by our command line wrappers, such as:
- Bio.Align.Applications
- Bio.Blast.Applications
- Bio.Emboss.Applications
- Bio.Sequencing.Applications
These modules provide wrapper classes for command line tools to help you
construct command line strings by setting the values of each parameter.
The finished command line strings are then normally invoked via the built-in
Python module subprocess.
"""
from __future__ import print_function
from Bio._py3k import basestring
import os
import platform
import sys
import subprocess
import re
from subprocess import CalledProcessError as _ProcessCalledError
from Bio import File
# Use this regular expression to test the property names are going to
# be valid as Python properties or arguments
_re_prop_name = re.compile(r"^[a-zA-Z][a-zA-Z0-9_]*$")
assert _re_prop_name.match("t")
assert _re_prop_name.match("test")
assert _re_prop_name.match("_test") is None # we don't want private names
assert _re_prop_name.match("-test") is None
assert _re_prop_name.match("any-hyphen") is None
assert _re_prop_name.match("underscore_ok")
assert _re_prop_name.match("test_name")
assert _re_prop_name.match("test2")
# These are reserved names in Python itself,
_reserved_names = ["and", "del", "from", "not", "while", "as", "elif",
"global", "or", "with", "assert", "else", "if", "pass",
"yield", "break", "except", "import", "print", "class",
"exec", "in", "raise", "continue", "finally", "is",
"return", "def", "for", "lambda", "try"]
# These are reserved names due to the way the wrappers work
_local_reserved_names = ["set_parameter"]
class ApplicationError(_ProcessCalledError):
"""Raised when an application returns a non-zero exit status.
The exit status will be stored in the returncode attribute, similarly
the command line string used in the cmd attribute, and (if captured)
stdout and stderr as strings.
This exception is a subclass of subprocess.CalledProcessError.
>>> err = ApplicationError(-11, "helloworld", "", "Some error text")
>>> err.returncode, err.cmd, err.stdout, err.stderr
(-11, 'helloworld', '', 'Some error text')
>>> print(err)
Non-zero return code -11 from 'helloworld', message 'Some error text'
"""
def __init__(self, returncode, cmd, stdout="", stderr=""):
self.returncode = returncode
self.cmd = cmd
self.stdout = stdout
self.stderr = stderr
def __str__(self):
# get first line of any stderr message
try:
msg = self.stderr.lstrip().split("\n", 1)[0].rstrip()
except Exception: # TODO, ValueError? AttributeError?
msg = ""
if msg:
return "Non-zero return code %d from %r, message %r" \
% (self.returncode, self.cmd, msg)
else:
return "Non-zero return code %d from %r" \
% (self.returncode, self.cmd)
def __repr__(self):
return "ApplicationError(%i, %s, %s, %s)" \
% (self.returncode, self.cmd, self.stdout, self.stderr)
class AbstractCommandline(object):
"""Generic interface for constructing command line strings.
This class shouldn't be called directly; it should be subclassed to
provide an implementation for a specific application.
For a usage example we'll show one of the EMBOSS wrappers. You can set
options when creating the wrapper object using keyword arguments - or
later using their corresponding properties:
>>> from Bio.Emboss.Applications import WaterCommandline
>>> cline = WaterCommandline(gapopen=10, gapextend=0.5)
>>> cline
WaterCommandline(cmd='water', gapopen=10, gapextend=0.5)
You can instead manipulate the parameters via their properties, e.g.
>>> cline.gapopen
10
>>> cline.gapopen = 20
>>> cline
WaterCommandline(cmd='water', gapopen=20, gapextend=0.5)
You can clear a parameter you have already added by 'deleting' the
corresponding property:
>>> del cline.gapopen
>>> cline.gapopen
>>> cline
WaterCommandline(cmd='water', gapextend=0.5)
Once you have set the parameters you need, you can turn the object into
a string (e.g. to log the command):
>>> str(cline)
Traceback (most recent call last):
...
ValueError: You must either set outfile (output filename), or enable filter or stdout (output to stdout).
In this case the wrapper knows certain arguments are required to construct
a valid command line for the tool. For a complete example,
>>> from Bio.Emboss.Applications import WaterCommandline
>>> water_cmd = WaterCommandline(gapopen=10, gapextend=0.5)
>>> water_cmd.asequence = "asis:ACCCGGGCGCGGT"
>>> water_cmd.bsequence = "asis:ACCCGAGCGCGGT"
>>> water_cmd.outfile = "temp_water.txt"
>>> print(water_cmd)
water -outfile=temp_water.txt -asequence=asis:ACCCGGGCGCGGT -bsequence=asis:ACCCGAGCGCGGT -gapopen=10 -gapextend=0.5
>>> water_cmd
WaterCommandline(cmd='water', outfile='temp_water.txt', asequence='asis:ACCCGGGCGCGGT', bsequence='asis:ACCCGAGCGCGGT', gapopen=10, gapextend=0.5)
You would typically run the command line via a standard Python operating
system call using the subprocess module for full control. For the simple
case where you just want to run the command and get the output:
stdout, stderr = water_cmd()
Note that by default we assume the underlying tool is installed on the
system $PATH environment variable. This is normal under Linux/Unix, but
may need to be done manually under Windows. Alternatively, you can specify
the full path to the binary as the first argument (cmd):
>>> from Bio.Emboss.Applications import WaterCommandline
>>> water_cmd = WaterCommandline("C:\Program Files\EMBOSS\water.exe",
... gapopen=10, gapextend=0.5,
... asequence="asis:ACCCGGGCGCGGT",
... bsequence="asis:ACCCGAGCGCGGT",
... outfile="temp_water.txt")
>>> print(water_cmd)
"C:\Program Files\EMBOSS\water.exe" -outfile=temp_water.txt -asequence=asis:ACCCGGGCGCGGT -bsequence=asis:ACCCGAGCGCGGT -gapopen=10 -gapextend=0.5
Notice that since the path name includes a space it has automatically
been quoted.
"""
# TODO - Replace the above example since EMBOSS doesn't work properly
# if installed into a folder with a space like "C:\Program Files\EMBOSS"
#
# Note the call example above is not a doctest as we can't handle EMBOSS
# (or any other tool) being missing in the unit tests.
parameters = None # will be a list defined in subclasses
def __init__(self, cmd, **kwargs):
"""Create a new instance of a command line wrapper object."""
# Init method - should be subclassed!
#
# The subclass methods should look like this:
#
# def __init__(self, cmd="muscle", **kwargs):
# self.parameters = [...]
# AbstractCommandline.__init__(self, cmd, **kwargs)
#
        # i.e. There should be an optional argument "cmd" to set the location
# of the executable (with a sensible default which should work if the
# command is on the path on Unix), and keyword arguments. It should
# then define a list of parameters, all objects derived from the base
# class _AbstractParameter.
#
# The keyword arguments should be any valid parameter name, and will
# be used to set the associated parameter.
self.program_name = cmd
try:
parameters = self.parameters
except AttributeError:
raise AttributeError("Subclass should have defined self.parameters")
# Create properties for each parameter at run time
aliases = set()
for p in parameters:
if not p.names:
assert isinstance(p, _StaticArgument), p
continue
for name in p.names:
if name in aliases:
raise ValueError("Parameter alias %s multiply defined"
% name)
aliases.add(name)
name = p.names[-1]
if _re_prop_name.match(name) is None:
raise ValueError("Final parameter name %s cannot be used as "
"an argument or property name in python"
% repr(name))
if name in _reserved_names:
raise ValueError("Final parameter name %s cannot be used as "
"an argument or property name because it is "
"a reserved word in python" % repr(name))
if name in _local_reserved_names:
raise ValueError("Final parameter name %s cannot be used as "
"an argument or property name due to the "
"way the AbstractCommandline class works"
% repr(name))
# Beware of binding-versus-assignment confusion issues
def getter(name):
return lambda x: x._get_parameter(name)
def setter(name):
return lambda x, value: x.set_parameter(name, value)
def deleter(name):
return lambda x: x._clear_parameter(name)
doc = p.description
if isinstance(p, _Switch):
doc += "\n\nThis property controls the addition of the %s " \
"switch, treat this property as a boolean." % p.names[0]
else:
doc += "\n\nThis controls the addition of the %s parameter " \
"and its associated value. Set this property to the " \
"argument value required." % p.names[0]
prop = property(getter(name), setter(name), deleter(name), doc)
setattr(self.__class__, name, prop) # magic!
for key, value in kwargs.items():
self.set_parameter(key, value)
def _validate(self):
"""Make sure the required parameters have been set (PRIVATE).
No return value - it either works or raises a ValueError.
This is a separate method (called from __str__) so that subclasses may
override it.
"""
for p in self.parameters:
# Check for missing required parameters:
            if p.is_required and not p.is_set:
raise ValueError("Parameter %s is not set."
% p.names[-1])
# Also repeat the parameter validation here, just in case?
def __str__(self):
"""Make the commandline string with the currently set options.
e.g.
>>> from Bio.Emboss.Applications import WaterCommandline
>>> cline = WaterCommandline(gapopen=10, gapextend=0.5)
>>> cline.asequence = "asis:ACCCGGGCGCGGT"
>>> cline.bsequence = "asis:ACCCGAGCGCGGT"
>>> cline.outfile = "temp_water.txt"
>>> print(cline)
water -outfile=temp_water.txt -asequence=asis:ACCCGGGCGCGGT -bsequence=asis:ACCCGAGCGCGGT -gapopen=10 -gapextend=0.5
>>> str(cline)
'water -outfile=temp_water.txt -asequence=asis:ACCCGGGCGCGGT -bsequence=asis:ACCCGAGCGCGGT -gapopen=10 -gapextend=0.5'
"""
self._validate()
commandline = "%s " % _escape_filename(self.program_name)
for parameter in self.parameters:
if parameter.is_set:
# This will include a trailing space:
commandline += str(parameter)
return commandline.strip() # remove trailing space
def __repr__(self):
"""Return a representation of the command line object for debugging.
e.g.
>>> from Bio.Emboss.Applications import WaterCommandline
>>> cline = WaterCommandline(gapopen=10, gapextend=0.5)
>>> cline.asequence = "asis:ACCCGGGCGCGGT"
>>> cline.bsequence = "asis:ACCCGAGCGCGGT"
>>> cline.outfile = "temp_water.txt"
>>> print(cline)
water -outfile=temp_water.txt -asequence=asis:ACCCGGGCGCGGT -bsequence=asis:ACCCGAGCGCGGT -gapopen=10 -gapextend=0.5
>>> cline
WaterCommandline(cmd='water', outfile='temp_water.txt', asequence='asis:ACCCGGGCGCGGT', bsequence='asis:ACCCGAGCGCGGT', gapopen=10, gapextend=0.5)
"""
answer = "%s(cmd=%s" % (self.__class__.__name__, repr(self.program_name))
for parameter in self.parameters:
if parameter.is_set:
if isinstance(parameter, _Switch):
answer += ", %s=True" % parameter.names[-1]
else:
answer += ", %s=%s" \
% (parameter.names[-1], repr(parameter.value))
answer += ")"
return answer
def _get_parameter(self, name):
"""Get a commandline option value."""
for parameter in self.parameters:
if name in parameter.names:
if isinstance(parameter, _Switch):
return parameter.is_set
else:
return parameter.value
raise ValueError("Option name %s was not found." % name)
def _clear_parameter(self, name):
"""Reset or clear a commandline option value."""
cleared_option = False
for parameter in self.parameters:
if name in parameter.names:
parameter.value = None
parameter.is_set = False
cleared_option = True
if not cleared_option:
raise ValueError("Option name %s was not found." % name)
def set_parameter(self, name, value=None):
"""Set a commandline option for a program (OBSOLETE).
Every parameter is available via a property and as a named
keyword when creating the instance. Using either of these is
preferred to this legacy set_parameter method which is now
OBSOLETE, and likely to be DEPRECATED and later REMOVED in
future releases.
"""
set_option = False
for parameter in self.parameters:
if name in parameter.names:
if isinstance(parameter, _Switch):
if value is None:
import warnings
warnings.warn("For a switch type argument like %s, "
"we expect a boolean. None is treated "
"as FALSE!" % parameter.names[-1])
parameter.is_set = bool(value)
set_option = True
else:
if value is not None:
self._check_value(value, name, parameter.checker_function)
parameter.value = value
parameter.is_set = True
set_option = True
if not set_option:
raise ValueError("Option name %s was not found." % name)
def _check_value(self, value, name, check_function):
"""Check whether the given value is valid.
No return value - it either works or raises a ValueError.
This uses the passed function 'check_function', which can either
return a [0, 1] (bad, good) value or raise an error. Either way
this function will raise an error if the value is not valid, or
finish silently otherwise.
"""
if check_function is not None:
is_good = check_function(value) # May raise an exception
assert is_good in [0, 1, True, False]
if not is_good:
raise ValueError("Invalid parameter value %r for parameter %s"
% (value, name))
def __setattr__(self, name, value):
"""Set attribute name to value (PRIVATE).
This code implements a workaround for a user interface issue.
        Without this __setattr__, attribute-based assignment of parameters
will silently accept invalid parameters, leading to known instances
of the user assuming that parameters for the application are set,
when they are not.
>>> from Bio.Emboss.Applications import WaterCommandline
>>> cline = WaterCommandline(gapopen=10, gapextend=0.5, stdout=True)
>>> cline.asequence = "a.fasta"
>>> cline.bsequence = "b.fasta"
>>> cline.csequence = "c.fasta"
Traceback (most recent call last):
...
ValueError: Option name csequence was not found.
>>> print(cline)
water -stdout -asequence=a.fasta -bsequence=b.fasta -gapopen=10 -gapextend=0.5
        This workaround uses a whitelist of object attributes: these are set
        as normal instance attributes, while any other attribute is assumed
        to be a parameter and is passed to the self.set_parameter method
        for validation and assignment.
"""
if name in ['parameters', 'program_name']: # Allowed attributes
self.__dict__[name] = value
else:
self.set_parameter(name, value) # treat as a parameter
def __call__(self, stdin=None, stdout=True, stderr=True,
cwd=None, env=None):
"""Executes the command, waits for it to finish, and returns output.
Runs the command line tool and waits for it to finish. If it returns
a non-zero error level, an exception is raised. Otherwise two strings
are returned containing stdout and stderr.
The optional stdin argument should be a string of data which will be
passed to the tool as standard input.
        The optional stdout and stderr arguments may be filenames (strings),
        but otherwise are treated as booleans which control whether the output
        should be captured as strings (True, default), or ignored by sending
        it to /dev/null to avoid wasting memory (False). If sent to a file
        or ignored, then empty string(s) are returned.
The optional cwd argument is a string giving the working directory
to run the command from. See Python's subprocess module documentation
for more details.
The optional env argument is a dictionary setting the environment
variables to be used in the new process. By default the current
process' environment variables are used. See Python's subprocess
module documentation for more details.
Default example usage::
from Bio.Emboss.Applications import WaterCommandline
water_cmd = WaterCommandline(gapopen=10, gapextend=0.5,
stdout=True, auto=True,
asequence="a.fasta", bsequence="b.fasta")
print("About to run: %s" % water_cmd)
std_output, err_output = water_cmd()
This functionality is similar to subprocess.check_output() added in
Python 2.7. In general if you require more control over running the
command, use subprocess directly.
As of Biopython 1.56, when the program called returns a non-zero error
level, a custom ApplicationError exception is raised. This includes
any stdout and stderr strings captured as attributes of the exception
object, since they may be useful for diagnosing what went wrong.
"""
if not stdout:
stdout_arg = open(os.devnull, "w")
elif isinstance(stdout, basestring):
stdout_arg = open(stdout, "w")
else:
stdout_arg = subprocess.PIPE
if not stderr:
stderr_arg = open(os.devnull, "w")
elif isinstance(stderr, basestring):
if stdout == stderr:
stderr_arg = stdout_arg # Write both to the same file
else:
stderr_arg = open(stderr, "w")
else:
stderr_arg = subprocess.PIPE
# We may not need to supply any piped input, but we setup the
# standard input pipe anyway as a work around for a python
# bug if this is called from a Windows GUI program. For
# details, see http://bugs.python.org/issue1124861
#
# Using universal newlines is important on Python 3, this
# gives unicode handles rather than bytes handles.
# Windows 7, 8 and 8.1 want shell = True
# TODO: Test under Windows 10 and revisit platform detection.
if sys.platform != "win32":
use_shell = True
else:
win_ver = platform.win32_ver()[0]
if win_ver in ["7", "8", "post2012Server"]:
use_shell = True
else:
use_shell = False
child_process = subprocess.Popen(str(self), stdin=subprocess.PIPE,
stdout=stdout_arg, stderr=stderr_arg,
universal_newlines=True,
cwd=cwd, env=env,
shell=use_shell)
# Use .communicate as can get deadlocks with .wait(), see Bug 2804
stdout_str, stderr_str = child_process.communicate(stdin)
if not stdout:
assert not stdout_str, stdout_str
if not stderr:
assert not stderr_str, stderr_str
return_code = child_process.returncode
# Particularly important to close handles on Jython and PyPy
# (where garbage collection is less predictable) and on Windows
# (where cannot delete files with an open handle):
if not stdout or isinstance(stdout, basestring):
# We opened /dev/null or a file
stdout_arg.close()
if not stderr or (isinstance(stderr, basestring) and stdout != stderr):
# We opened /dev/null or a file
stderr_arg.close()
if return_code:
raise ApplicationError(return_code, str(self),
stdout_str, stderr_str)
return stdout_str, stderr_str
class _AbstractParameter(object):
"""A class to hold information about a parameter for a commandline.
Do not use this directly, instead use one of the subclasses.
"""
def __init__(self):
raise NotImplementedError
def __str__(self):
raise NotImplementedError
class _Option(_AbstractParameter):
"""Represent an option that can be set for a program.
This holds UNIXish options like --append=yes and -a yes,
where a value (here "yes") is generally expected.
For UNIXish options like -kimura in clustalw which don't
take a value, use the _Switch object instead.
Attributes:
o names -- a list of string names (typically two entries) by which
the parameter can be set via the legacy set_parameter method
(eg ["-a", "--append", "append"]). The first name in list is used
when building the command line. The last name in the list is a
"human readable" name describing the option in one word. This
must be a valid Python identifier as it is used as the property
name and as a keyword argument, and should therefore follow PEP8
naming.
o description -- a description of the option. This is used as
the property docstring.
o filename -- True if this argument is a filename and should be
automatically quoted if it contains spaces.
o checker_function -- a reference to a function that will determine
if a given value is valid for this parameter. This function can either
raise an error when given a bad value, or return a [0, 1] decision on
whether the value is correct.
o equate -- should an equals sign be inserted if a value is used?
o is_required -- a flag to indicate if the parameter must be set for
the program to be run.
o is_set -- if the parameter has been set
o value -- the value of a parameter
"""
def __init__(self, names, description, filename=False, checker_function=None,
is_required=False, equate=True):
self.names = names
assert isinstance(description, basestring), \
"%r for %s" % (description, names[-1])
self.is_filename = filename
self.checker_function = checker_function
self.description = description
self.equate = equate
self.is_required = is_required
self.is_set = False
self.value = None
def __str__(self):
"""Return the value of this option for the commandline.
Includes a trailing space.
"""
# Note: Before equate was handled explicitly, the old
# code would do either "--name " or "--name=value ",
# or " -name " or " -name value ". This choice is now
# now made explicitly when setting up the option.
if self.value is None:
return "%s " % self.names[0]
if self.is_filename:
v = _escape_filename(self.value)
else:
v = str(self.value)
if self.equate:
return "%s=%s " % (self.names[0], v)
else:
return "%s %s " % (self.names[0], v)
class _Switch(_AbstractParameter):
"""Represent an optional argument switch for a program.
This holds UNIXish options like -kimura in clustalw which don't
take a value, they are either included in the command string
or omitted.
o names -- a list of string names (typically two entries) by which
the parameter can be set via the legacy set_parameter method
(eg ["-a", "--append", "append"]). The first name in list is used
when building the command line. The last name in the list is a
"human readable" name describing the option in one word. This
    must be a valid Python identifier as it is used as the property
name and as a keyword argument, and should therefore follow PEP8
naming.
o description -- a description of the option. This is used as
the property docstring.
o is_set -- if the parameter has been set
    NOTE - There is no value attribute, see is_set instead.
"""
def __init__(self, names, description):
self.names = names
self.description = description
self.is_set = False
self.is_required = False
def __str__(self):
"""Return the value of this option for the commandline.
Includes a trailing space.
"""
assert not hasattr(self, "value")
if self.is_set:
return "%s " % self.names[0]
else:
return ""
class _Argument(_AbstractParameter):
"""Represent an argument on a commandline.
The names argument should be a list containing one string.
    This must be a valid Python identifier as it is used as the
property name and as a keyword argument, and should therefore
follow PEP8 naming.
"""
def __init__(self, names, description, filename=False,
checker_function=None, is_required=False):
# if len(names) != 1:
# raise ValueError("The names argument to _Argument should be a "
# "single entry list with a PEP8 property name.")
self.names = names
assert isinstance(description, basestring), \
"%r for %s" % (description, names[-1])
self.is_filename = filename
self.checker_function = checker_function
self.description = description
self.is_required = is_required
self.is_set = False
self.value = None
def __str__(self):
if self.value is None:
return " "
elif self.is_filename:
return "%s " % _escape_filename(self.value)
else:
return "%s " % self.value
class _ArgumentList(_Argument):
"""Represent a variable list of arguments on a command line, e.g. multiple filenames."""
# TODO - Option to require at least one value? e.g. min/max count?
def __str__(self):
assert isinstance(self.value, list), \
"Arguments should be a list"
assert self.value, "Requires at least one filename"
# A trailing space is required so that parameters following the last filename
# do not appear merged.
# e.g.: samtools cat in1.bam in2.bam-o out.sam [without trailing space][Incorrect]
# samtools cat in1.bam in2.bam -o out.sam [with trailing space][Correct]
if self.is_filename:
return " ".join(_escape_filename(v) for v in self.value) + " "
else:
return " ".join(self.value) + " "
class _StaticArgument(_AbstractParameter):
"""Represent a static (read only) argument on a commandline.
This is not intended to be exposed as a named argument or
property of a command line wrapper object.
"""
def __init__(self, value):
self.names = []
self.is_required = False
self.is_set = True
self.value = value
def __str__(self):
return "%s " % self.value
def _escape_filename(filename):
"""Escape filenames with spaces by adding quotes (PRIVATE).
Note this will not add quotes if they are already included:
>>> print((_escape_filename('example with spaces')))
"example with spaces"
>>> print((_escape_filename('"example with spaces"')))
"example with spaces"
"""
    # Is adding the following helpful?
# if os.path.isfile(filename):
# # On Windows, if the file exists, we can ask for
# # its alternative short name (DOS style 8.3 format)
# # which has no spaces in it. Note that this name
# # is not portable between machines, or even folder!
# try:
# import win32api
# short = win32api.GetShortPathName(filename)
# assert os.path.isfile(short)
# return short
# except ImportError:
# pass
if " " not in filename:
return filename
# We'll just quote it - works on Windows, Mac OS X etc
if filename.startswith('"') and filename.endswith('"'):
        # It's already quoted
return filename
else:
return '"%s"' % filename
def _test():
"""Run the Bio.Application module's doctests."""
import doctest
doctest.testmod(verbose=1)
if __name__ == "__main__":
# Run the doctests
_test()
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Implements cfn metadata handling
Not implemented yet:
* command line args
- placeholders are ignored
"""
import atexit
import contextlib
import errno
import functools
import grp
import json
import logging
import os
import os.path
import pwd
try:
import rpmUtils.miscutils as rpmutils
import rpmUtils.updates as rpmupdates
rpmutils_present = True
except ImportError:
rpmutils_present = False
import re
import shutil
import subprocess
import tempfile
import six
import six.moves.configparser as ConfigParser
# Override BOTO_CONFIG, which makes boto look only at the specified
# config file, instead of the default locations
os.environ['BOTO_CONFIG'] = '/var/lib/heat-cfntools/cfn-boto-cfg'
from boto import cloudformation # noqa
LOG = logging.getLogger(__name__)
def to_boolean(b):
val = b.lower().strip() if isinstance(b, six.string_types) else b
return val in [True, 'true', 'yes', '1', 1]
def parse_creds_file(path='/etc/cfn/cfn-credentials'):
'''Parse the cfn credentials file.
Default location is as specified, and it is expected to contain
exactly two keys "AWSAccessKeyId" and "AWSSecretKey)
The two keys are returned a dict (if found)
'''
creds = {'AWSAccessKeyId': None, 'AWSSecretKey': None}
for line in open(path):
for key in creds:
match = re.match("^%s *= *(.*)$" % key, line)
if match:
creds[key] = match.group(1)
return creds
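# Illustrative sketch (hypothetical file contents): the credentials file read
# by parse_creds_file() is expected to contain one "key = value" line per
# credential, for example:
#
#     AWSAccessKeyId=AKIAIOSFODNN7EXAMPLE
#     AWSSecretKey=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
#
# so a typical caller would do:
#
#     creds = parse_creds_file('/etc/cfn/cfn-credentials')
#     access_key = creds['AWSAccessKeyId']
#     secret_key = creds['AWSSecretKey']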
class InvalidCredentialsException(Exception):
def __init__(self, credential_file):
super(Exception, self).__init__("invalid credentials file %s" %
credential_file)
class HupConfig(object):
def __init__(self, fp_list):
self.config = ConfigParser.SafeConfigParser()
for fp in fp_list:
self.config.readfp(fp)
self.load_main_section()
self.hooks = []
for s in self.config.sections():
if s != 'main':
self.hooks.append(Hook(
s,
self.config.get(s, 'triggers'),
self.config.get(s, 'path'),
self.config.get(s, 'runas'),
self.config.get(s, 'action')))
def load_main_section(self):
# required values
self.stack = self.config.get('main', 'stack')
self.credential_file = self.config.get('main', 'credential-file')
try:
with open(self.credential_file) as f:
self.credentials = f.read()
except Exception:
raise InvalidCredentialsException(self.credential_file)
# optional values
try:
self.region = self.config.get('main', 'region')
except ConfigParser.NoOptionError:
self.region = 'nova'
try:
self.interval = self.config.getint('main', 'interval')
except ConfigParser.NoOptionError:
self.interval = 10
def __str__(self):
return ('{stack: %s, credential_file: %s, region: %s, interval:%d}' %
(self.stack, self.credential_file, self.region, self.interval))
def unique_resources_get(self):
resources = []
for h in self.hooks:
r = h.resource_name_get()
if r not in resources:
resources.append(h.resource_name_get())
return resources
class Hook(object):
def __init__(self, name, triggers, path, runas, action):
self.name = name
self.triggers = triggers
self.path = path
self.runas = runas
self.action = action
def resource_name_get(self):
sp = self.path.split('.')
return sp[1]
def event(self, ev_name, ev_object, ev_resource):
if (self.resource_name_get() == ev_resource and
ev_name in self.triggers):
CommandRunner(self.action, shell=True).run(user=self.runas)
else:
LOG.debug('event: {%s, %s, %s} did not match %s' %
(ev_name, ev_object, ev_resource, self.__str__()))
def __str__(self):
return '{%s, %s, %s, %s, %s}' % (self.name,
self.triggers,
self.path,
self.runas,
self.action)
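# Illustrative sketch (hypothetical file contents): a cfn-hup configuration of
# the form consumed by HupConfig above.  The [main] section supplies the stack
# name, credentials file, region and polling interval; every other section
# becomes a Hook whose "path" (Resources.<name>.Metadata) names the watched
# resource and whose "action" runs when one of its triggers fires:
#
#     [main]
#     stack=my-stack
#     credential-file=/etc/cfn/cfn-credentials
#     region=nova
#     interval=10
#
#     [reconfigure]
#     triggers=post.update
#     path=Resources.WebServer.Metadata
#     runas=root
#     action=/opt/aws/bin/cfn-init -s my-stack -r WebServer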
class ControlledPrivilegesFailureException(Exception):
pass
@contextlib.contextmanager
def controlled_privileges(user):
orig_euid = None
try:
real = pwd.getpwnam(user)
if os.geteuid() != real.pw_uid:
orig_euid = os.geteuid()
os.seteuid(real.pw_uid)
LOG.debug("Privileges set for user %s" % user)
except Exception as e:
raise ControlledPrivilegesFailureException(e)
try:
yield
finally:
if orig_euid is not None:
try:
os.seteuid(orig_euid)
LOG.debug("Original privileges restored.")
except Exception as e:
LOG.error("Error restoring privileges %s" % e)
class CommandRunner(object):
"""Helper class to run a command and store the output."""
def __init__(self, command, shell=False, nextcommand=None):
self._command = command
self._shell = shell
self._next = nextcommand
self._stdout = None
self._stderr = None
self._status = None
def __str__(self):
s = "CommandRunner:"
s += "\n\tcommand: %s" % self._command
if self._status:
s += "\n\tstatus: %s" % self.status
if self._stdout:
s += "\n\tstdout: %s" % self.stdout
if self._stderr:
s += "\n\tstderr: %s" % self.stderr
return s
def run(self, user='root', cwd=None, env=None):
"""Run the Command and return the output.
Returns:
self
"""
LOG.debug("Running command: %s" % self._command)
cmd = self._command
shell = self._shell
        # Ensure that commands given as a string are run via the shell
assert isinstance(cmd, six.string_types) is bool(shell)
try:
with controlled_privileges(user):
subproc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=cwd,
env=env, shell=shell)
output = subproc.communicate()
self._status = subproc.returncode
self._stdout = output[0]
self._stderr = output[1]
except ControlledPrivilegesFailureException as e:
LOG.error("Error setting privileges for user '%s': %s"
% (user, e))
self._status = 126
self._stderr = six.text_type(e)
if self._status:
LOG.debug("Return code of %d after executing: '%s'\n"
"stdout: '%s'\n"
"stderr: '%s'" % (self._status, cmd, self._stdout,
self._stderr))
if self._next:
self._next.run()
return self
@property
def stdout(self):
return self._stdout
@property
def stderr(self):
return self._stderr
@property
def status(self):
return self._status
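# Illustrative usage sketch: CommandRunner expects list commands to run
# without a shell and string commands to run with shell=True, matching the
# assertion in run() above.
def _example_run_commands():
    """Run one list-style and one shell-style command (illustrative only)."""
    listed = CommandRunner(['uname', '-r']).run()
    piped = CommandRunner('uname -r | wc -c', shell=True).run()
    return listed.status, piped.status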
class RpmHelper(object):
if rpmutils_present:
_rpm_util = rpmupdates.Updates([], [])
@classmethod
def compare_rpm_versions(cls, v1, v2):
"""Compare two RPM version strings.
Arguments:
v1 -- a version string
v2 -- a version string
Returns:
0 -- the versions are equal
1 -- v1 is greater
-1 -- v2 is greater
"""
if v1 and v2:
return rpmutils.compareVerOnly(v1, v2)
elif v1:
return 1
elif v2:
return -1
else:
return 0
@classmethod
def newest_rpm_version(cls, versions):
"""Returns the highest (newest) version from a list of versions.
Arguments:
versions -- A list of version strings
e.g., ['2.0', '2.2', '2.2-1.fc16', '2.2.22-1.fc16']
"""
if versions:
if isinstance(versions, six.string_types):
return versions
versions = sorted(versions, rpmutils.compareVerOnly,
reverse=True)
return versions[0]
else:
return None
@classmethod
def rpm_package_version(cls, pkg):
"""Returns the version of an installed RPM.
Arguments:
pkg -- A package name
"""
cmd = "rpm -q --queryformat '%%{VERSION}-%%{RELEASE}' %s" % pkg
command = CommandRunner(cmd).run()
return command.stdout
@classmethod
def rpm_package_installed(cls, pkg):
"""Indicates whether pkg is in rpm database.
Arguments:
pkg -- A package name (with optional version and release spec).
e.g., httpd
e.g., httpd-2.2.22
e.g., httpd-2.2.22-1.fc16
"""
cmd = ['rpm', '-q', pkg]
command = CommandRunner(cmd).run()
return command.status == 0
@classmethod
def yum_package_available(cls, pkg):
"""Indicates whether pkg is available via yum.
Arguments:
pkg -- A package name (with optional version and release spec).
e.g., httpd
e.g., httpd-2.2.22
e.g., httpd-2.2.22-1.fc16
"""
cmd = ['yum', '-y', '--showduplicates', 'list', 'available', pkg]
command = CommandRunner(cmd).run()
return command.status == 0
@classmethod
def dnf_package_available(cls, pkg):
"""Indicates whether pkg is available via dnf.
Arguments:
pkg -- A package name (with optional version and release spec).
e.g., httpd
e.g., httpd-2.2.22
e.g., httpd-2.2.22-1.fc21
"""
cmd = ['dnf', '-y', '--showduplicates', 'list', 'available', pkg]
command = CommandRunner(cmd).run()
return command.status == 0
@classmethod
def zypper_package_available(cls, pkg):
"""Indicates whether pkg is available via zypper.
Arguments:
pkg -- A package name (with optional version and release spec).
e.g., httpd
e.g., httpd-2.2.22
e.g., httpd-2.2.22-1.fc16
"""
cmd = ['zypper', '-n', '--no-refresh', 'search', pkg]
command = CommandRunner(cmd).run()
return command.status == 0
@classmethod
def install(cls, packages, rpms=True, zypper=False, dnf=False):
"""Installs (or upgrades) packages via RPM, yum, dnf, or zypper.
Arguments:
packages -- a list of packages to install
rpms -- if True:
* use RPM to install the packages
* packages must be a list of URLs to retrieve RPMs
if False:
* use Yum to install packages
* packages is a list of:
- pkg name (httpd), or
- pkg name with version spec (httpd-2.2.22), or
- pkg name with version-release spec
(httpd-2.2.22-1.fc16)
zypper -- if True:
* overrides use of yum, use zypper instead
dnf -- if True:
* overrides use of yum, use dnf instead
* packages must be in same format as yum pkg list
"""
if rpms:
cmd = ['rpm', '-U', '--force', '--nosignature']
elif zypper:
cmd = ['zypper', '-n', 'install']
elif dnf:
# use dnf --best to upgrade outdated-but-installed packages
cmd = ['dnf', '-y', '--best', 'install']
else:
cmd = ['yum', '-y', 'install']
cmd.extend(packages)
LOG.info("Installing packages: %s" % cmd)
command = CommandRunner(cmd).run()
if command.status:
LOG.warning("Failed to install packages: %s" % cmd)
@classmethod
def downgrade(cls, packages, rpms=True, zypper=False, dnf=False):
"""Downgrades a set of packages via RPM, yum, dnf, or zypper.
Arguments:
packages -- a list of packages to downgrade
rpms -- if True:
* use RPM to downgrade (replace) the packages
* packages must be a list of URLs to retrieve the RPMs
if False:
* use Yum to downgrade packages
* packages is a list of:
- pkg name with version spec (httpd-2.2.22), or
- pkg name with version-release spec
(httpd-2.2.22-1.fc16)
dnf -- if True:
* Use dnf instead of RPM/yum
"""
if rpms:
cls.install(packages)
elif zypper:
cmd = ['zypper', '-n', 'install', '--oldpackage']
cmd.extend(packages)
LOG.info("Downgrading packages: %s", cmd)
command = CommandRunner(cmd).run()
if command.status:
LOG.warning("Failed to downgrade packages: %s" % cmd)
elif dnf:
cmd = ['dnf', '-y', 'downgrade']
cmd.extend(packages)
LOG.info("Downgrading packages: %s", cmd)
command = CommandRunner(cmd).run()
if command.status:
LOG.warning("Failed to downgrade packages: %s" % cmd)
else:
cmd = ['yum', '-y', 'downgrade']
cmd.extend(packages)
LOG.info("Downgrading packages: %s" % cmd)
command = CommandRunner(cmd).run()
if command.status:
LOG.warning("Failed to downgrade packages: %s" % cmd)
class PackagesHandler(object):
_packages = {}
_package_order = ["dpkg", "rpm", "apt", "yum", "dnf"]
@staticmethod
def _pkgsort(pkg1, pkg2):
order = PackagesHandler._package_order
p1_name = pkg1[0]
p2_name = pkg2[0]
if p1_name in order and p2_name in order:
i1 = order.index(p1_name)
i2 = order.index(p2_name)
return (i1 > i2) - (i1 < i2)
elif p1_name in order:
return -1
elif p2_name in order:
return 1
else:
n1 = p1_name.lower()
n2 = p2_name.lower()
return (n1 > n2) - (n1 < n2)
def __init__(self, packages):
self._packages = packages
def _handle_gem_packages(self, packages):
"""very basic support for gems."""
# TODO(asalkeld) support versions
# -b == local & remote install
# -y == install deps
opts = ['-b', '-y']
for pkg_name, versions in packages.items():
if len(versions) > 0:
cmd = ['gem', 'install'] + opts
cmd.extend(['--version', versions[0], pkg_name])
CommandRunner(cmd).run()
else:
cmd = ['gem', 'install'] + opts
cmd.append(pkg_name)
CommandRunner(cmd).run()
def _handle_python_packages(self, packages):
"""very basic support for easy_install."""
# TODO(asalkeld) support versions
for pkg_name, versions in packages.items():
cmd = ['easy_install', pkg_name]
CommandRunner(cmd).run()
def _handle_zypper_packages(self, packages):
"""Handle installation, upgrade, or downgrade of packages via yum.
Arguments:
packages -- a package entries map of the form:
"pkg_name" : "version",
"pkg_name" : ["v1", "v2"],
"pkg_name" : []
For each package entry:
* if no version is supplied and the package is already installed, do
nothing
* if no version is supplied and the package is _not_ already
installed, install it
* if a version string is supplied, and the package is already
installed, determine whether to downgrade or upgrade (or do nothing
if version matches installed package)
* if a version array is supplied, choose the highest version from the
array and follow same logic for version string above
"""
# collect pkgs for batch processing at end
installs = []
downgrades = []
for pkg_name, versions in packages.items():
ver = RpmHelper.newest_rpm_version(versions)
pkg = "%s-%s" % (pkg_name, ver) if ver else pkg_name
if RpmHelper.rpm_package_installed(pkg):
# FIXME:print non-error, but skipping pkg
pass
elif not RpmHelper.zypper_package_available(pkg):
LOG.warning(
"Skipping package '%s' - unavailable via zypper", pkg)
elif not ver:
installs.append(pkg)
else:
current_ver = RpmHelper.rpm_package_version(pkg)
rc = RpmHelper.compare_rpm_versions(current_ver, ver)
if rc < 0:
installs.append(pkg)
elif rc > 0:
downgrades.append(pkg)
if installs:
RpmHelper.install(installs, rpms=False, zypper=True)
if downgrades:
RpmHelper.downgrade(downgrades, zypper=True)
def _handle_dnf_packages(self, packages):
"""Handle installation, upgrade, or downgrade of packages via dnf.
Arguments:
packages -- a package entries map of the form:
"pkg_name" : "version",
"pkg_name" : ["v1", "v2"],
"pkg_name" : []
For each package entry:
* if no version is supplied and the package is already installed, do
nothing
* if no version is supplied and the package is _not_ already
installed, install it
* if a version string is supplied, and the package is already
installed, determine whether to downgrade or upgrade (or do nothing
if version matches installed package)
* if a version array is supplied, choose the highest version from the
array and follow same logic for version string above
"""
# collect pkgs for batch processing at end
installs = []
downgrades = []
for pkg_name, versions in packages.items():
ver = RpmHelper.newest_rpm_version(versions)
pkg = "%s-%s" % (pkg_name, ver) if ver else pkg_name
if RpmHelper.rpm_package_installed(pkg):
# FIXME:print non-error, but skipping pkg
pass
elif not RpmHelper.dnf_package_available(pkg):
LOG.warning(
"Skipping package '%s'. Not available via yum" % pkg)
elif not ver:
installs.append(pkg)
else:
current_ver = RpmHelper.rpm_package_version(pkg)
rc = RpmHelper.compare_rpm_versions(current_ver, ver)
if rc < 0:
installs.append(pkg)
elif rc > 0:
downgrades.append(pkg)
if installs:
RpmHelper.install(installs, rpms=False, dnf=True)
if downgrades:
RpmHelper.downgrade(downgrades, rpms=False, dnf=True)
def _handle_yum_packages(self, packages):
"""Handle installation, upgrade, or downgrade of packages via yum.
Arguments:
packages -- a package entries map of the form:
"pkg_name" : "version",
"pkg_name" : ["v1", "v2"],
"pkg_name" : []
For each package entry:
* if no version is supplied and the package is already installed, do
nothing
* if no version is supplied and the package is _not_ already
installed, install it
* if a version string is supplied, and the package is already
installed, determine whether to downgrade or upgrade (or do nothing
if version matches installed package)
* if a version array is supplied, choose the highest version from the
array and follow same logic for version string above
"""
cmd = CommandRunner(['which', 'yum']).run()
if cmd.status == 1:
# yum not available, use DNF if available
self._handle_dnf_packages(packages)
return
elif cmd.status == 127:
# `which` command not found
LOG.info("`which` not found. Using yum without checking if dnf "
"is available")
# collect pkgs for batch processing at end
installs = []
downgrades = []
for pkg_name, versions in packages.items():
ver = RpmHelper.newest_rpm_version(versions)
pkg = "%s-%s" % (pkg_name, ver) if ver else pkg_name
if RpmHelper.rpm_package_installed(pkg):
# FIXME:print non-error, but skipping pkg
pass
elif not RpmHelper.yum_package_available(pkg):
LOG.warning(
"Skipping package '%s'. Not available via yum" % pkg)
elif not ver:
installs.append(pkg)
else:
current_ver = RpmHelper.rpm_package_version(pkg)
rc = RpmHelper.compare_rpm_versions(current_ver, ver)
if rc < 0:
installs.append(pkg)
elif rc > 0:
downgrades.append(pkg)
if installs:
RpmHelper.install(installs, rpms=False)
if downgrades:
RpmHelper.downgrade(downgrades)
def _handle_rpm_packages(self, packages):
"""Handle installation, upgrade, or downgrade of packages via rpm.
Arguments:
packages -- a package entries map of the form:
"pkg_name" : "url"
For each package entry:
* if the EXACT package is already installed, skip it
* if a different version of the package is installed, overwrite it
* if the package isn't installed, install it
"""
# FIXME(asalkeld): handle rpm installs
pass
def _handle_apt_packages(self, packages):
"""very basic support for apt."""
# TODO(asalkeld) support versions
pkg_list = list(packages)
env = {'DEBIAN_FRONTEND': 'noninteractive'}
cmd = ['apt-get', '-y', 'install'] + pkg_list
CommandRunner(cmd).run(env=env)
# map of function pointers to handle different package managers
_package_handlers = {"yum": _handle_yum_packages,
"dnf": _handle_dnf_packages,
"zypper": _handle_zypper_packages,
"rpm": _handle_rpm_packages,
"apt": _handle_apt_packages,
"rubygems": _handle_gem_packages,
"python": _handle_python_packages}
def _package_handler(self, manager_name):
handler = None
if manager_name in self._package_handlers:
handler = self._package_handlers[manager_name]
return handler
def apply_packages(self):
"""Install, upgrade, or downgrade packages listed.
Each package is a dict containing package name and a list of versions
Install order:
* dpkg
* rpm
* apt
* yum
* dnf
"""
if not self._packages:
return
try:
packages = sorted(
self._packages.items(), cmp=PackagesHandler._pkgsort)
except TypeError:
# On Python 3, we have to use key instead of cmp
# This could also work on Python 2.7, but not on 2.6
packages = sorted(
self._packages.items(),
key=functools.cmp_to_key(PackagesHandler._pkgsort))
for manager, package_entries in packages:
handler = self._package_handler(manager)
if not handler:
LOG.warning("Skipping invalid package type: %s" % manager)
else:
handler(self, package_entries)
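# Illustrative sketch (hypothetical metadata): the "packages" section of
# AWS::CloudFormation::Init maps a package manager name to a map of package
# names and version lists; an empty list means "any/latest version".
def _example_apply_packages():
    """Apply a small packages section (illustrative only)."""
    packages = {
        "yum": {"httpd": [], "wordpress": ["3.5", "3.5.1"]},
        "rubygems": {"chef": ["10.18.2"]},
    }
    PackagesHandler(packages).apply_packages()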
class FilesHandler(object):
def __init__(self, files):
self._files = files
def apply_files(self):
if not self._files:
return
for fdest, meta in self._files.items():
dest = fdest.encode()
try:
os.makedirs(os.path.dirname(dest))
except OSError as e:
if e.errno == errno.EEXIST:
LOG.debug(str(e))
else:
LOG.exception(e)
if 'content' in meta:
if isinstance(meta['content'], six.string_types):
f = open(dest, 'w+')
f.write(meta['content'])
f.close()
else:
f = open(dest, 'w+')
                    # json.dumps returns text; write it directly rather than
                    # encoding to bytes, which fails for text-mode files.
                    f.write(json.dumps(meta['content'], indent=4))
f.close()
elif 'source' in meta:
CommandRunner(['curl', '-o', dest, meta['source']]).run()
else:
LOG.error('%s %s' % (dest, str(meta)))
continue
uid = -1
gid = -1
if 'owner' in meta:
try:
user_info = pwd.getpwnam(meta['owner'])
uid = user_info[2]
except KeyError:
pass
if 'group' in meta:
try:
group_info = grp.getgrnam(meta['group'])
gid = group_info[2]
except KeyError:
pass
os.chown(dest, uid, gid)
if 'mode' in meta:
os.chmod(dest, int(meta['mode'], 8))
class SourcesHandler(object):
    '''tar, tar+gzip, tar+bz2 and zip.'''
_sources = {}
def __init__(self, sources):
self._sources = sources
def _url_to_tmp_filename(self, url):
tempdir = tempfile.mkdtemp()
atexit.register(lambda: shutil.rmtree(tempdir, True))
name = os.path.basename(url)
return os.path.join(tempdir, name)
def _splitext(self, path):
(r, ext) = os.path.splitext(path)
return (r, ext.lower())
def _github_ball_type(self, url):
ext = ""
if url.endswith('/'):
url = url[0:-1]
sp = url.split('/')
if len(sp) > 2:
http = sp[0].startswith('http')
github = sp[2].endswith('github.com')
btype = sp[-2]
if http and github:
if 'zipball' == btype:
ext = '.zip'
elif 'tarball' == btype:
ext = '.tgz'
return ext
def _source_type(self, url):
(r, ext) = self._splitext(url)
if ext == '.gz':
(r, ext2) = self._splitext(r)
if ext2 == '.tar':
ext = '.tgz'
elif ext == '.bz2':
(r, ext2) = self._splitext(r)
if ext2 == '.tar':
ext = '.tbz2'
elif ext == "":
ext = self._github_ball_type(url)
return ext
def _apply_source_cmd(self, dest, url):
cmd = ""
basename = os.path.basename(url)
stype = self._source_type(url)
if stype == '.tgz':
cmd = "curl -s '%s' | gunzip | tar -xvf -" % url
elif stype == '.tbz2':
cmd = "curl -s '%s' | bunzip2 | tar -xvf -" % url
elif stype == '.zip':
tmp = self._url_to_tmp_filename(url)
cmd = "curl -s -o '%s' '%s' && unzip -o '%s'" % (tmp, url, tmp)
elif stype == '.tar':
cmd = "curl -s '%s' | tar -xvf -" % url
elif stype == '.gz':
(r, ext) = self._splitext(basename)
cmd = "curl -s '%s' | gunzip > '%s'" % (url, r)
elif stype == '.bz2':
(r, ext) = self._splitext(basename)
cmd = "curl -s '%s' | bunzip2 > '%s'" % (url, r)
if cmd != '':
cmd = "mkdir -p '%s'; cd '%s'; %s" % (dest, dest, cmd)
return cmd
def _apply_source(self, dest, url):
cmd = self._apply_source_cmd(dest, url)
# FIXME bug 1498298
if cmd != '':
runner = CommandRunner(cmd, shell=True)
runner.run()
def apply_sources(self):
if not self._sources:
return
for dest, url in self._sources.items():
self._apply_source(dest, url)
class ServicesHandler(object):
_services = {}
def __init__(self, services, resource=None, hooks=None):
self._services = services
self.resource = resource
self.hooks = hooks
def _handle_sysv_command(self, service, command):
if os.path.exists("/bin/systemctl"):
service_exe = "/bin/systemctl"
service = '%s.service' % service
service_start = [service_exe, 'start', service]
service_status = [service_exe, 'status', service]
service_stop = [service_exe, 'stop', service]
elif os.path.exists("/sbin/service"):
service_exe = "/sbin/service"
service_start = [service_exe, service, 'start']
service_status = [service_exe, service, 'status']
service_stop = [service_exe, service, 'stop']
else:
service_exe = "/usr/sbin/service"
service_start = [service_exe, service, 'start']
service_status = [service_exe, service, 'status']
service_stop = [service_exe, service, 'stop']
if os.path.exists("/bin/systemctl"):
enable_exe = "/bin/systemctl"
enable_on = [enable_exe, 'enable', service]
enable_off = [enable_exe, 'disable', service]
elif os.path.exists("/sbin/chkconfig"):
enable_exe = "/sbin/chkconfig"
enable_on = [enable_exe, service, 'on']
enable_off = [enable_exe, service, 'off']
else:
enable_exe = "/usr/sbin/update-rc.d"
enable_on = [enable_exe, service, 'enable']
enable_off = [enable_exe, service, 'disable']
cmd = None
if "enable" == command:
cmd = enable_on
elif "disable" == command:
cmd = enable_off
elif "start" == command:
cmd = service_start
elif "stop" == command:
cmd = service_stop
elif "status" == command:
cmd = service_status
if cmd is not None:
command = CommandRunner(cmd)
command.run()
return command
else:
LOG.error("Unknown sysv command %s" % command)
def _initialize_service(self, handler, service, properties):
if "enabled" in properties:
enable = to_boolean(properties["enabled"])
if enable:
LOG.info("Enabling service %s" % service)
handler(self, service, "enable")
else:
LOG.info("Disabling service %s" % service)
handler(self, service, "disable")
if "ensureRunning" in properties:
ensure_running = to_boolean(properties["ensureRunning"])
command = handler(self, service, "status")
running = command.status == 0
if ensure_running and not running:
LOG.info("Starting service %s" % service)
handler(self, service, "start")
elif not ensure_running and running:
LOG.info("Stopping service %s" % service)
handler(self, service, "stop")
def _monitor_service(self, handler, service, properties):
if "ensureRunning" in properties:
ensure_running = to_boolean(properties["ensureRunning"])
command = handler(self, service, "status")
running = command.status == 0
if ensure_running and not running:
LOG.warning("Restarting service %s" % service)
start_cmd = handler(self, service, "start")
if start_cmd.status != 0:
LOG.warning('Service %s did not start. STDERR: %s' %
(service, start_cmd.stderr))
for h in self.hooks:
h.event('service.restarted', service, self.resource)
def _monitor_services(self, handler, services):
for service, properties in services.items():
self._monitor_service(handler, service, properties)
def _initialize_services(self, handler, services):
for service, properties in services.items():
self._initialize_service(handler, service, properties)
# map of function pointers to various service handlers
_service_handlers = {
"sysvinit": _handle_sysv_command,
"systemd": _handle_sysv_command
}
def _service_handler(self, manager_name):
handler = None
if manager_name in self._service_handlers:
handler = self._service_handlers[manager_name]
return handler
def apply_services(self):
"""Starts, stops, enables, disables services."""
if not self._services:
return
for manager, service_entries in self._services.items():
handler = self._service_handler(manager)
if not handler:
LOG.warning("Skipping invalid service type: %s" % manager)
else:
self._initialize_services(handler, service_entries)
def monitor_services(self):
"""Restarts failed services, and runs hooks."""
if not self._services:
return
for manager, service_entries in self._services.items():
handler = self._service_handler(manager)
if not handler:
LOG.warning("Skipping invalid service type: %s" % manager)
else:
self._monitor_services(handler, service_entries)
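# Illustrative sketch (hypothetical metadata): the "services" section maps a
# service manager name ("sysvinit" or "systemd", per _service_handlers above)
# to per-service properties; "enabled" controls boot-time enablement and
# "ensureRunning" the desired live state.
def _example_apply_services():
    """Apply a small services section (illustrative only)."""
    services = {
        "systemd": {
            "httpd": {"enabled": "true", "ensureRunning": "true"},
            "sendmail": {"enabled": "false", "ensureRunning": "false"},
        }
    }
    ServicesHandler(services).apply_services()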
class ConfigsetsHandler(object):
def __init__(self, configsets, selectedsets):
self.configsets = configsets
self.selectedsets = selectedsets
    def expand_sets(self, sets, executionlist):
        for elem in sets:
            if isinstance(elem, dict):
                # dict views have no pop() on Python 3, so materialise the
                # keys rather than calling .keys().pop()/.values().pop().
                dictkeys = [k for k in elem.keys()]
                if len(dictkeys) != 1 or dictkeys[0] != 'ConfigSet':
                    raise Exception('invalid ConfigSets metadata')
                dictkey = elem['ConfigSet']
try:
self.expand_sets(self.configsets[dictkey], executionlist)
except KeyError:
raise Exception("Undefined ConfigSet '%s' referenced"
% dictkey)
else:
executionlist.append(elem)
def get_configsets(self):
"""Returns a list of Configsets to execute in template."""
if not self.configsets:
if self.selectedsets:
raise Exception('Template has no configSets')
return
if not self.selectedsets:
if 'default' not in self.configsets:
raise Exception('Template has no default configSet, must'
' specify')
self.selectedsets = 'default'
selectedlist = [x.strip() for x in self.selectedsets.split(',')]
executionlist = []
for item in selectedlist:
if item not in self.configsets:
raise Exception("Requested configSet '%s' not in configSets"
" section" % item)
self.expand_sets(self.configsets[item], executionlist)
if not executionlist:
raise Exception(
"Requested configSet %s empty?" % self.selectedsets)
return executionlist
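# Illustrative sketch (hypothetical metadata): configSets may nest other sets
# via {"ConfigSet": "<name>"} entries, which expand_sets() resolves
# recursively into a flat, ordered list of config section names.
def _example_resolve_configsets():
    """Resolve the 'default' configSet into an ordered list (illustrative)."""
    configsets = {
        "default": [{"ConfigSet": "base"}, "post"],
        "base": ["install", "configure"],
    }
    # Expected result: ['install', 'configure', 'post']
    return ConfigsetsHandler(configsets, "default").get_configsets()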
def metadata_server_port(
datafile='/var/lib/heat-cfntools/cfn-metadata-server'):
"""Return the the metadata server port.
Reads the :NNNN from the end of the URL in cfn-metadata-server
"""
try:
f = open(datafile)
server_url = f.read().strip()
f.close()
except IOError:
return None
if len(server_url) < 1:
return None
if server_url[-1] == '/':
server_url = server_url[:-1]
try:
return int(server_url.split(':')[-1])
except ValueError:
return None
class CommandsHandlerRunError(Exception):
pass
class CommandsHandler(object):
def __init__(self, commands):
self.commands = commands
def apply_commands(self):
"""Execute commands on the instance in alphabetical order by name."""
if not self.commands:
return
for command_label in sorted(self.commands):
LOG.debug("%s is being processed" % command_label)
self._initialize_command(command_label,
self.commands[command_label])
def _initialize_command(self, command_label, properties):
command_status = None
cwd = None
env = properties.get("env", None)
if "cwd" in properties:
cwd = os.path.expanduser(properties["cwd"])
if not os.path.exists(cwd):
LOG.error("%s has failed. " % command_label +
"%s path does not exist" % cwd)
return
if "test" in properties:
test = CommandRunner(properties["test"], shell=True)
test_status = test.run('root', cwd, env).status
if test_status != 0:
LOG.info("%s test returns false, skipping command"
% command_label)
return
else:
LOG.debug("%s test returns true, proceeding" % command_label)
if "command" in properties:
try:
command = properties["command"]
shell = isinstance(command, six.string_types)
command = CommandRunner(command, shell=shell)
command.run('root', cwd, env)
command_status = command.status
except OSError as e:
if e.errno == errno.EEXIST:
LOG.debug(str(e))
else:
LOG.exception(e)
else:
LOG.error("%s has failed. " % command_label
+ "'command' property missing")
return
if command_status == 0:
LOG.info("%s has been successfully executed" % command_label)
else:
if ("ignoreErrors" in properties and
to_boolean(properties["ignoreErrors"])):
LOG.info("%s has failed (status=%d). Explicit ignoring"
% (command_label, command_status))
else:
raise CommandsHandlerRunError("%s has failed." % command_label)
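# Illustrative sketch (hypothetical metadata): commands run in alphabetical
# order of their labels; an optional "test" command gates execution, and
# "ignoreErrors" downgrades a failure to a log message instead of raising
# CommandsHandlerRunError.
def _example_apply_commands():
    """Apply a small commands section (illustrative only)."""
    commands = {
        "01_make_dir": {"command": "mkdir -p /tmp/example"},
        "02_touch_flag": {
            "command": ["touch", "/tmp/example/ready"],
            "test": "test -d /tmp/example",
            "ignoreErrors": "true",
        },
    }
    CommandsHandler(commands).apply_commands()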
class GroupsHandler(object):
def __init__(self, groups):
self.groups = groups
def apply_groups(self):
"""Create Linux/UNIX groups and assign group IDs."""
if not self.groups:
return
for group, properties in self.groups.items():
LOG.debug("%s group is being created" % group)
self._initialize_group(group, properties)
def _initialize_group(self, group, properties):
gid = properties.get("gid", None)
cmd = ['groupadd', group]
if gid is not None:
cmd.extend(['--gid', str(gid)])
command = CommandRunner(cmd)
command.run()
command_status = command.status
if command_status == 0:
LOG.info("%s has been successfully created" % group)
elif command_status == 9:
LOG.error("An error occurred creating %s group : " %
group + "group name not unique")
elif command_status == 4:
LOG.error("An error occurred creating %s group : " %
group + "GID not unique")
elif command_status == 3:
LOG.error("An error occurred creating %s group : " %
group + "GID not valid")
elif command_status == 2:
LOG.error("An error occurred creating %s group : " %
group + "Invalid syntax")
else:
LOG.error("An error occurred creating %s group" % group)
class UsersHandler(object):
def __init__(self, users):
self.users = users
def apply_users(self):
"""Create Linux/UNIX users and assign user IDs, groups and homedir."""
if not self.users:
return
for user, properties in self.users.items():
LOG.debug("%s user is being created" % user)
self._initialize_user(user, properties)
def _initialize_user(self, user, properties):
uid = properties.get("uid", None)
homeDir = properties.get("homeDir", None)
cmd = ['useradd', user]
if uid is not None:
cmd.extend(['--uid', six.text_type(uid)])
if homeDir is not None:
cmd.extend(['--home', six.text_type(homeDir)])
if "groups" in properties:
groups = ','.join(properties["groups"])
cmd.extend(['--groups', groups])
# Users are created as non-interactive system users with a shell
# of /sbin/nologin. This is by design and cannot be modified.
cmd.extend(['--shell', '/sbin/nologin'])
command = CommandRunner(cmd)
command.run()
command_status = command.status
if command_status == 0:
LOG.info("%s has been successfully created" % user)
elif command_status == 9:
LOG.error("An error occurred creating %s user : " %
user + "user name not unique")
elif command_status == 6:
LOG.error("An error occurred creating %s user : " %
user + "group does not exist")
elif command_status == 4:
LOG.error("An error occurred creating %s user : " %
user + "UID not unique")
elif command_status == 3:
LOG.error("An error occurred creating %s user : " %
user + "Invalid argument")
elif command_status == 2:
LOG.error("An error occurred creating %s user : " %
user + "Invalid syntax")
else:
LOG.error("An error occurred creating %s user" % user)
class MetadataServerConnectionError(Exception):
pass
class Metadata(object):
_metadata = None
_init_key = "AWS::CloudFormation::Init"
DEFAULT_PORT = 8000
def __init__(self, stack, resource, access_key=None,
secret_key=None, credentials_file=None, region=None,
configsets=None):
self.stack = stack
self.resource = resource
self.access_key = access_key
self.secret_key = secret_key
self.region = region
self.credentials_file = credentials_file
self.access_key = access_key
self.secret_key = secret_key
self.configsets = configsets
# TODO(asalkeld) is this metadata for the local resource?
self._is_local_metadata = True
self._metadata = None
self._has_changed = False
def remote_metadata(self):
"""Connect to the metadata server and retrieve the metadata."""
if self.credentials_file:
credentials = parse_creds_file(self.credentials_file)
access_key = credentials['AWSAccessKeyId']
secret_key = credentials['AWSSecretKey']
elif self.access_key and self.secret_key:
access_key = self.access_key
secret_key = self.secret_key
else:
raise MetadataServerConnectionError("No credentials!")
port = metadata_server_port() or self.DEFAULT_PORT
client = cloudformation.CloudFormationConnection(
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
is_secure=False, port=port,
path="/v1", debug=0)
res = client.describe_stack_resource(self.stack, self.resource)
# Note pending upstream patch will make this response a
# boto.cloudformation.stack.StackResourceDetail object
# which aligns better with all the existing calls
# see https://github.com/boto/boto/pull/857
resource_detail = res['DescribeStackResourceResponse'][
'DescribeStackResourceResult']['StackResourceDetail']
return resource_detail['Metadata']
def get_nova_meta(self,
cache_path='/var/lib/heat-cfntools/nova_meta.json'):
"""Get nova's meta_data.json and cache it.
Since this is called repeatedly return the cached metadata,
if we have it.
"""
url = 'http://169.254.169.254/openstack/2012-08-10/meta_data.json'
if not os.path.exists(cache_path):
cmd = ['curl', '-o', cache_path, url]
CommandRunner(cmd).run()
try:
with open(cache_path) as fd:
try:
return json.load(fd)
except ValueError:
pass
except IOError:
pass
return None
def get_instance_id(self):
"""Get the unique identifier for this server."""
instance_id = None
md = self.get_nova_meta()
if md is not None:
instance_id = md.get('uuid')
return instance_id
def get_tags(self):
"""Get the tags for this server."""
tags = {}
md = self.get_nova_meta()
if md is not None:
tags.update(md.get('meta', {}))
tags['InstanceId'] = md['uuid']
return tags
def retrieve(
self,
meta_str=None,
default_path='/var/lib/heat-cfntools/cfn-init-data',
last_path='/var/cache/heat-cfntools/last_metadata'):
"""Read the metadata from the given filename or from the remote server.
Returns:
True -- success
False -- error
"""
if self.resource is not None:
res_last_path = last_path + '_' + self.resource
else:
res_last_path = last_path
if meta_str:
self._data = meta_str
else:
try:
self._data = self.remote_metadata()
except MetadataServerConnectionError as ex:
LOG.warning(
"Unable to retrieve remote metadata : %s" % str(ex))
                # If reading remote metadata fails, we fall back on local
                # files. To get the most up-to-date version we try:
# /var/cache/heat-cfntools/last_metadata, followed by
# /var/lib/heat-cfntools/cfn-init-data
# This should allow us to do the right thing both during the
# first cfn-init run (when we only have cfn-init-data), and
# in the event of a temporary interruption to connectivity
# affecting cfn-hup, in which case we want to use the locally
# cached metadata or the logic below could re-run a stale
# cfn-init-data
fd = None
for filepath in [res_last_path, last_path, default_path]:
try:
fd = open(filepath)
except IOError:
LOG.warning("Unable to open local metadata : %s" %
filepath)
continue
else:
LOG.info("Opened local metadata %s" % filepath)
break
if fd:
self._data = fd.read()
fd.close()
else:
LOG.error("Unable to read any valid metadata!")
return
if isinstance(self._data, str):
self._metadata = json.loads(self._data)
else:
self._metadata = self._data
last_data = ""
for metadata_file in [res_last_path, last_path]:
try:
with open(metadata_file) as lm:
try:
last_data = json.load(lm)
except ValueError:
pass
lm.close()
except IOError:
LOG.warning("Unable to open local metadata : %s" %
metadata_file)
continue
if self._metadata != last_data:
self._has_changed = True
# if cache dir does not exist try to create it
cache_dir = os.path.dirname(last_path)
if not os.path.isdir(cache_dir):
try:
os.makedirs(cache_dir, mode=0o700)
except IOError as e:
LOG.warning('could not create metadata cache dir %s [%s]' %
(cache_dir, e))
return
# save current metadata to file
tmp_dir = os.path.dirname(last_path)
with tempfile.NamedTemporaryFile(dir=tmp_dir,
mode='wb',
delete=False) as cf:
os.chmod(cf.name, 0o600)
cf.write(json.dumps(self._metadata).encode('UTF-8'))
os.rename(cf.name, last_path)
cf.close()
if res_last_path != last_path:
shutil.copy(last_path, res_last_path)
return True
def __str__(self):
return json.dumps(self._metadata)
def display(self, key=None):
"""Print the metadata to the standard output stream.
        By default the full metadata is displayed, but the output can be
        limited to a specific key with the <key> argument.
Arguments:
key -- the metadata's key to display, nested keys can be specified
separating them by the dot character.
e.g., "foo.bar"
If the key contains a dot, it should be surrounded by single
quotes
e.g., "foo.'bar.1'"
"""
if self._metadata is None:
return
if key is None:
print(str(self))
return
value = None
md = self._metadata
while True:
key_match = re.match(r'^(?:(?:\'([^\']+)\')|([^\.]+))(?:\.|$)',
key)
if not key_match:
break
k = key_match.group(1) or key_match.group(2)
if isinstance(md, dict) and k in md:
key = key.replace(key_match.group(), '')
value = md = md[k]
else:
break
if key != '':
value = None
if value is not None:
print(json.dumps(value))
return
def _is_valid_metadata(self):
"""Should find the AWS::CloudFormation::Init json key."""
is_valid = (self._metadata and
self._init_key in self._metadata and
self._metadata[self._init_key])
if is_valid:
self._metadata = self._metadata[self._init_key]
return is_valid
def _process_config(self, config="config"):
"""Parse and process a config section.
* packages
* sources
* groups
* users
* files
* commands
* services
"""
try:
self._config = self._metadata[config]
except KeyError:
raise Exception("Could not find '%s' set in template, may need to"
" specify another set." % config)
PackagesHandler(self._config.get("packages")).apply_packages()
SourcesHandler(self._config.get("sources")).apply_sources()
GroupsHandler(self._config.get("groups")).apply_groups()
UsersHandler(self._config.get("users")).apply_users()
FilesHandler(self._config.get("files")).apply_files()
CommandsHandler(self._config.get("commands")).apply_commands()
ServicesHandler(self._config.get("services")).apply_services()
def cfn_init(self):
"""Process the resource metadata."""
if not self._is_valid_metadata():
raise Exception("invalid metadata")
else:
executionlist = ConfigsetsHandler(self._metadata.get("configSets"),
self.configsets).get_configsets()
if not executionlist:
self._process_config()
else:
for item in executionlist:
self._process_config(item)
def cfn_hup(self, hooks):
"""Process the resource metadata."""
if not self._is_valid_metadata():
LOG.debug(
'Metadata does not contain a %s section' % self._init_key)
if self._is_local_metadata:
self._config = self._metadata.get("config", {})
s = self._config.get("services")
sh = ServicesHandler(s, resource=self.resource, hooks=hooks)
sh.monitor_services()
if self._has_changed:
for h in hooks:
h.event('post.update', self.resource, self.resource)
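# Illustrative usage sketch (not part of this module): ``md`` is assumed to be
# an instance of the metadata class whose methods are defined above; its
# constructor and ``_init_key`` live outside this excerpt.
def _example_cfn_init_run(md):
    if md.retrieve():    # remote metadata, falling back to cached/local files
        md.display()     # dump the retrieved metadata to stdout
        md.cfn_init()    # apply packages, sources, files, commands, services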
|
|
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD (3-clause)
import numpy as np
from .epochs import Epochs
from .utils import check_fname, logger, verbose, _check_option
from .io.open import fiff_open
from .io.pick import pick_types, pick_types_forward
from .io.proj import (Projection, _has_eeg_average_ref_proj, _read_proj,
make_projector, make_eeg_average_ref_proj, _write_proj)
from .io.write import start_file, end_file
from .event import make_fixed_length_events
from .parallel import parallel_func
from .cov import _check_n_samples
from .forward import (is_fixed_orient, _subject_from_forward,
convert_forward_solution)
from .source_estimate import _make_stc
@verbose
def read_proj(fname, verbose=None):
"""Read projections from a FIF file.
Parameters
----------
fname : str
        The name of the file containing the projection vectors. It should end
        with -proj.fif or -proj.fif.gz.
%(verbose)s
Returns
-------
projs : list
The list of projection vectors.
See Also
--------
write_proj
"""
check_fname(fname, 'projection', ('-proj.fif', '-proj.fif.gz',
'_proj.fif', '_proj.fif.gz'))
ff, tree, _ = fiff_open(fname)
with ff as fid:
projs = _read_proj(fid, tree)
return projs
def write_proj(fname, projs):
"""Write projections to a FIF file.
Parameters
----------
fname : str
        The name of the file containing the projection vectors. It should end
        with -proj.fif or -proj.fif.gz.
projs : list
The list of projection vectors.
See Also
--------
read_proj
"""
check_fname(fname, 'projection', ('-proj.fif', '-proj.fif.gz',
'_proj.fif', '_proj.fif.gz'))
with start_file(fname) as fid:
_write_proj(fid, projs)
end_file(fid)
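# A minimal round-trip sketch (illustrative only; the file names below are
# hypothetical): read projections from disk and write them back out.
def _example_proj_roundtrip(fname='sample-proj.fif', out_fname='copy-proj.fif'):
    projs = read_proj(fname)
    write_proj(out_fname, projs)
    return projs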
@verbose
def _compute_proj(data, info, n_grad, n_mag, n_eeg, desc_prefix,
meg='separate', verbose=None):
from scipy import linalg
grad_ind = pick_types(info, meg='grad', ref_meg=False, exclude='bads')
mag_ind = pick_types(info, meg='mag', ref_meg=False, exclude='bads')
eeg_ind = pick_types(info, meg=False, eeg=True, ref_meg=False,
exclude='bads')
_check_option('meg', meg, ['separate', 'combined'])
if meg == 'combined':
if n_grad != n_mag:
            raise ValueError('n_grad (%d) must be equal to n_mag (%d) when '
                             'using meg="combined"' % (n_grad, n_mag))
kinds = ['meg', '', 'eeg']
n_mag = 0
grad_ind = pick_types(info, meg=True, ref_meg=False, exclude='bads')
if (n_grad > 0) and len(grad_ind) == 0:
logger.info("No MEG channels found for joint estimation. "
"Forcing n_grad=n_mag=0")
n_grad = 0
else:
kinds = ['planar', 'axial', 'eeg']
if (n_grad > 0) and len(grad_ind) == 0:
logger.info("No gradiometers found. Forcing n_grad to 0")
n_grad = 0
if (n_mag > 0) and len(mag_ind) == 0:
logger.info("No magnetometers found. Forcing n_mag to 0")
n_mag = 0
if (n_eeg > 0) and len(eeg_ind) == 0:
logger.info("No EEG channels found. Forcing n_eeg to 0")
n_eeg = 0
ch_names = info['ch_names']
grad_names, mag_names, eeg_names = ([ch_names[k] for k in ind]
for ind in [grad_ind, mag_ind,
eeg_ind])
projs = []
for n, ind, names, desc in zip([n_grad, n_mag, n_eeg],
[grad_ind, mag_ind, eeg_ind],
[grad_names, mag_names, eeg_names],
kinds):
if n == 0:
continue
data_ind = data[ind][:, ind]
# data is the covariance matrix: U * S**2 * Ut
U, Sexp2, _ = linalg.svd(data_ind, full_matrices=False)
U = U[:, :n]
exp_var = Sexp2 / Sexp2.sum()
exp_var = exp_var[:n]
for k, (u, var) in enumerate(zip(U.T, exp_var)):
proj_data = dict(col_names=names, row_names=None,
data=u[np.newaxis, :], nrow=1, ncol=u.size)
this_desc = "%s-%s-PCA-%02d" % (desc, desc_prefix, k + 1)
logger.info("Adding projection: %s" % this_desc)
proj = Projection(active=False, data=proj_data,
desc=this_desc, kind=1, explained_var=var)
projs.append(proj)
return projs
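# Standalone sketch of the PCA step used above (illustrative only): the leading
# left-singular vectors of the symmetric covariance matrix give the projection
# directions, and the normalized singular values give the explained variance.
def _example_leading_component(cov):
    from scipy import linalg
    U, s, _ = linalg.svd(cov, full_matrices=False)
    return U[:, 0], s[0] / s.sum()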
@verbose
def compute_proj_epochs(epochs, n_grad=2, n_mag=2, n_eeg=2, n_jobs=1,
desc_prefix=None, meg='separate', verbose=None):
"""Compute SSP (signal-space projection) vectors on epoched data.
%(compute_ssp)s
Parameters
----------
epochs : instance of Epochs
The epochs containing the artifact.
n_grad : int
Number of vectors for gradiometers.
n_mag : int
Number of vectors for magnetometers.
n_eeg : int
Number of vectors for EEG channels.
%(n_jobs)s
Number of jobs to use to compute covariance.
desc_prefix : str | None
The description prefix to use. If None, one will be created based on
the event_id, tmin, and tmax.
meg : str
Can be 'separate' (default) or 'combined' to compute projectors
for magnetometers and gradiometers separately or jointly.
If 'combined', ``n_mag == n_grad`` is required and the number of
projectors computed for MEG will be ``n_mag``.
.. versionadded:: 0.18
%(verbose)s
Returns
-------
    projs : list
List of projection vectors.
See Also
--------
compute_proj_raw, compute_proj_evoked
"""
# compute data covariance
data = _compute_cov_epochs(epochs, n_jobs)
event_id = epochs.event_id
if event_id is None or len(list(event_id.keys())) == 0:
event_id = '0'
elif len(event_id.keys()) == 1:
event_id = str(list(event_id.values())[0])
else:
event_id = 'Multiple-events'
if desc_prefix is None:
desc_prefix = "%s-%-.3f-%-.3f" % (event_id, epochs.tmin, epochs.tmax)
return _compute_proj(data, epochs.info, n_grad, n_mag, n_eeg, desc_prefix,
meg=meg)
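# Illustrative sketch (hypothetical helper, not part of the public API): compute
# SSP projectors from artifact epochs and attach them to the epochs object.
def _example_epochs_projs(epochs):
    projs = compute_proj_epochs(epochs, n_grad=2, n_mag=2, n_eeg=2)
    epochs.add_proj(projs)  # applied lazily via epochs.apply_proj()
    return projs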
def _compute_cov_epochs(epochs, n_jobs):
"""Compute epochs covariance."""
parallel, p_fun, _ = parallel_func(np.dot, n_jobs)
data = parallel(p_fun(e, e.T) for e in epochs)
n_epochs = len(data)
if n_epochs == 0:
raise RuntimeError('No good epochs found')
n_chan, n_samples = epochs.info['nchan'], len(epochs.times)
_check_n_samples(n_samples * n_epochs, n_chan)
data = sum(data)
return data
@verbose
def compute_proj_evoked(evoked, n_grad=2, n_mag=2, n_eeg=2, desc_prefix=None,
meg='separate', verbose=None):
"""Compute SSP (signal-space projection) vectors on evoked data.
%(compute_ssp)s
Parameters
----------
evoked : instance of Evoked
The Evoked obtained by averaging the artifact.
n_grad : int
Number of vectors for gradiometers.
n_mag : int
Number of vectors for magnetometers.
n_eeg : int
Number of vectors for EEG channels.
desc_prefix : str | None
The description prefix to use. If None, one will be created based on
tmin and tmax.
.. versionadded:: 0.17
meg : str
Can be 'separate' (default) or 'combined' to compute projectors
for magnetometers and gradiometers separately or jointly.
If 'combined', ``n_mag == n_grad`` is required and the number of
projectors computed for MEG will be ``n_mag``.
.. versionadded:: 0.18
%(verbose)s
Returns
-------
projs : list
List of projection vectors.
See Also
--------
compute_proj_raw, compute_proj_epochs
"""
data = np.dot(evoked.data, evoked.data.T) # compute data covariance
if desc_prefix is None:
desc_prefix = "%-.3f-%-.3f" % (evoked.times[0], evoked.times[-1])
return _compute_proj(data, evoked.info, n_grad, n_mag, n_eeg, desc_prefix,
meg=meg)
@verbose
def compute_proj_raw(raw, start=0, stop=None, duration=1, n_grad=2, n_mag=2,
n_eeg=0, reject=None, flat=None, n_jobs=1, meg='separate',
verbose=None):
"""Compute SSP (signal-space projection) vectors on continuous data.
%(compute_ssp)s
Parameters
----------
raw : instance of Raw
A raw object to use the data from.
start : float
Time (in sec) to start computing SSP.
stop : float
Time (in sec) to stop computing SSP.
None will go to the end of the file.
duration : float
Duration (in sec) to chunk data into for SSP
If duration is None, data will not be chunked.
n_grad : int
Number of vectors for gradiometers.
n_mag : int
Number of vectors for magnetometers.
n_eeg : int
Number of vectors for EEG channels.
reject : dict | None
Epoch rejection configuration (see Epochs).
flat : dict | None
Epoch flat configuration (see Epochs).
%(n_jobs)s
Number of jobs to use to compute covariance.
meg : str
Can be 'separate' (default) or 'combined' to compute projectors
for magnetometers and gradiometers separately or jointly.
If 'combined', ``n_mag == n_grad`` is required and the number of
projectors computed for MEG will be ``n_mag``.
.. versionadded:: 0.18
%(verbose)s
Returns
-------
    projs : list
List of projection vectors.
See Also
--------
compute_proj_epochs, compute_proj_evoked
"""
if duration is not None:
duration = np.round(duration * raw.info['sfreq']) / raw.info['sfreq']
events = make_fixed_length_events(raw, 999, start, stop, duration)
picks = pick_types(raw.info, meg=True, eeg=True, eog=True, ecg=True,
emg=True, exclude='bads')
epochs = Epochs(raw, events, None, tmin=0.,
tmax=duration - 1. / raw.info['sfreq'],
picks=picks, reject=reject, flat=flat,
baseline=None, proj=False)
data = _compute_cov_epochs(epochs, n_jobs)
info = epochs.info
if not stop:
stop = raw.n_times / raw.info['sfreq']
else:
# convert to sample indices
start = max(raw.time_as_index(start)[0], 0)
stop = raw.time_as_index(stop)[0] if stop else raw.n_times
stop = min(stop, raw.n_times)
data, times = raw[:, start:stop]
_check_n_samples(stop - start, data.shape[0])
data = np.dot(data, data.T) # compute data covariance
info = raw.info
# convert back to times
start = start / raw.info['sfreq']
stop = stop / raw.info['sfreq']
desc_prefix = "Raw-%-.3f-%-.3f" % (start, stop)
projs = _compute_proj(data, info, n_grad, n_mag, n_eeg, desc_prefix,
meg=meg)
return projs
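# Illustrative sketch (hypothetical helper): compute SSP projectors from 1-s
# chunks of continuous data and attach them to the Raw object.
def _example_raw_projs(raw):
    projs = compute_proj_raw(raw, duration=1., n_grad=2, n_mag=2, n_eeg=0)
    raw.add_proj(projs)
    return projs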
@verbose
def sensitivity_map(fwd, projs=None, ch_type='grad', mode='fixed', exclude=[],
verbose=None):
"""Compute sensitivity map.
Such maps are used to know how much sources are visible by a type
of sensor, and how much projections shadow some sources.
Parameters
----------
fwd : Forward
The forward operator.
projs : list
List of projection vectors.
ch_type : 'grad' | 'mag' | 'eeg'
The type of sensors to use.
mode : str
The type of sensitivity map computed. See manual. Should be 'free',
'fixed', 'ratio', 'radiality', 'angle', 'remaining', or 'dampening'
corresponding to the argument --map 1, 2, 3, 4, 5, 6 and 7 of the
command mne_sensitivity_map.
exclude : list of str | str
List of channels to exclude. If empty do not exclude any (default).
If 'bads', exclude channels in fwd['info']['bads'].
%(verbose)s
Returns
-------
stc : SourceEstimate | VolSourceEstimate
The sensitivity map as a SourceEstimate or VolSourceEstimate instance
for visualization.
"""
from scipy import linalg
# check strings
_check_option('ch_type', ch_type, ['eeg', 'grad', 'mag'])
_check_option('mode', mode, ['free', 'fixed', 'ratio', 'radiality',
'angle', 'remaining', 'dampening'])
# check forward
if is_fixed_orient(fwd, orig=True):
        raise ValueError('fwd must be computed with free orientation')
# limit forward (this will make a copy of the data for us)
if ch_type == 'eeg':
fwd = pick_types_forward(fwd, meg=False, eeg=True, exclude=exclude)
else:
fwd = pick_types_forward(fwd, meg=ch_type, eeg=False, exclude=exclude)
convert_forward_solution(fwd, surf_ori=True, force_fixed=False,
copy=False, verbose=False)
if not fwd['surf_ori'] or is_fixed_orient(fwd):
raise RuntimeError('Error converting solution, please notify '
'mne-python developers')
gain = fwd['sol']['data']
# Make sure EEG has average
if ch_type == 'eeg':
if projs is None or not _has_eeg_average_ref_proj(projs):
eeg_ave = [make_eeg_average_ref_proj(fwd['info'])]
else:
eeg_ave = []
projs = eeg_ave if projs is None else projs + eeg_ave
# Construct the projector
residual_types = ['angle', 'remaining', 'dampening']
if projs is not None:
proj, ncomp, U = make_projector(projs, fwd['sol']['row_names'],
include_active=True)
# do projection for most types
if mode not in residual_types:
gain = np.dot(proj, gain)
elif ncomp == 0:
raise RuntimeError('No valid projectors found for channel type '
'%s, cannot compute %s' % (ch_type, mode))
# can only run the last couple methods if there are projectors
elif mode in residual_types:
raise ValueError('No projectors used, cannot compute %s' % mode)
n_sensors, n_dipoles = gain.shape
n_locations = n_dipoles // 3
sensitivity_map = np.empty(n_locations)
for k in range(n_locations):
gg = gain[:, 3 * k:3 * (k + 1)]
if mode != 'fixed':
s = linalg.svd(gg, full_matrices=False, compute_uv=False)
if mode == 'free':
sensitivity_map[k] = s[0]
else:
gz = linalg.norm(gg[:, 2]) # the normal component
if mode == 'fixed':
sensitivity_map[k] = gz
elif mode == 'ratio':
sensitivity_map[k] = gz / s[0]
elif mode == 'radiality':
sensitivity_map[k] = 1. - (gz / s[0])
else:
if mode == 'angle':
co = linalg.norm(np.dot(gg[:, 2], U))
sensitivity_map[k] = co / gz
else:
p = linalg.norm(np.dot(proj, gg[:, 2]))
if mode == 'remaining':
sensitivity_map[k] = p / gz
elif mode == 'dampening':
sensitivity_map[k] = 1. - p / gz
else:
raise ValueError('Unknown mode type (got %s)' % mode)
# only normalize fixed and free methods
if mode in ['fixed', 'free']:
sensitivity_map /= np.max(sensitivity_map)
subject = _subject_from_forward(fwd)
vertices = [s['vertno'] for s in fwd['src']]
return _make_stc(sensitivity_map[:, np.newaxis], vertices, fwd['src'].kind,
tmin=0., tstep=1., subject=subject)
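# Illustrative sketch (hypothetical helper): a fixed-orientation sensitivity
# map for gradiometers, optionally shadowed by a set of projectors.
def _example_sensitivity_map(fwd, projs=None):
    return sensitivity_map(fwd, projs=projs, ch_type='grad', mode='fixed')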
|
|
from cslvr import *
# Explicit imports for the raster handling below; these may not be provided by
# the star import above.
from osgeo import gdal
from scipy.interpolate import RectBivariateSpline
import numpy as np
"""
Things to Do:
insert boundary conditions that velocity goes to 0 at boundaries doesn't seem to try to calc that currently
start time steping so I can start trying to calculate changes in thickness
Obtain correct surface for flow to occur
split into subdomains to allow mainflow to have different slope from Loket?
TEST CHANGEssss
"""
#######################################################################
###################Mesh Creation, start D2model #######################
#######################################################################
#Read in Mesh from gmsh .xml file
mesh1 = Mesh("2dmesh.xml")
mesh2 = Mesh()
coor = mesh1.coordinates()
boundary_parts = MeshFunction('size_t', mesh1, mesh1.topology().dim()-1)
#directory for results
plt_dir = './velbalance_results/'
# the balance velocity uses a 2D-model :
model = D2Model(mesh1, out_dir=plt_dir, order=1)
V = VectorFunctionSpace(mesh1, "Lagrange", 2)
Q = FunctionSpace(mesh1, "Lagrange", 1)
W = V * Q
######################################################################
####################### No Slip Boundary #############################
######################################################################
data = gdal.Open('input_data_bed_v2/DEM_2010/ifsar_2010.tif')
S_array = data.ReadAsArray()[::-1,:]
ncols = data.RasterXSize
nrows = data.RasterYSize
transf = data.GetGeoTransform()
x = arange(transf[0],transf[0]+transf[1]*data.RasterXSize,transf[1])
y = arange(transf[3],transf[3]+transf[5]*data.RasterYSize,transf[5])[::-1]
S_spline = RectBivariateSpline(x,y,S_array.T,kx=1,ky=1,s=0)
data = gdal.Open('input_data_bed_v2/BED_MC/bed.tif')
B_array = data.ReadAsArray()[::-1,:]
ncols = data.RasterXSize
nrows = data.RasterYSize
transf = data.GetGeoTransform()
x = arange(transf[0],transf[0]+transf[1]*data.RasterXSize,transf[1])
y = arange(transf[3],transf[3]+transf[5]*data.RasterYSize,transf[5])[::-1]
B_spline = RectBivariateSpline(x,y,B_array.T,kx=1,ky=1,s=0)
data = gdal.Open('input_data_bed_v2/SMB_2010_2013/mb_field_25.tif')
adot_array = data.ReadAsArray()[::-1,:]
adot_array = fill(adot_array,adot_array==adot_array.min())
ncols = data.RasterXSize
nrows = data.RasterYSize
transf = data.GetGeoTransform()
x = arange(transf[0],transf[0]+transf[1]*data.RasterXSize,transf[1])
y = arange(transf[3],transf[3]+transf[5]*data.RasterYSize,transf[5])[::-1]
adot_spline = RectBivariateSpline(x,y,adot_array.T,kx=1,ky=1,s=0)
data = gdal.Open('input_data_bed_v2/DH_2010_2013/dhdt_weq_lower.tif')
dhdt_array = data.ReadAsArray()[::-1,:]
dhdt_array[dhdt_array<-1000] = 0
dhdt_array = fill(dhdt_array,dhdt_array==dhdt_array.min())
ncols = data.RasterXSize
nrows = data.RasterYSize
transf = data.GetGeoTransform()
x = arange(transf[0],transf[0]+transf[1]*data.RasterXSize,transf[1])
y = arange(transf[3],transf[3]+transf[5]*data.RasterYSize,transf[5])[::-1]
dhdt_spline = RectBivariateSpline(x,y,dhdt_array.T,kx=1,ky=1,s=0)
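# A minimal sketch of the repeated raster-loading pattern above, factored into a
# helper; the function name is illustrative and assumes gdal, arange and
# RectBivariateSpline are in scope as in this script.
def load_raster_spline(path, flip_rows=True):
    data = gdal.Open(path)
    arr = data.ReadAsArray()[::-1, :] if flip_rows else data.ReadAsArray()
    transf = data.GetGeoTransform()
    x = arange(transf[0], transf[0] + transf[1]*data.RasterXSize, transf[1])
    y = arange(transf[3], transf[3] + transf[5]*data.RasterYSize, transf[5])[::-1]
    return RectBivariateSpline(x, y, arr.T, kx=1, ky=1, s=0)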
##########################################################
################# SET PETSC OPTIONS ####################
##########################################################
PETScOptions.set("ksp_type","preonly")
PETScOptions.set("pc_type","lu")
PETScOptions.set("pc_factor_mat_solver_package","mumps")
PETScOptions.set("mat_mumps_icntl_14","1000")
PETScOptions.set("ksp_final_residual","0")
##########################################################
################# SET FENICS OPTIONS ###################
##########################################################
parameters['form_compiler']['quadrature_degree'] = 2
parameters['form_compiler']['cpp_optimize'] = True
parameters['form_compiler']['representation'] = 'quadrature'
#parameters['form_compiler']['precision'] = 30
parameters['allow_extrapolation'] = True
ffc_options = {"optimize": True, \
"eliminate_zeros": True, \
"precompute_basis_const": True, \
"precompute_ip_const": True}
##########################################################
#################### CONSTANTS #########################
##########################################################
# TIME
minute = 60.0
hour = 60*minute
day = 24*hour
year = 365*day
# CONSTANTS
rho = 917.
g = 9.81
# RHEOLOGICAL CONSTANTS
rho_i = 910.
n = 3.0
Bc = 3.61e-13*year
Bw = 1.73e3*year
Qc = 6e4
Qw = 13.9e4
Rc = 8.314
gamma = 8.7e-4
eps_reg = Constant(1e-10)
# THERMAL CONTANTS
k = 2.1*year
Cp = 2009.
kappa = k/(rho_i*Cp)
q_geo = 0.042*year
# ADJOINT REG
theta = Constant(1e-10)
# MASS
thklim = 10.
dt = Constant(0.001)
#######################################################################
##########################Calc S and B, plot S ########################
#######################################################################
S0 = 400  # surface elevation of the glacier at the accumulation zone
B0 = 0    # bed elevation of the glacier; does not affect the flow
L  = 4    # length of the domain, chosen to put the glacier-slope gradient within the mesh
# Hardcoded surface and bed values, currently sloped so that flow goes down
# both the tributary and the main trunk.
# Degree 3 lets the expression depend on (x, y, z).
# TODO: express the surface slope in terms of an angle.
S = Expression('S0 + S0*x[1]/(.8*L) - S0*x[0]*x[0]/(2*L)', S0=S0,L=L, degree=3)
B = Expression('B0', B0=B0, degree=3)
# Initialize the bed and surface expressions in the model
model.init_B(B)
model.init_S(S)
# Supposed to initialize Glen's flow-law exponent; does not change the output
model.n(3)
# Figure out the surface range and plot the surface with height contours
S_min = model.S.vector().min()
S_max = model.S.vector().max()
S_lvls = array([S_min, 200 , 300 , 400, 500, S_max])
# Plot the surface
plot_variable(u=model.S, name='S', direc=plt_dir, figsize=(5, 5),
              levels=S_lvls, tp=True, show=False, cb=False,
              contour_type='lines', hide_ax_tick_labels=True)
# The plot will not render without this call. TODO: restore the old boundary
# conditions from Blackrapids.py?
model.calculate_boundaries(latmesh=False, mask=None, lat_mask=None, adot=None,
                           U_mask=None, mark_divide=False)
#######################################################################
################### Solve BalanceVelocity and Graph ###################
#######################################################################
#direction of flow used in BalanceVelocity.direction_of_flow()
d = (model.S.dx(0),-model.S.dx(1))
# kappa is a floating-point value giving the surface-smoothing radius in units of ice thickness H = S - B
#kappas = [0,5,10]
#methods = ['SUPG', 'SSM', 'GLS']
kappa = 10
stabilization_method = 'GLS'
method = stabilization_method
annotate = False  # these solves are not annotated for the adjoint
#######################################################################
################### Initial Balancevelocity ###################
#######################################################################
S = model.S
B = model.B
H = S - B
h = model.h
N = model.N
uhat = model.uhat
vhat = model.vhat
adot = model.adot
Fb = model.Fb
# form to calculate direction of flow (down driving stress gradient) :
phi = TestFunction(Q)
ubar = TrialFunction(Q)
kappa = Constant(kappa)
# stabilization test space :
Uhat = as_vector([uhat, vhat])
tau = 1 / (2*H/h + div(H*Uhat))
phihat = phi + tau * dot(Uhat, grad(phi))
# the left-hand side :
def L(u): return u*H*div(Uhat) + dot(grad(u*H), Uhat)
def L_star(u): return u*H*div(Uhat) - dot(grad(u*H), Uhat)
def L_adv(u): return dot(grad(u*H), Uhat)
Nb = sqrt(B.dx(0)**2 + B.dx(1)**2 + 1)
Ns = sqrt(S.dx(0)**2 + S.dx(1)**2 + 1)
f = Ns*adot - Nb*Fb
# use streamline-upwind/Petrov-Galerkin :
if stabilization_method == 'SUPG':
s = " - using streamline-upwind/Petrov-Galerkin stabilization -"
model.B = + L(ubar) * phi * dx \
+ inner(L_adv(phi), tau*L(ubar)) * dx
model.a = + f * phi * dx \
+ inner(L_adv(phi), tau*f) * dx
# use Galerkin/least-squares
elif stabilization_method == 'GLS':
s = " - using Galerkin/least-squares stabilization -"
model.B = + L(ubar) * phi * dx \
+ inner(L(phi), tau*L(ubar)) * dx
model.a = + f * phi * dx \
+ inner(L(phi), tau*f) * dx
# use subgrid-scale-model :
elif stabilization_method == 'SSM':
s = " - using subgrid-scale-model stabilization -"
model.B = + L(ubar) * phi * dx \
- inner(L_star(phi), tau*L(ubar)) * dx
model.a = + f * phi * dx \
- inner(L_star(phi), tau*f) * dx
print_text(s, cls=model)
#######################################################################
################### Solve direction of flow ###################
#######################################################################
phi = TestFunction(Q)
d_x = TrialFunction(Q)
d_y = TrialFunction(Q)
kappa = Constant(model.kappa)
# horizontally smoothed direction of flow :
a_dSdx = + d_x * phi * dx + (kappa*H)**2 * dot(grad(phi), grad(d_x)) * dx \
- (kappa*H)**2 * dot(grad(d_x), N) * phi * ds
L_dSdx = d[0] * phi * dx
a_dSdy = + d_y * phi * dx + (kappa*H)**2 * dot(grad(phi), grad(d_y)) * dx \
- (kappa*H)**2 * dot(grad(d_y), N) * phi * ds
L_dSdy = d[1] * phi*dx
# update velocity direction :
s = "::: solving for smoothed x-component of flow direction " + \
"with kappa = %g :::" % model.kappa
print_text(s, cls=model)
solve(a_dSdx == L_dSdx, model.d_x, annotate=annotate)
print_min_max(model.d_x, 'd_x')
s = "::: solving for smoothed y-component of flow direction " + \
"with kappa = %g :::" % model.kappa
print_text(s, cls=model)
solve(a_dSdy == L_dSdy, model.d_y, annotate=annotate)
print_min_max(model.d_y, 'd_y')
# normalize the direction vector :
s = r"::: calculating normalized flux direction from \nabla S:::"
print_text(s, cls=model)
d_x_v = model.d_x.vector().array()
d_y_v = model.d_y.vector().array()
d_n_v = np.sqrt(d_x_v**2 + d_y_v**2 + 1e-16)
model.assign_variable(model.uhat, d_x_v / d_n_v)
model.assign_variable(model.vhat, d_y_v / d_n_v)
#######################################################################
############################ Solve ###################################
#######################################################################
s = "::: solving velocity balance magnitude :::"
print_text(s, cls=model)
solve(model.B == model.a, model.Ubar, annotate=annotate)
print_min_max(model.Ubar, 'Ubar')
# enforce positivity of balance-velocity :
s = "::: removing negative values of balance velocity :::"
print_text(s, cls=model)
Ubar_v = model.Ubar.vector().array()
Ubar_v[Ubar_v < 0] = 0
model.assign_variable(model.Ubar, Ubar_v)
#######################################################################
############################ Graph ###################################
#######################################################################
U_max = model.Ubar.vector().max()
U_min = model.Ubar.vector().min()
#U_lvls = array([U_min, 2, 10, 20, 50, 100, 200, 500, 1000, U_max])
# hand-chosen intervals to get contours on the graph; will need to be generalized later
U_lvls = array([U_min,.4,.8,1.2,1.4,1.5,1.7,2,2.4,2.8,3.2,3.6,U_max])
name = 'Ubar_%iH_kappa_%i_%s' % (5, float(kappa), method)
tit = r'$\bar{u}_{%i}$' % float(kappa)
#plot_variable(model.Ubar, name=name, direc=plt_dir,
# figsize = (8,3),
# equal_axes = False)
plot_variable(model.Ubar , name=name , direc=plt_dir ,
title=tit , show=False ,
levels=U_lvls , tp=False , cb_format='%.1e')
"""
for kappa in kappas:
for method in methods:
bv = BalanceVelocity(model, kappa=kappa, stabilization_method=method)
bv.solve_direction_of_flow(d)
bv.solve()
U_max = model.Ubar.vector().max()
U_min = model.Ubar.vector().min()
#U_lvls = array([U_min, 2, 10, 20, 50, 100, 200, 500, 1000, U_max])
#hand chose intervals to have contours on graph, will need to generalize later
U_lvls = array([U_min,.4,.8,1.2,1.4,1.5,1.7,2,2.4,2.8,3.2,3.6,U_max])
name = 'Ubar_%iH_kappa_%i_%s' % (5, kappa, method)
tit = r'$\bar{u}_{%i}$' % kappa
#plot_variable(model.Ubar, name=name, direc=plt_dir,
# figsize = (8,3),
# equal_axes = False)
plot_variable(model.Ubar , name=name , direc=plt_dir ,
title=tit , show=False ,
levels=U_lvls , tp=False , cb_format='%.1e')
"""
|
|
from django.utils.datastructures import SortedDict
from tower import ugettext_lazy as _lazy
# The number of answers per page.
ANSWERS_PER_PAGE = 20
# The number of questions per page.
QUESTIONS_PER_PAGE = 20
# Highest ranking to show for a user
HIGHEST_RANKING = 100
# Special tag names:
ESCALATE_TAG_NAME = 'escalate'
NEEDS_INFO_TAG_NAME = 'needsinfo'
OFFTOPIC_TAG_NAME = 'offtopic'
# Escalation config
ESCALATE_EXCLUDE_PRODUCTS = ['thunderbird', 'webmaker', 'open-badges']
# AAQ config:
products = SortedDict([
('desktop', {
'name': _lazy(u'Firefox for Desktop'),
'subtitle': _lazy(u'Windows, Mac, or Linux'),
'extra_fields': ['troubleshooting', 'ff_version', 'os', 'plugins'],
'tags': ['desktop'],
'products': ['firefox'],
'categories': SortedDict([
# TODO: Just use the IA topics for this.
# See bug 979397
('download-and-install', {
'name': _lazy(u'Download, install and migration'),
'topic': 'download-and-install',
'tags': ['download-and-install'],
}),
('privacy-and-security', {
'name': _lazy(u'Privacy and security settings'),
'topic': 'privacy-and-security',
'tags': ['privacy-and-security'],
}),
('customize', {
'name': _lazy(u'Customize controls, options and add-ons'),
'topic': 'customize',
'tags': ['customize'],
}),
('fix-problems', {
'name': _lazy(u'Fix slowness, crashing, error messages and '
u'other problems'),
'topic': 'fix-problems',
'tags': ['fix-problems'],
}),
('tips', {
'name': _lazy(u'Tips and tricks'),
'topic': 'tips',
'tags': ['tips'],
}),
('bookmarks', {
'name': _lazy(u'Bookmarks'),
'topic': 'bookmarks',
'tags': ['bookmarks'],
}),
('cookies', {
'name': _lazy(u'Cookies'),
'topic': 'cookies',
'tags': ['cookies'],
}),
('tabs', {
'name': _lazy(u'Tabs'),
'topic': 'tabs',
'tags': ['tabs'],
}),
('websites', {
'name': _lazy(u'Websites'),
'topic': 'websites',
'tags': ['websites'],
}),
('other', {
'name': _lazy(u'Other'),
'topic': 'other',
'tags': ['other'],
}),
])
}),
('mobile', {
'name': _lazy(u'Firefox for Mobile'),
'subtitle': _lazy(u'Android'),
'extra_fields': ['ff_version', 'os', 'plugins'],
'tags': ['mobile'],
'products': ['mobile'],
'categories': SortedDict([
# TODO: Just use the IA topics for this.
# See bug 979397
('download-and-install', {
'name': _lazy(u'Download, install and migration'),
'topic': 'download-and-install',
'tags': ['download-and-install'],
}),
('privacy-and-security', {
'name': _lazy(u'Privacy and security settings'),
'topic': 'privacy-and-security',
'tags': ['privacy-and-security'],
}),
('customize', {
'name': _lazy(u'Customize controls, options and add-ons'),
'topic': 'customize',
'tags': ['customize'],
}),
('fix-problems', {
'name': _lazy(u'Fix slowness, crashing, error messages and '
u'other problems'),
'topic': 'fix-problems',
'tags': ['fix-problems'],
}),
('tips', {
'name': _lazy(u'Tips and tricks'),
'topic': 'tips',
'tags': ['tips'],
}),
('bookmarks', {
'name': _lazy(u'Bookmarks'),
'topic': 'bookmarks',
'tags': ['bookmarks'],
}),
('cookies', {
'name': _lazy(u'Cookies'),
'topic': 'cookies',
'tags': ['cookies'],
}),
('tabs', {
'name': _lazy(u'Tabs'),
'topic': 'tabs',
'tags': ['tabs'],
}),
('websites', {
'name': _lazy(u'Websites'),
'topic': 'websites',
'tags': ['websites'],
}),
('other', {
'name': _lazy(u'Other'),
'topic': 'other',
'tags': ['other'],
}),
])
}),
('firefox-os', {
'name': _lazy(u'Firefox OS'),
'subtitle': '',
'extra_fields': ['device', 'os'],
'tags': [],
'products': ['firefox-os'],
'categories': SortedDict([
# TODO: Just use the IA topics for this.
# See bug 979397
('download-and-install', {
'name': _lazy(u'Download and install apps'),
'topic': 'marketplace',
'tags': ['marketplace'],
}),
('customize', {
'name': _lazy(u'Customize controls, options, settings and '
u'preferences'),
'topic': 'settings',
'tags': ['settings'],
}),
('fix-problems', {
'name': _lazy(u'Fix slowness, crashing, error messages and '
u'other problems'),
'topic': 'fix-problems',
'tags': ['fix-problems'],
}),
])
}),
('webmaker', {
'name': _lazy(u'Webmaker'),
'subtitle': _lazy('Tools for creating and teaching the web'),
'extra_fields': [],
'tags': [],
'products': ['webmaker'],
'categories': SortedDict([
# TODO: Just use the IA topics for this.
# See bug 979397
('popcorn-maker', {
'name': _lazy(u'Using Popcorn Maker'),
'topic': 'popcorn-maker',
'tags': ['popcorn-maker'],
}),
('thimble', {
'name': _lazy(u'Using Thimble'),
'topic': 'thimble',
'tags': ['thimble'],
}),
('x-ray-goggles', {
'name': _lazy(u'Using X-Ray Goggles'),
'topic': 'x-ray-goggles',
'tags': ['x-ray-goggles'],
}),
('get-the-most-from-webmaker', {
'name': _lazy(u'Using a feature on webmaker.org'),
'topic': 'get-the-most-from-webmaker',
'tags': ['get-the-most-from-webmaker'],
}),
('events-and-help-for-mentors', {
'name': _lazy(u'Contributing to Webmaker'),
'topic': 'events-and-help-for-mentors',
'tags': ['events-and-help-for-mentors'],
}),
])
}),
('thunderbird', {
'name': _lazy(u'Thunderbird'),
'subtitle': '',
'extra_fields': [],
'tags': [],
'products': ['thunderbird'],
'categories': SortedDict([
# TODO: Just use the IA topics for this.
# See bug 979397
('download-and-install', {
'name': _lazy(u'Download, install and migration'),
'topic': 'download-install-and-migration',
'tags': ['download-and-install'],
}),
('privacy-and-security', {
'name': _lazy(u'Privacy and security settings'),
'topic': 'privacy-and-security-settings',
'tags': ['privacy-and-security'],
}),
('customize', {
'name': _lazy(u'Customize controls, options and add-ons'),
'topic': 'customize-controls-options-and-add-ons',
'tags': ['customize'],
}),
('fix-problems', {
'name': _lazy(u'Fix slowness, crashing, error messages and '
u'other problems'),
'topic': 'fix-slowness-crashing-error-messages-and-other-'
'problems',
'tags': ['fix-problems'],
}),
('calendar', {
'name': _lazy('Calendar'),
'topic': 'calendar',
'tags': ['calendar'],
}),
('other', {
'name': _lazy(u'Other'),
'topic': 'other',
'tags': ['other'],
}),
])
}),
('other', {
'name': _lazy(u'Other Mozilla products'),
'subtitle': '',
        'html': 'This site only provides support for some of our products. '
'For other support, please find your product below.'
'<ul class="product-support">'
'<li><a href="http://www.seamonkey-project.org/doc/">'
'SeaMonkey support</a></li>'
'<li><a href="http://caminobrowser.org/help/">'
'Camino support</a></li>'
'<li><a '
'href="http://www.mozilla.org/projects/calendar/faq.html">'
'Lightning and Sunbird support</a></li>'
'</ul>',
'categories': SortedDict([]),
'deadend': True,
}),
])
def add_backtrack_keys(products):
"""Insert 'key' keys so we can go from product or category back to key."""
for p_k, p_v in products.iteritems():
p_v['key'] = p_k
for c_k, c_v in p_v['categories'].iteritems():
c_v['key'] = c_k
add_backtrack_keys(products)
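# Illustrative sketch (hypothetical helper, not used elsewhere in this module):
# walk the AAQ config as (product, category) slug pairs. After
# add_backtrack_keys() has run, each product/category dict also carries its own
# slug under 'key'.
def iter_product_categories():
    """Yield (product_key, category_key) pairs for every AAQ category."""
    for product_key, product in products.iteritems():
        for category_key in product['categories']:
            yield product_key, category_key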
|
|
from __future__ import division
from datetime import datetime, timedelta
from django.test import TestCase
from django.core.exceptions import ValidationError
from model_mommy import mommy
from facilities.models import Facility
from ..models import (
CommunityHealthUnit,
CommunityHealthWorker,
CommunityHealthWorkerContact,
Status,
CommunityHealthUnitContact,
CHUService,
CHURating,
ChuUpdateBuffer
)
class TestChuUpdateBuffer(TestCase):
def test_save(self):
mommy.make(ChuUpdateBuffer, basic="{'name': 'new name'}")
self.assertEquals(1, ChuUpdateBuffer.objects.count())
def test_str(self):
chu_update = mommy.make(ChuUpdateBuffer, basic="{'name': 'new name'}")
self.assertEquals(1, ChuUpdateBuffer.objects.count())
self.assertEquals(chu_update.__str__(), chu_update.health_unit.name)
def test_chu_update_basic_not_edited(self):
chu_update = mommy.make(
ChuUpdateBuffer,
workers='[{"first_name": "jina"}]'
)
self.assertIsNone(chu_update.updates.get('basic'))
def test_atleast_one_thing_editted(self):
with self.assertRaises(ValidationError):
mommy.make(
ChuUpdateBuffer, basic=None, workers=None, contacts=None)
class TestCommunityHealthUnit(TestCase):
def test_save(self):
mommy.make(CommunityHealthUnit)
self.assertEquals(1, CommunityHealthUnit.objects.count())
def test_date_operational_less_than_date_established(self):
today = datetime.now().date()
last_week = today - timedelta(days=7)
with self.assertRaises(ValidationError):
mommy.make(
CommunityHealthUnit,
date_established=today, date_operational=last_week)
def test_date_established_not_in_future(self):
today = datetime.now().date()
next_month = today + timedelta(days=30)
with self.assertRaises(ValidationError):
mommy.make(
CommunityHealthUnit,
date_established=today, date_operational=next_month)
def test_valid_dates(self):
today = datetime.now().date()
last_week = today - timedelta(days=7)
mommy.make(
CommunityHealthUnit,
date_established=last_week, date_operational=today)
self.assertEquals(1, CommunityHealthUnit.objects.count())
def test_save_with_code(self):
mommy.make(CommunityHealthUnit, code='7800')
self.assertEquals(1, CommunityHealthUnit.objects.count())
def test_facility_is_not_closed(self):
facility = mommy.make(Facility, closed=True)
with self.assertRaises(ValidationError):
mommy.make(CommunityHealthUnit, facility=facility)
def test_chu_approval_or_rejection_and_not_both(self):
with self.assertRaises(ValidationError):
mommy.make(CommunityHealthUnit, is_approved=True, is_rejected=True)
        # test rejecting an approved CHU
chu = mommy.make(CommunityHealthUnit, is_approved=True)
chu.is_rejected = True
chu.is_approved = False
chu.save()
chu_2 = mommy.make(CommunityHealthUnit, is_rejected=True)
chu_2.is_approved = True
chu_2.is_rejected = False
chu_2.save()
def test_average_rating(self):
chu = mommy.make(CommunityHealthUnit)
chu2 = mommy.make(CommunityHealthUnit)
ratings = [4, 3, 2, 4, 5, 1]
for i in ratings:
mommy.make(CHURating, chu=chu2, rating=i)
self.assertEqual(chu.average_rating, 0)
self.assertEqual(chu2.average_rating, sum(ratings, 0) / len(ratings))
def test_rating_count(self):
chu = mommy.make(CommunityHealthUnit)
chu2 = mommy.make(CommunityHealthUnit)
ratings = [4, 3, 2, 4, 5, 1]
for i in ratings:
mommy.make(CHURating, chu=chu2, rating=i)
self.assertEqual(chu.rating_count, 0)
self.assertEqual(chu2.rating_count, len(ratings))
def test_contacts(self):
chu = mommy.make(CommunityHealthUnit)
mommy.make(
CommunityHealthUnitContact, health_unit=chu)
self.assertIsInstance(chu.contacts, list)
def test_latest_update(self):
chu = mommy.make(CommunityHealthUnit)
chu.is_approved = True
chu.save()
update = mommy.make(
ChuUpdateBuffer,
health_unit=chu,
basic='{"name": "some new name"}')
self.assertEquals(chu.latest_update, update)
    def test_pending_updates(self):
chu = mommy.make(CommunityHealthUnit)
chu.is_approved = True
chu.save()
update = mommy.make(
ChuUpdateBuffer,
health_unit=chu,
basic='{"name": "some new name"}')
self.assertEquals(chu.latest_update, update)
self.assertIsInstance(chu.pending_updates, dict)
def test_has_edits_true(self):
chu = mommy.make(CommunityHealthUnit)
chu.is_approved = True
chu.save()
mommy.make(
ChuUpdateBuffer,
health_unit=chu,
basic='{"name": "some new name"}')
chu_refetched = CommunityHealthUnit.objects.get(id=chu.id)
self.assertTrue(chu_refetched.has_edits)
    def test_has_edits_false_after_approval(self):
chu = mommy.make(CommunityHealthUnit)
chu.is_approved = True
chu.save()
update = mommy.make(
ChuUpdateBuffer,
health_unit=chu,
basic='{"name": "some new name"}')
update.is_approved = True
update.save()
chu_refetched = CommunityHealthUnit.objects.get(id=chu.id)
self.assertFalse(chu_refetched.has_edits)
def test_has_edits_false_after_rejection(self):
chu = mommy.make(CommunityHealthUnit)
chu.is_approved = True
chu.save()
update = mommy.make(
ChuUpdateBuffer,
health_unit=chu,
basic='{"name": "some new name"}')
update.is_rejected = True
update.save()
chu_refetched = CommunityHealthUnit.objects.get(id=chu.id)
self.assertFalse(chu_refetched.has_edits)
def test_chu_workers(self):
chu = mommy.make(CommunityHealthUnit)
mommy.make(CommunityHealthWorker, health_unit=chu)
self.assertIsInstance(chu.workers, list)
class TestCommunityHealthWorkerModel(TestCase):
def test_save(self):
mommy.make(CommunityHealthWorker)
self.assertEquals(1, CommunityHealthWorker.objects.count())
class TestCommunityHealthWorkerContact(TestCase):
def test_save(self):
mommy.make(CommunityHealthWorkerContact)
self.assertEquals(1, CommunityHealthWorkerContact.objects.count())
class TestModels(TestCase):
def test_save(self):
models = [
CommunityHealthUnit, CommunityHealthWorker,
CommunityHealthWorkerContact, Status,
CommunityHealthUnitContact, CHUService
]
for model_cls in models:
mommy.make(model_cls)
self.assertNotEquals(0, len(model_cls.objects.all()))
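# Illustrative helper (hypothetical, not used by the tests above): build a
# CommunityHealthUnit with a fixed set of ratings, mirroring the pattern used
# in test_average_rating and test_rating_count.
def make_rated_chu(ratings=(4, 3, 2, 4, 5, 1)):
    chu = mommy.make(CommunityHealthUnit)
    for value in ratings:
        mommy.make(CHURating, chu=chu, rating=value)
    return chu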
|
|
"""Git-related utilities."""
import logging
import re
import git
from gitdb.util import hex_to_bin
from django.core.exceptions import ValidationError
from git.exc import BadName, InvalidGitRepositoryError, NoSuchPathError
from readthedocs.builds.constants import EXTERNAL
from readthedocs.config import ALL
from readthedocs.projects.constants import (
GITHUB_BRAND,
GITHUB_PR_PULL_PATTERN,
GITLAB_BRAND,
GITLAB_MR_PULL_PATTERN,
)
from readthedocs.projects.exceptions import RepositoryError
from readthedocs.projects.validators import validate_submodule_url
from readthedocs.vcs_support.base import BaseVCS, VCSVersion
log = logging.getLogger(__name__)
class Backend(BaseVCS):
"""Git VCS backend."""
supports_tags = True
supports_branches = True
supports_submodules = True
supports_lsremote = True
fallback_branch = 'master' # default branch
repo_depth = 50
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.token = kwargs.get('token')
self.repo_url = self._get_clone_url()
def _get_clone_url(self):
if '://' in self.repo_url:
hacked_url = self.repo_url.split('://')[1]
hacked_url = re.sub('.git$', '', hacked_url)
clone_url = 'https://%s' % hacked_url
if self.token:
clone_url = 'https://{}@{}'.format(self.token, hacked_url)
return clone_url
# Don't edit URL because all hosts aren't the same
# else:
# clone_url = 'git://%s' % (hacked_url)
return self.repo_url
def set_remote_url(self, url):
return self.run('git', 'remote', 'set-url', 'origin', url)
def update(self):
"""Clone or update the repository."""
super().update()
if self.repo_exists():
self.set_remote_url(self.repo_url)
return self.fetch()
self.make_clean_working_dir()
# A fetch is always required to get external versions properly
if self.version_type == EXTERNAL:
self.clone()
return self.fetch()
return self.clone()
def repo_exists(self):
try:
self._repo
except (InvalidGitRepositoryError, NoSuchPathError):
return False
return True
@property
def _repo(self):
"""Get a `git.Repo` instance from the current `self.working_dir`."""
return git.Repo(self.working_dir, expand_vars=False)
def are_submodules_available(self, config):
"""Test whether git submodule checkout step should be performed."""
submodules_in_config = (
config.submodules.exclude != ALL or config.submodules.include
)
if not submodules_in_config:
return False
# Keep compatibility with previous projects
return bool(self.submodules)
def validate_submodules(self, config):
"""
        Return the submodules and check that their URLs are valid.
.. note::
            Always call after `self.are_submodules_available`.
:returns: tuple(bool, list)
Returns `True` if all required submodules URLs are valid.
Returns a list of all required submodules:
- Include is `ALL`, returns all submodules available.
- Include is a list, returns just those.
- Exclude is `ALL` - this should never happen.
        - Exclude is a list, returns all available submodules
but those from the list.
Returns `False` if at least one submodule is invalid.
Returns the list of invalid submodules.
"""
submodules = {sub.path: sub for sub in self.submodules}
for sub_path in config.submodules.exclude:
path = sub_path.rstrip('/')
if path in submodules:
del submodules[path]
if config.submodules.include != ALL and config.submodules.include:
submodules_include = {}
for sub_path in config.submodules.include:
path = sub_path.rstrip('/')
submodules_include[path] = submodules[path]
submodules = submodules_include
invalid_submodules = []
for path, submodule in submodules.items():
try:
validate_submodule_url(submodule.url)
except ValidationError:
invalid_submodules.append(path)
if invalid_submodules:
return False, invalid_submodules
return True, submodules.keys()
def use_shallow_clone(self):
"""
Test whether shallow clone should be performed.
.. note::
Temporarily, we support skipping this option as builds that rely on
git history can fail if using shallow clones. This should
eventually be configurable via the web UI.
"""
from readthedocs.projects.models import Feature
return not self.project.has_feature(Feature.DONT_SHALLOW_CLONE)
def fetch(self):
# --force lets us checkout branches that are not fast-forwarded
# https://github.com/readthedocs/readthedocs.org/issues/6097
cmd = ['git', 'fetch', 'origin',
'--force', '--tags', '--prune', '--prune-tags']
if self.use_shallow_clone():
cmd.extend(['--depth', str(self.repo_depth)])
if self.verbose_name and self.version_type == EXTERNAL:
if self.project.git_provider_name == GITHUB_BRAND:
cmd.append(
GITHUB_PR_PULL_PATTERN.format(id=self.verbose_name)
)
if self.project.git_provider_name == GITLAB_BRAND:
cmd.append(
GITLAB_MR_PULL_PATTERN.format(id=self.verbose_name)
)
code, stdout, stderr = self.run(*cmd)
if code != 0:
raise RepositoryError
return code, stdout, stderr
def checkout_revision(self, revision=None):
if not revision:
branch = self.default_branch or self.fallback_branch
revision = 'origin/%s' % branch
code, out, err = self.run('git', 'checkout', '--force', revision)
if code != 0:
raise RepositoryError(
RepositoryError.FAILED_TO_CHECKOUT.format(revision),
)
return [code, out, err]
def clone(self):
"""Clones the repository."""
cmd = ['git', 'clone', '--no-single-branch']
if self.use_shallow_clone():
cmd.extend(['--depth', str(self.repo_depth)])
cmd.extend([self.repo_url, '.'])
code, stdout, stderr = self.run(*cmd)
if code != 0:
raise RepositoryError
return code, stdout, stderr
@property
def lsremote(self):
"""
        Use ``git ls-remote`` to list branches and tags without cloning the repository.
:returns: tuple containing a list of branch and tags
"""
cmd = ['git', 'ls-remote', self.repo_url]
self.check_working_dir()
code, stdout, stderr = self.run(*cmd)
if code != 0:
raise RepositoryError
tags = []
branches = []
for line in stdout.splitlines()[1:]: # skip HEAD
commit, ref = line.split()
if ref.startswith('refs/heads/'):
branch = ref.replace('refs/heads/', '')
branches.append(VCSVersion(self, branch, branch))
if ref.startswith('refs/tags/'):
tag = ref.replace('refs/tags/', '')
if tag.endswith('^{}'):
# skip annotated tags since they are duplicated
continue
tags.append(VCSVersion(self, commit, tag))
return branches, tags
@property
def tags(self):
versions = []
repo = self._repo
# Build a cache of tag -> commit
# GitPython is not very optimized for reading large numbers of tags
ref_cache = {} # 'ref/tags/<tag>' -> hexsha
        # This code is the same as what GitPython executes for each tag;
        # we execute it only once for all tags.
for hexsha, ref in git.TagReference._iter_packed_refs(repo):
gitobject = git.Object.new_from_sha(repo, hex_to_bin(hexsha))
if gitobject.type == 'commit':
ref_cache[ref] = str(gitobject)
elif gitobject.type == 'tag' and gitobject.object.type == 'commit':
ref_cache[ref] = str(gitobject.object)
for tag in repo.tags:
if tag.path in ref_cache:
hexsha = ref_cache[tag.path]
else:
try:
hexsha = str(tag.commit)
except ValueError:
# ValueError: Cannot resolve commit as tag TAGNAME points to a
# blob object - use the `.object` property instead to access it
# This is not a real tag for us, so we skip it
# https://github.com/rtfd/readthedocs.org/issues/4440
log.warning('Git tag skipped: %s', tag, exc_info=True)
continue
versions.append(VCSVersion(self, hexsha, str(tag)))
return versions
@property
def branches(self):
repo = self._repo
versions = []
branches = []
# ``repo.remotes.origin.refs`` returns remote branches
if repo.remotes:
branches += repo.remotes.origin.refs
for branch in branches:
verbose_name = branch.name
if verbose_name.startswith('origin/'):
verbose_name = verbose_name.replace('origin/', '')
if verbose_name == 'HEAD':
continue
versions.append(VCSVersion(self, str(branch), verbose_name))
return versions
@property
def commit(self):
if self.repo_exists():
_, stdout, _ = self.run('git', 'rev-parse', 'HEAD', record=False)
return stdout.strip()
return None
@property
def submodules(self):
return list(self._repo.submodules)
def checkout(self, identifier=None):
"""Checkout to identifier or latest."""
super().checkout()
# Find proper identifier
if not identifier:
identifier = self.default_branch or self.fallback_branch
identifier = self.find_ref(identifier)
# Checkout the correct identifier for this branch.
code, out, err = self.checkout_revision(identifier)
if code != 0:
return code, out, err
# Clean any remains of previous checkouts
self.run('git', 'clean', '-d', '-f', '-f')
return code, out, err
def update_submodules(self, config):
if self.are_submodules_available(config):
valid, submodules = self.validate_submodules(config)
if valid:
self.checkout_submodules(submodules, config)
else:
raise RepositoryError(
RepositoryError.INVALID_SUBMODULES.format(submodules),
)
def checkout_submodules(self, submodules, config):
"""Checkout all repository submodules."""
self.run('git', 'submodule', 'sync')
cmd = [
'git',
'submodule',
'update',
'--init',
'--force',
]
if config.submodules.recursive:
cmd.append('--recursive')
cmd += submodules
self.run(*cmd)
def find_ref(self, ref):
# Check if ref starts with 'origin/'
if ref.startswith('origin/'):
return ref
# Check if ref is a branch of the origin remote
if self.ref_exists('remotes/origin/' + ref):
return 'origin/' + ref
return ref
def ref_exists(self, ref):
try:
if self._repo.commit(ref):
return True
except (BadName, ValueError):
return False
return False
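# Illustrative sketch (hypothetical, not part of this module): the typical
# update/checkout flow a caller might drive on this backend.
def _example_update_and_checkout(backend, identifier=None):
    backend.update()              # clone or fetch, depending on repo state
    backend.checkout(identifier)  # defaults to the default/fallback branch
    return backend.commit         # hexsha of the checked-out HEAD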
|
|
import sys
import math
from fractions import Fraction
import tft_utils
GROUP_ERR_VAR_PREFIX = "__g_err_"
TCAST_ERR_VAR_PREFIX = "__tc_err_"
ERR_VAR_PREFIX = "__err_"
ERR_SUM_PREFIX = "__errsum_"
ERR_TERM_REF_PREFIX = "__eterm_"
CNUM_PREFIX = "__cum"
PRESERVED_VAR_LABEL_PREFIXES = [GROUP_ERR_VAR_PREFIX, TCAST_ERR_VAR_PREFIX,
                                ERR_VAR_PREFIX, ERR_SUM_PREFIX,
                                ERR_TERM_REF_PREFIX, CNUM_PREFIX]
PRESERVED_CONST_VPREFIX = "__const"
PRESERVED_CONST_GID = 9999
ALL_VariableExprs = []
# ========
# sub-routines
# ========
def isConstVar (var):
if (not isinstance(var, VariableExpr)):
return False
if (var.label().startswith(PRESERVED_CONST_VPREFIX)):
return True
else:
return False
def isPseudoBooleanVar (var):
if (not isinstance(var, VariableExpr)):
return False
if (not var.hasBounds()):
return False
if (not (var.lb() == ConstantExpr(0) and var.ub() == ConstantExpr(1))):
return False
return True
def RegisterVariableExpr (var):
global ALL_VariableExprs
assert(isinstance(var, VariableExpr))
if (var.isPreservedVar()):
return
was_registered = False
for v in ALL_VariableExprs:
assert(v.hasBounds())
if (v.identical(var)):
was_registered = True
if (var == v):
if (var.hasBounds()):
assert(var.lb().value() == v.lb().value())
assert(var.ub().value() == v.ub().value())
else:
var.setBounds(v.lb(), v.ub())
if (not was_registered):
ALL_VariableExprs.append(var)
def ExprStatistics (expr, stat={}):
assert(isinstance(expr, Expr))
# initialize stat
if ("# constants" not in stat.keys()):
stat["# constants"] = 0
if ("# variables" not in stat.keys()):
stat["# variables"] = 0
if ("# operations" not in stat.keys()):
stat["# operations"] = 0
if ("groups" not in stat.keys()):
stat["groups"] = []
if (isinstance(expr, ConstantExpr)):
print ("ERROR: should not do statistics with expression containing real ConstantExprs...")
elif (isinstance(expr, VariableExpr)):
if (isConstVar(expr)):
stat["# constants"] = stat["# constants"] + 1
else:
assert(expr.getGid() != PRESERVED_CONST_GID)
gid = expr.getGid()
if (gid not in stat["groups"]):
stat["groups"].append(gid)
stat["# variables"] = stat["# variables"] + 1
elif (isinstance(expr, UnaryExpr)):
gid = expr.getGid()
if (gid not in stat["groups"]):
stat["groups"].append(gid)
stat["# operations"] = stat["# operations"] + 1
ExprStatistics(expr.opd(), stat)
elif (isinstance(expr, BinaryExpr)):
gid = expr.getGid()
if (gid not in stat["groups"]):
stat["groups"].append(gid)
stat["# operations"] = stat["# operations"] + 1
ExprStatistics(expr.lhs(), stat)
ExprStatistics(expr.rhs(), stat)
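# Illustrative sketch (not part of FPTuner): collect statistics for a single
# bounded variable; the stat dict is passed explicitly to avoid sharing the
# mutable default argument of ExprStatistics.
def _example_expr_statistics():
    x = VariableExpr("x", Fraction, 0)
    x.setBounds(ConstantExpr(0), ConstantExpr(10))
    stat = {}
    ExprStatistics(x, stat)
    return stat  # {'# constants': 0, '# variables': 1, '# operations': 0, 'groups': [0]}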
# ========
# class definitions
# ========
# ==== the base class of expression ====
ExprCounter = 0
class Expr (object):
index = None
operands = None
lower_bound = None
upper_bound = None
gid = None
def __init__ (self, set_index=True):
global ExprCounter
if (set_index):
self.index = ExprCounter
self.operands = []
        self.lower_bound = ""
        self.upper_bound = ""
ExprCounter = ExprCounter + 1
class ArithmeticExpr (Expr):
def __init__ (self, set_index=True):
if (sys.version_info.major == 2):
sys.exit("Error: FPTuner is currently based on Python3 only...")
super(ArithmeticExpr, self).__init__(set_index)
elif (sys.version_info.major == 3):
super().__init__(set_index)
else:
sys.exit("ERROR: not supported python version: " + str(sys.version_info))
class Predicate (Expr):
def __init__ (self, set_index=True):
if (sys.version_info.major == 2):
sys.exit("Error: FPTuner is currently based on Python3 only...")
super(Predicate, self).__init__(set_index)
elif (sys.version_info.major == 3):
super().__init__(set_index)
else:
sys.exit("ERROR: not supported python version: " + str(sys.version_info))
# ==== constant expression ====
class ConstantExpr (ArithmeticExpr):
def __init__ (self, val):
assert((type(val) is int) or (isinstance(val, Fraction)) or (type(val) is float))
if (type(val) is float):
val = Fraction(val)
if (sys.version_info.major == 2):
sys.exit("Error: FPTuner is currently based on Python3 only...")
super(ConstantExpr, self).__init__(False)
elif (sys.version_info.major == 3):
super().__init__(False)
else:
sys.exit("ERROR: not supported python version: " + str(sys.version_info))
self.operands.append(val)
self.lower_bound = val
self.upper_bound = val
self.gid = -1
def value (self):
assert(len(self.operands) == 1)
return self.operands[0]
def type (self):
return type(self.value())
def rational_value (self):
if (self.type() == int):
return Fraction(self.value(), 1)
elif (self.type() == Fraction):
return self.value()
else:
sys.exit("ERROR: invalid value type for ConstantExpr")
def lb (self):
return self
def ub (self):
return self
def setLB (self, expr):
assert(isinstance(expr, Expr))
assert(expr.value() <= self.value())
def setUB (self, expr):
assert(isinstance(expr, Expr))
assert(self.value() <= expr.value())
def __str__ (self):
if (self.type() == int):
return "([Const:int] " + str(self.value()) + ")"
elif (self.type() == Fraction):
return "([Const:Fraction] " + str(float(self.value())) + ")"
else:
sys.exit("ERROR: invalid type of ConstantExpr found in __str__")
def toCString (self, const_inline=False): # to int or float string
if (self.type() == int):
return str(self.value())
elif (self.type() == Fraction):
assert(isinstance(self.value(), Fraction))
# return str(float(self.value().numerator) / float(self.value().denominator))
return str(float(self.value()))
else:
sys.exit("ERROR: invalid type of ConstantExpr: " + str(self))
def toIRString (self):
return "(" + self.toCString() + ")"
def toASTString (self):
return self.toIRString()
def toFPCoreString (self):
return self.toCString()
def __eq__ (self, rhs):
if (not isinstance(rhs, ConstantExpr)):
return False
if (self.type() == rhs.type()):
return (self.value() == rhs.value())
elif (self.type() == int and rhs.type() == Fraction):
return (Fraction(self.value(), 1) == rhs.value())
elif (self.type() == Fraction and rhs.type() == int):
return (self.value() == Fraction(rhs.value(), 1))
else:
sys.exit("ERROR: invlaid __eq__ scenario of ConstExpr")
def identical (self, rhs):
if (not isinstance(rhs, ConstantExpr)):
return False
if (self.index == rhs.index):
assert(self.value() == rhs.value())
return True
else:
return False
def __ne__ (self, rhs):
return (not self == rhs)
def __gt__ (self, rhs):
if (not isinstance(rhs, ConstantExpr)):
return False
if (self.type() == rhs.type()):
return (self.value() > rhs.value())
elif (self.type() == int and rhs.type() == Fraction):
return (Fraction(self.value(), 1) > rhs.value())
elif (self.type() == Fraction and rhs.type() == int):
return (self.value() > Fraction(rhs.value(), 1))
else:
sys.exit("ERROR: invlaid __gt__ scenario of ConstExpr")
def __lt__ (self, rhs):
if (not isinstance(rhs, ConstantExpr)):
return False
if (self.type() == rhs.type()):
return (self.value() < rhs.value())
elif (self.type() == int and rhs.type() == Fraction):
return (Fraction(self.value(), 1) < rhs.value())
elif (self.type() == Fraction and rhs.type() == int):
return (self.value() < Fraction(rhs.value(), 1))
else:
sys.exit("ERROR: invlaid __lt__ scenario of ConstExpr")
def __ge__ (self, rhs):
return ((self == rhs) or (self > rhs))
def __le__ (self, rhs):
return ((self == rhs) or (self < rhs))
def hasLB (self):
return True
def hasUB (self):
return True
def hasBounds (self):
return True
def vars (self, by_label=True):
return []
def __hash__ (self):
return hash(self.value())
def getGid (self):
return self.gid
def includedGids (self):
return [self.getGid()]
def concEval (self, vmap = {}):
retv = self.value()
assert((type(retv) is int) or (type(retv) is float) or (isinstance(retv, Fraction)))
return retv
def getCastings (self):
return []
def listCrisis (self):
return []
def copy (self, check_prefix=True):
return ConstantExpr(self.value())
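# Minimal usage sketch (illustrative only): ConstantExpr wraps ints and
# Fractions and compares across the two representations.
def _example_constant_expr():
    half = ConstantExpr(Fraction(1, 2))
    one = ConstantExpr(1)
    assert one > half and half <= one
    return half.toCString(), one.toIRString()  # ('0.5', '(1)')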
# ==== variable expression ====
class VariableExpr (ArithmeticExpr):
vtype = None
def __init__ (self, label, vtype, gid, check_prefix=True):
assert(isinstance(label, str))
assert(vtype == int or vtype == Fraction)
assert(type(gid) is int)
if (sys.version_info.major == 2):
sys.exit("Error: FPTuner is currently based on Python3 only...")
super(VariableExpr, self).__init__()
elif (sys.version_info.major == 3):
super().__init__()
else:
sys.exit("ERROR: not supported python version: " + str(sys.version_info))
if (gid == PRESERVED_CONST_GID):
assert(label.startswith(PRESERVED_CONST_VPREFIX))
self.vtype = vtype
self.operands.append(label)
self.gid = gid
if (check_prefix):
if (self.isPreservedVar()):
print("ERROR: the given label \"" + label + "\" has a preserved prefix...")
assert(False)
RegisterVariableExpr(self)
def isPreservedVar (self):
for pre in PRESERVED_VAR_LABEL_PREFIXES:
if (self.label().startswith(pre)):
return True
return False
def label (self):
assert(len(self.operands) == 1)
assert(isinstance(self.operands[0], str))
return self.operands[0]
def __str__ (self):
return "([Var] " + self.toIRString() + ")"
def idLabel (self):
return self.label() + "_eid_" + str(self.index)
def toCString (self, const_inline=False):
if (isConstVar(self)):
assert(self.lb().value() == self.ub().value())
return self.lb().toCString()
return self.label()
def toIRString (self):
if (self.vtype is int):
return "(" + self.label() + "$" + str(self.gid) + "$Int)"
else:
return "(" + self.label() + "$" + str(self.gid) + ")"
def toASTString (self):
return self.idLabel()
def toFPCoreString (self):
if (isConstVar(self)):
assert(self.hasLB() and self.hasUB() and self.lb().value() == self.ub().value())
return str(float(self.lb().value()))
return self.label()
def __eq__ (self, rhs):
if (not isinstance(rhs, VariableExpr)):
return False
return (self.label() == rhs.label() and self.type() == rhs.type() and self.getGid() == rhs.getGid())
def identical (self, rhs):
if (not isinstance(rhs, VariableExpr)):
return False
if (self.index == rhs.index):
assert(self == rhs)
return True
else:
return False
def setLB (self, lb):
assert(isinstance(lb, ConstantExpr) or (type(lb) in [int, float]) or isinstance(lb, Fraction))
if (not isinstance(lb, ConstantExpr)):
lb = ConstantExpr(lb)
if (self.lower_bound is None):
self.lower_bound = lb
assert(self.lower_bound.value() == lb.value())
def setUB (self, ub):
assert(isinstance(ub, ConstantExpr) or (type(ub) in [int, float]) or isinstance(ub, Fraction))
if (not isinstance(ub, ConstantExpr)):
ub = ConstantExpr(ub)
if (self.upper_bound is None):
self.upper_bound = ub
assert(self.upper_bound.value() == ub.value())
def setBounds (self, lb, ub):
self.setLB(lb)
self.setUB(ub)
def hasLB (self):
return (isinstance(self.lower_bound, ConstantExpr))
def hasUB (self):
return (isinstance(self.upper_bound, ConstantExpr))
def hasBounds (self):
return (self.hasLB() and self.hasUB())
def lb (self):
assert(self.hasLB())
assert(isinstance(self.lower_bound, ConstantExpr))
return self.lower_bound
def ub (self):
assert(self.hasUB())
assert(isinstance(self.upper_bound, ConstantExpr))
return self.upper_bound
def type (self):
assert(self.vtype == int or self.vtype == Fraction)
return self.vtype
def vars (self, by_label=True):
return [self]
def __hash__ (self):
return hash(self.label())
def getGid (self):
return self.gid
def includedGids (self):
return [self.getGid()]
def concEval (self, vmap = {}):
assert(self in vmap.keys())
retv = vmap[self]
assert((type(retv) is int) or (type(retv) is float) or (isinstance(retv, Fraction)))
return retv
def getCastings (self):
return []
def listCrisis (self):
return []
def copy(self, check_prefix=True):
ret = VariableExpr(self.label(), self.type(), self.getGid(), check_prefix)
if (self.hasLB()):
ret.setLB(self.lb())
if (self.hasUB()):
ret.setUB(self.ub())
return ret
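# --- Illustrative sketch (not part of the original FPTuner sources) ---
# setLB/setUB above accept a raw int/float/Fraction and wrap it in a
# ConstantExpr, so a bounded variable can be declared as below.  The label
# "x" and group id 7 are arbitrary example values, assumed not to collide
# with the preserved label prefixes or with PRESERVED_CONST_GID; note that
# constructing the expression also registers it via RegisterVariableExpr.
def _example_bounded_variable():
    x = VariableExpr("x", Fraction, 7)
    x.setBounds(Fraction(-1, 2), Fraction(1, 2))
    assert x.hasBounds()
    assert x.lb().value() == Fraction(-1, 2)
    return x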
# ==== unary operator ====
UnaryOpLabels = ["sqrt", "abs", "-", "sin", "cos", "exp", "log"]
class UnaryOp:
gid = None
label = None
def __init__ (self, gid, label):
assert(type(gid) is int)
assert(type(label) is str)
assert(label in UnaryOpLabels)
self.gid = gid
self.label = label
def toCString (self):
return self.label
def toIRString (self):
return str(self)
def toASTString (self):
return self.label
def __str__ (self):
return self.label
def __eq__ (self, rhs):
assert(isinstance(rhs, UnaryOp))
return (self.label == rhs.label)
def identical (self, rhs):
assert(isinstance(rhs, UnaryOp))
return (self.label == rhs.label)
def __ne__ (self, rhs):
return (not (self == rhs))
# ==== unary expression ====
class UnaryExpr (ArithmeticExpr):
operator = None
def __init__ (self, opt, opd0):
assert(isinstance(opt, UnaryOp))
assert(isinstance(opd0, Expr))
if (opt.label == "-"):
sys.exit("ERROR: cannot directly create UnaryExpr -. It must be properly transfered to an expression tree...")
if (sys.version_info.major == 2):
sys.exit("Error: FPTuner is currently based on Python3 only...")
elif (sys.version_info.major == 3):
super().__init__()
else:
sys.exit("ERROR: unsupported Python version: " + str(sys.version_info))
self.gid = opt.gid
self.operator = opt
self.operands.append(opd0)
assert(self.gid is not None)
def opd (self):
assert(len(self.operands) == 1)
assert(isinstance(self.operands[0], Expr))
return self.operands[0]
def __str__ (self):
return "([UExpr] " + str(self.operator) + " " + str(self.opd()) + ")"
def toCString (self, const_inline=False):
return self.operator.toCString() + "(" + self.opd().toCString(const_inline) + ")"
def toIRString (self):
return "(" + self.operator.toIRString() + "$" + str(self.getGid()) + "(" + self.opd().toIRString() + "))"
def toASTString (self):
return "(" + self.operator.toASTString() + "(" + self.opd().toASTString() + "))"
def toFPCoreString (self):
assert(False), "Error: toFPCoreString doesn't support for Unary Expression"
def __eq__ (self, rhs):
if (not isinstance(rhs, UnaryExpr)):
return False
assert(isinstance(self.operator, UnaryOp))
assert(isinstance(rhs.operator, UnaryOp))
if (not (self.operator == rhs.operator)):
return False
if (self.getGid() != rhs.getGid()):
return False
if (self.opd() == rhs.opd()):
return True
else:
return False
def identical (self, rhs):
if (not isinstance(rhs, UnaryExpr)):
return False
assert(isinstance(self.operator, UnaryOp))
assert(isinstance(rhs.operator, UnaryOp))
if (not (self.operator.identical( rhs.operator ))):
return False
if (self.opd().identical( rhs.opd() )):
return True
else:
return False
def setLB (self, lb):
assert(isinstance(lb, ConstantExpr))
if (self.operator.label in ["abs", "sqrt"]):
assert(lb.value() >= Fraction(0, 1))
self.lower_bound = lb
def setUB (self, ub):
assert(isinstance(ub, ConstantExpr))
if (self.operator.label in ["abs", "sqrt"]):
assert(ub.value() >= Fraction(0, 1))
self.upper_bound = ub
def setBounds (self, lb, ub):
self.setLB(lb)
self.setUB(ub)
def hasLB (self):
return (isinstance(self.lower_bound, ConstantExpr))
def hasUB (self):
return (isinstance(self.upper_bound, ConstantExpr))
def hasBounds (self):
return (self.hasLB() and self.hasUB())
def lb (self):
assert(self.hasLB())
assert(isinstance(self.lower_bound, ConstantExpr))
return self.lower_bound
def ub (self):
assert(self.hasUB())
assert(isinstance(self.upper_bound, ConstantExpr))
return self.upper_bound
def vars (self, by_label=True):
return self.opd().vars(by_label)
def getGid (self):
return self.gid
def includedGids (self):
return tft_utils.unionSets([self.getGid()], self.opd().includedGids())
def concEval (self, vmap = {}):
retv = self.opd().concEval(vmap)
assert((type(retv) is int) or (type(retv) is float) or (isinstance(retv, Fraction)))
if (self.operator.label == "abs"):
return abs(retv)
elif (self.operator.label == "sqrt"):
return math.sqrt(retv)
elif (self.operator.label == "-"):
if (type(retv) is int):
return (-1 * retv)
elif ((type(retv) is float) or (isinstance(retv, Fraction))):
return (-1.0 * retv)
else:
assert(False)
else:
sys.exit("ERROR: unknwon operator found in function \"concEval\" of a UnaryExpr")
def getCastings (self):
if (self.operator.label in ["abs", "-"]):
return []
elif (self.operator.label == "sqrt"):
if (isinstance(self.opd(), ConstantExpr)):
return []
else:
return [(self.opd().getGid(), self.getGid())]
else:
sys.exit("ERROR: unknown operator found in function \"getCastings\" of a UnaryExpr")
def listCrisis (self):
return self.opd().listCrisis()
def copy(self, check_prefix=True):
ret = UnaryExpr(self.operator, self.opd().copy(check_prefix))
if (self.hasLB()):
ret.setLB(self.lb())
if (self.hasUB()):
ret.setUB(self.ub())
return ret
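# --- Illustrative sketch (not part of the original FPTuner sources) ---
# A unary expression pairs a UnaryOp with a single operand; "-" is rejected
# by __init__ above, so sqrt is used here.  concEval looks each variable up
# in a dict keyed by the VariableExpr object itself.  The argument x is
# assumed to be a VariableExpr (e.g. the one sketched earlier).
def _example_sqrt_expression(x):
    sqrt_x = UnaryExpr(UnaryOp(x.getGid(), "sqrt"), x)
    return sqrt_x.concEval({x: 4.0})    # math.sqrt(4.0) == 2.0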
# ==== binary operator ====
BinaryOpLabels = ["+", "-", "*", "/", "^"]
class BinaryOp:
gid = None
label = None
def __init__ (self, gid, label):
assert(type(gid) is int)
assert(type(label) is str)
if (label not in BinaryOpLabels):
print ("ERROR: invalid label for BinaryOp: " + label)
assert(label in BinaryOpLabels)
self.gid = gid
self.label = label
def toCString (self):
return self.label
def toIRString (self):
return str(self)
def toASTString (self):
return self.label
def __str__ (self):
return self.label
def __eq__ (self, rhs):
assert(isinstance(rhs, BinaryOp))
return (self.label == rhs.label)
def identical (self, rhs):
assert(isinstance(rhs, BinaryOp))
return (self.label == rhs.label)
def __ne__ (self, rhs):
return (not (self == rhs))
# ==== binary expression ====
class BinaryExpr (ArithmeticExpr):
operator = None
def __init__ (self, opt, opd0, opd1):
assert(isinstance(opt, BinaryOp))
assert(isinstance(opd0, Expr))
assert(isinstance(opd1, Expr))
if (opt.label == "^"):
sys.exit("ERROR: cannot directly create BinaryExpr ^. It must be properly transfered to an expression tree...")
if (sys.version_info.major == 2):
sys.exit("Error: FPTuner is currently based on Python3 only...")
elif (sys.version_info.major == 3):
super().__init__()
else:
sys.exit("ERROR: unsupported Python version: " + str(sys.version_info))
self.gid = opt.gid
self.operator = opt
self.operands.append(opd0)
self.operands.append(opd1)
assert(self.gid is not None)
def lhs (self):
assert(len(self.operands) == 2)
assert(isinstance(self.operands[0], Expr))
return self.operands[0]
def rhs (self):
assert(len(self.operands) == 2)
assert(isinstance(self.operands[1], Expr))
return self.operands[1]
def __str__ (self):
return "([BiExpr] " + str(self.operator) + " " + str(self.lhs()) + " " + str(self.rhs()) + ")"
def toCString (self, const_inline=False):
return "(" + self.lhs().toCString(const_inline) + " " + self.operator.toCString() + " " + self.rhs().toCString(const_inline) + ")"
def toIRString (self):
return "(" + self.lhs().toIRString() + " " + self.operator.toIRString() + "$" + str(self.getGid()) + " " + self.rhs().toIRString() + ")"
def toASTString (self):
return "(" + self.lhs().toASTString() + " " + self.operator.toASTString() + " " + self.rhs().toASTString() + ")"
def toFPCoreString (self):
str_opt = self.operator.toCString()
assert(str_opt in ['+', '-', '*', '/']), "Error: toFPCoreString doesn't support operator : " + str_opt
return "(" + str_opt + " " + self.lhs().toFPCoreString() + " " + self.rhs().toFPCoreString() + ")"
def __eq__ (self, rhs):
if (not isinstance(rhs, BinaryExpr)):
return False
assert(isinstance(self.operator, BinaryOp))
assert(isinstance(rhs.operator, BinaryOp))
if (not (self.operator == rhs.operator)):
return False
if (self.getGid() != rhs.getGid()):
return False
if (self.operator.label in ["+", "*"]):
if ((self.lhs() == rhs.lhs()) and (self.rhs() == rhs.rhs())):
return True
elif ((self.lhs() == rhs.rhs()) and (self.rhs() == rhs.lhs())):
return True
else:
return False
elif (self.operator.label in ["-", "/", "^"]):
if ((self.lhs() == rhs.lhs()) and (self.rhs() == rhs.rhs())):
return True
else:
return False
else:
sys.exit("ERROR: unknown binary operator... " + str(self.operator.label))
def identical (self, rhs):
if (not isinstance(rhs, BinaryExpr)):
return False
assert(isinstance(self.operator, BinaryOp))
assert(isinstance(rhs.operator, BinaryOp))
if (not (self.operator.identical( rhs.operator ))):
return False
if (self.operator.label in ["+", "*"]):
if (self.lhs().identical( rhs.lhs() ) and self.rhs().identical( rhs.rhs() )):
return True
elif (self.lhs().identical( rhs.rhs() ) and self.rhs().identical( rhs.lhs() )):
return True
else:
return False
elif (self.operator.label in ["-", "/", "^"]):
if (self.lhs().identical( rhs.lhs() ) and self.rhs().identical( rhs.rhs() )):
return True
else:
return False
else:
sys.exit("ERROR: unknown binary operator... " + str(self.operator.label))
def setLB (self, lb):
assert(isinstance(lb, ConstantExpr))
self.lower_bound = lb
def setUB (self, ub):
assert(isinstance(ub, ConstantExpr))
self.upper_bound = ub
def setBounds (self, lb, ub):
self.setLB(lb)
self.setUB(ub)
def hasLB (self):
return (isinstance(self.lower_bound, ConstantExpr))
def hasUB (self):
return (isinstance(self.upper_bound, ConstantExpr))
def hasBounds (self):
return (self.hasLB() and self.hasUB())
def lb (self):
assert(self.hasLB())
assert(isinstance(self.lower_bound, ConstantExpr))
return self.lower_bound
def ub (self):
assert(self.hasUB())
assert(isinstance(self.upper_bound, ConstantExpr))
return self.upper_bound
def vars (self, by_label=True):
vars_lhs = self.lhs().vars(by_label)
vars_rhs = self.rhs().vars(by_label)
ret = vars_lhs[:]
for v in vars_rhs:
was_there = False
if (by_label):
for rv in ret:
if (v.label() == rv.label()):
was_there = True
break
else:
for rv in ret:
if (v.index == rv.index):
print ("ERROR: duplicated vars in different subexpressions...")
assert(False)
if (not was_there):
ret.append(v)
return ret
def getGid (self):
return self.gid
def includedGids (self):
return tft_utils.unionSets([self.getGid()], tft_utils.unionSets(self.lhs().includedGids(), self.rhs().includedGids()))
def concEval (self, vmap = {}):
retv_lhs = self.lhs().concEval(vmap)
assert((type(retv_lhs) is int) or (type(retv_lhs) is float) or (isinstance(retv_lhs, Fraction)))
retv_rhs = self.rhs().concEval(vmap)
assert((type(retv_rhs) is int) or (type(retv_rhs) is float) or (isinstance(retv_rhs, Fraction)))
if (self.operator.label == "+"):
return (retv_lhs + retv_rhs)
elif (self.operator.label == "-"):
return (retv_lhs - retv_rhs)
elif (self.operator.label == "*"):
return (retv_lhs * retv_rhs)
elif (self.operator.label == "/"):
return (retv_lhs / retv_rhs)
elif (self.operator.label == "^"):
assert(type(retv_rhs) is int)
return math.pow(retv_lhs, retv_rhs)
else:
sys.exit("ERROR: unknown operator found in function \"similar\" of a BinaryExpr")
def getCastings (self):
if (self.operator.label in ["+", "-", "*", "/"]):
ret_castings = []
if (isinstance(self.lhs(), ConstantExpr)):
pass
else:
ret_castings.append((self.lhs().getGid(), self.getGid()))
if (isinstance(self.rhs(), ConstantExpr)):
pass
else:
ret_castings.append((self.rhs().getGid(), self.getGid()))
return ret_castings
elif (self.operator.label in "^"):
if (isinstance(self.lhs(), ConstantExpr)):
return []
else:
return [(self.lhs().getGid(), self.getGid())]
else:
sys.exit("ERROR: unknown operator found in function \"getCastings\" of a BinaryExpr")
def listCrisis (self):
lc = self.lhs().listCrisis() + self.rhs().listCrisis()
if (self.operator.label == "/"):
return [self.rhs().toCString()] + lc
else:
return lc
def copy(self, check_prefix=True):
ret = BinaryExpr(self.operator,
self.lhs().copy(check_prefix),
self.rhs().copy(check_prefix))
if (self.hasLB()):
ret.setLB(self.lb())
if (self.hasUB()):
ret.setUB(self.ub())
return ret
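# --- Illustrative sketch (not part of the original FPTuner sources) ---
# For "+" and "*" the __eq__ above compares operands commutatively, while
# "-", "/" and "^" stay order-sensitive.  The argument x is assumed to be a
# VariableExpr and the group id 7 is an arbitrary example value.
def _example_binary_commutativity(x):
    one = ConstantExpr(1)
    assert BinaryExpr(BinaryOp(7, "+"), x, one) == BinaryExpr(BinaryOp(7, "+"), one, x)
    assert not (BinaryExpr(BinaryOp(7, "-"), x, one) == BinaryExpr(BinaryOp(7, "-"), one, x))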
# ==== class predicate ====
BinaryRelationLabels = ["=", "<", "<="]
class BinaryRelation:
label = None
def __init__ (self, label):
assert(label in BinaryRelationLabels)
self.label = label
def __eq__ (self, rhs):
if (not isinstance(rhs, BinaryRelation)):
return False
return (self.label == rhs.label)
def toIRString (self):
return self.label
def __str__ (self):
return self.toIRString()
class BinaryPredicate (Predicate):
relation = None
def __init__ (self, relation, opd0, opd1):
assert(isinstance(relation, BinaryRelation))
assert(relation.label in BinaryRelationLabels)
if (sys.version_info.major == 2):
sys.exit("Error: FPTuner is currently based on Python3 only...")
elif (sys.version_info.major == 3):
super().__init__(False)
else:
sys.exit("ERROR: unsupported Python version: " + str(sys.version_info))
self.relation = relation
self.operands.append(opd0)
self.operands.append(opd1)
if (self.relation.label in ["=", "<", "<="]):
assert(isinstance(self.lhs(), ArithmeticExpr))
assert(isinstance(self.rhs(), ArithmeticExpr))
def lhs (self):
assert(len(self.operands) == 2)
if (self.relation.label in ["=", "<", "<="]):
assert(isinstance(self.operands[0], Expr))
assert(isinstance(self.operands[1], Expr))
return self.operands[0]
else:
sys.exit("ERROR: invalid BinaryPredicate")
def rhs (self):
assert(len(self.operands) == 2)
if (self.relation.label in ["=", "<", "<="]):
assert(isinstance(self.operands[0], Expr))
assert(isinstance(self.operands[1], Expr))
return self.operands[1]
else:
sys.exit("ERROR: invalid BinaryPredicate")
def vars (self):
return tft_utils.unionSets(self.lhs().vars(), self.rhs().vars())
def concEval (self, vmap = {}):
vlhs = self.lhs().concEval(vmap)
vrhs = self.rhs().concEval(vmap)
if (self.relation.label == "="):
return (vlhs == vrhs)
elif (self.relation.label == "<"):
return (vlhs < vrhs)
elif (self.relation.label == "<="):
return (vlhs <= vrhs)
else:
sys.exit("Error: unhandled relation in concEval...")
def __eq__ (self, rhs):
if (not isinstance(rhs, BinaryPredicate)):
return False
if (not (self.relation == rhs.relation)):
return False
if (self.relation.label in ["="]):
return (((self.lhs() == rhs.lhs()) and (self.rhs() == rhs.rhs())) or ((self.lhs() == rhs.rhs()) and (self.rhs() == rhs.lhs())))
elif (self.relation.label in ["<", "<="]):
return ((self.lhs() == rhs.lhs()) and (self.rhs() == rhs.rhs()))
else:
sys.exit("ERROR: not handled binary relation for __eq__")
def toIRString (self):
return "(" + self.lhs().toIRString() + " " + self.relation.toIRString() + " " + self.rhs().toIRString() + ")"
def __str__ (self):
return self.toIRString()
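# --- Illustrative sketch (not part of the original FPTuner sources) ---
# A predicate pairs a BinaryRelation with two arithmetic operands; concEval
# compares their concrete values under the same variable map used by the
# expressions.  The argument x is assumed to be a VariableExpr, and
# ConstantExpr is assumed to be an ArithmeticExpr as the assertions in
# __init__ above require.
def _example_predicate(x):
    pred = BinaryPredicate(BinaryRelation("<="), x, ConstantExpr(1))
    return pred.concEval({x: Fraction(1, 2)})    # True, since 1/2 <= 1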
# ==== some expression judgements ====
def isPowerOf2 (f):
assert(type(f) is float)
log2 = math.log(abs(f), 2.0)
return (int(log2) == log2)
def isPreciseConstantExpr (expr):
assert(isinstance(expr, ConstantExpr))
f = float(expr.value())
if (f == 0.0):
return True
if (int(f) == f):
return True
return False
def isPreciseOperation (expr):
assert(isinstance(expr, Expr))
if (isinstance(expr, ConstantExpr)):
return isPreciseConstantExpr(expr)
elif (isinstance(expr, VariableExpr)):
if (expr.getGid() == PRESERVED_CONST_GID):
assert(expr.hasBounds())
assert(expr.lb() == expr.ub())
return isPreciseConstantExpr(expr.lb())
if (expr.hasBounds() and (expr.lb() == expr.ub())):
return isPreciseConstantExpr(expr.lb())
return False
elif (isinstance(expr, UnaryExpr)):
if (expr.operator.label in ["-", "abs"]):
return True
else:
return False
elif (isinstance(expr, BinaryExpr)):
if (expr.operator.label in ["+", "-"]):
if (isinstance(expr.lhs(), ConstantExpr) and
(float(expr.lhs().value()) == 0.0)):
return True
if (isinstance(expr.rhs(), ConstantExpr) and
(float(expr.rhs().value()) == 0.0)):
return True
if (isinstance(expr.lhs(), VariableExpr) and
(expr.lhs().hasBounds() and
(float(expr.lhs().lb().value()) == 0.0) and
(float(expr.lhs().ub().value()) == 0.0))):
return True
if (isinstance(expr.rhs(), VariableExpr) and
(expr.rhs().hasBounds() and
(float(expr.rhs().lb().value()) == 0.0) and
(float(expr.rhs().ub().value()) == 0.0))):
return True
elif (expr.operator.label in ["*"]):
if (isinstance(expr.lhs(), ConstantExpr) and
isPowerOf2(float(expr.lhs().value()))):
return True
if (isinstance(expr.rhs(), ConstantExpr) and
isPowerOf2(float(expr.rhs().value()))):
return True
# if (isinstance(expr.lhs(), ConstantExpr) and
# (float(expr.lhs().value()) in [1.0, -1.0])):
# return True
# if (isinstance(expr.rhs(), ConstantExpr) and
# (float(expr.rhs().value()) in [1.0, -1.0])):
# return True
if (isinstance(expr.lhs(), VariableExpr) and
(expr.lhs().hasBounds() and
(expr.lhs().lb() == expr.lhs().ub()) and
isPowerOf2(float(expr.lhs().lb().value())))):
return True
if (isinstance(expr.rhs(), VariableExpr) and
(expr.rhs().hasBounds() and
(expr.rhs().lb() == expr.rhs().ub()) and
isPowerOf2(float(expr.rhs().lb().value())))):
return True
elif (expr.operator.label in ["/"]):
if (isinstance(expr.rhs(), ConstantExpr) and
(isPowerOf2(float(expr.rhs().value())))):
return True
if (isinstance(expr.rhs(), VariableExpr) and
(expr.rhs().hasBounds() and
(expr.rhs().lb() == expr.rhs().ub()) and
isPowerOf2(float(expr.rhs().lb().value())))):
return True
else:
pass
return False
else:
sys.exit("ERROR: unknown expression type...")
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import importlib
import sys
import os
import shutil
if 'GAE_SDK' in os.environ:
SDK_PATH = os.environ['GAE_SDK']
sys.path.insert(0, SDK_PATH)
import dev_appserver
dev_appserver.fix_sys_path()
else:
print "GAE_SDK environment variable must be on path and point to App Engine's SDK folder"
from gaeforms.ndb.property import SimpleCurrency, SimpleDecimal
from google.appengine.ext.ndb.model import StringProperty, TextProperty, DateProperty, DateTimeProperty, \
IntegerProperty, \
FloatProperty, BooleanProperty
PROJECT_DIR = os.path.dirname(__file__)
PROJECT_DIR = os.path.abspath(os.path.join(PROJECT_DIR, '..'))
APPS_DIR = os.path.join(PROJECT_DIR, 'apps')
TEST_DIR = os.path.join(PROJECT_DIR, 'test')
sys.path.insert(1, APPS_DIR)
APPENGINE_DIR = os.path.join(PROJECT_DIR, 'appengine')
WEB_DIR = os.path.join(APPENGINE_DIR, 'routes')
TEMPLATES_DIR = os.path.join(APPENGINE_DIR, 'templates')
# Templates
REST_TESTS_TEMPLATE = '''# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from datetime import datetime, date
from decimal import Decimal
from base import GAETestCase
from %(app)s_app.%(app)s_model import %(model)s
from routes.%(app)ss import rest
from gaegraph.model import Node
from mock import Mock
from mommygae import mommy
class IndexTests(GAETestCase):
def test_success(self):
mommy.save_one(%(model)s)
mommy.save_one(%(model)s)
json_response = rest.index()
context = json_response.context
self.assertEqual(2, len(context))
%(model_underscore)s_dct = context[0]
self.assertSetEqual(set(['id', 'creation', %(model_properties)s]), set(%(model_underscore)s_dct.iterkeys()))
self.assert_can_serialize_as_json(json_response)
class NewTests(GAETestCase):
def test_success(self):
self.assertIsNone(%(model)s.query().get())
json_response = rest.new(None, %(request_values)s)
db_%(model_underscore)s = %(model)s.query().get()
self.assertIsNotNone(db_%(model_underscore)s)
%(model_assertions)s
self.assert_can_serialize_as_json(json_response)
def test_error(self):
resp = Mock()
json_response = rest.new(resp)
errors = json_response.context
self.assertEqual(500, resp.status_code)
self.assertSetEqual(set([%(model_properties)s]), set(errors.keys()))
self.assert_can_serialize_as_json(json_response)
class EditTests(GAETestCase):
def test_success(self):
%(model_underscore)s = mommy.save_one(%(model)s)
old_properties = %(model_underscore)s.to_dict()
json_response = rest.edit(None, %(model_underscore)s.key.id(), %(request_values)s)
db_%(model_underscore)s = %(model_underscore)s.key.get()
%(model_assertions)s
self.assertNotEqual(old_properties, db_%(model_underscore)s.to_dict())
self.assert_can_serialize_as_json(json_response)
def test_error(self):
%(model_underscore)s = mommy.save_one(%(model)s)
old_properties = %(model_underscore)s.to_dict()
resp = Mock()
json_response = rest.edit(resp, %(model_underscore)s.key.id())
errors = json_response.context
self.assertEqual(500, resp.status_code)
self.assertSetEqual(set([%(model_properties)s]), set(errors.keys()))
self.assertEqual(old_properties, %(model_underscore)s.key.get().to_dict())
self.assert_can_serialize_as_json(json_response)
class DeleteTests(GAETestCase):
def test_success(self):
%(model_underscore)s = mommy.save_one(%(model)s)
rest.delete(None, %(model_underscore)s.key.id())
self.assertIsNone(%(model_underscore)s.key.get())
def test_non_%(model_underscore)s_deletion(self):
non_%(model_underscore)s = mommy.save_one(Node)
response = Mock()
json_response = rest.delete(response, non_%(model_underscore)s.key.id())
self.assertIsNotNone(non_%(model_underscore)s.key.get())
self.assertEqual(500, response.status_code)
self.assert_can_serialize_as_json(json_response)
'''
HOME_TESTS_TEMPLATE = '''# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from base import GAETestCase
from %(app)s_app.%(app)s_model import %(model)s
from routes.%(app)ss.home import index, delete
from gaebusiness.business import CommandExecutionException
from gaegraph.model import Node
from mommygae import mommy
from tekton.gae.middleware.redirect import RedirectResponse
class IndexTests(GAETestCase):
def test_success(self):
mommy.save_one(%(model)s)
template_response = index()
self.assert_can_render(template_response)
class DeleteTests(GAETestCase):
def test_success(self):
%(model_underscore)s = mommy.save_one(%(model)s)
redirect_response = delete(%(model_underscore)s.key.id())
self.assertIsInstance(redirect_response, RedirectResponse)
self.assertIsNone(%(model_underscore)s.key.get())
def test_non_%(model_underscore)s_deletion(self):
non_%(model_underscore)s = mommy.save_one(Node)
self.assertRaises(CommandExecutionException, delete, non_%(model_underscore)s.key.id())
self.assertIsNotNone(non_%(model_underscore)s.key.get())
'''
EDIT_TESTS_TEMPLATE = '''# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from base import GAETestCase
from datetime import datetime, date
from decimal import Decimal
from %(app)s_app.%(app)s_model import %(model)s
from routes.%(app)ss.edit import index, save
from mommygae import mommy
from tekton.gae.middleware.redirect import RedirectResponse
class IndexTests(GAETestCase):
def test_success(self):
%(model_underscore)s = mommy.save_one(%(model)s)
template_response = index(%(model_underscore)s.key.id())
self.assert_can_render(template_response)
class EditTests(GAETestCase):
def test_success(self):
%(model_underscore)s = mommy.save_one(%(model)s)
old_properties = %(model_underscore)s.to_dict()
redirect_response = save(%(model_underscore)s.key.id(), %(request_values)s)
self.assertIsInstance(redirect_response, RedirectResponse)
edited_%(model_underscore)s = %(model_underscore)s.key.get()
%(model_assertions)s
self.assertNotEqual(old_properties, edited_%(model_underscore)s.to_dict())
def test_error(self):
%(model_underscore)s = mommy.save_one(%(model)s)
old_properties = %(model_underscore)s.to_dict()
template_response = save(%(model_underscore)s.key.id())
errors = template_response.context['errors']
self.assertSetEqual(set([%(model_properties)s]), set(errors.keys()))
self.assertEqual(old_properties, %(model_underscore)s.key.get().to_dict())
self.assert_can_render(template_response)
'''
NEW_TESTS_TEMPLATE = '''# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from base import GAETestCase
from datetime import datetime, date
from decimal import Decimal
from %(app)s_app.%(app)s_model import %(model)s
from routes.%(app)ss.new import index, save
from tekton.gae.middleware.redirect import RedirectResponse
class IndexTests(GAETestCase):
def test_success(self):
template_response = index()
self.assert_can_render(template_response)
class SaveTests(GAETestCase):
def test_success(self):
self.assertIsNone(%(model)s.query().get())
redirect_response = save(%(request_values)s)
self.assertIsInstance(redirect_response, RedirectResponse)
saved_%(model_underscore)s = %(model)s.query().get()
self.assertIsNotNone(saved_%(model_underscore)s)
%(model_assertions)s
def test_error(self):
template_response = save()
errors = template_response.context['errors']
self.assertSetEqual(set([%(model_properties)s]), set(errors.keys()))
self.assert_can_render(template_response)
'''
MODEL_TEMPLATE = '''# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from google.appengine.ext import ndb
from gaegraph.model import Node
from gaeforms.ndb import property
class %(model)s(Node):
%(properties)s
'''
COMMANDS_TEMPLATE = '''# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from gaebusiness.gaeutil import SaveCommand, ModelSearchCommand
from gaeforms.ndb.form import ModelForm
from gaegraph.business_base import UpdateNode, NodeSearch, DeleteNode
from %(app_path)s.%(app)s_model import %(model)s
class %(model)sSaveForm(ModelForm):
"""
Form used to save and update %(model)s
"""
_model_class = %(model)s
_include = [%(form_properties)s]
class %(model)sForm(ModelForm):
"""
Form used to expose %(model)s's properties for list or json
"""
_model_class = %(model)s
class Get%(model)sCommand(NodeSearch):
_model_class = %(model)s
class Delete%(model)sCommand(DeleteNode):
_model_class = %(model)s
class Save%(model)sCommand(SaveCommand):
_model_form_class = %(model)sSaveForm
class Update%(model)sCommand(UpdateNode):
_model_form_class = %(model)sSaveForm
class List%(model)sCommand(ModelSearchCommand):
def __init__(self):
super(List%(model)sCommand, self).__init__(%(model)s.query_by_creation())
'''
FACADE_TEMPLATE = r'''# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from gaegraph.business_base import NodeSearch, DeleteNode
from %(app_path)s.%(app)s_commands import List%(model)sCommand, Save%(model)sCommand, Update%(model)sCommand, %(model)sForm,\
Get%(model)sCommand, Delete%(model)sCommand
def save_%(model_underscore)s_cmd(**%(model_underscore)s_properties):
"""
Command to save %(model)s entity
:param %(model_underscore)s_properties: a dict of properties to save on model
:return: a Command that saves %(model)s, validating and localizing properties received as strings
"""
return Save%(model)sCommand(**%(model_underscore)s_properties)
def update_%(model_underscore)s_cmd(%(model_underscore)s_id, **%(model_underscore)s_properties):
"""
Command to update %(model)s entity with id equal to '%(model_underscore)s_id'
:param %(model_underscore)s_properties: a dict of properties to update model
:return: a Command that updates %(model)s, validating and localizing properties received as strings
"""
return Update%(model)sCommand(%(model_underscore)s_id, **%(model_underscore)s_properties)
def list_%(model_underscore)ss_cmd():
"""
Command to list %(model)s entities ordered by their creation dates
:return: a Command that proceeds with the db operations when executed
"""
return List%(model)sCommand()
def %(model_underscore)s_form(**kwargs):
"""
Function to get %(model)s's detail form.
:param kwargs: form properties
:return: Form
"""
return %(model)sForm(**kwargs)
def get_%(model_underscore)s_cmd(%(model_underscore)s_id):
"""
Find %(model_underscore)s by its id
:param %(model_underscore)s_id: the %(model_underscore)s id
:return: Command
"""
return Get%(model)sCommand(%(model_underscore)s_id)
def delete_%(model_underscore)s_cmd(%(model_underscore)s_id):
"""
Construct a command to delete a %(model)s
:param %(model_underscore)s_id: %(model_underscore)s's id
:return: Command
"""
return Delete%(model)sCommand(%(model_underscore)s_id)
'''
HOME_SCRIPT_TEMPLATE = '''# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from config.template_middleware import TemplateResponse
from tekton import router
from gaecookie.decorator import no_csrf
from %(app_name)s import %(app)s_facade
from routes.%(web_name)s import new, edit
from tekton.gae.middleware.redirect import RedirectResponse
@no_csrf
def index():
cmd = %(app)s_facade.list_%(model_underscore)ss_cmd()
%(model_underscore)ss = cmd()
edit_path = router.to_path(edit)
delete_path = router.to_path(delete)
%(model_underscore)s_form = %(app)s_facade.%(model_underscore)s_form()
def localize_%(model_underscore)s(%(model_underscore)s):
%(model_underscore)s_dct = %(model_underscore)s_form.fill_with_model(%(model_underscore)s)
%(model_underscore)s_dct['edit_path'] = router.to_path(edit_path, %(model_underscore)s_dct['id'])
%(model_underscore)s_dct['delete_path'] = router.to_path(delete_path, %(model_underscore)s_dct['id'])
return %(model_underscore)s_dct
localized_%(model_underscore)ss = [localize_%(model_underscore)s(%(model_underscore)s) for %(model_underscore)s in %(model_underscore)ss]
context = {'%(model_underscore)ss': localized_%(model_underscore)ss,
'new_path': router.to_path(new)}
return TemplateResponse(context, '%(app)ss/%(app)s_home.html')
def delete(%(model_underscore)s_id):
%(app)s_facade.delete_%(model_underscore)s_cmd(%(model_underscore)s_id)()
return RedirectResponse(router.to_path(index))
'''
NEW_SCRIPT_TEMPLATE = '''# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from config.template_middleware import TemplateResponse
from gaebusiness.business import CommandExecutionException
from tekton import router
from gaecookie.decorator import no_csrf
from %(app_name)s import %(app)s_facade
from routes import %(web_name)s
from tekton.gae.middleware.redirect import RedirectResponse
@no_csrf
def index():
return TemplateResponse({'save_path': router.to_path(save)}, '%(web_name)s/%(app)s_form.html')
def save(**%(model_underscore)s_properties):
cmd = %(app)s_facade.save_%(model_underscore)s_cmd(**%(model_underscore)s_properties)
try:
cmd()
except CommandExecutionException:
context = {'errors': cmd.errors,
'%(model_underscore)s': %(model_underscore)s_properties}
return TemplateResponse(context, '%(web_name)s/%(app)s_form.html')
return RedirectResponse(router.to_path(%(web_name)s))
'''
EDIT_SCRIPT_TEMPLATE = '''# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from config.template_middleware import TemplateResponse
from gaebusiness.business import CommandExecutionException
from tekton import router
from gaecookie.decorator import no_csrf
from %(app_name)s import %(app)s_facade
from routes import %(web_name)s
from tekton.gae.middleware.redirect import RedirectResponse
@no_csrf
def index(%(model_underscore)s_id):
%(model_underscore)s = %(app)s_facade.get_%(model_underscore)s_cmd(%(model_underscore)s_id)()
%(model_underscore)s_form = %(app)s_facade.%(model_underscore)s_form()
context = {'save_path': router.to_path(save, %(model_underscore)s_id), '%(model_underscore)s': %(model_underscore)s_form.fill_with_model(%(model_underscore)s)}
return TemplateResponse(context, '%(web_name)s/%(app)s_form.html')
def save(%(model_underscore)s_id, **%(model_underscore)s_properties):
cmd = %(app)s_facade.update_%(model_underscore)s_cmd(%(model_underscore)s_id, **%(model_underscore)s_properties)
try:
cmd()
except CommandExecutionException:
context = {'errors': cmd.errors, '%(model_underscore)s': %(model_underscore)s_properties}
return TemplateResponse(context, '%(web_name)s/%(app)s_form.html')
return RedirectResponse(router.to_path(%(web_name)s))
'''
REST_SCRIPT_TEMPLATE = '''# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from gaebusiness.business import CommandExecutionException
from tekton.gae.middleware.json_middleware import JsonResponse
from %(app_name)s import %(app)s_facade
def index():
cmd = %(app)s_facade.list_%(model_underscore)ss_cmd()
%(model_underscore)s_list = cmd()
%(model_underscore)s_form = %(app)s_facade.%(model_underscore)s_form()
%(model_underscore)s_dcts = [%(model_underscore)s_form.fill_with_model(m) for m in %(model_underscore)s_list]
return JsonResponse(%(model_underscore)s_dcts)
def new(_resp, **%(model_underscore)s_properties):
cmd = %(app)s_facade.save_%(model_underscore)s_cmd(**%(model_underscore)s_properties)
return _save_or_update_json_response(cmd, _resp)
def edit(_resp, id, **%(model_underscore)s_properties):
cmd = %(app)s_facade.update_%(model_underscore)s_cmd(id, **%(model_underscore)s_properties)
return _save_or_update_json_response(cmd, _resp)
def delete(_resp, id):
cmd = %(app)s_facade.delete_%(model_underscore)s_cmd(id)
try:
cmd()
except CommandExecutionException:
_resp.status_code = 500
return JsonResponse(cmd.errors)
def _save_or_update_json_response(cmd, _resp):
try:
%(model_underscore)s = cmd()
except CommandExecutionException:
_resp.status_code = 500
return JsonResponse(cmd.errors)
%(model_underscore)s_form = %(app)s_facade.%(model_underscore)s_form()
return JsonResponse(%(model_underscore)s_form.fill_with_model(%(model_underscore)s))
'''
HOME_HTML_TEMPLATE = '''{%% extends '%(web_name)s/%(app)s_base.html' %%}
{%% block body %%}
<div class="container">
<div class="row">
<div class="col-md-12">
<h1>{%% trans %%}This is a generic home for %(app_name)s {%% endtrans %%} </h1>
<a href="{{ new_path }}" class="btn btn-success">{%% trans %%}Create New %(model)s{%% endtrans %%}</a>
<hr/>
<h2>{%% trans %%}List of %(model)ss{%% endtrans %%}</h2>
<table class="table table-striped table-hover">
<thead>
<tr>
<th/>
<th>{%% trans %%}Id{%% endtrans %%}</th>
<th>{%% trans %%}Creation{%% endtrans %%}</th>
%(headers)s
</tr>
</thead>
<tbody>
{%% for %(model_underscore)s in %(model_underscore)ss %%}
<tr>
<td><a href="{{ %(model_underscore)s.edit_path }}" class="btn btn-success btn-sm"><i
class="glyphicon glyphicon-pencil"></i></a></td>
<td>{{ %(model_underscore)s.id }}</td>
<td>{{ %(model_underscore)s.creation }}</td>
%(columns)s
<td>
<form action="{{ %(model_underscore)s.delete_path }}" method="post" onsubmit="return confirm('{{_('Are you sure to delete? Press cancel to avoid deletion.')}}');">
{{ csrf_input() }}
<button class="btn btn-danger btn-sm"><i
class="glyphicon glyphicon-trash"></i></button>
</form>
</td>
</tr>
{%% endfor %%}
</tbody>
</table>
</div>
</div>
</div>
{%% endblock %%}'''
FORM_HTML_TEMPLATE = '''{%% extends '%(web_name)s/%(app)s_base.html' %%}
{%% block body %%}
{%% set %(model_underscore)s=%(model_underscore)s or None %%}
{%% set errors=errors or None %%}
<div class="container">
<div class="row">
<div class="col-md-6 col-md-offset-3">
<br/>
<div class="well">
<h1 class="text-center">{%% trans %%}%(model)s Form{%% endtrans %%}</h1>
<form action="{{ save_path }}" method="post" role="form">
{{ csrf_input() }}
%(inputs)s
<button type="submit" class="btn btn-success">{%% trans %%}Save{%% endtrans %%}</button>
</form>
</div>
</div>
</div>
</div>
{%% endblock %%}'''
def _create_dir_if_not_existing(package_path):
if not os.path.exists(package_path):
os.mkdir(package_path)
def _create_file_if_not_existing(file_path, content=''):
if not os.path.isfile(file_path):
with open(file_path, 'w') as f:
f.write(content.encode('utf8'))
def _create_package(package_path):
_create_dir_if_not_existing(package_path)
_create_file_if_not_existing(os.path.join(package_path, '__init__.py'))
def _create_app(name, app_path, model, *properties):
properties = '\n'.join(parse_property(p) for p in properties)
properties = properties or ' pass'
_create_package(app_path)
_create_file_if_not_existing(os.path.join(app_path, '%s_model.py' % name),
MODEL_TEMPLATE % {'model': model, 'properties': properties})
def parse_property(p):
name, type_alias = p.split(':')
types = {'string': 'ndb.StringProperty(required=True)',
'date': 'ndb.DateProperty(required=True)',
'datetime': 'ndb.DateTimeProperty(required=True)',
'int': 'ndb.IntegerProperty(required=True)',
'float': 'ndb.FloatProperty(required=True)',
'decimal': 'property.SimpleDecimal(required=True)',
'currency': 'property.SimpleCurrency(required=True)',
'bool': 'ndb.BooleanProperty(required=True)'}
return ' %s = %s' % (name, types[type_alias])
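# --- Illustrative sketch (not part of the original script) ---
# parse_property turns a "name:type" spec into the indented property line that
# is spliced into MODEL_TEMPLATE, e.g. 'price:currency' becomes a
# "price = property.SimpleCurrency(required=True)" attribute on the model.
def _example_parse_property():
    return parse_property('price:currency')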
def init_app(name, model, *properties):
_title('Creating app package')
app_path = os.path.join(APPS_DIR, name + '_app')
_create_app(name, app_path, model, *properties)
PROPERTY = '%(model)s.%(property)s'
def _build_properties(model, properties):
return ', \n '.join([PROPERTY % {'model': model, 'property': p} for p in properties])
def _model_class(app, model):
app_path = app + '_app'
model_module = importlib.import_module(app_path + '.%s_model' % app)
model_class = getattr(model_module, model)
return model_class
def _model_properties(app, model):
model_class = _model_class(app, model)
properties = set(model_class._properties.keys())
properties = properties.difference(set(['class']))
return properties
def commands_code_for(app, model):
app_path = app + '_app'
properties = _model_properties(app, model)
full_properties = _build_properties(model, properties)
form_properties = properties.difference(set(['creation']))
form_properties = _build_properties(model, form_properties)
dct = {'app': app, 'app_path': app_path, 'model': model, 'full_properties': full_properties,
'form_properties': form_properties}
return COMMANDS_TEMPLATE % dct
def _title(param):
n = 15
print ('- ' * n) + param + (' -' * n)
def _to_app_name(app):
return app + '_app'
def _to_underscore_case(model):
model_underscore = model[0].lower() + model[1:]
return ''.join(('_' + letter.lower() if letter.isupper() else letter) for letter in model_underscore)
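# --- Illustrative sketch (not part of the original script) ---
# _to_underscore_case converts a CamelCase model name into the snake_case
# identifier used throughout the generated code:
def _example_to_underscore_case():
    return _to_underscore_case('CustomerOrder')  # -> 'customer_order'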
def generate_generic(app, model, template_path_function, file_name, content_function):
app_template_path = template_path_function(app)
template_file = os.path.join(app_template_path, file_name)
content = content_function(app, model)
_create_file_if_not_existing(template_file, content)
return content
def _to_app_path(app):
return os.path.join(APPS_DIR, app + '_app')
def generate_app_file(app, model, file_name, content_function):
file_name = '%s_%s.py' % (app, file_name)
return generate_generic(app, model, _to_app_path, file_name, content_function)
def init_commands(app, model):
return generate_app_file(app, model, 'commands', commands_code_for)
def facade_code_for(app, model):
app_path = _to_app_name(app)
model_underscore = _to_underscore_case(model)
dct = {'app': app, 'app_path': app_path, 'model': model, 'model_underscore': model_underscore}
return FACADE_TEMPLATE % dct
def init_facade(app, model):
return generate_app_file(app, model, 'facade', facade_code_for)
def _to_routes_name(app):
return app + 's'
def init_routes(app):
web_path = _to_routes_path(app)
_create_package(web_path)
def _to_routes_path(app):
return os.path.join(WEB_DIR, _to_routes_name(app))
def generate_routes(app, model, file_name, content_function):
file_name = '%s.py' % file_name
return generate_generic(app, model, _to_routes_path, file_name, content_function)
def code_for_home_script(app, model):
web_name = _to_routes_name(app)
app_name = _to_app_name(app)
return HOME_SCRIPT_TEMPLATE % {'app_name': app_name,
'model_underscore': _to_underscore_case(model),
'web_name': web_name,
'app': app}
def init_home_script(app, model):
return generate_routes(app, model, 'home', code_for_home_script)
def code_for_new_script(app, model):
web_name = _to_routes_name(app)
app_name = _to_app_name(app)
return NEW_SCRIPT_TEMPLATE % {'app_name': app_name,
'model_underscore': _to_underscore_case(model),
'web_name': web_name,
'app': app}
def init_new_script(app, model):
return generate_routes(app, model, 'new', code_for_new_script)
def code_for_edit_script(app, model):
web_name = _to_routes_name(app)
app_name = _to_app_name(app)
return EDIT_SCRIPT_TEMPLATE % {'app_name': app_name,
'model_underscore': _to_underscore_case(model),
'web_name': web_name,
'app': app}
def init_edit_script(app, model):
return generate_routes(app, model, 'edit', code_for_edit_script)
def code_for_rest_script(app, model):
web_name = _to_routes_name(app)
app_name = _to_app_name(app)
return REST_SCRIPT_TEMPLATE % {'app_name': app_name,
'model_underscore': _to_underscore_case(model),
'web_name': web_name,
'app': app}
def init_rest_script(app, model):
return generate_routes(app, model, 'rest', code_for_rest_script)
APP_BASE_HTML_TEMPLATE = '''{%% extends 'base/base.html' %%}
{%% block tabs %%}
{{ select_tab('%(app_name_upper)s') }}
{%% endblock %%}'''
def _to_template_path(app):
return os.path.join(TEMPLATES_DIR, _to_routes_name(app))
def init_html_templates(app):
template_path = _to_template_path(app)
content = APP_BASE_HTML_TEMPLATE % {'app_name_upper': _to_routes_name(app).upper()}
_create_dir_if_not_existing(template_path)
base_dir = os.path.join(template_path, '%s_base.html' % app)
_create_file_if_not_existing(base_dir, content)
def _to_label(label):
names = label.split('_')
upper_names = [n[0].upper() + n[1:] for n in names]
return ' '.join(upper_names)
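# --- Illustrative sketch (not part of the original script) ---
# _to_label turns a property name into the human readable label rendered in
# the HTML templates:
def _example_to_label():
    return _to_label('unit_price')  # -> 'Unit Price'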
def _to_html_table_header(properties):
template = ' ' * 24 + '<th>{%% trans %%}%s{%% endtrans %%}</th>'
properties = [_to_label(p) for p in properties]
rendered = [template % p for p in properties]
return '\n'.join(rendered)
def _to_html_table_columns(model_underscore, properties):
template = ' ' * 28 + '<td>{{ %(model_underscore)s.%(property)s }}</td>'
rendered = [template % {'model_underscore': model_underscore, 'property': p} for p in properties]
return '\n'.join(rendered)
def _to_html_form_inputs(model_underscore, properties):
template = "{{ form_input(_('%(label)s'),'%(property)s',%(model_underscore)s.%(property)s,errors.%(property)s) }}"
template = ' ' * 24 + template
rendered = [template % {'model_underscore': model_underscore, 'property': p, 'label': _to_label(p)} for p in
properties]
return '\n'.join(rendered)
def generate_template(app, model, file_name, content_function):
file_name = '%s_%s.html' % (app, file_name)
return generate_generic(app, model, _to_template_path, file_name, content_function)
def code_for_home_html(app, model):
web_name = _to_routes_name(app)
app_name = _to_app_name(app)
properties = _model_properties(app, model)
properties = properties.difference(set(['creation']))
model_underscore = _to_underscore_case(model)
return HOME_HTML_TEMPLATE % {'app_name': app_name,
'model_underscore': model_underscore,
'model': model,
'web_name': web_name,
'headers': _to_html_table_header(properties),
'columns': _to_html_table_columns(model_underscore, properties),
'app': app}
def init_home_html(app, model):
return generate_template(app, model, 'home', code_for_home_html)
def code_for_form_html(app, model):
web_name = _to_routes_name(app)
app_name = _to_app_name(app)
properties = _model_properties(app, model)
properties = properties.difference(set(['creation']))
model_underscore = _to_underscore_case(model)
return FORM_HTML_TEMPLATE % {'app_name': app_name,
'model_underscore': model_underscore,
'model': model,
'web_name': web_name,
'inputs': _to_html_form_inputs(model_underscore, properties),
'app': app}
def init_form_html(app, model):
return generate_template(app, model, 'form', code_for_form_html)
def init_test(name, model, *properties):
_title('Creating test package')
test_path = os.path.join(TEST_DIR, name + '_tests')
_create_package(test_path)
def _to_test_path(app):
return os.path.join(TEST_DIR, app + '_tests')
def generate_tests(app, model, file_name, content_function):
file_name = '%s_%s_tests.py' % (app, file_name)
return generate_generic(app, model, _to_test_path, file_name, content_function)
def _to_default_model_value(descriptor, name, index):
if isinstance(descriptor, (StringProperty, TextProperty)):
return "'%s_string'" % name
if isinstance(descriptor, DateProperty):
return "date(2014, 1, %s)" % (index + 1)
if isinstance(descriptor, DateTimeProperty):
return "datetime(2014, 1, 1, 1, %s, 0)" % (index + 1)
if isinstance(descriptor, (SimpleCurrency, SimpleDecimal)):
return "Decimal('1.%s')" % (index + 1 if index >= 9 else '0%s' % (index + 1))
if isinstance(descriptor, IntegerProperty):
return "%s" % (index + 1)
if isinstance(descriptor, FloatProperty):
return "1.%s" % (index + 1)
if isinstance(descriptor, BooleanProperty):
return "True"
def _to_model_assertions(variable, descriptors_dct):
template = " self.assertEquals(%(value)s, %(variable)s.%(property)s)"
rendered = [template % {'variable': variable, 'property': p, 'value': _to_default_model_value(descriptor, p, i)} for
i, (p, descriptor) in
enumerate(descriptors_dct.iteritems())]
return '\n'.join(rendered)
def _to_default_request_value(descriptor, name, index):
if isinstance(descriptor, (StringProperty, TextProperty)):
return "'%s_string'" % name
if isinstance(descriptor, DateProperty):
return "'1/%s/2014'" % (index + 1)
if isinstance(descriptor, DateTimeProperty):
return "'1/1/2014 01:%s:0'" % (index + 1)
if isinstance(descriptor, (SimpleCurrency, SimpleDecimal)):
return "'1.%s'" % (index + 1 if index >= 9 else '0%s' % (index + 1))
if isinstance(descriptor, IntegerProperty):
return "'%s'" % (index + 1)
if isinstance(descriptor, FloatProperty):
return "'1.%s'" % (index + 1)
if isinstance(descriptor, BooleanProperty):
return "'True'"
def _to_request_values(variable, descriptors_dct):
template = "%(property)s=%(value)s"
rendered = [template % {'variable': variable, 'property': p, 'value': _to_default_request_value(descriptor, p, i)}
for
i, (p, descriptor) in
enumerate(descriptors_dct.iteritems())]
return ', '.join(rendered)
def _model_descriptors(app, model):
model_class = _model_class(app, model)
return {k: p for k, p in model_class._properties.iteritems() if k not in ['class', 'creation']}
def code_new_tests(app, model):
descriptors_dct = _model_descriptors(app, model)
model_underscore = _to_underscore_case(model)
model_assertions = _to_model_assertions('saved_' + model_underscore, descriptors_dct)
model_properties = ', '.join("'%s'" % k for k in descriptors_dct)
request_values = _to_request_values('saved_' + model_underscore, descriptors_dct)
return NEW_TESTS_TEMPLATE % {'app': app, 'model': model, 'model_underscore': model_underscore,
'model_assertions': model_assertions, 'request_values': request_values,
'model_properties': model_properties}
def code_edit_tests(app, model):
descriptors_dct = _model_descriptors(app, model)
model_underscore = _to_underscore_case(model)
model_assertions = _to_model_assertions('edited_' + model_underscore, descriptors_dct)
model_properties = ', '.join("'%s'" % k for k in descriptors_dct)
request_values = _to_request_values('edited_' + model_underscore, descriptors_dct)
return EDIT_TESTS_TEMPLATE % {'app': app, 'model': model, 'model_underscore': model_underscore,
'model_assertions': model_assertions, 'request_values': request_values,
'model_properties': model_properties}
def code_home_tests(app, model):
model_underscore = _to_underscore_case(model)
return HOME_TESTS_TEMPLATE % {'app': app, 'model': model, 'model_underscore': model_underscore}
def code_rest_tests(app, model):
descriptors_dct = _model_descriptors(app, model)
model_underscore = _to_underscore_case(model)
model_assertions = _to_model_assertions('db_' + model_underscore, descriptors_dct)
model_properties = ', '.join("'%s'" % k for k in descriptors_dct)
request_values = _to_request_values('request_' + model_underscore, descriptors_dct)
return REST_TESTS_TEMPLATE % {'app': app, 'model': model, 'model_underscore': model_underscore,
'model_assertions': model_assertions, 'request_values': request_values,
'model_properties': model_properties}
def init_new_tests(app, model):
return generate_tests(app, model, 'new', code_new_tests)
def init_edit_tests(app, model):
return generate_tests(app, model, 'edit', code_edit_tests)
def init_home_tests(app, model):
return generate_tests(app, model, 'home', code_home_tests)
def init_rest_tests(app, model):
return generate_tests(app, model, 'rest', code_rest_tests)
def scaffold(app, model, *properties):
init_app(app, model, *properties)
_title('commands.py')
print init_commands(app, model)
_title('facade.py')
print init_facade(app, model)
_title('creating routes folder')
init_routes(app)
_title('routes home.py')
print init_home_script(app, model)
_title('routes.new.py')
print init_new_script(app, model)
_title('routes.edit.py')
print init_edit_script(app, model)
_title('routes rest.py')
print init_rest_script(app, model)
_title('creating template folder and base.html')
init_html_templates(app)
_title('templates/home.html')
print init_home_html(app, model)
_title('templates/form.html')
print init_form_html(app, model)
init_test(app, model)
_title('creating new tests')
print init_new_tests(app, model)
_title('creating edit tests')
print init_edit_tests(app, model)
_title('creating home tests')
print init_home_tests(app, model)
_title('creating rest tests')
print init_rest_tests(app, model)
def delete_app(app):
flag = raw_input('Are you sure you want to delete app %s (yes or no)? ' % app)
if flag.lower() == 'yes':
app_dir = os.path.join(APPS_DIR, app + '_app')
shutil.rmtree(app_dir)
template_dir = os.path.join(TEMPLATES_DIR, app + 's')
shutil.rmtree(template_dir)
web_dir = os.path.join(WEB_DIR, app + 's')
shutil.rmtree(web_dir)
test_dir = os.path.join(TEST_DIR, app + '_tests')
shutil.rmtree(test_dir)
FUNC_DICT = {'model': init_app, 'app': scaffold, 'delete': delete_app}
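# --- Illustrative usage sketch (not part of the original script) ---
# Assuming this file is saved as, e.g., manage.py at the project root and
# GAE_SDK is exported, a full CRUD scaffold could be generated with:
#   python manage.py app product Product name:string price:currency active:bool
# and later removed with:
#   python manage.py delete product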
if __name__ == '__main__':
if len(sys.argv) == 1:
print 'Commands available:'
print '\n '.join([''] + FUNC_DICT.keys())
print 'Both the model and app commands must be followed by <app> <model>'
elif len(sys.argv) >= 3:
fcn = FUNC_DICT.get(sys.argv[1])
if fcn:
fcn(*sys.argv[2:])
else:
print 'Invalid command: %s' % sys.argv[1]
else:
print 'Must use command %s followed by params: <app> <model>' % sys.argv[1]
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""BigMLer - main processing dispatching
"""
import sys
import os
import re
import gc
import bigml.api
from bigml.model import Model
from bigml.basemodel import retrieve_resource
from bigml.fields import Fields
import bigmler.utils as u
import bigmler.resourcesapi.common as r
import bigmler.resourcesapi.datasets as rds
import bigmler.resourcesapi.ensembles as rens
import bigmler.resourcesapi.models as rmod
import bigmler.resourcesapi.batch_predictions as rbp
import bigmler.labels as l
import bigmler.processing.args as a
import bigmler.processing.sources as ps
import bigmler.processing.datasets as pd
import bigmler.processing.models as pm
from bigmler.evaluation import evaluate, cross_validate
from bigmler.defaults import DEFAULTS_FILE
from bigmler.prediction import predict, combine_votes, remote_predict
from bigmler.prediction import OTHER, COMBINATION
from bigmler.reports import clear_reports, upload_reports
from bigmler.command import get_context
from bigmler.command import COMMAND_LOG, DIRS_LOG, SESSIONS_LOG
LOG_FILES = [COMMAND_LOG, DIRS_LOG, u.NEW_DIRS_LOG]
MINIMUM_MODEL = "full=false"
DEFAULT_OUTPUT = 'predictions.csv'
SETTINGS = {
"command_log": COMMAND_LOG,
"sessions_log": SESSIONS_LOG,
"dirs_log": DIRS_LOG,
"default_output": DEFAULT_OUTPUT,
"defaults_file": DEFAULTS_FILE}
def belongs_to_ensemble(model):
"""Checks if a model is part of an ensemble
"""
return ('object' in model and 'ensemble' in model['object'] and
model['object']['ensemble'])
def get_ensemble_id(model):
"""Returns the ensemble/id for a model that belongs to an ensemble
"""
if 'object' in model and 'ensemble_id' in model['object']:
return "ensemble/%s" % model['object']['ensemble_id']
return None
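# --- Illustrative sketch (not part of the original module) ---
# Both helpers above only inspect the resource dictionary, so a stub with a
# made-up ensemble id is enough to show the expected shapes:
def _example_ensemble_helpers():
    model = {"object": {"ensemble": True, "ensemble_id": "123abc"}}
    assert belongs_to_ensemble(model)
    return get_ensemble_id(model)    # -> "ensemble/123abc"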
def get_metadata(resource, key, default_value):
"""Retrieves from the user_metadata key in the resource the
given key using default_value as a default
"""
if ('object' in resource and 'user_metadata' in resource['object'] and
key in resource['object']['user_metadata']):
return resource['object']['user_metadata'][key]
return default_value
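# --- Illustrative sketch (not part of the original module) ---
# get_metadata digs into the resource's user_metadata dictionary and falls
# back to the supplied default when the key is missing:
def _example_get_metadata():
    resource = {"object": {"user_metadata": {"max_categories": 10}}}
    assert get_metadata(resource, "max_categories", None) == 10
    assert get_metadata(resource, "other_label", "others") == "others"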
def has_source(args):
"""Checks whether the command options include a source or a previous
training file
"""
return (args.training_set or args.source or args.source_file or
args.train_stdin)
def clear_log_files(log_files):
"""Clear all contents in log files
"""
for log_file in log_files:
try:
open(log_file, 'wb', 0).close()
except IOError:
pass
def get_test_dataset(args):
"""Returns the dataset id from one of the possible user options:
--test-dataset or --test-datasets
"""
args.test_dataset_ids = []
try:
# Parses dataset/id if provided.
if args.test_datasets:
args.test_dataset_ids = u.read_datasets(args.test_datasets)
except AttributeError:
pass
return (args.test_dataset if args.test_dataset is not None
else None if not args.test_dataset_ids
else args.test_dataset_ids[0])
def get_objective_id(args, fields):
"""Returns the objective id set by the user or the default
"""
if args.objective_field is not None:
try:
objective_id = u.get_objective_id(fields, args.objective_field)
fields.update_objective_field(
fields.field_column_number(objective_id), True)
except (KeyError, ValueError) as exc:
sys.exit(exc)
else:
return fields.field_id(fields.objective_field)
return objective_id
def check_args_coherence(args):
"""Checks the given options for coherence and completitude
"""
# It is compulsory to have a description to publish either datasets or
# models
if (not args.description_ and
(args.black_box or args.white_box or args.public_dataset)):
sys.exit("You should provide a description to publish.")
# When using --max-categories, it is compulsory to specify also the
# objective_field
if args.max_categories > 0 and args.objective_field is None:
sys.exit("When --max-categories is used, you must also provide the"
" --objective field name or column number")
# When using --new-fields, it is compulsory to specify also a dataset
# id
if args.new_fields and not args.dataset:
sys.exit("To use --new-fields you must also provide a dataset id"
" to generate the new dataset from it.")
# The --median option is only available for local predictions, not for
# remote ones.
if args.median and args.remote:
args.median = False
print ("WARNING: the --median option is only available for local"
" predictions. Using the mean value in the predicted node"
" instead.")
def main_dispatcher(args=sys.argv[1:]):
"""Parses command line and calls the different processing functions
"""
# If --clear-logs the log files are cleared
if "--clear-logs" in args:
clear_log_files(LOG_FILES)
settings = {}
settings.update(SETTINGS)
if '--evaluate' in args:
settings.update({"default_output": "evaluation"})
command_args, _, api, session_file, _ = get_context(args, settings)
# the predictions flag prevails to store the results
if (a.has_train(command_args) or a.has_test(command_args)
or command_args.votes_dirs):
compute_output(api, command_args)
u.log_message("_" * 80 + "\n", log_file=session_file)
def compute_output(api, args):
""" Creates one or more models using the `training_set` or uses the ids
of previously created BigML models to make predictions for the `test_set`.
"""
source = None
dataset = None
model = None
models = None
fields = None
other_label = OTHER
ensemble_ids = []
multi_label_data = None
multi_label_fields = []
#local_ensemble = None
test_dataset = None
datasets = None
# variables from command-line options
resume = args.resume_
model_ids = args.model_ids_
output = args.output
dataset_fields = args.dataset_fields_
check_args_coherence(args)
path = u.check_dir(output)
session_file = "%s%s%s" % (path, os.sep, SESSIONS_LOG)
csv_properties = {}
# If logging is required set the file for logging
log = None
if args.log_file:
u.check_dir(args.log_file)
log = args.log_file
# If --clear_logs the log files are cleared
clear_log_files([log])
# labels to be used in multi-label expansion
labels = (None if args.labels is None else
[label.strip() for label in
args.labels.split(args.args_separator)])
if labels is not None:
labels = sorted([label for label in labels])
# multi_label file must be preprocessed to obtain a new extended file
if args.multi_label and args.training_set is not None:
(args.training_set, multi_label_data) = ps.multi_label_expansion(
args.training_set, args.train_header, args, path,
labels=labels, session_file=session_file)
args.train_header = True
args.objective_field = multi_label_data["objective_name"]
all_labels = l.get_all_labels(multi_label_data)
if not labels:
labels = all_labels
else:
all_labels = labels
if args.objective_field:
csv_properties.update({'objective_field': args.objective_field})
if args.source_file:
# source is retrieved from the contents of the given local JSON file
source, csv_properties, fields = u.read_local_resource(
args.source_file,
csv_properties=csv_properties)
else:
# source is retrieved from the remote object
source, resume, csv_properties, fields = ps.source_processing(
api, args, resume,
csv_properties=csv_properties, multi_label_data=multi_label_data,
session_file=session_file, path=path, log=log)
if source is not None:
args.source = bigml.api.get_source_id(source)
if args.multi_label and source:
multi_label_data = l.get_multi_label_data(source)
(args.objective_field,
labels,
all_labels,
multi_label_fields) = l.multi_label_sync(args.objective_field,
labels,
multi_label_data,
fields,
multi_label_fields)
if fields and args.export_fields:
fields.summary_csv(os.path.join(path, args.export_fields))
if args.dataset_file:
# dataset is retrieved from the contents of the given local JSON file
model_dataset, csv_properties, fields = u.read_local_resource(
args.dataset_file,
csv_properties=csv_properties)
if not args.datasets:
datasets = [model_dataset]
dataset = model_dataset
else:
datasets = u.read_datasets(args.datasets)
if not datasets:
# dataset is retrieved from the remote object
datasets, resume, csv_properties, fields = pd.dataset_processing(
source, api, args, resume,
fields=fields,
csv_properties=csv_properties,
multi_label_data=multi_label_data,
session_file=session_file, path=path, log=log)
if datasets:
dataset = datasets[0]
if args.to_csv is not None:
resume = pd.export_dataset(dataset, api, args, resume,
session_file=session_file, path=path)
# Now we have a dataset, let's check if there's an objective_field
# given by the user and update it in the fields structure
args.objective_id_ = get_objective_id(args, fields)
# If test_split is used, split the dataset in a training and a test dataset
# according to the given split
if args.test_split > 0:
dataset, test_dataset, resume = pd.split_processing(
dataset, api, args, resume,
multi_label_data=multi_label_data,
session_file=session_file, path=path, log=log)
datasets[0] = dataset
# Check if the dataset has a categorical objective field and it
# has a max_categories limit for categories
if args.max_categories > 0 and len(datasets) == 1:
if pd.check_max_categories(fields.fields[args.objective_id_]):
distribution = pd.get_categories_distribution(dataset,
args.objective_id_)
if distribution and len(distribution) > args.max_categories:
categories = [element[0] for element in distribution]
other_label = pd.create_other_label(categories, other_label)
datasets, resume = pd.create_categories_datasets(
dataset, distribution, fields, args,
api, resume, session_file=session_file, path=path, log=log,
other_label=other_label)
else:
sys.exit("The provided objective field is not categorical nor "
"a full terms only text field. "
"Only these fields can be used with"
" --max-categories")
# If multi-dataset flag is on, generate a new dataset from the given
# list of datasets
if args.multi_dataset:
dataset, resume = pd.create_new_dataset(
datasets, api, args, resume, fields=fields,
session_file=session_file, path=path, log=log)
datasets = [dataset]
# Check if the dataset has a generators file associated with it, and
# generate a new dataset with the specified field structure. Also
# if the --to-dataset flag is used to clone or sample the original dataset
if args.new_fields or (args.sample_rate != 1 and args.no_model) or \
(args.lisp_filter or args.json_filter) and not has_source(args):
if fields is None:
if isinstance(dataset, str):
dataset = u.check_resource(dataset, api=api)
fields = Fields(dataset, csv_properties)
args.objective_id_ = get_objective_id(args, fields)
args.objective_name_ = fields.field_name(args.objective_id_)
dataset, resume = pd.create_new_dataset(
dataset, api, args, resume, fields=fields,
session_file=session_file, path=path, log=log)
datasets[0] = dataset
# rebuild fields structure for new ids and fields
csv_properties.update({'objective_field': args.objective_name_,
'objective_field_present': True})
fields = pd.get_fields_structure(dataset, csv_properties)
args.objective_id_ = get_objective_id(args, fields)
if args.multi_label and dataset and multi_label_data is None:
multi_label_data = l.get_multi_label_data(dataset)
(args.objective_field,
labels,
all_labels,
multi_label_fields) = l.multi_label_sync(args.objective_field,
labels,
multi_label_data,
fields, multi_label_fields)
if dataset:
# retrieves max_categories data, if any
args.max_categories = get_metadata(dataset, 'max_categories',
args.max_categories)
other_label = get_metadata(dataset, 'other_label',
other_label)
if fields and args.export_fields:
fields.summary_csv(os.path.join(path, args.export_fields))
if args.model_file:
# model is retrieved from the contents of the given local JSON file
model, csv_properties, fields = u.read_local_resource(
args.model_file,
csv_properties=csv_properties)
models = [model]
model_ids = [model['resource']]
ensemble_ids = []
elif args.ensemble_file:
# model is retrieved from the contents of the given local JSON file
ensemble, csv_properties, fields = u.read_local_resource(
args.ensemble_file,
csv_properties=csv_properties)
model_ids = ensemble['object']['models'][:]
ensemble_ids = [ensemble['resource']]
models = model_ids[:]
model = retrieve_resource(args.retrieve_api_,
models[0],
query_string=r.ALL_FIELDS_QS)
models[0] = model
else:
# model is retrieved from the remote object
models, model_ids, ensemble_ids, resume = pm.models_processing(
datasets, models, model_ids,
api, args, resume, fields=fields,
session_file=session_file, path=path, log=log, labels=labels,
multi_label_data=multi_label_data, other_label=other_label)
if models:
model = models[0]
single_model = len(models) == 1
# If multi-label flag is set and no training_set was provided, label
# info is extracted from the user_metadata. If models belong to an
# ensemble, the ensemble must be retrieved to get the user_metadata.
if model and args.multi_label and multi_label_data is None:
if ensemble_ids and isinstance(ensemble_ids[0], dict):
resource = ensemble_ids[0]
elif belongs_to_ensemble(model):
ensemble_id = get_ensemble_id(model)
resource = rens.get_ensemble(ensemble_id, api=api,
verbosity=args.verbosity,
session_file=session_file)
else:
resource = model
multi_label_data = l.get_multi_label_data(resource)
# We update the model's public state if needed
if model:
if (isinstance(model, str) or
bigml.api.get_status(model)['code'] != bigml.api.FINISHED):
if not args.evaluate and not a.has_train(args) and \
not a.has_test(args):
query_string = MINIMUM_MODEL
elif not args.test_header:
query_string = r.ALL_FIELDS_QS
else:
query_string = "%s;%s" % (r.ALL_FIELDS_QS, r.FIELDS_QS)
model = u.check_resource(model, api.get_model,
query_string=query_string)
models[0] = model
if (args.black_box or args.white_box or
(args.shared_flag and r.shared_changed(args.shared, model))):
model_args = {}
if args.shared_flag and r.shared_changed(args.shared, model):
model_args.update(shared=args.shared)
if args.black_box or args.white_box:
model_args.update(rmod.set_publish_model_args(args))
if model_args:
model = rmod.update_model(model, model_args, args,
api=api, path=path,
session_file=session_file)
models[0] = model
# We get the fields of the model if we haven't got
# them yet and need them
if model and not args.evaluate and (a.has_test(args) or
args.export_fields):
# if we are using boosted ensembles to predict, activate boosting
if model['object'].get('boosted_ensemble'):
args.boosting = True
# If more than one model, use the full field structure
if (not single_model and not args.multi_label and
belongs_to_ensemble(model)):
if ensemble_ids:
ensemble_id = ensemble_ids[0]
args.ensemble_ids_ = ensemble_ids
else:
ensemble_id = get_ensemble_id(model)
fields = pm.get_model_fields(
model, csv_properties, args, single_model=single_model,
multi_label_data=multi_label_data)
# Free memory after getting fields
# local_ensemble = None
gc.collect()
# Fills in all_labels from user_metadata
if args.multi_label and not all_labels:
(args.objective_field,
labels,
all_labels,
multi_label_fields) = l.multi_label_sync(args.objective_field, labels,
multi_label_data, fields,
multi_label_fields)
if model:
# retrieves max_categories data, if any
args.max_categories = get_metadata(model, 'max_categories',
args.max_categories)
other_label = get_metadata(model, 'other_label',
other_label)
if fields and args.export_fields:
fields.summary_csv(os.path.join(path, args.export_fields))
# If predicting
if (models and (a.has_test(args) or (test_dataset and args.remote))
and not args.evaluate):
models_per_label = 1
if test_dataset is None:
test_dataset = get_test_dataset(args)
if args.multi_label:
# When prediction starts from existing models, the
# multi_label_fields can be retrieved from the user_metadata
# in the models
if args.multi_label_fields is None and multi_label_fields:
multi_label_field_names = [field[1] for field
in multi_label_fields]
args.multi_label_fields = ",".join(multi_label_field_names)
test_set = ps.multi_label_expansion(
args.test_set, args.test_header, args, path,
labels=labels, session_file=session_file, input_flag=True)[0]
test_set_header = True
# Remote predictions: predictions are computed as batch predictions
# in bigml.com except when --no-batch flag is set on or multi-label
# or max-categories are used
if (args.remote and not args.no_batch and not args.multi_label
and not args.method == COMBINATION):
# create test source from file
test_name = "%s - test" % args.name
if args.test_source is None:
test_properties = ps.test_source_processing(
api, args, resume, session_file=session_file,
path=path, log=log)
(test_source, resume, csv_properties,
test_fields) = test_properties
else:
test_source_id = bigml.api.get_source_id(args.test_source)
test_source = api.check_resource(test_source_id)
if test_dataset is None:
# create test dataset from test source
dataset_args = rds.set_basic_dataset_args(args, name=test_name)
test_dataset, resume = pd.alternative_dataset_processing(
test_source, "test", dataset_args, api, args,
resume, session_file=session_file, path=path, log=log)
else:
test_dataset_id = bigml.api.get_dataset_id(test_dataset)
test_dataset = api.check_resource(test_dataset_id)
csv_properties.update(objective_field=None,
objective_field_present=False)
test_fields = pd.get_fields_structure(test_dataset,
csv_properties)
if args.to_dataset and args.dataset_off:
model = api.check_resource(model['resource'],
query_string=r.ALL_FIELDS_QS)
model_fields = Fields(model)
objective_field_name = model_fields.field_name( \
model_fields.objective_field)
if objective_field_name in list(test_fields.fields_by_name.keys()):
args.prediction_name = "%s (predicted)" % \
objective_field_name
batch_prediction_args = rbp.set_batch_prediction_args(
args, fields=fields,
dataset_fields=test_fields)
remote_predict(model, test_dataset, batch_prediction_args, args,
api, resume, prediction_file=output,
session_file=session_file, path=path, log=log)
else:
models_per_label = args.number_of_models
if (args.multi_label and ensemble_ids
and args.number_of_models == 1):
# use case where ensembles are read from a file
models_per_label = len(models) // len(ensemble_ids)
predict(models, fields, args, api=api, log=log,
resume=resume, session_file=session_file, labels=labels,
models_per_label=models_per_label, other_label=other_label,
multi_label_data=multi_label_data)
# When combine_votes flag is used, retrieve the predictions files saved
# in the comma separated list of directories and combine them
if args.votes_files_:
model_id = re.sub(r'.*(model_[a-f0-9]{24})__predictions\.csv$',
r'\1', args.votes_files_[0]).replace("_", "/")
try:
model = u.check_resource(model_id, api.get_model)
except ValueError as exception:
sys.exit("Failed to get model %s: %s" % (model_id, str(exception)))
local_model = Model(model)
message = u.dated("Combining votes.\n")
u.log_message(message, log_file=session_file,
console=args.verbosity)
combine_votes(args.votes_files_, local_model.to_prediction,
output, method=args.method)
# If evaluate flag is on, create remote evaluation and save results in
# json and human-readable format.
if args.evaluate:
# When we resume evaluation and models were already completed, we
# should use the datasets array as test datasets
if args.has_test_datasets_:
test_dataset = get_test_dataset(args)
if args.dataset_off and not args.has_test_datasets_:
args.test_dataset_ids = datasets
if args.test_dataset_ids and args.dataset_off:
eval_ensembles = len(ensemble_ids) == len(args.test_dataset_ids)
models_or_ensembles = (ensemble_ids if eval_ensembles else
models)
# Evaluate the models with the corresponding test datasets.
resume = evaluate(models_or_ensembles, args.test_dataset_ids, api,
args, resume,
fields=fields, dataset_fields=dataset_fields,
session_file=session_file, path=path,
log=log, labels=labels, all_labels=all_labels,
objective_field=args.objective_field)
else:
if args.multi_label and args.test_set is not None:
# When evaluation starts from existing models, the
# multi_label_fields can be retrieved from the user_metadata
# in the models
if args.multi_label_fields is None and multi_label_fields:
args.multi_label_fields = multi_label_fields
test_set = ps.multi_label_expansion(
test_set, test_set_header, args, path,
labels=labels, session_file=session_file)[0]
test_set_header = True
if args.test_split > 0 or args.has_test_datasets_:
dataset = test_dataset
dataset = u.check_resource(dataset, api=api,
query_string=r.ALL_FIELDS_QS)
dataset_fields = pd.get_fields_structure(dataset, None)
models_or_ensembles = (ensemble_ids if ensemble_ids != []
else models)
resume = evaluate(models_or_ensembles, [dataset], api,
args, resume,
fields=fields, dataset_fields=dataset_fields,
session_file=session_file, path=path,
log=log, labels=labels, all_labels=all_labels,
objective_field=args.objective_field)
# If cross_validation_rate is > 0, create remote evaluations and save
# results in json and human-readable format. Then average the results to
# issue a cross_validation measure set.
if args.cross_validation_rate > 0:
args.sample_rate = 1 - args.cross_validation_rate
cross_validate(models, dataset, fields, api, args, resume,
session_file=session_file,
path=path, log=log)
u.print_generated_files(path, log_file=session_file,
verbosity=args.verbosity)
if args.reports:
clear_reports(path)
if args.upload:
upload_reports(args.reports, path)
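# Hedged usage sketch: main_dispatcher is normally the console entry point and
# defaults to parsing sys.argv[1:], but it can also be driven with an explicit
# argument list. Only the flags referenced in this module are shown; everything
# else is handled by get_context as usual.
#
#     main_dispatcher(["--clear-logs", "--evaluate"])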
|
|
from __future__ import absolute_import
import io
import time
from ..codec import (has_gzip, has_snappy, has_lz4,
gzip_decode, snappy_decode,
lz4_decode, lz4_decode_old_kafka)
from .frame import KafkaBytes
from .struct import Struct
from .types import (
Int8, Int32, Int64, Bytes, Schema, AbstractType
)
from ..util import crc32, WeakMethod
class Message(Struct):
SCHEMAS = [
Schema(
('crc', Int32),
('magic', Int8),
('attributes', Int8),
('key', Bytes),
('value', Bytes)),
Schema(
('crc', Int32),
('magic', Int8),
('attributes', Int8),
('timestamp', Int64),
('key', Bytes),
('value', Bytes)),
]
SCHEMA = SCHEMAS[1]
CODEC_MASK = 0x07
CODEC_GZIP = 0x01
CODEC_SNAPPY = 0x02
CODEC_LZ4 = 0x03
TIMESTAMP_TYPE_MASK = 0x08
HEADER_SIZE = 22 # crc(4), magic(1), attributes(1), timestamp(8), key+value size(4*2)
def __init__(self, value, key=None, magic=0, attributes=0, crc=0,
timestamp=None):
assert value is None or isinstance(value, bytes), 'value must be bytes'
assert key is None or isinstance(key, bytes), 'key must be bytes'
assert magic > 0 or timestamp is None, 'timestamp not supported in v0'
# Default timestamp to now for v1 messages
if magic > 0 and timestamp is None:
timestamp = int(time.time() * 1000)
self.timestamp = timestamp
self.crc = crc
self._validated_crc = None
self.magic = magic
self.attributes = attributes
self.key = key
self.value = value
self.encode = WeakMethod(self._encode_self)
@property
def timestamp_type(self):
"""0 for CreateTime; 1 for LogAppendTime; None if unsupported.
Value is determined by the broker; produced messages should always be set to 0.
Requires Kafka >= 0.10 / message version >= 1
"""
if self.magic == 0:
return None
elif self.attributes & self.TIMESTAMP_TYPE_MASK:
return 1
else:
return 0
def _encode_self(self, recalc_crc=True):
version = self.magic
if version == 1:
fields = (self.crc, self.magic, self.attributes, self.timestamp, self.key, self.value)
elif version == 0:
fields = (self.crc, self.magic, self.attributes, self.key, self.value)
else:
raise ValueError('Unrecognized message version: %s' % version)
message = Message.SCHEMAS[version].encode(fields)
if not recalc_crc:
return message
self.crc = crc32(message[4:])
crc_field = self.SCHEMAS[version].fields[0]
return crc_field.encode(self.crc) + message[4:]
@classmethod
def decode(cls, data):
_validated_crc = None
if isinstance(data, bytes):
_validated_crc = crc32(data[4:])
data = io.BytesIO(data)
# Partial decode required to determine message version
base_fields = cls.SCHEMAS[0].fields[0:3]
crc, magic, attributes = [field.decode(data) for field in base_fields]
remaining = cls.SCHEMAS[magic].fields[3:]
fields = [field.decode(data) for field in remaining]
if magic == 1:
timestamp = fields[0]
else:
timestamp = None
msg = cls(fields[-1], key=fields[-2],
magic=magic, attributes=attributes, crc=crc,
timestamp=timestamp)
msg._validated_crc = _validated_crc
return msg
def validate_crc(self):
if self._validated_crc is None:
raw_msg = self._encode_self(recalc_crc=False)
self._validated_crc = crc32(raw_msg[4:])
if self.crc == self._validated_crc:
return True
return False
def is_compressed(self):
return self.attributes & self.CODEC_MASK != 0
def decompress(self):
codec = self.attributes & self.CODEC_MASK
assert codec in (self.CODEC_GZIP, self.CODEC_SNAPPY, self.CODEC_LZ4)
if codec == self.CODEC_GZIP:
assert has_gzip(), 'Gzip decompression unsupported'
raw_bytes = gzip_decode(self.value)
elif codec == self.CODEC_SNAPPY:
assert has_snappy(), 'Snappy decompression unsupported'
raw_bytes = snappy_decode(self.value)
elif codec == self.CODEC_LZ4:
assert has_lz4(), 'LZ4 decompression unsupported'
if self.magic == 0:
raw_bytes = lz4_decode_old_kafka(self.value)
else:
raw_bytes = lz4_decode(self.value)
else:
raise Exception('This should be impossible')
return MessageSet.decode(raw_bytes, bytes_to_read=len(raw_bytes))
def __hash__(self):
return hash(self._encode_self(recalc_crc=False))
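# Hedged round-trip sketch based solely on the class above: encode() recomputes
# the CRC over everything after the 4-byte crc field, and decode() of the raw
# bytes yields a message whose validate_crc() passes.
#
#     msg = Message(b"payload", key=b"k", magic=1)  # v1, timestamp defaults to now
#     raw = msg.encode()
#     copy = Message.decode(raw)
#     assert copy.validate_crc()
#     assert copy.value == b"payload" and copy.key == b"k"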
class PartialMessage(bytes):
def __repr__(self):
return 'PartialMessage(%s)' % self
class MessageSet(AbstractType):
ITEM = Schema(
('offset', Int64),
('message', Bytes)
)
HEADER_SIZE = 12 # offset + message_size
@classmethod
def encode(cls, items):
# RecordAccumulator encodes messagesets internally
if isinstance(items, (io.BytesIO, KafkaBytes)):
size = Int32.decode(items)
# rewind and return all the bytes
items.seek(items.tell() - 4)
return items.read(size + 4)
encoded_values = []
for (offset, message) in items:
encoded_values.append(Int64.encode(offset))
encoded_values.append(Bytes.encode(message))
encoded = b''.join(encoded_values)
return Bytes.encode(encoded)
@classmethod
def decode(cls, data, bytes_to_read=None):
"""Compressed messages should pass in bytes_to_read (via message size)
otherwise, we decode from data as Int32
"""
if isinstance(data, bytes):
data = io.BytesIO(data)
if bytes_to_read is None:
bytes_to_read = Int32.decode(data)
# if FetchRequest max_bytes is smaller than the available message set
# the server returns partial data for the final message
# So create an internal buffer to avoid over-reading
raw = io.BytesIO(data.read(bytes_to_read))
items = []
while bytes_to_read:
try:
offset = Int64.decode(raw)
msg_bytes = Bytes.decode(raw)
bytes_to_read -= 8 + 4 + len(msg_bytes)
items.append((offset, len(msg_bytes), Message.decode(msg_bytes)))
except ValueError:
# PartialMessage to signal that max_bytes may be too small
items.append((None, None, PartialMessage()))
break
return items
@classmethod
def repr(cls, messages):
if isinstance(messages, (KafkaBytes, io.BytesIO)):
offset = messages.tell()
decoded = cls.decode(messages)
messages.seek(offset)
messages = decoded
return str([cls.ITEM.repr(m) for m in messages])
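# Hedged framing sketch based on the classes above: encode() takes
# (offset, message_bytes) pairs and length-prefixes the concatenation, while
# decode() returns (offset, message_size, Message) tuples, appending a
# PartialMessage entry when the final message was truncated by the broker.
#
#     encoded = MessageSet.encode([(0, Message(b"a").encode())])
#     [(offset, size, message)] = MessageSet.decode(encoded)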
|
|
# Copyright 2017 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Planar Manipulator domain."""
import collections
from dm_control import mujoco
from dm_control.rl import control
from dm_control.suite import base
from dm_control.suite import common
from dm_control.utils import containers
from dm_control.utils import rewards
from dm_control.utils import xml_tools
from lxml import etree
import numpy as np
_CLOSE = .01 # (Meters) Distance below which a thing is considered close.
_CONTROL_TIMESTEP = .01 # (Seconds)
_TIME_LIMIT = 10 # (Seconds)
_P_IN_HAND = .1 # Probability of object-in-hand initial state
_P_IN_TARGET = .1 # Probability of object-in-target initial state
_ARM_JOINTS = ['arm_root', 'arm_shoulder', 'arm_elbow', 'arm_wrist',
'finger', 'fingertip', 'thumb', 'thumbtip']
_ALL_PROPS = frozenset(['ball', 'target_ball', 'cup',
'peg', 'target_peg', 'slot'])
_TOUCH_SENSORS = ['palm_touch', 'finger_touch', 'thumb_touch',
'fingertip_touch', 'thumbtip_touch']
SUITE = containers.TaggedTasks()
def make_model(use_peg, insert):
"""Returns a tuple containing the model XML string and a dict of assets."""
xml_string = common.read_model('manipulator.xml')
parser = etree.XMLParser(remove_blank_text=True)
mjcf = etree.XML(xml_string, parser)
# Select the desired prop.
if use_peg:
required_props = ['peg', 'target_peg']
if insert:
required_props += ['slot']
else:
required_props = ['ball', 'target_ball']
if insert:
required_props += ['cup']
# Remove unused props
for unused_prop in _ALL_PROPS.difference(required_props):
prop = xml_tools.find_element(mjcf, 'body', unused_prop)
prop.getparent().remove(prop)
return etree.tostring(mjcf, pretty_print=True), common.ASSETS
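# Hedged usage sketch (the two booleans are the same ones the task factories
# below pass in): make_model returns the MJCF XML string plus the shared assets
# dict expected by Physics.from_xml_string.
#
#     xml_string, assets = make_model(use_peg=True, insert=False)
#     physics = Physics.from_xml_string(xml_string, assets)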
@SUITE.add('benchmarking', 'hard')
def bring_ball(fully_observable=True, time_limit=_TIME_LIMIT, random=None,
environment_kwargs=None):
"""Returns manipulator bring task with the ball prop."""
use_peg = False
insert = False
physics = Physics.from_xml_string(*make_model(use_peg, insert))
task = Bring(use_peg=use_peg, insert=insert,
fully_observable=fully_observable, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(
physics, task, control_timestep=_CONTROL_TIMESTEP, time_limit=time_limit,
**environment_kwargs)
@SUITE.add('hard')
def bring_peg(fully_observable=True, time_limit=_TIME_LIMIT, random=None,
environment_kwargs=None):
"""Returns manipulator bring task with the peg prop."""
use_peg = True
insert = False
physics = Physics.from_xml_string(*make_model(use_peg, insert))
task = Bring(use_peg=use_peg, insert=insert,
fully_observable=fully_observable, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(
physics, task, control_timestep=_CONTROL_TIMESTEP, time_limit=time_limit,
**environment_kwargs)
@SUITE.add('hard')
def insert_ball(fully_observable=True, time_limit=_TIME_LIMIT, random=None,
environment_kwargs=None):
"""Returns manipulator insert task with the ball prop."""
use_peg = False
insert = True
physics = Physics.from_xml_string(*make_model(use_peg, insert))
task = Bring(use_peg=use_peg, insert=insert,
fully_observable=fully_observable, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(
physics, task, control_timestep=_CONTROL_TIMESTEP, time_limit=time_limit,
**environment_kwargs)
@SUITE.add('hard')
def insert_peg(fully_observable=True, time_limit=_TIME_LIMIT, random=None,
environment_kwargs=None):
"""Returns manipulator insert task with the peg prop."""
use_peg = True
insert = True
physics = Physics.from_xml_string(*make_model(use_peg, insert))
task = Bring(use_peg=use_peg, insert=insert,
fully_observable=fully_observable, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(
physics, task, control_timestep=_CONTROL_TIMESTEP, time_limit=time_limit,
**environment_kwargs)
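# Hedged sketch of driving one of the tasks registered above; reset()/step()
# come from the standard dm_control control.Environment interface, and the
# zero action is only a placeholder.
#
#     env = bring_ball()
#     timestep = env.reset()
#     action = np.zeros(env.action_spec().shape)
#     timestep = env.step(action)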
class Physics(mujoco.Physics):
"""Physics with additional features for the Planar Manipulator domain."""
def bounded_joint_pos(self, joint_names):
"""Returns joint positions as (sin, cos) values."""
joint_pos = self.named.data.qpos[joint_names]
return np.vstack([np.sin(joint_pos), np.cos(joint_pos)]).T
def joint_vel(self, joint_names):
"""Returns joint velocities."""
return self.named.data.qvel[joint_names]
def body_2d_pose(self, body_names, orientation=True):
"""Returns positions and/or orientations of bodies."""
if not isinstance(body_names, str):
body_names = np.array(body_names).reshape(-1, 1) # Broadcast indices.
pos = self.named.data.xpos[body_names, ['x', 'z']]
if orientation:
ori = self.named.data.xquat[body_names, ['qw', 'qy']]
return np.hstack([pos, ori])
else:
return pos
def touch(self):
return np.log1p(self.named.data.sensordata[_TOUCH_SENSORS])
def site_distance(self, site1, site2):
site1_to_site2 = np.diff(self.named.data.site_xpos[[site2, site1]], axis=0)
return np.linalg.norm(site1_to_site2)
class Bring(base.Task):
"""A Bring `Task`: bring the prop to the target."""
def __init__(self, use_peg, insert, fully_observable, random=None):
"""Initialize an instance of the `Bring` task.
Args:
use_peg: A `bool`, whether to replace the ball prop with the peg prop.
insert: A `bool`, whether to insert the prop in a receptacle.
fully_observable: A `bool`, whether the observation should contain the
position and velocity of the object being manipulated and the target
location.
random: Optional, either a `numpy.random.RandomState` instance, an
integer seed for creating a new `RandomState`, or None to select a seed
automatically (default).
"""
self._use_peg = use_peg
self._target = 'target_peg' if use_peg else 'target_ball'
self._object = 'peg' if self._use_peg else 'ball'
self._object_joints = ['_'.join([self._object, dim]) for dim in 'xzy']
self._receptacle = 'slot' if self._use_peg else 'cup'
self._insert = insert
self._fully_observable = fully_observable
super().__init__(random=random)
def initialize_episode(self, physics):
"""Sets the state of the environment at the start of each episode."""
# Local aliases
choice = self.random.choice
uniform = self.random.uniform
model = physics.named.model
data = physics.named.data
# Find a collision-free random initial configuration.
penetrating = True
while penetrating:
# Randomise angles of arm joints.
is_limited = model.jnt_limited[_ARM_JOINTS].astype(bool)
joint_range = model.jnt_range[_ARM_JOINTS]
lower_limits = np.where(is_limited, joint_range[:, 0], -np.pi)
upper_limits = np.where(is_limited, joint_range[:, 1], np.pi)
angles = uniform(lower_limits, upper_limits)
data.qpos[_ARM_JOINTS] = angles
# Symmetrize hand.
data.qpos['finger'] = data.qpos['thumb']
# Randomise target location.
target_x = uniform(-.4, .4)
target_z = uniform(.1, .4)
if self._insert:
target_angle = uniform(-np.pi/3, np.pi/3)
model.body_pos[self._receptacle, ['x', 'z']] = target_x, target_z
model.body_quat[self._receptacle, ['qw', 'qy']] = [
np.cos(target_angle/2), np.sin(target_angle/2)]
else:
target_angle = uniform(-np.pi, np.pi)
model.body_pos[self._target, ['x', 'z']] = target_x, target_z
model.body_quat[self._target, ['qw', 'qy']] = [
np.cos(target_angle/2), np.sin(target_angle/2)]
# Randomise object location.
object_init_probs = [_P_IN_HAND, _P_IN_TARGET, 1-_P_IN_HAND-_P_IN_TARGET]
init_type = choice(['in_hand', 'in_target', 'uniform'],
p=object_init_probs)
if init_type == 'in_target':
object_x = target_x
object_z = target_z
object_angle = target_angle
elif init_type == 'in_hand':
physics.after_reset()
object_x = data.site_xpos['grasp', 'x']
object_z = data.site_xpos['grasp', 'z']
grasp_direction = data.site_xmat['grasp', ['xx', 'zx']]
object_angle = np.pi-np.arctan2(grasp_direction[1], grasp_direction[0])
else:
object_x = uniform(-.5, .5)
object_z = uniform(0, .7)
object_angle = uniform(0, 2*np.pi)
data.qvel[self._object + '_x'] = uniform(-5, 5)
data.qpos[self._object_joints] = object_x, object_z, object_angle
# Check for collisions.
physics.after_reset()
penetrating = physics.data.ncon > 0
super().initialize_episode(physics)
def get_observation(self, physics):
"""Returns either features or only sensors (to be used with pixels)."""
obs = collections.OrderedDict()
obs['arm_pos'] = physics.bounded_joint_pos(_ARM_JOINTS)
obs['arm_vel'] = physics.joint_vel(_ARM_JOINTS)
obs['touch'] = physics.touch()
if self._fully_observable:
obs['hand_pos'] = physics.body_2d_pose('hand')
obs['object_pos'] = physics.body_2d_pose(self._object)
obs['object_vel'] = physics.joint_vel(self._object_joints)
obs['target_pos'] = physics.body_2d_pose(self._target)
return obs
def _is_close(self, distance):
return rewards.tolerance(distance, (0, _CLOSE), _CLOSE*2)
def _peg_reward(self, physics):
"""Returns a reward for bringing the peg prop to the target."""
grasp = self._is_close(physics.site_distance('peg_grasp', 'grasp'))
pinch = self._is_close(physics.site_distance('peg_pinch', 'pinch'))
grasping = (grasp + pinch) / 2
bring = self._is_close(physics.site_distance('peg', 'target_peg'))
bring_tip = self._is_close(physics.site_distance('target_peg_tip',
'peg_tip'))
bringing = (bring + bring_tip) / 2
return max(bringing, grasping/3)
def _ball_reward(self, physics):
"""Returns a reward for bringing the ball prop to the target."""
return self._is_close(physics.site_distance('ball', 'target_ball'))
def get_reward(self, physics):
"""Returns a reward to the agent."""
if self._use_peg:
return self._peg_reward(physics)
else:
return self._ball_reward(physics)
|
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test descendant package tracking code."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import COIN
MAX_ANCESTORS = 25
MAX_DESCENDANTS = 25
class MempoolPackagesTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [["-maxorphantx=1000"], ["-maxorphantx=1000", "-limitancestorcount=5"]]
# Build a transaction that spends parent_txid:vout
# Return amount sent
def chain_transaction(self, node, parent_txid, vout, value, fee, num_outputs):
send_value = satoshi_round((value - fee)/num_outputs)
inputs = [ {'txid' : parent_txid, 'vout' : vout} ]
outputs = {}
for i in range(num_outputs):
outputs[node.getnewaddress()] = send_value
rawtx = node.createrawtransaction(inputs, outputs)
signedtx = node.signrawtransaction(rawtx)
txid = node.sendrawtransaction(signedtx['hex'])
fulltx = node.getrawtransaction(txid, 1)
assert(len(fulltx['vout']) == num_outputs) # make sure we didn't generate a change output
return (txid, send_value)
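# Hedged sketch of how the helper is used below: each call spends output `vout`
# of `parent_txid`, splits (value - fee) across num_outputs new outputs, and
# returns the new txid plus the per-output value that feeds the next link:
#
#     (txid, sent_value) = self.chain_transaction(self.nodes[0], txid, 0,
#                                                 value, fee, 1)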
def run_test(self):
''' Mine some blocks and have them mature. '''
self.nodes[0].generate(101)
utxo = self.nodes[0].listunspent(10)
txid = utxo[0]['txid']
vout = utxo[0]['vout']
value = utxo[0]['amount']
fee = Decimal("0.0001")
# MAX_ANCESTORS transactions off a confirmed tx should be fine
chain = []
for i in range(MAX_ANCESTORS):
(txid, sent_value) = self.chain_transaction(self.nodes[0], txid, 0, value, fee, 1)
value = sent_value
chain.append(txid)
# Check mempool has MAX_ANCESTORS transactions in it, and descendant
# count and fees should look correct
mempool = self.nodes[0].getrawmempool(True)
assert_equal(len(mempool), MAX_ANCESTORS)
descendant_count = 1
descendant_fees = 0
descendant_size = 0
descendants = []
ancestors = list(chain)
for x in reversed(chain):
# Check that getmempoolentry is consistent with getrawmempool
entry = self.nodes[0].getmempoolentry(x)
assert_equal(entry, mempool[x])
# Check that the descendant calculations are correct
assert_equal(mempool[x]['descendantcount'], descendant_count)
descendant_fees += mempool[x]['fee']
assert_equal(mempool[x]['modifiedfee'], mempool[x]['fee'])
assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN)
descendant_size += mempool[x]['size']
assert_equal(mempool[x]['descendantsize'], descendant_size)
descendant_count += 1
# Check that getmempooldescendants is correct
assert_equal(sorted(descendants), sorted(self.nodes[0].getmempooldescendants(x)))
descendants.append(x)
# Check that getmempoolancestors is correct
ancestors.remove(x)
assert_equal(sorted(ancestors), sorted(self.nodes[0].getmempoolancestors(x)))
# Check that getmempoolancestors/getmempooldescendants correctly handle verbose=true
v_ancestors = self.nodes[0].getmempoolancestors(chain[-1], True)
assert_equal(len(v_ancestors), len(chain)-1)
for x in v_ancestors.keys():
assert_equal(mempool[x], v_ancestors[x])
assert(chain[-1] not in v_ancestors.keys())
v_descendants = self.nodes[0].getmempooldescendants(chain[0], True)
assert_equal(len(v_descendants), len(chain)-1)
for x in v_descendants.keys():
assert_equal(mempool[x], v_descendants[x])
assert(chain[0] not in v_descendants.keys())
# Check that ancestor modified fees includes fee deltas from
# prioritisetransaction
self.nodes[0].prioritisetransaction(txid=chain[0], fee_delta=1000)
mempool = self.nodes[0].getrawmempool(True)
ancestor_fees = 0
for x in chain:
ancestor_fees += mempool[x]['fee']
assert_equal(mempool[x]['ancestorfees'], ancestor_fees * COIN + 1000)
# Undo the prioritisetransaction for later tests
self.nodes[0].prioritisetransaction(txid=chain[0], fee_delta=-1000)
# Check that descendant modified fees includes fee deltas from
# prioritisetransaction
self.nodes[0].prioritisetransaction(txid=chain[-1], fee_delta=1000)
mempool = self.nodes[0].getrawmempool(True)
descendant_fees = 0
for x in reversed(chain):
descendant_fees += mempool[x]['fee']
assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN + 1000)
# Adding one more transaction on to the chain should fail.
assert_raises_rpc_error(-26, "too-long-mempool-chain", self.chain_transaction, self.nodes[0], txid, vout, value, fee, 1)
# Check that prioritising a tx before it's added to the mempool works
# First clear the mempool by mining a block.
self.nodes[0].generate(1)
sync_blocks(self.nodes)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
# Prioritise a transaction that has been mined, then add it back to the
# mempool by using invalidateblock.
self.nodes[0].prioritisetransaction(txid=chain[-1], fee_delta=2000)
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# Keep node1's tip synced with node0
self.nodes[1].invalidateblock(self.nodes[1].getbestblockhash())
# Now check that the transaction is in the mempool, with the right modified fee
mempool = self.nodes[0].getrawmempool(True)
descendant_fees = 0
for x in reversed(chain):
descendant_fees += mempool[x]['fee']
if (x == chain[-1]):
assert_equal(mempool[x]['modifiedfee'], mempool[x]['fee']+satoshi_round(0.00002))
assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN + 2000)
# TODO: check that node1's mempool is as expected
# TODO: test ancestor size limits
# Now test descendant chain limits
txid = utxo[1]['txid']
value = utxo[1]['amount']
vout = utxo[1]['vout']
transaction_package = []
# First create one parent tx with 10 children
(txid, sent_value) = self.chain_transaction(self.nodes[0], txid, vout, value, fee, 10)
parent_transaction = txid
for i in range(10):
transaction_package.append({'txid': txid, 'vout': i, 'amount': sent_value})
# Sign and send up to MAX_DESCENDANTS transactions chained off the parent tx
for i in range(MAX_DESCENDANTS - 1):
utxo = transaction_package.pop(0)
(txid, sent_value) = self.chain_transaction(self.nodes[0], utxo['txid'], utxo['vout'], utxo['amount'], fee, 10)
for j in range(10):
transaction_package.append({'txid': txid, 'vout': j, 'amount': sent_value})
mempool = self.nodes[0].getrawmempool(True)
assert_equal(mempool[parent_transaction]['descendantcount'], MAX_DESCENDANTS)
# Sending one more chained transaction will fail
utxo = transaction_package.pop(0)
assert_raises_rpc_error(-26, "too-long-mempool-chain", self.chain_transaction, self.nodes[0], utxo['txid'], utxo['vout'], utxo['amount'], fee, 10)
# TODO: check that node1's mempool is as expected
# TODO: test descendant size limits
# Test reorg handling
# First, the basics:
self.nodes[0].generate(1)
sync_blocks(self.nodes)
self.nodes[1].invalidateblock(self.nodes[0].getbestblockhash())
self.nodes[1].reconsiderblock(self.nodes[0].getbestblockhash())
# Now test the case where node1 has a transaction T in its mempool that
# depends on transactions A and B which are in a mined block, and the
# block containing A and B is disconnected, AND B is not accepted back
# into node1's mempool because its ancestor count is too high.
# Create 8 transactions, like so:
# Tx0 -> Tx1 (vout0)
# \--> Tx2 (vout1) -> Tx3 -> Tx4 -> Tx5 -> Tx6 -> Tx7
#
# Mine them in the next block, then generate a new tx8 that spends
# Tx1 and Tx7, and add to node1's mempool, then disconnect the
# last block.
# Create tx0 with 2 outputs
utxo = self.nodes[0].listunspent()
txid = utxo[0]['txid']
value = utxo[0]['amount']
vout = utxo[0]['vout']
send_value = satoshi_round((value - fee)/2)
inputs = [ {'txid' : txid, 'vout' : vout} ]
outputs = {}
for i in range(2):
outputs[self.nodes[0].getnewaddress()] = send_value
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
signedtx = self.nodes[0].signrawtransaction(rawtx)
txid = self.nodes[0].sendrawtransaction(signedtx['hex'])
tx0_id = txid
value = send_value
# Create tx1
(tx1_id, tx1_value) = self.chain_transaction(self.nodes[0], tx0_id, 0, value, fee, 1)
# Create tx2-7
vout = 1
txid = tx0_id
for i in range(6):
(txid, sent_value) = self.chain_transaction(self.nodes[0], txid, vout, value, fee, 1)
vout = 0
value = sent_value
# Mine these in a block
self.nodes[0].generate(1)
self.sync_all()
# Now generate tx8, with a big fee
inputs = [ {'txid' : tx1_id, 'vout': 0}, {'txid' : txid, 'vout': 0} ]
outputs = { self.nodes[0].getnewaddress() : send_value + value - 4*fee }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
signedtx = self.nodes[0].signrawtransaction(rawtx)
txid = self.nodes[0].sendrawtransaction(signedtx['hex'])
sync_mempools(self.nodes)
# Now try to disconnect the tip on each node...
self.nodes[1].invalidateblock(self.nodes[1].getbestblockhash())
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
sync_blocks(self.nodes)
if __name__ == '__main__':
MempoolPackagesTest().main()
|
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Client configuration management.
This module holds the code for detecting and configuring the current client and
its output directories.
It is responsible for writing out the client specific plugins that tell the
rest of the cr tool what the client is capable of.
"""
import os
import pprint
import sys
import cr
import cr.auto.build
import cr.auto.client
# The config version currently supported.
VERSION = 0.5
# The default directory name to store config inside
CLIENT_CONFIG_PATH = '.cr'
# The partial filename to add to a directory to get its config file.
CLIENT_CONFIG_FILE = os.path.join(CLIENT_CONFIG_PATH, 'config.py')
# The format string for the header of a config file.
CONFIG_FILE_PREFIX = """
# This is an autogenerated file
# it *will* be overwritten, and changes may be lost
# The system will autoload any other python file in the same folder.
import cr
OVERRIDES = cr.Config.From("""
# The format string for each value in a config file.
CONFIG_VAR_LINE = '\n {0} = {1!r},'
# The format string for the tail of a config file.
CONFIG_FILE_SUFFIX = '\n)\n'
# The name of the gclient config file
GCLIENT_FILENAME = '.gclient'
# The default config values installed by this module.
DEFAULT = cr.Config.From(
CR_ROOT_PATH=os.path.join('{GOOGLE_CODE}'),
CR_CLIENT_NAME='chromium',
CR_CLIENT_PATH=os.path.join('{CR_ROOT_PATH}', '{CR_CLIENT_NAME}'),
CR_SRC=os.path.join('{CR_CLIENT_PATH}', 'src'),
CR_BUILD_DIR=os.path.join('{CR_SRC}', '{CR_OUT_FULL}'),
)
def DetectClient(context):
# Attempt to detect the current client from the cwd
# See if we can detect the source tree root
client_path = os.getcwd()
while (client_path and
not os.path.exists(os.path.join(client_path, GCLIENT_FILENAME))):
old = client_path
client_path = os.path.dirname(client_path)
if client_path == old:
client_path = None
if client_path is not None:
dirname, basename = os.path.split(client_path)
if basename == 'src':
# we have the src path, base is one level up
client_path = dirname
if client_path is not None:
context.derived['CR_CLIENT_PATH'] = client_path
# now get the value from context, it may be different
client_path = context.Get('CR_CLIENT_PATH')
if client_path is not None:
context.derived['CR_CLIENT_NAME'] = os.path.basename(client_path)
def _GetConfigFilename(path):
return os.path.realpath(os.path.join(path, CLIENT_CONFIG_FILE))
def _IsOutputDir(path):
return os.path.isfile(_GetConfigFilename(path))
def _WriteConfig(writer, data):
writer.write(CONFIG_FILE_PREFIX)
for key, value in data.items():
writer.write(CONFIG_VAR_LINE.format(key, value))
writer.write(CONFIG_FILE_SUFFIX)
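# Hedged sketch of the file produced by _WriteConfig for a single value (the
# variable name, value and exact indentation are illustrative); the prefix and
# suffix strings above supply the surrounding boilerplate:
#
#   # This is an autogenerated file
#   # it *will* be overwritten, and changes may be lost
#   # The system will autoload any other python file in the same folder.
#   import cr
#   OVERRIDES = cr.Config.From(
#     CR_OUT_FULL = 'out_gn',
#   )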
def AddArguments(parser):
parser.add_argument(
'-o', '--out', dest='_out', metavar='name',
default=None,
help='The name of the out directory to use. Overrides CR_OUT.'
)
def GetOutArgument(context):
return getattr(context.args, '_out', None)
def ApplyOutArgument(context):
# TODO(iancottrell): be flexible, allow out to do approximate match...
out = GetOutArgument(context)
if out:
context.derived.Set(CR_OUT_FULL=out)
def ReadGClient(context):
"""Loads the .gclient configuration for the current client.
This will load from CR_CLIENT_PATH.
Args:
context: The active context to load configuration for.
Returns:
The dict of values set in the .gclient file.
"""
# Now attempt to load and parse the .gclient file
result = {}
try:
gclient_file = context.Substitute(
os.path.join('{CR_CLIENT_PATH}', GCLIENT_FILENAME))
with open(gclient_file, 'r') as spec_file:
# matching the behaviour of gclient, so pylint: disable=exec-used
exec(spec_file.read(), {}, result)
except IOError:
# no .gclient file, skip it
pass
return result
def WriteGClient(context):
"""Writes the .gclient configuration for the current client.
This will write to CR_CLIENT_PATH.
Args:
context: The active context to write the configuration for.
"""
gclient_file = context.Substitute(
os.path.join('{CR_CLIENT_PATH}', GCLIENT_FILENAME))
spec = '\n'.join('%s = %s' % (key, pprint.pformat(value))
for key,value in context.gclient.items())
if context.dry_run:
print 'Write the following spec to', gclient_file
print spec
else:
with open(gclient_file, 'w') as spec_file:
spec_file.write(spec)
def LoadConfig(context):
"""Loads the client configuration for the given context.
This will load configuration if present from CR_CLIENT_PATH and then
CR_BUILD_DIR.
Args:
context: The active context to load configuration for.
Returns:
True if configuration was fully loaded.
"""
# Load the root config, will help set default build dir
client_path = context.Find('CR_CLIENT_PATH')
if not client_path:
return False
cr.auto.client.__path__.append(os.path.join(client_path, CLIENT_CONFIG_PATH))
cr.loader.Scan()
# Now load build dir config
build_dir = context.Find('CR_BUILD_DIR')
if not build_dir:
return False
cr.auto.build.__path__.append(os.path.join(build_dir, CLIENT_CONFIG_PATH))
cr.loader.Scan()
return hasattr(cr.auto.build, 'config')
def WriteConfig(context, path, data):
"""Writes a configuration out to a file.
This writes all the key value pairs in data out to a config file below path.
Args:
context: The context to run under.
path: The base path to write the config plugin into.
data: The key value pairs to write.
"""
filename = _GetConfigFilename(path)
config_dir = os.path.dirname(filename)
if context.dry_run:
print 'makedirs', config_dir
print 'Write config to', filename
_WriteConfig(sys.stdout, data)
else:
try:
os.makedirs(config_dir)
except OSError:
if not os.path.isdir(config_dir):
raise
with open(filename, 'w') as writer:
_WriteConfig(writer, data)
def PrintInfo(context):
print 'Selected output directory is', context.Find('CR_BUILD_DIR')
try:
for name in cr.auto.build.config.OVERRIDES.exported.keys():
print ' ', name, '=', context.Get(name)
except AttributeError:
pass
|
|
# ------------------------------------------------------------------------------
# Patch Bake to Image Manager
# ------------------------------------------------------------------------------
# Will Bake+Flatten the selected Patches and place the result in the Image Manager
# ------------------------------------------------------------------------------
# http://mari.ideascale.com
# http://cg-cnu.blogspot.in/
# ------------------------------------------------------------------------------
# Written by Sreenivas Alapati, 2014
# ------------------------------------------------------------------------------
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ------------------------------------------------------------------------------
import mari
import os
# ------------------------------------------------------------------------------
def returnTrue(layer):
"""Returns True for any object passed to it."""
return True
# ------------------------------------------------------------------------------
def getLayerList(layer_list, criterionFn):
"""Returns a list of all of the layers in the stack that match the given criterion function, including substacks."""
matching = []
for layer in layer_list:
if criterionFn(layer):
matching.append(layer)
if hasattr(layer, 'layerStack'):
matching.extend(getLayerList(layer.layerStack().layerList(), criterionFn))
if layer.hasMaskStack():
matching.extend(getLayerList(layer.maskStack().layerList(), criterionFn))
if hasattr(layer, 'hasAdjustmentStack') and layer.hasAdjustmentStack():
matching.extend(getLayerList(layer.adjustmentStack().layerList(), criterionFn))
if layer.isGroupLayer():
matching.extend(getLayerList(layer.layerStack().layerList(), criterionFn))
if layer.isChannelLayer():
matching.extend(getLayerList(layer.channel().layerList(), criterionFn))
return matching
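# Hedged usage sketch: the criterion is any predicate over a layer; the call
# below collects every selected layer in a channel, including layers nested in
# mask stacks, adjustment stacks, groups and channel layers.
#
#     selected = getLayerList(channel.layerList(), lambda layer: layer.isSelected())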
# ------------------------------------------------------------------------------
def findLayerSelection():
"""Searches for the current selection if mari.current.layer is not the same as layer.isSelected"""
curGeo = mari.geo.current()
curChannel = curGeo.currentChannel()
channels = curGeo.channelList()
curLayer = mari.current.layer()
layers = ()
layerSelList = []
chn_layerList = ()
layerSelect = False
if curLayer.isSelected():
# If the current layer is itself selected, just trawl through the current channel to find the others
layerSelect = True
chn_layerList = curChannel.layerList()
layers = getLayerList(chn_layerList,returnTrue)
for layer in layers:
if layer.isSelected():
layerSelList.append(layer)
else:
# If the current layer is not selected, the selection sits somewhere else (a non-current channel),
# so trawl through the entire channel list, including substacks, to find it
for channel in channels:
chn_layerList = channel.layerList()
layers = getLayerList(chn_layerList,returnTrue)
for layer in layers:
if layer.isSelected():
curLayer = layer
curChannel = channel
layerSelect = True
layerSelList.append(layer)
if not layerSelect:
mari.utils.message('No Layer Selection found. \n \n Please select at least one Layer.')
return curGeo,curLayer,curChannel,layerSelList
# ------------------------------------------------------------------------------
def patchBake():
'''Bakes selected Patches to Image Manager'''
if not mari.projects.current():
mari.utils.message('No project currently open', title = 'Error')
return
# Checking for OS to determine slash handling
if mari.app.version().isWindows():
path = str(mari.resources.path("MARI_USER_PATH")).replace("\\", "/")
else:
path = str( mari.resources.path("MARI_USER_PATH") )
# Determine general Selection Info
curGeo = mari.geo.current()
curChan = curGeo.currentChannel()
# MARI 3 Only:
# colorSpace = curChan.colorspaceConfig()
curChanName = str(curChan.name())
layers = curChan.layerList()
patchList = list (curGeo.patchList() )
selPatchList = [patch for patch in patchList if patch.isSelected() ]
if len(selPatchList) == 0:
mari.utils.message('Select at least one patch', title = 'Error')
return
# Deactivate viewport updates for increased speed
deactivateViewportToggle = mari.actions.find('/Mari/Canvas/Toggle Shader Compiling')
deactivateViewportToggle.trigger()
mari.history.startMacro('Patch Bake to Image Manager')
mari.app.setWaitCursor()
for layer in layers:
layer.setSelected(True)
copyAction = mari.actions.find('/Mari/Layers/Copy')
copyAction.trigger()
pasteAction = mari.actions.find('/Mari/Layers/Paste')
pasteAction.trigger()
# Run a search for the current selection to get a list of all duplicated layers
geo_data = findLayerSelection()
# geo_data indices: 0 = current geo, 1 = current layer, 2 = current channel, 3 = current selection list
curSel = geo_data[3]
channelLayerLst = []
# Run a search over all currently selected layers to get a full list of associated layers such as masks etc.
nested_layers = getLayerList(curSel,returnTrue)
# Look through all layers associated with the duplicates for any channel layers whose channels were duplicated
for layer in nested_layers:
if layer.isChannelLayer():
channelLayerLst.append(layer.channel())
# merging the duplicated layers into one
curChan.mergeLayers()
# determine the new current layer (result of the merge), set its name and grab its image set
curLayer = curChan.currentLayer()
curLayer.setName('BakeToImageManager')
curImgSet = curLayer.imageSet()
# extract current image set to image manager
for patch in selPatchList:
try:
uv = patch.uvIndex()
curPatchIndex = str(patch.udim())
savePath = path + curChanName + '.' + curPatchIndex + '.tif'
patchImg = curImgSet.image(uv, -1)
patchImg.saveAs(savePath)
# MARI 2.6:
mari.images.load(savePath)
# MARI 3:
# mari.images.open(savePath,colorSpace)
os.remove(savePath)
except Exception:
mari.history.stopMacro()
mari.app.restoreCursor()
pass
# Run cleanup: close the newly created layer and any channel duplicates that may have been
# created as a result of copying and pasting channel layers
curLayer.close()
for channel in channelLayerLst:
try:
curGeo.removeChannel(channel)
except Exception:
continue
# Stop Macro, restore cursor, refresh viewport
mari.history.stopMacro()
mari.app.restoreCursor()
deactivateViewportToggle.trigger()
def patch_bake_to_imageman():
patchBake()
|
|
from functools import partial
import warnings
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import Series, isna
from pandas.core.arrays import DatetimeArray
import pandas.core.nanops as nanops
import pandas.util.testing as tm
use_bn = nanops._USE_BOTTLENECK
class TestnanopsDataFrame:
def setup_method(self, method):
np.random.seed(11235)
nanops._USE_BOTTLENECK = False
arr_shape = (11, 7, 5)
self.arr_float = np.random.randn(*arr_shape)
self.arr_float1 = np.random.randn(*arr_shape)
self.arr_complex = self.arr_float + self.arr_float1 * 1j
self.arr_int = np.random.randint(-10, 10, arr_shape)
self.arr_bool = np.random.randint(0, 2, arr_shape) == 0
self.arr_str = np.abs(self.arr_float).astype("S")
self.arr_utf = np.abs(self.arr_float).astype("U")
self.arr_date = np.random.randint(0, 20000, arr_shape).astype("M8[ns]")
self.arr_tdelta = np.random.randint(0, 20000, arr_shape).astype("m8[ns]")
self.arr_nan = np.tile(np.nan, arr_shape)
self.arr_float_nan = np.vstack([self.arr_float, self.arr_nan])
self.arr_float1_nan = np.vstack([self.arr_float1, self.arr_nan])
self.arr_nan_float1 = np.vstack([self.arr_nan, self.arr_float1])
self.arr_nan_nan = np.vstack([self.arr_nan, self.arr_nan])
self.arr_inf = self.arr_float * np.inf
self.arr_float_inf = np.vstack([self.arr_float, self.arr_inf])
self.arr_nan_inf = np.vstack([self.arr_nan, self.arr_inf])
self.arr_float_nan_inf = np.vstack([self.arr_float, self.arr_nan, self.arr_inf])
self.arr_nan_nan_inf = np.vstack([self.arr_nan, self.arr_nan, self.arr_inf])
self.arr_obj = np.vstack(
[
self.arr_float.astype("O"),
self.arr_int.astype("O"),
self.arr_bool.astype("O"),
self.arr_complex.astype("O"),
self.arr_str.astype("O"),
self.arr_utf.astype("O"),
self.arr_date.astype("O"),
self.arr_tdelta.astype("O"),
]
)
with np.errstate(invalid="ignore"):
self.arr_nan_nanj = self.arr_nan + self.arr_nan * 1j
self.arr_complex_nan = np.vstack([self.arr_complex, self.arr_nan_nanj])
self.arr_nan_infj = self.arr_inf * 1j
self.arr_complex_nan_infj = np.vstack([self.arr_complex, self.arr_nan_infj])
self.arr_float_2d = self.arr_float[:, :, 0]
self.arr_float1_2d = self.arr_float1[:, :, 0]
self.arr_nan_2d = self.arr_nan[:, :, 0]
self.arr_float_nan_2d = self.arr_float_nan[:, :, 0]
self.arr_float1_nan_2d = self.arr_float1_nan[:, :, 0]
self.arr_nan_float1_2d = self.arr_nan_float1[:, :, 0]
self.arr_float_1d = self.arr_float[:, 0, 0]
self.arr_float1_1d = self.arr_float1[:, 0, 0]
self.arr_nan_1d = self.arr_nan[:, 0, 0]
self.arr_float_nan_1d = self.arr_float_nan[:, 0, 0]
self.arr_float1_nan_1d = self.arr_float1_nan[:, 0, 0]
self.arr_nan_float1_1d = self.arr_nan_float1[:, 0, 0]
def teardown_method(self, method):
nanops._USE_BOTTLENECK = use_bn
def check_results(self, targ, res, axis, check_dtype=True):
res = getattr(res, "asm8", res)
res = getattr(res, "values", res)
# timedeltas are a beast here
def _coerce_tds(targ, res):
if hasattr(targ, "dtype") and targ.dtype == "m8[ns]":
if len(targ) == 1:
targ = targ[0].item()
res = res.item()
else:
targ = targ.view("i8")
return targ, res
try:
if (
axis != 0
and hasattr(targ, "shape")
and targ.ndim
and targ.shape != res.shape
):
res = np.split(res, [targ.shape[0]], axis=0)[0]
except (ValueError, IndexError):
targ, res = _coerce_tds(targ, res)
try:
tm.assert_almost_equal(targ, res, check_dtype=check_dtype)
except AssertionError:
# handle timedelta dtypes
if hasattr(targ, "dtype") and targ.dtype == "m8[ns]":
targ, res = _coerce_tds(targ, res)
tm.assert_almost_equal(targ, res, check_dtype=check_dtype)
return
# There are sometimes rounding errors with
# complex and object dtypes.
# If it isn't one of those, re-raise the error.
if not hasattr(res, "dtype") or res.dtype.kind not in ["c", "O"]:
raise
# convert object dtypes to something that can be split into
# real and imaginary parts
if res.dtype.kind == "O":
if targ.dtype.kind != "O":
res = res.astype(targ.dtype)
else:
try:
res = res.astype("c16")
except RuntimeError:
res = res.astype("f8")
try:
targ = targ.astype("c16")
except RuntimeError:
targ = targ.astype("f8")
# there should never be a case where numpy returns an object
# but nanops doesn't, so make that an exception
elif targ.dtype.kind == "O":
raise
tm.assert_almost_equal(np.real(targ), np.real(res), check_dtype=check_dtype)
tm.assert_almost_equal(np.imag(targ), np.imag(res), check_dtype=check_dtype)
def check_fun_data(
self,
testfunc,
targfunc,
testarval,
targarval,
targarnanval,
check_dtype=True,
empty_targfunc=None,
**kwargs
):
for axis in list(range(targarval.ndim)) + [None]:
for skipna in [False, True]:
targartempval = targarval if skipna else targarnanval
if skipna and empty_targfunc and isna(targartempval).all():
targ = empty_targfunc(targartempval, axis=axis, **kwargs)
else:
targ = targfunc(targartempval, axis=axis, **kwargs)
try:
res = testfunc(testarval, axis=axis, skipna=skipna, **kwargs)
self.check_results(targ, res, axis, check_dtype=check_dtype)
if skipna:
res = testfunc(testarval, axis=axis, **kwargs)
self.check_results(targ, res, axis, check_dtype=check_dtype)
if axis is None:
res = testfunc(testarval, skipna=skipna, **kwargs)
self.check_results(targ, res, axis, check_dtype=check_dtype)
if skipna and axis is None:
res = testfunc(testarval, **kwargs)
self.check_results(targ, res, axis, check_dtype=check_dtype)
except BaseException as exc:
exc.args += (
"axis: {axis} of {of}".format(axis=axis, of=testarval.ndim - 1),
"skipna: {skipna}".format(skipna=skipna),
"kwargs: {kwargs}".format(kwargs=kwargs),
)
raise
if testarval.ndim <= 1:
return
try:
testarval2 = np.take(testarval, 0, axis=-1)
targarval2 = np.take(targarval, 0, axis=-1)
targarnanval2 = np.take(targarnanval, 0, axis=-1)
except ValueError:
return
self.check_fun_data(
testfunc,
targfunc,
testarval2,
targarval2,
targarnanval2,
check_dtype=check_dtype,
empty_targfunc=empty_targfunc,
**kwargs
)
def check_fun(
self,
testfunc,
targfunc,
testar,
targar=None,
targarnan=None,
empty_targfunc=None,
**kwargs
):
if targar is None:
targar = testar
if targarnan is None:
targarnan = testar
testarval = getattr(self, testar)
targarval = getattr(self, targar)
targarnanval = getattr(self, targarnan)
try:
self.check_fun_data(
testfunc,
targfunc,
testarval,
targarval,
targarnanval,
empty_targfunc=empty_targfunc,
**kwargs
)
except BaseException as exc:
exc.args += (
"testar: {testar}".format(testar=testar),
"targar: {targar}".format(targar=targar),
"targarnan: {targarnan}".format(targarnan=targarnan),
)
raise
def check_funs(
self,
testfunc,
targfunc,
allow_complex=True,
allow_all_nan=True,
allow_str=True,
allow_date=True,
allow_tdelta=True,
allow_obj=True,
**kwargs
):
self.check_fun(testfunc, targfunc, "arr_float", **kwargs)
self.check_fun(testfunc, targfunc, "arr_float_nan", "arr_float", **kwargs)
self.check_fun(testfunc, targfunc, "arr_int", **kwargs)
self.check_fun(testfunc, targfunc, "arr_bool", **kwargs)
objs = [
self.arr_float.astype("O"),
self.arr_int.astype("O"),
self.arr_bool.astype("O"),
]
if allow_all_nan:
self.check_fun(testfunc, targfunc, "arr_nan", **kwargs)
if allow_complex:
self.check_fun(testfunc, targfunc, "arr_complex", **kwargs)
self.check_fun(
testfunc, targfunc, "arr_complex_nan", "arr_complex", **kwargs
)
if allow_all_nan:
self.check_fun(testfunc, targfunc, "arr_nan_nanj", **kwargs)
objs += [self.arr_complex.astype("O")]
if allow_str:
self.check_fun(testfunc, targfunc, "arr_str", **kwargs)
self.check_fun(testfunc, targfunc, "arr_utf", **kwargs)
objs += [self.arr_str.astype("O"), self.arr_utf.astype("O")]
if allow_date:
try:
targfunc(self.arr_date)
except TypeError:
pass
else:
self.check_fun(testfunc, targfunc, "arr_date", **kwargs)
objs += [self.arr_date.astype("O")]
if allow_tdelta:
try:
targfunc(self.arr_tdelta)
except TypeError:
pass
else:
self.check_fun(testfunc, targfunc, "arr_tdelta", **kwargs)
objs += [self.arr_tdelta.astype("O")]
if allow_obj:
self.arr_obj = np.vstack(objs)
# some nanops handle object dtypes better than their numpy
# counterparts, so the numpy functions need to be given something
# else
if allow_obj == "convert":
targfunc = partial(
self._badobj_wrap, func=targfunc, allow_complex=allow_complex
)
self.check_fun(testfunc, targfunc, "arr_obj", **kwargs)
def _badobj_wrap(self, value, func, allow_complex=True, **kwargs):
if value.dtype.kind == "O":
if allow_complex:
value = value.astype("c16")
else:
value = value.astype("f8")
return func(value, **kwargs)
def test_nanany(self):
self.check_funs(
nanops.nanany,
np.any,
allow_all_nan=False,
allow_str=False,
allow_date=False,
allow_tdelta=False,
)
def test_nanall(self):
self.check_funs(
nanops.nanall,
np.all,
allow_all_nan=False,
allow_str=False,
allow_date=False,
allow_tdelta=False,
)
def test_nansum(self):
self.check_funs(
nanops.nansum,
np.sum,
allow_str=False,
allow_date=False,
allow_tdelta=True,
check_dtype=False,
empty_targfunc=np.nansum,
)
def test_nanmean(self):
self.check_funs(
nanops.nanmean,
np.mean,
allow_complex=False,
allow_obj=False,
allow_str=False,
allow_date=False,
allow_tdelta=True,
)
def test_nanmean_overflow(self):
# GH 10155
# In the previous implementation mean can overflow for int dtypes, it
# is now consistent with numpy
for a in [2 ** 55, -2 ** 55, 20150515061816532]:
s = Series(a, index=range(500), dtype=np.int64)
result = s.mean()
np_result = s.values.mean()
assert result == a
assert result == np_result
assert result.dtype == np.float64
def test_returned_dtype(self):
dtypes = [np.int16, np.int32, np.int64, np.float32, np.float64]
if hasattr(np, "float128"):
dtypes.append(np.float128)
for dtype in dtypes:
s = Series(range(10), dtype=dtype)
group_a = ["mean", "std", "var", "skew", "kurt"]
group_b = ["min", "max"]
for method in group_a + group_b:
result = getattr(s, method)()
if is_integer_dtype(dtype) and method in group_a:
assert result.dtype == np.float64
else:
assert result.dtype == dtype
def test_nanmedian(self):
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
self.check_funs(
nanops.nanmedian,
np.median,
allow_complex=False,
allow_str=False,
allow_date=False,
allow_tdelta=True,
allow_obj="convert",
)
@pytest.mark.parametrize("ddof", range(3))
def test_nanvar(self, ddof):
self.check_funs(
nanops.nanvar,
np.var,
allow_complex=False,
allow_str=False,
allow_date=False,
allow_tdelta=True,
allow_obj="convert",
ddof=ddof,
)
@pytest.mark.parametrize("ddof", range(3))
def test_nanstd(self, ddof):
self.check_funs(
nanops.nanstd,
np.std,
allow_complex=False,
allow_str=False,
allow_date=False,
allow_tdelta=True,
allow_obj="convert",
ddof=ddof,
)
@td.skip_if_no_scipy
@pytest.mark.parametrize("ddof", range(3))
def test_nansem(self, ddof):
from scipy.stats import sem
with np.errstate(invalid="ignore"):
self.check_funs(
nanops.nansem,
sem,
allow_complex=False,
allow_str=False,
allow_date=False,
allow_tdelta=False,
allow_obj="convert",
ddof=ddof,
)
def _minmax_wrap(self, value, axis=None, func=None):
# numpy warns if all nan
res = func(value, axis)
if res.dtype.kind == "m":
res = np.atleast_1d(res)
return res
def test_nanmin(self):
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
func = partial(self._minmax_wrap, func=np.min)
self.check_funs(nanops.nanmin, func, allow_str=False, allow_obj=False)
def test_nanmax(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
func = partial(self._minmax_wrap, func=np.max)
self.check_funs(nanops.nanmax, func, allow_str=False, allow_obj=False)
def _argminmax_wrap(self, value, axis=None, func=None):
res = func(value, axis)
nans = np.min(value, axis)
nullnan = isna(nans)
if res.ndim:
res[nullnan] = -1
elif (
hasattr(nullnan, "all")
and nullnan.all()
or not hasattr(nullnan, "all")
and nullnan
):
res = -1
return res
def test_nanargmax(self):
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
func = partial(self._argminmax_wrap, func=np.argmax)
self.check_funs(
nanops.nanargmax,
func,
allow_str=False,
allow_obj=False,
allow_date=True,
allow_tdelta=True,
)
def test_nanargmin(self):
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
func = partial(self._argminmax_wrap, func=np.argmin)
self.check_funs(nanops.nanargmin, func, allow_str=False, allow_obj=False)
def _skew_kurt_wrap(self, values, axis=None, func=None):
if not isinstance(values.dtype.type, np.floating):
values = values.astype("f8")
result = func(values, axis=axis, bias=False)
# fix for handling cases where all elements in an axis are the same
if isinstance(result, np.ndarray):
result[np.max(values, axis=axis) == np.min(values, axis=axis)] = 0
return result
elif np.max(values) == np.min(values):
return 0.0
return result
@td.skip_if_no_scipy
def test_nanskew(self):
from scipy.stats import skew
func = partial(self._skew_kurt_wrap, func=skew)
with np.errstate(invalid="ignore"):
self.check_funs(
nanops.nanskew,
func,
allow_complex=False,
allow_str=False,
allow_date=False,
allow_tdelta=False,
)
@td.skip_if_no_scipy
def test_nankurt(self):
from scipy.stats import kurtosis
func1 = partial(kurtosis, fisher=True)
func = partial(self._skew_kurt_wrap, func=func1)
with np.errstate(invalid="ignore"):
self.check_funs(
nanops.nankurt,
func,
allow_complex=False,
allow_str=False,
allow_date=False,
allow_tdelta=False,
)
def test_nanprod(self):
self.check_funs(
nanops.nanprod,
np.prod,
allow_str=False,
allow_date=False,
allow_tdelta=False,
empty_targfunc=np.nanprod,
)
def check_nancorr_nancov_2d(self, checkfun, targ0, targ1, **kwargs):
res00 = checkfun(self.arr_float_2d, self.arr_float1_2d, **kwargs)
res01 = checkfun(
self.arr_float_2d,
self.arr_float1_2d,
min_periods=len(self.arr_float_2d) - 1,
**kwargs
)
tm.assert_almost_equal(targ0, res00)
tm.assert_almost_equal(targ0, res01)
res10 = checkfun(self.arr_float_nan_2d, self.arr_float1_nan_2d, **kwargs)
res11 = checkfun(
self.arr_float_nan_2d,
self.arr_float1_nan_2d,
min_periods=len(self.arr_float_2d) - 1,
**kwargs
)
tm.assert_almost_equal(targ1, res10)
tm.assert_almost_equal(targ1, res11)
targ2 = np.nan
res20 = checkfun(self.arr_nan_2d, self.arr_float1_2d, **kwargs)
res21 = checkfun(self.arr_float_2d, self.arr_nan_2d, **kwargs)
res22 = checkfun(self.arr_nan_2d, self.arr_nan_2d, **kwargs)
res23 = checkfun(self.arr_float_nan_2d, self.arr_nan_float1_2d, **kwargs)
res24 = checkfun(
self.arr_float_nan_2d,
self.arr_nan_float1_2d,
min_periods=len(self.arr_float_2d) - 1,
**kwargs
)
res25 = checkfun(
self.arr_float_2d,
self.arr_float1_2d,
min_periods=len(self.arr_float_2d) + 1,
**kwargs
)
tm.assert_almost_equal(targ2, res20)
tm.assert_almost_equal(targ2, res21)
tm.assert_almost_equal(targ2, res22)
tm.assert_almost_equal(targ2, res23)
tm.assert_almost_equal(targ2, res24)
tm.assert_almost_equal(targ2, res25)
def check_nancorr_nancov_1d(self, checkfun, targ0, targ1, **kwargs):
res00 = checkfun(self.arr_float_1d, self.arr_float1_1d, **kwargs)
res01 = checkfun(
self.arr_float_1d,
self.arr_float1_1d,
min_periods=len(self.arr_float_1d) - 1,
**kwargs
)
tm.assert_almost_equal(targ0, res00)
tm.assert_almost_equal(targ0, res01)
res10 = checkfun(self.arr_float_nan_1d, self.arr_float1_nan_1d, **kwargs)
res11 = checkfun(
self.arr_float_nan_1d,
self.arr_float1_nan_1d,
min_periods=len(self.arr_float_1d) - 1,
**kwargs
)
tm.assert_almost_equal(targ1, res10)
tm.assert_almost_equal(targ1, res11)
targ2 = np.nan
res20 = checkfun(self.arr_nan_1d, self.arr_float1_1d, **kwargs)
res21 = checkfun(self.arr_float_1d, self.arr_nan_1d, **kwargs)
res22 = checkfun(self.arr_nan_1d, self.arr_nan_1d, **kwargs)
res23 = checkfun(self.arr_float_nan_1d, self.arr_nan_float1_1d, **kwargs)
res24 = checkfun(
self.arr_float_nan_1d,
self.arr_nan_float1_1d,
min_periods=len(self.arr_float_1d) - 1,
**kwargs
)
res25 = checkfun(
self.arr_float_1d,
self.arr_float1_1d,
min_periods=len(self.arr_float_1d) + 1,
**kwargs
)
tm.assert_almost_equal(targ2, res20)
tm.assert_almost_equal(targ2, res21)
tm.assert_almost_equal(targ2, res22)
tm.assert_almost_equal(targ2, res23)
tm.assert_almost_equal(targ2, res24)
tm.assert_almost_equal(targ2, res25)
def test_nancorr(self):
targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1]
targ1 = np.corrcoef(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1]
self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1)
targ0 = np.corrcoef(self.arr_float_1d, self.arr_float1_1d)[0, 1]
targ1 = np.corrcoef(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0, 1]
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="pearson")
def test_nancorr_pearson(self):
targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1]
targ1 = np.corrcoef(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1]
self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1, method="pearson")
targ0 = np.corrcoef(self.arr_float_1d, self.arr_float1_1d)[0, 1]
targ1 = np.corrcoef(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0, 1]
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="pearson")
@td.skip_if_no_scipy
def test_nancorr_kendall(self):
from scipy.stats import kendalltau
targ0 = kendalltau(self.arr_float_2d, self.arr_float1_2d)[0]
targ1 = kendalltau(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0]
self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1, method="kendall")
targ0 = kendalltau(self.arr_float_1d, self.arr_float1_1d)[0]
targ1 = kendalltau(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0]
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="kendall")
@td.skip_if_no_scipy
def test_nancorr_spearman(self):
from scipy.stats import spearmanr
targ0 = spearmanr(self.arr_float_2d, self.arr_float1_2d)[0]
targ1 = spearmanr(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0]
self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1, method="spearman")
targ0 = spearmanr(self.arr_float_1d, self.arr_float1_1d)[0]
targ1 = spearmanr(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0]
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="spearman")
def test_nancov(self):
targ0 = np.cov(self.arr_float_2d, self.arr_float1_2d)[0, 1]
targ1 = np.cov(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1]
self.check_nancorr_nancov_2d(nanops.nancov, targ0, targ1)
targ0 = np.cov(self.arr_float_1d, self.arr_float1_1d)[0, 1]
targ1 = np.cov(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0, 1]
self.check_nancorr_nancov_1d(nanops.nancov, targ0, targ1)
def check_nancomp(self, checkfun, targ0):
arr_float = self.arr_float
arr_float1 = self.arr_float1
arr_nan = self.arr_nan
arr_nan_nan = self.arr_nan_nan
arr_float_nan = self.arr_float_nan
arr_float1_nan = self.arr_float1_nan
arr_nan_float1 = self.arr_nan_float1
while targ0.ndim:
try:
res0 = checkfun(arr_float, arr_float1)
tm.assert_almost_equal(targ0, res0)
if targ0.ndim > 1:
targ1 = np.vstack([targ0, arr_nan])
else:
targ1 = np.hstack([targ0, arr_nan])
res1 = checkfun(arr_float_nan, arr_float1_nan)
tm.assert_numpy_array_equal(targ1, res1, check_dtype=False)
targ2 = arr_nan_nan
res2 = checkfun(arr_float_nan, arr_nan_float1)
tm.assert_numpy_array_equal(targ2, res2, check_dtype=False)
except Exception as exc:
exc.args += ("ndim: {arr_float.ndim}".format(arr_float=arr_float),)
raise
try:
arr_float = np.take(arr_float, 0, axis=-1)
arr_float1 = np.take(arr_float1, 0, axis=-1)
arr_nan = np.take(arr_nan, 0, axis=-1)
arr_nan_nan = np.take(arr_nan_nan, 0, axis=-1)
arr_float_nan = np.take(arr_float_nan, 0, axis=-1)
arr_float1_nan = np.take(arr_float1_nan, 0, axis=-1)
arr_nan_float1 = np.take(arr_nan_float1, 0, axis=-1)
targ0 = np.take(targ0, 0, axis=-1)
except ValueError:
break
def test_nangt(self):
targ0 = self.arr_float > self.arr_float1
self.check_nancomp(nanops.nangt, targ0)
def test_nange(self):
targ0 = self.arr_float >= self.arr_float1
self.check_nancomp(nanops.nange, targ0)
def test_nanlt(self):
targ0 = self.arr_float < self.arr_float1
self.check_nancomp(nanops.nanlt, targ0)
def test_nanle(self):
targ0 = self.arr_float <= self.arr_float1
self.check_nancomp(nanops.nanle, targ0)
def test_naneq(self):
targ0 = self.arr_float == self.arr_float1
self.check_nancomp(nanops.naneq, targ0)
def test_nanne(self):
targ0 = self.arr_float != self.arr_float1
self.check_nancomp(nanops.nanne, targ0)
def check_bool(self, func, value, correct, *args, **kwargs):
while getattr(value, "ndim", True):
try:
res0 = func(value, *args, **kwargs)
if correct:
assert res0
else:
assert not res0
except BaseException as exc:
exc.args += ("dim: {}".format(getattr(value, "ndim", value)),)
raise
if not hasattr(value, "ndim"):
break
try:
value = np.take(value, 0, axis=-1)
except ValueError:
break
def test__has_infs(self):
pairs = [
("arr_complex", False),
("arr_int", False),
("arr_bool", False),
("arr_str", False),
("arr_utf", False),
("arr_complex", False),
("arr_complex_nan", False),
("arr_nan_nanj", False),
("arr_nan_infj", True),
("arr_complex_nan_infj", True),
]
pairs_float = [
("arr_float", False),
("arr_nan", False),
("arr_float_nan", False),
("arr_nan_nan", False),
("arr_float_inf", True),
("arr_inf", True),
("arr_nan_inf", True),
("arr_float_nan_inf", True),
("arr_nan_nan_inf", True),
]
for arr, correct in pairs:
val = getattr(self, arr)
try:
self.check_bool(nanops._has_infs, val, correct)
except BaseException as exc:
exc.args += (arr,)
raise
for arr, correct in pairs_float:
val = getattr(self, arr)
try:
self.check_bool(nanops._has_infs, val, correct)
self.check_bool(nanops._has_infs, val.astype("f4"), correct)
self.check_bool(nanops._has_infs, val.astype("f2"), correct)
except BaseException as exc:
exc.args += (arr,)
raise
def test__isfinite(self):
pairs = [
("arr_complex", False),
("arr_int", False),
("arr_bool", False),
("arr_str", False),
("arr_utf", False),
("arr_complex", False),
("arr_complex_nan", True),
("arr_nan_nanj", True),
("arr_nan_infj", True),
("arr_complex_nan_infj", True),
]
pairs_float = [
("arr_float", False),
("arr_nan", True),
("arr_float_nan", True),
("arr_nan_nan", True),
("arr_float_inf", True),
("arr_inf", True),
("arr_nan_inf", True),
("arr_float_nan_inf", True),
("arr_nan_nan_inf", True),
]
func1 = lambda x: np.any(nanops._isfinite(x).ravel())
# TODO: unused?
# func2 = lambda x: np.any(nanops._isfinite(x).values.ravel())
for arr, correct in pairs:
val = getattr(self, arr)
try:
self.check_bool(func1, val, correct)
except BaseException as exc:
exc.args += (arr,)
raise
for arr, correct in pairs_float:
val = getattr(self, arr)
try:
self.check_bool(func1, val, correct)
self.check_bool(func1, val.astype("f4"), correct)
self.check_bool(func1, val.astype("f2"), correct)
except BaseException as exc:
exc.args += (arr,)
raise
def test__bn_ok_dtype(self):
assert nanops._bn_ok_dtype(self.arr_float.dtype, "test")
assert nanops._bn_ok_dtype(self.arr_complex.dtype, "test")
assert nanops._bn_ok_dtype(self.arr_int.dtype, "test")
assert nanops._bn_ok_dtype(self.arr_bool.dtype, "test")
assert nanops._bn_ok_dtype(self.arr_str.dtype, "test")
assert nanops._bn_ok_dtype(self.arr_utf.dtype, "test")
assert not nanops._bn_ok_dtype(self.arr_date.dtype, "test")
assert not nanops._bn_ok_dtype(self.arr_tdelta.dtype, "test")
assert not nanops._bn_ok_dtype(self.arr_obj.dtype, "test")
class TestEnsureNumeric:
def test_numeric_values(self):
# Test integer
assert nanops._ensure_numeric(1) == 1
# Test float
assert nanops._ensure_numeric(1.1) == 1.1
# Test complex
assert nanops._ensure_numeric(1 + 2j) == 1 + 2j
def test_ndarray(self):
# Test numeric ndarray
values = np.array([1, 2, 3])
assert np.allclose(nanops._ensure_numeric(values), values)
# Test object ndarray
o_values = values.astype(object)
assert np.allclose(nanops._ensure_numeric(o_values), values)
# Test convertible string ndarray
s_values = np.array(["1", "2", "3"], dtype=object)
assert np.allclose(nanops._ensure_numeric(s_values), values)
# Test non-convertible string ndarray
s_values = np.array(["foo", "bar", "baz"], dtype=object)
msg = r"could not convert string to float: '(foo|baz)'"
with pytest.raises(ValueError, match=msg):
nanops._ensure_numeric(s_values)
def test_convertable_values(self):
assert np.allclose(nanops._ensure_numeric("1"), 1.0)
assert np.allclose(nanops._ensure_numeric("1.1"), 1.1)
assert np.allclose(nanops._ensure_numeric("1+1j"), 1 + 1j)
def test_non_convertable_values(self):
msg = "Could not convert foo to numeric"
with pytest.raises(TypeError, match=msg):
nanops._ensure_numeric("foo")
msg = "Could not convert {} to numeric"
with pytest.raises(TypeError, match=msg):
nanops._ensure_numeric({})
msg = r"Could not convert \[\] to numeric"
with pytest.raises(TypeError, match=msg):
nanops._ensure_numeric([])
class TestNanvarFixedValues:
# xref GH10242
def setup_method(self, method):
# Samples from a normal distribution.
self.variance = variance = 3.0
self.samples = self.prng.normal(scale=variance ** 0.5, size=100000)
def test_nanvar_all_finite(self):
samples = self.samples
actual_variance = nanops.nanvar(samples)
tm.assert_almost_equal(actual_variance, self.variance, check_less_precise=2)
def test_nanvar_nans(self):
samples = np.nan * np.ones(2 * self.samples.shape[0])
samples[::2] = self.samples
actual_variance = nanops.nanvar(samples, skipna=True)
tm.assert_almost_equal(actual_variance, self.variance, check_less_precise=2)
actual_variance = nanops.nanvar(samples, skipna=False)
tm.assert_almost_equal(actual_variance, np.nan, check_less_precise=2)
def test_nanstd_nans(self):
samples = np.nan * np.ones(2 * self.samples.shape[0])
samples[::2] = self.samples
actual_std = nanops.nanstd(samples, skipna=True)
tm.assert_almost_equal(actual_std, self.variance ** 0.5, check_less_precise=2)
actual_std = nanops.nanvar(samples, skipna=False)
tm.assert_almost_equal(actual_std, np.nan, check_less_precise=2)
def test_nanvar_axis(self):
# Generate some sample data.
samples_norm = self.samples
samples_unif = self.prng.uniform(size=samples_norm.shape[0])
samples = np.vstack([samples_norm, samples_unif])
actual_variance = nanops.nanvar(samples, axis=1)
tm.assert_almost_equal(
actual_variance, np.array([self.variance, 1.0 / 12]), check_less_precise=2
)
def test_nanvar_ddof(self):
n = 5
samples = self.prng.uniform(size=(10000, n + 1))
samples[:, -1] = np.nan # Force use of our own algorithm.
variance_0 = nanops.nanvar(samples, axis=1, skipna=True, ddof=0).mean()
variance_1 = nanops.nanvar(samples, axis=1, skipna=True, ddof=1).mean()
variance_2 = nanops.nanvar(samples, axis=1, skipna=True, ddof=2).mean()
# The unbiased estimate.
var = 1.0 / 12
tm.assert_almost_equal(variance_1, var, check_less_precise=2)
# The underestimated variance.
tm.assert_almost_equal(variance_0, (n - 1.0) / n * var, check_less_precise=2)
# The overestimated variance.
tm.assert_almost_equal(
variance_2, (n - 1.0) / (n - 2.0) * var, check_less_precise=2
)
def test_ground_truth(self):
# Test against values that were precomputed with Numpy.
samples = np.empty((4, 4))
samples[:3, :3] = np.array(
[
[0.97303362, 0.21869576, 0.55560287],
[0.72980153, 0.03109364, 0.99155171],
[0.09317602, 0.60078248, 0.15871292],
]
)
samples[3] = samples[:, 3] = np.nan
# Actual variances along axis=0, 1 for ddof=0, 1, 2
variance = np.array(
[
[
[0.13762259, 0.05619224, 0.11568816],
[0.20643388, 0.08428837, 0.17353224],
[0.41286776, 0.16857673, 0.34706449],
],
[
[0.09519783, 0.16435395, 0.05082054],
[0.14279674, 0.24653093, 0.07623082],
[0.28559348, 0.49306186, 0.15246163],
],
]
)
# Test nanvar.
for axis in range(2):
for ddof in range(3):
var = nanops.nanvar(samples, skipna=True, axis=axis, ddof=ddof)
tm.assert_almost_equal(var[:3], variance[axis, ddof])
assert np.isnan(var[3])
# Test nanstd.
for axis in range(2):
for ddof in range(3):
std = nanops.nanstd(samples, skipna=True, axis=axis, ddof=ddof)
tm.assert_almost_equal(std[:3], variance[axis, ddof] ** 0.5)
assert np.isnan(std[3])
def test_nanstd_roundoff(self):
# Regression test for GH 10242 (test data taken from GH 10489). Ensure
# that variance is stable.
data = Series(766897346 * np.ones(10))
for ddof in range(3):
result = data.std(ddof=ddof)
assert result == 0.0
@property
def prng(self):
return np.random.RandomState(1234)
class TestNanskewFixedValues:
# xref GH 11974
def setup_method(self, method):
# Test data + skewness value (computed with scipy.stats.skew)
self.samples = np.sin(np.linspace(0, 1, 200))
self.actual_skew = -0.1875895205961754
def test_constant_series(self):
# xref GH 11974
for val in [3075.2, 3075.3, 3075.5]:
data = val * np.ones(300)
skew = nanops.nanskew(data)
assert skew == 0.0
def test_all_finite(self):
alpha, beta = 0.3, 0.1
left_tailed = self.prng.beta(alpha, beta, size=100)
assert nanops.nanskew(left_tailed) < 0
alpha, beta = 0.1, 0.3
right_tailed = self.prng.beta(alpha, beta, size=100)
assert nanops.nanskew(right_tailed) > 0
def test_ground_truth(self):
skew = nanops.nanskew(self.samples)
tm.assert_almost_equal(skew, self.actual_skew)
def test_axis(self):
samples = np.vstack([self.samples, np.nan * np.ones(len(self.samples))])
skew = nanops.nanskew(samples, axis=1)
tm.assert_almost_equal(skew, np.array([self.actual_skew, np.nan]))
def test_nans(self):
samples = np.hstack([self.samples, np.nan])
skew = nanops.nanskew(samples, skipna=False)
assert np.isnan(skew)
def test_nans_skipna(self):
samples = np.hstack([self.samples, np.nan])
skew = nanops.nanskew(samples, skipna=True)
tm.assert_almost_equal(skew, self.actual_skew)
@property
def prng(self):
return np.random.RandomState(1234)
class TestNankurtFixedValues:
# xref GH 11974
def setup_method(self, method):
# Test data + kurtosis value (computed with scipy.stats.kurtosis)
self.samples = np.sin(np.linspace(0, 1, 200))
self.actual_kurt = -1.2058303433799713
def test_constant_series(self):
# xref GH 11974
for val in [3075.2, 3075.3, 3075.5]:
data = val * np.ones(300)
kurt = nanops.nankurt(data)
assert kurt == 0.0
def test_all_finite(self):
alpha, beta = 0.3, 0.1
left_tailed = self.prng.beta(alpha, beta, size=100)
assert nanops.nankurt(left_tailed) < 0
alpha, beta = 0.1, 0.3
right_tailed = self.prng.beta(alpha, beta, size=100)
assert nanops.nankurt(right_tailed) > 0
def test_ground_truth(self):
kurt = nanops.nankurt(self.samples)
tm.assert_almost_equal(kurt, self.actual_kurt)
def test_axis(self):
samples = np.vstack([self.samples, np.nan * np.ones(len(self.samples))])
kurt = nanops.nankurt(samples, axis=1)
tm.assert_almost_equal(kurt, np.array([self.actual_kurt, np.nan]))
def test_nans(self):
samples = np.hstack([self.samples, np.nan])
kurt = nanops.nankurt(samples, skipna=False)
assert np.isnan(kurt)
def test_nans_skipna(self):
samples = np.hstack([self.samples, np.nan])
kurt = nanops.nankurt(samples, skipna=True)
tm.assert_almost_equal(kurt, self.actual_kurt)
@property
def prng(self):
return np.random.RandomState(1234)
class TestDatetime64NaNOps:
@pytest.mark.parametrize("tz", [None, "UTC"])
@pytest.mark.xfail(reason="disabled")
# Enabling mean changes the behavior of DataFrame.mean
# See https://github.com/pandas-dev/pandas/issues/24752
def test_nanmean(self, tz):
dti = pd.date_range("2016-01-01", periods=3, tz=tz)
expected = dti[1]
for obj in [dti, DatetimeArray(dti), Series(dti)]:
result = nanops.nanmean(obj)
assert result == expected
dti2 = dti.insert(1, pd.NaT)
for obj in [dti2, DatetimeArray(dti2), Series(dti2)]:
result = nanops.nanmean(obj)
assert result == expected
def test_use_bottleneck():
if nanops._BOTTLENECK_INSTALLED:
pd.set_option("use_bottleneck", True)
assert pd.get_option("use_bottleneck")
pd.set_option("use_bottleneck", False)
assert not pd.get_option("use_bottleneck")
pd.set_option("use_bottleneck", use_bn)
@pytest.mark.parametrize(
"numpy_op, expected",
[
(np.sum, 10),
(np.nansum, 10),
(np.mean, 2.5),
(np.nanmean, 2.5),
(np.median, 2.5),
(np.nanmedian, 2.5),
(np.min, 1),
(np.max, 4),
(np.nanmin, 1),
(np.nanmax, 4),
],
)
def test_numpy_ops(numpy_op, expected):
# GH8383
result = numpy_op(pd.Series([1, 2, 3, 4]))
assert result == expected
@pytest.mark.parametrize(
"operation",
[
nanops.nanany,
nanops.nanall,
nanops.nansum,
nanops.nanmean,
nanops.nanmedian,
nanops.nanstd,
nanops.nanvar,
nanops.nansem,
nanops.nanargmax,
nanops.nanargmin,
nanops.nanmax,
nanops.nanmin,
nanops.nanskew,
nanops.nankurt,
nanops.nanprod,
],
)
def test_nanops_independent_of_mask_param(operation):
# GH22764
s = pd.Series([1, 2, np.nan, 3, np.nan, 4])
mask = s.isna()
median_expected = operation(s)
median_result = operation(s, mask=mask)
assert median_expected == median_result
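# Minimal illustration of the skipna convention exercised throughout this
# module (a sketch, not part of the upstream suite):
#
#   arr = np.array([1.0, np.nan, 3.0])
#   nanops.nanmean(arr)                 # 2.0 -- NaNs skipped by default
#   nanops.nanmean(arr, skipna=False)   # nan -- NaNs propagate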
|
|
#!/usr/bin/env python
"""Provide a top level interface for master worker computing.
"""
__author__ = "Eric Heien <pymw@heien.org>"
__date__ = "10 April 2008"
import atexit
import pickle
import errno
import logging
import inspect
import os
import signal
import sys
import tempfile
import textwrap
import threading
import time
import traceback
import types
import zipfile
from .interfaces import generic
if sys.version_info[0] > 2:
from io import StringIO
class PyMW_List:
"""A class representing a Python list with atomic operation functionality needed for PyMW."""
def __init__(self):
self._lock = threading.Lock()
self._add_event = threading.Condition(self._lock)
self._data = []
def __len__(self):
return len(self._data)
def get_data(self):
"""Returns a copy of the internal data list that can be modified."""
self._lock.acquire()
copy_list = list(self._data)
self._lock.release()
return copy_list
def append(self, item):
"""Atomically appends an item to the list and notifies any waiting threads."""
self._add_event.acquire()
self._data.append(item)
self._add_event.notifyAll()
self._add_event.release()
def pop(self, blocking=False):
"""Waits for any item to appear in the list, and pops it off."""
return self.pop_specific([], blocking)
def pop_specific(self, item_list=[], blocking=False):
"""Waits for any item from item_list to appear, and pops it off.
An empty item_list indicates any item is acceptable."""
item_set = set(item_list)
self._add_event.acquire()
while True:
# Check if any of the current items are acceptable
# If we have a list of items, choose one from the list
found_item = None
if len(item_list) > 0:
data_set = set(self._data)
search = item_set & data_set
if len(search) > 0:
found_item = list(search)[0]
self._data.remove(found_item)
# Otherwise any item is acceptable
elif len(self._data) > 0:
found_item = self._data.pop()
if found_item:
self._add_event.release()
return found_item
# If we didn't find anything and we should block,
# wait for a notification from a new item being added
if blocking:
self._add_event.wait()
# If we didn't find anything and we should not block, return None
else:
self._add_event.release()
return None
def contains(self, item):
"""Checks if the list contains the specified item."""
self._add_event.acquire()
n = self._data.count(item)
self._add_event.release()
if n != 0: return True
else: return False
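# Usage sketch for PyMW_List (illustrative, not part of the library API): one
# thread appends work items while another blocks until something shows up.
#
#   shared = PyMW_List()
#   shared.append("task-1")               # producer side
#   item = shared.pop(blocking=True)      # consumer side; waits for an item
#   shared.contains("task-1")             # False once the item has been popped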
class TaskException(Exception):
"""Represents an exception caused by a task failure."""
def __init__(self, value):
self.param = value
def __str__(self):
return repr(self.param)
class InterfaceException(Exception):
"""Represents an exception caused by an interface failure."""
def __init__(self, value, detail_str=None):
self.param = value
if detail_str:
self.details = detail_str
else:
self.details = ""
def __str__(self):
return repr(self.param)+"\n"+repr(self.details)
class PyMW_Task:
"""Represents a task to be executed."""
TASK_SUBMITTED = "submitted"
TASK_RUNNING = "running"
TASK_ERROR = "error"
TASK_FINISHED = "finished"
def __init__(self, task_name, executable, finished_queue, store_data_func, get_result_func,
input_data=None, input_arg=None, output_arg=None, file_loc="tasks",
data_file_zip=None, modules_file_zip=None, file_input=False, raw_exec=None):
# Make sure executable is valid
if not isinstance(executable, bytes) \
and not isinstance(executable, types.FunctionType) \
and not isinstance(executable, str):
raise TypeError("executable must be a filename or Python function")
self._finished_queue = finished_queue
self._executable = executable
self._input_data = input_data
self._output_data = None
self._task_name = task_name
self._get_result_func = get_result_func
self._store_data_func = store_data_func
self._file_input = file_input
self._data_file_zip = data_file_zip
self._modules_file_zip = modules_file_zip
self._raw_exec = raw_exec
# Set the input and output file locations
if input_arg:
self._input_arg = input_arg
else:
self._input_arg = file_loc + "/in_" + self._task_name + ".dat"
logging.info("Storing task "+str(self)+" into "+self._input_arg)
self._store_data_func(input_data, self._input_arg)
if output_arg:
self._output_arg = output_arg
else:
self._output_arg = file_loc + "/out_" + self._task_name + ".dat"
# Remove any old output files
try:
os.remove(self._output_arg)
except:
pass
self._task_state = self.TASK_SUBMITTED
# Task time bookkeeping
self._times = {"submit_time": time.time(), "execute_time": 0, "finish_time": 0}
def __str__(self):
return self._task_name
def __repr__(self):
return self._task_name
def _state_data(self):
return {"task_name": self._task_name, "executable": self._executable,
"input_arg": self._input_arg, "output_arg": self._output_arg,
"times": self._times, "state": self._task_state}
def task_finished(self, task_err=None, result=None):
"""This must be called by the interface class when the
task finishes execution. The result of execution should
be in the file indicated by output_arg."""
self._error = task_err
if task_err:
logging.info("Task "+str(self)+" had an error")
elif not result:
try:
self._output_data, self._stdout, self._stderr = self._get_result_func(self._output_arg)
except:
self._output_data = None
self._error = Exception("Error reading task result "+self._output_arg)
logging.info("Task "+str(self)+" finished")
else:
try:
                self._output_data = []
                # Results arrive as pickled files, so open them in binary mode
                for result_file in result:
                    with open(result_file[0], "rb") as f:
                        self._output_data.append(pickle.loads(f.read()))
except:
self._output_data = result
logging.info("Task "+str(self)+" finished")
self._times["finish_time"] = time.time()
if self._error: self._task_state = self.TASK_ERROR
else: self._task_state = self.TASK_FINISHED
self._finished_queue.append(self)
try:
self._worker_finish_func(self._assigned_worker)
except:
pass
def get_total_time(self):
"""Get the time from task submission to completion.
Returns None if task has not finished execution."""
if self._task_state is self.TASK_FINISHED or self._task_state is self.TASK_ERROR:
return self._times["finish_time"] - self._times["submit_time"]
else:
return None
def get_execution_time(self):
"""Get the time from start of task execution to completion.
This may be different from the CPU time.
Returns None if task has not finished execution."""
if self._task_state is self.TASK_FINISHED or self._task_state is self.TASK_ERROR:
return self._times["finish_time"] - self._times["execute_time"]
else:
return None
def get_progress(self):
"""Get the progress of the task, as represented by a double between 0 and 1."""
if self._task_state is self.TASK_FINISHED: return 1.0
elif self._task_state is self.TASK_SUBMITTED: return 0.0
else: return 0.0
def cleanup(self, delete_files):
try:
if delete_files:
os.remove(self._input_arg)
os.remove(self._output_arg)
except OSError:
pass
class PyMW_Scheduler:
"""Takes tasks submitted by user and sends them to the master-worker interface.
This is done in a separate thread to allow for asynchronous program execution."""
def __init__(self, task_queue, interface, task_match_func):
self._task_queue = task_queue
self._interface = interface
self._running = False
self._interface_worker_lock = threading.Condition()
if task_match_func: self._task_matcher = task_match_func
else: self._task_matcher = self._default_task_match_func
def _start_scheduler(self):
if not self._running:
logging.info("PyMW_Scheduler started")
self._running = True
_scheduler_thread = threading.Thread(target=self._scheduler)
_scheduler_thread.start()
def _default_task_match_func(self, task_list, worker_list):
return task_list[0], worker_list[0]
def _worker_finished(self, worker):
self._interface_worker_lock.acquire()
try:
self._interface.worker_finished(worker)
except:
pass
self._interface_worker_lock.notify()
self._interface_worker_lock.release()
# Returns true if the scheduler should continue running
def _should_scheduler_run(self):
return (len(self._task_queue) > 0)
# Get a list of workers available on this interface
def _get_worker_list(self):
try:
worker_list = self._interface.get_available_workers()
if not type(worker_list)==list: worker_list = [None]
except:
worker_list = [None]
return worker_list
# Match a worker from the list with a task
# If we couldn't find the task/worker in the list, the task matcher returned an invalid value
def _match_worker_and_task(self, task_list, worker_list):
try:
matched_task, matched_worker = self._task_matcher(task_list, worker_list)
except:
matched_worker = worker_list[0]
matched_task = task_list[0]
if worker_list.count(matched_worker) == 0: matched_worker = worker_list[0]
return matched_task, matched_worker
# Reserve the worker with the interface and remove the task from the queue
def _reserve_task_worker(self, matched_task, matched_worker):
# Remove the task from the queue
        popped_task = self._task_queue.pop_specific(item_list=[matched_task])
        # If the task was already taken off the queue by another pass, fall
        # back to whatever was actually popped; None leaves the match as-is
        if popped_task: matched_task = popped_task
# Reserve the worker with the interface
matched_task._assigned_worker = matched_worker
matched_task._worker_finish_func = self._worker_finished
try:
self._interface.reserve_worker(matched_worker)
except:
pass
# Lets the interface know that no workers matched, and checks if it should try again immediately
# Otherwise, it waits until a worker has finished or 1 second has passed (whichever is first)
def _wait_for_worker(self):
try:
if self._interface.try_avail_check_again(): return
except:
pass
self._interface_worker_lock.wait(timeout=1.0)
# Scheduler logic:
# While there are tasks on the queue
# - Get a list of available workers
# - If no worker is available
# ~ try again after a _worker_finished signal or 1 second (whichever is first)
# - else (> 0 workers are available)
# ~ call the task matching function with the list of tasks and list of workers
# - If the task matcher doesn't fit any worker with a task
# ~ try again after a _worker_finished signal or 1 second (whichever is first)
# - else (the task matcher gives a match)
# ~ Remove the task from the list of tasks
# ~ Reserve the worker with the interface
# ~ Execute the task on the interface with the given worker
# ~ When task_finished is called, replace the worker in the interface with _worker_finished
def _scheduler(self):
"""Waits for submissions to the task list, then submits them to the interface."""
# While there are tasks in the queue, assign them to workers
# NOTE: assumes that only the scheduler thread will remove tasks from the list
# only the scheduler thread will call reserve_worker, and there is only one scheduler thread
while self._should_scheduler_run():
# Hold the interface lock until we have a worker, matched it with a task and
# reserved it with the interface. Otherwise we may select the same worker twice
# or other problems can occur
self._interface_worker_lock.acquire()
# Get a list of available workers and tasks
# If none are available, then wait a little and try again
worker_list = self._get_worker_list()
if len(worker_list) == 0:
self._wait_for_worker()
self._interface_worker_lock.release()
continue
task_list = self._task_queue.get_data()
# Try to match one of the tasks with one of the workers
# If no suitable match is found, wait a little and try again
logging.info("Matching task with a worker")
matched_task, matched_worker = self._match_worker_and_task(task_list, worker_list)
if not matched_task:
self._wait_for_worker()
self._interface_worker_lock.release()
continue
# Confirm the match and reserve the task and worker
self._reserve_task_worker(matched_task, matched_worker)
self._interface_worker_lock.release()
# Wait until other tasks have been submitted and the thread count decreases,
# otherwise we might pass the process resource limitations
while threading.activeCount() > 100:
time.sleep(0.1)
# Execute the task on the interface with the given worker
logging.info("Executing task "+str(matched_task))
task_thread = threading.Thread(target=self._task_executor,
args=(self._interface.execute_task, matched_task, matched_worker))
task_thread.start()
logging.info("PyMW_Scheduler finished")
self._running = False
# Use this wrapper function to catch any interface exceptions,
# otherwise we can get hanging threads
def _task_executor(self, execute_task_func, next_task, worker):
try:
next_task._times["execute_time"] = time.time()
execute_task_func(next_task, worker)
except Exception as e:
next_task.task_finished(e)
def _exit(self):
self._task_queue.append(None)
class PyMW_Master:
"""Provides functions for users to submit tasks to the underlying interface."""
def __init__(self, interface=None, loglevel=logging.CRITICAL, delete_files=True, scheduler_func=None):
logging.basicConfig(level=loglevel, format="%(asctime)s %(levelname)s %(message)s")
if interface:
if not hasattr(interface, "execute_task"):
raise InterfaceException("Interface must have execute_task() function.")
if not hasattr(interface.execute_task, '__call__'):
raise InterfaceException("Interface execute_task must be a function.")
self._interface = interface
else:
self._interface = generic.GenericInterface()
self._start_time_str = str(int(time.time()))
self._submitted_tasks = []
self._queued_tasks = PyMW_List()
self._finished_tasks = PyMW_List()
self._delete_files = delete_files
self._task_dir_name = os.getcwd() + "/tasks"
self._cur_task_num = 0
self._function_source = {}
if sys.version_info[0] > 2:
self._pymw_interface_modules = "pickle", "sys", "zipfile", "traceback", "io"
else:
self._pymw_interface_modules = "pickle", "sys", "zipfile", "traceback","StringIO"
self._data_file_zips = {}
self._module_zips = {}
# Make the directory for input/output files, if it doesn't already exist
try:
os.mkdir(self._task_dir_name)
except OSError as e:
if e.errno != errno.EEXIST: raise
self._scheduler = PyMW_Scheduler(self._queued_tasks, self._interface, scheduler_func)
atexit.register(self._cleanup, None, None)
#signal.signal(signal.SIGKILL, self._cleanup)
def _setup_exec_file(self, file_name, main_func, modules, dep_funcs, file_input, data_file_zip_name):
"""Sets up a script file for executing a function. This file
contains the function source, dependent functions, dependent
modules and PyMW calls to get the input data and return the
output data."""
# If the interface doesn't provide methods for communicating with the workers, use default functions
all_funcs = (main_func,)+dep_funcs
all_funcs += (self._pymw_worker_manager, self.pymw_emit_result, )
try:
all_funcs += (self._interface.pymw_worker_read, self._interface.pymw_worker_write)
except AttributeError:
all_funcs += (self.pymw_worker_read, self.pymw_worker_write)
try:
interface_modules = self._interface._pymw_interface_modules
except AttributeError:
interface_modules = self._pymw_interface_modules
# Select the function to coordinate task execution on the worker
try:
all_funcs += (self._interface.pymw_worker_func,)
except AttributeError:
all_funcs += (self.pymw_worker_func,)
# Get the source code for the necessary functions
func_hash = hash(all_funcs)
if func_hash not in self._function_source:
func_sources = [textwrap.dedent(inspect.getsource(func)) for func in all_funcs]
self._function_source[func_hash] = [main_func.__name__, func_sources, file_name]
else:
return
# Create an archive of required modules
self._archive_files(modules, True)
func_data = self._function_source[func_hash]
func_file = open(file_name, "w")
# Create the necessary imports and function calls in the worker script
for module_name in modules+interface_modules:
func_file.write("import "+module_name+"\n")
func_file.writelines(func_data[1])
run_options = {}
if file_input: run_options["file_input"] = True
if data_file_zip_name: run_options["arch_file"] = data_file_zip_name
func_file.write("_pymw_worker_manager("+func_data[0]+", "+repr(run_options)+")\n")
func_file.close()
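    # For reference, the worker script assembled above has roughly this shape
    # (an illustrative sketch; the exact import list depends on the interface):
    #
    #   import pickle
    #   import sys
    #   ...                                   # remaining interface/user modules
    #   def my_func(...): ...                 # user function and dependencies
    #   def _pymw_worker_manager(...): ...    # PyMW helper function sources
    #   _pymw_worker_manager(my_func, {})     # entry point with run options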
def _archive_files(self, data_files, is_modules=False):
if len(data_files) == 0: return None
file_hash = hash(data_files)
if is_modules:
if file_hash in self._module_zips:
return self._module_zips[file_hash]
else:
if file_hash in self._data_file_zips:
return self._data_file_zips[file_hash]
# TODO: this is insecure, try to use the arch_fd in creating the Zipfile object
if is_modules: arch_prefix = "modules_"
else: arch_prefix = "data_"
arch_fd, arch_file_name = tempfile.mkstemp(suffix=".zip", prefix=arch_prefix, dir=self._task_dir_name)
os.close(arch_fd)
archive_zip = zipfile.PyZipFile(arch_file_name, mode="w")
for dfile in data_files:
ind_file_name = dfile.split("/")[-1]
if is_modules:
try:
archive_zip.writepy(pathname=dfile+".py")
except IOError:
logging.info("Couldn't find file for module "+dfile)
else:
archive_zip.write(filename=dfile, arcname=ind_file_name)
archive_zip.close()
if is_modules:
self._module_zips[file_hash] = arch_file_name
return self._module_zips[file_hash]
else:
self._data_file_zips[file_hash] = arch_file_name
return self._data_file_zips[file_hash]
def _check_task_list(self, task_list):
if len(self._submitted_tasks) <= 0:
raise TaskException("No tasks have been submitted")
# Check that the task(s) are of type PyMW_Task
for t in task_list:
if not isinstance(t, PyMW_Task):
raise TaskException("Function requires either a task, a list of tasks, or None")
# Check that the task(s) have been submitted before
submit_intersect = set(self._submitted_tasks) & set(task_list)
if len(submit_intersect) != len(task_list):
raise TaskException("Task has not been submitted")
def submit_task(self, executable, input_data=None, modules=(), dep_funcs=(), data_files=(), input_from_file=False):
"""Creates and submits a task to the internal list for execution.
Returns the created task for later use.
executable can be either a filename (Python script) or a function."""
# Check if the executable is a Python function or a script
if hasattr(executable, '__call__'):
task_name = str(executable.__name__)+"_"+self._start_time_str+"_"+str(self._cur_task_num)
exec_file_name = self._task_dir_name+"/"+str(executable.__name__)+"_"+self._start_time_str+".py"
elif isinstance(executable, str):
# TODO: test here for existence of script
task_name = str(executable)+"_"+self._start_time_str+"_"+str(self._cur_task_num)
exec_file_name = executable+"_"+self._start_time_str+".py"
else:
raise TaskException("Executable must be a filename or function")
self._cur_task_num += 1
# Create a zip archive containing the files of data_files
if len(data_files) > 0:
zip_arch_file = self._archive_files(data_files, False)
zip_arch_file_name = zip_arch_file.split("/")[-1]
else:
zip_arch_file = None
zip_arch_file_name = None
# Create a zip archive containing the modules
        if len(modules) > 0:
            mod_arch_file = self._archive_files(modules, True)
            mod_arch_file_name = mod_arch_file.split("/")[-1]
else:
mod_arch_file = None
mod_arch_file_name = None
# Setup the necessary files
if hasattr(executable, '__call__'):
self._setup_exec_file(exec_file_name, executable, modules, dep_funcs, input_from_file, zip_arch_file_name)
try:
store_func = self._interface.pymw_master_write
get_result_func = self._interface.pymw_master_read
except AttributeError:
store_func = self.pymw_master_write
get_result_func = self.pymw_master_read
new_task = PyMW_Task(task_name=task_name, executable=exec_file_name,
store_data_func=store_func, get_result_func=get_result_func,
finished_queue=self._finished_tasks, input_data=input_data,
file_loc=self._task_dir_name, data_file_zip=zip_arch_file,
modules_file_zip=mod_arch_file, file_input=input_from_file,
raw_exec=executable)
self._submitted_tasks.append(new_task)
self._queued_tasks.append(item=new_task)
self._scheduler._start_scheduler()
return new_task
def get_result(self, task=None, blocking=True):
"""Gets the result of the executed task.
If task is None, return the result of the next finished task.
If task is a list of tasks, return the result of any task in the list.
If blocking is false and the task is not finished, returns None."""
if not task:
task_list = []
elif type(task)==list:
task_list = task
else:
task_list = [task]
# Check that the task(s) are of type PyMW_Task and have been submitted before
self._check_task_list(task_list)
my_task = self._finished_tasks.pop_specific(task_list, blocking)
if not my_task:
return None, None
if my_task._error:
raise my_task._error
return my_task, my_task._output_data
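    # End-to-end usage sketch (illustrative only, using the methods above):
    #
    #   def add(a, b):
    #       return a + b
    #
    #   master = PyMW_Master()
    #   task = master.submit_task(add, input_data=(1, 2))
    #   task, result = master.get_result(task)   # result == 3 once the worker runs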
def get_progress(self, task):
if not task:
task_list = []
elif type(task)==list:
task_list = task
else:
task_list = [task]
# Check that the task(s) are of type PyMW_Task and have been submitted before
self._check_task_list(task_list)
task_progress = [task.get_progress() for task in task_list]
return task_progress
def get_status(self):
self._scheduler._interface_worker_lock.acquire()
try:
status = self._interface.get_status()
except:
status = {"interface_status": "error"}
self._scheduler._interface_worker_lock.release()
if not type(status)==dict: status = {"interface_status": "error"}
status["tasks"] = self._submitted_tasks
return status
def _cleanup(self, signum, frame):
self._scheduler._exit()
try:
self._interface._cleanup()
except AttributeError:
pass
for task in self._submitted_tasks:
task.cleanup(self._delete_files)
if self._delete_files:
for exec_file in self._function_source:
try:
os.remove(self._function_source[exec_file][2])
except OSError:
pass
for hash_ind in self._data_file_zips:
try:
os.remove(self._data_file_zips[hash_ind])
except OSError:
pass
for hash_ind in self._module_zips:
try:
os.remove(self._module_zips[hash_ind])
except OSError:
pass
try:
if self._delete_files:
os.rmdir(self._task_dir_name)
pass
except OSError:
pass
def pymw_master_read(self, loc):
infile = open(loc, 'rb')
obj = pickle.Unpickler(infile).load()
infile.close()
return obj
def pymw_master_write(self, output, loc):
outfile = open(loc, 'wb')
pickle.Pickler(outfile).dump(output)
outfile.close()
def pymw_worker_read(options):
infile = open(sys.argv[1], 'rb')
obj = pickle.Unpickler(infile).load()
infile.close()
return obj
def pymw_worker_write(output, options):
outfile = open(sys.argv[2], 'wb')
pickle.Pickler(outfile).dump(output)
outfile.close()
def pymw_set_progress(prog_ratio):
return
def pymw_emit_result(result):
global _res_array
_res_array.append(result)
def _pymw_worker_manager(func_name_to_call, options):
global _res_array
_res_array = []
try:
# Redirect stdout and stderr
old_stdout = sys.stdout
old_stderr = sys.stderr
if sys.version_info[0] > 2:
sys.stdout = io.StringIO()
sys.stderr = io.StringIO()
else:
sys.stdout = StringIO.StringIO()
sys.stderr = StringIO.StringIO()
# If there is a zip file, unzip the contents
if "arch_file" in options:
data_arch = zipfile.PyZipFile(file=options["arch_file"], mode='r')
archive_files = data_arch.namelist()
for file_name in archive_files:
decompressed_file = open(file_name, "wb")
decompressed_file.write(data_arch.read(file_name))
decompressed_file.close()
data_arch.close()
# Call the worker function
pymw_worker_func(func_name_to_call, options)
# Get any stdout/stderr printed during the worker execution
out_str = sys.stdout.getvalue()
err_str = sys.stderr.getvalue()
sys.stdout.close()
sys.stderr.close()
# Revert stdout/stderr to originals
sys.stdout = old_stdout
sys.stderr = old_stderr
# The interface is responsible for cleanup, so don't bother deleting the archive files
# TODO: modify this to deal with other options (multiple results, etc)
pymw_worker_write([_res_array[0], out_str, err_str], options)
except Exception as e:
sys.stdout = old_stdout
sys.stderr = old_stderr
traceback.print_exc()
exit(e)
def pymw_worker_func(func_name_to_call, options):
# Get the input data
input_data = pymw_worker_read(options)
if not input_data: input_data = ()
# Execute the worker function
result = func_name_to_call(*input_data)
# Output the result
pymw_emit_result(result)
class PyMW_MapReduce:
def __init__(self, master):
#def __init__(self, master, exec_map, exec_reduce, num_worker=1, input_data=None, modules=(), dep_funcs=()):
self._master=master
self._task_dir_name = "tasks"
def _data_split(self, data, num):
q1=len(data)//num
q2=len(data)%num
res=[]
p=0
for i in range(num):
j=0
if q2>0:
j=1
q2-=1
res.append(data[p:p+q1+j])
p=p+q1+j
return res
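    # Example (illustrative): _data_split([1, 2, 3, 4, 5], 2) -> [[1, 2, 3], [4, 5]];
    # the remainder is spread one element at a time over the earliest chunks.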
def submit_task_mapreduce(self, exec_map, exec_reduce, num_worker=1, input_data=None, modules=(), dep_funcs=(), red_worker=-1, file_input=False):
task_name = str(exec_map.__name__)+"_"+str(exec_reduce.__name__)+"_MR"
exec_file_name = self._task_dir_name+"/"+task_name
try:
store_func = self._master._interface.pymw_master_write
get_result_func = self._master._interface.pymw_master_read
except AttributeError:
store_func = self._master.pymw_master_write
get_result_func = self._master.pymw_master_read
new_maintask = PyMW_Task(task_name=task_name, executable=task_name,
store_data_func=store_func, get_result_func=get_result_func,
finished_queue=self._master._finished_tasks, input_data=None,
file_loc=self._task_dir_name)
self._master._submitted_tasks.append(new_maintask)
#start mapreduce_thread
thread1 = threading.Thread(target=self.mapreduce_thread, args=(new_maintask, exec_map, exec_reduce, num_worker, input_data, modules, dep_funcs,red_worker,file_input))
thread1.start()
return new_maintask
def mapreduce_thread(self, new_maintask, exec_map, exec_reduce, num_worker, input_data, modules=(), dep_funcs=(), red_worker=-1, file_input=False):
split_data=self._data_split(input_data,num_worker)
if file_input:
size=0
for i in input_data: size+=os.path.getsize(i[0])
size_list=[]
for i in self._data_split(list(range(size)),num_worker): size_list.append(i[-1]+1-i[0])
size_num=0
rest=size_list[size_num]
split_data, data_block = [],[]
for i in input_data: # for each files
pos=0
file_size=os.path.getsize(i[0])
while pos<file_size:
if file_size-pos < rest:
data_block.append([i[0],pos,file_size])
rest-=file_size-pos
pos=file_size
else:
data_block.append([i[0],pos,pos+rest])
pos+=rest
split_data.append(data_block)
data_block=[]
size_num+=1
if size_num!=num_worker : rest=size_list[size_num]
maptasks = []
for i in range(num_worker):
maptasks.append(self._master.submit_task(exec_map , input_data=(split_data[i],), modules=modules, dep_funcs=dep_funcs, input_from_file=file_input))
reducetasks = []
res_list=[]
for i in range(len(maptasks)):
res_task,result = self._master.get_result(maptasks)
maptasks.remove(res_task)
if red_worker==-1: # map_num == reduce_num
reducetasks.append(self._master.submit_task(exec_reduce, input_data=(result,), modules=modules, dep_funcs=dep_funcs, input_from_file=file_input))
else:
res_list+=result
if red_worker!=-1: # map_num > reduce_num
res_split=self._data_split(res_list, red_worker)
for i in range(red_worker):
reducetasks.append(self._master.submit_task(exec_reduce, input_data=(res_split[i],), modules=modules, dep_funcs=dep_funcs, input_from_file=file_input))
result_list = []
for i in range(len(reducetasks)):
res_task,result = self._master.get_result(reducetasks)
reducetasks.remove(res_task)
result_list.append(result)
new_maintask.task_finished(result=result_list)
|
|
# -*- coding: utf-8 -*-
# import
## batteries
from __future__ import print_function
import os
import sys
import argparse
from itertools import product,cycle
import string
## 3rd party
import numpy as np
import pandas as pd
## package
from pyTecanFluent import Utils
from pyTecanFluent import Fluent
from pyTecanFluent import Labware
# functions
def get_desc():
desc = 'Create robot commands for qPCR assay setup'
return desc
def parse_args(test_args=None, subparsers=None):
# desc
desc = get_desc()
epi = """DESCRIPTION:
Create a worklist file for the TECAN Fluent robot for qPCR setup.
The input is an exported plate layout from the BioRad qPCR software.
The file format should be Excel or CSV.
Just create a plate layout for your experiment, then export it and add the needed info:
The following columns should also be added to the table:
* "Sample labware name"
* labware name containing the sample (any name that you want)
* Example: "source plate"
* "Sample labware type"
* labware type (must EXACTLY match an existing labware type)
* Example: "96 Eppendorf TwinTec PCR"
* "Sample location"
* location of sample in the source plate.
* numeric; column-wise indexing
* "Sample volume"
* numeric; sample volume in ul
* "MM name"
* Name of master mix for that sample
* This allows for multiple master mixes per assay
* "MM volume"
* Volume of master mix in PCR rxn (ul)
* "Water volume"
* Volume of water in PCR rxn (ul)
Notes:
* Sample locations in plates are numbered column-wise (down each column, then left-to-right)
* The setup file (input table) MUST have a header (capitalization doesn't matter)
* All volumes are in ul.
"""
if subparsers:
parser = subparsers.add_parser('qPCR', description=desc, epilog=epi,
formatter_class=argparse.RawTextHelpFormatter)
else:
parser = argparse.ArgumentParser(description=desc, epilog=epi,
formatter_class=argparse.RawTextHelpFormatter)
# args
## I/O
groupIO = parser.add_argument_group('I/O')
groupIO.add_argument('setup', metavar='SetupFile', type=str,
help='An Excel or CSV file with experimental setup')
groupIO.add_argument('--prefix', type=str, default='TECAN_qPCR',
help='Output file name prefix (default: %(default)s)')
groupIO.add_argument('--format', type=str, default=None,
choices=[None, 'excel', 'csv', 'tsv'],
help='File format (excel, csv, or tsv). If not provided, the format is determined from the file extension (default: %(default)s)')
## Source labware
src = parser.add_argument_group('Source labware')
src.add_argument('--mm-type', type=str, default='2ml Eppendorf waste',
help='Mastermix labware type (default: %(default)s)')
src.add_argument('--water-type', type=str, default='100ml_1 waste',
help='Water labware type (default: %(default)s)')
## Destination labware
dest = parser.add_argument_group('Destination labware')
dest.add_argument('--dest', type=str, default='Destination plate',
help='Destination plate labware name (default: %(default)s)')
dest.add_argument('--dest-type', type=str, default='384 Well Biorad PCR',
help='Destination plate labware type (default: %(default)s)')
# Liquid classes
liq = parser.add_argument_group('Liquid classes')
liq.add_argument('--mm-liq', type=str, default='MasterMix Free Multi Wall Disp',
help='Mastermix liquid class (default: %(default)s)')
liq.add_argument('--samp-liq', type=str, default='Water Free Single Wall Disp',
help='Sample liquid class (default: %(default)s)')
liq.add_argument('--water-liq', type=str, default='Water Free Single Wall Disp',
help='Water liquid class (default: %(default)s)')
liq.add_argument('--n-tip-reuse', type=int, default=4,
help='Number of tip reuses for applicable reagents (default: %(default)s)')
# Parse & return
if test_args:
args = parser.parse_args(test_args)
return args
return parser
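# Hedged CLI sketch (file names are illustrative): via the pyTecanFluent entry
# point this subcommand is typically invoked as
#   pyTecanFluent qPCR my_plate_layout.xlsx --prefix TECAN_qPCR_run1
# which writes TECAN_qPCR_run1.gwl, TECAN_qPCR_run1_labware.txt and
# TECAN_qPCR_run1_report.txt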
def main(args=None):
# Input
if args is None:
args = parse_args()
check_args(args)
# Load input assay setup table
df_setup = load_setup(args.setup,
file_format=args.format)
# gwl object init
TipTypes = ['FCA, 1000ul SBS', 'FCA, 200ul SBS',
'FCA, 50ul SBS', 'FCA, 10ul SBS']
gwl = Fluent.gwl(TipTypes)
# adding sample/reagent destinations to setup table
n_wells = gwl.db.get_labware_wells(args.dest_type)
add_dest(df_setup, args.dest, args.dest_type, n_wells)
df_setup = check_rack_labels(df_setup)
# Reordering dest for optimal pipetting
if n_wells == 384:
df_setup = Utils.reorder_384well(df_setup, gwl,
labware_name_col='dest_labware_name',
labware_type_col='dest_labware_type',
position_col='dest_target_position')
elif n_wells == 96:
df_setup.sort_values(by=['dest_target_position'], inplace=True)
else:
msg = 'Labware type "{}" not recognized'
raise ValueError(msg.format(args.dest_type))
# Adding commands to gwl object
pip_mastermixes(df_setup, gwl=gwl,
src_labware_type=args.mm_type,
liq_cls=args.mm_liq,
n_tip_reuse=args.n_tip_reuse)
## Samples
pip_samples(df_setup, gwl=gwl,
liq_cls=args.samp_liq)
## Water
pip_water(df_setup, gwl=gwl,
src_labware_type=args.water_type,
liq_cls=args.water_liq)
## writing out worklist (gwl) file
gwl_file = args.prefix + '.gwl'
gwl.write(gwl_file)
# making labware table
lw = Labware.labware()
lw.add_gwl(gwl)
lw_df = lw.table()
lw_file = args.prefix + '_labware.txt'
lw_df.to_csv(lw_file, sep='\t', index=False)
# Creating report file
report_file = args.prefix + '_report.txt'
with open(report_file, 'w') as repFH:
MM_names = np.unique(df_setup['mm name'])
for i,MM_name in enumerate(MM_names):
df = df_setup.loc[df_setup['mm name'] == MM_name]
df.reset_index(inplace=True)
write_report(df, MM_name=MM_name, outFH=repFH)
# status on files written
Utils.file_written(gwl_file)
Utils.file_written(lw_file)
Utils.file_written(report_file)
def check_args(args):
"""Checking user input
"""
# special characters for namings
args.dest = Utils.rm_special_chars(args.dest)
def load_setup(input_file, file_format=None, header=0):
"""Loading setup file (Excel, csv, or tab-delim)
"""
# format
if file_format is None:
if input_file.endswith('.csv'):
file_format = 'csv'
elif input_file.endswith('.txt') or input_file.endswith('.tsv'):
file_format = 'tab'
elif input_file.endswith('.xls') or input_file.endswith('.xlsx'):
file_format = 'excel'
else:
file_format = file_format.lower()
# load via pandas IO
if file_format == 'csv':
df = pd.read_csv(input_file, sep=',', header=header)
elif file_format == 'tab':
df = pd.read_csv(input_file, sep='\t', header=header)
elif file_format == 'excel':
xls = pd.ExcelFile(input_file)
df = pd.read_excel(xls, header=header)
else:
raise ValueError('Setup file not in usable format')
# caps-invariant column IDs
df.columns = [x.lower() for x in df.columns]
# checking format of table
check_df_setup(df)
# filtering NaN for required columns
df.dropna(subset=['sample type', 'sample labware name', 'sample location'],
inplace=True)
# making sure labware names are "TECAN worklist friendly"
df = Utils.rm_special_chars(df, 'sample labware name')
# assert & return
assert df.shape[1] > 1, 'Input file is only 1 column; wrong delimiter used?'
return df
def check_df_setup(df_setup):
"""Assertions of df_conc object formatting
"""
# checking for column IDs
col_IDs = ('row', 'column', 'sample type',
'sample labware name', 'sample labware type',
'sample location', 'sample volume',
'mm name', 'mm volume', 'water volume')
msg = 'Column "{}" not found (captilization invariant)'
for x in col_IDs:
if not x in df_setup.columns:
raise ValueError(msg.format(x))
# checking sample locations (>=1)
msg = 'ERROR (SetupFile, line={}): location is < 1'
for i,loc in enumerate(df_setup['sample location']):
if loc < 1:
print(msg.format(i), file=sys.stderr)
# checking sample/mastermix/water volumes (must be >= 0 or NaN)
msg = 'ERROR (SetupFile, line={}): volume is < 0'
for i,vol in enumerate(df_setup['sample volume']):
assert np.isnan(vol) or vol >= 0.0, msg.format(i)
for i,vol in enumerate(df_setup['mm volume']):
assert np.isnan(vol) or vol >= 0.0, msg.format(i)
for i,vol in enumerate(df_setup['water volume']):
assert np.isnan(vol) or vol >= 0.0, msg.format(i)
# removing "tube" from end of labware type (if present)
Utils.rm_tube(df_setup, 'sample labware type')
def check_rack_labels(df_setup):
"""Removing '.' for rack labels (causes execution failures)
"""
cols = ['sample labware name', 'mm name', 'dest_labware_name']
for x in cols:
df_setup[x] = [str(y).replace('.', '_') for y in df_setup[x].tolist()]
return df_setup
def plate2robot_loc(row_val, col_val, n_wells):
"""Changing positioning from row (letter) and column (number)
to just numeric position (column-wise) on the plate,
which is needed for TECAN robot positioning.
Using index for identifying well number in plate
[args]
row_val: string; row letter (e.g., 'A')
col_val: int; column number (1-indexed)
n_wells: int; number of wells in the plate (96 or 384), used for location indexing
"""
# index for converting row to numeric
idx = string.ascii_uppercase
idx = {x:i+1 for i,x in enumerate(idx)}
row_val = idx[row_val]
# getting location on plate
msg = 'Destination location "{}" is out of range'
if n_wells == 96:
loc = (col_val - 1) * 8 + row_val
assert loc > 0 and loc <= 96, msg.format(loc)
elif n_wells == 384:
loc = (col_val - 1) * 16 + row_val
assert loc > 0 and loc <= 384, msg.format(loc)
else:
msg = 'Number of wells is not valid: "{}"'
raise ValueError(msg.format(n_wells))
return loc
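# Hedged example: for a 96-well plate, well B3 (row 'B', column 3) maps to
#   plate2robot_loc('B', 3, n_wells=96) -> (3 - 1) * 8 + 2 = 18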
def add_dest(df_setup, dest_labware_name, dest_labware_type, n_wells=96):
"""Setting destination locations for samples & reagents
Adding to df_setup:
[dest_labware_name, dest_labware_type, dest_target_position]
"""
# setting destination labware
df_setup['dest_labware_name'] = dest_labware_name
df_setup['dest_labware_type'] = dest_labware_type
# setting destination location based on plate layout
func = lambda x: plate2robot_loc(x['row'], x['column'], n_wells=n_wells)
df_setup['dest_target_position'] = df_setup.apply(func, 1)
def reorder_384well(df, reorder_col):
"""Reorder values so that the odd, then the even locations are
transferred. This is faster for a 384-well plate
df: pandas.DataFrame
reorder_col: column name to reorder
"""
df['TECAN_sort_IS_EVEN'] = [x % 2 == 0 for x in df[reorder_col]]
df.sort_values(by=['TECAN_sort_IS_EVEN', reorder_col], inplace=True)
df = df.drop('TECAN_sort_IS_EVEN', axis=1)
df.index = range(df.shape[0])
return df
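# Hedged example: positions [1, 2, 3, 4] are reordered to [1, 3, 2, 4]
# (all odd wells first, then all even wells), which the docstring notes is
# faster on a 384-well plate.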
def pip_mastermixes(df_setup, gwl, src_labware_type,
liq_cls='Mastermix Free Single',
n_tip_reuse=1):
"""Writing worklist commands for aliquoting mastermix.
Re-using tips
"""
# split by mastermix names (different master mixes)
MM_names = np.unique(df_setup['mm name'])
gwl.add(Fluent.Comment('Mastermixes'))
for i,MM_name in enumerate(MM_names):
# partitioning to just focal mastermix
df = df_setup.loc[df_setup['mm name'] == MM_name]
df.reset_index(inplace=True)
# all same volumes for mastermix?
pip_mastermix(df, gwl=gwl,
MM_name=MM_name,
src_labware_type=src_labware_type,
liq_cls=liq_cls,
n_tip_reuse=n_tip_reuse)
def pip_mastermix(df_map, gwl, MM_name, src_labware_type,
liq_cls='Mastermix Free Single',
n_tip_reuse=1):
"""Dispense of particular mastermix
"""
# df copy
df = df_map.copy()
## ordering df for proper tip reuse
x = cycle(range(8))
df['CHANNEL_ORDER'] = [next(x) for y in range(df.shape[0])]
x = cycle(range(n_tip_reuse))
df['TIP_BATCH'] = Utils.tip_batch(df['CHANNEL_ORDER'], n_tip_reuse)
df.sort_values(by=['TIP_BATCH',
'CHANNEL_ORDER',
'dest_target_position'], inplace=True)
df.reset_index(inplace=True)
# iterating mastermix records in setup table (single mastermix)
gwl.add(Fluent.Comment('Mastermix: {}'.format(MM_name)))
for i in range(df.shape[0]):
# aspiration
asp = Fluent.Aspirate()
asp.RackLabel = '{0} MM[{1:0>3}]'.format(MM_name, 1)
asp.RackType = src_labware_type
asp.Position = 1
asp.Volume = df.loc[i,'mm volume']
asp.LiquidClass = liq_cls
gwl.add(asp)
# dispensing
disp = Fluent.Dispense()
disp.RackLabel = df.loc[i,'dest_labware_name']
disp.RackType = df.loc[i,'dest_labware_type']
disp.Position = df.loc[i,'dest_target_position']
disp.Volume = df.loc[i,'mm volume']
disp.LiquidClass = liq_cls
gwl.add(disp)
# waste
if (i + 1) % n_tip_reuse == 0 or i + 1 == df.shape[0]:
gwl.add(Fluent.Waste())
# finish section
gwl.add(Fluent.Break())
def pip_mastermix_multi_disp(df, gwl, MM_name, src_labware_type, multi_disp=6,
liq_cls='Mastermix Free Multi'):
"""Writing worklist commands for aliquoting mastermix.
Re-using tips
"""
# assertions
cols = ['dest_labware_name', 'dest_labware_type']
assert df.drop_duplicates(cols).shape[0] == 1
# getting wells to exclude
lw_type = df.loc[0,'dest_labware_type']
n_wells = gwl.db.get_labware_wells(lw_type)
all_wells = [x+1 for x in range(n_wells)]
target_pos = df['dest_target_position'].tolist()
to_exclude = set(all_wells) - set(target_pos)
# creating reagent distribution command
rd = Fluent.Reagent_distribution()
rd.SrcRackLabel = '{0} MM[{1:0>3}]'.format(MM_name, 1)
rd.SrcRackType = '1.5ml Eppendorf waste'
rd.SrcPosStart = 1
rd.SrcPosEnd = 1
# dispense parameters
rd.DestRackLabel = df.loc[0,'dest_labware_name']
rd.DestRackType = df.loc[0,'dest_labware_type']
rd.DestPosStart = 1
rd.DestPosEnd = n_wells
# other
rd.Volume = df.loc[0,'mm volume']
rd.LiquidClass = liq_cls
rd.NoOfDiTiReuses = 2
rd.NoOfMultiDisp = multi_disp
rd.Direction = 0
rd.ExcludedDestWell = ';'.join([str(x) for x in list(to_exclude)])
# adding to gwl object
gwl.add(rd)
# adding break
gwl.add(Fluent.Break())
def pip_samples(df_setup, gwl, liq_cls='Water Contact Wet Single'):
"""Commands for aliquoting samples into distination plate
"""
gwl.add(Fluent.Comment('Samples'))
# filtering 'nan' from table
x = pd.notnull(df_setup['sample labware name'])
y = pd.notnull(df_setup['sample labware type'])
z = pd.notnull(df_setup['sample location'])
df = df_setup.loc[x & y & z]
df.reset_index(inplace=True)
if df.shape[0] < df_setup.shape[0]:
msg = 'WARNING: some samples skipped due to missing values!'
print(msg, file=sys.stderr)
# for each Sample, create asp/dispense commands
for i in range(df.shape[0]):
# aspiration
asp = Fluent.Aspirate()
asp.RackLabel = df.loc[i,'sample labware name']
asp.RackType = df.loc[i,'sample labware type']
asp.Position = df.loc[i, 'sample location']
asp.Volume = df.loc[i,'sample volume']
asp.LiquidClass = liq_cls
gwl.add(asp)
# dispensing
disp = Fluent.Dispense()
disp.RackLabel = df.loc[i,'dest_labware_name']
disp.RackType = df.loc[i,'dest_labware_type']
disp.Position = df.loc[i,'dest_target_position']
disp.Volume = df.loc[i,'sample volume']
disp.LiquidClass = liq_cls
gwl.add(disp)
# waste (no tip re-use)
gwl.add(Fluent.Waste())
gwl.add(Fluent.Break())
def pip_water(df_setup, gwl, src_labware_type,
liq_cls='Water Contact Wet Single'):
"""Writing worklist commands for aliquoting water
Using single asp-disp.
"""
gwl.add(Fluent.Comment('Water'))
# filtering 'nan' from table
x = pd.notnull(df_setup['water volume'])
df = df_setup.loc[x]
df.index = range(df.shape[0])
if df.shape[0] < df_setup.shape[0]:
msg = 'WARNING: water asp/disp for some samples skipped due to missing "water volume" values!'
print(msg, file=sys.stderr)
# for each Sample, create asp/dispense commands
for i in range(df.shape[0]):
if df.loc[i,'water volume'] <= 0:
msg = 'WARNING: skipping water asp/disp for sample (volume <= 0)'
print(msg, file=sys.stderr)
continue
# aspiration
asp = Fluent.Aspirate()
asp.RackLabel = 'Water source[{0:0>3}]'.format(1)
asp.RackType = src_labware_type
asp.Position = 1
asp.Volume = df.loc[i,'water volume']
asp.LiquidClass = liq_cls
gwl.add(asp)
# dispensing
disp = Fluent.Dispense()
disp.RackLabel = df.loc[i,'dest_labware_name']
disp.RackType = df.loc[i,'dest_labware_type']
disp.Position = df.loc[i,'dest_target_position']
disp.Volume = df.loc[i,'water volume']
disp.LiquidClass = liq_cls
gwl.add(disp)
# waste (no tip re-use)
gwl.add(Fluent.Waste())
gwl.add(Fluent.Break())
def add_error(x, error_perc):
if x is None:
return None
return x * (1.0 + error_perc / 100.0)
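# Hedged example: add_error(100.0, 10) -> 110.0 (adds a 10% pipetting-error margin)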
def write_report_line(outFH, subject, volume, round_digits=1, error_perc=None):
if volume is None:
v = 'NA'
else:
if error_perc is not None:
volume = add_error(volume, error_perc)
v = round(volume, round_digits)
outFH.write('{}:\t{}\n'.format(subject, v))
def write_report(df_setup, MM_name, outFH):
"""Writing a report on the qPCR setup
"""
# calculating total volumes
n_rxn = df_setup.shape[0]
## total mastermix
total_mm_volume = np.sum(df_setup['mm volume'])
## total water
total_water_volume = np.sum(df_setup['water volume'])
# report
# number of samples
outFH.write('# PCR REPORT\n')
outFH.write('MasterMix: {}\n'.format(MM_name))
outFH.write('Number of total rxns:\t{}\n'.format(n_rxn))
## raw total volumes
outFH.write('# Total reagent volumes (ul)\n')
write_report_line(outFH, 'MasterMix', total_mm_volume)
write_report_line(outFH, 'Water', total_water_volume)
## with pipetting error
outFH.write('# Total reagent volumes + 10% extra (ul)\n')
write_report_line(outFH, 'MasterMix', total_mm_volume, error_perc=10)
write_report_line(outFH, 'Water', total_water_volume, error_perc=10)
## end
outFH.write('\n')
# main
if __name__ == '__main__':
pass
|
|
#!/usr/bin/env python
import vtk
import numpy as np
from vmtk import vmtkscripts
from vmtk import vtkvmtk
import argparse
import itertools
import os
import math
import copy
def close_cell(section):
#assume the cell array of lines
section.BuildCells()
section.BuildLinks()
numberOfLinePoints = section.GetNumberOfPoints()
cell_ids = vtk.vtkIdList()
numberOfSingleCellPoints = 0
termination_pts = []
for i in range(section.GetNumberOfPoints()):
section.GetPointCells(i,cell_ids)
if(cell_ids.GetNumberOfIds() == 1):
numberOfSingleCellPoints += 1
termination_pts.append(i)
if(numberOfSingleCellPoints == 2):
print(termination_pts)
line = vtk.vtkLine()
line.GetPointIds().SetId(0, termination_pts[0])
line.GetPointIds().SetId(1, termination_pts[1])
section.GetLines().InsertNextCell(line)
elif(numberOfSingleCellPoints > 2):
print("disconnected section")
def ComputePolygonArea(section):
# calculate area of closed polygon
section.BuildCells()
section_area = 0.0
area_calc = 0.0
if (section.GetNumberOfCells() == 0):
print("shwarma")
return section_area
elif (section.GetNumberOfCells() > 1):
print("there should only be one cell")
trianglePointIds = vtk.vtkIdList()
points_list = vtk.vtkPoints()
for j in range(section.GetNumberOfCells()):
area_calc = 0.0
cell = section.GetCell(j)
if ( cell.GetCellType() != vtk.VTK_POLYGON ):
print(cell.GetCellType())
continue
#cell.Triangulate(j, trianglePointIds, points_list)
cell.Triangulate(trianglePointIds)
numberOfTriangles = trianglePointIds.GetNumberOfIds() // 3
#print("triangles", numberOfTriangles)
point0 = [0.0,0.0,0.0]
point1 = [0.0,0.0,0.0]
point2 = [0.0,0.0,0.0]
for i in range(numberOfTriangles):
pointId0 = trianglePointIds.GetId(3*i)
pointId1 = trianglePointIds.GetId(3*i+1)
pointId2 = trianglePointIds.GetId(3*i+2)
cell.GetPoints().GetPoint(pointId0, point0)
cell.GetPoints().GetPoint(pointId1, point1)
cell.GetPoints().GetPoint(pointId2, point2)
area_calc += vtk.vtkTriangle.TriangleArea(point0,point1,point2)
section_area = max(area_calc, section_area)
return section_area
def ComputeBranchSectionShape(section, origin):
# eccentricity of slice
pointIds = vtk.vtkIdList()
#cv = 0.0
#offset = 0.0
#shape = 0.0
for j in range(section.GetNumberOfCells()):
area_calc = 0.0
cell = section.GetCell(j)
if ( cell.GetCellType() != vtk.VTK_POLYGON ):
print(cell.GetCellType())
continue
center = [0.0,0.0,0.0]
for i in range(cell.GetNumberOfPoints()):
pt = section.GetPoint(cell.GetPointIds().GetId(i))
center = [p+c for p,c in zip(pt, center)]
center = [p/cell.GetNumberOfPoints() for p in center]
diff_origin = (vtk.vtkMath.Distance2BetweenPoints(center, origin))**0.5
rad_list = []
for i in range(cell.GetNumberOfPoints()):
pt = section.GetPoint(cell.GetPointIds().GetId(i))
radius = (vtk.vtkMath.Distance2BetweenPoints(origin, pt))**0.5
rad_list.append(radius)
mean = np.mean(rad_list)
stddev = np.std(rad_list)
shape = min(rad_list)/max(rad_list)
cv = (1.0 + 1.0/(4.0*cell.GetNumberOfPoints()))*stddev/mean
offset = diff_origin/mean
#print(mean, stddev, cv, offset, shape)
return cv, offset, shape
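# Hedged interpretation of the metrics above: for an ideal circular slice centred
# on the centerline point, shape -> 1.0 (min/max radius), cv -> 0.0 (little radial
# scatter), and offset -> 0.0 (slice centroid sits on the centerline).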
# get the average radius for each segment
def Execute(args):
print("evaluate centerlines")
# read centerlines
reader_ctr = vmtkscripts.vmtkSurfaceReader()
reader_ctr.InputFileName = args.centerlines
reader_ctr.Execute()
print(args.clean_ctr)
if(args.clean_ctr):
cleaner = vtk.vtkCleanPolyData()
cleaner.PointMergingOn()
cleaner.SetInputData(reader_ctr.Surface)
cleaner.Update()
centerlines = cleaner.GetOutput()
else:
centerlines = reader_ctr.Surface
centerlines.BuildLinks()
centerlines.BuildCells()
# calculate length for each segment
# seems to be some error in the previous calculation
for i in range(centerlines.GetNumberOfCells()):
cell = centerlines.GetCell(i)
length_ = 0.0
prevPoint = cell.GetPoints().GetPoint(0)
for j in range(cell.GetNumberOfPoints()):
point = cell.GetPoints().GetPoint(j)
length_ += vtk.vtkMath.Distance2BetweenPoints(prevPoint,point)**0.5
prevPoint = point
centerlines.GetCellData().GetArray("length").SetTuple(i, [length_])
#writer2 = vmtkscripts.vmtkSurfaceWriter()
#writer2.OutputFileName = "centerlines_test.vtp"
#writer2.Input = centerlines
#writer2.Execute()
#read clipped surface
reader_br = vmtkscripts.vmtkSurfaceReader()
reader_br.InputFileName = args.surface
reader_br.Execute()
#if (reader_br.Surface.GetPointData().GetNormals() == None):
#normalsFilter = vmtkscripts.vmtkSurfaceNormals()
#normalsFilter.ComputeCellNormals = 1
#normalsFilter.Surface = reader_br.Surface
#normalsFilter.NormalsArrayName = 'Normals'
#normalsFilter.Execute()
#surface_reference = normalsFilter.Surface
#else:
surface_reference = reader_br.Surface
locator_surf = vtk.vtkPointLocator()
locator_surf.SetDataSet(surface_reference)
locator_surf.BuildLocator()
locator_cell = vtk.vtkCellLocator()
locator_cell.SetDataSet(surface_reference)
locator_cell.BuildLocator()
ctr_locator_cell = vtk.vtkCellLocator()
ctr_locator_cell.SetDataSet(centerlines)
ctr_locator_cell.BuildLocator()
# read boundary
reader_br = vmtkscripts.vmtkSurfaceReader()
reader_br.InputFileName = args.boundary
reader_br.Execute()
boundary_reference = reader_br.Surface
N_bnds = boundary_reference.GetPoints().GetNumberOfPoints()
closestPoint = [0.0,0.0,0.0] # the coordinates of the closest point will be returned here
closestPointDist2 = vtk.reference(0) # the squared distance to the closest point will be returned here
ccell = vtk.vtkGenericCell()
ccellID = vtk.reference(0) # the cell id of the cell containing the closest point will be returned here
subID = vtk.reference(0) # this is rarely used (in triangle strips only, I believe)
#dist = []
cell_list = []
count = 0
cell_list_ids = vtk.vtkIdTypeArray() # have to load the Ids into an array to make them persistent;
# otherwise the same memory location is being referenced
cell_list_ids.SetNumberOfComponents(1)
for bnd_pt_id in range(N_bnds):
bnd_point = boundary_reference.GetPoints().GetPoint(bnd_pt_id)
ctr_locator_cell.FindClosestPoint(bnd_point, closestPoint, ccell, ccellID, subID, closestPointDist2)
cell_list_ids.InsertNextTuple([ccellID])
#print("hey", test)
#c_pt_id = centerlines
#print(ccellID)
n_cell_pts = ccell.GetNumberOfPoints()
start_end_pt = [int(0), int(n_cell_pts-1)]
for c_pt_id in start_end_pt:
point_ctr = ccell.GetPoints().GetPoint(c_pt_id)
dist = vtk.vtkMath.Distance2BetweenPoints(closestPoint, point_ctr)
if ( dist < 1e-7):
cell_list.append((ccell, c_pt_id, int(cell_list_ids.GetTuple(count)[0])))
#print(bnd_pt_id, c_pt_id, dist)
count += 1
#print(cell_list)
for cell, start_pt, cell_Id in cell_list:
print(cell_Id)
n_cell_pts = cell.GetNumberOfPoints()
length = centerlines.GetCellData().GetArray("length").GetTuple(cell_Id)[0]
#print(length)
prev_point = cell.GetPoints().GetPoint(start_pt)
#print(prev_point)
if( start_pt == 0):
step = 1
stop = n_cell_pts-1
else:
step = -1
stop = -1
length_new = 0.0
avg_radius = 0.0
avg_radius_old = 0.0
std_radius = 0.0
radius_squ_sum = 0.0
z_score = 0.0
count = 1
mid_point = (0.0, 0.0, 0.0)
for k in range(start_pt, stop, step):
point = centerlines.GetPoint(cell.GetPointIds().GetId(k))
length_new += vtk.vtkMath.Distance2BetweenPoints(prev_point, point)**0.5
prev_point = point
radius = centerlines.GetPointData().GetArray("MaximumInscribedSphereRadius").GetTuple(cell.GetPointIds().GetId(k))[0]
radius_squ_sum += radius**2 # using alternate form of standard deviation
avg_radius = avg_radius_old + (radius - avg_radius_old) / count
if (count > 1):
std_radius = ((radius_squ_sum - count*(avg_radius)**2.0) / (count -1))**0.5
z_score = (radius - avg_radius_old ) / std_radius
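# The running statistics above are a single-pass sketch:
#   mean_n = mean_{n-1} + (r_n - mean_{n-1}) / n
#   std_n  = sqrt((sum(r_i^2) - n * mean_n^2) / (n - 1))
# and z_score measures how far the current radius sits from the running mean.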
#print(length_new, avg_radius, std_radius)
avg_radius_old = copy.deepcopy(avg_radius)
if (length_new > 0.5*length):
ctr_mid_point = centerlines.GetPoint(cell.GetPointIds().GetId(k))
if ( length_new < 0.8*length):
continue
#print(z_score)
count += 1
if ( (z_score > 2.0 and count > int(0.5*n_cell_pts)) or (length_new > (length - avg_radius_old)) ):
# within close proximity to bifurcation
# or a small branch is intersecting with a large branch
# this theory is untested
pl_vec = centerlines.GetPointData().GetArray("FrenetTangent").GetTuple(cell.GetPointIds().GetId(k))
plane1 = vtk.vtkPlane()
plane1.SetOrigin(point)
plane1.SetNormal(pl_vec)
seam_pt = locator_surf.FindClosestPoint(point)
seamFilter = vtkvmtk.vtkvmtkTopologicalSeamFilter()
seamFilter.SetInputData(surface_reference)
seamFilter.SetClosestPoint(surface_reference.GetPoint(seam_pt))
seamFilter.SetSeamScalarsArrayName("SeamScalars")
seamFilter.SetSeamFunction(plane1)
clipper = vtk.vtkClipPolyData()
clipper.SetInputConnection(seamFilter.GetOutputPort())
clipper.GenerateClipScalarsOff()
clipper.GenerateClippedOutputOn()
connectivity = vtk.vtkPolyDataConnectivityFilter()
connectivity.SetInputConnection(clipper.GetOutputPort())
connectivity.SetExtractionModeToClosestPointRegion()
surface_mid_pt = locator_surf.FindClosestPoint(ctr_mid_point)
connectivity.SetClosestPoint(surface_reference.GetPoint(surface_mid_pt))
surfaceCleaner = vtk.vtkCleanPolyData()
surfaceCleaner.SetInputConnection(connectivity.GetOutputPort())
surfaceCleaner.Update()
surfaceTriangulator = vtk.vtkTriangleFilter()
surfaceTriangulator.SetInputConnection(surfaceCleaner.GetOutputPort())
surfaceTriangulator.PassLinesOff()
surfaceTriangulator.PassVertsOff()
surfaceTriangulator.Update()
capper = vmtkscripts.vmtkSurfaceCapper()
capper.Surface = surfaceTriangulator.GetOutput()
capper.Method = "simple"
capper.Interactive = 0
capper.Execute()
get_prop = vtk.vtkMassProperties()
get_prop.SetInputData(capper.Surface)
get_prop.Update()
volume = get_prop.GetVolume()
#new_length = centerlines.GetCellData().GetArray("length").GetTuple(cell_id)[0] - bifurcation_info[cell_id]["less_length"]
average_area = volume/length_new
print(average_area)
break
#cell_Ids = vtk.vtkIdList()
#outputLines = vtk.vtkCellArray()
#output = vtk.vtkPolyData()
#triangles = vtk.vtkCellArray()
#triangle_pd = vtk.vtkPolyData()
#triangle_pts = vtk.vtkPoints()
#lengthArray = vtk.vtkDoubleArray()
#lengthArray.SetName("length")
#lengthArray.SetNumberOfComponents(1)
#pts_ids = vtk.vtkIdList()
#factor = 1.0
#factor2 = 2.0
#pd_count = 0
#size_range = [0.0, 0.0]
#mid_points = vtk.vtkPoints()
#vertex = vtk.vtkCellArray()
#bifurcation_info = {}
#for i in range(centerlines.GetNumberOfCells()):
#bifurcation_info[i] = {"clip_id": [], "cell_pt_id": [], "mid_pt": [], "step":[], "less_length": 0.0}
#ccell = centerlines.GetCell(i)
#if ccell.GetCellType() not in (vtk.VTK_POLY_LINE, vtk.VTK_LINE):
#continue
#n_cell_pts = ccell.GetNumberOfPoints()
#start_end_pt = [0, n_cell_pts-1]
#cell_length_half = centerlines.GetCellData().GetArray("length").GetTuple(i)[0]/2.0
#for j in start_end_pt:
#pt_id_pd = ccell.GetPointIds().GetId(j)
#centerlines.GetPointCells(pt_id_pd, cell_Ids)
#if (cell_Ids.GetNumberOfIds() > 1):
#radius = centerlines.GetPointData().GetArray("MaximumInscribedSphereRadius").GetTuple(pt_id_pd)[0]
#length = 0.0
#radius2 = 0.0
#prev_point = centerlines.GetPoint(pt_id_pd)
#if( j == start_end_pt[0]):
#step = 1
#stop = start_end_pt[-1]
#else:
#step = -1
#stop = -1
#for k in range(j, stop, step):
#point = centerlines.GetPoint(ccell.GetPointIds().GetId(k))
#length += vtk.vtkMath.Distance2BetweenPoints(prev_point,point)**0.5
#prev_point = point
#if (length > (factor*radius + factor2*radius2)):
##print(length)
#pl_vec = centerlines.GetPointData().GetArray("FrenetTangent").GetTuple(ccell.GetPointIds().GetId(k))
#pl = vtk.vtkPlane()
#pl.SetOrigin(point)
#pl.SetNormal(pl_vec)point
#cut = vtk.vtkCutter()
#cut.SetInputData(surface_reference)
#cut.SetCutFunction(pl)
#cut.Update()
#ex = vtk.vtkPolyDataConnectivityFilter()
#ex.SetInputConnection(cut.GetOutputPort())
##ex.SetExtractionModeToAllRegions()
#ex.SetExtractionModeToClosestPointRegion()
#ex.SetClosestPoint(point)
#ex.Update()
#lp = ex.GetOutput()
#close_cell(lp)
#cutStrips = vtk.vtkStripper() # Forms loops (closed polylines) from cutter
#cutStrips.SetInputData(lp)
#cutStrips.Update()
#cutPoly = vtk.vtkPolyData() # This trick defines polygons as polyline loop
#cutPoly.SetPoints((cutStrips.GetOutput()).GetPoints())
#cutPoly.SetPolys((cutStrips.GetOutput()).GetLines())
#area_test = ComputePolygonArea(cutPoly)
#size_ratio = area_test/(np.pi*radius**2)
##print(area_test, radius, size_ratio)
##writerline = vmtkscripts.vmtkSurfaceWriter()
##writerline.OutputFileName = "test_loop_{0}.vtp".format(pd_count)
##writerline.Input = cutPoly #ex.GetOutput()
##writerline.Execute()
##pd_count += 1
#if (length < cell_length_half):
#if(size_ratio > 2.0 ):
#continue
#cv, offset, shape = ComputeBranchSectionShape(cutPoly, point)
#if(cv > 0.2): # standard deviation / mean
#continue
#if(offset > 0.10): # centroid of slice vs centerline point
#continue
##if(shape > 0.8):
## continue
##writerline = vmtkscripts.vmtkSurfaceWriter()
##writerline.OutputFileName = "test_loop_{0}.vtp".format(pd_count)
##writerline.Input = cutPoly #ex.GetOutput()
##writerline.Execute()
##pd_count += 1
##print(length)
#clip_id = ccell.GetPointIds().GetId(k)
#bifurcation_info[i]["clip_id"].append(clip_id)
#bifurcation_info[i]["cell_pt_id"].append(k)
#bifurcation_info[i]["step"].append(step)
#bifurcation_info[i]["less_length"] += length
#tmp_idx = k
#break
#midway_length = 0.0
#prev_point = centerlines.GetPoint(pt_id_pd)
#print("hello")
#for k in range(tmp_idx, stop, step):
#if k == 1198:
#print(k)
#point = centerlines.GetPoint(cell.GetPointIds().GetId(k))
#midway_length += vtk.vtkMath.Distance2BetweenPoints(prev_point, point)**0.5
#prev_point = point
#if (midway_length >= cell_length_half):
#bifurcation_info[i]["mid_pt"].append(point)
#pt_id = mid_points.InsertNextPoint(point)
#vertex.InsertNextCell(1, [pt_id])
#mid_idx = k
#break
#mid_point_pd = vtk.vtkPolyData()
#mid_point_pd.SetPoints(mid_points)
#mid_point_pd.SetVerts(vertex)
#writerline = vmtkscripts.vmtkSurfaceWriter()
#writerline.OutputFileName = "test_vertex_{0}.vtp".format(0)
#writerline.Input = mid_point_pd
#writerline.Execute()
##return
#tree = vtk.vtkModifiedBSPTree()
#tree.SetDataSet(surface_reference)
#tree.BuildLocator()
##t = [ 1 for i in bifurcation_info.keys() if len(bifurcation_info[i]) == 2]
#two_bif = False
#pd_count = 0
#avg_x_area = vtk.vtkDoubleArray()
#avg_x_area.SetName("avg_crosssection")
#avg_x_area.SetNumberOfComponents(1)
#avg_x_area.SetNumberOfTuples(centerlines.GetNumberOfCells())
#avg_x_area.Fill(-1.0)
#aspect_ratio = vtk.vtkDoubleArray()
#aspect_ratio.SetName("aspect_ratio")
#aspect_ratio.SetNumberOfComponents(1)
#aspect_ratio.SetNumberOfTuples(centerlines.GetNumberOfCells())
#aspect_ratio.Fill(-1.0)
#vol_array = vtk.vtkDoubleArray()
#vol_array.SetName("volume")
#vol_array.SetNumberOfComponents(1)
#vol_array.SetNumberOfTuples(centerlines.GetNumberOfCells())
#vol_array.Fill(-1.0)
#len_array = vtk.vtkDoubleArray()
#len_array.SetName("length_wo_bifurcation")
#len_array.SetNumberOfComponents(1)
#len_array.SetNumberOfTuples(centerlines.GetNumberOfCells())
#len_array.Fill(-1.0)
#append = vtk.vtkAppendPolyData()
#for cell_id in bifurcation_info:
#id_sorted = sorted(bifurcation_info[cell_id]["cell_pt_id"])
#step_direction = [x for _,x in sorted(zip(bifurcation_info[cell_id]["cell_pt_id"], bifurcation_info[cell_id]["step"]))]
##print(step_direction)
#if (len(bifurcation_info[cell_id]["cell_pt_id"]) < 2):
#two_bif = False
#else:
#two_bif = True
#diff = bifurcation_info[cell_id]["cell_pt_id"][0] - bifurcation_info[cell_id]["cell_pt_id"][1]
#if(abs(diff) < 2): # there is a problem if there less than two points
#print("houston we got a problem")
#if (not two_bif):
#clip_id = centerlines.GetCell(cell_id).GetPointIds().GetId(id_sorted[0])
#clip_id_m1 = centerlines.GetCell(cell_id).GetPointIds().GetId(id_sorted[0]+step_direction[0])
#start_pt = centerlines.GetPoint(clip_id)
#surface_pt_id = locator_surf.FindClosestPoint(start_pt)
## vector from pt(start_pt+1) - pt(start_pt)
#v_start = [ x - y for x,y in zip(centerlines.GetPoint(clip_id_m1), start_pt)]
#v_ctr_start = centerlines.GetPointData().GetArray("FrenetTangent").GetTuple(clip_id)
#v_normal_start = centerlines.GetPointData().GetArray("FrenetNormal").GetTuple(clip_id)
## want inward facing normals
#if (vtk.vtkMath.Dot(v_start, v_ctr_start) < 0.0):
#v_ctr_start = [-1.0*x for x in v_ctr_start]
##print(clip_tangent)
#plane1 = vtk.vtkPlane()
#plane1.SetOrigin(start_pt)
#plane1.SetNormal(v_ctr_start)
#seamFilter = vtkvmtk.vtkvmtkTopologicalSeamFilter()
#seamFilter.SetInputData(surface_reference)
#seamFilter.SetClosestPoint(surface_reference.GetPoint(surface_pt_id))
#seamFilter.SetSeamScalarsArrayName("SeamScalars")
#seamFilter.SetSeamFunction(plane1)
#clipper = vtk.vtkClipPolyData()
#clipper.SetInputConnection(seamFilter.GetOutputPort())
#clipper.GenerateClipScalarsOff()
#clipper.GenerateClippedOutputOn()
#connectivity = vtk.vtkPolyDataConnectivityFilter()
#connectivity.SetInputConnection(clipper.GetOutputPort())
#connectivity.SetExtractionModeToClosestPointRegion()
#surface_mid_pt = locator_surf.FindClosestPoint(bifurcation_info[cell_id]["mid_pt"][0])
#connectivity.SetClosestPoint(surface_reference.GetPoint(surface_mid_pt))
#surfaceCleaner = vtk.vtkCleanPolyData()
#surfaceCleaner.SetInputConnection(connectivity.GetOutputPort())
#surfaceCleaner.Update()
#surfaceTriangulator = vtk.vtkTriangleFilter()
#surfaceTriangulator.SetInputConnection(surfaceCleaner.GetOutputPort())
#surfaceTriangulator.PassLinesOff()
#surfaceTriangulator.PassVertsOff()
#surfaceTriangulator.Update()
#capper = vmtkscripts.vmtkSurfaceCapper()
#capper.Surface = surfaceTriangulator.GetOutput()
#capper.Method = "simple"
#capper.Interactive = 0
#capper.Execute()
#get_prop = vtk.vtkMassProperties()
#get_prop.SetInputData(capper.Surface)
#get_prop.Update()
#volume = get_prop.GetVolume()
#new_length = centerlines.GetCellData().GetArray("length").GetTuple(cell_id)[0] - bifurcation_info[cell_id]["less_length"]
#average_area = volume/new_length
#avg_x_area.SetTuple(cell_id, [average_area])
#aspect_ratio.SetTuple(cell_id, [average_area/new_length])
#vol_array.SetTuple(cell_id, [volume])
#len_array.SetTuple(cell_id, [new_length])
#append.AddInputData(capper.Surface)
#append.Update()
##print(new_length, centerlines.GetCellData().GetArray("length").GetTuple(cell_id)[0], bifurcation_info[cell_id]["less_length"])
##pd_count += 1
#writerline = vmtkscripts.vmtkSurfaceWriter()
#writerline.OutputFileName = args.out_file
#writerline.Input = append.GetOutput()
#writerline.Execute()
##print( bifurcation_info)
#centerlines.GetCellData().AddArray(avg_x_area)
#centerlines.GetCellData().AddArray(aspect_ratio)
#centerlines.GetCellData().AddArray(vol_array)
#centerlines.GetCellData().AddArray(len_array)
#writer = vmtkscripts.vmtkSurfaceWriter()
#writer.OutputFileName = args.out_segments
#writer.Input = centerlines
#writer.Execute()
if __name__=='__main__':
parser = argparse.ArgumentParser(description='estimate vertices for uniform point distribution')
parser.add_argument("-b", dest="boundary", required=True, help="boundary file", metavar="FILE")
parser.add_argument("-i", dest="surface", required=True, help="input surface file", metavar="FILE")
parser.add_argument("-c", dest="centerlines", required=True, help="centerlines", metavar="FILE")
parser.add_argument("--clean", dest="clean_ctr", action='store_true', help=" clean centerlines after")
parser.add_argument("-o", dest="out_file", required=False, help="output filename for terminal segments", metavar="FILE")
parser.add_argument("-s", dest="out_segments", required=True, help="centerlines with cross section information", metavar="FILE")
args = parser.parse_args()
#print(args)
Execute(args)
|
|
#!/usr/bin/env python
# coding: utf-8
#
# Example of synthesizing Halide schedules using OpenTuner. This program
# expects a compiled version of Halide to exist at ~/Halide or at the location
# specified by --halide-dir.
#
# Halide programs must be modified by:
# 1) Inserting AUTOTUNE_HOOK(Func) directly after the algorithm definition
# in main()
# 2) Creating a settings file that describes the functions and variables
# (see apps/halide_blur.settings for an example)
#
# Halide can be found here: https://github.com/halide/Halide
#
import adddeps # fix sys.path
import argparse
import collections
import hashlib
import json
import logging
import math
import os
import re
import subprocess
import tempfile
import textwrap
from cStringIO import StringIO
from fn import _
from pprint import pprint
import opentuner
from opentuner.search.manipulator import ConfigurationManipulator
from opentuner.search.manipulator import PowerOfTwoParameter
from opentuner.search.manipulator import PermutationParameter
from opentuner.search.manipulator import BooleanParameter
from opentuner.search.manipulator import ScheduleParameter
COMPILE_CMD = (
'{args.cxx} "{cpp}" -o "{bin}" -I "{args.halide_dir}/include" '
'"{args.halide_dir}/bin/$BUILD_PREFIX/libHalide.a" -ldl -lcurses -lpthread {args.cxxflags} '
'-DAUTOTUNE_N="{args.input_size}" -DAUTOTUNE_TRIALS={args.trials} '
'-DAUTOTUNE_LIMIT={limit} -fno-rtti')
log = logging.getLogger('halide')
parser = argparse.ArgumentParser(parents=opentuner.argparsers())
parser.add_argument('source', help='Halide source file annotated with '
'AUTOTUNE_HOOK')
parser.add_argument('--halide-dir', default=os.path.expanduser('~/Halide'),
help='Installation directory for Halide')
parser.add_argument('--input-size',
help='Input size to test with')
parser.add_argument('--trials', default=3, type=int,
help='Number of times to test each schedule')
parser.add_argument('--nesting', default=2, type=int,
help='Maximum depth for generated loops')
parser.add_argument('--max-split-factor', default=8, type=int,
help='The largest value a single split() can add')
parser.add_argument('--compile-command', default=COMPILE_CMD,
help='How to compile generated C++ code')
parser.add_argument('--cxx', default='c++',
help='C++ compiler to use (e.g., g++ or clang++)')
parser.add_argument('--cxxflags', default='',
help='Extra flags to the C++ compiler')
parser.add_argument('--tmp-dir',
default=('/run/shm' if os.access('/run/shm', os.W_OK)
else '/tmp'),
help='Where to store generated tests')
parser.add_argument('--settings-file',
help='Override location of json encoded settings')
parser.add_argument('--debug-error',
help='Stop on errors matching a given string')
parser.add_argument('--limit', type=float, default=30,
help='Kill compile + runs taking too long (seconds)')
parser.add_argument('--memory-limit', type=int, default=1024 ** 3,
help='Set memory ulimit on unix based systems')
parser.add_argument('--enable-unroll', action='store_true',
help='Enable .unroll(...) generation')
parser.add_argument('--enable-store-at', action='store_true',
help='Enable .store_at(...) generation')
parser.add_argument('--gated-store-reorder', action='store_true',
help='Only reorder storage if a special parameter is given')
group = parser.add_mutually_exclusive_group()
group.add_argument('--random-test', action='store_true',
help='Generate a random configuration and run it')
group.add_argument('--random-source', action='store_true',
help='Generate a random configuration and print source ')
group.add_argument('--make-settings-file', action='store_true',
help='Create a skeleton settings file from call graph')
# class HalideRandomConfig(opentuner.search.technique.SearchTechnique):
# def desired_configuration(self):
# '''
# inject random configs with no compute_at() calls to kickstart the search process
# '''
# cfg = self.manipulator.random()
# for k in cfg.keys():
# if re.match('.*_compute_level', k):
# cfg[k] = LoopLevel.INLINE
# return cfg
#
# technique.register(bandittechniques.AUCBanditMetaTechnique([
# HalideRandomConfig(),
# differentialevolution.DifferentialEvolutionAlt(),
# evolutionarytechniques.UniformGreedyMutation(),
# evolutionarytechniques.NormalGreedyMutation(mutation_rate=0.3),
# ], name = "HalideMetaTechnique"))
class HalideTuner(opentuner.measurement.MeasurementInterface):
def __init__(self, args):
# args.technique = ['HalideMetaTechnique']
super(HalideTuner, self).__init__(args, program_name=args.source)
timing_prefix = open(os.path.join(os.path.dirname(__file__),
'timing_prefix.h')).read()
self.template = timing_prefix + open(args.source).read()
self.min_collection_cost = float('inf')
if not args.settings_file:
args.settings_file = os.path.splitext(args.source)[0] + '.settings'
if not args.make_settings_file:
with open(args.settings_file) as fd:
self.settings = json.load(fd)
self.post_dominators = post_dominators(self.settings)
if not args.input_size:
args.input_size = self.settings['input_size']
else:
self.settings = None
self.post_dominators = None
args.input_size = '1, 1'
# set "program_version" based on hash of halidetuner.py, program source
h = hashlib.md5()
#with open(__file__) as src:
# h.update(src.read())
with open(args.source) as src:
h.update(src.read())
self._version = h.hexdigest()
def compute_order_parameter(self, func):
name = func['name']
schedule_vars = []
schedule_deps = dict()
for var in func['vars']:
schedule_vars.append((var, 0))
for i in xrange(1, self.args.nesting):
schedule_vars.append((var, i))
schedule_deps[(var, i - 1)] = [(var, i)]
return ScheduleParameter('{0}_compute_order'.format(name), schedule_vars,
schedule_deps)
def manipulator(self):
"""
The definition of the manipulator is meant to mimic the Halide::Schedule
data structure and defines the configuration space to search
"""
manipulator = HalideConfigurationManipulator(self)
manipulator.add_parameter(HalideComputeAtScheduleParameter(
'schedule', self.args, self.settings['functions'],
self.post_dominators))
for func in self.settings['functions']:
name = func['name']
manipulator.add_parameter(PermutationParameter(
'{0}_store_order'.format(name), func['vars']))
manipulator.add_parameter(
BooleanParameter('{0}_store_order_enabled'.format(name)))
manipulator.add_parameter(self.compute_order_parameter(func))
for var in func['vars']:
manipulator.add_parameter(PowerOfTwoParameter(
'{0}_vectorize'.format(name), 1, self.args.max_split_factor))
manipulator.add_parameter(PowerOfTwoParameter(
'{0}_unroll'.format(name), 1, self.args.max_split_factor))
manipulator.add_parameter(BooleanParameter(
'{0}_parallel'.format(name)))
for nesting in xrange(1, self.args.nesting):
manipulator.add_parameter(PowerOfTwoParameter(
'{0}_splitfactor_{1}_{2}'.format(name, nesting, var),
1, self.args.max_split_factor))
return manipulator
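# Hedged illustration: for a settings entry like {"name": "blur_x", "vars": ["x", "y"]}
# with --nesting=2, the manipulator above exposes parameters such as
# blur_x_store_order, blur_x_store_order_enabled, blur_x_compute_order,
# blur_x_vectorize, blur_x_unroll, blur_x_parallel and
# blur_x_splitfactor_1_x / blur_x_splitfactor_1_y.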
def cfg_to_schedule(self, cfg):
"""
Produce a Halide schedule from a configuration dictionary
"""
o = StringIO()
cnt = 0
temp_vars = list()
schedule = ComputeAtStoreAtParser(cfg['schedule'], self.post_dominators)
compute_at = schedule.compute_at
store_at = schedule.store_at
# build list of all used variable names
var_names = dict()
var_name_order = dict()
for func in self.settings['functions']:
name = func['name']
compute_order = cfg['{0}_compute_order'.format(name)]
for var in func['vars']:
var_names[(name, var, 0)] = var
for nesting in xrange(1, self.args.nesting):
split_factor = cfg.get('{0}_splitfactor_{1}_{2}'.format(
name, nesting, var), 0)
if split_factor > 1 and (name, var, nesting - 1) in var_names:
var_names[(name, var, nesting)] = '_{var}{cnt}'.format(
func=name, var=var, nesting=nesting, cnt=cnt)
temp_vars.append(var_names[(name, var, nesting)])
cnt += 1
var_name_order[name] = [var_names[(name, v, n)] for v, n in compute_order
if (name, v, n) in var_names]
# set a schedule for each function
for func in self.settings['functions']:
name = func['name']
inner_var_name = var_name_order[name][-1] # innermost variable in the reordered list for this func
vectorize = cfg['{0}_vectorize'.format(name)]
if self.args.enable_unroll:
unroll = cfg['{0}_unroll'.format(name)]
else:
unroll = 1
print >> o, 'Halide::Func(funcs["%s"])' % name
for var in func['vars']:
# handle all splits
for nesting in xrange(1, self.args.nesting):
split_factor = cfg.get('{0}_splitfactor_{1}_{2}'.format(
name, nesting, var), 0)
if split_factor <= 1:
break
for nesting2 in xrange(nesting + 1, self.args.nesting):
split_factor2 = cfg.get('{0}_splitfactor_{1}_{2}'.format(
name, nesting2, var), 0)
if split_factor2 <= 1:
break
split_factor *= split_factor2
var_name = var_names[(name, var, nesting)]
last_var_name = var_names[(name, var, nesting - 1)]
# apply unroll, vectorize factors to all surrounding splits iff we're the innermost var
if var_name == inner_var_name:
split_factor *= unroll
split_factor *= vectorize
print >> o, '.split({0}, {0}, {1}, {2})'.format(
last_var_name, var_name, split_factor)
# drop unused variables and truncate (Halide supports only 10 reorders)
if len(var_name_order[name]) > 1:
print >> o, '.reorder({0})'.format(
', '.join(reversed(var_name_order[name][:10])))
# reorder_storage
store_order_enabled = cfg['{0}_store_order_enabled'.format(name)]
if store_order_enabled or not self.args.gated_store_reorder:
store_order = cfg['{0}_store_order'.format(name)]
if len(store_order) > 1:
print >> o, '.reorder_storage({0})'.format(', '.join(store_order))
if unroll > 1:
# apply unrolling to innermost var
print >> o, '.unroll({0}, {1})'.format(
var_name_order[name][-1], unroll * vectorize)
if vectorize > 1:
# apply vectorization to innermost var
print >> o, '.vectorize({0}, {1})'.format(
var_name_order[name][-1], vectorize)
# compute_at(not root)
if (compute_at[name] is not None and
len(var_name_order[compute_at[name][0]]) >= compute_at[name][1]):
at_func, at_idx = compute_at[name]
try:
at_var = var_name_order[at_func][-at_idx]
print >> o, '.compute_at(Halide::Func(funcs["{0}"]), {1})'.format(at_func, at_var)
if not self.args.enable_store_at:
pass # disabled
elif store_at[name] is None:
print >> o, '.store_root()'
elif store_at[name] != compute_at[name]:
at_func, at_idx = store_at[name]
at_var = var_name_order[at_func][-at_idx]
print >> o, '.store_at(Halide::Func(funcs["{0}"]), {1})'.format(at_func, at_var)
except IndexError:
# this is expected when at_idx is too large
# TODO: implement a cleaner fix
pass
# compute_root
else:
parallel = cfg['{0}_parallel'.format(name)]
if parallel:
# only apply parallelism to outermost var of root funcs
print >> o, '.parallel({0})'.format(var_name_order[name][0])
print >> o, '.compute_root()'
print >> o, ';'
if temp_vars:
return 'Halide::Var {0};\n{1}'.format(
', '.join(temp_vars), o.getvalue())
else:
return o.getvalue()
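# Hedged sketch of the emitted schedule text (function/variable names illustrative):
#   Halide::Var _x0;
#   Halide::Func(funcs["blur_x"])
#   .split(x, x, _x0, 8)
#   .vectorize(_x0, 8)
#   .compute_root()
#   ;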
def schedule_to_source(self, schedule):
"""
Generate a temporary Halide cpp file with schedule inserted
"""
def repl_autotune_hook(match):
tmpl = '''
{
std::map<std::string, Halide::Internal::Function> funcs = Halide::Internal::find_transitive_calls((%(func)s).function());
%(sched)s
_autotune_timing_stub(%(func)s);
}'''
return tmpl % {"sched": schedule.replace('\n', '\n '), "func": match.group(1)}
source = re.sub(r'\n\s*AUTOTUNE_HOOK\(\s*([a-zA-Z0-9_]+)\s*\)',
repl_autotune_hook, self.template)
return source
def run_schedule(self, schedule, limit):
"""
Generate a temporary Halide cpp file with schedule inserted and run it
with our timing harness found in timing_prefix.h.
"""
return self.run_source(self.schedule_to_source(schedule), limit)
def run_baseline(self):
"""
Generate a temporary Halide cpp file with schedule inserted and run it
with our timing harness found in timing_prefix.h.
"""
def repl_autotune_hook(match):
return '\n\n_autotune_timing_stub(%s);' % match.group(1)
source = re.sub(r'\n\s*BASELINE_HOOK\(\s*([a-zA-Z0-9_]+)\s*\)',
repl_autotune_hook, self.template)
return self.run_source(source)
def run_source(self, source, limit=0, extra_args=''):
cmd = ''
with tempfile.NamedTemporaryFile(suffix='.cpp', prefix='halide',
dir=self.args.tmp_dir) as cppfile:
cppfile.write(source)
cppfile.flush()
# binfile = os.path.splitext(cppfile.name)[0] + '.bin'
# binfile = '/tmp/halide.bin'
binfile = ''
with tempfile.NamedTemporaryFile(suffix='.bin', prefix='halide',
dir=self.args.tmp_dir, delete=False) as binfiletmp:
binfile = binfiletmp.name # unique temp file to allow multiple concurrent tuner runs
assert(binfile)
cmd = self.args.compile_command.format(
cpp=cppfile.name, bin=binfile, args=self.args,
limit=math.ceil(limit) if limit < float('inf') else 0)
cmd += ' ' + extra_args
compile_result = self.call_program(cmd, limit=self.args.limit,
memory_limit=self.args.memory_limit)
if compile_result['returncode'] != 0:
log.error('compile failed: %s', compile_result)
return None
try:
result = self.call_program(binfile,
limit=self.args.limit,
memory_limit=self.args.memory_limit)
stdout = result['stdout']
stderr = result['stderr']
returncode = result['returncode']
if result['timeout']:
log.info('compiler timeout %d (%.2f+%.0f cost)', self.args.limit,
compile_result['time'], self.args.limit)
return float('inf')
elif returncode == 142 or returncode == -14:
log.info('program timeout %d (%.2f+%.2f cost)', math.ceil(limit),
compile_result['time'], result['time'])
return None
elif returncode != 0:
log.error('invalid schedule (returncode=%d): %s', returncode,
stderr.strip())
with tempfile.NamedTemporaryFile(suffix='.cpp', prefix='halide-error',
dir=self.args.tmp_dir, delete=False) as errfile:
errfile.write(source)
log.error('failed schedule logged to %s.\ncompile as `%s`.', errfile.name, cmd)
if self.args.debug_error is not None and (
self.args.debug_error in stderr
or self.args.debug_error == ""):
self.debug_schedule('/tmp/halideerror.cpp', source)
return None
else:
try:
time = json.loads(stdout)['time']
except:
log.exception('error parsing output: %s', result)
return None
log.info('success: %.4f (collection cost %.2f + %.2f)',
time, compile_result['time'], result['time'])
self.min_collection_cost = min(
self.min_collection_cost, result['time'])
return time
finally:
os.unlink(binfile)
def run_cfg(self, cfg, limit=0):
try:
schedule = self.cfg_to_schedule(cfg)
except:
log.exception('error generating schedule')
return None
return self.run_schedule(schedule, limit)
def run(self, desired_result, input, limit):
time = self.run_cfg(desired_result.configuration.data, limit)
if time is not None:
return opentuner.resultsdb.models.Result(time=time)
else:
return opentuner.resultsdb.models.Result(state='ERROR',
time=float('inf'))
def save_final_config(self, configuration):
"""called at the end of tuning"""
print 'Final Configuration:'
print self.cfg_to_schedule(configuration.data)
def debug_log_schedule(self, filename, source):
open(filename, 'w').write(source)
print 'offending schedule written to {0}'.format(filename)
def debug_schedule(self, filename, source):
self.debug_log_schedule(filename, source)
raw_input('press ENTER to continue')
def make_settings_file(self):
dump_call_graph_dir = os.path.join(os.path.dirname(__file__),
'dump-call-graph')
if not os.path.isdir(dump_call_graph_dir):
subprocess.check_call(['git', 'clone',
'http://github.com/halide/dump-call-graph.git'])
assert os.path.isdir(dump_call_graph_dir)
dump_call_graph_cpp = os.path.join(dump_call_graph_dir, 'DumpCallGraph.cpp')
callgraph_file = self.args.settings_file + '.callgraph'
def repl_autotune_hook(match):
return r'''dump_call_graph("%s", %s);
printf("{\"time\": 0}\n");
exit(0);''' % (callgraph_file, match.group(1))
source = re.sub(r'\n\s*AUTOTUNE_HOOK\(\s*([a-zA-Z0-9_]+)\s*\)',
repl_autotune_hook, self.template)
# TODO: BUG! - this only works correctly if given an absolute path to the
# program (or explicit settings file). Otherwise it generates the callgraph
# in a tmp dir somewhere and fails to find it in a local path here.
source = open(dump_call_graph_cpp).read() + source
self.run_source(source, extra_args='-I{0}'.format(dump_call_graph_dir))
callgraph = json.load(open(callgraph_file))
settings = {'input_size': '1024, 1024', 'functions': callgraph}
json.dump(settings, open(self.args.settings_file, 'w'), sort_keys=True,
indent=2)
print textwrap.dedent('''
{0} has been generated based on call graph of program.
This file likely needs some manual tweaks in order to work correctly.
The input size should be changed to have the right number of dimensions.
Any naming differences between variable names and function names must
be applied manually. Some temporary variables not in the source code
need to be manually removed.
'''.format(self.args.settings_file))
class ComputeAtStoreAtParser(object):
"""
A recursive descent parser to force proper loop nesting, and enforce post
dominator scheduling constraints
For each function input will have tokens like:
('foo', 's') = store_at location for foo
('foo', '2'), ('foo', '1') = opening the loop nests for foo,
the inner 2 variables
('foo', 'c') = the computation of foo, and closing all loop nests
The order of these tokens define a loop nest tree which we reconstruct
"""
def __init__(self, tokens, post_dominators):
self.tokens = list(tokens) # input, processed back to front
self.post_dominators = post_dominators
self.compute_at = dict()
self.store_at = dict()
self.process_root()
def process_root(self):
old_len = len(self.tokens)
out = []
while self.tokens:
if self.tokens[-1][1] == 's':
# store at root
self.store_at[self.tokens[-1][0]] = None
out.append(self.tokens.pop())
else:
self.process_loopnest(out, [])
self.tokens = list(reversed(out))
assert old_len == len(self.tokens)
def process_loopnest(self, out, stack):
func, idx = self.tokens[-1]
out.append(self.tokens.pop())
if idx != 'c':
raise Exception('Invalid schedule')
self.compute_at[func] = None
for targ_func, targ_idx in reversed(stack):
if targ_func in self.post_dominators[func]:
self.compute_at[func] = (targ_func, targ_idx)
break
close_tokens = [(f, i) for f, i in self.tokens if f == func and i != 's']
while close_tokens:
if self.tokens[-1] == close_tokens[-1]:
# proper nesting
close_tokens.pop()
out.append(self.tokens.pop())
elif self.tokens[-1][1] == 'c':
self.process_loopnest(out, stack + close_tokens[-1:])
elif self.tokens[-1][1] == 's':
# self.tokens[-1] is computed at this level
if func in self.post_dominators[self.tokens[-1][0]]:
self.store_at[self.tokens[-1][0]] = close_tokens[-1]
else:
self.store_at[self.tokens[-1][0]] = None
out.append(self.tokens.pop())
else:
# improper nesting, just close the loop and search/delete close_tokens
out.extend(reversed(close_tokens))
self.tokens = [x for x in self.tokens if x not in close_tokens]
break
class HalideConfigurationManipulator(ConfigurationManipulator):
def __init__(self, halide_tuner):
super(HalideConfigurationManipulator, self).__init__()
self.halide_tuner = halide_tuner
def hash_config(self, config):
"""
Multiple configs can lead to the same schedule, so we provide a custom
hash function that hashes the resulting schedule instead of the raw config.
This will lead to fewer duplicate tests.
"""
self.normalize(config)
try:
schedule = self.halide_tuner.cfg_to_schedule(config)
return hashlib.sha256(schedule).hexdigest()
except:
log.warning('error hashing config', exc_info=True)
return super(HalideConfigurationManipulator, self).hash_config(config)
class HalideComputeAtScheduleParameter(ScheduleParameter):
def __init__(self, name, args, functions, post_dominators):
"""
Custom ScheduleParameter that normalizes using ComputeAtStoreAtParser
"""
super(HalideComputeAtScheduleParameter, self).__init__(
name, *self.gen_nodes_deps(args, functions))
self.post_dominators = post_dominators
def gen_nodes_deps(self, args, functions):
"""
Compute the list of nodes and point-to-point deps to provide to base class
"""
nodes = list()
deps = collections.defaultdict(list)
for func in functions:
last = None
for idx in reversed(['c'] + # 'c' = compute location (and close loops)
range(1, len(func['vars']) * args.nesting + 1) +
['s']): # 's' = storage location
name = (func['name'], idx)
if last is not None:
# variables must go in order
deps[last].append(name)
last = name
nodes.append(name)
if idx == 'c':
# computes must follow call graph order
for callee in func['calls']:
deps[(callee, 'c')].append(name)
return nodes, deps
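# Hedged example: for a function f with one var and --nesting=2, the loop above
# yields nodes (f,'s'), (f,2), (f,1), (f,'c') with point-to-point deps forcing the
# order s -> 2 -> 1 -> c, i.e. the storage token opens before the loop nest and
# the compute token closes it.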
def normalize(self, cfg):
"""
First enforce basic point-to-point deps (in base class), then call
ComputeAtStoreAtParser to normalize schedule.
"""
super(HalideComputeAtScheduleParameter, self).normalize(cfg)
cfg[self.name] = ComputeAtStoreAtParser(cfg[self.name],
self.post_dominators).tokens
def post_dominators(settings):
"""
Compute post dominator tree using textbook iterative algorithm for the
call graph defined in settings
"""
functions = [f['name'] for f in settings['functions']]
calls = dict([(f['name'], set(f['calls'])) for f in settings['functions']])
inverse_calls = collections.defaultdict(set)
for k, callees in calls.items():
for v in callees:
inverse_calls[v].add(k)
dom = {functions[-1]: set([functions[-1]])}
for f in functions[:-1]:
dom[f] = set(functions)
change = True
while change:
change = False
for f in functions[:-1]:
old = dom[f]
dom[f] = set([f]) | reduce(
_ & _, [dom[c] for c in inverse_calls[f]], set(functions))
if old != dom[f]:
change = True
return dom
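# Illustrative sketch (hypothetical settings of the shape used above, with the
# output function listed last as the code expects): for
#   functions = ['f', 'g', 'out'], calls = {'f': set(), 'g': {'f'}, 'out': {'g'}}
# the iteration converges to
#   {'f': {'f', 'g', 'out'}, 'g': {'g', 'out'}, 'out': {'out'}}
# i.e. 'f' is post-dominated by 'g' and 'out' in the call graph.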
def random_test(args):
"""
Generate and run a random schedule
"""
opentuner.tuningrunmain.init_logging()
m = HalideTuner(args)
cfg = m.manipulator().random()
pprint(cfg)
print
schedule = m.cfg_to_schedule(cfg)
print schedule
print
print 'Schedule', m.run_schedule(schedule, 30)
print 'Baseline', m.run_baseline()
def random_source(args):
"""
Dump the source code of a random schedule
"""
opentuner.tuningrunmain.init_logging()
m = HalideTuner(args)
cfg = m.manipulator().random()
schedule = m.cfg_to_schedule(cfg)
source = m.schedule_to_source(schedule)
print source
def main(args):
if args.random_test:
random_test(args)
elif args.random_source:
random_source(args)
elif args.make_settings_file:
opentuner.tuningrunmain.init_logging()
HalideTuner(args).make_settings_file()
else:
HalideTuner.main(args)
if __name__ == '__main__':
main(parser.parse_args())
|
|
import ast
import datetime
import importlib
from furl import furl
import celery
from django import forms
from django.apps import apps
from django.conf.urls import url
from django.contrib import admin
from django.contrib.admin import SimpleListFilter
from django.contrib.admin.views.main import ChangeList
from django.contrib.admin.widgets import AdminDateWidget
from django.core import management
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.template.response import TemplateResponse
from django.utils import timezone
from django.utils.html import format_html
from oauth2_provider.models import AccessToken
from share.robot import RobotAppConfig
from share.models.celery import CeleryTask
from share.models.change import ChangeSet
from share.models.core import NormalizedData, ShareUser
from share.models.ingest import RawDatum, Source, SourceConfig, Harvester, Transformer
from share.models.logs import HarvestLog
from share.models.registration import ProviderRegistration
from share.models.banner import SiteBanner
from share.readonlyadmin import ReadOnlyAdmin
from share.tasks import HarvesterTask
class NormalizedDataAdmin(admin.ModelAdmin):
date_hierarchy = 'created_at'
list_filter = ['source', ]
raw_id_fields = ('raw', 'tasks',)
class ChangeSetSubmittedByFilter(SimpleListFilter):
title = 'Source'
parameter_name = 'source_id'
def lookups(self, request, model_admin):
return ShareUser.objects.filter(is_active=True).values_list('id', 'username')
def queryset(self, request, queryset):
if self.value():
return queryset.filter(normalized_data__source_id=self.value())
return queryset
class ChangeSetAdmin(admin.ModelAdmin):
list_display = ('status_', 'count_changes', 'submitted_by', 'submitted_at')
actions = ['accept_changes']
list_filter = ['status', ChangeSetSubmittedByFilter]
raw_id_fields = ('normalized_data',)
def submitted_by(self, obj):
return obj.normalized_data.source
submitted_by.short_description = 'submitted by'
def count_changes(self, obj):
return obj.changes.count()
count_changes.short_description = 'number of changes'
def status_(self, obj):
return ChangeSet.STATUS[obj.status].title()
class AppLabelFilter(admin.SimpleListFilter):
title = 'App Label'
parameter_name = 'app_label'
def lookups(self, request, model_admin):
return sorted([
(config.label, config.label)
for config in apps.get_app_configs()
if isinstance(config, RobotAppConfig)
])
def queryset(self, request, queryset):
if self.value():
return queryset.filter(app_label=self.value())
return queryset
class TaskNameFilter(admin.SimpleListFilter):
title = 'Task'
parameter_name = 'task'
def lookups(self, request, model_admin):
return sorted(
(key, key)
for key in celery.current_app.tasks.keys()
if key.startswith('share.')
)
def queryset(self, request, queryset):
if self.value():
return queryset.filter(name=self.value())
return queryset
class CeleryTaskChangeList(ChangeList):
def get_ordering(self, request, queryset):
return ['-timestamp']
class CeleryTaskAdmin(admin.ModelAdmin):
list_display = ('timestamp', 'name', 'status', 'provider', 'app_label', 'started_by')
actions = ['retry_tasks']
list_filter = ['status', TaskNameFilter, AppLabelFilter, 'started_by']
list_select_related = ('provider', 'started_by')
fields = (
('app_label', 'app_version'),
('started_by', 'provider'),
('uuid', 'name'),
('args', 'kwargs'),
'timestamp',
'status',
'traceback',
)
    readonly_fields = ('name', 'uuid', 'args', 'kwargs', 'status', 'app_version', 'app_label', 'timestamp', 'traceback', 'started_by', 'provider')
def traceback(self, task):
return apps.get_model('djcelery', 'taskmeta').objects.filter(task_id=task.uuid).first().traceback
def get_changelist(self, request, **kwargs):
return CeleryTaskChangeList
def retry_tasks(self, request, queryset):
for task in queryset:
task_id = str(task.uuid)
parts = task.name.rpartition('.')
Task = getattr(importlib.import_module(parts[0]), parts[2])
if task.app_label:
args = (task.started_by.id, task.app_label) + ast.literal_eval(task.args)
else:
args = (task.started_by.id,) + ast.literal_eval(task.args)
kwargs = ast.literal_eval(task.kwargs)
Task().apply_async(args, kwargs, task_id=task_id)
retry_tasks.short_description = 'Retry tasks'
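    # Note (illustrative): for a task named like 'share.tasks.HarvesterTask',
    # rpartition('.') yields ('share.tasks', '.', 'HarvesterTask'), so the task
    # class is re-imported from its module and re-applied with the original
    # task_id and the recorded started_by/app_label arguments.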
class AbstractCreativeWorkAdmin(admin.ModelAdmin):
list_display = ('type', 'title', 'num_contributors')
list_filter = ['type']
raw_id_fields = ('change', 'extra', 'extra_version', 'same_as', 'same_as_version', 'subjects')
def num_contributors(self, obj):
return obj.contributors.count()
num_contributors.short_description = 'Contributors'
class AbstractAgentAdmin(admin.ModelAdmin):
list_display = ('type', 'name')
list_filter = ('type',)
raw_id_fields = ('change', 'extra', 'extra_version', 'same_as', 'same_as_version',)
class TagAdmin(admin.ModelAdmin):
raw_id_fields = ('change', 'extra', 'extra_version', 'same_as', 'same_as_version',)
class RawDatumAdmin(admin.ModelAdmin):
raw_id_fields = ()
class AccessTokenAdmin(admin.ModelAdmin):
list_display = ('token', 'user', 'scope')
class ProviderRegistrationAdmin(ReadOnlyAdmin):
list_display = ('source_name', 'status_', 'submitted_at', 'submitted_by', 'direct_source')
list_filter = ('direct_source', 'status',)
readonly_fields = ('submitted_at', 'submitted_by',)
def status_(self, obj):
return ProviderRegistration.STATUS[obj.status].title()
class SiteBannerAdmin(admin.ModelAdmin):
list_display = ('title', 'color', 'icon', 'active')
list_editable = ('active',)
ordering = ('-active', '-last_modified_at')
readonly_fields = ('created_at', 'created_by', 'last_modified_at', 'last_modified_by')
def save_model(self, request, obj, form, change):
if not change:
obj.created_by = request.user
obj.last_modified_by = request.user
super().save_model(request, obj, form, change)
class SourceConfigFilter(admin.SimpleListFilter):
title = 'Source Config'
parameter_name = 'source_config'
def lookups(self, request, model_admin):
# TODO make this into a cool hierarchy deal
# return SourceConfig.objects.select_related('source').values_list('
return SourceConfig.objects.order_by('label').values_list('id', 'label')
def queryset(self, request, queryset):
if self.value():
return queryset.filter(source_config=self.value())
class HarvestLogAdmin(admin.ModelAdmin):
list_display = ('id', 'source', 'label', 'share_version', 'status_', 'start_date_', 'end_date_', 'harvest_log_actions', )
list_filter = ('status', SourceConfigFilter, )
list_select_related = ('source_config__source', )
readonly_fields = ('harvest_log_actions',)
actions = ('restart_tasks', )
STATUS_COLORS = {
HarvestLog.STATUS.created: 'blue',
HarvestLog.STATUS.started: 'cyan',
HarvestLog.STATUS.failed: 'red',
HarvestLog.STATUS.succeeded: 'green',
HarvestLog.STATUS.rescheduled: 'goldenrod',
HarvestLog.STATUS.forced: 'maroon',
HarvestLog.STATUS.skipped: 'orange',
HarvestLog.STATUS.retried: 'darkseagreen',
}
def source(self, obj):
return obj.source_config.source.long_title
def label(self, obj):
return obj.source_config.label
def start_date_(self, obj):
return obj.start_date.isoformat()
def end_date_(self, obj):
return obj.end_date.isoformat()
def status_(self, obj):
if obj.status == HarvestLog.STATUS.created and (timezone.now() - obj.date_modified) > datetime.timedelta(days=1, hours=6):
return format_html('<span style="font-weight: bold;">Lost</span>')
return format_html(
'<span style="font-weight: bold; color: {}">{}</span>',
self.STATUS_COLORS[obj.status],
HarvestLog.STATUS[obj.status].title(),
)
def restart_tasks(self, request, queryset):
for log in queryset.select_related('source_config'):
HarvesterTask().apply_async((1, log.source_config.label), {
'end': log.end_date.isoformat(),
'start': log.start_date.isoformat(),
}, task_id=log.task_id, restarted=True)
restart_tasks.short_description = 'Restart selected tasks'
def harvest_log_actions(self, obj):
url = furl(reverse('admin:source-config-harvest', args=[obj.source_config_id]))
url.args['start'] = self.start_date_(obj)
url.args['end'] = self.end_date_(obj)
url.args['superfluous'] = True
return format_html('<a class="button" href="{}">Restart</a>', url.url)
harvest_log_actions.short_description = 'Actions'
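    # Note (illustrative): the link targets the harvest view registered by
    # SourceConfigAdmin.get_urls() below ('admin:source-config-harvest'), with
    # start, end and superfluous passed as query parameters, roughly
    # .../sourceconfig/<source_config_id>/harvest/?start=...&end=...&superfluous=True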
class HarvestForm(forms.Form):
start = forms.DateField(widget=AdminDateWidget())
end = forms.DateField(widget=AdminDateWidget())
superfluous = forms.BooleanField(required=False)
def clean(self):
super().clean()
if self.cleaned_data['start'] > self.cleaned_data['end']:
raise forms.ValidationError('Start date cannot be after end date.')
class SourceConfigAdmin(admin.ModelAdmin):
list_display = ('label', 'source_', 'version', 'enabled', 'source_config_actions')
list_select_related = ('source',)
readonly_fields = ('source_config_actions',)
def source_(self, obj):
return obj.source.long_title
def enabled(self, obj):
return not obj.disabled
enabled.boolean = True
def get_urls(self):
return [
url(
r'^(?P<config_id>.+)/harvest/$',
self.admin_site.admin_view(self.harvest),
name='source-config-harvest'
)
] + super().get_urls()
def source_config_actions(self, obj):
if obj.harvester_id is None:
return ''
return format_html(
'<a class="button" href="{}">Harvest</a>',
reverse('admin:source-config-harvest', args=[obj.pk]),
)
source_config_actions.short_description = 'Actions'
def harvest(self, request, config_id):
config = self.get_object(request, config_id)
if config.harvester_id is None:
raise ValueError('You need a harvester to harvest.')
if request.method == 'POST':
form = HarvestForm(request.POST)
if form.is_valid():
kwargs = {
'start': form.cleaned_data['start'],
'end': form.cleaned_data['end'],
'superfluous': form.cleaned_data['superfluous'],
'async': True,
'quiet': True,
'ignore_disabled': True,
}
management.call_command('fullharvest', config.label, **kwargs)
self.message_user(request, 'Started harvesting {}!'.format(config.label))
url = reverse(
'admin:share_harvestlog_changelist',
current_app=self.admin_site.name,
)
return HttpResponseRedirect(url)
else:
initial = {'start': config.earliest_date, 'end': timezone.now().date()}
for field in HarvestForm.base_fields.keys():
if field in request.GET:
initial[field] = request.GET[field]
form = HarvestForm(initial=initial)
context = self.admin_site.each_context(request)
context['opts'] = self.model._meta
context['form'] = form
context['source_config'] = config
context['title'] = 'Harvest {}'.format(config.label)
return TemplateResponse(request, 'admin/harvest.html', context)
class SourceAdminInline(admin.StackedInline):
model = Source
class ShareUserAdmin(admin.ModelAdmin):
inlines = (SourceAdminInline,)
admin.site.unregister(AccessToken)
admin.site.register(AccessToken, AccessTokenAdmin)
admin.site.register(CeleryTask, CeleryTaskAdmin)
admin.site.register(ChangeSet, ChangeSetAdmin)
admin.site.register(HarvestLog, HarvestLogAdmin)
admin.site.register(NormalizedData, NormalizedDataAdmin)
admin.site.register(ProviderRegistration, ProviderRegistrationAdmin)
admin.site.register(RawDatum, RawDatumAdmin)
admin.site.register(SiteBanner, SiteBannerAdmin)
admin.site.register(Harvester)
admin.site.register(ShareUser, ShareUserAdmin)
admin.site.register(Source)
admin.site.register(SourceConfig, SourceConfigAdmin)
admin.site.register(Transformer)
|
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from future.utils import viewitems
class _CompressedNode(object):
"""Represents a node in the compressed trie
Parameters
----------
key : string
the key attached to the node
values : list of objects, optional
the values attached to this node
Attributes
----------
values : list of objects
the values attached to this node
key : string
the key attached to the node
children : dict of {string: _CompressedNode}
the children nodes below this node
"""
def __init__(self, key, values=None):
self.values = values or []
self.key = key
self.children = {}
def __nonzero__(self):
return (self.key != "" or len(self.values) > 0
or len(self.children.keys()) > 0)
def __len__(self):
"""Returns the number of values attached to the node
.. warning:: This method is recursive
"""
return sum(len(n) for n in self.children.values()) + len(self.values)
@property
def size(self):
"""int with the number of nodes below the node
.. warning:: This method is recursive
"""
return sum(n.size for n in self.children.values()) + 1
@property
def prefix_map(self):
"""Dict with the prefix map
Dictionary of {values: list of values} containing the prefix map
of this node
"""
mapping = {}
if len(self.children) == 0:
# we have a leaf
mapping = {self.values[0]: self.values[1:]}
else:
# we are at an internal node
for child in self.children.values():
mapping.update(child.prefix_map)
# get largest group
n = -1
key_largest = None
for key, value in viewitems(mapping):
if len(value) > n:
n = len(value)
key_largest = key
# append this node's values
mapping[key_largest].extend(self.values)
return mapping
def insert(self, key, value):
"""Inserts key with value in the node
Parameters
----------
key : string
The string key attached to the value
value : object
Object to attach to the key
"""
node_key_len = len(self.key)
length = min(node_key_len, len(key))
# Follow the key into the tree
split_node = False
index = 0
while index < length and not split_node:
split_node = key[index] != self.key[index]
index += 1
if split_node:
# Index has been incremented after split_node was set to true,
# decrement it to make it work
index -= 1
# We need to split up the node pointed by index
# Get the key for the new node
new_key_node = _CompressedNode(key[index:], [value])
# Get a new node for the old key node
old_key_node = _CompressedNode(self.key[index:], self.values)
old_key_node.children = self.children
self.children = {key[index]: new_key_node,
self.key[index]: old_key_node}
self.key = self.key[:index]
self.values = []
elif index == len(self.key) and index == len(key):
# The new key matches node key exactly
self.values.append(value)
elif index < node_key_len:
# Key shorter than node key
lower_node = _CompressedNode(self.key[index:], self.values)
lower_node.children = self.children
self.children = {self.key[index]: lower_node}
self.key = key
self.values = [value]
else:
# New key longer than current node key
node = self.children.get(key[index])
if node:
# insert into next node
node.insert(key[index:], value)
else:
# Create new node
new_node = _CompressedNode(key[index:], [value])
self.children[key[index]] = new_node
def find(self, key):
"""Searches for key and returns values stored for the key.
Parameters
----------
key : string
The key of the value to search for
Returns
-------
object
The value attached to the key
"""
# key exhausted
if len(key) == 0:
return self.values
# find matching part of key and node_key
min_length = min(len(key), len(self.key))
keys_diff = False
index = 0
while index < min_length and not keys_diff:
keys_diff = key[index] != self.key[index]
index += 1
if keys_diff:
return []
elif index == len(key):
# key and node_key match exactly
return self.values
else:
node = self.children.get(key[index])
if node:
# descend to next node
return node.find(key[index:])
return []
class CompressedTrie(object):
""" A compressed Trie for a list of (key, value) pairs
Parameters
----------
pair_list : list of tuples, optional
List of (key, value) pairs to initialize the Trie
Attributes
----------
size
prefix_map
"""
def __init__(self, pair_list=None):
self._root = _CompressedNode("")
if pair_list:
for key, value in pair_list:
self.insert(key, value)
def __nonzero__(self):
return bool(self._root)
def __len__(self):
return len(self._root)
@property
def size(self):
"""int with the number of nodes in the Trie"""
return self._root.size
@property
def prefix_map(self):
"""Dict with the prefix map
Dictionary of {values: list of values} containing the prefix map
"""
return self._root.prefix_map
def insert(self, key, value):
"""Inserts key with value in Trie
Parameters
----------
key : string
The string key attached to the value
value : object
Object to attach to the key
"""
self._root.insert(key, value)
def find(self, key):
"""Searches for key and returns values stored for the key.
Parameters
----------
key : string
Returns
-------
object
The value attached to the key
"""
return self._root.find(key)
def fasta_to_pairlist(seqs):
"""Yields (key, value) pairs, useful for populating a Trie object
Parameters
----------
seqs : Iterable
tuples of the form ``(label, seq)``, e.g., as obtained by
skbio.parse.sequences.parse_fasta
Returns
-------
GeneratorType
yields tuples of the form ``(seq, label)``
"""
for label, seq in seqs:
yield seq, label
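# Illustrative usage sketch (hypothetical labels and sequences):
#
#   >>> seqs = [("label1", "ACGT"), ("label2", "ACGG")]
#   >>> trie = CompressedTrie(fasta_to_pairlist(seqs))
#   >>> trie.find("ACGT")
#   ['label1']
#   >>> sorted(trie.prefix_map)
#   ['label1', 'label2']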
|
|
# Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define API Datasets."""
from __future__ import absolute_import
import six
import copy
import google.cloud._helpers
from google.cloud.bigquery import _helpers
from google.cloud.bigquery.model import ModelReference
from google.cloud.bigquery.routine import RoutineReference
from google.cloud.bigquery.table import TableReference
from google.cloud.bigquery.encryption_configuration import EncryptionConfiguration
def _get_table_reference(self, table_id):
"""Constructs a TableReference.
Args:
table_id (str): The ID of the table.
Returns:
google.cloud.bigquery.table.TableReference:
A table reference for a table in this dataset.
"""
return TableReference(self, table_id)
def _get_model_reference(self, model_id):
"""Constructs a ModelReference.
Args:
model_id (str): the ID of the model.
Returns:
google.cloud.bigquery.model.ModelReference:
A ModelReference for a model in this dataset.
"""
return ModelReference.from_api_repr(
{"projectId": self.project, "datasetId": self.dataset_id, "modelId": model_id}
)
def _get_routine_reference(self, routine_id):
"""Constructs a RoutineReference.
Args:
routine_id (str): the ID of the routine.
Returns:
google.cloud.bigquery.routine.RoutineReference:
A RoutineReference for a routine in this dataset.
"""
return RoutineReference.from_api_repr(
{
"projectId": self.project,
"datasetId": self.dataset_id,
"routineId": routine_id,
}
)
class AccessEntry(object):
"""Represents grant of an access role to an entity.
An entry must have exactly one of the allowed :attr:`ENTITY_TYPES`. If
anything but ``view`` is set, a ``role`` is also required. ``role`` is
omitted for a ``view``, because ``view`` s are always read-only.
See https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets.
Attributes:
role (str):
Role granted to the entity. The following string values are
supported: `'READER'`, `'WRITER'`, `'OWNER'`. It may also be
:data:`None` if the ``entity_type`` is ``view``.
entity_type (str):
Type of entity being granted the role. One of :attr:`ENTITY_TYPES`.
entity_id (Union[str, Dict[str, str]]):
If the ``entity_type`` is not 'view', the ``entity_id`` is the
``str`` ID of the entity being granted the role. If the
``entity_type`` is 'view', the ``entity_id`` is a ``dict``
representing the view from a different dataset to grant access to
in the following format::
{
'projectId': string,
'datasetId': string,
'tableId': string
}
Raises:
ValueError:
If the ``entity_type`` is not among :attr:`ENTITY_TYPES`, or if a
``view`` has ``role`` set, or a non ``view`` **does not** have a
``role`` set.
Examples:
>>> entry = AccessEntry('OWNER', 'userByEmail', 'user@example.com')
>>> view = {
... 'projectId': 'my-project',
... 'datasetId': 'my_dataset',
... 'tableId': 'my_table'
... }
>>> entry = AccessEntry(None, 'view', view)
"""
ENTITY_TYPES = frozenset(
["userByEmail", "groupByEmail", "domain", "specialGroup", "view"]
)
"""Allowed entity types."""
def __init__(self, role, entity_type, entity_id):
if entity_type not in self.ENTITY_TYPES:
message = "Entity type %r not among: %s" % (
entity_type,
", ".join(self.ENTITY_TYPES),
)
raise ValueError(message)
if entity_type == "view":
if role is not None:
raise ValueError(
"Role must be None for a view. Received " "role: %r" % (role,)
)
else:
if role is None:
raise ValueError(
"Role must be set for entity " "type %r" % (entity_type,)
)
self.role = role
self.entity_type = entity_type
self.entity_id = entity_id
def __eq__(self, other):
if not isinstance(other, AccessEntry):
return NotImplemented
return (
self.role == other.role
and self.entity_type == other.entity_type
and self.entity_id == other.entity_id
)
def __ne__(self, other):
return not self == other
def __repr__(self):
return "<AccessEntry: role=%s, %s=%s>" % (
self.role,
self.entity_type,
self.entity_id,
)
def to_api_repr(self):
"""Construct the API resource representation of this access entry
Returns:
Dict[str, object]: Access entry represented as an API resource
"""
resource = {self.entity_type: self.entity_id}
if self.role is not None:
resource["role"] = self.role
return resource
@classmethod
def from_api_repr(cls, resource):
"""Factory: construct an access entry given its API representation
Args:
resource (Dict[str, object]):
Access entry resource representation returned from the API
Returns:
google.cloud.bigquery.dataset.AccessEntry:
Access entry parsed from ``resource``.
Raises:
ValueError:
If the resource has more keys than ``role`` and one additional
key.
"""
entry = resource.copy()
role = entry.pop("role", None)
entity_type, entity_id = entry.popitem()
if len(entry) != 0:
raise ValueError("Entry has unexpected keys remaining.", entry)
return cls(role, entity_type, entity_id)
class DatasetReference(object):
"""DatasetReferences are pointers to datasets.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#datasetreference
Args:
project (str): The ID of the project
dataset_id (str): The ID of the dataset
Raises:
ValueError: If either argument is not of type ``str``.
"""
def __init__(self, project, dataset_id):
if not isinstance(project, six.string_types):
raise ValueError("Pass a string for project")
if not isinstance(dataset_id, six.string_types):
raise ValueError("Pass a string for dataset_id")
self._project = project
self._dataset_id = dataset_id
@property
def project(self):
"""str: Project ID of the dataset."""
return self._project
@property
def dataset_id(self):
"""str: Dataset ID."""
return self._dataset_id
@property
def path(self):
"""str: URL path for the dataset based on project and dataset ID."""
return "/projects/%s/datasets/%s" % (self.project, self.dataset_id)
table = _get_table_reference
model = _get_model_reference
routine = _get_routine_reference
@classmethod
def from_api_repr(cls, resource):
"""Factory: construct a dataset reference given its API representation
Args:
resource (Dict[str, str]):
Dataset reference resource representation returned from the API
Returns:
google.cloud.bigquery.dataset.DatasetReference:
Dataset reference parsed from ``resource``.
"""
project = resource["projectId"]
dataset_id = resource["datasetId"]
return cls(project, dataset_id)
@classmethod
def from_string(cls, dataset_id, default_project=None):
"""Construct a dataset reference from dataset ID string.
Args:
dataset_id (str):
A dataset ID in standard SQL format. If ``default_project``
is not specified, this must include both the project ID and
the dataset ID, separated by ``.``.
default_project (str):
Optional. The project ID to use when ``dataset_id`` does not
include a project ID.
Returns:
DatasetReference:
Dataset reference parsed from ``dataset_id``.
Examples:
>>> DatasetReference.from_string('my-project-id.some_dataset')
DatasetReference('my-project-id', 'some_dataset')
Raises:
ValueError:
If ``dataset_id`` is not a fully-qualified dataset ID in
standard SQL format.
"""
output_dataset_id = dataset_id
output_project_id = default_project
parts = _helpers._split_id(dataset_id)
if len(parts) == 1 and not default_project:
raise ValueError(
"When default_project is not set, dataset_id must be a "
"fully-qualified dataset ID in standard SQL format, "
'e.g., "project.dataset_id" got {}'.format(dataset_id)
)
elif len(parts) == 2:
output_project_id, output_dataset_id = parts
elif len(parts) > 2:
raise ValueError(
"Too many parts in dataset_id. Expected a fully-qualified "
"dataset ID in standard SQL format. e.g. "
'"project.dataset_id", got {}'.format(dataset_id)
)
return cls(output_project_id, output_dataset_id)
def to_api_repr(self):
"""Construct the API resource representation of this dataset reference
Returns:
Dict[str, str]: dataset reference represented as an API resource
"""
return {"projectId": self._project, "datasetId": self._dataset_id}
def _key(self):
"""A tuple key that uniquely describes this field.
Used to compute this instance's hashcode and evaluate equality.
Returns:
Tuple[str]: The contents of this :class:`.DatasetReference`.
"""
return (self._project, self._dataset_id)
def __eq__(self, other):
if not isinstance(other, DatasetReference):
return NotImplemented
return self._key() == other._key()
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self._key())
def __repr__(self):
return "DatasetReference{}".format(self._key())
class Dataset(object):
"""Datasets are containers for tables.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource-dataset
Args:
dataset_ref (Union[google.cloud.bigquery.dataset.DatasetReference, str]):
A pointer to a dataset. If ``dataset_ref`` is a string, it must
include both the project ID and the dataset ID, separated by
``.``.
"""
_PROPERTY_TO_API_FIELD = {
"access_entries": "access",
"created": "creationTime",
"default_partition_expiration_ms": "defaultPartitionExpirationMs",
"default_table_expiration_ms": "defaultTableExpirationMs",
"friendly_name": "friendlyName",
"default_encryption_configuration": "defaultEncryptionConfiguration",
}
def __init__(self, dataset_ref):
if isinstance(dataset_ref, six.string_types):
dataset_ref = DatasetReference.from_string(dataset_ref)
self._properties = {"datasetReference": dataset_ref.to_api_repr(), "labels": {}}
@property
def project(self):
"""str: Project ID of the project bound to the dataset."""
return self._properties["datasetReference"]["projectId"]
@property
def path(self):
"""str: URL path for the dataset based on project and dataset ID."""
return "/projects/%s/datasets/%s" % (self.project, self.dataset_id)
@property
def access_entries(self):
"""List[google.cloud.bigquery.dataset.AccessEntry]: Dataset's access
entries.
``role`` augments the entity type and must be present **unless** the
entity type is ``view``.
Raises:
TypeError: If 'value' is not a sequence
ValueError:
If any item in the sequence is not an
:class:`~google.cloud.bigquery.dataset.AccessEntry`.
"""
entries = self._properties.get("access", [])
return [AccessEntry.from_api_repr(entry) for entry in entries]
@access_entries.setter
def access_entries(self, value):
if not all(isinstance(field, AccessEntry) for field in value):
raise ValueError("Values must be AccessEntry instances")
entries = [entry.to_api_repr() for entry in value]
self._properties["access"] = entries
@property
def created(self):
"""Union[datetime.datetime, None]: Datetime at which the dataset was
created (:data:`None` until set from the server).
"""
creation_time = self._properties.get("creationTime")
if creation_time is not None:
# creation_time will be in milliseconds.
return google.cloud._helpers._datetime_from_microseconds(
1000.0 * float(creation_time)
)
@property
def dataset_id(self):
"""str: Dataset ID."""
return self._properties["datasetReference"]["datasetId"]
@property
def full_dataset_id(self):
"""Union[str, None]: ID for the dataset resource (:data:`None` until
set from the server)
In the format ``project_id:dataset_id``.
"""
return self._properties.get("id")
@property
def reference(self):
"""google.cloud.bigquery.dataset.DatasetReference: A reference to this
dataset.
"""
return DatasetReference(self.project, self.dataset_id)
@property
def etag(self):
"""Union[str, None]: ETag for the dataset resource (:data:`None` until
set from the server).
"""
return self._properties.get("etag")
@property
def modified(self):
"""Union[datetime.datetime, None]: Datetime at which the dataset was
last modified (:data:`None` until set from the server).
"""
modified_time = self._properties.get("lastModifiedTime")
if modified_time is not None:
# modified_time will be in milliseconds.
return google.cloud._helpers._datetime_from_microseconds(
1000.0 * float(modified_time)
)
@property
def self_link(self):
"""Union[str, None]: URL for the dataset resource (:data:`None` until
set from the server).
"""
return self._properties.get("selfLink")
@property
def default_partition_expiration_ms(self):
"""Optional[int]: The default partition expiration for all
partitioned tables in the dataset, in milliseconds.
Once this property is set, all newly-created partitioned tables in
        the dataset will have a ``time_partitioning.expiration_ms`` property
set to this value, and changing the value will only affect new
tables, not existing ones. The storage in a partition will have an
expiration time of its partition time plus this value.
Setting this property overrides the use of
``default_table_expiration_ms`` for partitioned tables: only one of
``default_table_expiration_ms`` and
``default_partition_expiration_ms`` will be used for any new
partitioned table. If you provide an explicit
``time_partitioning.expiration_ms`` when creating or updating a
partitioned table, that value takes precedence over the default
partition expiration time indicated by this property.
"""
return _helpers._int_or_none(
self._properties.get("defaultPartitionExpirationMs")
)
@default_partition_expiration_ms.setter
def default_partition_expiration_ms(self, value):
self._properties["defaultPartitionExpirationMs"] = _helpers._str_or_none(value)
@property
def default_table_expiration_ms(self):
"""Union[int, None]: Default expiration time for tables in the dataset
(defaults to :data:`None`).
Raises:
ValueError: For invalid value types.
"""
return _helpers._int_or_none(self._properties.get("defaultTableExpirationMs"))
@default_table_expiration_ms.setter
def default_table_expiration_ms(self, value):
if not isinstance(value, six.integer_types) and value is not None:
raise ValueError("Pass an integer, or None")
self._properties["defaultTableExpirationMs"] = _helpers._str_or_none(value)
@property
def description(self):
"""Union[str, None]: Description of the dataset as set by the user
(defaults to :data:`None`).
Raises:
ValueError: for invalid value types.
"""
return self._properties.get("description")
@description.setter
def description(self, value):
if not isinstance(value, six.string_types) and value is not None:
raise ValueError("Pass a string, or None")
self._properties["description"] = value
@property
def friendly_name(self):
"""Union[str, None]: Title of the dataset as set by the user
(defaults to :data:`None`).
Raises:
ValueError: for invalid value types.
"""
return self._properties.get("friendlyName")
@friendly_name.setter
def friendly_name(self, value):
if not isinstance(value, six.string_types) and value is not None:
raise ValueError("Pass a string, or None")
self._properties["friendlyName"] = value
@property
def location(self):
"""Union[str, None]: Location in which the dataset is hosted as set by
the user (defaults to :data:`None`).
Raises:
ValueError: for invalid value types.
"""
return self._properties.get("location")
@location.setter
def location(self, value):
if not isinstance(value, six.string_types) and value is not None:
raise ValueError("Pass a string, or None")
self._properties["location"] = value
@property
def labels(self):
"""Dict[str, str]: Labels for the dataset.
This method always returns a dict. To change a dataset's labels,
modify the dict, then call
:meth:`google.cloud.bigquery.client.Client.update_dataset`. To delete
a label, set its value to :data:`None` before updating.
Raises:
ValueError: for invalid value types.
"""
return self._properties.setdefault("labels", {})
@labels.setter
def labels(self, value):
if not isinstance(value, dict):
raise ValueError("Pass a dict")
self._properties["labels"] = value
@property
def default_encryption_configuration(self):
"""google.cloud.bigquery.encryption_configuration.EncryptionConfiguration: Custom
encryption configuration for all tables in the dataset.
Custom encryption configuration (e.g., Cloud KMS keys) or :data:`None`
if using default encryption.
See `protecting data with Cloud KMS keys
<https://cloud.google.com/bigquery/docs/customer-managed-encryption>`_
in the BigQuery documentation.
"""
prop = self._properties.get("defaultEncryptionConfiguration")
if prop:
prop = EncryptionConfiguration.from_api_repr(prop)
return prop
@default_encryption_configuration.setter
def default_encryption_configuration(self, value):
api_repr = value
if value:
api_repr = value.to_api_repr()
self._properties["defaultEncryptionConfiguration"] = api_repr
@classmethod
def from_string(cls, full_dataset_id):
"""Construct a dataset from fully-qualified dataset ID.
Args:
full_dataset_id (str):
A fully-qualified dataset ID in standard SQL format. Must
include both the project ID and the dataset ID, separated by
``.``.
Returns:
Dataset: Dataset parsed from ``full_dataset_id``.
Examples:
>>> Dataset.from_string('my-project-id.some_dataset')
Dataset(DatasetReference('my-project-id', 'some_dataset'))
Raises:
ValueError:
If ``full_dataset_id`` is not a fully-qualified dataset ID in
standard SQL format.
"""
return cls(DatasetReference.from_string(full_dataset_id))
@classmethod
def from_api_repr(cls, resource):
"""Factory: construct a dataset given its API representation
Args:
resource (Dict[str: object]):
Dataset resource representation returned from the API
Returns:
google.cloud.bigquery.dataset.Dataset:
Dataset parsed from ``resource``.
"""
if (
"datasetReference" not in resource
or "datasetId" not in resource["datasetReference"]
):
raise KeyError(
"Resource lacks required identity information:"
'["datasetReference"]["datasetId"]'
)
project_id = resource["datasetReference"]["projectId"]
dataset_id = resource["datasetReference"]["datasetId"]
dataset = cls(DatasetReference(project_id, dataset_id))
dataset._properties = copy.deepcopy(resource)
return dataset
def to_api_repr(self):
"""Construct the API resource representation of this dataset
Returns:
Dict[str, object]: The dataset represented as an API resource
"""
return copy.deepcopy(self._properties)
def _build_resource(self, filter_fields):
"""Generate a resource for ``update``."""
return _helpers._build_resource_from_properties(self, filter_fields)
table = _get_table_reference
model = _get_model_reference
routine = _get_routine_reference
def __repr__(self):
return "Dataset({})".format(repr(self.reference))
class DatasetListItem(object):
"""A read-only dataset resource from a list operation.
For performance reasons, the BigQuery API only includes some of the
dataset properties when listing datasets. Notably,
:attr:`~google.cloud.bigquery.dataset.Dataset.access_entries` is missing.
For a full list of the properties that the BigQuery API returns, see the
`REST documentation for datasets.list
<https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list>`_.
Args:
resource (Dict[str, str]):
A dataset-like resource object from a dataset list response. A
``datasetReference`` property is required.
Raises:
ValueError:
If ``datasetReference`` or one of its required members is missing
from ``resource``.
"""
def __init__(self, resource):
if "datasetReference" not in resource:
raise ValueError("resource must contain a datasetReference value")
if "projectId" not in resource["datasetReference"]:
raise ValueError(
"resource['datasetReference'] must contain a projectId value"
)
if "datasetId" not in resource["datasetReference"]:
raise ValueError(
"resource['datasetReference'] must contain a datasetId value"
)
self._properties = resource
@property
def project(self):
"""str: Project bound to the dataset."""
return self._properties["datasetReference"]["projectId"]
@property
def dataset_id(self):
"""str: Dataset ID."""
return self._properties["datasetReference"]["datasetId"]
@property
def full_dataset_id(self):
"""Union[str, None]: ID for the dataset resource (:data:`None` until
set from the server)
In the format ``project_id:dataset_id``.
"""
return self._properties.get("id")
@property
def friendly_name(self):
"""Union[str, None]: Title of the dataset as set by the user
(defaults to :data:`None`).
"""
return self._properties.get("friendlyName")
@property
def labels(self):
"""Dict[str, str]: Labels for the dataset."""
return self._properties.setdefault("labels", {})
@property
def reference(self):
"""google.cloud.bigquery.dataset.DatasetReference: A reference to this
dataset.
"""
return DatasetReference(self.project, self.dataset_id)
table = _get_table_reference
model = _get_model_reference
routine = _get_routine_reference
|
|
# Created by GeoScan Ltd. (http://geoscan.aero)
#
# This is python script for Metashape Pro. Scripts repository: https://github.com/agisoft-llc/metashape-scripts
import Metashape as ps
import math, time
# Checking compatibility
compatible_major_version = "1.5"
found_major_version = ".".join(ps.app.version.split('.')[:2])
if found_major_version != compatible_major_version:
raise Exception("Incompatible Metashape version: {} != {}".format(found_major_version, compatible_major_version))
try:
from PySide2.QtGui import *
from PySide2.QtCore import *
from PySide2.QtWidgets import *
except ImportError:
from PySide.QtGui import *
from PySide.QtCore import *
import copy
def time_measure(func):
def wrapper(*args, **kwargs):
t1 = time.time()
res = func(*args, **kwargs)
t2 = time.time()
print("Finished processing in {} sec.".format(t2 - t1))
return res
return wrapper
def delta_vector_to_chunk(v1, v2):
chunk = ps.app.document.chunk
v1 = chunk.crs.unproject(v1)
v2 = chunk.crs.unproject(v2)
v1 = chunk.transform.matrix.inv().mulp(v1)
v2 = chunk.transform.matrix.inv().mulp(v2)
z = v2 - v1
z.normalize()
return z
def get_chunk_vectors(lat, lon):
z = delta_vector_to_chunk(ps.Vector([lon, lat, 0]), ps.Vector([lon, lat, 1]))
y = delta_vector_to_chunk(ps.Vector([lon, lat, 0]), ps.Vector([lon + 0.001, lat, 0]))
x = delta_vector_to_chunk(ps.Vector([lon, lat, 0]), ps.Vector([lon, lat + 0.001, 0]))
return x, y, -z
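# Note: x, y and -z above are unit vectors in chunk coordinates pointing
# (approximately) North, East and down at the given location;
# align_cameras() below unpacks them as i, j, k.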
def wgs_to_chunk(chunk, point):
return chunk.transform.matrix.inv().mulp(chunk.crs.unproject(point))
def show_message(msg):
msgBox = QMessageBox()
print(msg)
msgBox.setText(msg)
msgBox.exec()
def check_chunk(chunk):
if chunk is None or len(chunk.cameras) == 0:
show_message("Empty chunk!")
return False
if chunk.crs is None:
show_message("Initialize chunk coordinate system first")
return False
return True
# Returns an estimate of the distance between two neighbouring cameras in the chunk
def get_photos_delta(chunk):
mid_idx = int(len(chunk.cameras) / 2)
if mid_idx == 0:
return ps.Vector([0, 0, 0])
c1 = chunk.cameras[:mid_idx][-1]
c2 = chunk.cameras[:mid_idx][-2]
print(c1.reference.location)
print(c2.reference.location)
offset = c1.reference.location - c2.reference.location
for i in range(len(offset)):
offset[i] = math.fabs(offset[i])
return offset
def get_chunk_bounds(chunk):
min_latitude = min(c.reference.location[1] for c in chunk.cameras if c.reference.location is not None)
max_latitude = max(c.reference.location[1] for c in chunk.cameras if c.reference.location is not None)
min_longitude = min(c.reference.location[0] for c in chunk.cameras if c.reference.location is not None)
max_longitude = max(c.reference.location[0] for c in chunk.cameras if c.reference.location is not None)
offset = get_photos_delta(chunk)
offset_factor = 2
delta_latitude = offset_factor * offset.y
delta_longitude = offset_factor * offset.x
min_longitude -= delta_longitude
max_longitude += delta_longitude
min_latitude -= delta_latitude
max_latitude += delta_latitude
return min_latitude, min_longitude, max_latitude, max_longitude
# Evaluates rotation matrices for cameras that have a location.
# The algorithm is straightforward: we assume the copter has zero pitch and
# roll, and yaw is estimated from the current flight direction, which is
# obtained by subtracting the location of the current camera from the
# location of the next camera.
# i and j are unit axis vectors in the chunk coordinate system, i || North.
def estimate_rotation_matrices(chunk, i, j):
groups = copy.copy(chunk.camera_groups)
groups.append(None)
for group in groups:
group_cameras = list(filter(lambda c: c.group == group, chunk.cameras))
if len(group_cameras) == 0:
continue
if len(group_cameras) == 1:
if group_cameras[0].reference.rotation is None:
group_cameras[0].reference.rotation = ps.Vector([0, 0, 0])
continue
for idx, c in enumerate(group_cameras[0:-1]):
next_camera = group_cameras[idx + 1]
if c.reference.rotation is None:
if c.reference.location is None or next_camera.reference.location is None:
continue
direction = delta_vector_to_chunk(c.reference.location, next_camera.reference.location)
cos_yaw = direction * j
yaw = math.degrees(math.acos(cos_yaw)) + 90 # TODO not sure about this offset
if direction * i > 0:
yaw = -yaw
c.reference.rotation = ps.Vector([yaw, 0, 0])
group_cameras[-1].reference.rotation = group_cameras[-2].reference.rotation
@time_measure
def align_cameras(chunk, min_latitude, min_longitude):
if chunk.transform.scale is None:
chunk.transform.scale = 1
chunk.transform.rotation = ps.Matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
chunk.transform.translation = ps.Vector([0, 0, 0])
i, j, k = get_chunk_vectors(min_latitude, min_longitude) # i || North
estimate_rotation_matrices(chunk, i, j)
for c in chunk.cameras:
if c.transform is not None:
continue
location = c.reference.location
if location is None:
continue
chunk_coordinates = wgs_to_chunk(chunk, location)
fi = c.reference.rotation.x + 90
fi = math.radians(fi)
roll = math.radians(c.reference.rotation.z)
pitch = math.radians(c.reference.rotation.y)
roll_mat = ps.Matrix([[1, 0, 0],
[0, math.cos(roll), -math.sin(roll)],
[0, math.sin(roll), math.cos(roll)]])
pitch_mat = ps.Matrix([[ math.cos(pitch), 0, math.sin(pitch)],
[ 0, 1, 0],
[-math.sin(pitch), 0, math.cos(pitch)]])
yaw_mat = ps.Matrix([[math.cos(fi), -math.sin(fi), 0],
[math.sin(fi), math.cos(fi), 0],
[ 0, 0, 1]])
r = roll_mat * pitch_mat * yaw_mat
ii = r[0, 0] * i + r[1, 0] * j + r[2, 0] * k
jj = r[0, 1] * i + r[1, 1] * j + r[2, 1] * k
kk = r[0, 2] * i + r[1, 2] * j + r[2, 2] * k
c.transform = ps.Matrix([[ii.x, jj.x, kk.x, chunk_coordinates[0]],
[ii.y, jj.y, kk.y, chunk_coordinates[1]],
[ii.z, jj.z, kk.z, chunk_coordinates[2]],
[ 0, 0, 0, 1]])
def run_camera_alignment():
print("Script started...")
doc = ps.app.document
chunk = doc.chunk
if not check_chunk(chunk):
return
min_latitude, min_longitude, max_latitude, max_longitude = get_chunk_bounds(chunk)
try:
align_cameras(chunk, min_latitude, min_longitude)
except Exception as e:
print(e)
print("Script finished!")
label = "Custom menu/Apply Vertical Camera Alignment"
ps.app.addMenuItem(label, run_camera_alignment)
print("To execute this script press {}".format(label))
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Utility methods
"""
import os
import sys
from os.path import isfile, join
from optparse import OptionParser
class EC2Type:
def __init__(self, arch, ephemeral=1, has_nvme=False):
self.arch = arch
self.ephemeral = ephemeral
self.has_nvme = has_nvme
AMI_HELP_MSG = """PLEASE NOTE - If you have accepted the software terms for CentOS 7 and still get an error,
this could be due to CentOS releasing new images of CentOS 7. When this occurs, the old images
are no longer available to new users. If you think this is the case, go to the CentOS 7 product
page on AWS Marketplace at the URL below to find the latest AMI:
https://aws.amazon.com/marketplace/pp/B00O7WM7QW
On the product page, find the latest AMI ID for your EC2 region. This should be used to set the 'aws_ami'
property in your muchos.props. After setting the 'aws_ami' property, run the launch command again.
""" # noqa
instance_types = {
"c1.medium": EC2Type("pvm"),
"c1.xlarge": EC2Type("pvm", 4),
"c3.2xlarge": EC2Type("pvm", 2),
"c3.4xlarge": EC2Type("pvm", 2),
"c3.8xlarge": EC2Type("pvm", 2),
"c3.large": EC2Type("pvm", 2),
"c3.xlarge": EC2Type("pvm", 2),
"cc2.8xlarge": EC2Type("hvm", 4),
"cg1.4xlarge": EC2Type("hvm", 2),
"cr1.8xlarge": EC2Type("hvm", 2),
"hi1.4xlarge": EC2Type("pvm", 2),
"hs1.8xlarge": EC2Type("pvm", 24),
"i2.2xlarge": EC2Type("hvm", 2),
"i2.4xlarge": EC2Type("hvm", 4),
"i2.8xlarge": EC2Type("hvm", 8),
"i2.xlarge": EC2Type("hvm"),
"i3.large": EC2Type("hvm", 1, True),
"i3.xlarge": EC2Type("hvm", 1, True),
"i3.2xlarge": EC2Type("hvm", 1, True),
"i3.4xlarge": EC2Type("hvm", 2, True),
"m1.large": EC2Type("pvm", 2),
"m1.medium": EC2Type("pvm"),
"m1.small": EC2Type("pvm"),
"m1.xlarge": EC2Type("pvm", 4),
"m2.2xlarge": EC2Type("pvm", 1),
"m2.4xlarge": EC2Type("pvm", 2),
"m2.xlarge": EC2Type("pvm"),
"m3.2xlarge": EC2Type("hvm", 2),
"m3.large": EC2Type("hvm"),
"m3.medium": EC2Type("hvm"),
"m3.xlarge": EC2Type("hvm", 2),
"m5d.large": EC2Type("hvm", 1, True),
"m5d.xlarge": EC2Type("hvm", 1, True),
"m5d.2xlarge": EC2Type("hvm", 1, True),
"m5d.4xlarge": EC2Type("hvm", 2, True),
"m5d.12xlarge": EC2Type("hvm", 2, True),
"m5d.24xlarge": EC2Type("hvm", 4, True),
"r3.2xlarge": EC2Type("hvm", 1),
"r3.4xlarge": EC2Type("hvm", 1),
"r3.8xlarge": EC2Type("hvm", 2),
"r3.large": EC2Type("hvm", 1),
"r3.xlarge": EC2Type("hvm", 1),
"d2.xlarge": EC2Type("hvm", 3),
"d2.2xlarge": EC2Type("hvm", 6),
"d2.4xlarge": EC2Type("hvm", 12),
"d2.8xlarge": EC2Type("hvm", 24),
}
def verify_type(instance_type):
if instance_type not in instance_types:
print(
"ERROR - EC2 instance type '{}' is currently "
"not supported!".format(instance_type)
)
print("This is probably due to the instance type being EBS-only.")
print("Below is a list of supported instance types:")
for key in instance_types:
print(key)
sys.exit(1)
def get_arch(instance_type):
verify_type(instance_type)
return instance_types.get(instance_type).arch
def get_ephemeral_devices(instance_type):
verify_type(instance_type)
devices = []
ec2_type = instance_types.get(instance_type)
start = 0
if instance_type.startswith("m5d"):
start = 1
for i in range(start, ec2_type.ephemeral + start):
if ec2_type.has_nvme:
devices.append("/dev/nvme" + str(i) + "n1")
else:
devices.append("/dev/xvd" + chr(ord("b") + i))
return devices
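# Illustrative examples (derived from the instance_types table above):
#   get_ephemeral_devices("m1.xlarge")  -> ['/dev/xvdb', '/dev/xvdc', '/dev/xvdd', '/dev/xvde']
#   get_ephemeral_devices("i3.2xlarge") -> ['/dev/nvme0n1']
#   get_ephemeral_devices("m5d.xlarge") -> ['/dev/nvme1n1']   (m5d types start at nvme1)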
def get_block_device_map(instance_type):
verify_type(instance_type)
bdm = [{"DeviceName": "/dev/sda1", "Ebs": {"DeleteOnTermination": True}}]
ec2_type = instance_types.get(instance_type)
if not ec2_type.has_nvme:
for i in range(0, ec2_type.ephemeral):
device = {
"DeviceName": "/dev/xvd" + chr(ord("b") + i),
"VirtualName": "ephemeral" + str(i),
}
bdm.append(device)
return bdm
def parse_args(hosts_dir, input_args=None):
parser = OptionParser(
usage="muchos [options] <action>\n\n"
+ "where <action> can be:\n"
+ " launch Launch cluster in Azure or EC2\n"
+ " status Check status of Azure or EC2 cluster\n"
+ " setup Set up cluster\n"
+ " sync Sync ansible directory on cluster proxy node\n"
+ " config Print configuration for that cluster. "
"Requires '-p'. Use '-p all' for all config.\n"
+ " ssh SSH to cluster proxy node\n"
+ " kill Kills processes on cluster started by Muchos\n"
+ " wipe Wipes cluster data and kills processes\n"
+ " terminate Terminate EC2 cluster\n"
+ " cancel_shutdown Cancels automatic shutdown of EC2 cluster",
add_help_option=False,
)
parser.add_option(
"-c", "--cluster", dest="cluster", help="Specifies cluster"
)
parser.add_option(
"-p",
"--property",
dest="property",
help="Specifies property to print (if using 'config' action)"
". Set to 'all' to print every property",
)
parser.add_option(
"-h", "--help", action="help", help="Show this help message and exit"
)
if input_args:
(opts, args) = parser.parse_args(input_args)
else:
(opts, args) = parser.parse_args()
if len(args) == 0:
print("ERROR - You must specify on action")
return
action = args[0]
if action == "launch" and not opts.cluster:
print("ERROR - You must specify a cluster if using launch command")
return
clusters = [f for f in os.listdir(hosts_dir) if isfile(join(hosts_dir, f))]
if not opts.cluster:
if len(clusters) == 0:
print(
"ERROR - No clusters found in conf/hosts "
"or specified by --cluster option"
)
return
elif len(clusters) == 1:
opts.cluster = clusters[0]
else:
print(
"ERROR - Multiple clusters {0} found in conf/hosts/. "
"Please pick one using --cluster option".format(clusters)
)
return
if action == "config" and not opts.property:
print(
"ERROR - For config action, you must set -p to a property or 'all'"
)
return
return opts, action, args[1:]
|
|
"""
CQLEngine-Session
Your cqlengine model must inherit from cqlengine_session.SessionModel instead
of cqlengine.model.Model
SessionModel will replace your SessionModel declarations with classes of type
IdMapModel. Your model module will get classes of type Model with an
underscore prefixed to the name.
example:
class Foo(SessionModel):
pass
results in Foo being an IdMapModel, and _Foo being a Model.
Note that making blind handles requires you to pass a key:
blind = Foo(key)
You can make changes and save them (without first needing to load the object):
blind.title = u'new title'
save()
To create a new object, use create:
foo = Foo.create()
"""
import copy
from datetime import date, datetime
import importlib
import json
import threading
from uuid import UUID
from cassandra.query import SimpleStatement
from cqlengine import columns
import cqlengine.connection
from cqlengine.exceptions import ValidationError
from cqlengine.management import get_fields, sync_table
from cqlengine.models import ColumnQueryEvaluator, Model, ModelMetaClass
from cqlengine.operators import EqualsOperator
from cqlengine.query import BatchQuery, ModelQuerySet
from cqlengine.statements import WhereClause, SelectStatement, DeleteStatement, UpdateStatement, AssignmentClause, InsertStatement, BaseCQLStatement, MapUpdateClause, MapDeleteClause, ListUpdateClause, SetUpdateClause, CounterUpdateClause
class AttributeUnavailable(Exception):
pass
class SessionManager(object):
def get_session(self):
"""Return current session for this context."""
raise NotImplementedError
def set_session(self, session):
"""Make the given session the current session for this context."""
raise NotImplementedError
class ThreadLocalSessionManager(SessionManager):
def __init__(self):
self.storage = threading.local()
def get_session(self):
return getattr(self.storage, 'session', None)
def set_session(self, session):
self.storage.session = session
SESSION_MANAGER = ThreadLocalSessionManager()
def set_session_manager(manager):
global SESSION_MANAGER
SESSION_MANAGER = manager
def clear():
"""Empty the current session"""
# xxx what happens to the existing id-map objects? this is dangerous.
# (also, the dev is not expected to call this.)
SESSION_MANAGER.set_session(None)
def save(*objects):
"Write all pending changes from session to Cassandra."
session = SESSION_MANAGER.get_session()
if session is not None:
session.save(*objects)
def get_session(create_if_missing=True):
session = SESSION_MANAGER.get_session()
if session is None:
session = Session()
SESSION_MANAGER.set_session(session)
return session
def add_call_after_save(callable, *args, **kwargs):
"""Call callable with given args and kwargs after next save."""
get_session().call_after_save.append((callable, args, kwargs,))
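# Illustrative sketch (mirrors the module docstring above):
#
#   foo = Foo.create()            # registers a pending insert in the session
#   foo.title = u'new title'      # recorded as a dirty attribute on foo
#   save()                        # flushes pending creates/updates in one batch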
class Session(object):
"""Identity map objects and support for implicit batch save."""
def __init__(self):
self.instances_by_class = {}
self.call_after_save = []
#self.deletes = set()
def save(self, *objects):
"""Flush all pending changes to Cassandra.
objects -- if not None, only operate on this or these object(s)
"""
updates = set()
counter_updates = set()
creates = set()
counter_creates = set()
for model_class, by_key in self.instances_by_class.iteritems():
for key, instance in by_key.iteritems():
if hasattr(instance, '_created') and instance._created:
if model_class.id_mapped_class._has_counter:
counter_creates.add(instance)
else:
creates.add(instance)
elif hasattr(instance, '_dirties'):
if model_class.id_mapped_class._has_counter:
counter_updates.add(instance)
else:
updates.add(instance)
        if objects:
            # Restrict the flush to just the explicitly passed objects.
            objects = set(objects)
            updates = updates & objects
            counter_updates = counter_updates & objects
            creates = creates & objects
            counter_creates = counter_creates & objects
with BatchQuery() as batch:
for create in creates:
# Note we skip a lot of cqlengine code and create the
# insert statement directly.
# (this is the non-optimized code that is replaced below)
#key_names = create.id_mapped_class._columns.keys()
#arg = {name: getattr(create, name) for name in key_names}
#create.id_mapped_class.batch(batch).create(**arg)
# (end non-optimized code)
# (begin optimized)
# note: it might save time to memoize column family name
# note: cqlengine-session doesn't yet support 'ttl'
insert = InsertStatement(create.id_mapped_class.column_family_name())#, ttl=self._ttl)
for name, col in create.id_mapped_class._columns.items():
val = col.validate(getattr(create, name))
if col._val_is_null(val):
continue
insert.add_assignment_clause(AssignmentClause(
col.db_field_name,
col.to_database(val)))
# skip query execution if it's empty
# caused by pointless update queries
if not insert.is_empty:
batch.add_query(insert)
# (end optimized)
del create._created
try:
del create._dirties
except AttributeError:
pass
for update in updates:
key_names = update._primary_keys.keys()
arg = {name: getattr(update, name) for name in key_names}
dirties = update._dirties
update.id_mapped_class.objects(**arg).batch(batch).update(**dirties)
del update._dirties
# It would seem that batch does not work with counter?
#with BatchQuery() as batch:
for create in counter_creates:
primary_key_names = create.id_mapped_class._primary_keys.keys()
arg = {name: getattr(create, name) for name in primary_key_names}
instance = create.id_mapped_class.create(**arg)
for name, col in create.id_mapped_class._columns.items():
if isinstance(col, columns.Counter):
val = getattr(create, name)
setattr(instance, name, val)
del create._created
try:
del create._dirties
except AttributeError:
pass
instance.update()
for update in counter_updates:
statement = UpdateStatement(update.id_mapped_class.column_family_name())#, ttl=self._ttl)
for name, value in update._dirties.items():
col = update.id_mapped_class._columns[name]
clause = CounterUpdateClause(col.db_field_name, value, 0, column=col)
statement.add_assignment_clause(clause)
for name, col in update.id_mapped_class._primary_keys.items():
statement.add_where_clause(WhereClause(
col.db_field_name,
EqualsOperator(),
col.to_database(getattr(update, name))
))
params = statement.get_context()
statement = SimpleStatement(str(statement))
cqlengine.connection.get_session().execute(statement, params)
del update._dirties
# for delete in self.deletes:
# raise NotImplementedError
for callable, args, kwargs in self.call_after_save:
callable(*args, **kwargs)
self.call_after_save = []
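# Usage sketch (illustrative only; nothing in this module calls it): mutate
# identity-mapped instances obtained through their model classes, then flush the
# pending creates/updates in one batch.  ``instance`` stands for any object built
# from a SessionModel-derived class and ``on_saved`` for any callable.
def _example_session_flow(instance, on_saved):
    # Register a post-save hook; it runs once the batch below succeeds.
    add_call_after_save(on_saved, instance)
    # Flush this instance's pending creates/updates to Cassandra in one batch.
    get_session().save(instance)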
class SessionModelMetaClass(ModelMetaClass):
def __new__(cls, name, bases, attrs):
if attrs.get('__abstract__'):
return super(SessionModelMetaClass, cls).__new__(cls,
name,
bases,
attrs)
if len(bases) > 1:
raise TypeError('SessionModel does not allow multiple inheritance')
# Take the result of the base class's __new__ and assign it to the
# module using a prefixed underscore in the name.
new_name = '_' + name
# Note: at this point attrs has only those actually declared in
# the class declaration (and not in any parent class declaration)
base = super(SessionModelMetaClass, cls).__new__(cls,
new_name,
bases,
attrs)
# Note: at this point, attrs has had a bunch of things added by
# cqlengine.models.ModelMetaClass
module = importlib.import_module(cls.__module__)
setattr(module, new_name, base)
# Copy attrs from the base class because this class won't actually
# inherit from these base classes.
base_attrs = {}
copyable_bases = []
for klass in bases[0].mro():
if klass == SessionModel:
break
copyable_bases.append(klass)
for klass in reversed(copyable_bases):
base_attrs.update(klass.__dict__)
base_attrs.update(attrs)
base_attrs['id_mapped_class'] = base
base_attrs['_promotable_column_names'] = set([cname for cname, c in base_attrs['_columns'].iteritems() if not c.primary_key])
# Make descriptors for the columns so the instances will get/set
# using a ColumnDescriptor instance.
for col_name, col in base._columns.iteritems():
if isinstance(col, columns.Counter):
base_attrs[col_name] = CounterColumnDescriptor(col)
else:
base_attrs[col_name] = ColumnDescriptor(col)
return IdMapMetaClass(name, (IdMapModel,), base_attrs)
# declare your models with this so that SessionModelMetaClass is the metaclass.
class SessionModel(Model):
__abstract__ = True
__metaclass__ = SessionModelMetaClass
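# Declaration sketch (hypothetical model, never instantiated here): subclasses of
# SessionModel are written exactly like plain cqlengine models.  The metaclass
# above publishes the raw cqlengine model as ``_Thing`` and returns an
# identity-mapped wrapper under the original name ``Thing``.
def _example_model_declaration():
    class Thing(SessionModel):
        thing_id = columns.UUID(primary_key=True)
        name = columns.Text()
    # The wrapped cqlengine model remains reachable via Thing.id_mapped_class.
    return Thing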
class IdMapMetaClass(type):
# def __new__(cls, name, bases, attrs):
# return None
# return type(name, bases, attrs)
def __call__(cls, *key):
"""If instance is in the id-map, return it, else make and return it."""
session = get_session()
try:
instance_by_key = session.instances_by_class[cls]
try:
return instance_by_key[key]
except KeyError:
pass
except KeyError:
instance_by_key = {}
session.instances_by_class[cls] = instance_by_key
instance = super(IdMapMetaClass, cls).__call__(*key)
instance_by_key[key] = instance
return instance
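# Sketch of the identity-map guarantee enforced by __call__ above: constructing
# the "same" primary key twice within one session returns one shared object.
# ``model_class`` stands for any SessionModel-derived wrapper class.
def _example_identity_map(model_class, *key):
    first = model_class(*key)
    second = model_class(*key)
    assert first is second
    return first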
# This descriptor is adapted from cqlengine and may need further modification.
class QuerySetDescriptor(object):
def __get__(self, instance, session_class):
return WrappedQuerySet(instance, session_class)
class IdMapModel(object):
__metaclass__ = IdMapMetaClass
objects = QuerySetDescriptor()
def __init__(self, *key):
self.key = key
key_names = self.id_mapped_class._primary_keys.keys()
for name, value in zip(key_names, key):
self._promote(name, value)
@classmethod
def all(cls):
return cls.objects.all()
@classmethod
def filter(cls, *args, **kwargs):
return cls.objects.filter(*args, **kwargs)
@classmethod
def get(cls, *args, **kwargs):
return cls.objects.get(*args, **kwargs)
@classmethod
def create(cls, **kwargs):
column_names = cls.id_mapped_class._columns.keys()
extra_columns = set(kwargs.keys()) - set(column_names)
if extra_columns:
raise ValidationError(
"Incorrect columns passed: {}".format(extra_columns))
primary_keys = cls.id_mapped_class._primary_keys
uncleaned_values = {}
for name, col in cls.id_mapped_class._columns.items():
try:
value = kwargs[name]
except KeyError:
if col.default:
if callable(col.default):
value = col.default()
else:
value = col.default
elif isinstance(col, columns.Counter):
value = 0
elif name in primary_keys:
raise ValueError(u"Can't create {} without providing primary key {}".format(cls.__name__, name))
else:
# Container columns have non-None empty cases.
value = None
uncleaned_values[name] = value
key = []
for name, col in primary_keys.items():
key.append(col.to_python(uncleaned_values[name]))
instance = cls(*key)
instance._created = True
for name, col in cls.id_mapped_class._columns.items():
if name in primary_keys:
continue
value = uncleaned_values[name]
if isinstance(col, columns.BaseContainerColumn):
if isinstance(col, columns.Set):
value = OwnedSet(instance, name, col.to_python(value))
elif isinstance(col, columns.List):
value = OwnedList(instance, name, col.to_python(value))
elif isinstance(col, columns.Map):
value = OwnedMap(instance, name, col.to_python(value))
elif value is not None:
value = col.to_python(value)
instance._promote(name, value)
return instance
def promote(self, **kwargs):
"""Set kwargs on entity without marking as dirty
Invalid column names in kwargs raises an exception
Promoting the value of a key raises an exception
"""
extra_columns = set(kwargs.keys()) - self._promotable_column_names
if extra_columns:
raise ValidationError("Incorrect columns passed: {}".format(extra_columns))
for col_name, col_value in kwargs.items():
self._promote(col_name, col_value)
def _promote(self, name, value):
"""set without marking attribute as dirty."""
try:
self._values[name] = value
except AttributeError:
self._values = {name: value}
def _mark_dirty(self, name, value):
"""mark an attribute as dirty."""
try:
self._dirties[name] = value
except AttributeError:
self._dirties = {name: value}
@classmethod
def sync_table(cls):
sync_table(cls.id_mapped_class)
@classmethod
def _construct_instance(cls, values):
mapped_class = cls.id_mapped_class
primary_keys = mapped_class._primary_keys
key = []
for name, col in primary_keys.items():
key.append(col.to_python(values[name]))
instance = cls(*key)
cleaned_values = {}
for name, value in values.items():
if name in primary_keys:
continue
# Ignore results for columns returned that are not in the schema.
# (They may be present as a result of migrating an existing db.)
col = cls.id_mapped_class._columns.get(name)
if col:
if isinstance(col, columns.BaseContainerColumn):
if isinstance(col, columns.Set):
value = OwnedSet(instance, name, col.to_python(value))
elif isinstance(col, columns.List):
value = OwnedList(instance, name, col.to_python(value))
elif isinstance(col, columns.Map):
value = OwnedMap(instance, name, col.to_python(value))
elif value is not None:
value = col.to_python(value)
cleaned_values[name] = value
try:
dirties = instance._dirties
except AttributeError:
dirties = EMPTY
for name, value in cleaned_values.items():
if name not in primary_keys and name not in dirties:
instance._promote(name, value)
return instance
@property
def _key(self):
return getattr(self, self._key_name)
def blind_increment(self, name, value):
col = self.id_mapped_class._columns[name]
if not isinstance(col, columns.Counter):
raise ValueError(u'Can only blind increment Counter columns, %s is a %s' % (name, type(col)))
# Increment the current value, if any.
try:
values = self._values
except AttributeError:
pass
else:
try:
values[name] += value
except KeyError:
pass
# Increment the dirty value, if any.
try:
dirties = self._dirties
except AttributeError:
self._dirties = {name: value}
else:
try:
dirties[name] += value
except KeyError:
dirties[name] = value
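# Sketch of how writes reach the session (the column names 'name' and 'hits' are
# assumptions for the example; nothing in this module calls this helper):
def _example_write_paths(regular_instance, counter_instance):
    # Regular columns are assigned directly; ColumnDescriptor.__set__ records the
    # value in ``_dirties`` so the next save() emits an UPDATE for it.
    regular_instance.name = u'renamed'
    # Counter columns (on a counter model) can be bumped without reading first.
    counter_instance.blind_increment('hits', 1)
    get_session().save()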
class WrappedQuerySet(ModelQuerySet):
def __init__(self, session_instance, session_class):
self._session_instance = session_instance
self._session_class = session_class
if not isinstance(session_class.id_mapped_class.objects, ModelQuerySet):
# If we run into something that is not a ModelQuerySet, let's
# support it. Because we need to copy the _result_constructor
# method instead of providing a _construct_instance method
# directly, this is necessary. Perhaps it is something we'd
# ask of cqlengine plugin in the future.
raise NotImplementedError(u'only ModelQuerySet queries are supported')
super(WrappedQuerySet, self).__init__(session_class.id_mapped_class)
def _get_result_constructor(self):
""" Returns a function that will be used to instantiate query results """
if not self._values_list: # we want models
return lambda rows: self._session_class._construct_instance(rows)
elif self._flat_values_list: # the user has requested flattened list (1 value per row)
return lambda row: row.popitem()[1]
else:
return lambda row: self._get_row_value_list(self._only_fields, row)
def __deepcopy__(self, memo):
clone = self.__class__(self._session_instance, self._session_class)
for k, v in self.__dict__.items():
if k in ['_con', '_cur', '_result_cache', '_result_idx']: # don't clone these
clone.__dict__[k] = None
elif k == '_batch':
# we need to keep the same batch instance across
# all queryset clones, otherwise the batched queries
# fly off into other batch instances which are never
# executed, thx @dokai
clone.__dict__[k] = self._batch
else:
clone.__dict__[k] = copy.deepcopy(v, memo)
return clone
class OwnedSet(set):
def __init__(self, owner, name, *args, **kwargs):
self.owner = owner
self.name = name
super(OwnedSet, self).__init__(*args, **kwargs)
def mark_dirty(self):
self.owner._mark_dirty(self.name, self)
def add(self, *args, **kwargs):
self.mark_dirty()
return super(OwnedSet, self).add(*args, **kwargs)
def remove(self, *args, **kwargs):
self.mark_dirty()
return super(OwnedSet, self).remove(*args, **kwargs)
def clear(self, *args, **kwargs):
self.mark_dirty()
return super(OwnedSet, self).clear(*args, **kwargs)
def copy(self, *args, **kwargs):
c = super(OwnedSet, self).copy(*args, **kwargs)
if hasattr(self, '_dirty'):
c._dirty = self._dirty
return c
def difference_update(self, *args, **kwargs):
self.mark_dirty()
return super(OwnedSet, self).difference_update(*args, **kwargs)
def discard(self, *args, **kwargs):
self.mark_dirty()
return super(OwnedSet, self).discard(*args, **kwargs)
def intersection_update(self, *args, **kwargs):
self.mark_dirty()
return super(OwnedSet, self).intersection_update(*args, **kwargs)
def pop(self, *args, **kwargs):
self.mark_dirty()
return super(OwnedSet, self).pop(*args, **kwargs)
def symmetric_difference_update(self, *args, **kwargs):
self.mark_dirty()
return super(OwnedSet, self).symmetric_difference_update(*args, **kwargs)
def update(self, *args, **kwargs):
self.mark_dirty()
return super(OwnedSet, self).update(*args, **kwargs)
class OwnedList(list):
def __init__(self, owner, name, *args, **kwargs):
self.owner = owner
self.name = name
super(OwnedList, self).__init__(*args, **kwargs)
def mark_dirty(self):
self.owner._mark_dirty(self.name, self)
def __setitem__(self, *args, **kwargs):
self.mark_dirty()
return super(OwnedList, self).__setitem__(*args, **kwargs)
def __setslice__(self, *args, **kwargs):
self.mark_dirty()
return super(OwnedList, self).__setslice__(*args, **kwargs)
def append(self, *args, **kwargs):
self.mark_dirty()
return super(OwnedList, self).append(*args, **kwargs)
def extend(self, *args, **kwargs):
self.mark_dirty()
return super(OwnedList, self).extend(*args, **kwargs)
def insert(self, *args, **kwargs):
self.mark_dirty()
return super(OwnedList, self).insert(*args, **kwargs)
def pop(self, *args, **kwargs):
self.mark_dirty()
return super(OwnedList, self).pop(*args, **kwargs)
def remove(self, *args, **kwargs):
self.mark_dirty()
return super(OwnedList, self).remove(*args, **kwargs)
def reverse(self, *args, **kwargs):
self.mark_dirty()
return super(OwnedList, self).reverse(*args, **kwargs)
def sort(self, *args, **kwargs):
self.mark_dirty()
return super(OwnedList, self).sort(*args, **kwargs)
class OwnedMap(dict):
def __init__(self, owner, name, *args, **kwargs):
self.owner = owner
self.name = name
super(OwnedMap, self).__init__(*args, **kwargs)
def mark_dirty(self):
self.owner._mark_dirty(self.name, self)
def __setitem__(self, *args, **kwargs):
self.mark_dirty()
return super(OwnedMap, self).__setitem__(*args, **kwargs)
def clear(self, *args, **kwargs):
self.mark_dirty()
return super(OwnedMap, self).clear(*args, **kwargs)
def copy(self, *args, **kwargs):
c = super(OwnedMap, self).copy(*args, **kwargs)
if hasattr(self, '_dirty'):
c._dirty = self._dirty
return c
def pop(self, *args, **kwargs):
self.mark_dirty()
return super(OwnedMap, self).pop(*args, **kwargs)
def popitem(self, *args, **kwargs):
self.mark_dirty()
return super(OwnedMap, self).popitem(*args, **kwargs)
def update(self, *args, **kwargs):
self.mark_dirty()
return super(OwnedMap, self).update(*args, **kwargs)
    def __delitem__(self, *args, **kwargs):
        # dict has no ``remove`` method; track deletions via __delitem__ instead.
        self.mark_dirty()
        return super(OwnedMap, self).__delitem__(*args, **kwargs)
def setdefault(self, *args, **kwargs):
self.mark_dirty()
return super(OwnedMap, self).setdefault(*args, **kwargs)
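# Sketch of why the Owned* wrappers above exist: in-place mutation of a container
# column must mark the owning instance dirty even though no assignment happens.
# ``tags`` (a Set column) and ``scores`` (a Map column) are hypothetical names.
def _example_container_mutation(instance):
    instance.tags.add(u'new-tag')      # OwnedSet.add calls _mark_dirty on the owner
    instance.scores[u'level'] = 3      # OwnedMap.__setitem__ does the same
    get_session().save(instance)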
class ColumnDescriptor(object):
"""
Handles the reading and writing of column values to and from
a model instance's value manager, as well as creating
comparator queries
"""
def __init__(self, column):
"""
:param column:
:type column: columns.Column
:return:
"""
self.column = column
self.query_evaluator = ColumnQueryEvaluator(self.column)
def __get__(self, instance, owner):
"""
Returns either the value or column, depending
on if an instance is provided or not
:param instance: the model instance
:type instance: Model
"""
if instance:
try:
return instance._values[self.column.column_name]
except (AttributeError, KeyError,):
raise AttributeUnavailable(instance, self.column.column_name)
else:
return self.query_evaluator
def __set__(self, instance, value):
"""
Sets the value on an instance, raises an exception with classes
TODO: use None instance to create update statements
"""
if instance:
col = self.column
name = col.column_name
if isinstance(col, columns.BaseContainerColumn):
if isinstance(col, columns.Set):
value = OwnedSet(instance, name, col.to_python(value))
elif isinstance(col, columns.List):
value = OwnedList(instance, name, col.to_python(value))
elif isinstance(col, columns.Map):
value = OwnedMap(instance, name, col.to_python(value))
instance._mark_dirty(name, value)
instance._promote(name, value)
else:
raise AttributeError('cannot reassign column values')
def __delete__(self, instance):
"""
Sets the column value to None, if possible
"""
if instance:
if self.column.can_delete:
raise NotImplementedError
else:
raise AttributeError('cannot delete {} columns'.format(self.column.column_name))
class WrappedResponse(int):
# This is necessary so that set knows it is getting set as the result of
# an __iadd__ call and not a regular assignment.
# Doing this is necessary because a WrappedInt, as below, would be
# incrementable and would side-effect the counter.
pass
class WrappedInt(int):
def __iadd__(self, value):
return WrappedResponse(value)
class CounterColumnDescriptor(ColumnDescriptor):
# This was made to get += to do the right thing for counters.
# see http://stackoverflow.com/questions/11987949/how-to-implement-iadd-for-a-python-property
def __get__(self, instance, owner):
"""
Returns either the value or column, depending
on if an instance is provided or not
:param instance: the model instance
:type instance: Model
"""
if instance:
try:
existing_value = instance._values[self.column.column_name] or 0
return WrappedInt(existing_value)
except (AttributeError, KeyError,):
raise AttributeUnavailable(instance, self.column.column_name)
else:
return self.query_evaluator
def __set__(self, instance, value):
"""
Sets the value on an instance, raises an exception with classes
TODO: use None instance to create update statements
"""
if instance:
if isinstance(value, WrappedResponse):
name = self.column.column_name
value = int(value)
# Increment the current value, if any.
try:
values = instance._values
except AttributeError:
instance._values = {name: value}
else:
try:
values[name] += value
except KeyError:
values[name] = value
# Increment the dirty value, if any.
try:
dirties = instance._dirties
except AttributeError:
instance._dirties = {name: value}
else:
try:
dirties[name] += value
except KeyError:
dirties[name] = value
else:
raise AttributeError('cannot assign to counter, use +=')
else:
raise AttributeError('cannot reassign column values')
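# Sketch of the WrappedInt / WrappedResponse handshake implemented above: ``+=``
# is the only supported way to change a counter through its descriptor.  ``hits``
# is a hypothetical Counter column.
def _example_counter_increment(instance):
    instance.hits += 5       # accepted: arrives in __set__ as a WrappedResponse delta of 5
    # instance.hits = 5      # rejected: AttributeError('cannot assign to counter, use +=')
    get_session().save(instance)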
class Empty(object):
def __contains__(self, item):
return False
EMPTY = Empty()
class VerifyResult(object):
def __init__(self, model, is_missing=False):
self.model = model
self.is_missing = is_missing
self.is_extra = False
self.missing = set()
self.extra = set()
self.different = set()
self.missing_indexes = set()
self.extra_indexes = set()
def has_errors(self):
return self.is_missing or \
self.is_extra or \
self.missing or \
self.extra or \
self.different or \
self.missing_indexes or \
self.extra_indexes
def report(self):
try:
name = self.model.__name__
except:
name = self.model
if self.is_missing:
return '{} does not have a column family (expected "{}")'.format(
name,
self.model.column_family_name(include_keyspace=False))
if self.is_extra:
return 'found unexpected column family "{}"'.format(name)
logs = []
if self.missing:
logs.append('{} columns missing: {}'.format(
name, ', '.join(self.missing)))
if self.extra:
logs.append('{} extra columns: {}'.format(
name, ', '.join(self.extra)
))
if self.different:
logs.append('{} different columns: {}'.format(
name, ', '.join(self.different)))
if self.missing_indexes:
logs.append('{} indexes missing: {}'.format(
name, ', '.join(self.missing_indexes)
))
if self.extra_indexes:
logs.append('{} extra indexes: {}'.format(
name, ', '.join(self.extra_indexes)
))
return '\n'.join(logs)
def __repr__(self):
return 'VerifyResult({})'.format(self.model.__name__)
def verify(*models, **kwargs):
ignore_extra = kwargs.get('ignore_extra', {})
    results = {}
    by_keyspace = {}
    by_cf = {}
for model in models:
ks_name = model._get_keyspace()
try:
by_keyspace[ks_name].add(model)
except KeyError:
by_keyspace[ks_name] = set([model])
cf_name = model.column_family_name(include_keyspace=False)
by_cf[cf_name] = model
results[model] = VerifyResult(model)
    for keyspace, ks_models in by_keyspace.items():
        # The schema query must use the keyspace currently being verified, not the
        # ks_name left over from the loop above.
        query_result = cqlengine.connection.get_session().execute(
            "SELECT columnfamily_name, key_aliases, key_validator, column_aliases, comparator from system.schema_columnfamilies WHERE keyspace_name = %(ks_name)s",
            {'ks_name': keyspace})
tables = {}
for result in query_result:
columnfamily_name = result['columnfamily_name']
partition_keys = result['key_aliases']
partition_key_types = result['key_validator']
primary_keys = result['column_aliases']
primary_key_types = result['comparator']
partition_keys = json.loads(partition_keys)
if len(partition_keys) > 1:
partition_key_types = partition_key_types[len('org.apache.cassandra.db.marshal.CompositeType('):-1].split(',')[:len(partition_keys)]
else:
partition_key_types = [partition_key_types]
primary_keys = json.loads(primary_keys)
primary_key_types = primary_key_types[len('org.apache.cassandra.db.marshal.CompositeType('):].split(',')[:len(primary_keys)]
item = {
'cf': columnfamily_name,
'partition_keys': partition_keys,
'partition_key_types': partition_key_types,
'primary_keys': primary_keys,
'primary_key_types': primary_key_types
}
tables[columnfamily_name] = item
        for model in ks_models:
cf_name = model.column_family_name(include_keyspace=False)
db_field_names = {col.db_field_name: col for name, col in model._columns.items()}
result = results[model]
# Check that model's cf is in db's tables.
if cf_name not in tables:
result.is_missing = True
else:
table_info = tables[cf_name]
fields = get_fields(model)
fields = {field.name: field.type for field in fields}
for name, field_type in fields.iteritems():
# If field is missing, that's an error.
if name not in db_field_names:
result.extra.add(name)
# If field is present, check the type.
else:
col = db_field_names[name]
if isinstance(col, columns.Map):
if not field_type.startswith('org.apache.cassandra.db.marshal.MapType'):
result.different.add(col.column_name)
elif isinstance(col, columns.List):
if not field_type.startswith('org.apache.cassandra.db.marshal.ListType'):
result.different.add(col.column_name)
elif isinstance(col, columns.Set):
if not field_type.startswith('org.apache.cassandra.db.marshal.SetType'):
result.different.add(col.column_name)
else:
local_metadata = _type_to_metadata(col.db_type)
if local_metadata != field_type:
result.different.add(col.column_name)
for name, kind in zip(table_info['partition_keys'], table_info['partition_key_types']):
if name not in db_field_names:
result.extra.add(name)
else:
col = db_field_names[name]
local_metadata = _type_to_metadata(col.db_type)
if local_metadata != kind:
result.different.add(col.column_name)
for name, kind in zip(table_info['primary_keys'], table_info['primary_key_types']):
if name not in db_field_names:
result.extra.add(name)
else:
col = db_field_names[name]
local_metadata = _type_to_metadata(col.db_type)
if col.clustering_order == 'desc':
local_metadata = u'org.apache.cassandra.db.marshal.ReversedType({})'.format(local_metadata)
if local_metadata != kind:
result.different.add(col.column_name)
for name, col in db_field_names.items():
# Handle primary keys from table-level data.
if col.primary_key:
if col.partition_key:
if name not in table_info['partition_keys']:
result.missing.add(col.column_name)
else:
local_metadata = _type_to_metadata(col.db_type)
i = table_info['partition_keys'].index(name)
if local_metadata != table_info['partition_key_types'][i]:
result.different.add(col.column_name)
else:
if name not in table_info['primary_keys']:
result.missing.add(col.column_name)
else:
local_metadata = _type_to_metadata(col.db_type)
if col.clustering_order == 'desc':
local_metadata = u'org.apache.cassandra.db.marshal.ReversedType({})'.format(local_metadata)
i = table_info['primary_keys'].index(name)
if local_metadata != table_info['primary_key_types'][i]:
result.different.add(col.column_name)
# Primary keys are not listed in fields.
if not col.primary_key and name not in fields:
result.missing.add(col.column_name)
for cf in tables:
if cf not in by_cf and cf not in ignore_extra:
result = VerifyResult(cf)
result.is_extra = True
results[cf] = result
model_indexes = {}
for model in models:
this_model_indexes = {col.db_field_name: col for name, col in model._columns.items() if col.index}
if this_model_indexes:
model_indexes[model.column_family_name(include_keyspace=False)] = this_model_indexes
query_results = cqlengine.connection.get_session().execute(
"SELECT index_name from system.\"IndexInfo\" WHERE table_name=%(table_name)s",
{'table_name': model._get_keyspace()})
cassandra_indexes = {}
for result_dict in query_results:
idx = result_dict['index_name']
try:
cf, index_name = idx.split('.')
look_for = 'index_{}_'.format(cf)
index_name = index_name[len(look_for):]
except ValueError:
cf = None
index_name = None
if cf:
try:
cassandra_indexes[cf].add(index_name)
except KeyError:
cassandra_indexes[cf] = set([index_name])
for cf, index_names in cassandra_indexes.items():
if cf not in model_indexes:
if cf not in by_cf:
result = VerifyResult(cf)
result.is_extra = True
results[cf] = result
else:
model = by_cf[cf]
result = results[model]
result.extra_indexes.add(index_name)
else:
this_model_indexes = model_indexes[cf]
if index_name not in this_model_indexes:
model = by_cf[cf]
result = results[model]
result.extra_indexes.add(index_name)
for cf, this_model_indexes in model_indexes.items():
for index_name in this_model_indexes.keys():
if cf not in cassandra_indexes or index_name not in cassandra_indexes[cf]:
model = by_cf[cf]
result = results[model]
result.missing_indexes.add(index_name)
results = {model: result for model, result in results.items() if result.has_errors()}
return results.values()
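# Usage sketch: compare the declared models against the live schema and collect a
# report line per discrepancy (an empty list means everything matches).
def _example_verify_schema(*model_classes):
    return [result.report() for result in verify(*model_classes)]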
# Some functions to aid reading the cassandra definitions.
def _metadata_to_type(s):
return {
'org.apache.cassandra.db.marshal.UUIDType': UUID,
'org.apache.cassandra.db.marshal.DoubleType': float,
'org.apache.cassandra.db.marshal.UTF8Type': unicode,
'org.apache.cassandra.db.marshal.BooleanType': bool,
'org.apache.cassandra.db.marshal.Int32Type': int,
'org.apache.cassandra.db.marshal.LongType': long,
'org.apache.cassandra.db.marshal.DateType': date
}.get(s, s)
def _type_to_metadata(s):
return {
'int': 'org.apache.cassandra.db.marshal.Int32Type',
'text': 'org.apache.cassandra.db.marshal.UTF8Type',
'uuid': 'org.apache.cassandra.db.marshal.UUIDType',
UUID: 'org.apache.cassandra.db.marshal.UUIDType',
float: 'org.apache.cassandra.db.marshal.DoubleType',
'double': 'org.apache.cassandra.db.marshal.DoubleType',
unicode: 'org.apache.cassandra.db.marshal.UTF8Type',
'boolean': 'org.apache.cassandra.db.marshal.BooleanType',
bool: 'org.apache.cassandra.db.marshal.BooleanType',
int: 'org.apache.cassandra.db.marshal.Int32Type',
long: 'org.apache.cassandra.db.marshal.LongType',
'bigint': 'org.apache.cassandra.db.marshal.LongType',
date: 'org.apache.cassandra.db.marshal.DateType',
'decimal': 'org.apache.cassandra.db.marshal.DecimalType',
'timestamp': 'org.apache.cassandra.db.marshal.TimestampType',
'varint': 'org.apache.cassandra.db.marshal.IntegerType',
'timeuuid': 'org.apache.cassandra.db.marshal.TimeUUIDType',
'ascii': 'org.apache.cassandra.db.marshal.AsciiType',
'blob': 'org.apache.cassandra.db.marshal.BytesType',
'counter': 'org.apache.cassandra.db.marshal.CounterColumnType'
}.get(s, s)
|
|
import datetime
from psoc_4m_base import *
class PSOC_4M_MOISTURE_UNIT(PSOC_BASE_4M):
def __init__(self,instrument):
self.system_id = 0x201
PSOC_BASE_4M.__init__( self, instrument, self.system_id)
        # additional write address definitions
self.check_one_wire_presence_addr = 27
self.make_soil_temperature_addr = 28
self.make_air_temp_humidity_addr = 29
self.force_moisture_reading_addr = 30
self.update_moisture_sensor_configuration_addr = 31
self.update_flash_addr = 33
self.clear_moisture_flag_addr = 34
self.sensor_length = 16
self.new_measurement_flag_start = 20
self.new_measurement_flag_list = [ "NEW_MOISTURE_DATA_FLAG"]
# status
self.status_start = 13
self.status_list = [
"ONE_WIRE_DEVICE_FOUND",
"NEW_MOISTURE_DATA_FLAG"
]
self.moisture_control_start = 15
self.moisture_control_list = [
"AIR_HUMIDITY_FLOAT" ,
"AIR_TEMP_FLOAT",
"MOISTURE_SOIL_TEMP_FLOAT",
"RESISTOR_FLOAT",
]
self.capacitance_mask_start = 23
self.capacitance_mask_list = [ "CAPACITANCE_MASK"]
# Moisture Data
self.moisture_data_start = 30
self.moisture_data_number = 16
self.moisture_data_resistive_start = 70
self.moisture_resistive_configuration_number = 16
# Moisture Configuration Data
self.moisture_configuration_start = 110
self.moisture_configuration_number = 16
#
#
# Read Variables
#
#
def check_status( self, address):
return_value = {}
data = self.instrument.read_registers( address, self.status_start, len(self.status_list) )
for i in range(0,len(self.status_list)):
return_value[ self.status_list[i] ] = data[i]
return return_value
def read_moisture_control(self, address ):
return_value = {}
data = self.instrument.read_floats( address, self.moisture_control_start, len(self.moisture_control_list) )
for i in range(0,len(self.moisture_control_list)):
return_value[ self.moisture_control_list[i] ] = data[i]
return return_value
    def read_moisture_data( self, address ):
        # read the capacitive moisture readings; the count comes from the data
        # block, not the configuration block
        data = self.instrument.read_floats( address, self.moisture_data_start, self.moisture_data_number )
        return data
    def read_moisture_resistive_data( self, address ):
        data = self.instrument.read_floats( address, self.moisture_data_resistive_start, self.moisture_resistive_configuration_number )
        return data
    def read_moisture_configuration( self, address ):
        data = self.instrument.read_registers( address, self.moisture_configuration_start, self.moisture_configuration_number )
        return data
    def check_one_wire_presence( self, address ):  # trigger a scan for one-wire devices
self.instrument.write_registers(address, self.check_one_wire_presence_addr, [0] )
    def make_soil_temperature( self, address ):  # trigger a soil temperature measurement
self.instrument.write_registers(address, self.make_soil_temperature_addr, [0] )
    def make_air_temp_humidity( self, address ):  # trigger an air temperature/humidity measurement
self.instrument.write_registers(address, self.make_air_temp_humidity_addr, [0] )
def clear_new_moisture_data_flag( self, address):
self.instrument.write_registers( address, self.clear_moisture_flag_addr, [0] )
    def force_moisture_reading( self, address ):  # trigger an immediate moisture reading
self.instrument.write_registers(address, self.force_moisture_reading_addr, [0] )
def update_moisture_sensor_configuration ( self,address, sensor_data ): # sensor data consisting of 0,1,2
        if len( sensor_data ) != self.sensor_length:
            raise ValueError( "sensor_data must contain %d entries" % self.sensor_length )
        valid_data = set([0, 1, 2])
        for i in sensor_data:
            if i not in valid_data:
                raise ValueError( "sensor_data entries must be 0, 1, or 2" )
self.instrument.write_registers( address, self.update_moisture_sensor_configuration_addr ,sensor_data )
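# Illustrative helper (not used by the manual test below): one way to drive a
# complete reading cycle for a unit.  The one-second settle delay mirrors the
# test code that follows and is an assumption, not a documented requirement.
def poll_moisture_unit( psoc, address ):
    import time
    psoc.force_moisture_reading( address )
    time.sleep( 1.0 )
    return {
        "status"     : psoc.check_status( address ),
        "control"    : psoc.read_moisture_control( address ),
        "capacitive" : psoc.read_moisture_data( address ),
        "resistive"  : psoc.read_moisture_resistive_data( address ),
    }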
if __name__ == "__main__":
import new_instrument
import time
new_instrument = new_instrument.new_instrument( "COM4" )
psoc_moisture = PSOC_4M_MOISTURE_UNIT( new_instrument )
#psoc_moisture.update_current_time( 40 )
print psoc_moisture.clear_new_moisture_data_flag(40)
print psoc_moisture.check_status(40)
print psoc_moisture.check_one_wire_presence(40)
time.sleep(.3)
print psoc_moisture.make_soil_temperature(40)
time.sleep(.3)
print psoc_moisture.make_air_temp_humidity(40)
time.sleep(.3)
print psoc_moisture.make_air_temp_humidity(40)
time.sleep(.3)
# test read functions first
print psoc_moisture.check_status(40)
print psoc_moisture.read_moisture_control(40)
print psoc_moisture.read_moisture_configuration( 40 )
print psoc_moisture.force_moisture_reading(40)
time.sleep(1.)
print psoc_moisture.read_moisture_data(40)
print psoc_moisture.read_moisture_resistive_data(40)
quit()
#print psoc_moisture.force_moisture_reading(40)
time.sleep(1)
print psoc_moisture.read_moisture_resistive_data( 40 )
print psoc_moisture.read_moisture_data(40)
print psoc_moisture.check_status(40)
'''
# test directed actions
#psoc_moisture.check_one_wire_presence(40)
#psoc_moisture.make_soil_temperature(40)
psoc_moisture.force_moisture_reading(40)
'''
'''
print "new_data_flag",psoc_moisture.check_new_data_flag( 40)
print "capacitance_mask", psoc_moisture.read_capacitor_mask(40)
print psoc_moisture.read_moisture_control( 40 )
print psoc_moisture.read_moisture_configuration( 40 )
psoc_moisture.change_capacitance_sensor_mask( 40, 0xf)
psoc_moisture. update_moisture_sensor_configuration ( 40,[ 2,1,1,1,1,0,0,0] )
psoc_moisture.update_flash(40)
print "capacitance_mask", psoc_moisture.read_capacitor_mask(40)
print psoc_moisture.read_moisture_control( 40 )
print psoc_moisture.read_moisture_configuration( 40 )
print "force moisture measurement", psoc_moisture.force_moisture_reading(40)
quit()
print psoc_moisture.read_moisture_data(40)
'''
|
|
"""
:codeauthor: Nicole Thomas <nicole@saltstack.com>
"""
import os
import pytest
import salt.utils.files
from salt.exceptions import CommandExecutionError
from tests.support.case import ModuleCase
from tests.support.helpers import random_string, runs_on
# Create user strings for tests
ADD_USER = random_string("RS-", lowercase=False)
DEL_USER = random_string("RS-", lowercase=False)
PRIMARY_GROUP_USER = random_string("RS-", lowercase=False)
CHANGE_USER = random_string("RS-", lowercase=False)
@pytest.mark.skip_if_not_root
@runs_on(kernel="Darwin")
@pytest.mark.destructive_test
class MacUserModuleTest(ModuleCase):
"""
Integration tests for the mac_user module
"""
def setUp(self):
"""
Sets up test requirements
"""
super().setUp()
os_grain = self.run_function("grains.item", ["kernel"])
if os_grain["kernel"] not in "Darwin":
self.skipTest("Test not applicable to '{kernel}' kernel".format(**os_grain))
@pytest.mark.slow_test
def test_mac_user_add(self):
"""
Tests the add function
"""
try:
self.run_function("user.add", [ADD_USER])
user_info = self.run_function("user.info", [ADD_USER])
self.assertEqual(ADD_USER, user_info["name"])
except CommandExecutionError:
self.run_function("user.delete", [ADD_USER])
raise
@pytest.mark.slow_test
def test_mac_user_delete(self):
"""
Tests the delete function
"""
# Create a user to delete - If unsuccessful, skip the test
if self.run_function("user.add", [DEL_USER]) is not True:
self.run_function("user.delete", [DEL_USER])
self.skipTest("Failed to create a user to delete")
# Now try to delete the added user
ret = self.run_function("user.delete", [DEL_USER])
self.assertTrue(ret)
@pytest.mark.slow_test
def test_mac_user_primary_group(self):
"""
Tests the primary_group function
"""
# Create a user to test primary group function
if self.run_function("user.add", [PRIMARY_GROUP_USER]) is not True:
self.run_function("user.delete", [PRIMARY_GROUP_USER])
self.skipTest("Failed to create a user")
try:
# Test mac_user.primary_group
primary_group = self.run_function(
"user.primary_group", [PRIMARY_GROUP_USER]
)
uid_info = self.run_function("user.info", [PRIMARY_GROUP_USER])
self.assertIn(primary_group, uid_info["groups"])
except AssertionError:
self.run_function("user.delete", [PRIMARY_GROUP_USER])
raise
@pytest.mark.slow_test
def test_mac_user_changes(self):
"""
Tests mac_user functions that change user properties
"""
# Create a user to manipulate - if unsuccessful, skip the test
if self.run_function("user.add", [CHANGE_USER]) is not True:
self.run_function("user.delete", [CHANGE_USER])
self.skipTest("Failed to create a user")
try:
# Test mac_user.chuid
self.run_function("user.chuid", [CHANGE_USER, 4376])
uid_info = self.run_function("user.info", [CHANGE_USER])
self.assertEqual(uid_info["uid"], 4376)
# Test mac_user.chgid
self.run_function("user.chgid", [CHANGE_USER, 4376])
gid_info = self.run_function("user.info", [CHANGE_USER])
self.assertEqual(gid_info["gid"], 4376)
# Test mac.user.chshell
self.run_function("user.chshell", [CHANGE_USER, "/bin/zsh"])
shell_info = self.run_function("user.info", [CHANGE_USER])
self.assertEqual(shell_info["shell"], "/bin/zsh")
# Test mac_user.chhome
self.run_function("user.chhome", [CHANGE_USER, "/Users/foo"])
home_info = self.run_function("user.info", [CHANGE_USER])
self.assertEqual(home_info["home"], "/Users/foo")
# Test mac_user.chfullname
self.run_function("user.chfullname", [CHANGE_USER, "Foo Bar"])
fullname_info = self.run_function("user.info", [CHANGE_USER])
self.assertEqual(fullname_info["fullname"], "Foo Bar")
# Test mac_user.chgroups
self.run_function("user.chgroups", [CHANGE_USER, "wheel"])
groups_info = self.run_function("user.info", [CHANGE_USER])
self.assertEqual(groups_info["groups"], ["wheel"])
except AssertionError:
self.run_function("user.delete", [CHANGE_USER])
raise
@pytest.mark.slow_test
def test_mac_user_enable_auto_login(self):
"""
Tests mac_user functions that enable auto login
"""
# Make sure auto login is disabled before we start
if self.run_function("user.get_auto_login"):
self.skipTest("Auto login already enabled")
try:
# Does enable return True
self.assertTrue(
self.run_function(
"user.enable_auto_login", ["Spongebob", "Squarepants"]
)
)
# Did it set the user entry in the plist file
self.assertEqual(self.run_function("user.get_auto_login"), "Spongebob")
# Did it generate the `/etc/kcpassword` file
self.assertTrue(os.path.exists("/etc/kcpassword"))
# Are the contents of the file correct
test_data = b".\xc3\xb8'B\xc2\xa0\xc3\x99\xc2\xad\xc2\x8b\xc3\x8d\xc3\x8dl"
with salt.utils.files.fopen("/etc/kcpassword", "rb") as f:
file_data = f.read()
self.assertEqual(test_data, file_data)
# Does disable return True
self.assertTrue(self.run_function("user.disable_auto_login"))
# Does it remove the user entry in the plist file
self.assertFalse(self.run_function("user.get_auto_login"))
# Is the `/etc/kcpassword` file removed
self.assertFalse(os.path.exists("/etc/kcpassword"))
finally:
# Make sure auto_login is disabled
self.assertTrue(self.run_function("user.disable_auto_login"))
# Make sure autologin is disabled
if self.run_function("user.get_auto_login"):
raise Exception("Failed to disable auto login")
@pytest.mark.slow_test
def test_mac_user_disable_auto_login(self):
"""
Tests mac_user functions that disable auto login
"""
# Make sure auto login is enabled before we start
# Is there an existing setting
if self.run_function("user.get_auto_login"):
self.skipTest("Auto login already enabled")
try:
# Enable auto login for the test
self.run_function("user.enable_auto_login", ["Spongebob", "Squarepants"])
# Make sure auto login got set up
if not self.run_function("user.get_auto_login") == "Spongebob":
raise Exception("Failed to enable auto login")
# Does disable return True
self.assertTrue(self.run_function("user.disable_auto_login"))
# Does it remove the user entry in the plist file
self.assertFalse(self.run_function("user.get_auto_login"))
# Is the `/etc/kcpassword` file removed
self.assertFalse(os.path.exists("/etc/kcpassword"))
finally:
# Make sure auto login is disabled
self.assertTrue(self.run_function("user.disable_auto_login"))
# Make sure auto login is disabled
if self.run_function("user.get_auto_login"):
raise Exception("Failed to disable auto login")
def tearDown(self):
"""
Clean up after tests
"""
# Delete ADD_USER
add_info = self.run_function("user.info", [ADD_USER])
if add_info:
self.run_function("user.delete", [ADD_USER])
# Delete DEL_USER if something failed
del_info = self.run_function("user.info", [DEL_USER])
if del_info:
self.run_function("user.delete", [DEL_USER])
# Delete CHANGE_USER
change_info = self.run_function("user.info", [CHANGE_USER])
if change_info:
self.run_function("user.delete", [CHANGE_USER])
|
|
#!/usr/bin/env python3
#
# Copyright 2016 Red Hat, Inc.
#
# Authors:
# Fam Zheng <famz@redhat.com>
#
# This work is licensed under the MIT License. Please see the LICENSE file or
# http://opensource.org/licenses/MIT.
from django.conf.urls import url
from django.http import HttpResponseForbidden, Http404, HttpResponseRedirect
from django.core.exceptions import PermissionDenied
from django.db.models import Q
from django.urls import reverse
from django.utils.html import format_html
from mod import PatchewModule
import datetime
import time
import math
from api.views import APILoginRequiredView
from api.models import Message, MessageResult, Project, ProjectResult, Result
from api.rest import PluginMethodField, TestPermission, reverse_detail
from api.search import SearchEngine
from event import emit_event, declare_event, register_handler
from patchew.logviewer import LogView
import schema
from rest_framework import serializers, generics
from rest_framework.fields import CharField, BooleanField
from rest_framework.response import Response
_instance = None
TESTING_SCRIPT_DEFAULT = """#!/bin/bash
# Testing script will be invoked under the git checkout with
# HEAD pointing to a commit that has the patches applied on top of "base"
# branch
exit 0
"""
class TestingLogViewer(LogView):
def get_result(self, request, **kwargs):
project_or_series = kwargs["project_or_series"]
testing_name = kwargs["testing_name"]
if request.GET.get("type") == "project":
obj = Project.objects.filter(name=project_or_series).first()
else:
obj = Message.objects.find_series(project_or_series)
if not obj:
raise Http404("Object not found: " + project_or_series)
return _instance.get_testing_result(obj, testing_name)
class ResultDataSerializer(serializers.Serializer):
# TODO: is_timeout should be present iff the result is a failure
is_timeout = BooleanField(required=False)
head = CharField()
tester = CharField(default=serializers.CurrentUserDefault())
class TestingModule(PatchewModule):
"""Testing module"""
name = "testing"
allowed_groups = ("testers",)
result_data_serializer_class = ResultDataSerializer
test_schema = schema.ArraySchema(
"{name}",
"Test",
desc="Test spec",
members=[
schema.BooleanSchema(
"enabled", "Enabled", desc="Whether this test is enabled", default=True
),
schema.StringSchema(
"requirements", "Requirements", desc="List of requirements of the test"
),
schema.IntegerSchema(
"timeout", "Timeout", default=3600, desc="Timeout for the test"
),
schema.StringSchema(
"script",
"Test script",
desc="The testing script",
default=TESTING_SCRIPT_DEFAULT,
multiline=True,
required=True,
),
],
)
requirement_schema = schema.ArraySchema(
"{name}",
"Requirement",
desc="Test requirement spec",
members=[
schema.StringSchema(
"script",
"Probe script",
desc="The probing script for this requirement",
default="#!/bin/bash\ntrue",
multiline=True,
required=True,
)
],
)
project_config_schema = schema.ArraySchema(
"testing",
desc="Configuration for testing module",
members=[
schema.MapSchema("tests", "Tests", desc="Testing specs", item=test_schema),
schema.MapSchema(
"requirements",
"Requirements",
desc="Requirement specs",
item=requirement_schema,
),
],
)
def __init__(self):
global _instance
        assert _instance is None
_instance = self
declare_event(
"TestingReport",
user="the user's name that runs this tester",
tester="the name of the tester",
obj="the object (series or project) which the test is for",
passed="True if the test is passed",
test="test name",
log="test log",
log_url="URL to test log (text)",
html_log_url="URL to test log (HTML)",
is_timeout="whether the test has timeout",
)
register_handler("SetProperty", self.on_set_property)
register_handler("SetProjectConfig", self.on_set_config)
register_handler("ResultUpdate", self.on_result_update)
def on_set_property(self, evt, obj, name, value, old_value):
if isinstance(obj, Project) and name == "git.head" and old_value != value:
self.clear_and_start_testing(obj)
def on_set_config(self, evt, obj):
self.project_recalc_pending_tests(obj)
def get_msg_base_tags(self, msg):
return [t for t in msg.tags if t.lower().startswith("based-on:")]
def on_result_update(self, evt, obj, old_status, result):
if result.name.startswith("testing.") and result.status != old_status:
if "tester" in result.data:
po = obj if isinstance(obj, Project) else obj.project
_instance.tester_check_in(po, result.data["tester"])
if not self.get_testing_results(
obj, status__in=(Result.PENDING, Result.RUNNING)
).exists():
obj.set_property("testing.tested-head", result.data["head"])
if isinstance(obj, Message):
obj.is_tested = True
obj.save()
obj.set_property("testing.tested-base", self.get_msg_base_tags(obj))
if isinstance(obj, Project):
# cache the last result so that badges are not affected by RUNNING state
failures = obj.get_property("testing.failures", [])
if result.status == result.SUCCESS and result.name in failures:
failures.remove(result.name)
obj.set_property("testing.failures", list(failures))
if result.status == result.FAILURE and result.name not in failures:
failures.append(result.name)
obj.set_property("testing.failures", list(failures))
if result.name != "git":
return
if (
isinstance(obj, Message)
and obj.is_series_head
and result.status == result.SUCCESS
and result.data.get("tag")
and result.data.get("repo")
):
tested_base = obj.get_property("testing.tested-base")
if tested_base is None or tested_base != self.get_msg_base_tags(obj):
self.clear_and_start_testing(obj)
def filter_testing_results(self, queryset, *args, **kwargs):
return queryset.filter(name__startswith="testing.", *args, **kwargs)
def get_testing_results(self, obj, *args, **kwargs):
return self.filter_testing_results(obj.results, *args, **kwargs)
def get_testing_result(self, obj, name):
try:
return obj.results.get(name="testing." + name)
        except Exception:
raise Http404("Test doesn't exist")
def get_test_name(self, result):
return result.name[len("testing.") :]
def recalc_pending_tests(self, obj):
test_dict = self.get_tests(obj)
all_tests = set((k for k, v in test_dict.items() if v.get("enabled", False)))
for r in self.get_testing_results(obj, status=Result.PENDING):
r.delete()
if len(all_tests):
done_tests = [self.get_test_name(r) for r in self.get_testing_results(obj)]
for tn in all_tests:
                if tn not in done_tests:
obj.create_result(
name="testing." + tn, status=Result.PENDING
).save()
if isinstance(obj, Message):
is_tested = len(all_tests) and len(done_tests) == len(all_tests)
if is_tested != obj.is_tested:
obj.is_tested = is_tested
obj.save()
def project_recalc_pending_tests(self, project):
self.recalc_pending_tests(project)
# Only operate on messages for which testing has not completed yet.
message_ids = self.filter_testing_results(
MessageResult.objects, message__project=project, status=Result.PENDING
).values("message_id")
messages = Message.objects.filter(id__in=message_ids)
for obj in messages:
self.recalc_pending_tests(obj)
def clear_and_start_testing(self, obj, test=""):
obj.set_property("testing.tested-head", None)
if isinstance(obj, Message):
obj.is_tested = False
obj.save()
if test:
r = self.get_testing_result(obj, test)
if r:
r.delete()
else:
for r in self.get_testing_results(obj):
r.delete()
self.recalc_pending_tests(obj)
def www_view_testing_reset(self, request, project_or_series):
if not request.user.is_authenticated:
return HttpResponseForbidden()
if request.GET.get("type") == "project":
obj = Project.objects.filter(name=project_or_series).first()
if not obj.maintained_by(request.user):
raise PermissionDenied()
else:
obj = Message.objects.find_series(project_or_series)
if not obj:
raise Http404("Not found: " + project_or_series)
self.clear_and_start_testing(obj, request.GET.get("test", ""))
return HttpResponseRedirect(request.META.get("HTTP_REFERER"))
    def www_view_badge(self, request, project, ext):
        po = Project.objects.filter(name=project).first()
        if not po:
            raise Http404("Project not found: " + project)
        if po.get_property("testing.failures"):
return HttpResponseRedirect(
"https://img.shields.io/badge/patchew-failing-critical." + ext
)
else:
return HttpResponseRedirect(
"https://img.shields.io/badge/patchew-passing-success." + ext
)
def www_url_hook(self, urlpatterns):
urlpatterns.append(
url(
r"^testing-reset/(?P<project_or_series>.*)/",
self.www_view_testing_reset,
name="testing-reset",
)
)
urlpatterns.append(
url(
r"^logs/(?P<project_or_series>.*)/testing.(?P<testing_name>.*)/",
TestingLogViewer.as_view(),
name="testing-log",
)
)
urlpatterns.append(
url(
r"^(?P<project>[^/]*)/badge.(?P<ext>svg|png)$",
self.www_view_badge,
name="testing-badge",
)
)
def api_url_hook(self, urlpatterns):
urlpatterns.append(
url(
r"^v1/projects/(?P<pk>[^/]*)/get-test/$",
GetTestView.as_view(),
name="get-test",
)
)
def reverse_testing_log(self, obj, test, request=None, html=False):
if isinstance(obj, Message):
log_url = (
reverse(
"testing-log",
kwargs={"project_or_series": obj.message_id, "testing_name": test},
)
+ "?type=message"
)
else:
assert isinstance(obj, Project)
log_url = (
reverse(
"testing-log",
kwargs={"project_or_series": obj.name, "testing_name": test},
)
+ "?type=project"
)
if html:
log_url += "&html=1"
# Generate a full URL, including the host and port, for use in
# email notifications
if request:
log_url = request.build_absolute_uri(log_url)
return log_url
def add_test_report(
self,
request,
project,
tester,
test,
head,
base,
identity,
passed,
log,
is_timeout,
):
# Find a project or series depending on the test type and assign it to obj
if identity["type"] == "project":
obj = Project.objects.get(name=project)
project = obj.name
elif identity["type"] == "series":
message_id = identity["message-id"]
obj = Message.objects.find_series(message_id, project)
if not obj:
raise Exception("Series doesn't exist")
project = obj.project.name
user = request.user
r = self.get_testing_result(obj, test)
r.data = {
"is_timeout": is_timeout,
"user": user.username,
"head": head,
"tester": tester or user.username,
}
r.log = log
r.status = Result.SUCCESS if passed else Result.FAILURE
r.save()
log_url = self.reverse_testing_log(obj, test, request=request)
html_log_url = self.reverse_testing_log(obj, test, request=request, html=True)
emit_event(
"TestingReport",
tester=tester,
user=user.username,
obj=obj,
passed=passed,
test=test,
log=log,
log_url=log_url,
html_log_url=html_log_url,
is_timeout=is_timeout,
)
def get_tests(self, obj):
if isinstance(obj, Message):
obj = obj.project
return self.get_project_config(obj).get("tests", {})
def _build_reset_ops(self, obj):
if isinstance(obj, Message):
typearg = "type=message"
url = reverse("testing-reset", kwargs={"project_or_series": obj.message_id})
else:
assert isinstance(obj, Project)
url = reverse("testing-reset", kwargs={"project_or_series": obj.name})
typearg = "type=project"
url += "?" + typearg
ret = [
{
"url": url,
"title": "Reset all testing states",
"class": "warning",
"icon": "refresh",
}
]
for r in self.get_testing_results(obj, ~Q(status=Result.PENDING)):
tn = self.get_test_name(r)
ret.append(
{
"url": url + "&test=" + tn,
"title": format_html("Reset <b>{}</b> testing state", tn),
"class": "warning",
"icon": "refresh",
}
)
return ret
def prepare_message_hook(self, request, message, detailed):
if not message.is_series_head:
return
if (
message.project.maintained_by(request.user)
and self.get_testing_results(message, ~Q(status=Result.PENDING)).exists()
):
message.extra_ops += self._build_reset_ops(message)
if self.get_testing_results(message, status=Result.FAILURE).exists():
message.status_tags.append(
{
"title": "Testing failed",
"url": reverse(
"series_detail",
kwargs={
"project": message.project.name,
"message_id": message.message_id,
},
),
"type": "danger",
"char": "T",
}
)
elif message.is_tested:
message.status_tags.append(
{
"title": "Testing passed",
"url": reverse(
"series_detail",
kwargs={
"project": message.project.name,
"message_id": message.message_id,
},
),
"type": "success",
"char": "T",
}
)
def get_result_log_url(self, result):
tn = result.name[len("testing.") :]
return self.reverse_testing_log(result.obj, tn, html=False)
def render_result(self, result):
if not result.is_completed():
return None
pn = result.name
tn = pn[len("testing.") :]
log_url = result.get_log_url()
html_log_url = log_url + "&html=1"
passed_str = "failed" if result.is_failure() else "passed"
return format_html(
'Test <b>{}</b> <a class="cbox-log" data-link="{}" href="{}">{}</a>',
tn,
html_log_url,
log_url,
passed_str,
)
def check_active_testers(self, project):
at = []
for tn, v in project.get_property("testing.check_in", {}).items():
age = time.time() - v
if age > 10 * 60:
continue
at.append("%s (%dmin)" % (tn, math.ceil(age / 60)))
if not at:
return
project.extra_status.append(
{"icon": "fa-refresh fa-spin", "html": "Active testers: " + ", ".join(at)}
)
def prepare_project_hook(self, request, project):
if not project.maintained_by(request.user):
return
project.extra_info.append(
{
"title": "Testing configuration",
"class": "info",
"content_html": self.build_config_html(request, project),
}
)
self.check_active_testers(project)
if (
project.maintained_by(request.user)
and self.get_testing_results(project, ~Q(status=Result.PENDING)).exists()
):
project.extra_ops += self._build_reset_ops(project)
def get_capability_probes(self, project):
props = self.get_project_config(project).get("requirements", {})
return {k: v["script"] for k, v in props.items()}
def get_testing_probes(self, project, request, format):
return self.get_capability_probes(project)
def rest_project_fields_hook(self, request, fields):
fields["testing_probes"] = PluginMethodField(obj=self)
def tester_check_in(self, project, tester):
assert project
assert tester
po = Project.objects.filter(name=project).first()
if not po:
return
po.set_property("testing.check_in." + tester, time.time())
class GetTestViewMixin(object):
def _generate_series_test_data(self, request, s, result, test):
gr = s.git_result
assert gr.is_success()
return self._generate_test_data(
project=s.project.name,
repo=gr.data["repo"],
head=gr.data["tag"],
base=gr.data.get("base", None),
identity={
"type": "series",
"message-id": s.message_id,
"subject": s.subject,
},
result_uri=reverse_detail(result, request),
test=test,
)
def _generate_project_test_data(
self, request, project, repo, head, base, result, test
):
return self._generate_test_data(
project=project,
repo=repo,
head=head,
base=base,
identity={"type": "project", "head": head},
result_uri=reverse_detail(result, request),
test=test,
)
def _find_applicable_test(self, queryset, user, po, tester, capabilities):
# Prefer non-running tests, or tests that started the earliest
one_hour_ago = datetime.datetime.now() - datetime.timedelta(0, 3600)
where = Q(status=Result.PENDING)
where = where | Q(status=Result.RUNNING, last_update__lt=one_hour_ago)
where = where & Q(name__startswith="testing.")
q = queryset.filter(where).order_by("status", "last_update")
tests = _instance.get_tests(po)
for r in q:
if isinstance(r, MessageResult) and not r.message.git_result.is_success():
continue
tn = _instance.get_test_name(r)
t = tests.get(tn, None)
# Shouldn't happen, but let's protect against it
if not t:
continue
reqs = t.get("requirements", "")
for req in [x.strip() for x in reqs.split(",") if x]:
if req not in capabilities:
break
else:
t["name"] = tn
yield r, t
def _find_project_test(self, request, po, tester, capabilities):
head = po.get_property("git.head")
repo = po.git
tested = po.get_property("testing.tested-head")
if not head or not repo:
return None
candidates = self._find_applicable_test(
ProjectResult.objects.filter(project=po),
request.user,
po,
tester,
capabilities,
)
for r, test in candidates:
td = self._generate_project_test_data(
request, po.name, repo, head, tested, r, test
)
return r, po, td
return None
def _find_series_test(self, request, po, tester, capabilities):
candidates = self._find_applicable_test(
MessageResult.objects.filter(message__project=po),
request.user,
po,
tester,
capabilities,
)
for r, test in candidates:
s = r.message
td = self._generate_series_test_data(request, s, r, test)
return r, s, td
return None
def _do_testing_get(self, request, po, tester, capabilities):
# Try project head test first
_instance.tester_check_in(po.name, tester or request.user.username)
candidate = self._find_project_test(request, po, tester, capabilities)
if not candidate:
candidate = self._find_series_test(request, po, tester, capabilities)
if not candidate:
return None
r, obj, test_data = candidate
r.status = Result.RUNNING
r.save()
return test_data
class TestingGetView(APILoginRequiredView, GetTestViewMixin):
name = "testing-get"
allowed_groups = ["testers"]
def _generate_test_data(
self, project, repo, head, base, identity, result_uri, test
):
r = {
"project": project,
"repo": repo,
"head": head,
"base": base,
"test": test,
"identity": identity,
"result_uri": result_uri,
}
return r
def handle(self, request, project, tester, capabilities):
po = Project.objects.get(name=project)
return self._do_testing_get(request, po, tester, capabilities)
class GetTestView(generics.GenericAPIView, GetTestViewMixin):
queryset = Project.objects.all()
permission_classes = (TestPermission,)
def _generate_test_data(
self, project, repo, head, base, identity, result_uri, test
):
r = {
"repo": repo,
"head": head,
"base": base,
"test": test,
"identity": identity,
"result_uri": result_uri,
}
return r
def post(self, request, *args, **kwargs):
tester = request.data.get("tester", "")
capabilities = request.data.get("capabilities", [])
test_data = self._do_testing_get(
request, self.get_object(), tester, capabilities
)
if test_data:
return Response(test_data)
else:
return Response(status=204)
class TestingReportView(APILoginRequiredView):
name = "testing-report"
allowed_groups = ["testers"]
def handle(
self,
request,
tester,
project,
test,
head,
base,
passed,
log,
identity,
is_timeout=False,
):
_instance.add_test_report(
request,
project,
tester,
test,
head,
base,
identity,
passed,
log,
is_timeout,
)
class TestingCapabilitiesView(APILoginRequiredView):
name = "testing-capabilities"
allowed_groups = ["testers"]
def handle(self, request, tester, project):
_instance.tester_check_in(project, tester or request.user.username)
po = Project.objects.filter(name=project).first()
if not po:
raise Http404("Project '%s' not found" % project)
probes = _instance.get_capability_probes(po)
return probes
class UntestView(APILoginRequiredView):
name = "untest"
allowed_groups = ["testers"]
def handle(self, request, terms):
se = SearchEngine()
q = se.search_series(user=request.user, *terms)
for s in q:
_instance.clear_and_start_testing(s)
|
|
"""Support for tracking consumption over given periods of time."""
from datetime import timedelta
import logging
from croniter import croniter
import voluptuous as vol
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.const import CONF_NAME
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.restore_state import RestoreEntity
from .const import (
ATTR_TARIFF,
CONF_CRON_PATTERN,
CONF_METER,
CONF_METER_NET_CONSUMPTION,
CONF_METER_OFFSET,
CONF_METER_TYPE,
CONF_SOURCE_SENSOR,
CONF_TARIFF,
CONF_TARIFF_ENTITY,
CONF_TARIFFS,
DATA_TARIFF_SENSORS,
DATA_UTILITY,
DOMAIN,
METER_TYPES,
SERVICE_RESET,
SERVICE_SELECT_NEXT_TARIFF,
SERVICE_SELECT_TARIFF,
SIGNAL_RESET_METER,
)
_LOGGER = logging.getLogger(__name__)
TARIFF_ICON = "mdi:clock-outline"
ATTR_TARIFFS = "tariffs"
DEFAULT_OFFSET = timedelta(hours=0)
def validate_cron_pattern(pattern):
"""Check that the pattern is well-formed."""
if croniter.is_valid(pattern):
return pattern
raise vol.Invalid("Invalid pattern")
def period_or_cron(config):
"""Check that if cron pattern is used, then meter type and offsite must be removed."""
if CONF_CRON_PATTERN in config and CONF_METER_TYPE in config:
raise vol.Invalid(f"Use <{CONF_CRON_PATTERN}> or <{CONF_METER_TYPE}>")
if (
CONF_CRON_PATTERN in config
and CONF_METER_OFFSET in config
and config[CONF_METER_OFFSET] != DEFAULT_OFFSET
):
raise vol.Invalid(
f"When <{CONF_CRON_PATTERN}> is used <{CONF_METER_OFFSET}> has no meaning"
)
return config
METER_CONFIG_SCHEMA = vol.Schema(
vol.All(
{
vol.Required(CONF_SOURCE_SENSOR): cv.entity_id,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_METER_TYPE): vol.In(METER_TYPES),
vol.Optional(CONF_METER_OFFSET, default=DEFAULT_OFFSET): vol.All(
cv.time_period, cv.positive_timedelta
),
vol.Optional(CONF_METER_NET_CONSUMPTION, default=False): cv.boolean,
vol.Optional(CONF_TARIFFS, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
vol.Optional(CONF_CRON_PATTERN): validate_cron_pattern,
},
period_or_cron,
)
)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.Schema({cv.slug: METER_CONFIG_SCHEMA})}, extra=vol.ALLOW_EXTRA
)
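# A minimal sketch of a mapping accepted by METER_CONFIG_SCHEMA above; the
# entity id and tariff names are illustrative, and "monthly" is assumed to be
# one of METER_TYPES. CONF_CRON_PATTERN and CONF_METER_TYPE cannot be combined
# because period_or_cron rejects that combination:
#
#   METER_CONFIG_SCHEMA({
#       CONF_SOURCE_SENSOR: "sensor.energy",
#       CONF_METER_TYPE: "monthly",
#       CONF_TARIFFS: ["peak", "offpeak"],
#   })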
async def async_setup(hass, config):
"""Set up an Utility Meter."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
hass.data[DATA_UTILITY] = {}
register_services = False
for meter, conf in config.get(DOMAIN).items():
_LOGGER.debug("Setup %s.%s", DOMAIN, meter)
hass.data[DATA_UTILITY][meter] = conf
hass.data[DATA_UTILITY][meter][DATA_TARIFF_SENSORS] = []
if not conf[CONF_TARIFFS]:
# only one entity is required
hass.async_create_task(
discovery.async_load_platform(
hass,
SENSOR_DOMAIN,
DOMAIN,
[{CONF_METER: meter, CONF_NAME: conf.get(CONF_NAME, meter)}],
config,
)
)
else:
# create tariff selection
await component.async_add_entities(
[TariffSelect(meter, list(conf[CONF_TARIFFS]))]
)
hass.data[DATA_UTILITY][meter][CONF_TARIFF_ENTITY] = "{}.{}".format(
DOMAIN, meter
)
# add one meter for each tariff
tariff_confs = []
for tariff in conf[CONF_TARIFFS]:
tariff_confs.append(
{
CONF_METER: meter,
CONF_NAME: f"{meter} {tariff}",
CONF_TARIFF: tariff,
}
)
hass.async_create_task(
discovery.async_load_platform(
hass, SENSOR_DOMAIN, DOMAIN, tariff_confs, config
)
)
register_services = True
if register_services:
component.async_register_entity_service(SERVICE_RESET, {}, "async_reset_meters")
component.async_register_entity_service(
SERVICE_SELECT_TARIFF,
{vol.Required(ATTR_TARIFF): cv.string},
"async_select_tariff",
)
component.async_register_entity_service(
SERVICE_SELECT_NEXT_TARIFF, {}, "async_next_tariff"
)
return True
class TariffSelect(RestoreEntity):
"""Representation of a Tariff selector."""
def __init__(self, name, tariffs):
"""Initialize a tariff selector."""
self._name = name
self._current_tariff = None
self._tariffs = tariffs
self._icon = TARIFF_ICON
async def async_added_to_hass(self):
"""Run when entity about to be added."""
await super().async_added_to_hass()
if self._current_tariff is not None:
return
state = await self.async_get_last_state()
if not state or state.state not in self._tariffs:
self._current_tariff = self._tariffs[0]
else:
self._current_tariff = state.state
@property
def should_poll(self):
"""If entity should be polled."""
return False
@property
def name(self):
"""Return the name of the select input."""
return self._name
@property
def icon(self):
"""Return the icon to be used for this entity."""
return self._icon
@property
def state(self):
"""Return the state of the component."""
return self._current_tariff
@property
def extra_state_attributes(self):
"""Return the state attributes."""
return {ATTR_TARIFFS: self._tariffs}
async def async_reset_meters(self):
"""Reset all sensors of this meter."""
_LOGGER.debug("reset meter %s", self.entity_id)
async_dispatcher_send(self.hass, SIGNAL_RESET_METER, self.entity_id)
async def async_select_tariff(self, tariff):
"""Select new option."""
if tariff not in self._tariffs:
_LOGGER.warning(
"Invalid tariff: %s (possible tariffs: %s)",
tariff,
", ".join(self._tariffs),
)
return
self._current_tariff = tariff
self.async_write_ha_state()
async def async_next_tariff(self):
"""Offset current index."""
current_index = self._tariffs.index(self._current_tariff)
new_index = (current_index + 1) % len(self._tariffs)
self._current_tariff = self._tariffs[new_index]
self.async_write_ha_state()
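# Illustrative behaviour of async_next_tariff (tariff names are hypothetical):
# with tariffs ['peak', 'offpeak'] and 'offpeak' currently selected, the next
# call wraps back to 'peak' because the index advances modulo len(self._tariffs).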
|
|
# Copyright (c) 2014, Salesforce.com, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name of Salesforce.com nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from scipy.special import gammaln, betaln
from matplotlib import pyplot
def create_coeff(N, order):
points = 2. ** np.arange(0, N + 1)
coeff = np.zeros((N, order + 1), dtype=np.float32)
for pos in range(N):
left = points[pos]
right = points[pos + 1]
xrange = np.linspace(left, right, 100)
y = gammaln(xrange)
z = np.polyfit(xrange, y, order)
print z.shape
coeff[pos] = z
return coeff
coeff = create_coeff(33, 2)
coeff3 = create_coeff(33, 3)
coeff4 = create_coeff(33, 4)  # order must match gammaln_approx_fourth_order below
coeff5 = create_coeff(33, 5)
def gammaln_approx_second_order(x):
if x < 2.5:
return gammaln(x)
pos = int(np.floor(np.log2(x)))
a = coeff[pos]
yhat = a[2] + a[1] * x + a[0] * x ** 2
return yhat
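# Note on the lookup scheme used by the approximations above and below:
# np.polyfit returns coefficients highest-degree first, so for the order-2 fit
# a = [c2, c1, c0] and the evaluation a[2] + a[1]*x + a[0]*x**2 recovers
# c0 + c1*x + c2*x**2. The segment index floor(log2(x)) selects the
# power-of-two interval [2**pos, 2**(pos + 1)] fitted in create_coeff,
# e.g. x = 10.0 falls in [8, 16] and uses pos = 3 (value illustrative).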
def gammaln_approx_third_order(x):
if x < 2.5:
return gammaln(x)
pos = int(np.floor(np.log2(x)))
a = coeff3[pos]
yhat = a[3] + a[2] * x + a[1] * x ** 2 + a[0] * x ** 3
return yhat
def gammaln_approx_fourth_order(x):
if x < 2.5:
return gammaln(x)
pos = int(np.floor(np.log2(x)))
a = coeff4[pos]
yhat = a[4] + a[3] * x + a[2] * x ** 2 + a[1] * x ** 3 + a[0] * x ** 4
return yhat
def gammaln_approx_fifth_order(x):
# if x < 2.5:
# return gammaln(x)
pos = int(np.floor(np.log2(x)))
a = coeff5[pos]
yhat = a[5] + a[4] * x + a[3] * x ** 2 + \
a[2] * x ** 3 + a[1] * x ** 4 + a[0] * x ** 5
return yhat
def func_test(approx_func, max):
#x = np.array([1000.0, 1001, 1002])
x = np.linspace(0.001, max, 10000)
y = gammaln(x)
yhat = map(approx_func, x)
pyplot.subplot(2, 1, 1)
pyplot.plot(x, y, linewidth=2)
pyplot.plot(x, yhat, color='r')
pyplot.subplot(2, 1, 2)
delta = yhat - y
err_frac = (delta / gammaln(x))
pyplot.plot(x, err_frac * 100)
THOLD = 0.001
accuracy = np.sum(np.abs(err_frac) < THOLD).astype(float) / len(x) * 100
print "accurate", accuracy
pyplot.ylabel('percent error')
pyplot.show()
def test_fifth_order():
func_test(gammaln_approx_fifth_order, 1e2)
def test_gauss():
func_test(gammaln_gauss, 1e2)
def beta(alpha, beta):
gl = gammaln_approx_second_order
return gl(alpha) + gl(beta) - gl(alpha + beta)
def gammaln_gauss(x):
if x < 2.5:
return gammaln(x)
u = x - 0.5
# Stirling-style approximation: ln(2 * pi) / 2 + u * ln(u) - u, with u = x - 0.5.
return np.log(2 * np.pi) / 2 + u * np.log(u) - u
def test_beta():
N = 100
xs = np.linspace(0.01, 1e5, N)
ys = np.linspace(0.01, 1e5, N)
vals = np.zeros((N, N))
errs = np.zeros(N * N)
pos = 0
for xi, x in enumerate(xs):
for yi, y in enumerate(ys):
z = betaln(x, y)
zhat = beta(x, y)
errs[pos] = (zhat - z) / z
vals[xi, yi] = z
pos += 1
pyplot.figure()
pyplot.plot(errs)
# pyplot.imshow(vals)
# pyplot.figure()
## PN = 5
# for i in range(PN):
## pyplot.subplot(1, PN, i+1)
## pyplot.plot(ys, vals[N/ PN * i, :])
## pyplot.title(xs[N/PN * i])
# pyplot.plot(delta)
pyplot.show()
def coeff_gen():
mycoeff = create_coeff(33, 5)
print "const float coeff[] = {",
for a in mycoeff:
for ai in a:
print "%.14e," % ai,
print
print "};"
def lt25test():
x = np.linspace(0.001, 2.5, 1000)
y = gammaln(x)
order = 3
z = np.polyfit(x, y, order)
w = np.poly1d(z)
pyplot.plot(x, y)
pyplot.plot(x, w(x))
pyplot.figure()
delta = np.abs(y - w(x))
print delta
pyplot.plot(x, delta / y * 100)
pyplot.ylabel("percent error")
pyplot.grid(1)
pyplot.show()
def lstudent():
def tgtfunc(x):
return gammaln(x / 2 + 0.5) - gammaln(x / 2)
coeffs = []
pot_range = 2
for pot in np.arange(-4, 32, pot_range):
x = np.linspace(2.0 ** pot, 2.0 ** (pot + pot_range), 1000)
y = tgtfunc(x)
order = 3
z = np.polyfit(x, y, order)
coeffs.append(z)
print z
## w = np.poly1d(z)
## yhat = w(x)
## pyplot.plot(x, y)
## pyplot.plot(x, yhat)
# pyplot.show()
print "const float lgamma_nu_func_approx_coeff3[] = {",
for a in coeffs:
for ai in a:
print "%.14e," % ai,
print
print "};"
|
|
# Copyright 2010-2012 Opera Software ASA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Add new servername entries to an existing run
CSV formats
index, hostname, port, protocol
index, hostname, port # HTTPS assumed
index, hostname # Port 443, HTTPS assumed; line starts with number
hostname, port # HTTPS assumed; line starts with letter; port may be empty
Options:
--input <name> : CSV File containing a list of hosts
--count <num> : Only configure max <num> servers from the source
--run-id <num> : add the new entries to this job
--verbose : Print more information
--testbase2 : use the debug test database for this job
--threads <num> : Use the specified number of threads when inserting the entries for the job
"""
from optparse import OptionParser
import probedb.standalone
import probedb.cluster.models as Cluster
import probedb.probedata2.models as Probe
import probedb.resultdb2.models as Results
import fileinput
from django.db import connection, transaction
import datetime
import threading
from multiprocessing import Process, JoinableQueue as Queue, Lock
def __ProgressCounter(run, queue, threads, options):
i=0
while True:
queue.get()
i += 1
if i%100 == 0:
if options.verbose:
print "Queued ", i, "servers so far. Threads active ", sum([t.is_alive() for t in threads])
queue.task_done()
def SetupQueueThread(tid, run, probe_servers, progress_queue):
connection.close()
try:
while True:
item = probe_servers.get()
if isinstance(item, int):
try:
item = Probe.Server.objects.get(id=item)
except:
probe_servers.task_done()
continue
hostname = item.servername.strip()
if (not hostname or
any([x in hostname for x in " \t%/&#\"'\\{[]}()*,;<>$"]) or
any([ord(x)>=128 or ord(x)<=32 for x in hostname])):
item.enabled = False
item.save()
probe_servers.task_done()
continue
hostname = hostname.strip(".")
while hostname.find("..")>=0:
hostname = hostname.replace("..", ".")
if hostname != item.servername:
item.enabled = False
item.save()
item = "0," + hostname+","+str(item.port) # Convert to string to correct the list
if not isinstance(item, Probe.Server):
hostname_line = item
if not hostname_line.strip():
probe_servers.task_done()
continue
split_line = hostname_line.strip().split(",")
if len(split_line) > 2:
(index, hostname, port) = split_line[:3]
else:
port = ""
(var1, var2) = split_line
if var1.isdigit():
(index, hostname) = (var1, var2)
else:
(hostname, port) = (var1, var2)
hostname = hostname.strip()
if (not hostname or
any([x in hostname for x in " \t%/&#\"'\\{[]}()*,;<>$"]) or
any([ord(x)>=128 or ord(x)<=32 for x in hostname])):
probe_servers.task_done()
continue
hostname = hostname.strip(".")
while hostname.find("..")>=0:
hostname = hostname.replace("..", ".")
if not port:
port = 443
else:
port = int(port)
sn_t = "%s:%05d" % (hostname, port)
(item, created) = Probe.Server.objects.get_or_create(
full_servername = sn_t,
defaults={'enabled':True,
"alexa_rating":0,
"servername":hostname,
"port": port,
}
)
if created:
item.Construct()
if item.enabled:
try:
sid = transaction.savepoint()
run_entry = Probe.ProbeQueue.objects.create(part_of_run=run,server=item,state=Probe.ProbeQueue.PROBEQ_IDLE)
transaction.savepoint_commit(sid)
progress_queue.put(True)
except:
transaction.savepoint_rollback(sid)
pass
probe_servers.task_done()
except:
pass
def setup_queue(options):
probe_servers = Queue()
progress_queue = Queue()
run = Probe.ProbeRun.objects.get(id = options.run_id)
summary_top = Results.ResultSummaryList.objects.get(part_of_run=run)
summary_top.setup()
connection.close()
threads = []
for i in range(options.threads):
new_thread = Process(target=SetupQueueThread, args=(i,run, probe_servers, progress_queue))
new_thread.daemon = True
new_thread.start()
threads.append(new_thread)
progress_thread = threading.Thread(target=__ProgressCounter, args=(run, progress_queue, threads,options))
progress_thread.daemon = True
progress_thread.start()
i = 0
if options.input_filename and (not options.count or i < options.count):
for hostname_line in fileinput.input(options.input_filename, openhook=fileinput.hook_compressed):
probe_servers.put(hostname_line)
i+=1
if options.count and i >= options.count:
break
probe_servers.join()
progress_queue.join()
return run
def main():
options_config = OptionParser()
options_config.add_option("--input", action="store", type="string", dest="input_filename", default="testlist.csv")
options_config.add_option("--testbase2", action="store_true", dest="use_testbase2")
options_config.add_option("--threads", action="store", type="int", dest="threads", default=30)
options_config.add_option("--verbose", action="store_true", dest="verbose")
options_config.add_option("--count", action="store", type="int", dest="count", default=0)
options_config.add_option("--run-id", action="store", type="int", dest="run_id", default=0)
(options, args) = options_config.parse_args()
started = datetime.datetime.now()
run = setup_queue(options)
if options.verbose:
print "Run %d for %s/%s has been updated with more items. Total %d items" %(run.id, run.source_name, run.description, Probe.ProbeQueue.objects.filter(part_of_run=run).count())
ended = datetime.datetime.now()
if options.verbose:
print "Time to run: ", (ended-started)
if __name__ == "__main__":
main()
|
|
import os
import unittest
import time
import pytest
from integration_tests.env_variable_names import (
SLACK_SDK_TEST_INCOMING_WEBHOOK_URL,
SLACK_SDK_TEST_INCOMING_WEBHOOK_CHANNEL_NAME,
SLACK_SDK_TEST_BOT_TOKEN,
)
from slack_sdk.web import WebClient
from slack_sdk.webhook import WebhookClient
from slack_sdk.models.attachments import Attachment, AttachmentField
from slack_sdk.models.blocks import SectionBlock, DividerBlock, ActionsBlock
from slack_sdk.models.blocks.block_elements import ButtonElement
from slack_sdk.models.blocks.basic_components import MarkdownTextObject, PlainTextObject
class TestWebhook(unittest.TestCase):
def setUp(self):
if not hasattr(self, "channel_id"):
token = os.environ[SLACK_SDK_TEST_BOT_TOKEN]
channel_name = os.environ[
SLACK_SDK_TEST_INCOMING_WEBHOOK_CHANNEL_NAME
].replace("#", "")
client = WebClient(token=token)
self.channel_id = None
for resp in client.conversations_list(limit=1000):
for c in resp["channels"]:
if c["name"] == channel_name:
self.channel_id = c["id"]
break
if self.channel_id is not None:
break
def tearDown(self):
pass
def test_webhook(self):
url = os.environ[SLACK_SDK_TEST_INCOMING_WEBHOOK_URL]
webhook = WebhookClient(url)
response = webhook.send(text="Hello!")
self.assertEqual(200, response.status_code)
self.assertEqual("ok", response.body)
token = os.environ[SLACK_SDK_TEST_BOT_TOKEN]
client = WebClient(token=token)
history = client.conversations_history(channel=self.channel_id, limit=1)
self.assertIsNotNone(history)
actual_text = history["messages"][0]["text"]
self.assertEqual("Hello!", actual_text)
def test_with_unfurls_off(self):
url = os.environ[SLACK_SDK_TEST_INCOMING_WEBHOOK_URL]
token = os.environ[SLACK_SDK_TEST_BOT_TOKEN]
webhook = WebhookClient(url)
client = WebClient(token=token)
# send message that does not unfurl
response = webhook.send(
text="<https://imgs.xkcd.com/comics/desert_golfing_2x.png|Desert Golfing>",
unfurl_links=False,
unfurl_media=False,
)
self.assertEqual(200, response.status_code)
self.assertEqual("ok", response.body)
# wait to allow Slack API to edit message with attachments
time.sleep(2)
history = client.conversations_history(channel=self.channel_id, limit=1)
self.assertIsNotNone(history)
self.assertTrue("attachments" not in history["messages"][0])
# FIXME: This test started failing as of August 5, 2021
@pytest.mark.skip()
def test_with_unfurls_on(self):
# Slack API rate limits unfurls of unique links so test will
# fail when repeated. For testing, either use a different URL
# for text option or delete existing attachments in webhook channel.
url = os.environ[SLACK_SDK_TEST_INCOMING_WEBHOOK_URL]
token = os.environ[SLACK_SDK_TEST_BOT_TOKEN]
webhook = WebhookClient(url)
client = WebClient(token=token)
# send message that does unfurl
response = webhook.send(
text="<https://imgs.xkcd.com/comics/red_spiders_small.jpg|Spiders>",
unfurl_links=True,
unfurl_media=True,
)
self.assertEqual(200, response.status_code)
self.assertEqual("ok", response.body)
# wait to allow Slack API to edit message with attachments
time.sleep(2)
history = client.conversations_history(channel=self.channel_id, limit=1)
self.assertIsNotNone(history)
self.assertTrue("attachments" in history["messages"][0])
def test_with_blocks(self):
url = os.environ[SLACK_SDK_TEST_INCOMING_WEBHOOK_URL]
webhook = WebhookClient(url)
response = webhook.send(
text="fallback",
blocks=[
SectionBlock(
block_id="sb-id",
text=MarkdownTextObject(
text="This is a mrkdwn text section block."
),
fields=[
PlainTextObject(text="*this is plain_text text*", emoji=True),
MarkdownTextObject(text="*this is mrkdwn text*"),
PlainTextObject(text="*this is plain_text text*", emoji=True),
],
),
DividerBlock(),
ActionsBlock(
elements=[
ButtonElement(
text=PlainTextObject(text="Create New Task", emoji=True),
style="primary",
value="create_task",
),
ButtonElement(
text=PlainTextObject(text="Create New Project", emoji=True),
value="create_project",
),
ButtonElement(
text=PlainTextObject(text="Help", emoji=True),
value="help",
),
],
),
],
)
self.assertEqual(200, response.status_code)
self.assertEqual("ok", response.body)
def test_with_blocks_dict(self):
url = os.environ[SLACK_SDK_TEST_INCOMING_WEBHOOK_URL]
webhook = WebhookClient(url)
response = webhook.send(
text="fallback",
blocks=[
{
"type": "section",
"block_id": "sb-id",
"text": {
"type": "mrkdwn",
"text": "This is a mrkdwn text section block.",
},
"fields": [
{
"type": "plain_text",
"text": "*this is plain_text text*",
},
{
"type": "mrkdwn",
"text": "*this is mrkdwn text*",
},
{
"type": "plain_text",
"text": "*this is plain_text text*",
},
],
},
{"type": "divider", "block_id": "9SxG"},
{
"type": "actions",
"block_id": "avJ",
"elements": [
{
"type": "button",
"action_id": "yXqIx",
"text": {
"type": "plain_text",
"text": "Create New Task",
},
"style": "primary",
"value": "create_task",
},
{
"type": "button",
"action_id": "KCdDw",
"text": {
"type": "plain_text",
"text": "Create New Project",
},
"value": "create_project",
},
{
"type": "button",
"action_id": "MXjB",
"text": {
"type": "plain_text",
"text": "Help",
},
"value": "help",
},
],
},
],
)
self.assertEqual(200, response.status_code)
self.assertEqual("ok", response.body)
def test_with_attachments(self):
url = os.environ[SLACK_SDK_TEST_INCOMING_WEBHOOK_URL]
webhook = WebhookClient(url)
response = webhook.send(
text="fallback",
attachments=[
Attachment(
text="attachment text",
title="Attachment",
fallback="fallback_text",
pretext="some_pretext",
title_link="link in title",
fields=[
AttachmentField(
title=f"field_{i}_title", value=f"field_{i}_value"
)
for i in range(5)
],
color="#FFFF00",
author_name="John Doe",
author_link="http://johndoeisthebest.com",
author_icon="http://johndoeisthebest.com/avatar.jpg",
thumb_url="thumbnail URL",
footer="and a footer",
footer_icon="link to footer icon",
ts=123456789,
markdown_in=["fields"],
)
],
)
self.assertEqual(200, response.status_code)
self.assertEqual("ok", response.body)
def test_with_attachments_dict(self):
url = os.environ[SLACK_SDK_TEST_INCOMING_WEBHOOK_URL]
webhook = WebhookClient(url)
response = webhook.send(
text="fallback",
attachments=[
{
"author_name": "John Doe",
"fallback": "fallback_text",
"text": "attachment text",
"pretext": "some_pretext",
"title": "Attachment",
"footer": "and a footer",
"id": 1,
"author_link": "http://johndoeisthebest.com",
"color": "FFFF00",
"fields": [
{
"title": "field_0_title",
"value": "field_0_value",
},
{
"title": "field_1_title",
"value": "field_1_value",
},
{
"title": "field_2_title",
"value": "field_2_value",
},
{
"title": "field_3_title",
"value": "field_3_value",
},
{
"title": "field_4_title",
"value": "field_4_value",
},
],
"mrkdwn_in": ["fields"],
}
],
)
self.assertEqual(200, response.status_code)
self.assertEqual("ok", response.body)
|
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for multi-worker training tutorial."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import os
import re
import zipfile
from absl import logging
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.experimental.ops import distribute_options
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import collective_all_reduce_strategy
from tensorflow.python.distribute import combinations as ds_combinations
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import test_combinations as combinations
from tensorflow.python.framework import test_util
from tensorflow.python.keras.datasets import mnist
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import test
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training.tracking import util as tracking_util
from tensorflow.python.util import nest
class MultiWorkerTutorialTest(parameterized.TestCase, test.TestCase):
"""Test multi-worker training flow demo'ed in go/multi-worker-with-keras."""
@contextlib.contextmanager
def skip_fetch_failure_exception(self):
try:
yield
except zipfile.BadZipfile as e:
self.skipTest('Data loading error: Bad magic number for file header.')
except Exception as e: # pylint: disable=broad-except
if 'URL fetch failure' in str(e):
self.skipTest('URL fetch error not considered failure of the test.')
else:
raise
@ds_combinations.generate(
combinations.combine(
mode=['eager'],
shard_policy=[None] + list(distribute_options.AutoShardPolicy)))
def testMultiWorkerTutorial(self, mode, shard_policy):
"""Test multi-worker training flow demo'ed in go/multi-worker-with-keras.
This test should be kept in sync with the code samples in
go/multi-worker-with-keras.
Args:
mode: Runtime mode.
shard_policy: None or any of tf.data.experimental.AutoShardPolicy for
testing.
"""
if shard_policy is distribute_options.AutoShardPolicy.FILE:
self.skipTest('TensorSliceDataset is not shardable with FILE policy.')
def mnist_dataset(batch_size):
with self.skip_fetch_failure_exception():
(x_train, y_train), _ = mnist.load_data()
# The `x` arrays are in uint8 and have values in the range [0, 255].
# We need to convert them to float32 with values in the range [0, 1]
x_train = x_train / np.float32(255)
y_train = y_train.astype(np.int64)
train_dataset = dataset_ops.DatasetV2.from_tensor_slices(
(x_train, y_train)).shuffle(60000).repeat().batch(batch_size)
return train_dataset
def build_and_compile_cnn_model():
model = keras.Sequential([
keras.layers.Input(shape=(28, 28)),
keras.layers.Reshape(target_shape=(28, 28, 1)),
keras.layers.Conv2D(32, 3, activation='relu'),
keras.layers.Flatten(),
keras.layers.Dense(128, activation='relu'),
keras.layers.Dense(10)
])
model.compile(
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=gradient_descent.SGD(learning_rate=0.001),
metrics=['accuracy'])
return model
per_worker_batch_size = 64
single_worker_dataset = mnist_dataset(per_worker_batch_size)
single_worker_model = build_and_compile_cnn_model()
single_worker_model.fit(single_worker_dataset, epochs=3, steps_per_epoch=70)
num_workers = 4
def proc_func(model_path, checkpoint_dir):
global_batch_size = per_worker_batch_size * num_workers
strategy = collective_all_reduce_strategy.CollectiveAllReduceStrategy()
with strategy.scope():
multi_worker_model = build_and_compile_cnn_model()
callbacks = [
keras.callbacks.ModelCheckpoint(
filepath=os.path.join(self.get_temp_dir(), 'checkpoint'))
]
multi_worker_dataset = mnist_dataset(global_batch_size)
if shard_policy:
options = dataset_ops.Options()
options.experimental_distribute.auto_shard_policy = shard_policy
multi_worker_dataset = multi_worker_dataset.with_options(options)
multi_worker_model.fit(
multi_worker_dataset,
epochs=2,
steps_per_epoch=20,
callbacks=callbacks)
def _is_chief(task_type, task_id):
return task_type is None or task_type == 'chief' or (
task_type == 'worker' and task_id == 0)
def _get_temp_dir(dirpath, task_id):
base_dirpath = 'workertemp_' + str(task_id)
temp_dir = os.path.join(dirpath, base_dirpath)
file_io.recursive_create_dir_v2(temp_dir)
return temp_dir
def write_filepath(filepath, task_type, task_id):
dirpath = os.path.dirname(filepath)
base = os.path.basename(filepath)
if not _is_chief(task_type, task_id):
dirpath = _get_temp_dir(dirpath, task_id)
return os.path.join(dirpath, base)
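# Sketch of the resulting layout (task ids illustrative): the chief writes to
# <dirpath>/<base> directly, while e.g. worker 1 writes to
# <dirpath>/workertemp_1/<base>, a temporary copy that is deleted below once
# saving completes, so only the chief's copy remains.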
task_type, task_id = (strategy.cluster_resolver.task_type,
strategy.cluster_resolver.task_id)
write_model_path = write_filepath(model_path, task_type, task_id)
multi_worker_model.save(write_model_path)
if not _is_chief(task_type, task_id):
file_io.delete_recursively_v2(os.path.dirname(write_model_path))
# Make sure chief finishes saving before non-chief's assertions.
multi_process_runner.barrier().wait()
if not file_io.file_exists_v2(model_path):
raise RuntimeError()
if file_io.file_exists_v2(write_model_path) != _is_chief(
task_type, task_id):
raise RuntimeError()
loaded_model = keras.saving.save.load_model(model_path)
loaded_model.fit(multi_worker_dataset, epochs=2, steps_per_epoch=20)
checkpoint = tracking_util.Checkpoint(model=multi_worker_model)
write_checkpoint_dir = write_filepath(checkpoint_dir, task_type, task_id)
checkpoint_manager = checkpoint_management.CheckpointManager(
checkpoint, directory=write_checkpoint_dir, max_to_keep=1)
checkpoint_manager.save()
if not _is_chief(task_type, task_id):
file_io.delete_recursively_v2(write_checkpoint_dir)
# Make sure chief finishes saving before non-chief's assertions.
multi_process_runner.barrier().wait()
if not file_io.file_exists_v2(checkpoint_dir):
raise RuntimeError()
if file_io.file_exists_v2(write_checkpoint_dir) != _is_chief(
task_type, task_id):
raise RuntimeError()
latest_checkpoint = checkpoint_management.latest_checkpoint(
checkpoint_dir)
checkpoint.restore(latest_checkpoint)
multi_worker_model.fit(multi_worker_dataset, epochs=2, steps_per_epoch=20)
logging.info('testMultiWorkerTutorial successfully ends')
model_path = os.path.join(self.get_temp_dir(), 'model.tf')
checkpoint_dir = os.path.join(self.get_temp_dir(), 'ckpt')
with test_util.skip_if_error(self, errors_impl.UnavailableError):
mpr_result = multi_process_runner.run(
proc_func,
multi_worker_test_base.create_cluster_spec(num_workers=num_workers),
args=(model_path, checkpoint_dir),
list_stdout=True)
self.assertTrue(
any([
'testMultiWorkerTutorial successfully ends' in msg
for msg in mpr_result.stdout
]))
def extract_accuracy(worker_id, input_string):
match = re.match(
r'\[worker\-{}\].*accuracy: (\d+\.\d+).*'.format(worker_id),
input_string)
return None if match is None else float(match.group(1))
for worker_id in range(num_workers):
accu_result = nest.map_structure(
lambda x: extract_accuracy(worker_id, x), # pylint: disable=cell-var-from-loop
mpr_result.stdout)
self.assertTrue(
any(accu_result), 'Every worker is supposed to have accuracy result.')
if __name__ == '__main__':
multi_process_runner.test_main()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
from nova import test
from nova.tests import matchers
from nova.virt.libvirt import config
class LibvirtConfigBaseTest(test.TestCase):
def assertXmlEqual(self, expectedXmlstr, actualXmlstr):
self.assertThat(actualXmlstr, matchers.XMLMatches(expectedXmlstr))
class LibvirtConfigTest(LibvirtConfigBaseTest):
def test_config_plain(self):
obj = config.LibvirtConfigObject(root_name="demo")
xml = obj.to_xml()
self.assertXmlEqual(xml, "<demo/>")
def test_config_ns(self):
obj = config.LibvirtConfigObject(root_name="demo", ns_prefix="foo",
ns_uri="http://example.com/foo")
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<foo:demo xmlns:foo="http://example.com/foo"/>""")
def test_config_text(self):
obj = config.LibvirtConfigObject(root_name="demo")
root = obj.format_dom()
root.append(obj._text_node("foo", "bar"))
xml = etree.tostring(root)
self.assertXmlEqual(xml, "<demo><foo>bar</foo></demo>")
def test_config_parse(self):
inxml = "<demo><foo/></demo>"
obj = config.LibvirtConfigObject(root_name="demo")
obj.parse_str(inxml)
class LibvirtConfigCapsTest(LibvirtConfigBaseTest):
def test_config_host(self):
xmlin = """
<capabilities>
<host>
<uuid>c7a5fdbd-edaf-9455-926a-d65c16db1809</uuid>
<cpu>
<arch>x86_64</arch>
<model>Opteron_G3</model>
<vendor>AMD</vendor>
<topology sockets='1' cores='4' threads='1'/>
<feature name='ibs'/>
<feature name='osvw'/>
</cpu>
</host>
<guest>
<os_type>hvm</os_type>
<arch name='x86_64'/>
</guest>
<guest>
<os_type>hvm</os_type>
<arch name='i686'/>
</guest>
</capabilities>"""
obj = config.LibvirtConfigCaps()
obj.parse_str(xmlin)
self.assertEqual(type(obj.host), config.LibvirtConfigCapsHost)
self.assertEqual(obj.host.uuid, "c7a5fdbd-edaf-9455-926a-d65c16db1809")
xmlout = obj.to_xml()
self.assertXmlEqual(xmlin, xmlout)
class LibvirtConfigGuestTimerTest(LibvirtConfigBaseTest):
def test_config_platform(self):
obj = config.LibvirtConfigGuestTimer()
obj.track = "host"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<timer name="platform" track="host"/>
""")
def test_config_pit(self):
obj = config.LibvirtConfigGuestTimer()
obj.name = "pit"
obj.tickpolicy = "discard"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<timer name="pit" tickpolicy="discard"/>
""")
def test_config_hpet(self):
obj = config.LibvirtConfigGuestTimer()
obj.name = "hpet"
obj.present = False
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<timer name="hpet" present="no"/>
""")
class LibvirtConfigGuestClockTest(LibvirtConfigBaseTest):
def test_config_utc(self):
obj = config.LibvirtConfigGuestClock()
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<clock offset="utc"/>
""")
def test_config_localtime(self):
obj = config.LibvirtConfigGuestClock()
obj.offset = "localtime"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<clock offset="localtime"/>
""")
def test_config_timezone(self):
obj = config.LibvirtConfigGuestClock()
obj.offset = "timezone"
obj.timezone = "EDT"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<clock offset="timezone" timezone="EDT"/>
""")
def test_config_variable(self):
obj = config.LibvirtConfigGuestClock()
obj.offset = "variable"
obj.adjustment = "123456"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<clock offset="variable" adjustment="123456"/>
""")
def test_config_timers(self):
obj = config.LibvirtConfigGuestClock()
tmpit = config.LibvirtConfigGuestTimer()
tmpit.name = "pit"
tmpit.tickpolicy = "discard"
tmrtc = config.LibvirtConfigGuestTimer()
tmrtc.name = "rtc"
tmrtc.tickpolicy = "merge"
obj.add_timer(tmpit)
obj.add_timer(tmrtc)
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<clock offset="utc">
<timer name="pit" tickpolicy="discard"/>
<timer name="rtc" tickpolicy="merge"/>
</clock>
""")
class LibvirtConfigCPUFeatureTest(LibvirtConfigBaseTest):
def test_config_simple(self):
obj = config.LibvirtConfigCPUFeature("mtrr")
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<feature name="mtrr"/>
""")
class LibvirtConfigGuestCPUFeatureTest(LibvirtConfigBaseTest):
def test_config_simple(self):
obj = config.LibvirtConfigGuestCPUFeature("mtrr")
obj.policy = "force"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<feature name="mtrr" policy="force"/>
""")
class LibvirtConfigCPUTest(LibvirtConfigBaseTest):
def test_config_simple(self):
obj = config.LibvirtConfigCPU()
obj.model = "Penryn"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<cpu>
<model>Penryn</model>
</cpu>
""")
def test_config_complex(self):
obj = config.LibvirtConfigCPU()
obj.model = "Penryn"
obj.vendor = "Intel"
obj.arch = "x86_64"
obj.add_feature(config.LibvirtConfigCPUFeature("mtrr"))
obj.add_feature(config.LibvirtConfigCPUFeature("apic"))
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<cpu>
<arch>x86_64</arch>
<model>Penryn</model>
<vendor>Intel</vendor>
<feature name="mtrr"/>
<feature name="apic"/>
</cpu>
""")
def test_config_topology(self):
obj = config.LibvirtConfigCPU()
obj.model = "Penryn"
obj.sockets = 4
obj.cores = 4
obj.threads = 2
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<cpu>
<model>Penryn</model>
<topology sockets="4" cores="4" threads="2"/>
</cpu>
""")
class LibvirtConfigGuestCPUTest(LibvirtConfigBaseTest):
def test_config_simple(self):
obj = config.LibvirtConfigGuestCPU()
obj.model = "Penryn"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<cpu match="exact">
<model>Penryn</model>
</cpu>
""")
def test_config_complex(self):
obj = config.LibvirtConfigGuestCPU()
obj.model = "Penryn"
obj.vendor = "Intel"
obj.arch = "x86_64"
obj.mode = "custom"
obj.add_feature(config.LibvirtConfigGuestCPUFeature("mtrr"))
obj.add_feature(config.LibvirtConfigGuestCPUFeature("apic"))
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<cpu mode="custom" match="exact">
<arch>x86_64</arch>
<model>Penryn</model>
<vendor>Intel</vendor>
<feature name="mtrr" policy="require"/>
<feature name="apic" policy="require"/>
</cpu>
""")
def test_config_host(self):
obj = config.LibvirtConfigGuestCPU()
obj.mode = "host-model"
obj.match = "exact"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<cpu mode="host-model" match="exact"/>
""")
class LibvirtConfigGuestSMBIOSTest(LibvirtConfigBaseTest):
def test_config_simple(self):
obj = config.LibvirtConfigGuestSMBIOS()
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<smbios mode="sysinfo"/>
""")
class LibvirtConfigGuestSysinfoTest(LibvirtConfigBaseTest):
def test_config_simple(self):
obj = config.LibvirtConfigGuestSysinfo()
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<sysinfo type="smbios"/>
""")
def test_config_bios(self):
obj = config.LibvirtConfigGuestSysinfo()
obj.bios_vendor = "Acme"
obj.bios_version = "6.6.6"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<sysinfo type="smbios">
<bios>
<entry name="vendor">Acme</entry>
<entry name="version">6.6.6</entry>
</bios>
</sysinfo>
""")
def test_config_system(self):
obj = config.LibvirtConfigGuestSysinfo()
obj.system_manufacturer = "Acme"
obj.system_product = "Wile Coyote"
obj.system_version = "6.6.6"
obj.system_serial = "123456"
obj.system_uuid = "c7a5fdbd-edaf-9455-926a-d65c16db1809"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<sysinfo type="smbios">
<system>
<entry name="manufacturer">Acme</entry>
<entry name="product">Wile Coyote</entry>
<entry name="version">6.6.6</entry>
<entry name="serial">123456</entry>
<entry name="uuid">c7a5fdbd-edaf-9455-926a-d65c16db1809</entry>
</system>
</sysinfo>
""")
def test_config_mixed(self):
obj = config.LibvirtConfigGuestSysinfo()
obj.bios_vendor = "Acme"
obj.system_manufacturer = "Acme"
obj.system_product = "Wile Coyote"
obj.system_uuid = "c7a5fdbd-edaf-9455-926a-d65c16db1809"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<sysinfo type="smbios">
<bios>
<entry name="vendor">Acme</entry>
</bios>
<system>
<entry name="manufacturer">Acme</entry>
<entry name="product">Wile Coyote</entry>
<entry name="uuid">c7a5fdbd-edaf-9455-926a-d65c16db1809</entry>
</system>
</sysinfo>
""")
class LibvirtConfigGuestDiskTest(LibvirtConfigBaseTest):
def test_config_file(self):
obj = config.LibvirtConfigGuestDisk()
obj.source_type = "file"
obj.source_path = "/tmp/hello"
obj.target_dev = "/dev/hda"
obj.target_bus = "ide"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<disk type="file" device="disk">
<source file="/tmp/hello"/>
<target bus="ide" dev="/dev/hda"/>
</disk>""")
def test_config_file_serial(self):
obj = config.LibvirtConfigGuestDisk()
obj.source_type = "file"
obj.source_path = "/tmp/hello"
obj.target_dev = "/dev/hda"
obj.target_bus = "ide"
obj.serial = "7a97c4a3-6f59-41d4-bf47-191d7f97f8e9"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<disk type="file" device="disk">
<source file="/tmp/hello"/>
<target bus="ide" dev="/dev/hda"/>
<serial>7a97c4a3-6f59-41d4-bf47-191d7f97f8e9</serial>
</disk>""")
def test_config_block(self):
obj = config.LibvirtConfigGuestDisk()
obj.source_type = "block"
obj.source_path = "/tmp/hello"
obj.source_device = "cdrom"
obj.driver_name = "qemu"
obj.target_dev = "/dev/hdc"
obj.target_bus = "ide"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<disk type="block" device="cdrom">
<driver name="qemu"/>
<source dev="/tmp/hello"/>
<target bus="ide" dev="/dev/hdc"/>
</disk>""")
def test_config_network(self):
obj = config.LibvirtConfigGuestDisk()
obj.source_type = "network"
obj.source_protocol = "iscsi"
obj.source_host = "foo.bar.com"
obj.driver_name = "qemu"
obj.driver_format = "qcow2"
obj.target_dev = "/dev/hda"
obj.target_bus = "ide"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<disk type="network" device="disk">
<driver name="qemu" type="qcow2"/>
<source name="foo.bar.com" protocol="iscsi"/>
<target bus="ide" dev="/dev/hda"/>
</disk>""")
def test_config_network_auth(self):
obj = config.LibvirtConfigGuestDisk()
obj.source_type = "network"
obj.source_protocol = "rbd"
obj.source_host = "pool/image"
obj.driver_name = "qemu"
obj.driver_format = "raw"
obj.target_dev = "/dev/vda"
obj.target_bus = "virtio"
obj.auth_username = "foo"
obj.auth_secret_type = "ceph"
obj.auth_secret_uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<disk type="network" device="disk">
<driver name="qemu" type="raw"/>
<source name="pool/image" protocol="rbd"/>
<auth username="foo">
<secret type="ceph"
uuid="b38a3f43-4be2-4046-897f-b67c2f5e0147"/>
</auth>
<target bus="virtio" dev="/dev/vda"/>
</disk>""")
def test_config_iotune(self):
obj = config.LibvirtConfigGuestDisk()
obj.source_type = "file"
obj.source_path = "/tmp/hello"
obj.target_dev = "/dev/hda"
obj.target_bus = "ide"
obj.disk_read_bytes_sec = 1024000
obj.disk_read_iops_sec = 1000
obj.disk_total_bytes_sec = 2048000
obj.disk_write_bytes_sec = 1024000
obj.disk_write_iops_sec = 1000
obj.disk_total_iops_sec = 2000
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<disk type="file" device="disk">
<source file="/tmp/hello"/>
<target bus="ide" dev="/dev/hda"/>
<iotune>
<read_bytes_sec>1024000</read_bytes_sec>
<read_iops_sec>1000</read_iops_sec>
<write_bytes_sec>1024000</write_bytes_sec>
<write_iops_sec>1000</write_iops_sec>
<total_bytes_sec>2048000</total_bytes_sec>
<total_iops_sec>2000</total_iops_sec>
</iotune>
</disk>""")
class LibvirtConfigGuestFilesysTest(LibvirtConfigBaseTest):
def test_config_mount(self):
obj = config.LibvirtConfigGuestFilesys()
obj.source_type = "mount"
obj.source_dir = "/tmp/hello"
obj.target_dir = "/mnt"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<filesystem type="mount">
<source dir="/tmp/hello"/>
<target dir="/mnt"/>
</filesystem>""")
class LibvirtConfigGuestInputTest(LibvirtConfigBaseTest):
def test_config_tablet(self):
obj = config.LibvirtConfigGuestInput()
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<input type="tablet" bus="usb"/>""")
class LibvirtConfigGuestGraphicsTest(LibvirtConfigBaseTest):
def test_config_graphics(self):
obj = config.LibvirtConfigGuestGraphics()
obj.type = "vnc"
obj.autoport = True
obj.keymap = "en_US"
obj.listen = "127.0.0.1"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<graphics type="vnc" autoport="yes" keymap="en_US" listen="127.0.0.1"/>
""")
class LibvirtConfigGuestSerialTest(LibvirtConfigBaseTest):
def test_config_file(self):
obj = config.LibvirtConfigGuestSerial()
obj.type = "file"
obj.source_path = "/tmp/vm.log"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<serial type="file">
<source path="/tmp/vm.log"/>
</serial>""")
class LibvirtConfigGuestConsoleTest(LibvirtConfigBaseTest):
def test_config_pty(self):
obj = config.LibvirtConfigGuestConsole()
obj.type = "pty"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<console type="pty"/>""")
class LibvirtConfigGuestChannelTest(LibvirtConfigBaseTest):
def test_config_spice_minimal(self):
obj = config.LibvirtConfigGuestChannel()
obj.type = "spicevmc"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<channel type="spicevmc">
<target type='virtio'/>
</channel>""")
def test_config_spice_full(self):
obj = config.LibvirtConfigGuestChannel()
obj.type = "spicevmc"
obj.target_name = "com.redhat.spice.0"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<channel type="spicevmc">
<target type='virtio' name='com.redhat.spice.0'/>
</channel>""")
class LibvirtConfigGuestInterfaceTest(LibvirtConfigBaseTest):
def test_config_ethernet(self):
obj = config.LibvirtConfigGuestInterface()
obj.net_type = "ethernet"
obj.mac_addr = "DE:AD:BE:EF:CA:FE"
obj.model = "virtio"
obj.target_dev = "vnet0"
obj.driver_name = "vhost"
obj.vif_inbound_average = 1024000
obj.vif_inbound_peak = 10240000
obj.vif_inbound_burst = 1024000
obj.vif_outbound_average = 1024000
obj.vif_outbound_peak = 10240000
obj.vif_outbound_burst = 1024000
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<interface type="ethernet">
<mac address="DE:AD:BE:EF:CA:FE"/>
<model type="virtio"/>
<driver name="vhost"/>
<target dev="vnet0"/>
<bandwidth>
<inbound average="1024000" peak="10240000" burst="1024000"/>
<outbound average="1024000" peak="10240000" burst="1024000"/>
</bandwidth>
</interface>""")
def test_config_bridge(self):
obj = config.LibvirtConfigGuestInterface()
obj.net_type = "bridge"
obj.source_dev = "br0"
obj.mac_addr = "DE:AD:BE:EF:CA:FE"
obj.model = "virtio"
obj.target_dev = "tap12345678"
obj.filtername = "clean-traffic"
obj.filterparams.append({"key": "IP", "value": "192.168.122.1"})
obj.vif_inbound_average = 1024000
obj.vif_inbound_peak = 10240000
obj.vif_inbound_burst = 1024000
obj.vif_outbound_average = 1024000
obj.vif_outbound_peak = 10240000
obj.vif_outbound_burst = 1024000
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<interface type="bridge">
<mac address="DE:AD:BE:EF:CA:FE"/>
<model type="virtio"/>
<source bridge="br0"/>
<target dev="tap12345678"/>
<filterref filter="clean-traffic">
<parameter name="IP" value="192.168.122.1"/>
</filterref>
<bandwidth>
<inbound average="1024000" peak="10240000" burst="1024000"/>
<outbound average="1024000" peak="10240000" burst="1024000"/>
</bandwidth>
</interface>""")
def test_config_bridge_ovs(self):
obj = config.LibvirtConfigGuestInterface()
obj.net_type = "bridge"
obj.source_dev = "br0"
obj.mac_addr = "DE:AD:BE:EF:CA:FE"
obj.model = "virtio"
obj.target_dev = "tap12345678"
obj.vporttype = "openvswitch"
obj.vportparams.append({"key": "instanceid", "value": "foobar"})
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<interface type="bridge">
<mac address="DE:AD:BE:EF:CA:FE"/>
<model type="virtio"/>
<source bridge="br0"/>
<target dev="tap12345678"/>
<virtualport type="openvswitch">
<parameters instanceid="foobar"/>
</virtualport>
</interface>""")
def test_config_8021Qbh(self):
obj = config.LibvirtConfigGuestInterface()
obj.net_type = "direct"
obj.mac_addr = "DE:AD:BE:EF:CA:FE"
obj.model = "virtio"
obj.target_dev = "tap12345678"
obj.source_dev = "eth0"
obj.vporttype = "802.1Qbh"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<interface type="direct">
<mac address="DE:AD:BE:EF:CA:FE"/>
<model type="virtio"/>
<source dev="eth0" mode="private"/>
<target dev="tap12345678"/>
<virtualport type="802.1Qbh"/>
</interface>""")
class LibvirtConfigGuestTest(LibvirtConfigBaseTest):
def test_config_lxc(self):
obj = config.LibvirtConfigGuest()
obj.virt_type = "lxc"
obj.memory = 1024 * 1024 * 100
obj.vcpus = 2
obj.cpuset = "0-3,^2,4-5"
obj.name = "demo"
obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147"
obj.os_type = "exe"
obj.os_init_path = "/sbin/init"
fs = config.LibvirtConfigGuestFilesys()
fs.source_dir = "/root/lxc"
fs.target_dir = "/"
obj.add_device(fs)
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<domain type="lxc">
<uuid>b38a3f43-4be2-4046-897f-b67c2f5e0147</uuid>
<name>demo</name>
<memory>104857600</memory>
<vcpu cpuset="0-3,^2,4-5">2</vcpu>
<os>
<type>exe</type>
<init>/sbin/init</init>
</os>
<devices>
<filesystem type="mount">
<source dir="/root/lxc"/>
<target dir="/"/>
</filesystem>
</devices>
</domain>""")
def test_config_xen_pv(self):
obj = config.LibvirtConfigGuest()
obj.virt_type = "xen"
obj.memory = 1024 * 1024 * 100
obj.vcpus = 2
obj.cpuset = "0-3,^2,4-5"
obj.name = "demo"
obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147"
obj.os_type = "linux"
obj.os_kernel = "/tmp/vmlinuz"
obj.os_initrd = "/tmp/ramdisk"
obj.os_root = "root=xvda"
obj.os_cmdline = "console=xvc0"
disk = config.LibvirtConfigGuestDisk()
disk.source_type = "file"
disk.source_path = "/tmp/img"
disk.target_dev = "/dev/xvda"
disk.target_bus = "xen"
obj.add_device(disk)
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<domain type="xen">
<uuid>b38a3f43-4be2-4046-897f-b67c2f5e0147</uuid>
<name>demo</name>
<memory>104857600</memory>
<vcpu cpuset="0-3,^2,4-5">2</vcpu>
<os>
<type>linux</type>
<kernel>/tmp/vmlinuz</kernel>
<initrd>/tmp/ramdisk</initrd>
<cmdline>console=xvc0</cmdline>
<root>root=xvda</root>
</os>
<devices>
<disk type="file" device="disk">
<source file="/tmp/img"/>
<target bus="xen" dev="/dev/xvda"/>
</disk>
</devices>
</domain>""")
def test_config_xen_hvm(self):
obj = config.LibvirtConfigGuest()
obj.virt_type = "xen"
obj.memory = 1024 * 1024 * 100
obj.vcpus = 2
obj.cpuset = "0-3,^2,4-5"
obj.name = "demo"
obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147"
obj.os_type = "hvm"
obj.os_loader = '/usr/lib/xen/boot/hvmloader'
obj.os_root = "root=xvda"
obj.os_cmdline = "console=xvc0"
obj.acpi = True
obj.apic = True
disk = config.LibvirtConfigGuestDisk()
disk.source_type = "file"
disk.source_path = "/tmp/img"
disk.target_dev = "/dev/xvda"
disk.target_bus = "xen"
obj.add_device(disk)
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<domain type="xen">
<uuid>b38a3f43-4be2-4046-897f-b67c2f5e0147</uuid>
<name>demo</name>
<memory>104857600</memory>
<vcpu cpuset="0-3,^2,4-5">2</vcpu>
<os>
<type>hvm</type>
<loader>/usr/lib/xen/boot/hvmloader</loader>
<cmdline>console=xvc0</cmdline>
<root>root=xvda</root>
</os>
<features>
<acpi/>
<apic/>
</features>
<devices>
<disk type="file" device="disk">
<source file="/tmp/img"/>
<target bus="xen" dev="/dev/xvda"/>
</disk>
</devices>
</domain>""")
def test_config_kvm(self):
obj = config.LibvirtConfigGuest()
obj.virt_type = "kvm"
obj.memory = 1024 * 1024 * 100
obj.vcpus = 2
obj.cpuset = "0-3,^2,4-5"
obj.cpu_shares = 100
obj.cpu_quota = 50000
obj.cpu_period = 25000
obj.name = "demo"
obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147"
obj.os_type = "linux"
obj.os_boot_dev = "hd"
obj.os_smbios = config.LibvirtConfigGuestSMBIOS()
obj.acpi = True
obj.apic = True
obj.sysinfo = config.LibvirtConfigGuestSysinfo()
obj.sysinfo.bios_vendor = "Acme"
obj.sysinfo.system_version = "1.0.0"
disk = config.LibvirtConfigGuestDisk()
disk.source_type = "file"
disk.source_path = "/tmp/img"
disk.target_dev = "/dev/vda"
disk.target_bus = "virtio"
obj.add_device(disk)
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<domain type="kvm">
<uuid>b38a3f43-4be2-4046-897f-b67c2f5e0147</uuid>
<name>demo</name>
<memory>104857600</memory>
<vcpu cpuset="0-3,^2,4-5">2</vcpu>
<sysinfo type='smbios'>
<bios>
<entry name="vendor">Acme</entry>
</bios>
<system>
<entry name="version">1.0.0</entry>
</system>
</sysinfo>
<os>
<type>linux</type>
<boot dev="hd"/>
<smbios mode="sysinfo"/>
</os>
<features>
<acpi/>
<apic/>
</features>
<cputune>
<shares>100</shares>
<quota>50000</quota>
<period>25000</period>
</cputune>
<devices>
<disk type="file" device="disk">
<source file="/tmp/img"/>
<target bus="virtio" dev="/dev/vda"/>
</disk>
</devices>
</domain>""")
class LibvirtConfigGuestSnapshotTest(LibvirtConfigBaseTest):
def test_config_snapshot(self):
obj = config.LibvirtConfigGuestSnapshot()
obj.name = "Demo"
xml = obj.to_xml()
self.assertXmlEqual(xml, """
<domainsnapshot>
<name>Demo</name>
</domainsnapshot>""")
|
|
from __future__ import unicode_literals
from copy import copy
import os
import shutil
import sys
import tempfile
from django.apps import apps
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import CommandError
from django.db import connections, DEFAULT_DB_ALIAS
from django.db.migrations.loader import MigrationLoader
from django.db.models.signals import post_migrate, pre_migrate
from django.test import override_settings, TransactionTestCase
from django.utils import six
from django_migrate_project.loader import PROJECT_MIGRATIONS_MODULE_NAME
import mock
TEST_MIGRATIONS_DIR = os.path.join(settings.BASE_DIR, 'test_migrations')
PROJECT_MIGRATIONS_DIRECTORY = os.path.join(
TEST_MIGRATIONS_DIR, 'project_migration')
class MigrateProjectTest(TransactionTestCase):
""" Tests for 'migrateproject' """
def setUp(self):
# Roll back migrations to a blank state
call_command('migrate', 'blog', 'zero', verbosity=0)
call_command('migrate', 'cookbook', 'zero', verbosity=0)
call_command('migrate', 'event_calendar', 'zero', verbosity=0)
call_command('migrate', 'newspaper', 'zero', verbosity=0)
self._old_sys_path = copy(sys.path)
def tearDown(self):
# Delete any temp directories
if getattr(self, 'tempdir', None):
shutil.rmtree(self.tempdir)
self.clear_migrations_modules()
sys.path = self._old_sys_path
def clear_migrations_modules(self):
# Destroy modules that were loaded for migrations
sys.modules.pop("blog_0001_project", None)
sys.modules.pop("cookbook_0001_project", None)
sys.modules.pop("cookbook_0002_project", None)
def setup_migration_tree(self, dir):
# Move the files to the correct location
shutil.copytree(
PROJECT_MIGRATIONS_DIRECTORY,
os.path.join(dir, PROJECT_MIGRATIONS_MODULE_NAME)
)
sys.path.insert(0, dir)
return os.path.join(dir, PROJECT_MIGRATIONS_MODULE_NAME)
def test_routine_migration(self):
""" Test applying a routine project migration """
self.tempdir = tempfile.mkdtemp()
with override_settings(BASE_DIR=self.tempdir):
self.setup_migration_tree(settings.BASE_DIR)
connection = connections[DEFAULT_DB_ALIAS]
loader = MigrationLoader(connection)
applied_migrations = copy(loader.applied_migrations)
migrated_apps = [app for app, _ in loader.applied_migrations]
self.assertNotIn('event_calendar', migrated_apps)
self.assertNotIn('newspaper', migrated_apps)
call_command('migrateproject', verbosity=0)
try:
# Check that database changed
loader = MigrationLoader(connection)
self.assertNotEqual(
loader.applied_migrations, applied_migrations)
migrated_apps = [app for app, _ in loader.applied_migrations]
self.assertIn('event_calendar', migrated_apps)
self.assertIn('newspaper', migrated_apps)
finally:
# Roll back migrations to a blank state
# NOTE: This needs to be done before deleting anything or else
# Django won't find the migrations on disk
call_command('migrate', 'event_calendar', 'zero', verbosity=0)
call_command('migrate', 'newspaper', 'zero', verbosity=0)
def test_migrations_dir_error(self):
""" Test running the management command with a bad migrations dir """
self.tempdir = tempfile.mkdtemp()
with override_settings(BASE_DIR=self.tempdir):
# No migrations folder at all
with self.assertRaises(CommandError):
call_command('migrateproject', verbosity=0)
migrations_dir = self.setup_migration_tree(settings.BASE_DIR)
os.remove(os.path.join(migrations_dir, '__init__.py'))
# Missing __init__.py file
with self.assertRaises(CommandError):
call_command('migrateproject', verbosity=0)
def test_unapply(self):
""" Test unapplying an applied project migration """
self.tempdir = tempfile.mkdtemp()
def perform_unapply():
connection = connections[DEFAULT_DB_ALIAS]
loader = MigrationLoader(connection)
applied_migrations = copy(loader.applied_migrations)
# Apply the migrations, then unapply them
call_command('migrateproject', verbosity=0)
# Check that database changed
loader = MigrationLoader(connection)
self.assertNotEqual(
loader.applied_migrations, applied_migrations)
migrated_apps = [app for app, _ in loader.applied_migrations]
self.assertIn('event_calendar', migrated_apps)
self.assertIn('newspaper', migrated_apps)
out = six.StringIO()
# Call command to unapply the changes
call_command('migrateproject', unapply=True, stdout=out,
verbosity=1)
# Check that it says it was unapplied
self.assertIn("unapply all", out.getvalue().lower())
# Check that database is back to original
loader = MigrationLoader(connection)
migrated_apps = [app for app, _ in loader.applied_migrations]
self.assertEqual(loader.applied_migrations, applied_migrations)
self.assertNotIn('event_calendar', migrated_apps)
self.assertNotIn('newspaper', migrated_apps)
with override_settings(BASE_DIR=self.tempdir):
self.setup_migration_tree(settings.BASE_DIR)
field = None
try:
# Do a normal unapply
perform_unapply()
# Then make some new changes via monkey patching
from event_calendar.models import Event
for field in Event._meta.fields:
if field.name == 'description':
field.blank = True
field.null = True
break
out = six.StringIO()
# Create the new migration
call_command('makeprojectmigrations', stdout=out, verbosity=1)
self.assertIn("migrations for", out.getvalue().lower())
# The cached package won't see the new module
sys.modules.pop("migrations", None)
# And apply/unapply those new migrations for better
# statement coverage
perform_unapply()
finally:
if field:
field.blank = False
field.null = False
# Roll back migrations to a blank state
# NOTE: This needs to be done before deleting anything or else
# Django won't find the migrations on disk
call_command('migrate', 'event_calendar', 'zero', verbosity=0)
call_command('migrate', 'newspaper', 'zero', verbosity=0)
def test_nothing_to_apply(self):
""" Test applying already applied project migration """
self.tempdir = tempfile.mkdtemp()
with override_settings(BASE_DIR=self.tempdir):
self.setup_migration_tree(settings.BASE_DIR)
connection = connections[DEFAULT_DB_ALIAS]
loader = MigrationLoader(connection)
applied_migrations = copy(loader.applied_migrations)
migrated_apps = [app for app, _ in loader.applied_migrations]
self.assertNotIn('event_calendar', migrated_apps)
self.assertNotIn('newspaper', migrated_apps)
call_command('migrateproject', verbosity=0)
try:
# Check that database changed
loader = MigrationLoader(connection)
self.assertNotEqual(
loader.applied_migrations, applied_migrations)
out = six.StringIO()
# Call command again to show nothing changes
call_command('migrateproject', stdout=out, verbosity=1)
self.assertIn('no migrations', out.getvalue().lower())
finally:
# Roll back migrations to a blank state
# NOTE: This needs to be done before deleting anything or else
# Django won't find the migrations on disk
call_command('migrate', 'event_calendar', 'zero', verbosity=0)
call_command('migrate', 'newspaper', 'zero', verbosity=0)
def test_signals(self):
""" Test the signals emitted during the migration """
app_config = apps.get_app_config('event_calendar')
pre_migrate_callback = mock.MagicMock()
post_migrate_callback = mock.MagicMock()
pre_migrate.connect(pre_migrate_callback, sender=app_config)
post_migrate.connect(post_migrate_callback, sender=app_config)
self.test_routine_migration()
pre_migrate.disconnect(pre_migrate_callback, sender=app_config)
post_migrate.disconnect(post_migrate_callback, sender=app_config)
self.assertEqual(pre_migrate_callback.call_count, 3)
self.assertEqual(post_migrate_callback.call_count, 3)
def test_changes_detected(self):
""" Test a migration with model changes detected """
self.tempdir = tempfile.mkdtemp()
module = 'django_migrate_project.management.commands.migrateproject'
changes_path = module + '.MigrationAutodetector.changes'
with override_settings(BASE_DIR=self.tempdir):
self.setup_migration_tree(settings.BASE_DIR)
# Migrate first, so that no migrations are available to apply
call_command('migrateproject', verbosity=0)
try:
with mock.patch(changes_path) as changes:
changes.return_value = True
out = six.StringIO()
call_command('migrateproject', stdout=out, verbosity=1)
output = out.getvalue().lower()
self.assertIn("have changes", output)
self.assertIn("'manage.py makeprojectmigrations'", output)
self.assertIn("'manage.py migrateproject'", output)
finally:
# Roll back migrations to a blank state
# NOTE: This needs to be done before deleting anything or else
# Django won't find the migrations on disk
call_command('migrate', 'event_calendar', 'zero', verbosity=0)
call_command('migrate', 'newspaper', 'zero', verbosity=0)
def test_alt_database(self):
""" Test migrating a project with an alternate database selected """
self.tempdir = tempfile.mkdtemp()
with override_settings(BASE_DIR=self.tempdir):
self.setup_migration_tree(settings.BASE_DIR)
# Roll back migrations to a blank state in the 'other' database
call_command('migrate', 'event_calendar', 'zero', database='other',
verbosity=0)
call_command('migrate', 'newspaper', 'zero', database='other',
verbosity=0)
default_connection = connections[DEFAULT_DB_ALIAS]
connection = connections['other']
default_loader = MigrationLoader(default_connection)
loader = MigrationLoader(connection)
default_applied_migrations = copy(
default_loader.applied_migrations)
applied_migrations = copy(loader.applied_migrations)
call_command('migrateproject', database='other', verbosity=0)
default_loader = MigrationLoader(default_connection)
loader = MigrationLoader(connection)
# The default database should remain unchanged
self.assertEqual(default_loader.applied_migrations,
default_applied_migrations)
# The 'other' database should have been migrated
self.assertNotEqual(loader.applied_migrations,
applied_migrations)
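# A minimal, hedged sketch of the apply/unapply cycle exercised by the tests above.
# It assumes the same django_migrate_project management commands
# ('makeprojectmigrations' and 'migrateproject') and a configured project-level
# migrations package; it also assumes 'unapply' and 'database' can be combined,
# which the tests only exercise separately. The helper name is hypothetical and
# nothing in this module calls it.
def _example_project_migration_cycle(database=DEFAULT_DB_ALIAS):
    """Collects project-wide migrations, applies them, then rolls them back."""
    # Collect pending model changes into the project-level migrations package.
    call_command('makeprojectmigrations', verbosity=0)
    # Apply the collected migrations to the selected database.
    call_command('migrateproject', database=database, verbosity=0)
    # Unapply everything that 'migrateproject' just applied.
    call_command('migrateproject', unapply=True, database=database, verbosity=0)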
|
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility class for managing Plist files."""
import os
import plistlib
from xctestrunner.shared import ios_errors
class Plist(object):
"""Handles the .plist file operations."""
def __init__(self, plist_file_path):
"""Initializes the Plist object.
Args:
plist_file_path: string, the path of the .plist file.
"""
self._plist_file_path = plist_file_path
# Module to read the .plist file.
def GetPlistField(self, field):
"""View specific field in the .plist file.
Args:
      field: string, the field consists of property key names delimited by
colons. List(array) items are specified by a zero-based integer index.
Examples
:CFBundleShortVersionString
:CFBundleDocumentTypes:2:CFBundleTypeExtensions
Returns:
      the object stored at the plist's field.
Raises:
ios_errors.PlistError: the field does not exist in the plist dict.
"""
with open(self._plist_file_path, 'rb') as plist_file:
plist_root_object = plistlib.load(plist_file)
return _GetObjectWithField(plist_root_object, field)
def HasPlistField(self, field):
"""Checks whether a specific field is in the .plist file.
Args:
      field: string, the field consists of property key names delimited by
colons. List(array) items are specified by a zero-based integer index.
Examples
:CFBundleShortVersionString
:CFBundleDocumentTypes:2:CFBundleTypeExtensions
Returns:
      whether the field is in the .plist file.
"""
try:
self.GetPlistField(field)
except ios_errors.PlistError:
return False
return True
def SetPlistField(self, field, value):
"""Set field with provided value in .plist file.
Args:
      field: string, the field consists of property key names delimited by
colons. List(array) items are specified by a zero-based integer index.
Examples
:CFBundleShortVersionString
:CFBundleDocumentTypes:2:CFBundleTypeExtensions
      value: an object, the value of the field to be set. It can be an integer,
bool, string, array, dict.
Raises:
ios_errors.PlistError: the field does not exist in the .plist file's dict.
"""
if not field:
with open(self._plist_file_path, 'wb') as plist_file:
plistlib.dump(value, plist_file)
return
if os.path.exists(self._plist_file_path):
with open(self._plist_file_path, 'rb') as plist_file:
plist_root_object = plistlib.load(plist_file)
else:
plist_root_object = {}
keys_in_field = field.rsplit(':', 1)
if len(keys_in_field) == 1:
key = field
target_object = plist_root_object
else:
key = keys_in_field[1]
target_object = _GetObjectWithField(plist_root_object, keys_in_field[0])
try:
target_object[_ParseKey(target_object, key)] = value
except ios_errors.PlistError as e:
raise e
except (KeyError, IndexError):
      raise ios_errors.PlistError('Failed to set key %s in object %s.'
% (key, target_object))
with open(self._plist_file_path, 'wb') as plist_file:
plistlib.dump(plist_root_object, plist_file)
def DeletePlistField(self, field):
"""Delete field in .plist file.
Args:
      field: string, the field consists of property key names delimited by
colons. List(array) items are specified by a zero-based integer index.
Examples
:CFBundleShortVersionString
:CFBundleDocumentTypes:2:CFBundleTypeExtensions
Raises:
ios_errors.PlistError: the field does not exist in the .plist file's dict.
"""
with open(self._plist_file_path, 'rb') as plist_file:
plist_root_object = plistlib.load(plist_file)
keys_in_field = field.rsplit(':', 1)
if len(keys_in_field) == 1:
key = field
target_object = plist_root_object
else:
key = keys_in_field[1]
target_object = _GetObjectWithField(plist_root_object, keys_in_field[0])
try:
del target_object[_ParseKey(target_object, key)]
except ios_errors.PlistError as e:
raise e
except (KeyError, IndexError):
raise ios_errors.PlistError('Failed to delete key %s from object %s.'
% (key, target_object))
with open(self._plist_file_path, 'wb') as plist_file:
plistlib.dump(plist_root_object, plist_file)
def _GetObjectWithField(target_object, field):
"""Gets sub object of the object with field.
Args:
target_object: the target object.
    field: string, the field consists of property key names delimited by
colons. List(array) items are specified by a zero-based integer index.
Examples
:CFBundleShortVersionString
:CFBundleDocumentTypes:2:CFBundleTypeExtensions
Returns:
    the object at the target object's field. If field is empty, returns the
target object itself.
Raises:
ios_errors.PlistError: the field does not exist in the object or the field
      is invalid.
"""
if not field:
return target_object
current_object = target_object
for key in field.split(':'):
try:
current_object = current_object[_ParseKey(current_object, key)]
except ios_errors.PlistError as e:
raise e
except (KeyError, IndexError):
raise ios_errors.PlistError(
          'The field %s cannot be found in the target object. '
'The object content is %s' % (field, current_object))
return current_object
def _ParseKey(target_object, key):
"""Parses the key value according target object type.
Args:
target_object: the target object.
key: string, the key of object.
Returns:
If object is dict, returns key itself. If object is list, returns int(key).
Raises:
ios_errors.PlistError: when object is list and key is not int, or object is
not list/dict.
"""
if isinstance(target_object, dict):
return key
if isinstance(target_object, list):
try:
return int(key)
except ValueError:
raise ios_errors.PlistError(
          'The key %s is an invalid index of list(array) object %s.'
% (key, target_object))
raise ios_errors.PlistError('The object %s is not dict or list.'
% target_object)
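# A short, hedged usage sketch of the Plist wrapper above. The file path, keys and
# values are hypothetical; the colon-delimited field syntax and the Get/Set/Has/Delete
# methods are the ones defined in this module. The helper is illustrative only and is
# never called by xctestrunner itself.
def _ExamplePlistUsage(plist_file_path):
  """Demonstrates reading and writing nested plist fields."""
  plist = Plist(plist_file_path)
  # Create or overwrite a top-level key (the file is created if it is missing).
  plist.SetPlistField('CFBundleShortVersionString', '1.0')
  # Read a nested value: 3rd CFBundleDocumentTypes entry -> CFBundleTypeExtensions.
  if plist.HasPlistField('CFBundleDocumentTypes:2:CFBundleTypeExtensions'):
    extensions = plist.GetPlistField('CFBundleDocumentTypes:2:CFBundleTypeExtensions')
  else:
    extensions = []
  # Remove the key that was set above.
  plist.DeletePlistField('CFBundleShortVersionString')
  return extensions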
|
|
import dateparser
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import * # noqa
from typing import Callable, Dict, List, Any, Union, Tuple
# Disable insecure warnings
requests.packages.urllib3.disable_warnings() # pylint: disable=no-member
''' CONSTANTS '''
INTEGRATION_NAME = 'Carbon Black EDR'
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
''' PARSING PROCESS EVENT COMPLEX FIELDS CLASS'''
class ProcessEventDetail:
"""
    This class represents the Process Event Details as found here:
https://developer.carbonblack.com/reference/enterprise-response/6.3/rest-api/#process-event-details
    Each sub-class represents a different pipe-delimited field and supports the format method.
"""
def __init__(self, piped_version: Union[str, list], fields):
self.fields = []
if not isinstance(piped_version, list):
piped_version = [piped_version]
for entry in piped_version:
data = entry.split('|')
            # zip silently truncates when the number of values and fields differ, which can result in misaligned data.
if len(data) != len(fields):
demisto.debug(f'{INTEGRATION_NAME} - Missing details. Ignoring entry: {entry}.')
self.fields.append(dict(zip(fields, data)))
def format(self):
return self.fields
class filemod_complete(ProcessEventDetail):
FIELDS = ['operation_type', 'event_time', 'file_path', 'md5_after_last_write',
'file_type', 'flagged_as_potential_tamper_attempt']
OPERATION_TYPE = {'1': 'Created the file',
'2': 'First wrote to the file',
'4': 'Deleted the file',
'8': 'Last wrote to the file'}
FILE_TYPE = {'1': 'PE',
'2': 'Elf',
'3': 'UniversalBin',
'8': 'EICAR',
'16': 'OfficeLegacy',
'17': 'OfficeOpenXml',
'48': 'Pdf',
'64': 'ArchivePkzip',
'65': 'ArchiveLzh',
'66': 'ArchiveLzw',
'67': 'ArchiveRar',
'68': 'ArchiveTar',
'69': 'Archive7zip'}
def __init__(self, piped_version):
super().__init__(piped_version, self.FIELDS)
def format(self):
for entry in self.fields:
entry['operation_type'] = self.OPERATION_TYPE.get(entry.get('operation_type', ''), '')
entry['file_type'] = self.FILE_TYPE.get(entry.get('file_type', ''), '')
return self.fields
class modload_complete(ProcessEventDetail):
FIELDS = ['event_time', 'loaded_module_md5', 'loaded_module_full_path']
def __init__(self, piped_version):
super().__init__(piped_version, self.FIELDS)
def format(self):
return self.fields
class regmod_complete(ProcessEventDetail):
FIELDS = ['operation_type', 'event_time', 'registry_key_path']
    OPERATION_TYPE = {'1': 'Created the registry key',
                      '2': 'First wrote to the registry key',
                      '4': 'Deleted the registry key',
                      '8': 'Last wrote to the registry key'}
def __init__(self, piped_version):
super().__init__(piped_version, self.FIELDS)
def format(self):
for entry in self.fields:
entry['operation_type'] = self.OPERATION_TYPE.get(entry.get('operation_type', ''), '')
return self.fields
class crossproc_complete(ProcessEventDetail):
FIELDS = ['cross-process_access_type', 'event_time', 'targeted_process_unique_id',
'targeted_process_md5', 'targeted_process_path', 'ProcessOpen_sub-type',
'requested_access_priviledges', 'flagged_as_potential_tamper_attempt']
SUB_TYPES = {'1': 'handle open to process', '2': 'handle open to thread in process'}
def __init__(self, piped_version):
super().__init__(piped_version, self.FIELDS)
def format(self):
for entry in self.fields:
entry['ProcessOpen_sub-type'] = self.SUB_TYPES.get(entry.get('ProcessOpen_sub-type', ''), '')
return self.fields
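# A hedged illustration of how the pipe-delimited event strings above are unpacked.
# The sample value below is hypothetical; the field order comes from
# filemod_complete.FIELDS and the code mappings from its OPERATION_TYPE / FILE_TYPE
# tables. This helper is illustrative only and is never called by the integration.
def _example_filemod_parsing():
    sample = '1|2021-01-01 00:00:00.000|c:\\temp\\example.exe|d41d8cd98f00b204e9800998ecf8427e|1|false'
    # Splitting on '|' and zipping with FIELDS yields one dict per entry; format()
    # then maps operation_type '1' -> 'Created the file' and file_type '1' -> 'PE'.
    return filemod_complete(sample).format()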
''' CLIENT CLASS '''
class Client(BaseClient):
def __init__(self, base_url: str, apitoken: str, use_ssl: bool, use_proxy: bool):
headers = {'X-Auth-Token': apitoken, 'Accept': 'application/json', 'Content-Type': 'application/json'}
super().__init__(base_url, headers=headers, verify=use_ssl, proxy=use_proxy)
def http_request(self, url: str, method: str, params: dict = None, json_data: dict = None,
ok_codes: tuple = (200, 204), resp_type: str = 'json') -> dict:
"""
        Initiates an HTTP request to the Carbon Black EDR server.
"""
data = self._http_request(
method=method,
ok_codes=ok_codes,
url_suffix=url,
params=params,
resp_type=resp_type,
return_empty_response=True,
json_data=json_data,
timeout=30,
)
return data
def get_sensors(self, id: str = None, hostname: str = None, ipaddr: str = None, # noqa: F841
groupid: str = None, inactive_filter_days: str = None, # noqa: F841
limit: Union[int, str] = None) -> Tuple[int, List[dict]]:
url = f'/v1/sensor/{id}' if id else '/v1/sensor'
query_params = assign_params(
ip=ipaddr,
hostname=hostname,
groupid=groupid,
inactive_filter_days=inactive_filter_days
)
res = self.http_request(url=url, method='GET', params=query_params, ok_codes=(200, 204))
        # When querying a specific sensor without filters, the API returns a dictionary instead of a list.
return len(res), res[:arg_to_number(limit, 'limit')] if isinstance(res, list) else [res]
def get_alerts(self, status: str = None, username: str = None, feedname: str = None,
hostname: str = None, report: str = None, sort: str = None, query: str = None,
facet: str = None, limit: Union[str, int] = None, start: str = None,
allow_empty_params: bool = False) -> dict:
query_params = assign_params(
status=status,
username=username,
feedname=feedname,
hostname=hostname,
report=report,
query=query
)
query_string = _create_query_string(query_params, allow_empty_params=allow_empty_params)
params = assign_params(q=query_string,
rows=arg_to_number(limit, 'limit'),
start=start,
sort=sort,
facet=facet,
)
return self.http_request(url='/v2/alert', method='GET', params=params)
def get_binaries(self, md5: str = None, product_name: str = None, signed: str = None, # noqa: F841
group: str = None, hostname: str = None, digsig_publisher: str = None, # noqa: F841
company_name: str = None, sort: str = None,
observed_filename: str = None, query: str = None, facet: str = None,
limit: str = None, start: str = None) -> dict:
query_params = assign_params(
md5=md5,
product_name=product_name,
signed=signed,
group=group,
hostname=hostname,
digsig_publisher=digsig_publisher,
company_name=company_name,
observed_filename=observed_filename,
query=query
)
query_string = _create_query_string(query_params)
params = assign_params(q=query_string,
rows=arg_to_number(limit, 'limit'),
start=start,
sort=sort,
facet=facet,
)
return self.http_request(url='/v1/binary', method='GET', params=params)
def get_processes(self, process_name: str = None, group: str = None, hostname: str = None,
parent_name: str = None, process_path: str = None, md5: str = None,
query: str = None, group_by: str = None, sort: str = None, facet: str = None,
facet_field: str = None, limit: str = None, start: str = None, allow_empty: bool = False):
query_fields = ['process_name', 'group', 'hostname', 'parent_name', 'process_path', 'md5', 'query']
local_params = locals()
query_params = assign_params(
process_name=process_name,
parent_name=parent_name,
process_path=process_path,
group=group,
hostname=hostname,
md5=md5,
query=query
)
query_string = _create_query_string(query_params, allow_empty)
params = assign_params(q=query_string,
rows=arg_to_number(limit, 'limit'),
start=start,
sort=sort,
facet=facet,
)
if facet_field:
params['facet.field'] = facet_field
if group_by:
params['cb.group'] = group_by
return self.http_request(url='/v1/process', method='GET', params=params)
def get_formatted_ProcessEventDetail(self, process_json: dict):
complex_fields = {'filemod_complete': filemod_complete, 'modload_complete': modload_complete,
'regmod_complete': regmod_complete, 'crossproc_complete': crossproc_complete}
formatted_json = {}
for field in process_json:
if field in complex_fields:
# creating the relevant field object and formatting it.
field_object: ProcessEventDetail = complex_fields[field](process_json.get(field))
formatted_json[field] = field_object.format()
else:
formatted_json[field] = process_json.get(field)
return formatted_json
''' HELPER FUNCTIONS '''
def _create_query_string(params: dict, allow_empty_params: bool = False) -> str:
"""
    Creates a CB query string from params according to https://developer.carbonblack.com/resources/query_overview.pdf.
    If 'query' is in params, it overrides the other params.
    allow_empty_params is used for testing, not production, as it would overload the context.
"""
    # If the user provides both filter params and a raw query, the results may not match their expectations,
    # so we prohibit combining the two in search commands.
if 'query' in params and len(params) > 1:
raise DemistoException(f'{INTEGRATION_NAME} - Searching with both query and other filters is not allowed. '
f'Please provide either a search query or one of the possible filters.')
elif 'query' in params:
return params['query']
current_query = [f"{query_field}:{params[query_field]}" for query_field in params]
current_query = ' AND '.join(current_query)
if not current_query and not allow_empty_params:
raise DemistoException(f'{INTEGRATION_NAME} - Search without any filter is not permitted.')
return current_query
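# A worked example of the query strings the helper above produces (the filter values
# are hypothetical). With plain filter params the pairs are joined with AND:
#   _create_query_string({'hostname': 'workstation-1', 'process_name': 'cmd.exe'})
#   -> 'hostname:workstation-1 AND process_name:cmd.exe'
# With {'query': 'process_name:cmd.exe'} the raw query is returned as-is, and passing
# a query together with other filters raises a DemistoException.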
def _add_to_current_query(current_query: str = '', params: dict = None) -> str:
new_query = ''
if not params:
return current_query
if current_query:
new_query += f'({current_query}) AND '
current_query_params = [f"{query_field}:{params[query_field]}" for query_field in params]
new_query += ' AND '.join(current_query_params)
return new_query
def _get_sensor_isolation_change_body(client: Client, sensor_id: str, new_isolation: bool) -> dict:
sensor_data = client.get_sensors(sensor_id)[1][0] # returns (length, [sensor_data])
new_sensor_data = {
'network_isolation_enabled': new_isolation,
'group_id': sensor_data.get('group_id')
}
return new_sensor_data
def _parse_field(raw_field: str, sep: str = ',', index_after_split: int = 0, chars_to_remove: str = '') -> str:
'''
    Gets a specific sub-string out of a complex raw field: splits raw_field by sep, takes the element at
    index_after_split, and strips any chars_to_remove. For example, "example,example2|" -> 'example2'.
'''
if not raw_field:
demisto.debug(f'{INTEGRATION_NAME} - Got empty raw field to parse.')
return ''
try:
new_field = raw_field.split(sep)[index_after_split]
except IndexError:
demisto.error(f'{INTEGRATION_NAME} - raw: {raw_field}, split by {sep} has no index {index_after_split}')
return ''
chars_to_remove = set(chars_to_remove)
for char in chars_to_remove:
new_field = new_field.replace(char, '')
return new_field
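# A worked example of how _parse_field is used elsewhere in this file to extract the
# MAC address from a sensor's 'network_adapters' string (the sample value is hypothetical):
#   _parse_field('10.0.0.1,aabbccddeeff|', index_after_split=1, chars_to_remove='|')
#   -> 'aabbccddeeff'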
def _get_isolation_status_field(isolation_activated: bool, is_isolated: bool) -> str:
# Logic for isolation can be found in:
# https://developer.carbonblack.com/reference/enterprise-response/6.3/rest-api/#sensorsendpoints
if isolation_activated:
sensor_isolation_status = 'Yes' if is_isolated else 'Pending isolation'
else:
sensor_isolation_status = 'Pending unisolation' if is_isolated else 'No'
return sensor_isolation_status
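# For reference, the mapping implemented by _get_isolation_status_field above:
#   isolation_activated=True,  is_isolated=True  -> 'Yes'
#   isolation_activated=True,  is_isolated=False -> 'Pending isolation'
#   isolation_activated=False, is_isolated=True  -> 'Pending unisolation'
#   isolation_activated=False, is_isolated=False -> 'No'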
''' COMMAND FUNCTIONS '''
def unquarantine_device_command(client: Client, sensor_id: str) -> CommandResults:
url = f'/v1/sensor/{sensor_id}'
res = client.http_request(url=url, method='PUT',
json_data=_get_sensor_isolation_change_body(client, sensor_id, False))
if not res:
raise Exception(f'{INTEGRATION_NAME} - could not un-isolate sensor {sensor_id}')
return CommandResults(readable_output='Sensor was un-isolated successfully.')
def quarantine_device_command(client: Client, sensor_id: str) -> CommandResults:
url = f'/v1/sensor/{sensor_id}'
res = client.http_request(url=url, method='PUT',
json_data=_get_sensor_isolation_change_body(client, sensor_id, True))
if not res:
raise Exception(f'{INTEGRATION_NAME} - could not isolate sensor {sensor_id}')
return CommandResults(readable_output='Sensor was isolated successfully.')
def sensors_list_command(client: Client, id: str = None, hostname: str = None, ip: str = None,
group_id: str = None, inactive_filter_days: str = None, limit: int = None) -> CommandResults:
try:
total_num_of_sensors, res = client.get_sensors(id, hostname, ip, group_id, inactive_filter_days, limit)
human_readable_data = []
for sensor_data in res:
human_readable_data.append({
'Computer Name': sensor_data.get('computer_name'),
'Status': sensor_data.get('status'),
'OS Version': sensor_data.get('os_type'),
'Node Id': sensor_data.get('node_id'),
'Sensor Version': sensor_data.get('build_version_string'),
'Sensor Id': sensor_data.get('id'),
'IP Address/MAC Info': _parse_field(sensor_data.get('network_adapters', ''), index_after_split=1,
chars_to_remove='|'),
'Group ID': sensor_data.get('group_id'),
'Power State': sensor_data.get('power_state'),
'Health Score': sensor_data.get('sensor_health_status'),
'Is Isolating': sensor_data.get('is_isolating')
})
md = tableToMarkdown(f'{INTEGRATION_NAME} - Sensors', human_readable_data, removeNull=True, headers=[
'Sensor Id', 'Computer Name', 'Status', 'Power State', 'Group ID', 'OS Version', 'Health Score',
'Is Isolating', 'Node Id', 'Sensor Version', 'IP Address/MAC Info'])
md += f"\nShowing {len(res)} out of {total_num_of_sensors} results."
return CommandResults(outputs=res, outputs_prefix='CarbonBlackEDR.Sensor', outputs_key_field='id',
readable_output=md, raw_response=res)
except DemistoException as e:
if '404' in e.message:
raise Exception(f'{INTEGRATION_NAME} - The sensor {id} could not be found. '
f'Please try using a different sensor.')
else:
raise Exception(f'{INTEGRATION_NAME} - Error connecting to API. Error: {e.message}')
def watchlist_delete_command(client: Client, id: str) -> CommandResults:
res = client.http_request(url=f'/v1/watchlist/{id}', method='DELETE')
    # res contains whether the task was successful.
return CommandResults(readable_output=res.get('result'))
def watchlist_update_command(client: Client, id: str, search_query: str, description: str,
enabled: bool = None) -> CommandResults:
params = assign_params(enabled=enabled, search_query=search_query, description=description)
res = client.http_request(url=f'/v1/watchlist/{id}', method='PUT', json_data=params)
    # res contains whether the task was successful.
return CommandResults(readable_output=res.get('result'))
def watchlist_create_command(client: Client, name: str, search_query: str, index_type: str = 'events',
description: str = '') -> CommandResults:
params = assign_params(name=name, search_query=search_query, description=description, index_type=index_type)
res = client.http_request(url='/v1/watchlist', method='POST', json_data=params)
watchlist_id = res.get('id')
    if watchlist_id:
output = {'id': watchlist_id}
return CommandResults(outputs=output, outputs_prefix='CarbonBlackEDR.Watchlist', outputs_key_field='id',
readable_output=f"Successfully created new watchlist with id {watchlist_id}")
return CommandResults(readable_output="Could not create new watchlist.")
def get_watchlist_list_command(client: Client, id: str = None, limit: str = None) -> CommandResults:
url = f'/v1/watchlist/{id}' if id else '/v1/watchlist'
res: Union[dict, list] = client.http_request(url=url, method='GET')
human_readable_data = []
# Handling case of only one record.
if id:
res = [res]
total_num_of_watchlists = len(res)
res = res[:arg_to_number(limit, 'limit')]
for watchlist in res:
human_readable_data.append({
'Name': watchlist.get('name'),
'ID': watchlist.get('id'),
'Group ID': watchlist.get('group_id'),
'Description': watchlist.get('description'),
'Total Hits': watchlist.get('total_hits'),
'Query': watchlist.get('search_query'),
})
md = f'{INTEGRATION_NAME} - Watchlists'
md += tableToMarkdown(f"\nShowing {len(res)} out of {total_num_of_watchlists} results.", human_readable_data,
removeNull=True)
return CommandResults(outputs=res, outputs_prefix='CarbonBlackEDR.Watchlist', outputs_key_field='name',
readable_output=md)
def binary_ban_command(client: Client, md5: str, text: str, last_ban_time: str = None, ban_count: str = None,
last_ban_host: str = None) -> CommandResults:
body = assign_params(md5hash=md5,
text=text, last_ban_time=last_ban_time, ban_count=ban_count,
last_ban_host=last_ban_host)
try:
client.http_request(url='/v1/banning/blacklist', method='POST', json_data=body)
except DemistoException as e:
if '409' in e.message:
return CommandResults(readable_output=f'Ban for md5 {md5} already exists')
else:
raise Exception(f'{INTEGRATION_NAME} - Error connecting to API. Error: {e.message}')
return CommandResults(readable_output='hash banned successfully')
def binary_bans_list_command(client: Client, limit: str = None) -> CommandResults:
res = client.http_request(url='/v1/banning/blacklist', method='GET')
res = res[:arg_to_number(limit, 'limit')] if limit else res
human_readable_data = []
for banned in res:
human_readable_data.append({
'md5': banned.get('md5hash'),
'Text': banned.get('text'),
'Timestamp': banned.get('timestamp'),
'User ID': banned.get('user_id'),
'Username': banned.get('username'),
})
return CommandResults(outputs=res, outputs_prefix='CarbonBlackEDR.BinaryBan', outputs_key_field='md5',
                          readable_output=tableToMarkdown(f'{INTEGRATION_NAME} - Banned Hashes', human_readable_data))
def alert_update_command(client: Client, alert_ids: str, status: str = None, set_ignored: bool = None,
query: str = None) -> CommandResults:
url = '/v1/alerts'
body = assign_params(alert_ids=argToList(alert_ids),
requested_status=status,
set_ignored=set_ignored,
query=query
)
res = client.http_request(url=url, method='POST', json_data=body)
if not res:
raise Exception(f"{INTEGRATION_NAME} - Could not find alerts: {', '.join(alert_ids)}.")
return CommandResults(readable_output='Alert was updated successfully.')
def alert_search_command(client: Client, status: str = None, username: str = None, feedname: str = None,
hostname: str = None, report: str = None, sort: str = None, query: str = None,
facet: str = None, limit: str = None, start: str = '0') -> CommandResults:
res = client.get_alerts(status, username, feedname, hostname, report, sort, query, facet, limit, start)
if not res:
raise Exception(f'{INTEGRATION_NAME} - Request cannot be processed.')
alerts = res.get('results', [])
human_readable_data = []
for alert in alerts:
human_readable_data.append({
'Alert ID': alert.get('unique_id'),
'File Name': alert.get('process_name'),
'File Path': alert.get('process_path'),
'Hostname': alert.get('hostname'),
'Source md5': alert.get('md5'),
'Segment ID': alert.get('segment_id'),
'Severity': alert.get('alert_severity'),
'Created Time': alert.get('created_time'),
'Status': alert.get('status'),
})
outputs = assign_params(Results=alerts, Facets=res.get('facets'), Terms=res.get('terms'),
total_results=res.get('total_results'))
md = f'{INTEGRATION_NAME} - Alert Search Results'
md += tableToMarkdown(
f"\nShowing {start} - {len(res.get('results', []))} out of {res.get('total_results', '0')} results.",
human_readable_data)
return CommandResults(outputs=outputs, outputs_prefix='CarbonBlackEDR.Alert',
outputs_key_field='Terms',
readable_output=md)
def binary_summary_command(client: Client, md5: str) -> CommandResults:
url = f'/v1/binary/{md5}/summary'
try:
res = client.http_request(url=url, method='GET')
if not res:
return CommandResults(
readable_output=f'Could not find data for file {md5}.')
human_readable_data = {
'Host Count': res.get('host_count'),
'Group': res.get('group'),
'OS Type': res.get('os_type'),
'Timestamp': res.get('timestamp'),
'md5': res.get('md5'),
'Last Seen': res.get('last_seen'),
'Is Executable Image': res.get('is_executable_image')
}
return CommandResults(outputs=res, outputs_prefix='CarbonBlackEDR.BinaryMetadata', outputs_key_field='md5',
                              readable_output=tableToMarkdown(f'{INTEGRATION_NAME} - Summary For File {md5}',
human_readable_data, removeNull=True))
except DemistoException as e:
if '404' in e.message:
return CommandResults(readable_output=f'File {md5} could not be found')
else:
raise Exception(f'{INTEGRATION_NAME} - Error connecting to API. Error: {e.message}')
def binary_download_command(client: Client, md5: str) -> CommandResults:
url = f'/v1/binary/{md5}'
try:
res = client.http_request(url=url, method='GET', ok_codes=(200, 204, 404), resp_type='content')
if not res:
return CommandResults(
readable_output=f'Could not find data for file {md5}.')
return fileResult(f'binary_{md5}.zip', res, file_type=9)
except DemistoException as e:
if '404' in e.message:
return CommandResults(readable_output=f'File {md5} could not be found')
else:
raise Exception(f'{INTEGRATION_NAME} - Error connecting to API. Error: {e.message}')
def binary_search_command(client: Client, md5: str = None, product_name: str = None, digital_signature: str = None,
group: str = None, hostname: str = None, publisher: str = None, company_name: str = None,
sort: str = None, observed_filename: str = None, query: str = None, facet: str = None,
limit: str = '50', start: str = '0') -> CommandResults:
res = client.get_binaries(md5, product_name, digital_signature, group, hostname, publisher, company_name, sort,
observed_filename, query, facet, limit, start)
if not res:
raise Exception(f'{INTEGRATION_NAME} - Request cannot be processed.')
outputs = assign_params(Results=res.get('results'), Facets=res.get('facets'), Terms=res.get('terms'),
total_results=res.get('total_results'))
human_readable_data = []
for binary_file in res.get('results', []):
human_readable_data.append({
'Host Count': binary_file.get('host_count'),
'Group': binary_file.get('group'),
'OS Type': binary_file.get('os_type'),
'Timestamp': binary_file.get('timestamp'),
'md5': binary_file.get('md5'),
'Last Seen': binary_file.get('last_seen'),
'Is Executable Image': binary_file.get('is_executable_image')
})
md = f'{INTEGRATION_NAME} - Binary Search Results'
md += tableToMarkdown(f"\nShowing {start} - {len(res.get('results', []))} out of {res.get('total_results', '0')} "
f"results.", human_readable_data, headers=['md5', 'Group', 'OS Type', 'Host Count',
'Last Seen', 'Is Executable Image', 'Timestamp'])
return CommandResults(outputs=outputs, outputs_prefix='CarbonBlackEDR.BinarySearch',
outputs_key_field='md5',
readable_output=md)
def process_events_list_command(client: Client, process_id: str, segment_id: str, start: str = None, count: str = None):
if not process_id or not segment_id:
raise Exception(f'{INTEGRATION_NAME} - Please provide both process id and segment id to run this command.')
url = f'/v3/process/{process_id}/{segment_id}/event'
start = int(start) if start else None
count = int(count) if count else None
params = {}
if start:
params['cb.event_start'] = start
if count:
params['cb.event_count'] = count
res = client.http_request(url=url, method='GET', params=params)
process = client.get_formatted_ProcessEventDetail(res.get('process', {}))
return CommandResults(outputs=process, outputs_prefix='CarbonBlackEDR.Events',
outputs_key_field='id',
readable_output=process, raw_response=res)
def process_segments_get_command(client: Client, process_id: str, limit: str = '50') -> CommandResults:
url = f'/v1/process/{process_id}/segment'
res = client.http_request(url=url, method='GET')
if not res:
return CommandResults(
readable_output=f'Could not find segment data for process id {process_id}.')
res = res.get('process', {}).get('segments')
res = res[:arg_to_number(limit, 'limit')] if limit else res
    # Human readable output depends on the request and is therefore not prettified.
return CommandResults(outputs=res, outputs_prefix='CarbonBlackEDR.ProcessSegments',
outputs_key_field='unique_id',
readable_output=res)
def process_get_command(client: Client, process_id: str, segment_id: str,
get_related: bool = False) -> CommandResults:
get_related = argToBoolean(get_related)
url = f"/{'v1' if get_related else 'v2'}/process/{process_id}/{segment_id}"
try:
res = client.http_request(url=url, method='GET')
except DemistoException as e:
if "404" in e.message:
raise Exception(f'{INTEGRATION_NAME} - Could not find result for '
f'process id {process_id} with segment id {segment_id}.')
else:
raise Exception(f'{INTEGRATION_NAME} - Error connecting to API. Error: {e.message}')
data = res.get('process', {}) if get_related else res
human_readable_data = {
'Process Path': data.get('path'),
'Process md5': data.get('process_md5'),
'Process Name': data.get('process_name'),
'Process PID': data.get('process_pid'),
'Process ID': data.get('id'),
'Hostname': data.get('hostname'),
'Segment ID': data.get('segment_id'),
'Username': data.get('username'),
'Last Update': data.get('last_update'),
'Is Terminated': data.get('terminated')
}
return CommandResults(outputs=res, outputs_prefix='CarbonBlackEDR.Process', outputs_key_field='id',
readable_output=tableToMarkdown(f'{INTEGRATION_NAME} - Process', human_readable_data))
def processes_search_command(client: Client, process_name: str = None, group: str = None, hostname: str = None,
parent_name: str = None, process_path: str = None, md5: str = None,
query: str = None, group_by: str = None, sort: str = None, facet: str = None,
facet_field: str = None, limit: str = '50', start: str = '0'):
res = client.get_processes(process_name, group, hostname, parent_name, process_path, md5, query, group_by, sort,
facet, facet_field, limit, start)
if not res:
raise Exception(f'{INTEGRATION_NAME} - Request cannot be processed.')
outputs = assign_params(Results=res.get('results'), Facets=res.get('facets'), Terms=res.get('terms'),
total_results=res.get('total_results'))
human_readable_data = []
for process in res.get('results'):
human_readable_data.append(
{
'Process Path': process.get('path'),
'Process md5': process.get('process_md5'),
'Process Name': process.get('process_name'),
'Segment ID': process.get('segment_id'),
'Process PID': process.get('process_pid'),
'Process ID': process.get('id'),
'Hostname': process.get('hostname'),
'Username': process.get('username'),
'Last Update': process.get('last_update'),
'Is Terminated': process.get('terminated')
})
md = f'#### {INTEGRATION_NAME} - Process Search Results'
md += tableToMarkdown(
f"\nShowing {start} - {len(res.get('results', []))} out of {res.get('total_results', '0')} results.",
human_readable_data,
headers=['Process Path', 'Process ID', 'Segment ID', 'Process md5', 'Process Name', 'Hostname',
'Process PID', 'Username', 'Last Update', 'Is Terminated'],
removeNull=True)
return CommandResults(outputs=outputs, outputs_prefix='CarbonBlackEDR.ProcessSearch', outputs_key_field='Terms',
readable_output=md)
def sensor_installer_download_command(client: Client, os_type: str, group_id: str):
url = f"/v1/group/{group_id}/installer/{os_type.replace('_', '/')}"
res = client.http_request(url=url, method='GET', resp_type='content')
if not res:
return CommandResults(
            readable_output=f'Could not find installer for group id {group_id} which is compatible with {os_type}.')
return fileResult(f'sensor_installer_{group_id}_{os_type}.zip', res, file_type=9)
def endpoint_command(client: Client, id: str = None, ip: str = None, hostname: str = None):
if not id and not ip and not hostname:
raise Exception(f'{INTEGRATION_NAME} - In order to run this command, please provide valid id, ip or hostname')
try:
ips = argToList(ip)
res = []
if ips:
for current_ip in ips:
res += client.get_sensors(id=id, ipaddr=current_ip, hostname=hostname)[1]
else:
res += client.get_sensors(id=id, hostname=hostname)[1]
endpoints = []
command_results = []
for sensor in res:
is_isolated = _get_isolation_status_field(sensor['network_isolation_enabled'],
sensor['is_isolating'])
endpoint = Common.Endpoint(
id=sensor.get('id'),
hostname=sensor.get('computer_name'),
ip_address=_parse_field(sensor.get('network_adapters', ''), index_after_split=0, chars_to_remove='|'),
mac_address=_parse_field(sensor.get('network_adapters', ''), index_after_split=1, chars_to_remove='|'),
os_version=sensor.get('os_environment_display_string'),
memory=sensor.get('physical_memory_size'),
status='Online' if sensor.get('status') else 'Offline',
is_isolated=is_isolated,
vendor='Carbon Black Response')
endpoints.append(endpoint)
endpoint_context = endpoint.to_context().get(Common.Endpoint.CONTEXT_PATH)
md = tableToMarkdown(f'{INTEGRATION_NAME} - Endpoint: {sensor.get("id")}', endpoint_context)
command_results.append(CommandResults(
readable_output=md,
raw_response=res,
indicator=endpoint
))
return command_results
except Exception as e:
        return CommandResults(readable_output=f'{INTEGRATION_NAME} - Could not get endpoint (error: {e})')
def fetch_incidents(client: Client, max_results: int, last_run: dict, first_fetch_time: str, status: str = None,
feedname: str = None, query: str = ''):
if (status or feedname) and query:
raise Exception(f'{INTEGRATION_NAME} - Search is not permitted with both query and filter parameters.')
max_results = arg_to_number(arg=max_results, arg_name='max_fetch', required=False) if max_results else 50
# How much time before the first fetch to retrieve incidents
first_fetch_time = dateparser.parse(first_fetch_time)
last_fetch = last_run.get('last_fetch', None) # {last_fetch: timestamp}
demisto.debug(f'{INTEGRATION_NAME} - last fetch: {last_fetch}')
# Handle first fetch time
if last_fetch is None:
last_fetch = first_fetch_time
else:
last_fetch = datetime.fromtimestamp(last_fetch)
latest_created_time = last_fetch.timestamp()
date_range = f'[{last_fetch.strftime("%Y-%m-%dT%H:%M:%S")} TO *]'
incidents: List[Dict[str, Any]] = []
alerts = []
    # Multiple statuses are not supported by the API. If multiple statuses are provided, fetch the incidents
    # for each status. Otherwise, run without a status filter.
query_params = {'created_time': date_range}
if feedname:
query_params['feedname'] = feedname
if status:
for current_status in argToList(status):
demisto.debug(f'{INTEGRATION_NAME} - Fetching incident from Server with status: {current_status}')
query_params['status'] = current_status
# we create a new query containing params since we do not allow both query and params.
res = client.get_alerts(query=_create_query_string(query_params), limit=max_results)
alerts += res.get('results', [])
demisto.debug(f'{INTEGRATION_NAME} - fetched {len(alerts)} so far.')
else:
query = _add_to_current_query(query, query_params)
demisto.debug(f'{INTEGRATION_NAME} - Fetching incident from Server with status: {status}')
res = client.get_alerts(query=query, limit=max_results)
alerts += res.get('results', [])
demisto.debug(f'{INTEGRATION_NAME} - Got total of {len(alerts)} alerts from CB server.')
for alert in alerts:
incident_created_time = dateparser.parse(alert.get('created_time'))
incident_created_time_ms = incident_created_time.timestamp()
# to prevent duplicates, adding incidents with creation_time > last fetched incident
if last_fetch:
if incident_created_time_ms <= last_fetch.timestamp():
demisto.debug(f'{INTEGRATION_NAME} - alert {str(alert)} was created at {incident_created_time_ms}.'
f' Skipping.')
continue
alert_id = alert.get('unique_id', '')
alert_name = alert.get('process_name', '')
incident_name = f'{INTEGRATION_NAME}: {alert_id} {alert_name}'
if not alert_id or not alert_name:
demisto.debug(f'{INTEGRATION_NAME} - Alert details are missing. {str(alert)}')
incident = {
'name': incident_name,
'occurred': timestamp_to_datestring(incident_created_time_ms),
'rawJSON': json.dumps(alert),
}
incidents.append(incident)
# Update last run and add incident if the incident is newer than last fetch
if incident_created_time_ms > latest_created_time:
latest_created_time = incident_created_time_ms
demisto.debug(f'Fetched {len(alerts)} alerts. Saving {len(incidents)} as incidents.')
# Save the next_run as a dict with the last_fetch key to be stored
next_run = {'last_fetch': latest_created_time}
return next_run, incidents
def test_module(client: Client, params: dict) -> str:
try:
client.get_processes(limit='5', allow_empty=True)
if params['isFetch']:
client.get_alerts(status=params.get('alert_status', None), feedname=params.get('alert_feed_name', None),
query=params.get('alert_query', None), allow_empty_params=False, limit='3')
return 'ok'
except DemistoException as e:
if 'Forbidden' in str(e) or 'UNAUTHORIZED' in str(e):
raise Exception('Authorization Error: make sure API Key is correctly set')
else:
raise e
''' MAIN FUNCTION '''
def main() -> None:
try:
params = demisto.params()
base_url = urljoin(params['url'], '/api')
if not params.get('credentials') or not (api_token := params.get('credentials', {}).get('password')):
raise DemistoException('Missing API Key. Fill in a valid key in the integration configuration.')
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
command = demisto.command()
args = demisto.args() if demisto.args() else {}
demisto.debug(f'Command being called is {command}')
client = Client(
base_url=base_url,
use_ssl=verify_certificate,
use_proxy=proxy,
apitoken=api_token
)
commands: Dict[str, Callable] = {'cb-edr-processes-search': processes_search_command,
'cb-edr-process-get': process_get_command,
'cb-edr-process-segments-get': process_segments_get_command,
'cb-edr-process-events-list': process_events_list_command,
'cb-edr-binary-search': binary_search_command,
'cb-edr-binary-download': binary_download_command,
'cb-edr-binary-summary': binary_summary_command,
'cb-edr-alert-search': alert_search_command,
'cb-edr-alert-update': alert_update_command,
'cb-edr-binary-bans-list': binary_bans_list_command,
'cb-edr-binary-ban': binary_ban_command,
'cb-edr-watchlists-list': get_watchlist_list_command,
'cb-edr-watchlist-create': watchlist_create_command,
'cb-edr-watchlist-update': watchlist_update_command,
'cb-edr-watchlist-delete': watchlist_delete_command,
'cb-edr-sensors-list': sensors_list_command,
'cb-edr-quarantine-device': quarantine_device_command,
'cb-edr-unquarantine-device': unquarantine_device_command,
'cb-edr-sensor-installer-download': sensor_installer_download_command,
'endpoint': endpoint_command
}
if command == 'test-module':
result = test_module(client, params)
return_results(result)
elif command == 'fetch-incidents':
next_run, incidents = fetch_incidents(
client=client,
max_results=params.get('max_fetch'),
last_run=demisto.getLastRun(),
first_fetch_time=params.get('first_fetch', '3 days'),
status=params.get('alert_status', None),
feedname=params.get('alert_feed_name', None),
query=params.get('alert_query', None))
demisto.setLastRun(next_run)
demisto.incidents(incidents)
elif command in commands:
return_results(commands[command](client, **args))
else:
raise NotImplementedError(f'command {command} was not implemented in this integration.')
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
''' ENTRY POINT '''
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
|
|
# -*- coding: utf-8 -*-
"""SQLite parser plugin for Google Hangouts conversations database files."""
from __future__ import unicode_literals
from dfdatetime import posix_time as dfdatetime_posix_time
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.parsers import sqlite
from plaso.parsers.sqlite_plugins import interface
class HangoutsMessageData(events.EventData):
"""GoogleHangouts Message event data.
Attributes:
body (str): content of the SMS text message.
message_status (int): message status.
message_type (int): message type.
    sender (str): name of the sender.
"""
DATA_TYPE = 'android:messaging:hangouts'
def __init__(self):
"""Initializes event data."""
super(HangoutsMessageData, self).__init__(data_type=self.DATA_TYPE)
self.body = None
self.message_status = None
self.message_type = None
self.sender = None
class HangoutsMessagePlugin(interface.SQLitePlugin):
"""SQLite parser plugin for Google Hangouts conversations database files.
The Google Hangouts conversations database file is typically stored in:
/data/com.google.android.talk/databases/babel.db
This SQLite database is the conversation database for conversations,
participant names, messages, and information about the Google Hangout event.
There can be multiple babel.db databases, and each database name will be
followed by an integer starting with 0, for example:
"babel0.db,babel1.db,babel3.db".
"""
NAME = 'hangouts_messages'
DATA_FORMAT = 'Google Hangouts conversations SQLite database (babel.db) file'
REQUIRED_STRUCTURE = {
'blocked_people': frozenset([]),
'messages': frozenset([
'_id', 'text', 'timestamp', 'status', 'type', 'author_chat_id']),
'participants': frozenset([
'full_name', 'chat_id'])}
QUERIES = [
('SELECT messages._id, participants.full_name, text, messages.timestamp,'
'status, type FROM messages INNER JOIN participants ON '
'messages.author_chat_id=participants.chat_id;', 'ParseMessagesRow')]
SCHEMAS = [{
'android_metadata': (
'CREATE TABLE android_metadata (locale TEXT)'),
'blocked_people': (
'CREATE TABLE blocked_people (_id INTEGER PRIMARY KEY, gaia_id '
'TEXT, chat_id TEXT, name TEXT, profile_photo_url TEXT, UNIQUE '
'(chat_id) ON CONFLICT REPLACE, UNIQUE (gaia_id) ON CONFLICT '
'REPLACE)'),
'conversation_participants': (
'CREATE TABLE conversation_participants (_id INTEGER PRIMARY KEY, '
'participant_row_id INT, participant_type INT, conversation_id '
'TEXT, sequence INT, active INT, invitation_status INT DEFAULT(0), '
'UNIQUE (conversation_id,participant_row_id) ON CONFLICT REPLACE, '
'FOREIGN KEY (conversation_id) REFERENCES '
'conversations(conversation_id) ON DELETE CASCADE ON UPDATE '
'CASCADE, FOREIGN KEY (participant_row_id) REFERENCES '
'participants(_id))'),
'conversations': (
'CREATE TABLE conversations (_id INTEGER PRIMARY KEY, '
'conversation_id TEXT, conversation_type INT, '
'latest_message_timestamp INT DEFAULT(0), '
'latest_message_expiration_timestamp INT, metadata_present '
'INT,notification_level INT, name TEXT, generated_name TEXT, '
'snippet_type INT, snippet_text TEXT, snippet_image_url TEXT, '
'snippet_author_gaia_id TEXT, snippet_author_chat_id TEXT, '
'snippet_message_row_id INT, snippet_selector INT, snippet_status '
'INT, snippet_new_conversation_name TEXT, snippet_participant_keys '
'TEXT, snippet_sms_type TEXT, previous_latest_timestamp INT, status '
'INT, view INT, inviter_gaia_id TEXT, inviter_chat_id TEXT, '
'inviter_affinity INT, is_pending_leave INT, account_id INT, is_otr '
'INT, packed_avatar_urls TEXT, self_avatar_url TEXT, self_watermark '
'INT DEFAULT(0), chat_watermark INT DEFAULT(0), hangout_watermark '
'INT DEFAULT(0), is_draft INT, sequence_number INT, call_media_type '
'INT DEFAULT(0), has_joined_hangout INT, has_chat_notifications '
'DEFAULT(0),has_video_notifications '
'DEFAULT(0),last_hangout_event_time INT, draft TEXT, otr_status '
'INT, otr_toggle INT, last_otr_modification_time INT, '
'continuation_token BLOB, continuation_event_timestamp INT, '
'has_oldest_message INT DEFAULT(0), sort_timestamp INT, '
'first_peak_scroll_time INT, first_peak_scroll_to_message_timestamp '
'INT, second_peak_scroll_time INT, '
'second_peak_scroll_to_message_timestamp INT, conversation_hash '
'BLOB, disposition INT DEFAULT(0), has_persistent_events INT '
'DEFAULT(-1), transport_type INT DEFAULT(1), '
'default_transport_phone TEXT, sms_service_center TEXT, '
'is_temporary INT DEFAULT (0), sms_thread_id INT DEFAULT (-1), '
'chat_ringtone_uri TEXT, hangout_ringtone_uri TEXT, '
'snippet_voicemail_duration INT DEFAULT (0), share_count INT '
'DEFAULT(0), has_unobserved TEXT, last_share_timestamp INT '
'DEFAULT(0), gls_status INT DEFAULT(0), gls_link TEXT, is_guest INT '
'DEFAULT(0), UNIQUE (conversation_id ))'),
'dismissed_contacts': (
'CREATE TABLE dismissed_contacts (_id INTEGER PRIMARY KEY, gaia_id '
'TEXT, chat_id TEXT, name TEXT, profile_photo_url TEXT, UNIQUE '
'(chat_id) ON CONFLICT REPLACE, UNIQUE (gaia_id) ON CONFLICT '
'REPLACE)'),
'event_suggestions': (
'CREATE TABLE event_suggestions (_id INTEGER PRIMARY KEY, '
'conversation_id TEXT, event_id TEXT, suggestion_id TEXT, timestamp '
'INT, expiration_time_usec INT, type INT, gem_asset_url STRING, '
'gem_horizontal_alignment INT, matched_message_substring TEXT, '
'FOREIGN KEY (conversation_id) REFERENCES '
'conversations(conversation_id) ON DELETE CASCADE ON UPDATE '
'CASCADE, UNIQUE (conversation_id,suggestion_id) ON CONFLICT '
'REPLACE)'),
'merge_keys': (
'CREATE TABLE merge_keys (_id INTEGER PRIMARY KEY, conversation_id '
'TEXT, merge_key TEXT, UNIQUE (conversation_id) ON CONFLICT '
'REPLACE, FOREIGN KEY (conversation_id) REFERENCES '
'conversations(conversation_id) ON DELETE CASCADE ON UPDATE CASCADE '
')'),
'merged_contact_details': (
'CREATE TABLE merged_contact_details (_id INTEGER PRIMARY KEY, '
'merged_contact_id INT, lookup_data_type INT, lookup_data TEXT, '
'lookup_data_standardized TEXT, lookup_data_search TEXT, '
'lookup_data_label TEXT, needs_gaia_ids_resolved INT DEFAULT (1), '
'is_hangouts_user INT DEFAULT (0), gaia_id TEXT, avatar_url TEXT, '
'display_name TEXT, last_checked_ts INT DEFAULT (0), '
'lookup_data_display TEXT, detail_affinity_score REAL DEFAULT '
'(0.0), detail_logging_id TEXT, is_in_viewer_dasher_domain INT '
'DEFAULT (0), FOREIGN KEY (merged_contact_id) REFERENCES '
'merged_contacts(_id) ON DELETE CASCADE ON UPDATE CASCADE)'),
'merged_contacts': (
'CREATE TABLE merged_contacts (_id INTEGER PRIMARY KEY, '
'contact_lookup_key TEXT, contact_id INT, raw_contact_id INT, '
'display_name TEXT, avatar_url TEXT, is_frequent INT DEFAULT (0), '
'is_favorite INT DEFAULT (0), contact_source INT DEFAULT(0), '
'frequent_order INT, person_logging_id TEXT, person_affinity_score '
'REAL DEFAULT (0.0), is_in_same_domain INT DEFAULT (0))'),
'messages': (
'CREATE TABLE messages (_id INTEGER PRIMARY KEY, message_id TEXT, '
'message_type INT, conversation_id TEXT, author_chat_id TEXT, '
'author_gaia_id TEXT, text TEXT, timestamp INT, '
'delete_after_read_timetamp INT, status INT, type INT, local_url '
'TEXT, remote_url TEXT, attachment_content_type TEXT, width_pixels '
'INT, height_pixels INT, stream_id TEXT, image_id TEXT, album_id '
'TEXT, latitude DOUBLE, longitude DOUBLE, address ADDRESS, '
'notification_level INT, expiration_timestamp INT, '
'notified_for_failure INT DEFAULT(0), off_the_record INT '
'DEFAULT(0), transport_type INT NOT NULL DEFAULT(1), '
'transport_phone TEXT, external_ids TEXT, sms_timestamp_sent INT '
'DEFAULT(0), sms_priority INT DEFAULT(0), sms_message_size INT '
'DEFAULT(0), mms_subject TEXT, sms_raw_sender TEXT, '
'sms_raw_recipients TEXT, persisted INT DEFAULT(1), '
'sms_message_status INT DEFAULT(-1), sms_type INT DEFAULT(-1), '
'stream_url TEXT, attachment_target_url TEXT, attachment_name TEXT, '
'image_rotation INT DEFAULT (0), new_conversation_name TEXT, '
'participant_keys TEXT, forwarded_mms_url TEXT, forwarded_mms_count '
'INT DEFAULT(0), attachment_description TEXT, '
'attachment_target_url_description TEXT, attachment_target_url_name '
'TEXT, attachment_blob_data BLOB,attachment_uploading_progress INT '
'DEFAULT(0), sending_error INT DEFAULT(0), stream_expiration INT, '
'voicemail_length INT DEFAULT (0), call_media_type INT DEFAULT(0), '
'last_seen_timestamp INT DEFAULT(0), observed_status INT '
'DEFAULT(2), receive_type INT DEFAULT(0), init_timestamp INT '
'DEFAULT(0), in_app_msg_latency INT DEFAULT(0), notified INT '
'DEFAULT(0), alert_in_conversation_list INT DEFAULT(0), attachments '
'BLOB, is_user_mentioned INT DEFAULT(0), local_id TEXT, '
'request_task_row_id INT DEFAULT(-1), FOREIGN KEY (conversation_id) '
'REFERENCES conversations(conversation_id) ON DELETE CASCADE ON '
'UPDATE CASCADE, UNIQUE (conversation_id,message_id) ON CONFLICT '
'REPLACE)'),
'mms_notification_inds': (
'CREATE TABLE mms_notification_inds (_id INTEGER PRIMARY KEY, '
'content_location TEXT, transaction_id TEXT, from_address TEXT, '
'message_size INT DEFAULT(0), expiry INT)'),
'multipart_attachments': (
'CREATE TABLE multipart_attachments (_id INTEGER PRIMARY KEY, '
'message_id TEXT, conversation_id TEXT, url TEXT, content_type '
'TEXT, width INT, height INT, FOREIGN KEY (message_id, '
'conversation_id) REFERENCES messages(message_id, conversation_id) '
'ON DELETE CASCADE ON UPDATE CASCADE)'),
'participant_email_fts': (
'CREATE VIRTUAL TABLE participant_email_fts USING '
'fts4(content="merged_contact_details", gaia_id,lookup_data)'),
'participant_email_fts_docsize': (
'CREATE TABLE \'participant_email_fts_docsize\'(docid INTEGER '
'PRIMARY KEY, size BLOB)'),
'participant_email_fts_segdir': (
'CREATE TABLE \'participant_email_fts_segdir\'(level INTEGER,idx '
'INTEGER,start_block INTEGER,leaves_end_block INTEGER,end_block '
'INTEGER,root BLOB,PRIMARY KEY(level, idx))'),
'participant_email_fts_segments': (
'CREATE TABLE \'participant_email_fts_segments\'(blockid INTEGER '
'PRIMARY KEY, block BLOB)'),
'participant_email_fts_stat': (
'CREATE TABLE \'participant_email_fts_stat\'(id INTEGER PRIMARY '
'KEY, value BLOB)'),
'participants': (
'CREATE TABLE participants (_id INTEGER PRIMARY KEY, '
'participant_type INT DEFAULT 1, gaia_id TEXT, chat_id TEXT, '
'phone_id TEXT, circle_id TEXT, first_name TEXT, full_name TEXT, '
'fallback_name TEXT, profile_photo_url TEXT, batch_gebi_tag STRING '
'DEFAULT(\'-1\'), blocked INT DEFAULT(0), in_users_domain BOOLEAN, '
'UNIQUE (circle_id) ON CONFLICT REPLACE, UNIQUE (chat_id) ON '
'CONFLICT REPLACE, UNIQUE (gaia_id) ON CONFLICT REPLACE)'),
'participants_fts': (
'CREATE VIRTUAL TABLE participants_fts USING '
'fts4(content="participants",gaia_id,full_name)'),
'participants_fts_docsize': (
'CREATE TABLE \'participants_fts_docsize\'(docid INTEGER PRIMARY '
'KEY, size BLOB)'),
'participants_fts_segdir': (
'CREATE TABLE \'participants_fts_segdir\'(level INTEGER,idx '
'INTEGER,start_block INTEGER,leaves_end_block INTEGER,end_block '
'INTEGER,root BLOB,PRIMARY KEY(level, idx))'),
'participants_fts_segments': (
'CREATE TABLE \'participants_fts_segments\'(blockid INTEGER PRIMARY '
'KEY, block BLOB)'),
'participants_fts_stat': (
'CREATE TABLE \'participants_fts_stat\'(id INTEGER PRIMARY KEY, '
'value BLOB)'),
'presence': (
'CREATE TABLE presence (_id INTEGER PRIMARY KEY, gaia_id TEXT NOT '
'NULL, reachable INT DEFAULT(0), reachable_time INT DEFAULT(0), '
'available INT DEFAULT(0), available_time INT DEFAULT(0), '
'status_message TEXT, status_message_time INT DEFAULT(0), call_type '
'INT DEFAULT(0), call_type_time INT DEFAULT(0), device_status INT '
'DEFAULT(0), device_status_time INT DEFAULT(0), last_seen INT '
'DEFAULT(0), last_seen_time INT DEFAULT(0), location BLOB, '
'location_time INT DEFAULT(0), UNIQUE (gaia_id) ON CONFLICT '
'REPLACE)'),
'recent_calls': (
'CREATE TABLE recent_calls (_id INTEGER PRIMARY KEY, '
'normalized_number TEXT NOT NULL, phone_number TEXT, contact_id '
'TEXT, call_timestamp INT, call_type INT, contact_type INT, '
'call_rate TEXT, is_free_call BOOLEAN)'),
'search': (
'CREATE TABLE search (search_key TEXT NOT NULL,continuation_token '
'TEXT,PRIMARY KEY (search_key))'),
'sticker_albums': (
'CREATE TABLE sticker_albums (album_id TEXT NOT NULL, title TEXT, '
'cover_photo_id TEXT, last_used INT DEFAULT(0), PRIMARY KEY '
'(album_id))'),
'sticker_photos': (
'CREATE TABLE sticker_photos (photo_id TEXT NOT NULL, album_id TEXT '
'NOT NULL, url TEXT NOT NULL, file_name TEXT, last_used INT '
'DEFAULT(0), PRIMARY KEY (photo_id), FOREIGN KEY (album_id) '
'REFERENCES sticker_albums(album_id) ON DELETE CASCADE)'),
'suggested_contacts': (
'CREATE TABLE suggested_contacts (_id INTEGER PRIMARY KEY, gaia_id '
'TEXT, chat_id TEXT, name TEXT, first_name TEXT, packed_circle_ids '
'TEXT, profile_photo_url TEXT, sequence INT, suggestion_type INT, '
'logging_id TEXT, affinity_score REAL DEFAULT (0.0), '
'is_in_same_domain INT DEFAULT (0))')}]
def ParseMessagesRow(self, parser_mediator, query, row, **unused_kwargs):
"""Parses an Messages row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
"""
query_hash = hash(query)
event_data = HangoutsMessageData()
event_data.sender = self._GetRowValue(query_hash, row, 'full_name')
event_data.body = self._GetRowValue(query_hash, row, 'text')
event_data.offset = self._GetRowValue(query_hash, row, '_id')
event_data.query = query
event_data.message_status = self._GetRowValue(query_hash, row, 'status')
event_data.message_type = self._GetRowValue(query_hash, row, 'type')
timestamp = self._GetRowValue(query_hash, row, 'timestamp')
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
sqlite.SQLiteParser.RegisterPlugin(HangoutsMessagePlugin)
|
|
import torch
from torch import nn
from qelos.basic import DotDistance, CosineDistance, ForwardDistance, BilinearDistance, TrilinearDistance, Softmax, Lambda
from qelos.rnn import RecStack, Reccable, RecStatefulContainer, RecStateful, RecurrentStack, RecurrentWrapper
from qelos.util import issequence
# region attention
class AttentionGenerator(nn.Module):
def __init__(self, dist=None, normalizer=Softmax(),
data_selector=None, scale=1., dropout=0.):
super(AttentionGenerator, self).__init__()
self.dist = dist
self.data_selector = data_selector
self.normalizer = normalizer
self.dropout = nn.Dropout(p=dropout) if dropout > 0. else None
self.scale = scale
def forward(self, data, crit, mask=None): # should work for 3D/2D and 3D/3D
if self.data_selector is not None:
data = self.data_selector(data)
scores = self.dist(data, crit) # (batsize, seqlen)
if scores.dim() == 3: # (batsize, dseqlen, cseqlen)
assert(crit.dim() == 3)
scores = scores.permute(0, 2, 1) # because scores for 3D3D are given from data to crit, here we need from crit to data
if mask is not None and mask.dim() == 2:
mask = mask.unsqueeze(1).repeat(1, scores.size(1), 1)
if mask is not None:
            assert mask.size() == scores.size(), "mask should be same size as scores"
scores.data.masked_fill_((-1*mask+1).byte().data, -float("inf"))
if self.scale != 1.:
scores = scores / self.scale
weights = self.normalizer(scores)
if self.dropout is not None:
weights = self.dropout(weights)
return weights # (batsize, dseqlen) or (batsize, cseqlen, dseqlen)
class AttentionConsumer(nn.Module):
def __init__(self, data_selector=None):
super(AttentionConsumer, self).__init__()
self.data_selector = data_selector
    def forward(self, data, weights):       # weights can be (batsize, seqlen) or (batsize, cseqlen, seqlen)
if self.data_selector is not None:
data = self.data_selector(data)
if weights.dim() == 3:
data = data.unsqueeze(1) # (batsize, 1, seqlen, dim)
weights = weights.unsqueeze(-1) # (batsize, seqlen, 1) or (batsize, cseqlen, seqlen, 1)
ret = data * weights
return torch.sum(ret, -2)
class Attention(nn.Module):
def __init__(self):
super(Attention, self).__init__()
self.attgen = AttentionGenerator()
self.attcon = AttentionConsumer()
def split_data(self): # splits datasets in two along dim axis, one goes to gen, other to cons
def attgen_ds(data): # (batsize, seqlen, dim)
return data[:, :, :data.size(2)//2]
def attcon_ds(data):
return data[:, :, data.size(2)//2:]
self.attgen.data_selector = attgen_ds
self.attcon.data_selector = attcon_ds
return self
def scale(self, scale):
self.attgen.scale = scale
return self
def dropout(self, rate):
self.attgen.dropout = nn.Dropout(rate)
return self
def forward(self, data, crit):
weights = self.attgen(data, crit)
summary = self.attcon(data, weights)
return summary
def dot_gen(self):
self.attgen.dist = DotDistance()
return self
def cosine_gen(self):
self.attgen.dist = CosineDistance()
return self
def forward_gen(self, ldim, rdim, aggdim, activation="tanh", use_bias=True):
self.attgen.dist = ForwardDistance(ldim, rdim, aggdim, activation=activation, use_bias=use_bias)
return self
def bilinear_gen(self, ldim, rdim):
self.attgen.dist = BilinearDistance(ldim, rdim)
return self
def trilinear_gen(self, ldim, rdim, aggdim, activation="tanh", use_bias=True):
self.attgen.dist = TrilinearDistance(ldim, rdim, aggdim, activation=activation, use_bias=use_bias)
return self
# endregion
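# Hypothetical usage sketch (not part of the original qelos API): it shows how the
# builder-style Attention defined above could be wired with a parameter-free
# dot-product scorer. Tensor shapes follow the comments in AttentionGenerator and
# AttentionConsumer; the sizes below are placeholders.
def _attention_usage_sketch():
    att = Attention().dot_gen()          # scores crit against every position of data
    data = torch.randn(4, 7, 16)         # (batsize, seqlen, dim) context to attend over
    crit = torch.randn(4, 16)            # (batsize, dim) criterion / query vector
    summary = att(data, crit)            # (batsize, dim) attention-weighted summary
    return summary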
class Decoder(nn.Module):
"""
Takes some context and decodes a sequence
Should support (partial) teacher forcing
"""
def __init__(self, decodercell):
"""
        :param decodercell: main block that generates one timestep of output; must include everything needed for a single decoding step
"""
super(Decoder, self).__init__()
#assert(isinstance(decodercell, DecoderCell))
self.block = decodercell
def reset_state(self):
self.block.reset_state()
def _compute_init_states(self, *x, **kw):
return self.block._compute_init_states(*x, **kw)
def set_init_states(self, *x):
self.block.set_init_states(*x)
def forward(self, *x, **kw): # first input must be (batsize, seqlen,...)
self.reset_state()
batsize = x[0].size(0)
maxtime = x[0].size(1) if "maxtime" not in kw else kw["maxtime"]
new_init_states = self._compute_init_states(*x, **kw)
if new_init_states is not None:
if not issequence(new_init_states):
new_init_states = (new_init_states,)
self.set_init_states(*new_init_states)
y_list = []
y_t = None
for t in range(maxtime):
#x_t = [x_e[:, t] if x_e.sequence else x_e for x_e in x]
x_t, x_t_kw = self.block._get_inputs_t(t=t, x=x, xkw=kw, y_t=y_t) # let the Rec definition decide what to input
if not issequence(x_t):
x_t = [x_t]
x_t = tuple(x_t)
x_t_kw["t"] = t
blockret = self.block(*x_t, **x_t_kw)
if not issequence(blockret):
blockret = [blockret]
y_t = blockret
#y_t = [y_t_e.unsqueeze(1) for y_t_e in blockret[:self.block.numstates]]
y_list.append(y_t)
y = tuple()
for i in range(len(y_list[0])):
yl_e = [y_list[j][i] for j in range(len(y_list))]
y += (torch.stack(yl_e, 1),)
if len(y) == 1:
y = y[0]
return y
class ContextDecoder(Decoder):
"""
    Allows use of an efficient cuDNN RNN unrolled over time
"""
def __init__(self, embedder=None, core=None, **kw):
assert(core is not None)
super(ContextDecoder, self).__init__(core)
self.embedder = embedder
self.ctx_to_decinp = kw["ctx_to_decinp"] if "ctx_to_decinp" in kw else True
self.init_state_gen = kw["ctx_to_h0"] if "ctx_to_h0" in kw else None
def forward(self, x, ctx):
"""
:param x: (batsize, seqlen) of integers or (batsize, seqlen, dim) of vectors if embedder is None
:param ctx: (batsize, dim) of context vectors
:return:
"""
new_init_states = self._compute_init_states(x, ctx)
if new_init_states is not None:
if not issequence(new_init_states):
new_init_states = (new_init_states,)
self.set_init_states(*new_init_states)
x_emb = self.embedder(x) if self.embedder is not None else x
if self.ctx_to_decinp:
ctx = ctx.unsqueeze(1).repeat(1, x_emb.size(1), 1)
i = torch.cat([x_emb, ctx], 2)
else:
i = x_emb
y = self.block(i)
return y
def _compute_init_states(self, x, ctx):
if self.init_state_gen is not None:
h_0 = self.init_state_gen(ctx)
return h_0
else:
return None
class AttentionDecoder(ContextDecoder):
def __init__(self, attention=None,
embedder=None,
core=None, # RecurrentStack
smo=None, # non-rec
att_transform=None,
init_state_gen=None,
ctx_to_smo=True,
state_to_smo=True,
decinp_to_att=False,
decinp_to_smo=False,
return_out=True,
return_att=False):
super(AttentionDecoder, self).__init__(embedder=embedder, core=core, ctx_to_h0=init_state_gen)
self.attention = attention
self.smo = RecurrentWrapper(smo) if smo is not None else None
self.att_transform = RecurrentWrapper(att_transform) if att_transform is not None else None
# wiring
self.att_after_update = True
self.ctx_to_smo = ctx_to_smo
self.state_to_smo = state_to_smo
self.decinp_to_att = decinp_to_att
self.decinp_to_smo = decinp_to_smo
# returns
self.return_out = return_out
self.return_att = return_att
def forward(self, x, ctx, ctxmask=None):
"""
:param x: (batsize, seqlen) of integers or (batsize, seqlen, dim) of vectors if embedder is None
:param ctx: (batsize, dim) of context vectors
:return:
"""
new_init_states = self._compute_init_states(x, ctx)
if new_init_states is not None:
if not issequence(new_init_states):
new_init_states = (new_init_states,)
self.set_init_states(*new_init_states)
x_emb = self.embedder(x) if self.embedder is not None else x
y = self.block(x_emb)
toatt = y
if self.decinp_to_att:
toatt = torch.cat([y, x_emb], 2)
dctx, att_weights = self._get_dctx(ctx, ctxmask, toatt)
cat_to_smo = []
if self.state_to_smo: cat_to_smo.append(y)
if self.ctx_to_smo: cat_to_smo.append(dctx)
if self.decinp_to_smo: cat_to_smo.append(x_emb)
smoinp = torch.cat(cat_to_smo, 2) if len(cat_to_smo) > 1 else cat_to_smo[0]
output = self.smo(smoinp) if self.smo is not None else smoinp
# returns
ret = tuple()
if self.return_out:
ret += (output,)
if self.return_att:
ret += (att_weights,)
if len(ret) == 1:
ret = ret[0]
return ret
def _get_dctx(self, ctx, ctxmask, toatt):
"""
:param ctx: (batsize, inpseqlen, dim)
:param ctxmask: (batsize, inpseqlen)
:param toatt: (batsize, outseqlen, dim)
:return: (batsize, outseqlen, dim) and (batsize, outseqlen, inpseqlen)
"""
if self.att_transform is not None:
toatt = self.att_transform(toatt)
att_weights = self.attention.attgen(ctx, toatt, mask=ctxmask)
res = self.attention.attcon(ctx, att_weights)
return res, att_weights
pass # TODO
class DecoderCell(RecStatefulContainer):
"""
Decoder logic.
Call .to_decoder() to get decoder.
Two ways to make a new decoder architecture:
* subclass this and override forward(), get_inputs_t() and compute_init_states()
* set modules/functions for the three pieces by using the provided setters (overrides subclassing)
"""
_teacher_unforcing_support = False # OVERRIDE THIS to enable teacher unforcing args
def __init__(self, *layers):
super(DecoderCell, self).__init__()
if len(layers) == 1:
self.set_core(layers[0])
elif len(layers) > 1:
self.core = RecStack(*layers)
else:
self.core = None
        self.teacher_force_frac = 1     # fraction of teacher forcing; named so it does not shadow the teacher_force() method below
self._init_state_computer = None
self._inputs_t_getter = None
# region RecStatefulContainer signature
def reset_state(self):
self.core.reset_state()
def set_init_states(self, *states):
self.core.set_init_states(*states)
def get_init_states(self, batsize):
return self.core.get_init_states(batsize)
# endregion
    def teacher_force(self, frac=1):        # set teacher forcing fraction
        if not self._teacher_unforcing_support and frac < 1:
            raise NotImplementedError("only teacher forcing supported")
        if frac < 0 or frac > 1:
            raise Exception("bad argument, must be in [0, 1]")
        self.teacher_force_frac = frac
def forward(self, *x, **kw): # OVERRIDE THIS
"""
Must be implemented in all real decoder cells.
:param x: inputs to this timestep (list of tensors) and states
:param kw: more arguments, might include time step as t=
:return: outputs of one decoding timestep (list of tensors)
"""
return self.core(*x, **kw)
def set_core(self, reccable):
assert(isinstance(reccable, RecStateful))
self.core = reccable
def _get_inputs_t(self, t=None, x=None, xkw=None, y_t=None):
if self._inputs_t_getter is None:
return self.get_inputs_t(t=t, x=x, xkw=xkw, y_t=y_t)
else:
return self._inputs_t_getter(t=t, x=x, xkw=xkw, y_t=y_t)
def get_inputs_t(self, t=None, x=None, xkw=None, y_t=None):
"""
Make the inputs to cell from timestep, inputs to decoder and previous outputs of cell.
Called before every call to .forward() and must compute all arguments for .forward() for given timestep.
Must be implemented in all concrete decoder cells.
This method is the place to implement teacher forcing (don't forget to override _teacher_unforcing_support to True to
enable official teacher forcing support).
This method could also be used for computing dynamic contexts (attention)
:param t: timestep (integer)
:param x: original arguments to decoder (list of tensors)
:param xkw: original kwargs to decoder
:param y_t: previous outputs of this cell (list of tensors or None). If None, no previous outputs have been output yet
:return: actual (inputs, kwinpts) to .forward() of this decoder cell (list of tensors)
"""
return x[0][:, t], {"t": t}
def set_inputs_t_getter(self, callabla):
self._inputs_t_getter = callabla
def _compute_init_states(self, *x, **kw):
if self._init_state_computer is None:
return self.compute_init_states(*x, **kw)
else:
return self._init_state_computer(*x, **kw)
def compute_init_states(self, *x, **kw):
"""
Compute new initial states for the reccable elements in the decoder's rec stack
:param x: the original inputs
:return: possibly incomplete list of initial states to set, or None
"""
return None
def set_init_states_computer(self, callabla):
self._init_state_computer = callabla
def to_decoder(self):
""" Makes a decoder from this decoder cell """
return Decoder(self)
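# Hypothetical sketch (class name, sizes and the core layers are made up, not part
# of qelos): a minimal DecoderCell subclass following the recipe in the DecoderCell
# docstring above -- embed the previous gold token, run the recurrent core for one
# step, project to the vocabulary, and feed the gold token at every timestep via
# get_inputs_t() (full teacher forcing, the only mode supported by default). The
# layers passed through to DecoderCell are assumed to form a RecStateful stack of
# per-timestep cells, as required by set_core().
class _SketchTokenDecoderCell(DecoderCell):
    def __init__(self, vocsize, embdim, hdim, *layers):
        super(_SketchTokenDecoderCell, self).__init__(*layers)
        self.emb = nn.Embedding(vocsize, embdim)
        self.out = nn.Linear(hdim, vocsize)
    def forward(self, x_t, t=None, **kw):
        emb_t = self.emb(x_t)            # (batsize, embdim) embedding of previous token
        h_t = self.core(emb_t)           # one recurrent step through the core stack
        return self.out(h_t)             # (batsize, vocsize) scores for this timestep
    def get_inputs_t(self, t=None, x=None, xkw=None, y_t=None):
        return x[0][:, t], {"t": t}      # gold token at timestep t (teacher forcing)
# A decoder built from such a cell would then be used roughly as:
#   dec = _SketchTokenDecoderCell(vocsize, embdim, hdim, rec_cell).to_decoder()
#   y = dec(token_ids)                   # token_ids: (batsize, seqlen) LongTensor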
class ContextDecoderCell(DecoderCell):
def __init__(self, embedder=None, *layers):
super(ContextDecoderCell, self).__init__(*layers)
self.embedder = embedder
def forward(self, x, ctx, **kw):
if self.embedder is not None:
emb = self.embedder(x)
else:
emb = x
inp = torch.cat([emb, ctx], 1)
ret = self.core(inp)
return ret
def get_inputs_t(self, t=None, x=None, xkw=None, y_t=None):
return (x[0][:, t], x[1]), {}
class AttentionDecoderCell(DecoderCell):
"""
Recurrence of decoder with attention # TODO
"""
def __init__(self, attention=None,
embedder=None,
core=None,
smo=None,
init_state_gen=None,
attention_transform=None,
att_after_update=False,
ctx_to_decinp=True,
ctx_to_smo=True,
state_to_smo=True,
decinp_to_att=False,
decinp_to_smo=False,
return_out=True,
return_att=False,
state_split=False,
**kw):
"""
Initializes attention-based decoder cell
:param attention: attention module
:param embedder: embedder module
:param core: core recurrent module (recstateful)
:param smo: module that takes core's output vectors and produces probabilities over output vocabulary
:param init_state_gen: module that generates initial states for the decoder and its core (see also .set_init_states())
:param attention_transform: module that transforms attention vector just before generation of attention weights
:param att_after_update: perform recurrent step before attention
:param ctx_to_decinp: feed attention context to core
:param ctx_to_smo: feed attention context to smo
:param state_to_smo: feed output of core to smo
:param decinp_to_att: feed embedding to attention generation
:param decinp_to_smo: feed embedding to smo
:param return_out: return output probabilities
:param return_att: return attention weights over input sequence
:param state_split: split core's state, first half goes to attention (which might also be split), second half goes to smo
:param kw:
"""
super(AttentionDecoderCell, self).__init__(**kw)
# submodules
self.attention = attention
self.embedder = embedder
self.core = core
self.smo = smo
        if init_state_gen is not None:
            self.set_init_states_computer(init_state_gen)
self.att_transform = attention_transform
# wiring
self.att_after_update = att_after_update
self.ctx_to_decinp = ctx_to_decinp
self.ctx_to_smo = ctx_to_smo
self.state_to_smo = state_to_smo
self.decinp_to_att = decinp_to_att
self.decinp_to_smo = decinp_to_smo
self.state_split = state_split
# returns
self.return_out = return_out
self.return_att = return_att
# states
self._state = [None]
# region implement DecoderCell signature
def forward(self, x_t, ctx, ctxmask=None, t=None, outmask_t=None, **kw):
"""
:param x_t: (batsize,...) input for current timestep
:param ctx: (batsize, inpseqlen, dim) whole context
:param ctxmask: (batsize, inpseqlen) context mask
:param t: current timestep
:param kw:
:return: output probabilities for current timestep and/or attention weights
"""
batsize = x_t.size(0)
x_t_emb = self.embedder(x_t)
        if isinstance(x_t_emb, tuple) and len(x_t_emb) == 2:    # some embedders return (embedding, mask)
x_t_emb, _ = x_t_emb
if self.att_after_update:
ctx_tm1 = self._state[0]
i_t = torch.cat([x_t_emb, ctx_tm1], 1) if self.ctx_to_decinp else x_t_emb
o_t = self.core(i_t, t=t)
ctx_t, att_weights_t = self._get_ctx_t(ctx, ctxmask, o_t, x_t_emb)
else:
o_tm1 = self._state[0]
ctx_t, att_weights_t = self._get_ctx_t(ctx, ctxmask, o_tm1, x_t_emb)
i_t = torch.cat([x_t_emb, ctx_t], 1) if self.ctx_to_decinp else x_t_emb
o_t = self.core(i_t, t=t)
cat_to_smo = []
o_to_smo = o_t[:, o_t.size(1)//2:] if self.state_split else o_t # first half is split off in _get_ctx_t()
if self.state_to_smo: cat_to_smo.append(o_to_smo)
if self.ctx_to_smo: cat_to_smo.append(ctx_t)
if self.decinp_to_smo: cat_to_smo.append(x_t_emb)
smoinp_t = torch.cat(cat_to_smo, 1) if len(cat_to_smo) > 1 else cat_to_smo[0]
smokw = {}
smokw.update(kw)
if outmask_t is not None:
smokw["mask"] = outmask_t.float()
y_t = self.smo(smoinp_t, **smokw) if self.smo is not None else smoinp_t
# returns
ret = tuple()
if self.return_out:
ret += (y_t,)
if self.return_att:
ret += (att_weights_t,)
# store rec state
if self.att_after_update:
self._state[0] = ctx_t
else:
self._state[0] = o_t
if len(ret) == 1:
ret = ret[0]
return ret
def _get_ctx_t(self, ctx, ctxmask, h, x_emb):
"""
:param ctx: (batsize, inpseqlen, dim) whole context
:param ctxmask: (batsize, inpseqlen) context mask over time
:param h: (batsize, dim) criterion for attention
:param x_emb: (batsize, dim) vector of current input, used in attention if decinp_to_att==True
:return: (summary of ctx based on attention, attention weights)
"""
assert(ctx.dim() == 3)
assert(ctxmask is None or ctxmask.dim() == 2)
if self.state_split:
h = h[:, :h.size(1)//2]
if self.decinp_to_att:
h = torch.cat([h, x_emb], 1)
if self.att_transform is not None:
h = self.att_transform(h)
att_weights = self.attention.attgen(ctx, h, mask=ctxmask)
res = self.attention.attcon(ctx, att_weights)
return res, att_weights
def get_inputs_t(self, t=None, x=None, xkw=None, y_t=None): # TODO implement teacher forcing
outargs = (x[0][:, t], x[1]) # (prev_token, ctx)
outkwargs = {"t": t}
if "ctxmask" in xkw: # copy over ctxmask (shared over decoder steps)
outkwargs["ctxmask"] = xkw["ctxmask"]
if "outmask" in xkw: # slice out the time from outmask
outkwargs["outmask_t"] = xkw["outmask"][:, t]
return outargs, outkwargs
# endregion
# region RecStatefulContainer signature
def reset_state(self):
#self._state[0] = None
self.core.reset_state()
def set_init_states(self, ownstate, *states):
"""
:param ownstate: treated as first context (ctx_0) if att_after_update==True,
treated as initial output of core (o_0) otherwise
:param states: (optional) states for core
"""
self._state[0] = ownstate
self.core.set_init_states(*states)
# endregion
|
|
# Copyright (c) 2014, Salesforce.com, Inc. All rights reserved.
# Copyright (c) 2015, Google, Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name of Salesforce.com nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import uuid
from itertools import chain
from collections import namedtuple
import numpy
from distributions.io.stream import protobuf_stream_read
from distributions.io.stream import protobuf_stream_write
from loom.schema_pb2 import ProductValue
from loom.schema_pb2 import Row
from loom.schema_pb2 import Query
import loom.cFormat
import loom.runner
DEFAULTS = {
'sample_sample_count': 10,
'entropy_sample_count': 1000,
'mutual_information_sample_count': 1000,
'similar_row_limit': 1000,
'tile_size': 500,
}
BUFFER_SIZE = 10
Estimate = namedtuple('Estimate', ['mean', 'variance'], verbose=False)
def get_estimate(samples):
mean = numpy.mean(samples)
variance = numpy.var(samples) / len(samples)
return Estimate(mean, variance)
NONE = ProductValue.Observed.NONE
DENSE = ProductValue.Observed.DENSE
SPARSE = ProductValue.Observed.SPARSE
def none_to_protobuf(diff):
assert isinstance(diff, ProductValue.Diff)
diff.Clear()
diff.neg.observed.sparsity = NONE
diff.pos.observed.sparsity = NONE
def data_row_to_protobuf(data_row, diff):
assert isinstance(diff, ProductValue.Diff)
if all([value is None for value in data_row]):
none_to_protobuf(diff)
return
diff.Clear()
diff.neg.observed.sparsity = NONE
diff.pos.observed.sparsity = DENSE
mask = diff.pos.observed.dense
fields = {
bool: diff.pos.booleans,
int: diff.pos.counts,
long: diff.pos.counts,
float: diff.pos.reals,
}
for val in data_row:
observed = val is not None
mask.append(observed)
if observed:
fields[type(val)].append(val)
def protobuf_to_data_row(diff):
assert isinstance(diff, ProductValue.Diff)
assert diff.neg.observed.sparsity == NONE
data = diff.pos
packed = chain(data.booleans, data.counts, data.reals)
return [
packed.next() if observed else None
for observed in data.observed.dense
]
def load_data_rows(filename):
for row in loom.cFormat.row_stream_load(filename):
data = row.iter_data()
packed = chain(data['booleans'], data['counts'], data['reals'])
yield [
packed.next() if observed else None
for observed in data['observed']
]
def feature_set_to_protobuf(feature_set, messages):
message = messages.add()
message.sparsity = SPARSE
for i in sorted(feature_set):
message.sparse.append(i)
class QueryServer(object):
def __init__(self, protobuf_server):
self.protobuf_server = protobuf_server
@property
def root(self):
return self.protobuf_server.root
def close(self):
self.protobuf_server.close()
def __enter__(self):
return self
def __exit__(self, *unused):
self.close()
def request(self):
request = Query.Request()
request.id = str(uuid.uuid4())
return request
def sample(self, to_sample, conditioning_row=None, sample_count=None):
if sample_count is None:
sample_count = DEFAULTS['sample_sample_count']
if conditioning_row is None:
conditioning_row = [None for _ in to_sample]
assert len(to_sample) == len(conditioning_row)
request = self.request()
data_row_to_protobuf(conditioning_row, request.sample.data)
request.sample.to_sample.sparsity = DENSE
request.sample.to_sample.dense[:] = to_sample
request.sample.sample_count = sample_count
self.protobuf_server.send(request)
response = self.protobuf_server.receive()
if response.error:
raise Exception('\n'.join(response.error))
samples = []
for sample in response.sample.samples:
data_out = protobuf_to_data_row(sample)
for i, val in enumerate(data_out):
if val is None:
assert to_sample[i] is False
data_out[i] = conditioning_row[i]
samples.append(data_out)
return samples
def _send_score(self, row):
request = self.request()
data_row_to_protobuf(row, request.score.data)
self.protobuf_server.send(request)
def _receive_score(self):
response = self.protobuf_server.receive()
if response.error:
raise Exception('\n'.join(response.error))
return response.score.score
def score(self, row):
self._send_score(row)
return self._receive_score()
def batch_score(self, rows, buffer_size=BUFFER_SIZE):
buffered = 0
for row in rows:
self._send_score(row)
if buffered < buffer_size:
buffered += 1
else:
yield self._receive_score()
for _ in xrange(buffered):
yield self._receive_score()
def _entropy(
self,
row_sets,
col_sets,
conditioning_row=None,
sample_count=None):
row_sets = list(set(map(frozenset, row_sets)) | set([frozenset()]))
col_sets = list(set(map(frozenset, col_sets)) | set([frozenset()]))
if sample_count is None:
sample_count = DEFAULTS['entropy_sample_count']
request = self.request()
if conditioning_row is None:
none_to_protobuf(request.entropy.conditional)
else:
data_row_to_protobuf(conditioning_row, request.entropy.conditional)
for feature_set in row_sets:
feature_set_to_protobuf(feature_set, request.entropy.row_sets)
for feature_set in col_sets:
feature_set_to_protobuf(feature_set, request.entropy.col_sets)
request.entropy.sample_count = sample_count
self.protobuf_server.send(request)
response = self.protobuf_server.receive()
if response.error:
raise Exception('\n'.join(response.error))
means = response.entropy.means
variances = response.entropy.variances
size = len(row_sets) * len(col_sets)
assert len(means) == size, means
assert len(variances) == size, variances
means = iter(means)
variances = iter(variances)
return {
row_set | col_set: Estimate(means.next(), variances.next())
for row_set in row_sets
for col_set in col_sets
}
def entropy(
self,
row_sets,
col_sets,
conditioning_row=None,
sample_count=None,
tile_size=None):
if tile_size is None:
tile_size = DEFAULTS['tile_size']
min_size = max(1, min(tile_size, len(row_sets), len(col_sets)))
tile_size = tile_size * tile_size / min_size
assert tile_size > 0, tile_size
result = {}
for i in xrange(0, len(row_sets), tile_size):
row_tile = row_sets[i: i + tile_size]
for j in xrange(0, len(col_sets), tile_size):
col_tile = col_sets[j: j + tile_size]
result.update(self._entropy(
row_tile,
col_tile,
conditioning_row,
sample_count))
return result
def mutual_information(
self,
feature_set1,
feature_set2,
entropys=None,
conditioning_row=None,
sample_count=None):
'''
Estimate the mutual information between feature_set1
and feature_set2 conditioned on conditioning_row
'''
if not isinstance(feature_set1, frozenset):
feature_set1 = frozenset(feature_set1)
if not isinstance(feature_set2, frozenset):
feature_set2 = frozenset(feature_set2)
if sample_count is None:
sample_count = DEFAULTS['mutual_information_sample_count']
feature_union = frozenset.union(feature_set1, feature_set2)
if entropys is None:
entropys = self.entropy(
[feature_set1],
[feature_set2],
conditioning_row,
sample_count)
mi = entropys[feature_set1].mean \
+ entropys[feature_set2].mean \
- entropys[feature_union].mean
variance = entropys[feature_set1].variance \
+ entropys[feature_set2].variance \
+ entropys[feature_union].variance
return Estimate(mi, variance)
def score_derivative(
self,
update_row,
score_rows=None,
row_limit=None):
row = Row()
request = self.request()
if row_limit is None:
row_limit = DEFAULTS['similar_row_limit']
if score_rows is not None:
for i, data_row in enumerate(score_rows):
data_row_to_protobuf(
data_row,
row.diff)
added_diff = request.score_derivative.score_data.add()
added_diff.MergeFrom(row.diff)
request.score_derivative.row_limit = row_limit
data_row_to_protobuf(
update_row,
row.diff)
request.score_derivative.update_data.MergeFrom(row.diff)
self.protobuf_server.send(request)
response = self.protobuf_server.receive()
if response.error:
raise Exception('\n'.join(response.error))
ids = response.score_derivative.ids
score_diffs = response.score_derivative.score_diffs
return zip(ids, score_diffs)
class ProtobufServer(object):
def __init__(self, root, config=None, debug=False, profile=None):
self.root = root
self.proc = loom.runner.query(
root_in=root,
config_in=config,
log_out=None,
debug=debug,
profile=profile,
block=False)
def send(self, request):
assert isinstance(request, Query.Request), request
request_string = request.SerializeToString()
protobuf_stream_write(request_string, self.proc.stdin)
self.proc.stdin.flush()
def receive(self):
response_string = protobuf_stream_read(self.proc.stdout)
response = Query.Response()
response.ParseFromString(response_string)
return response
def close(self):
self.proc.stdin.close()
self.proc.wait()
def __enter__(self):
return self
def __exit__(self, *unused):
self.close()
def get_server(root, config=None, debug=False, profile=None):
protobuf_server = ProtobufServer(root, config, debug, profile)
return QueryServer(protobuf_server)
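# Hypothetical usage sketch (the root path, feature layout and indices are
# placeholders): drives the QueryServer returned by get_server() above. to_sample
# marks which features to sample; the conditioning row fixes the remaining ones.
def _query_server_usage_sketch(root='results/sample.loom'):
    with get_server(root) as server:
        to_sample = [True, True, False]
        conditioning_row = [None, None, 1.5]
        samples = server.sample(to_sample, conditioning_row, sample_count=5)
        row_score = server.score(conditioning_row)
        mi = server.mutual_information([0], [1])
        return samples, row_score, mi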
|
|
import math, os, re, types
from phycas.pdfgen import *
from phycas.utilities.CommonFunctions import CommonFunctions
from phycas.utilities.GlobalState import readFile
from phycas.phylogeny import Tree
class PDFTree(CommonFunctions):
def __init__(self):
CommonFunctions.__init__(self, None)
self.pdf_splits_to_plot = None
# Variables associated with PDF tree drawing (used in pdftree() function)
# The 14 standard fonts guaranteed to be available in all PDF consumer applications:
# Times-Roman Helvetica Courier Symbol
# Times-Bold Helvetica-Bold Courier-Bold ZapfDingbats
# Times-Italic Helvetica-Oblique Courier-Oblique
# Times-BoldItalic Helvetica-BoldOblique Courier-BoldOblique
self.pdf_filename = 'trees.pdf' # Set to desired name of pdf file to create
self.pdf_edge_support_file = None # File containing PAUP* output with table of support values; if specified, the support values will be shown on trees plotted
self.pdf_tip_label_font = 'Times-Italic' # Font used for tip node names; should be one of the 14 standard fonts listed above
self.pdf_tip_label_height = 12 # Height in points of tip node name font
self.pdf_plot_label_font = 'Helvetica' # Font used for plot axis labels; should be one of the 14 standard fonts listed above
self.pdf_plot_label_height = 12 # Height in points of plot axis label font
self.pdf_title_font = 'Helvetica' # Font used for scalebar text; should be one of the 14 standard fonts listed above
self.pdf_title_height = 14 # Height in points of scalebar text font
self.pdf_scalebar_position = 'bottom' # Valid values are 'top', 'bottom' or None
self.pdf_scalebar_label_font = 'Helvetica' # Font used for scalebar text; should be one of the 14 standard fonts listed above
self.pdf_scalebar_label_height = 10 # Height in points of scalebar text font
self.pdf_support_label_font = 'Times-Roman' # Font used for edge support values; should be one of the 14 standard fonts listed above
self.pdf_support_label_height = 8 # Height in points of edge support font
self.pdf_support_as_percent = True # If True, support values will be shown as percentages (e.g. 93.1) rather than proportions (e.g. 0.931)
self.pdf_support_decimals = 1 # The number of decimal places shown in support values (e.g. to get 93.7, specify 1; to round up to 94, specify 0)
self.pdf_ladderize = 'right' # Valid values are 'right', 'left' or None
self.pdf_page_width = 8.5 # Page width in inches
self.pdf_page_height = 11.0 # Page length in inches
self.pdf_line_width = 1.0 # Width of lines representing edges in the tree
self.pdf_left_margin = 1.0 # Left margin in inches (1 inch = 72 points)
self.pdf_right_margin = 1.0 # Right margin in inches (1 inch = 72 points)
self.pdf_top_margin = 1.0 # Top margin in inches (1 inch = 72 points)
self.pdf_bottom_margin = 1.0 # Bottom margin in inches (1 inch = 72 points)
self.keep_xy_proportional = True # If True, vertical dimension of each tree in a collection will be kept proportional to its horizontal dimension
self.keep_tip_labels_proportional = True # If True, tip label height will be kept commensurate with size of tree for each tree in a printed collection (smaller trees will have smaller tip labels)
self.pdf_treefile = None # Set to tree file name if you want to make one pdf file with each tree from tree file on a separate page
self.pdf_newick = None # Set to the tree description to print if only want to save one tree to a pdf file
self.pdf_outgroup_taxon = None # Set to taxon name of tip serving as the outgroup for display rooting purposes (note: at this time outgroup can consist of just one taxon)
def pdftree(self):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
Creates a PDF file containing a single tree (if pdf_newick is
specified) or a collection of trees (if pdf_treefile is specified).
        If a collection of trees is specified, scales all trees the same (i.e.
the scalebar is identical in size for all trees plotted).
"""
#complex_outgroup = type(self.pdf_outgroup_taxon) in (types.ListType,types.TupleType)
simple_outgroup = type(self.pdf_outgroup_taxon) == types.StringType
self.phycassert(simple_outgroup, 'Phycas cannot yet deal with pdf_outgroup_taxon containing more than one outgroup taxon')
self.phycassert((self.pdf_treefile and not self.pdf_newick) or (self.pdf_newick and not self.pdf_treefile), 'set either pdf_newick or pdf_treefile, but not both')
# If pdf_edge_support_file has been specified, read splits table from the file
# and store the splits in the pdf_splits_to_plot dictionary
if self.pdf_edge_support_file and os.path.exists(self.pdf_edge_support_file):
# Read splits file and store all splits found along with their frequencies
contents_of_file = open(self.pdf_edge_support_file,'r').read()
regex = re.compile('([*.]+)\s+([0-9.]+)', re.M)
matches = regex.findall(contents_of_file)
self.phycassert(matches, 'could not find any splits defined in the pdf_edge_support_file named %s' % self.pdf_edge_support_file)
self.pdf_splits_to_plot = {}
for p,f in matches:
self.pdf_splits_to_plot[p] = float(f)
# Fork depending on whether user wants to print just one tree (pdf_newick specified)
# or an entire collection of trees (pdf_treefile specified)
if self.pdf_newick:
# Build tree the newick description of which is in self.newick
tree = self.pdf_newick.buildTree()
if self.pdf_outgroup_taxon:
num = tree.findTipByName(self.pdf_outgroup_taxon)
self.phycassert(num is not None, 'could not root tree using specified outgroup: no tip having name "%s" could be found' % self.pdf_outgroup_taxon)
tree.rerootAtTip(num)
if self.pdf_ladderize:
if self.pdf_ladderize == 'right':
tree.ladderizeRight()
else:
tree.ladderizeLeft()
# Save tree in PDF
pdf = PDFGenerator(self.pdf_page_width, self.pdf_page_height)
pdf.overwrite = True
pdf.newPage()
self.tree2pdf(pdf, tree)
pdf.saveDocument(self.pdf_filename)
else:
# Open pdf_treefile and read trees therein
self.tree_file_name = self.pdf_treefile
contents = readFile(self.pdf_treefile)
# Build each tree and determine its height
tree = Tree()
max_height = 0.0
for tree_def in contents.trees:
tree_def.buildTree(tree)
tree.rectifyNames(contents.taxon_labels)
if self.pdf_outgroup_taxon:
num = tree.findTipByName(self.pdf_outgroup_taxon)
self.phycassert(num is not None, 'could not root tree using specified outgroup: no tip having name "%s" could be found' % self.pdf_outgroup_taxon)
tree.rerootAtTip(num)
h = tree.calcTotalHeight()
if h > max_height:
max_height = h
#tlen = tree.edgeLenSum()
#print 'tlen =',tlen,', height =',h
# Build each tree again and save in PDF file
pdf = PDFGenerator(self.pdf_page_width, self.pdf_page_height)
pdf.overwrite = True
for tree_def in contents.trees:
tree_def.buildTree(tree)
tree.rectifyNames(contents.taxon_labels)
if self.pdf_outgroup_taxon:
num = tree.findTipByName(self.pdf_outgroup_taxon)
tree.rerootAtTip(num)
if self.pdf_ladderize:
if self.pdf_ladderize == 'right':
tree.ladderizeRight()
else:
tree.ladderizeLeft()
tree.rectifyNames(contents.taxon_labels)
pdf.newPage()
self.tree2pdf(pdf, tree, None, max_height)
pdf.saveDocument(self.pdf_filename)
# Prevent unintentional spillover
self.pdf_splits_to_plot = None
def tree2pdf(self, pdf, tree, title = None, xscalemax = 0.0, show_support = False):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
Prints tree on a pdf object (instance of class PDFGenerator). If title
is specified, the supplied string will be centered at the top of the
page. The optional argument xscalemax represents the maximum height
of a group of trees being printed on separate pages in the same pdf
document. If xscalemax is left unspecified, each tree will be scaled
to fit the page and the scalebar will be adjusted accordingly. If
xscalemax is specified, it will be used to determine the scalebar, and
the scalebar will remain the same size for all trees printed with the
        same xscalemax value.
"""
# TODO: max_label_points should be calculated outside this function and passed in as an argument
inch = 72.0
spacer = 5.0
max_label_points = 0.0
rooted_tree = tree.isRooted()
nodes = []
# Perform a preorder traversal:
# 1) for each node, set x-value to height above root (in units of edge length)
# 2) for each tip, set y-value to tip index, with root tip being 0, and other
# tips being numbered from left to right
# 3) find the length of the longest taxon label as it will be rendered in the
# PDF file so that the margin calculations can be made
# 4) for each internal, just set y-value to 0.0 for now; these internal y-values
# will be calculated on the subsequent postorder traversal
if self.pdf_splits_to_plot:
tree.recalcAllSplits(tree.getNObservables())
# Record information about the tip serving as the root
nd = tree.getFirstPreorder()
assert nd.isRoot(), 'first preorder node should be the root'
if not rooted_tree:
nodes.append(nd)
subroot = nd.getLeftChild()
height = subroot.getEdgeLen()
nd.setX(height)
if self.pdf_ladderize and self.pdf_ladderize == 'left':
last_tip_index = float(tree.getNObservables() - 1)
nd.setY(last_tip_index) #--> Y is irrelevant if rooted
ntips = 0.0
else:
nd.setY(0.0)
if rooted_tree:
ntips = 0.0
else:
ntips = 1.0
max_height = height
# Determine the width (in points) occupied by the longest taxon label
if self.pdf_tip_label_font and not rooted_tree:
taxon_label = nd.getNodeName()
label_width = float(self.pdf_tip_label_height)*pdf.calcStringWidth(self.pdf_tip_label_font, taxon_label)
if label_width > max_label_points:
max_label_points = label_width
# Record information about the internal node serving as the subroot
nd = nd.getNextPreorder()
assert nd.getParent().isRoot(), 'second preorder node should be the subroot'
nodes.append(nd)
nd.setX(0.0)
nd.setY(0.0)
subroot = nd
# Record information about the remaining nodes in the tree
while True:
nd = nd.getNextPreorder()
if not nd:
break
else:
ndpar = nd.getParent()
nodes.append(nd)
height = nd.getEdgeLen() + ndpar.getX()
nd.setX(height)
if height > max_height:
max_height = height
if nd.isTip():
nd.setY(ntips)
ntips += 1.0
if self.pdf_tip_label_font:
taxon_label = nd.getNodeName()
label_width = float(self.pdf_tip_label_height)*pdf.calcStringWidth(self.pdf_tip_label_font, taxon_label)
if label_width > max_label_points:
max_label_points = label_width
else:
nd.setY(0.0)
        # Compute the length represented by the scale bar from half the maximum
        # tree height (half_xscalemax). For example, if xscalemax = 0.00275:
        #   half_xscalemax = 0.001375
        #   log_xscalemax  = log10(0.001375) = -2.86
        #   ten_to_power   = 10^floor(-2.86) = 10^-3 = 0.001
        #   scalebar       = 0.001*floor(0.001375/0.001) = 0.001*floor(1.375) = 0.001
        #   ndecimals      = -floor(-2.86) = 3
if xscalemax == 0.0:
xscalemax = max_height
half_xscalemax = xscalemax/2.0
log_xscalemax = math.log10(half_xscalemax)
ten_to_power = 10**math.floor(log_xscalemax)
scalebar = ten_to_power*math.floor(half_xscalemax/ten_to_power)
ndecimals = -int(math.floor(log_xscalemax))
if ndecimals < 0:
ndecimals = 0
format_str = '%%.%df' % (ndecimals)
scalebar_str = format_str % scalebar
scalebar_str_extent = float(self.pdf_scalebar_label_height)*pdf.calcStringWidth(self.pdf_scalebar_label_font, scalebar_str)
scalebar_height = float(self.pdf_scalebar_label_height) + 2*spacer + self.pdf_line_width
# Find xscaler (amount by which branch lengths must be multiplied to give x-coordinate)
# and yscaler (amount by which the tip position must be multiplied to give y-coordinate).
xheight = 0.0
if self.pdf_tip_label_font:
xheight = float(self.pdf_tip_label_height)*pdf.getXHeight(self.pdf_tip_label_font)
half_xheight = xheight/2.0
ntips = tree.getNObservables()
label_width = max_label_points + spacer
right_margin = self.pdf_right_margin*inch
left_margin = self.pdf_left_margin*inch
top_margin = self.pdf_top_margin*inch
bottom_margin = self.pdf_bottom_margin*inch
plot_right = self.pdf_page_width*inch
plot_width = plot_right - left_margin - right_margin
plot_top = self.pdf_page_height*inch
plot_height = plot_top - top_margin - bottom_margin
tree_width = plot_width - label_width
tree_height = plot_height
if self.pdf_scalebar_position:
tree_height -= scalebar_height
if title:
tree_height -= 3.0*float(self.pdf_title_height)
tree_x0 = left_margin
tree_y0 = bottom_margin + scalebar_height
xscaler = tree_width/xscalemax
yscaler = tree_height/float(ntips - 1)
#pdf.addRectangle(left_margin, bottom_margin, plot_width, plot_height, 1, 'dotted')
if title and self.pdf_title_height > 0:
# Draw title centered at top of page
title_str_extent = float(self.pdf_title_height)*pdf.calcStringWidth(self.pdf_title_font, title)
title_x = left_margin + (plot_width - title_str_extent)/2.0
title_y = tree_y0 + tree_height + 2.0*float(self.pdf_title_height)
pdf.addText(title_x, title_y, self.pdf_title_font, self.pdf_title_height, title)
if self.pdf_scalebar_position:
if self.pdf_scalebar_position == 'top':
# Draw scalebar horizontally starting at top left corner
scalebar_width = scalebar*xscaler
scalebar_y = tree_x0 + tree_height - scalebar_height + spacer
pdf.addLine(left_margin, scalebar_y, left_margin + scalebar_width, scalebar_y, self.pdf_line_width)
# Draw scalebar text centered above the scalebar
scalebar_x = left_margin + (scalebar_width - scalebar_str_extent)/2.0
scalebar_y = tree_x0 + tree_height - float(self.pdf_scalebar_label_height)
pdf.addText(scalebar_x, scalebar_y, self.pdf_scalebar_label_font, self.pdf_scalebar_label_height, scalebar_str)
else:
# Draw scalebar horizontally starting at bottom left corner
scalebar_width = scalebar*xscaler
pdf.addLine(left_margin, bottom_margin, left_margin + scalebar_width, bottom_margin, self.pdf_line_width)
# Draw scalebar text centered above the scalebar
scalebar_x = left_margin + (scalebar_width - scalebar_str_extent)/2.0
scalebar_y = bottom_margin + spacer
pdf.addText(scalebar_x, scalebar_y, self.pdf_scalebar_label_font, self.pdf_scalebar_label_height, scalebar_str)
# add enough to left margin to center smaller trees horizontally
left_margin += (xscaler*(xscalemax - max_height) + label_width*(1.0 - max_height/xscalemax))/2.0
# add enough to the top margin to center smaller trees vertically
top_margin += (tree_height*(1.0 - max_height/xscalemax))/2.0
#top_margin += (plot_height*(1.0 - max_height/xscalemax))/2.0
# adjust yscaler to keep vertical tree dimension proportional to its horizontal dimension
if self.keep_xy_proportional:
yscaler *= max_height/xscalemax
# adjust tip label height (in points) to make size of tip labels commensurate with size of tree
if self.keep_tip_labels_proportional:
tip_font_points = self.pdf_tip_label_height*max_height/xscalemax
else:
tip_font_points = self.pdf_tip_label_height
# Perform a postorder traversal:
# 1) scale each x-value
# 2) calculate y-value of each internal node as the average y-value of its children
# 3) scale each y-value
# 4) plot each edge
# 5) plot names of tips
# 6) for each internal node, draw shoulder from leftmost child to rightmost
nodes.reverse()
for nd in nodes:
node_x = left_margin + nd.getX()*xscaler
if nd.isTip():
node_y = tree_y0 + tree_height - nd.getY()*yscaler
if self.pdf_scalebar_position and self.pdf_scalebar_position == 'top':
node_y -= scalebar_height
brlen = nd.isRoot() and xscaler*nd.getX() or xscaler*nd.getEdgeLen()
# draw tip node name
if self.pdf_tip_label_font:
pdf.addText(node_x + spacer, node_y - half_xheight, self.pdf_tip_label_font, tip_font_points, nd.getNodeName())
# draw line representing edge leading to tip node
pdf.addLine(node_x, node_y, node_x - brlen, node_y, self.pdf_line_width)
else:
nchildren = 1.0
child = nd.getLeftChild()
left_child = right_child = child
childY = child.getY()
while True:
child = child.getRightSib()
if child:
right_child = child
childY += child.getY()
nchildren += 1.0
else:
break
if (not rooted_tree) and (nd is subroot):
if self.pdf_ladderize and self.pdf_ladderize == 'left':
right_child = nd.getParent()
else:
left_child = nd.getParent()
else:
nd.setY(childY/nchildren)
node_y = tree_y0 + tree_height - childY*yscaler/nchildren
if self.pdf_scalebar_position and self.pdf_scalebar_position == 'top':
node_y -= scalebar_height
brlen = xscaler*nd.getEdgeLen()
# draw line representing edge leading to internal node
pdf.addLine(node_x, node_y, node_x - brlen, node_y, self.pdf_line_width)
# draw line representing shoulders of internal node
left_y = tree_y0 + tree_height - left_child.getY()*yscaler
right_y = tree_y0 + tree_height - right_child.getY()*yscaler
if self.pdf_scalebar_position and self.pdf_scalebar_position == 'top':
left_y -= scalebar_height
right_y -= scalebar_height
pdf.addLine(node_x, left_y, node_x, right_y, self.pdf_line_width)
# if specified, plot support value
if show_support and self.pdf_splits_to_plot:
for p in self.pdf_splits_to_plot.keys():
s = Split()
s.setOnSymbol('*')
s.setOffSymbol('.')
s.createFromPattern(p)
if s.equals(nd.getSplit()):
support_x = node_x + spacer
support_y = (left_y + right_y)/2.0 - half_xheight
support_str = '%.1f' % self.pdf_splits_to_plot[p]
pdf.addText(support_x, support_y, self.pdf_support_label_font, self.pdf_support_label_height, support_str)
break
elif show_support and nd is not subroot:
# Expecting each node's support data member to be set already
support_format = '%%.%df' % self.pdf_support_decimals
if self.pdf_support_as_percent:
support_str = support_format % (100.0*nd.getSupport(),)
else:
support_str = support_format % (nd.getSupport(),)
support_str_extent = float(self.pdf_support_label_height)*pdf.calcStringWidth(self.pdf_support_label_font, support_str)
support_x = node_x - (brlen + support_str_extent)/2.0
support_y = (left_y + right_y)/2.0 + half_xheight
pdf.addText(support_x, support_y, self.pdf_support_label_font, self.pdf_support_label_height, support_str)
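# Hypothetical usage sketch (file names and the outgroup taxon are placeholders):
# the tree-file mode of PDFTree.pdftree() described in its docstring above, which
# plots every tree in pdf_treefile on its own page with a shared scalebar.
def _pdftree_usage_sketch():
    plotter = PDFTree()
    plotter.pdf_treefile = 'trees.t'
    plotter.pdf_filename = 'trees.pdf'
    plotter.pdf_outgroup_taxon = 'Taxon_1'
    plotter.pdftree()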
|
|
#!/usr/bin/python
# Copyright KOLIBERO under one or more contributor license agreements.
# KOLIBERO licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import Flask, jsonify, request, Response, abort
from functools import wraps
import json
import time
import requests
import os
import memcache
import threading
import Queue
#from __future__ import absolute_import, unicode_literals
from cf_recommender.recommender import Recommender
C_QUERY_CNT_KEY = "query_cnt"
C_QUERY_RESP_EMPTY_CNT_KEY = "qresp_empty"
C_QUERY_RESP_OK_CNT_KEY = "qresp_ok"
C_TRAIN_CNT_KEY = "train_cnt"
app = Flask(__name__)
cf_settings = {
# redis
'expire': 3600 * 24 * 30,
'redis': {
'host': 'localhost',
'port': 6379,
'db': 0
},
# recommendation engine settings
'recommendation_count': 10,
'recommendation': {
'update_interval_sec': 600,
'search_depth': 100,
'max_history': 1000,
},
}
print("Connection to MemCached")
mc = memcache.Client(['localhost:11211'], debug=0)
recommendation = Recommender(cf_settings)
train_queue = Queue.Queue()
class TrainingWorker(threading.Thread):
#
def __init__ (self, q):
self.q = q
self.reco = Recommender(cf_settings)
threading.Thread.__init__ (self)
#
def run(self):
while True:
msg = self.q.get()
user_id = msg.get("user_id")
buy_items = msg.get("items")
for item_id in buy_items:
self.reco.register(item_id)
self.reco.like(user_id, buy_items)
#
self.q.task_done()
if self.q.empty():
break
def inc_cnt(p_key):
qcnt = mc.get(p_key)
if not qcnt:
qcnt = 0
qcnt += 1
mc.set(p_key,qcnt,time=7200)
def check_auth(username, password):
return username == 'token' and password == 'unused'
def authenticate():
return Response(
'Could not verify your access level for that URL.\n'
'You have to login with proper credentials', 403,
{'WWW-Authenticate': 'Basic realm="Login Required"'})
def requires_auth(f):
@wraps(f)
def decorated(*args, **kwargs):
auth = request.authorization
if not auth or not check_auth(auth.username, auth.password):
return authenticate()
return f(*args, **kwargs)
return decorated
# Router
@app.route('/api/v1.0/router', methods=['POST'])
def set_router_cxt():
##
ret = {"status":"success"}
#
time0 = int(time.time()*1000)
latencykey = 'latency'
#
try:
##
message = request.json
#print message
# Flush All Training
try:
if message.get("rtype","") == 'flush_training':
latencykey = "latency_"+message.get("rtype","")
#
ret["status"] = "success"
os.system("redis-cli flushall")
except Exception, Argument:
me = "[1] Unexpected Error! "+Argument.message
print me
ret["status"] = "error"
ret["status_reason"] = me
# Service sequence
try:
if message.get("rtype","") == 'service':
latencykey = "latency_"+message.get("rtype","")
#
ret["status"] = "error"
ret["status_reason"] = "Not implemented!"
except Exception, Argument:
me = "[1] Unexpected Error! "+Argument.message
print me
ret["status"] = "error"
ret["status_reason"] = me
# Train sequence - cf reco
try:
if message.get("rtype","") == 'train':
latencykey = "latency_"+message.get("rtype","")
#
ret["status"] = "success"
train_queue.put(message)
train_thread = TrainingWorker(train_queue)
train_thread.start()
#user_id = message.get("user_id")
#buy_items = message.get("items")
#for item_id in buy_items:
# recommendation.register(item_id)
#recommendation.like(user_id, buy_items)
#
inc_cnt(C_TRAIN_CNT_KEY)
except Exception, Argument:
me = "[2] Unexpected Error! "+Argument.message
print me
ret["status"] = "error"
#ret["status_reason"] = "Unexpected Error!"
ret["msg"] = me
# Query sequence - cf reco
try:
if message.get("rtype","") == 'query':
latencykey = "latency_"+message.get("rtype","")
#
result = []
if message.get("items"):
items = message.get("items")
#
itmap = {}
for item_id in items:
res0 = recommendation.get(item_id, count=int(message.get("size")))
for r in res0:
val = itmap.get(r, 0.0)
itmap[r] = val + 1.0
#
result0 = []
for key, value in itmap.items():
#result.append({key:value})
result0.append({"item":key,"rank":value})
result0 = sorted(result0, key=lambda k: k["rank"] )
result0 = result0[-int(message.get("size")):]
#
result = []
for r1 in result0:
result.append({r1.get("item"):r1.get("rank")})
else:
# We need "item_id" and result "size"
result = recommendation.get(message.get("item_id"), count=int(message.get("size")))
#
ret["status"] = "success"
ret["payload"] = result
inc_cnt(C_QUERY_CNT_KEY)
if len(result)>0:
inc_cnt(C_QUERY_RESP_OK_CNT_KEY)
else:
inc_cnt(C_QUERY_RESP_EMPTY_CNT_KEY)
except Exception, Argument:
em = "[3] Unexpected Error! "+Argument.message
print em
ret["status"] = "error"
ret["status_reason"] = em
#ret["msg"] = em
mc.set("message_"+message.get("rtype",""),message,time=7200)
mc.set("response_"+message.get("rtype",""),ret,time=7200)
except Exception, Argument:
em = "[0] Fatal Error! "+Argument.message
ret["status"] = "fatal error"
ret["status_reason"] = em
#ret["msg"] = em
time1 = int(time.time()*1000)
latency = time1 - time0
mc.set(latencykey,{"latency":latency}, time=7200)
return jsonify(ret)
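# Hypothetical client sketch (host, port and item ids are placeholders): the payloads
# mirror the fields set_router_cxt() above reads from request.json for the 'train'
# and 'query' request types.
def _example_router_client(base_url="http://localhost:5000/api/v1.0/router"):
    requests.post(base_url, json={"rtype": "train", "user_id": "user-1",
                                  "items": ["item-1", "item-2"]})
    resp = requests.post(base_url, json={"rtype": "query", "item_id": "item-1",
                                         "size": 5})
    return resp.json()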
if __name__ == '__main__':
app.run(host="0.0.0.0", port=5000, debug=False)
|
|
"""
Improved support for Microsoft Visual C++ compilers.
Known supported compilers:
--------------------------
Microsoft Visual C++ 9.0:
Microsoft Visual C++ Compiler for Python 2.7 (x86, amd64)
Microsoft Windows SDK 6.1 (x86, x64, ia64)
Microsoft Windows SDK 7.0 (x86, x64, ia64)
Microsoft Visual C++ 10.0:
Microsoft Windows SDK 7.1 (x86, x64, ia64)
Microsoft Visual C++ 14.X:
Microsoft Visual C++ Build Tools 2015 (x86, x64, arm)
Microsoft Visual Studio Build Tools 2017 (x86, x64, arm, arm64)
Microsoft Visual Studio Build Tools 2019 (x86, x64, arm, arm64)
This may also support compilers shipped with compatible Visual Studio versions.
"""
import json
from io import open
from os import listdir, pathsep
from os.path import join, isfile, isdir, dirname
import sys
import contextlib
import platform
import itertools
import subprocess
import distutils.errors
from setuptools.extern.packaging.version import LegacyVersion
from .monkey import get_unpatched
if platform.system() == 'Windows':
import winreg
from os import environ
else:
# Mock winreg and environ so the module can be imported on this platform.
class winreg:
HKEY_USERS = None
HKEY_CURRENT_USER = None
HKEY_LOCAL_MACHINE = None
HKEY_CLASSES_ROOT = None
environ = dict()
_msvc9_suppress_errors = (
# msvc9compiler isn't available on some platforms
ImportError,
# msvc9compiler raises DistutilsPlatformError in some
# environments. See #1118.
distutils.errors.DistutilsPlatformError,
)
try:
from distutils.msvc9compiler import Reg
except _msvc9_suppress_errors:
pass
def msvc9_find_vcvarsall(version):
"""
Patched "distutils.msvc9compiler.find_vcvarsall" to use the standalone
compiler build for Python
(VCForPython / Microsoft Visual C++ Compiler for Python 2.7).
Fall back to original behavior when the standalone compiler is not
available.
Redirect the path of "vcvarsall.bat".
Parameters
----------
version: float
Required Microsoft Visual C++ version.
Return
------
str
vcvarsall.bat path
"""
vc_base = r'Software\%sMicrosoft\DevDiv\VCForPython\%0.1f'
key = vc_base % ('', version)
try:
# Per-user installs register the compiler path here
productdir = Reg.get_value(key, "installdir")
except KeyError:
try:
# All-user installs on a 64-bit system register here
key = vc_base % ('Wow6432Node\\', version)
productdir = Reg.get_value(key, "installdir")
except KeyError:
productdir = None
if productdir:
vcvarsall = join(productdir, "vcvarsall.bat")
if isfile(vcvarsall):
return vcvarsall
return get_unpatched(msvc9_find_vcvarsall)(version)
def msvc9_query_vcvarsall(ver, arch='x86', *args, **kwargs):
"""
Patched "distutils.msvc9compiler.query_vcvarsall" for support extra
Microsoft Visual C++ 9.0 and 10.0 compilers.
Set environment without use of "vcvarsall.bat".
Parameters
----------
ver: float
Required Microsoft Visual C++ version.
arch: str
Target architecture.
Return
------
dict
environment
"""
# Try to get environment from vcvarsall.bat (Classical way)
try:
orig = get_unpatched(msvc9_query_vcvarsall)
return orig(ver, arch, *args, **kwargs)
except distutils.errors.DistutilsPlatformError:
# Pass error if Vcvarsall.bat is missing
pass
except ValueError:
# Pass error if environment not set after executing vcvarsall.bat
pass
# If error, try to set environment directly
try:
return EnvironmentInfo(arch, ver).return_env()
except distutils.errors.DistutilsPlatformError as exc:
_augment_exception(exc, ver, arch)
raise
def _msvc14_find_vc2015():
"""Python 3.8 "distutils/_msvccompiler.py" backport"""
try:
key = winreg.OpenKey(
winreg.HKEY_LOCAL_MACHINE,
r"Software\Microsoft\VisualStudio\SxS\VC7",
0,
winreg.KEY_READ | winreg.KEY_WOW64_32KEY
)
except OSError:
return None, None
best_version = 0
best_dir = None
with key:
for i in itertools.count():
try:
v, vc_dir, vt = winreg.EnumValue(key, i)
except OSError:
break
if v and vt == winreg.REG_SZ and isdir(vc_dir):
try:
version = int(float(v))
except (ValueError, TypeError):
continue
if version >= 14 and version > best_version:
best_version, best_dir = version, vc_dir
return best_version, best_dir
def _msvc14_find_vc2017():
"""Python 3.8 "distutils/_msvccompiler.py" backport
Returns "15, path" based on the result of invoking vswhere.exe
If no install is found, returns "None, None"
The version is returned to avoid unnecessarily changing the function
result. It may be ignored when the path is not None.
If vswhere.exe is not available, by definition, VS 2017 is not
installed.
"""
root = environ.get("ProgramFiles(x86)") or environ.get("ProgramFiles")
if not root:
return None, None
try:
path = subprocess.check_output([
join(root, "Microsoft Visual Studio", "Installer", "vswhere.exe"),
"-latest",
"-prerelease",
"-requires", "Microsoft.VisualStudio.Component.VC.Tools.x86.x64",
"-property", "installationPath",
"-products", "*",
]).decode(encoding="mbcs", errors="strict").strip()
except (subprocess.CalledProcessError, OSError, UnicodeDecodeError):
return None, None
path = join(path, "VC", "Auxiliary", "Build")
if isdir(path):
return 15, path
return None, None
PLAT_SPEC_TO_RUNTIME = {
'x86': 'x86',
'x86_amd64': 'x64',
'x86_arm': 'arm',
'x86_arm64': 'arm64'
}
def _msvc14_find_vcvarsall(plat_spec):
"""Python 3.8 "distutils/_msvccompiler.py" backport"""
_, best_dir = _msvc14_find_vc2017()
vcruntime = None
if plat_spec in PLAT_SPEC_TO_RUNTIME:
vcruntime_plat = PLAT_SPEC_TO_RUNTIME[plat_spec]
else:
vcruntime_plat = 'x64' if 'amd64' in plat_spec else 'x86'
if best_dir:
vcredist = join(best_dir, "..", "..", "redist", "MSVC", "**",
vcruntime_plat, "Microsoft.VC14*.CRT",
"vcruntime140.dll")
try:
import glob
vcruntime = glob.glob(vcredist, recursive=True)[-1]
except (ImportError, OSError, LookupError):
vcruntime = None
if not best_dir:
best_version, best_dir = _msvc14_find_vc2015()
if best_version:
vcruntime = join(best_dir, 'redist', vcruntime_plat,
"Microsoft.VC140.CRT", "vcruntime140.dll")
if not best_dir:
return None, None
vcvarsall = join(best_dir, "vcvarsall.bat")
if not isfile(vcvarsall):
return None, None
if not vcruntime or not isfile(vcruntime):
vcruntime = None
return vcvarsall, vcruntime
def _msvc14_get_vc_env(plat_spec):
"""Python 3.8 "distutils/_msvccompiler.py" backport"""
if "DISTUTILS_USE_SDK" in environ:
return {
key.lower(): value
for key, value in environ.items()
}
vcvarsall, vcruntime = _msvc14_find_vcvarsall(plat_spec)
if not vcvarsall:
raise distutils.errors.DistutilsPlatformError(
"Unable to find vcvarsall.bat"
)
try:
out = subprocess.check_output(
'cmd /u /c "{}" {} && set'.format(vcvarsall, plat_spec),
stderr=subprocess.STDOUT,
).decode('utf-16le', errors='replace')
except subprocess.CalledProcessError as exc:
raise distutils.errors.DistutilsPlatformError(
"Error executing {}".format(exc.cmd)
) from exc
env = {
key.lower(): value
for key, _, value in
(line.partition('=') for line in out.splitlines())
if key and value
}
if vcruntime:
env['py_vcruntime_redist'] = vcruntime
return env
def msvc14_get_vc_env(plat_spec):
"""
Patched "distutils._msvccompiler._get_vc_env" for support extra
Microsoft Visual C++ 14.X compilers.
Set environment without use of "vcvarsall.bat".
Parameters
----------
plat_spec: str
Target architecture.
Return
------
dict
environment
"""
# Always use backport from CPython 3.8
try:
return _msvc14_get_vc_env(plat_spec)
except distutils.errors.DistutilsPlatformError as exc:
_augment_exception(exc, 14.0)
raise
def msvc14_gen_lib_options(*args, **kwargs):
"""
Patched "distutils._msvccompiler.gen_lib_options" for fix
compatibility between "numpy.distutils" and "distutils._msvccompiler"
(for Numpy < 1.11.2)
"""
if "numpy.distutils" in sys.modules:
import numpy as np
if LegacyVersion(np.__version__) < LegacyVersion('1.11.2'):
return np.distutils.ccompiler.gen_lib_options(*args, **kwargs)
return get_unpatched(msvc14_gen_lib_options)(*args, **kwargs)
def _augment_exception(exc, version, arch=''):
"""
Add details to the exception message to help guide the user
as to what action will resolve it.
"""
# Error if MSVC++ directory not found or environment not set
message = exc.args[0]
if "vcvarsall" in message.lower() or "visual c" in message.lower():
# Special error message if MSVC++ not installed
tmpl = 'Microsoft Visual C++ {version:0.1f} or greater is required.'
message = tmpl.format(**locals())
msdownload = 'www.microsoft.com/download/details.aspx?id=%d'
if version == 9.0:
if arch.lower().find('ia64') > -1:
# For VC++ 9.0, if IA64 support is needed, redirect user
# to Windows SDK 7.0.
# Note: No download link available from Microsoft.
message += ' Get it with "Microsoft Windows SDK 7.0"'
else:
# For VC++ 9.0 redirect user to Vc++ for Python 2.7 :
# This redirection link is maintained by Microsoft.
# Contact vspython@microsoft.com if it needs updating.
message += ' Get it from http://aka.ms/vcpython27'
elif version == 10.0:
# For VC++ 10.0 Redirect user to Windows SDK 7.1
message += ' Get it with "Microsoft Windows SDK 7.1": '
message += msdownload % 8279
elif version >= 14.0:
# For VC++ 14.X Redirect user to latest Visual C++ Build Tools
message += (' Get it with "Microsoft C++ Build Tools": '
r'https://visualstudio.microsoft.com'
r'/visual-cpp-build-tools/')
exc.args = (message, )
class PlatformInfo:
"""
Current and Target Architectures information.
Parameters
----------
arch: str
Target architecture.
"""
current_cpu = environ.get('processor_architecture', '').lower()
def __init__(self, arch):
self.arch = arch.lower().replace('x64', 'amd64')
@property
def target_cpu(self):
"""
Return Target CPU architecture.
Return
------
str
Target CPU
"""
return self.arch[self.arch.find('_') + 1:]
def target_is_x86(self):
"""
        Return True if the target CPU is 32-bit x86.
Return
------
bool
CPU is x86 32 bits
"""
return self.target_cpu == 'x86'
def current_is_x86(self):
"""
        Return True if the current CPU is 32-bit x86.
Return
------
bool
CPU is x86 32 bits
"""
return self.current_cpu == 'x86'
def current_dir(self, hidex86=False, x64=False):
"""
Current platform specific subfolder.
Parameters
----------
hidex86: bool
return '' and not '\x86' if architecture is x86.
x64: bool
return '\x64' and not '\amd64' if architecture is amd64.
Return
------
str
subfolder: '\target', or '' (see hidex86 parameter)
"""
return (
'' if (self.current_cpu == 'x86' and hidex86) else
r'\x64' if (self.current_cpu == 'amd64' and x64) else
r'\%s' % self.current_cpu
)
def target_dir(self, hidex86=False, x64=False):
r"""
Target platform specific subfolder.
Parameters
----------
hidex86: bool
return '' and not '\x86' if architecture is x86.
x64: bool
return '\x64' and not '\amd64' if architecture is amd64.
Return
------
str
subfolder: '\current', or '' (see hidex86 parameter)
"""
return (
'' if (self.target_cpu == 'x86' and hidex86) else
r'\x64' if (self.target_cpu == 'amd64' and x64) else
r'\%s' % self.target_cpu
)
def cross_dir(self, forcex86=False):
r"""
Cross platform specific subfolder.
Parameters
----------
forcex86: bool
Use 'x86' as current architecture even if current architecture is
not x86.
Return
------
str
subfolder: '' if target architecture is current architecture,
'\current_target' if not.
"""
current = 'x86' if forcex86 else self.current_cpu
return (
'' if self.target_cpu == current else
self.target_dir().replace('\\', '\\%s_' % current)
)
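# Rough usage sketch (not part of the original module; values are hypothetical):
# for a 32-bit host interpreter cross-compiling to amd64,
#   pi = PlatformInfo('x86_amd64')
#   pi.target_dir()            # -> '\\amd64'
#   pi.target_dir(x64=True)    # -> '\\x64'
#   pi.cross_dir()             # -> '\\x86_amd64'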
class RegistryInfo:
"""
Microsoft Visual Studio related registry information.
Parameters
----------
platform_info: PlatformInfo
"PlatformInfo" instance.
"""
HKEYS = (winreg.HKEY_USERS,
winreg.HKEY_CURRENT_USER,
winreg.HKEY_LOCAL_MACHINE,
winreg.HKEY_CLASSES_ROOT)
def __init__(self, platform_info):
self.pi = platform_info
@property
def visualstudio(self):
"""
Microsoft Visual Studio root registry key.
Return
------
str
Registry key
"""
return 'VisualStudio'
@property
def sxs(self):
"""
Microsoft Visual Studio SxS registry key.
Return
------
str
Registry key
"""
return join(self.visualstudio, 'SxS')
@property
def vc(self):
"""
Microsoft Visual C++ VC7 registry key.
Return
------
str
Registry key
"""
return join(self.sxs, 'VC7')
@property
def vs(self):
"""
Microsoft Visual Studio VS7 registry key.
Return
------
str
Registry key
"""
return join(self.sxs, 'VS7')
@property
def vc_for_python(self):
"""
Microsoft Visual C++ for Python registry key.
Return
------
str
Registry key
"""
return r'DevDiv\VCForPython'
@property
def microsoft_sdk(self):
"""
Microsoft SDK registry key.
Return
------
str
Registry key
"""
return 'Microsoft SDKs'
@property
def windows_sdk(self):
"""
Microsoft Windows/Platform SDK registry key.
Return
------
str
Registry key
"""
return join(self.microsoft_sdk, 'Windows')
@property
def netfx_sdk(self):
"""
Microsoft .NET Framework SDK registry key.
Return
------
str
Registry key
"""
return join(self.microsoft_sdk, 'NETFXSDK')
@property
def windows_kits_roots(self):
"""
Microsoft Windows Kits Roots registry key.
Return
------
str
Registry key
"""
return r'Windows Kits\Installed Roots'
def microsoft(self, key, x86=False):
"""
Return key in Microsoft software registry.
Parameters
----------
key: str
Registry key path where look.
x86: str
Force x86 software registry.
Return
------
str
Registry key
"""
node64 = '' if self.pi.current_is_x86() or x86 else 'Wow6432Node'
return join('Software', node64, 'Microsoft', key)
def lookup(self, key, name):
"""
Look for values in registry in Microsoft software registry.
Parameters
----------
key: str
Registry key path where look.
name: str
Value name to find.
Return
------
str
value
"""
key_read = winreg.KEY_READ
openkey = winreg.OpenKey
closekey = winreg.CloseKey
ms = self.microsoft
for hkey in self.HKEYS:
bkey = None
try:
bkey = openkey(hkey, ms(key), 0, key_read)
except (OSError, IOError):
if not self.pi.current_is_x86():
try:
bkey = openkey(hkey, ms(key, True), 0, key_read)
except (OSError, IOError):
continue
else:
continue
try:
return winreg.QueryValueEx(bkey, name)[0]
except (OSError, IOError):
pass
finally:
if bkey:
closekey(bkey)
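# Rough usage sketch (not part of the original module; values are hypothetical):
# lookup() walks HKEY_USERS, HKCU, HKLM and HKCR and returns the first match,
#   ri = RegistryInfo(PlatformInfo('x86'))
#   ri.lookup(ri.vc, '14.0')   # VC 14.0 install dir, or None if not registered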
class SystemInfo:
"""
Microsoft Windows and Visual Studio related system information.
Parameters
----------
registry_info: RegistryInfo
"RegistryInfo" instance.
vc_ver: float
Required Microsoft Visual C++ version.
"""
    # Variables and properties in this class use the original CamelCase names
    # from Microsoft source files to make comparison easier.
WinDir = environ.get('WinDir', '')
ProgramFiles = environ.get('ProgramFiles', '')
ProgramFilesx86 = environ.get('ProgramFiles(x86)', ProgramFiles)
def __init__(self, registry_info, vc_ver=None):
self.ri = registry_info
self.pi = self.ri.pi
self.known_vs_paths = self.find_programdata_vs_vers()
# Except for VS15+, VC version is aligned with VS version
self.vs_ver = self.vc_ver = (
vc_ver or self._find_latest_available_vs_ver())
def _find_latest_available_vs_ver(self):
"""
Find the latest VC version
Return
------
float
version
"""
reg_vc_vers = self.find_reg_vs_vers()
if not (reg_vc_vers or self.known_vs_paths):
raise distutils.errors.DistutilsPlatformError(
'No Microsoft Visual C++ version found')
vc_vers = set(reg_vc_vers)
vc_vers.update(self.known_vs_paths)
return sorted(vc_vers)[-1]
def find_reg_vs_vers(self):
"""
Find Microsoft Visual Studio versions available in registry.
Return
------
list of float
Versions
"""
ms = self.ri.microsoft
vckeys = (self.ri.vc, self.ri.vc_for_python, self.ri.vs)
vs_vers = []
for hkey, key in itertools.product(self.ri.HKEYS, vckeys):
try:
bkey = winreg.OpenKey(hkey, ms(key), 0, winreg.KEY_READ)
except (OSError, IOError):
continue
with bkey:
subkeys, values, _ = winreg.QueryInfoKey(bkey)
for i in range(values):
with contextlib.suppress(ValueError):
ver = float(winreg.EnumValue(bkey, i)[0])
if ver not in vs_vers:
vs_vers.append(ver)
for i in range(subkeys):
with contextlib.suppress(ValueError):
ver = float(winreg.EnumKey(bkey, i))
if ver not in vs_vers:
vs_vers.append(ver)
return sorted(vs_vers)
def find_programdata_vs_vers(self):
r"""
Find Visual studio 2017+ versions from information in
"C:\ProgramData\Microsoft\VisualStudio\Packages\_Instances".
Return
------
dict
float version as key, path as value.
"""
vs_versions = {}
instances_dir = \
r'C:\ProgramData\Microsoft\VisualStudio\Packages\_Instances'
try:
hashed_names = listdir(instances_dir)
except (OSError, IOError):
            # The directory does not exist with every Visual Studio installation
return vs_versions
for name in hashed_names:
try:
# Get VS installation path from "state.json" file
state_path = join(instances_dir, name, 'state.json')
with open(state_path, 'rt', encoding='utf-8') as state_file:
state = json.load(state_file)
vs_path = state['installationPath']
# Raises OSError if this VS installation does not contain VC
listdir(join(vs_path, r'VC\Tools\MSVC'))
# Store version and path
vs_versions[self._as_float_version(
state['installationVersion'])] = vs_path
except (OSError, IOError, KeyError):
# Skip if "state.json" file is missing or bad format
continue
return vs_versions
@staticmethod
def _as_float_version(version):
"""
Return a string version as a simplified float version (major.minor)
Parameters
----------
version: str
Version.
Return
------
float
version
"""
return float('.'.join(version.split('.')[:2]))
@property
def VSInstallDir(self):
"""
Microsoft Visual Studio directory.
Return
------
str
path
"""
# Default path
default = join(self.ProgramFilesx86,
'Microsoft Visual Studio %0.1f' % self.vs_ver)
        # Try to get the path from the registry; if that fails, use the default path
return self.ri.lookup(self.ri.vs, '%0.1f' % self.vs_ver) or default
@property
def VCInstallDir(self):
"""
Microsoft Visual C++ directory.
Return
------
str
path
"""
path = self._guess_vc() or self._guess_vc_legacy()
if not isdir(path):
msg = 'Microsoft Visual C++ directory not found'
raise distutils.errors.DistutilsPlatformError(msg)
return path
def _guess_vc(self):
"""
Locate Visual C++ for VS2017+.
Return
------
str
path
"""
if self.vs_ver <= 14.0:
return ''
try:
# First search in known VS paths
vs_dir = self.known_vs_paths[self.vs_ver]
except KeyError:
# Else, search with path from registry
vs_dir = self.VSInstallDir
guess_vc = join(vs_dir, r'VC\Tools\MSVC')
# Subdir with VC exact version as name
try:
# Update the VC version with real one instead of VS version
vc_ver = listdir(guess_vc)[-1]
self.vc_ver = self._as_float_version(vc_ver)
return join(guess_vc, vc_ver)
except (OSError, IOError, IndexError):
return ''
def _guess_vc_legacy(self):
"""
Locate Visual C++ for versions prior to 2017.
Return
------
str
path
"""
default = join(self.ProgramFilesx86,
r'Microsoft Visual Studio %0.1f\VC' % self.vs_ver)
# Try to get "VC++ for Python" path from registry as default path
reg_path = join(self.ri.vc_for_python, '%0.1f' % self.vs_ver)
python_vc = self.ri.lookup(reg_path, 'installdir')
default_vc = join(python_vc, 'VC') if python_vc else default
        # Try to get the path from the registry; if that fails, use the default path
return self.ri.lookup(self.ri.vc, '%0.1f' % self.vs_ver) or default_vc
@property
def WindowsSdkVersion(self):
"""
Microsoft Windows SDK versions for specified MSVC++ version.
Return
------
tuple of str
versions
"""
if self.vs_ver <= 9.0:
return '7.0', '6.1', '6.0a'
elif self.vs_ver == 10.0:
return '7.1', '7.0a'
elif self.vs_ver == 11.0:
return '8.0', '8.0a'
elif self.vs_ver == 12.0:
return '8.1', '8.1a'
elif self.vs_ver >= 14.0:
return '10.0', '8.1'
@property
def WindowsSdkLastVersion(self):
"""
Microsoft Windows SDK last version.
Return
------
str
version
"""
return self._use_last_dir_name(join(self.WindowsSdkDir, 'lib'))
@property # noqa: C901
def WindowsSdkDir(self): # noqa: C901 # is too complex (12) # FIXME
"""
Microsoft Windows SDK directory.
Return
------
str
path
"""
sdkdir = ''
for ver in self.WindowsSdkVersion:
# Try to get it from registry
loc = join(self.ri.windows_sdk, 'v%s' % ver)
sdkdir = self.ri.lookup(loc, 'installationfolder')
if sdkdir:
break
if not sdkdir or not isdir(sdkdir):
# Try to get "VC++ for Python" version from registry
path = join(self.ri.vc_for_python, '%0.1f' % self.vc_ver)
install_base = self.ri.lookup(path, 'installdir')
if install_base:
sdkdir = join(install_base, 'WinSDK')
if not sdkdir or not isdir(sdkdir):
            # If that fails, use the default new path
for ver in self.WindowsSdkVersion:
intver = ver[:ver.rfind('.')]
path = r'Microsoft SDKs\Windows Kits\%s' % intver
d = join(self.ProgramFiles, path)
if isdir(d):
sdkdir = d
if not sdkdir or not isdir(sdkdir):
            # If that fails, use the default old path
for ver in self.WindowsSdkVersion:
path = r'Microsoft SDKs\Windows\v%s' % ver
d = join(self.ProgramFiles, path)
if isdir(d):
sdkdir = d
if not sdkdir:
            # If that fails, use the Platform SDK
sdkdir = join(self.VCInstallDir, 'PlatformSDK')
return sdkdir
@property
def WindowsSDKExecutablePath(self):
"""
Microsoft Windows SDK executable directory.
Return
------
str
path
"""
# Find WinSDK NetFx Tools registry dir name
if self.vs_ver <= 11.0:
netfxver = 35
arch = ''
else:
netfxver = 40
            hidex86 = self.vs_ver <= 12.0
arch = self.pi.current_dir(x64=True, hidex86=hidex86)
fx = 'WinSDK-NetFx%dTools%s' % (netfxver, arch.replace('\\', '-'))
        # List all possible registry paths
regpaths = []
if self.vs_ver >= 14.0:
for ver in self.NetFxSdkVersion:
regpaths += [join(self.ri.netfx_sdk, ver, fx)]
for ver in self.WindowsSdkVersion:
regpaths += [join(self.ri.windows_sdk, 'v%sA' % ver, fx)]
        # Return the installation folder from the most recent path
for path in regpaths:
execpath = self.ri.lookup(path, 'installationfolder')
if execpath:
return execpath
@property
def FSharpInstallDir(self):
"""
Microsoft Visual F# directory.
Return
------
str
path
"""
path = join(self.ri.visualstudio, r'%0.1f\Setup\F#' % self.vs_ver)
return self.ri.lookup(path, 'productdir') or ''
@property
def UniversalCRTSdkDir(self):
"""
Microsoft Universal CRT SDK directory.
Return
------
str
path
"""
# Set Kit Roots versions for specified MSVC++ version
vers = ('10', '81') if self.vs_ver >= 14.0 else ()
        # Find the path of the most recent Kit
for ver in vers:
sdkdir = self.ri.lookup(self.ri.windows_kits_roots,
'kitsroot%s' % ver)
if sdkdir:
return sdkdir or ''
@property
def UniversalCRTSdkLastVersion(self):
"""
Microsoft Universal C Runtime SDK last version.
Return
------
str
version
"""
return self._use_last_dir_name(join(self.UniversalCRTSdkDir, 'lib'))
@property
def NetFxSdkVersion(self):
"""
Microsoft .NET Framework SDK versions.
Return
------
tuple of str
versions
"""
# Set FxSdk versions for specified VS version
return (('4.7.2', '4.7.1', '4.7',
'4.6.2', '4.6.1', '4.6',
'4.5.2', '4.5.1', '4.5')
if self.vs_ver >= 14.0 else ())
@property
def NetFxSdkDir(self):
"""
Microsoft .NET Framework SDK directory.
Return
------
str
path
"""
sdkdir = ''
for ver in self.NetFxSdkVersion:
loc = join(self.ri.netfx_sdk, ver)
sdkdir = self.ri.lookup(loc, 'kitsinstallationfolder')
if sdkdir:
break
return sdkdir
@property
def FrameworkDir32(self):
"""
Microsoft .NET Framework 32bit directory.
Return
------
str
path
"""
# Default path
guess_fw = join(self.WinDir, r'Microsoft.NET\Framework')
        # Try to get the path from the registry; if that fails, use the default path
return self.ri.lookup(self.ri.vc, 'frameworkdir32') or guess_fw
@property
def FrameworkDir64(self):
"""
Microsoft .NET Framework 64bit directory.
Return
------
str
path
"""
# Default path
guess_fw = join(self.WinDir, r'Microsoft.NET\Framework64')
        # Try to get the path from the registry; if that fails, use the default path
return self.ri.lookup(self.ri.vc, 'frameworkdir64') or guess_fw
@property
def FrameworkVersion32(self):
"""
Microsoft .NET Framework 32bit versions.
Return
------
tuple of str
versions
"""
return self._find_dot_net_versions(32)
@property
def FrameworkVersion64(self):
"""
Microsoft .NET Framework 64bit versions.
Return
------
tuple of str
versions
"""
return self._find_dot_net_versions(64)
def _find_dot_net_versions(self, bits):
"""
Find Microsoft .NET Framework versions.
Parameters
----------
bits: int
Platform number of bits: 32 or 64.
Return
------
tuple of str
versions
"""
# Find actual .NET version in registry
reg_ver = self.ri.lookup(self.ri.vc, 'frameworkver%d' % bits)
dot_net_dir = getattr(self, 'FrameworkDir%d' % bits)
ver = reg_ver or self._use_last_dir_name(dot_net_dir, 'v') or ''
# Set .NET versions for specified MSVC++ version
if self.vs_ver >= 12.0:
return ver, 'v4.0'
elif self.vs_ver >= 10.0:
return 'v4.0.30319' if ver.lower()[:2] != 'v4' else ver, 'v3.5'
elif self.vs_ver == 9.0:
return 'v3.5', 'v2.0.50727'
elif self.vs_ver == 8.0:
return 'v3.0', 'v2.0.50727'
@staticmethod
def _use_last_dir_name(path, prefix=''):
"""
Return name of the last dir in path or '' if no dir found.
Parameters
----------
path: str
Use dirs in this path
prefix: str
Use only dirs starting by this prefix
Return
------
str
name
"""
matching_dirs = (
dir_name
for dir_name in reversed(listdir(path))
if isdir(join(path, dir_name)) and
dir_name.startswith(prefix)
)
return next(matching_dirs, None) or ''
class EnvironmentInfo:
"""
    Return environment variables for the specified Microsoft Visual C++ version
    and platform: Lib, Include, Path and libpath.
    This class is compatible with Microsoft Visual C++ 9.0 to 14.X.
    It was created by analysing Microsoft environment configuration files like
"vcvars[...].bat", "SetEnv.Cmd", "vcbuildtools.bat", ...
Parameters
----------
arch: str
Target architecture.
vc_ver: float
Required Microsoft Visual C++ version. If not set, autodetect the last
version.
vc_min_ver: float
Minimum Microsoft Visual C++ version.
"""
    # Variables and properties in this class use the original CamelCase names
    # from Microsoft source files to make comparison easier.
def __init__(self, arch, vc_ver=None, vc_min_ver=0):
self.pi = PlatformInfo(arch)
self.ri = RegistryInfo(self.pi)
self.si = SystemInfo(self.ri, vc_ver)
if self.vc_ver < vc_min_ver:
err = 'No suitable Microsoft Visual C++ version found'
raise distutils.errors.DistutilsPlatformError(err)
@property
def vs_ver(self):
"""
        Microsoft Visual Studio version.
Return
------
float
version
"""
return self.si.vs_ver
@property
def vc_ver(self):
"""
Microsoft Visual C++ version.
Return
------
float
version
"""
return self.si.vc_ver
@property
def VSTools(self):
"""
Microsoft Visual Studio Tools.
Return
------
list of str
paths
"""
paths = [r'Common7\IDE', r'Common7\Tools']
if self.vs_ver >= 14.0:
arch_subdir = self.pi.current_dir(hidex86=True, x64=True)
paths += [r'Common7\IDE\CommonExtensions\Microsoft\TestWindow']
paths += [r'Team Tools\Performance Tools']
paths += [r'Team Tools\Performance Tools%s' % arch_subdir]
return [join(self.si.VSInstallDir, path) for path in paths]
@property
def VCIncludes(self):
"""
Microsoft Visual C++ & Microsoft Foundation Class Includes.
Return
------
list of str
paths
"""
return [join(self.si.VCInstallDir, 'Include'),
join(self.si.VCInstallDir, r'ATLMFC\Include')]
@property
def VCLibraries(self):
"""
Microsoft Visual C++ & Microsoft Foundation Class Libraries.
Return
------
list of str
paths
"""
if self.vs_ver >= 15.0:
arch_subdir = self.pi.target_dir(x64=True)
else:
arch_subdir = self.pi.target_dir(hidex86=True)
paths = ['Lib%s' % arch_subdir, r'ATLMFC\Lib%s' % arch_subdir]
if self.vs_ver >= 14.0:
paths += [r'Lib\store%s' % arch_subdir]
return [join(self.si.VCInstallDir, path) for path in paths]
@property
def VCStoreRefs(self):
"""
Microsoft Visual C++ store references Libraries.
Return
------
list of str
paths
"""
if self.vs_ver < 14.0:
return []
return [join(self.si.VCInstallDir, r'Lib\store\references')]
@property
def VCTools(self):
"""
Microsoft Visual C++ Tools.
Return
------
list of str
paths
"""
si = self.si
tools = [join(si.VCInstallDir, 'VCPackages')]
        forcex86 = self.vs_ver <= 10.0
arch_subdir = self.pi.cross_dir(forcex86)
if arch_subdir:
tools += [join(si.VCInstallDir, 'Bin%s' % arch_subdir)]
if self.vs_ver == 14.0:
path = 'Bin%s' % self.pi.current_dir(hidex86=True)
tools += [join(si.VCInstallDir, path)]
elif self.vs_ver >= 15.0:
host_dir = (r'bin\HostX86%s' if self.pi.current_is_x86() else
r'bin\HostX64%s')
tools += [join(
si.VCInstallDir, host_dir % self.pi.target_dir(x64=True))]
if self.pi.current_cpu != self.pi.target_cpu:
tools += [join(
si.VCInstallDir, host_dir % self.pi.current_dir(x64=True))]
else:
tools += [join(si.VCInstallDir, 'Bin')]
return tools
@property
def OSLibraries(self):
"""
Microsoft Windows SDK Libraries.
Return
------
list of str
paths
"""
if self.vs_ver <= 10.0:
arch_subdir = self.pi.target_dir(hidex86=True, x64=True)
return [join(self.si.WindowsSdkDir, 'Lib%s' % arch_subdir)]
else:
arch_subdir = self.pi.target_dir(x64=True)
lib = join(self.si.WindowsSdkDir, 'lib')
libver = self._sdk_subdir
return [join(lib, '%sum%s' % (libver, arch_subdir))]
@property
def OSIncludes(self):
"""
Microsoft Windows SDK Include.
Return
------
list of str
paths
"""
include = join(self.si.WindowsSdkDir, 'include')
if self.vs_ver <= 10.0:
return [include, join(include, 'gl')]
else:
if self.vs_ver >= 14.0:
sdkver = self._sdk_subdir
else:
sdkver = ''
return [join(include, '%sshared' % sdkver),
join(include, '%sum' % sdkver),
join(include, '%swinrt' % sdkver)]
@property
def OSLibpath(self):
"""
Microsoft Windows SDK Libraries Paths.
Return
------
list of str
paths
"""
ref = join(self.si.WindowsSdkDir, 'References')
libpath = []
if self.vs_ver <= 9.0:
libpath += self.OSLibraries
if self.vs_ver >= 11.0:
libpath += [join(ref, r'CommonConfiguration\Neutral')]
if self.vs_ver >= 14.0:
libpath += [
ref,
join(self.si.WindowsSdkDir, 'UnionMetadata'),
join(
ref, 'Windows.Foundation.UniversalApiContract', '1.0.0.0'),
join(ref, 'Windows.Foundation.FoundationContract', '1.0.0.0'),
join(
ref, 'Windows.Networking.Connectivity.WwanContract',
'1.0.0.0'),
join(
self.si.WindowsSdkDir, 'ExtensionSDKs', 'Microsoft.VCLibs',
'%0.1f' % self.vs_ver, 'References', 'CommonConfiguration',
'neutral'),
]
return libpath
@property
def SdkTools(self):
"""
Microsoft Windows SDK Tools.
Return
------
list of str
paths
"""
return list(self._sdk_tools())
def _sdk_tools(self):
"""
Microsoft Windows SDK Tools paths generator.
Return
------
generator of str
paths
"""
if self.vs_ver < 15.0:
bin_dir = 'Bin' if self.vs_ver <= 11.0 else r'Bin\x86'
yield join(self.si.WindowsSdkDir, bin_dir)
if not self.pi.current_is_x86():
arch_subdir = self.pi.current_dir(x64=True)
path = 'Bin%s' % arch_subdir
yield join(self.si.WindowsSdkDir, path)
if self.vs_ver in (10.0, 11.0):
if self.pi.target_is_x86():
arch_subdir = ''
else:
arch_subdir = self.pi.current_dir(hidex86=True, x64=True)
path = r'Bin\NETFX 4.0 Tools%s' % arch_subdir
yield join(self.si.WindowsSdkDir, path)
elif self.vs_ver >= 15.0:
path = join(self.si.WindowsSdkDir, 'Bin')
arch_subdir = self.pi.current_dir(x64=True)
sdkver = self.si.WindowsSdkLastVersion
yield join(path, '%s%s' % (sdkver, arch_subdir))
if self.si.WindowsSDKExecutablePath:
yield self.si.WindowsSDKExecutablePath
@property
def _sdk_subdir(self):
"""
Microsoft Windows SDK version subdir.
Return
------
str
subdir
"""
ucrtver = self.si.WindowsSdkLastVersion
return ('%s\\' % ucrtver) if ucrtver else ''
@property
def SdkSetup(self):
"""
Microsoft Windows SDK Setup.
Return
------
list of str
paths
"""
if self.vs_ver > 9.0:
return []
return [join(self.si.WindowsSdkDir, 'Setup')]
@property
def FxTools(self):
"""
Microsoft .NET Framework Tools.
Return
------
list of str
paths
"""
pi = self.pi
si = self.si
if self.vs_ver <= 10.0:
include32 = True
include64 = not pi.target_is_x86() and not pi.current_is_x86()
else:
include32 = pi.target_is_x86() or pi.current_is_x86()
include64 = pi.current_cpu == 'amd64' or pi.target_cpu == 'amd64'
tools = []
if include32:
tools += [join(si.FrameworkDir32, ver)
for ver in si.FrameworkVersion32]
if include64:
tools += [join(si.FrameworkDir64, ver)
for ver in si.FrameworkVersion64]
return tools
@property
def NetFxSDKLibraries(self):
"""
Microsoft .Net Framework SDK Libraries.
Return
------
list of str
paths
"""
if self.vs_ver < 14.0 or not self.si.NetFxSdkDir:
return []
arch_subdir = self.pi.target_dir(x64=True)
return [join(self.si.NetFxSdkDir, r'lib\um%s' % arch_subdir)]
@property
def NetFxSDKIncludes(self):
"""
Microsoft .Net Framework SDK Includes.
Return
------
list of str
paths
"""
if self.vs_ver < 14.0 or not self.si.NetFxSdkDir:
return []
return [join(self.si.NetFxSdkDir, r'include\um')]
@property
def VsTDb(self):
"""
Microsoft Visual Studio Team System Database.
Return
------
list of str
paths
"""
return [join(self.si.VSInstallDir, r'VSTSDB\Deploy')]
@property
def MSBuild(self):
"""
Microsoft Build Engine.
Return
------
list of str
paths
"""
if self.vs_ver < 12.0:
return []
elif self.vs_ver < 15.0:
base_path = self.si.ProgramFilesx86
arch_subdir = self.pi.current_dir(hidex86=True)
else:
base_path = self.si.VSInstallDir
arch_subdir = ''
path = r'MSBuild\%0.1f\bin%s' % (self.vs_ver, arch_subdir)
build = [join(base_path, path)]
if self.vs_ver >= 15.0:
# Add Roslyn C# & Visual Basic Compiler
build += [join(base_path, path, 'Roslyn')]
return build
@property
def HTMLHelpWorkshop(self):
"""
Microsoft HTML Help Workshop.
Return
------
list of str
paths
"""
if self.vs_ver < 11.0:
return []
return [join(self.si.ProgramFilesx86, 'HTML Help Workshop')]
@property
def UCRTLibraries(self):
"""
Microsoft Universal C Runtime SDK Libraries.
Return
------
list of str
paths
"""
if self.vs_ver < 14.0:
return []
arch_subdir = self.pi.target_dir(x64=True)
lib = join(self.si.UniversalCRTSdkDir, 'lib')
ucrtver = self._ucrt_subdir
return [join(lib, '%sucrt%s' % (ucrtver, arch_subdir))]
@property
def UCRTIncludes(self):
"""
Microsoft Universal C Runtime SDK Include.
Return
------
list of str
paths
"""
if self.vs_ver < 14.0:
return []
include = join(self.si.UniversalCRTSdkDir, 'include')
return [join(include, '%sucrt' % self._ucrt_subdir)]
@property
def _ucrt_subdir(self):
"""
Microsoft Universal C Runtime SDK version subdir.
Return
------
str
subdir
"""
ucrtver = self.si.UniversalCRTSdkLastVersion
return ('%s\\' % ucrtver) if ucrtver else ''
@property
def FSharp(self):
"""
Microsoft Visual F#.
Return
------
list of str
paths
"""
        # Only applicable to Visual Studio 11.0 and 12.0
        if not 11.0 <= self.vs_ver <= 12.0:
            return []
return [self.si.FSharpInstallDir]
@property
def VCRuntimeRedist(self):
"""
Microsoft Visual C++ runtime redistributable dll.
Return
------
str
path
"""
vcruntime = 'vcruntime%d0.dll' % self.vc_ver
arch_subdir = self.pi.target_dir(x64=True).strip('\\')
# Installation prefixes candidates
prefixes = []
tools_path = self.si.VCInstallDir
redist_path = dirname(tools_path.replace(r'\Tools', r'\Redist'))
if isdir(redist_path):
# Redist version may not be exactly the same as tools
redist_path = join(redist_path, listdir(redist_path)[-1])
prefixes += [redist_path, join(redist_path, 'onecore')]
prefixes += [join(tools_path, 'redist')] # VS14 legacy path
# CRT directory
crt_dirs = ('Microsoft.VC%d.CRT' % (self.vc_ver * 10),
                    # Sometimes stored in a directory named after the VS version instead of VC
'Microsoft.VC%d.CRT' % (int(self.vs_ver) * 10))
# vcruntime path
for prefix, crt_dir in itertools.product(prefixes, crt_dirs):
path = join(prefix, arch_subdir, crt_dir, vcruntime)
if isfile(path):
return path
def return_env(self, exists=True):
"""
Return environment dict.
Parameters
----------
exists: bool
            If True, only return existing paths.
Return
------
dict
environment
"""
env = dict(
include=self._build_paths('include',
[self.VCIncludes,
self.OSIncludes,
self.UCRTIncludes,
self.NetFxSDKIncludes],
exists),
lib=self._build_paths('lib',
[self.VCLibraries,
self.OSLibraries,
self.FxTools,
self.UCRTLibraries,
self.NetFxSDKLibraries],
exists),
libpath=self._build_paths('libpath',
[self.VCLibraries,
self.FxTools,
self.VCStoreRefs,
self.OSLibpath],
exists),
path=self._build_paths('path',
[self.VCTools,
self.VSTools,
self.VsTDb,
self.SdkTools,
self.SdkSetup,
self.FxTools,
self.MSBuild,
self.HTMLHelpWorkshop,
self.FSharp],
exists),
)
        vcruntime = self.VCRuntimeRedist if self.vs_ver >= 14 else None
        # vcruntime may be None when no redistributable dll was found
        if vcruntime and isfile(vcruntime):
            env['py_vcruntime_redist'] = vcruntime
return env
def _build_paths(self, name, spec_path_lists, exists):
"""
Given an environment variable name and specified paths,
return a pathsep-separated string of paths containing
unique, extant, directories from those paths and from
the environment variable. Raise an error if no paths
are resolved.
Parameters
----------
name: str
Environment variable name
spec_path_lists: list of str
Paths
exists: bool
            If True, only return existing paths.
Return
------
str
Pathsep-separated paths
"""
# flatten spec_path_lists
spec_paths = itertools.chain.from_iterable(spec_path_lists)
env_paths = environ.get(name, '').split(pathsep)
paths = itertools.chain(spec_paths, env_paths)
extant_paths = list(filter(isdir, paths)) if exists else paths
if not extant_paths:
msg = "%s environment variable is empty" % name.upper()
raise distutils.errors.DistutilsPlatformError(msg)
unique_paths = self._unique_everseen(extant_paths)
return pathsep.join(unique_paths)
# from Python docs
@staticmethod
def _unique_everseen(iterable, key=None):
"""
List unique elements, preserving order.
Remember all elements ever seen.
_unique_everseen('AAAABBBCCDAABBB') --> A B C D
_unique_everseen('ABBCcAD', str.lower) --> A B C D
"""
seen = set()
seen_add = seen.add
if key is None:
for element in itertools.filterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
else:
for element in iterable:
k = key(element)
if k not in seen:
seen_add(k)
yield element
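# Rough usage sketch (not part of the original module; values are hypothetical):
# building a complete MSVC environment without vcvarsall.bat for an amd64 target,
#   env = EnvironmentInfo('x86_amd64', vc_min_ver=14.0).return_env()
#   env['path'], env['include'], env['lib'], env['libpath']   # pathsep-joined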
|
|
# Copyright (C) 2016 DNAnexus, Inc.
#
# This file is part of dx-toolkit (DNAnexus platform client libraries).
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function, unicode_literals, division, absolute_import
import os
import sys
import subprocess
import tempfile
import shutil
import json
from .compat import open
from .exceptions import err_exit
from .utils import json_load_raise_on_duplicates
from .utils.resolver import is_container_id, resolve_path
from .cli import try_call
import dxpy
ASSET_BUILDER_PRECISE = "app-create_asset_precise"
ASSET_BUILDER_TRUSTY = "app-create_asset_trusty"
ASSET_BUILDER_XENIAL = "app-create_asset_xenial"
ASSET_BUILDER_XENIAL_V1 = "app-create_asset_xenial_v1"
ASSET_BUILDER_FOCAL = "app-create_asset_focal"
class AssetBuilderException(Exception):
"""
This exception is raised by the methods in this module
when asset building fails.
"""
pass
def parse_asset_spec(src_dir):
if not os.path.isdir(src_dir):
err_exit(src_dir + " is not a valid directory.")
if not os.path.exists(os.path.join(src_dir, "dxasset.json")):
raise AssetBuilderException("'" + src_dir + "' is not a valid DNAnexus asset source directory." +
" It does not contain a 'dxasset.json' file.")
with open(os.path.join(src_dir, "dxasset.json")) as asset_desc:
try:
return json_load_raise_on_duplicates(asset_desc)
except Exception as e:
raise AssetBuilderException("Could not parse dxasset.json file as JSON: " + str(e.args))
def validate_conf(asset_conf):
"""
Validates the contents of the conf file and makes sure that the required information
is provided.
{
"name": "asset_library_name",
"title": "A human readable name",
"description": " A detailed description abput the asset",
"version": "0.0.1",
"runSpecVersion": "1",
"release": "16.04",
"distribution": "Ubuntu"
"execDepends":
[
{"name": "samtools", "package_manager": "apt"},
{"name": "bamtools"},
{"name": "bio", "package_manager": "gem", "version": "1.4.3"},
{"name": "pysam","package_manager": "pip", "version": "0.7.4"},
{"name": "Bio::SeqIO", "package_manager": "cpan", "version": "1.006924"}
]
}
"""
if 'name' not in asset_conf:
raise AssetBuilderException('The asset configuration does not contain the required field "name".')
# Validate runSpec
if 'release' not in asset_conf or asset_conf['release'] not in ["20.04", "16.04", "14.04", "12.04"]:
raise AssetBuilderException('The "release" field value should be either "20.04", "16.04", "14.04" (DEPRECATED), or "12.04" (DEPRECATED)')
if 'runSpecVersion' in asset_conf:
if asset_conf['runSpecVersion'] not in ["0", "1"]:
raise AssetBuilderException('The "runSpecVersion" field should be either "0", or "1"')
if (asset_conf['runSpecVersion'] == "1" and asset_conf['release'] != "16.04"):
raise AssetBuilderException('The "runSpecVersion" field can only be "1" if "release" is "16.04"')
else:
asset_conf['runSpecVersion'] = "0"
if 'distribution' in asset_conf:
if asset_conf['distribution'] != "Ubuntu":
raise AssetBuilderException('The distribution may only take the value "Ubuntu".')
else:
asset_conf['distribution'] = "Ubuntu"
if 'version' not in asset_conf:
raise AssetBuilderException('The asset configuration does not contain the required field "version". ')
if 'title' not in asset_conf:
raise AssetBuilderException('The asset configuration does not contain the required field "title". ')
if 'description' not in asset_conf:
raise AssetBuilderException('The asset configuration does not contain the required field "description".')
def dx_upload(file_name, dest_project, target_folder, json_out):
try:
maybe_progress_kwargs = {} if json_out else dict(show_progress=True)
remote_file = dxpy.upload_local_file(file_name,
project=dest_project,
folder=target_folder,
wait_on_close=True,
**maybe_progress_kwargs)
return remote_file
except:
print("Failed to upload the file " + file_name, file=sys.stderr)
raise
def get_asset_make(src_dir, dest_folder, target_folder, json_out):
if os.path.exists(os.path.join(src_dir, "Makefile")):
return dx_upload(os.path.join(src_dir, "Makefile"), dest_folder, target_folder, json_out)
elif os.path.exists(os.path.join(src_dir, "makefile")):
return dx_upload(os.path.join(src_dir, "makefile"), dest_folder, target_folder, json_out)
def parse_destination(dest_str):
"""
Parses dest_str, which is (roughly) of the form
PROJECT:/FOLDER/NAME, and returns a tuple (project, folder, name)
"""
# Interpret strings of form "project-XXXX" (no colon) as project. If
# we pass these through to resolve_path they would get interpreted
# as folder names...
if is_container_id(dest_str):
return (dest_str, None, None)
# ...otherwise, defer to resolver.resolve_path. This handles the
# following forms:
#
# /FOLDER/
# /ENTITYNAME
# /FOLDER/ENTITYNAME
# [PROJECT]:
# [PROJECT]:/FOLDER/
# [PROJECT]:/ENTITYNAME
# [PROJECT]:/FOLDER/ENTITYNAME
return try_call(resolve_path, dest_str)
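# Illustrative sketch (not part of the original module; inputs are hypothetical):
# a bare container ID ("project-" plus its 24-character hash) comes back as
# (project, None, None); anything else goes through resolve_path, e.g.
#   parse_destination("MyProject:/assets/my_asset")
#   # -> (<project id>, "/assets", "my_asset")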
def get_asset_tarball(asset_name, src_dir, dest_project, dest_folder, json_out):
"""
    If the src_dir contains a "resources" directory, its contents are archived
    and the archive is uploaded to the platform.
"""
if os.path.isdir(os.path.join(src_dir, "resources")):
temp_dir = tempfile.mkdtemp()
try:
resource_file = os.path.join(temp_dir, asset_name + "_resources.tar.gz")
cmd = ["tar", "-czf", resource_file, "-C", os.path.join(src_dir, "resources"), "."]
subprocess.check_call(cmd)
file_id = dx_upload(resource_file, dest_project, dest_folder, json_out)
return file_id
finally:
shutil.rmtree(temp_dir)
def build_asset(args):
if args.src_dir is None:
args.src_dir = os.getcwd()
dest_project_name = None
dest_folder_name = None
dest_asset_name = None
make_file = None
asset_file = None
conf_file = None
try:
asset_conf = parse_asset_spec(args.src_dir)
validate_conf(asset_conf)
asset_conf_file = os.path.join(args.src_dir, "dxasset.json")
dxpy.api.system_whoami()
dest_project_name, dest_folder_name, dest_asset_name = parse_destination(args.destination)
if dest_project_name is None:
raise AssetBuilderException("Can't build an asset without specifying a destination project; \
please use the -d/--destination flag to explicitly specify a project")
if dest_asset_name is None:
dest_asset_name = asset_conf['name']
        # If dx build_asset is launched from a job, set the json flag to True to avoid watching the job log
if dxpy.JOB_ID:
args.json = True
if not args.json:
print("Uploading input files for the AssetBuilder", file=sys.stderr)
conf_file = dx_upload(asset_conf_file, dest_project_name, dest_folder_name, args.json)
make_file = get_asset_make(args.src_dir, dest_project_name, dest_folder_name, args.json)
asset_file = get_asset_tarball(asset_conf['name'], args.src_dir, dest_project_name,
dest_folder_name, args.json)
input_hash = {"conf_json": dxpy.dxlink(conf_file)}
if asset_file:
input_hash["custom_asset"] = dxpy.dxlink(asset_file)
if make_file:
input_hash["asset_makefile"] = dxpy.dxlink(make_file)
builder_run_options = {
"name": dest_asset_name,
"input": input_hash
}
if args.priority is not None:
builder_run_options["priority"] = args.priority
# Add the default destination project to app run options, if it is not run from a job
if not dxpy.JOB_ID:
builder_run_options["project"] = dest_project_name
if 'instanceType' in asset_conf:
builder_run_options["systemRequirements"] = {"*": {"instanceType": asset_conf["instanceType"]}}
if dest_folder_name:
builder_run_options["folder"] = dest_folder_name
if asset_conf['release'] == "12.04":
app_run_result = dxpy.api.app_run(ASSET_BUILDER_PRECISE, input_params=builder_run_options)
elif asset_conf['release'] == "14.04":
app_run_result = dxpy.api.app_run(ASSET_BUILDER_TRUSTY, input_params=builder_run_options)
elif asset_conf['release'] == "16.04" and asset_conf['runSpecVersion'] == '1':
app_run_result = dxpy.api.app_run(ASSET_BUILDER_XENIAL_V1, input_params=builder_run_options)
elif asset_conf['release'] == "16.04":
app_run_result = dxpy.api.app_run(ASSET_BUILDER_XENIAL, input_params=builder_run_options)
elif asset_conf['release'] == "20.04":
app_run_result = dxpy.api.app_run(ASSET_BUILDER_FOCAL, input_params=builder_run_options)
job_id = app_run_result["id"]
if not args.json:
print("\nStarted job '" + str(job_id) + "' to build the asset bundle.\n", file=sys.stderr)
if args.watch:
try:
subprocess.check_call(["dx", "watch", job_id])
except subprocess.CalledProcessError as e:
if e.returncode == 3:
# Some kind of failure to build the asset. The reason
# for the failure is probably self-evident from the
# job log (and if it's not, the CalledProcessError
# is not informative anyway), so just propagate the
# return code without additional remarks.
sys.exit(3)
else:
raise e
dxpy.DXJob(job_id).wait_on_done(interval=1)
asset_id, _ = dxpy.get_dxlink_ids(dxpy.api.job_describe(job_id)['output']['asset_bundle'])
if args.json:
print(json.dumps({"id": asset_id}))
else:
print("\nAsset bundle '" + asset_id +
"' is built and can now be used in your app/applet's dxapp.json\n", file=sys.stderr)
except Exception as de:
print(de.__class__.__name__ + ": " + str(de), file=sys.stderr)
sys.exit(1)
finally:
if conf_file:
try:
conf_file.remove()
except:
pass
if make_file:
try:
make_file.remove()
except:
pass
if asset_file:
try:
asset_file.remove()
except:
pass
|
|
import os.path
import tempfile
from unittest import TestCase
from lxml import etree
from mock import Mock, patch
from regparser.builder import (
Builder, Checkpointer, LayerCacheAggregator, NullCheckpointer)
from regparser.tree.struct import Node
class BuilderTests(TestCase):
@patch.object(Builder, 'merge_changes')
@patch.object(Builder, '__init__')
def test_revision_generator_notices(self, init, merge_changes):
init.return_value = None
b = Builder() # Don't need parameters as init's been mocked out
aaaa = {'document_number': 'aaaa', 'effective_on': '2012-12-12',
'publication_date': '2011-11-11', 'changes': []}
bbbb = {'document_number': 'bbbb', 'effective_on': '2012-12-12',
'publication_date': '2011-11-12', 'changes': []}
cccc = {'document_number': 'cccc', 'effective_on': '2013-01-01',
'publication_date': '2012-01-01', 'changes': []}
b.notices = [aaaa, bbbb, cccc]
b.eff_notices = {'2012-12-12': [aaaa, bbbb], '2013-01-01': [cccc]}
b.doc_number = 'aaaa'
b.checkpointer = NullCheckpointer()
tree = Node(label=['1111'])
version_list = []
notice_lists = []
for notice, _, _, notices in b.revision_generator(tree):
version_list.append(notice['document_number'])
notice_lists.append(notices)
self.assertEqual(['bbbb', 'cccc'], version_list)
self.assertEqual(2, len(notice_lists))
self.assertEqual(2, len(notice_lists[0]))
self.assertTrue(aaaa in notice_lists[0])
self.assertTrue(bbbb in notice_lists[0])
self.assertEqual(3, len(notice_lists[1]))
self.assertTrue(aaaa in notice_lists[1])
self.assertTrue(bbbb in notice_lists[1])
self.assertTrue(cccc in notice_lists[1])
@patch.object(Builder, '__init__')
def test_layer_cache(self, init):
"""Integration test for layer caching"""
init.return_value = None
cache = LayerCacheAggregator()
b = Builder() # Don't need parameters as init's been mocked out
b.cfr_title, b.cfr_part, b.doc_number = 15, '111', '111-222'
b.writer = Mock()
b.checkpointer = NullCheckpointer()
write = b.writer.layer.return_value.write
tree = Node(label=["1234"], children=[
Node(label=["1234", "1"], children=[
Node("See paragraph (b)", label=["1234", "1", "a"]),
Node("This is b", label=["1234", "1", "b"])])])
b.gen_and_write_layers(tree, [], cache, [])
arg = write.call_args_list[9][0][0]
self.assertEqual(['1234-1-a'], arg.keys())
cache.replace_using(tree)
write.reset_mock()
tree.children[0].children[1].text = "References paragraph (a)"
b.gen_and_write_layers(tree, [], cache, [])
arg = write.call_args_list[9][0][0]
self.assertEqual(['1234-1-a'], arg.keys())
write.reset_mock()
tree.children[0].children[0].text = "Contains no references"
b.gen_and_write_layers(tree, [], cache, [])
arg = write.call_args_list[9][0][0]
self.assertEqual(['1234-1-a'], arg.keys())
write.reset_mock()
notice = {'document_number': '111-222'}
cache.invalidate_by_notice(notice)
b.gen_and_write_layers(tree, [], cache, [])
arg = write.call_args_list[9][0][0]
self.assertEqual(['1234-1-a'], arg.keys())
write.reset_mock()
notice['changes'] = {'1234-1-b': 'some change'}
cache.invalidate_by_notice(notice)
b.gen_and_write_layers(tree, [], cache, [])
arg = write.call_args_list[9][0][0]
self.assertEqual(['1234-1-a', '1234-1-b'], list(sorted(arg.keys())))
write.reset_mock()
notice['changes'] = {'1234-Subpart-A': 'some change'}
cache.invalidate_by_notice(notice)
b.gen_and_write_layers(tree, [], cache, [])
arg = write.call_args_list[9][0][0]
self.assertEqual(['1234-1-b'], list(sorted(arg.keys())))
def test_determine_doc_number_fr(self):
"""Verify that a document number can be pulled out of an FR notice"""
xml_str = """
<RULE>
<FRDOC>[FR Doc. 2011-31715 Filed 12-21-11; 8:45 am]</FRDOC>
<BILCOD>BILLING CODE 4810-AM-P</BILCOD>
</RULE>"""
self.assertEqual(
'2011-31715', Builder.determine_doc_number(xml_str, '00', '00'))
@patch('regparser.builder.fetch_notice_json')
def test_determine_doc_number_annual(self, fetch_notice_json):
"""Verify that a document number can be pulled out of an annual
edition of the reg"""
fetch_notice_json.return_value = [
{'el': 1, 'document_number': '111-111'},
{'el': 2, 'document_number': '222-222'}]
xml_str = """<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="cfr.xsl"?>
<CFRGRANULE xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:noNamespaceSchemaLocation="CFRMergedXML.xsd">
<FDSYS>
<CFRTITLE>12</CFRTITLE>
<DATE>2013-01-01</DATE>
<ORIGINALDATE>2012-01-01</ORIGINALDATE>
</FDSYS>
</CFRGRANULE>"""
self.assertEqual(
'111-111', Builder.determine_doc_number(xml_str, '12', '34'))
args = fetch_notice_json.call_args
self.assertEqual(('12', '34'), args[0]) # positional args
self.assertEqual({'max_effective_date': '2012-01-01',
'only_final': True}, args[1]) # kw args
class LayerCacheAggregatorTests(TestCase):
def test_invalidate(self):
cache = LayerCacheAggregator()
cache._known_labels = set(['123', '123-1', '123-1-a',
'123-1-a-Interp'])
cache.invalidate(['123-2'])
self.assertEqual(cache._known_labels,
set(['123', '123-1', '123-1-a', '123-1-a-Interp']))
cache.invalidate(['123-2', '123-1-Interp'])
self.assertEqual(cache._known_labels, set(['123']))
class CheckpointerTests(TestCase):
def test_basic_serialization(self):
"""We should be able to store and retrieve an object. Verify that this
is occurring outside of local memory by comparing to the original."""
to_store = {"some": "value", 123: 456}
cp = Checkpointer(tempfile.mkdtemp())
cp.counter = 1
cp._serialize("a-tag", to_store)
to_store["some"] = "other"
result = cp._deserialize("a-tag")
self.assertEqual(result, {"some": "value", 123: 456})
self.assertEqual(to_store, {"some": "other", 123: 456})
cp.counter = 2
cp._serialize("a-tag", to_store)
to_store["some"] = "more"
result = cp._deserialize("a-tag")
self.assertEqual(result, {"some": "other", 123: 456})
self.assertEqual(to_store, {"some": "more", 123: 456})
cp.counter = 1
result = cp._deserialize("a-tag")
self.assertEqual(result, {"some": "value", 123: 456})
def test_tree_serialization(self):
"""Trees have embedded XML, which doesn't serialize well"""
tree = Node(
text="top", label=["111"], title="Reg 111", children=[
Node(text="inner", label=["111", "1"],
source_xml=etree.fromstring("""<tag>Hi</tag>"""))
])
cp = Checkpointer(tempfile.mkdtemp())
cp.checkpoint("a-tag", lambda: tree) # saving
cp._reset()
loaded = cp.checkpoint("a-tag", None) # would explode if not loaded
self.assertEqual(repr(tree), repr(loaded))
self.assertEqual(
etree.tostring(tree.children[0].source_xml),
etree.tostring(loaded.children[0].source_xml))
def test_dont_load_later_elements(self):
"""If a checkpoint is executed, we should not load any later
checkpoints. This allows a user to delete, say step 5, and effectively
rebuild from that checkpoint."""
cp = Checkpointer(tempfile.mkdtemp())
self.assertEqual(cp.checkpoint("1", lambda: 1), 1)
self.assertEqual(cp.checkpoint("2", lambda: 2), 2)
self.assertEqual(cp.checkpoint("3", lambda: 3), 3)
cp._reset()
self.assertEqual(cp.checkpoint("1", lambda: -1), 1)
self.assertEqual(cp.checkpoint("2", lambda: -2, force=True), -2)
self.assertEqual(cp.checkpoint("3", lambda: -3), -3)
def test_exception_reading(self):
"""If a file exists but is not the correct format, we expect
deserialization to gracefully fail (rather than exploding)"""
cp = Checkpointer(tempfile.mkdtemp())
self.assertEqual(1, cp.checkpoint("1", lambda: 1))
with open(cp._filename("1"), "w") as written_file:
written_file.write("")
cp._reset()
# pickle will raise an exception, so we will recompute
self.assertEqual(-1, cp.checkpoint("1", lambda: -1))
def test_filename(self):
"""Verify that an appropriate file name is generated in an appropriate
folder"""
file_path = tempfile.mkdtemp() + os.path.join('some', 'depth', 'here')
cp = Checkpointer(file_path)
cp.counter = 25
filename = cp._filename('A WeIrD TaG')
self.assertTrue(os.path.join('some', 'depth', 'here') in filename)
self.assertTrue('25' in filename)
self.assertTrue('aweirdtag' in filename)
def test_dirs_created(self):
"""If the full path does not exist, it is created"""
file_path = tempfile.mkdtemp() + os.path.join('some', 'depth', 'here')
Checkpointer(file_path)
self.assertTrue(os.path.isdir(file_path))
|
|
# Copyright 2015-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from botocore.exceptions import ClientError
import boto3
import copy
from datetime import datetime
import functools
import json
import itertools
import logging
import os
import random
import threading
import time
import ipaddress
import six
# Try to play nice in the lambda exec environment
# where we don't require yaml
try:
import yaml
except ImportError: # pragma: no cover
yaml = None
else:
try:
from yaml import CSafeLoader
SafeLoader = CSafeLoader
except ImportError: # pragma: no cover
try:
from yaml import SafeLoader
except ImportError:
SafeLoader = None
log = logging.getLogger('custodian.utils')
class VarsSubstitutionError(Exception):
pass
class Bag(dict):
def __getattr__(self, k):
try:
return self[k]
except KeyError:
raise AttributeError(k)
def load_file(path, format=None, vars=None):
if format is None:
format = 'yaml'
_, ext = os.path.splitext(path)
if ext[1:] == 'json':
format = 'json'
with open(path) as fh:
contents = fh.read()
if vars:
try:
contents = contents.format(**vars)
except IndexError as e:
msg = 'Failed to substitute variable by positional argument.'
raise VarsSubstitutionError(msg)
except KeyError as e:
                # str(e) works on both Python 2 and 3 (exception.message is py2-only)
                msg = 'Failed to substitute variables. KeyError on "{}"'.format(str(e))
raise VarsSubstitutionError(msg)
if format == 'yaml':
try:
return yaml_load(contents)
except yaml.YAMLError as e:
log.error('Error while loading yaml file %s', path)
log.error('Skipping this file. Error message below:\n%s', e)
return None
elif format == 'json':
return loads(contents)
def yaml_load(value):
if yaml is None:
raise RuntimeError("Yaml not available")
return yaml.load(value, Loader=SafeLoader)
def loads(body):
return json.loads(body)
def dumps(data, fh=None, indent=0):
if fh:
return json.dump(data, fh, cls=DateTimeEncoder, indent=indent)
else:
return json.dumps(data, cls=DateTimeEncoder, indent=indent)
def format_event(evt):
return json.dumps(evt, indent=2)
def type_schema(
type_name, inherits=None, rinherit=None,
aliases=None, required=None, **props):
"""jsonschema generation helper
params:
- type_name: name of the type
- inherits: list of document fragments that are required via anyOf[$ref]
- rinherit: use another schema as a base for this, basically work around
inherits issues with additionalProperties and type enums.
- aliases: additional names this type maybe called
- required: list of required properties, by default 'type' is required
- props: additional key value properties
"""
if aliases:
type_names = [type_name]
type_names.extend(aliases)
else:
type_names = [type_name]
if rinherit:
s = copy.deepcopy(rinherit)
s['properties']['type'] = {'enum': type_names}
else:
s = {
'type': 'object',
'properties': {
'type': {'enum': type_names}}}
# Ref based inheritance and additional properties don't mix well.
# http://goo.gl/8UyRvQ
if not inherits:
s['additionalProperties'] = False
s['properties'].update(props)
if not required:
required = []
if isinstance(required, list):
required.append('type')
s['required'] = required
if inherits:
extended = s
s = {'allOf': [{'$ref': i} for i in inherits]}
s['allOf'].append(extended)
return s
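# Illustrative example (not part of the original module; the filter name is
# hypothetical):
#   type_schema('image-age', days={'type': 'number'})
#   # -> {'type': 'object', 'additionalProperties': False, 'required': ['type'],
#   #     'properties': {'type': {'enum': ['image-age']},
#   #                    'days': {'type': 'number'}}}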
class DateTimeEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime):
return obj.isoformat()
return json.JSONEncoder.default(self, obj)
def group_by(resources, key):
resource_map = {}
for r in resources:
resource_map.setdefault(r.get(key), []).append(r)
return resource_map
def chunks(iterable, size=50):
"""Break an iterable into lists of size"""
batch = []
for n in iterable:
batch.append(n)
if len(batch) % size == 0:
yield batch
batch = []
if batch:
yield batch
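# Illustrative example (not part of the original module):
#   list(chunks(range(5), size=2))   # -> [[0, 1], [2, 3], [4]]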
def camelResource(obj):
"""Some sources from apis return lowerCased where as describe calls
always return TitleCase, this function turns the former to the later
"""
if not isinstance(obj, dict):
return obj
for k in list(obj.keys()):
v = obj.pop(k)
obj["%s%s" % (k[0].upper(), k[1:])] = v
if isinstance(v, dict):
camelResource(v)
        elif isinstance(v, list):
            # map() is lazy on Python 3, so iterate explicitly for the side effects
            for item in v:
                camelResource(item)
return obj
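# Illustrative example (not part of the original module; values are hypothetical):
#   camelResource({'instanceId': 'i-0abc', 'tags': [{'key': 'Env', 'value': 'dev'}]})
#   # -> {'InstanceId': 'i-0abc', 'Tags': [{'Key': 'Env', 'Value': 'dev'}]}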
def get_account_id_from_sts(session):
response = session.client('sts').get_caller_identity()
return response.get('Account')
def query_instances(session, client=None, **query):
"""Return a list of ec2 instances for the query.
"""
if client is None:
client = session.client('ec2')
p = client.get_paginator('describe_instances')
results = p.paginate(**query)
return list(itertools.chain(
*[r["Instances"] for r in itertools.chain(
*[pp['Reservations'] for pp in results])]))
CONN_CACHE = threading.local()
def local_session(factory):
"""Cache a session thread local for up to 45m"""
s = getattr(CONN_CACHE, 'session', None)
t = getattr(CONN_CACHE, 'time', 0)
n = time.time()
if s is not None and t + (60 * 45) > n:
return s
s = factory()
CONN_CACHE.session = s
CONN_CACHE.time = n
return s
def reset_session_cache():
setattr(CONN_CACHE, 'session', None)
setattr(CONN_CACHE, 'time', 0)
def annotation(i, k):
return i.get(k, ())
def set_annotation(i, k, v):
"""
>>> x = {}
>>> set_annotation(x, 'marker', 'a')
>>> annotation(x, 'marker')
['a']
"""
if not isinstance(i, dict):
raise ValueError("Can only annotate dictionaries")
if not isinstance(v, list):
v = [v]
if k in i:
ev = i.get(k)
if isinstance(ev, list):
ev.extend(v)
else:
i[k] = v
def parse_s3(s3_path):
if not s3_path.startswith('s3://'):
raise ValueError("invalid s3 path")
ridx = s3_path.find('/', 5)
if ridx == -1:
ridx = None
bucket = s3_path[5:ridx]
s3_path = s3_path.rstrip('/')
if ridx is None:
key_prefix = ""
else:
key_prefix = s3_path[s3_path.find('/', 5):]
return s3_path, bucket, key_prefix
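# Illustrative example (not part of the original module; the bucket is hypothetical):
#   parse_s3('s3://my-bucket/some/prefix')
#   # -> ('s3://my-bucket/some/prefix', 'my-bucket', '/some/prefix')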
def generate_arn(
service, resource, partition='aws',
region=None, account_id=None, resource_type=None, separator='/'):
"""Generate an Amazon Resource Name.
See http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html.
"""
arn = 'arn:%s:%s:%s:%s:' % (
partition, service, region if region else '', account_id if account_id else '')
if resource_type:
arn = arn + '%s%s%s' % (resource_type, separator, resource)
else:
arn = arn + resource
return arn
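# Illustrative example (not part of the original module; the account id is made up):
#   generate_arn('rds', 'mydb', region='us-east-1', account_id='123456789012',
#                resource_type='db', separator=':')
#   # -> 'arn:aws:rds:us-east-1:123456789012:db:mydb'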
def snapshot_identifier(prefix, db_identifier):
"""Return an identifier for a snapshot of a database or cluster.
"""
now = datetime.now()
return '%s-%s-%s' % (prefix, db_identifier, now.strftime('%Y-%m-%d'))
def get_retry(codes=(), max_attempts=8, min_delay=1, log_retries=False):
"""Decorator for retry boto3 api call on transient errors.
https://www.awsarchitectureblog.com/2015/03/backoff.html
https://en.wikipedia.org/wiki/Exponential_backoff
:param codes: A sequence of retryable error codes.
:param max_attempts: The max number of retries, by default the delay
time is proportional to the max number of attempts.
    :param log_retries: Whether retries should be logged; if truthy, it
       gives the logging level at which retries are logged.
    :param _max_delay: The maximum delay for any retry interval. *Note*:
       this parameter is only exposed for unit testing, as it is
       derived from the number of attempts.
Returns a function for invoking aws client calls that
retries on retryable error codes.
"""
max_delay = max(min_delay, 2) ** max_attempts
def _retry(func, *args, **kw):
for idx, delay in enumerate(
backoff_delays(min_delay, max_delay, jitter=True)):
try:
return func(*args, **kw)
except ClientError as e:
if e.response['Error']['Code'] not in codes:
raise
elif idx == max_attempts - 1:
raise
if log_retries:
worker_log.log(
log_retries,
"retrying %s on error:%s attempt:%d last delay:%0.2f",
func, e.response['Error']['Code'], idx, delay)
time.sleep(delay)
return _retry
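# Illustrative sketch (the error codes are examples of transient AWS
# throttling codes): get_retry returns a callable that invokes the wrapped
# boto3 call and retries it with jittered exponential backoff.
def _example_get_retry(session):
    client = session.client('ec2')
    retry = get_retry(('Throttling', 'RequestLimitExceeded'))
    return retry(client.describe_instances)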
def backoff_delays(start, stop, factor=2.0, jitter=False):
"""Geometric backoff sequence w/ jitter
"""
cur = start
while cur <= stop:
if jitter:
yield cur - (cur * random.random())
else:
yield cur
cur = cur * factor
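# Illustrative sketch: the delays produced for a sequence starting at 1s and
# capped at 16s, without jitter.
def _example_backoff_delays():
    return list(backoff_delays(1, 16))  # [1, 2.0, 4.0, 8.0, 16.0]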
def parse_cidr(value):
"""Process cidr ranges."""
klass = IPv4Network
if '/' not in value:
klass = ipaddress.ip_address
try:
v = klass(six.text_type(value))
except (ipaddress.AddressValueError, ValueError):
v = None
return v
class IPv4Network(ipaddress.IPv4Network):
# Override for net 2 net containment comparison
def __contains__(self, other):
if isinstance(other, ipaddress._BaseNetwork):
return self.supernet_of(other)
return super(IPv4Network, self).__contains__(other)
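# Illustrative sketch: parse_cidr returns a network when the value contains a
# '/', otherwise a single address; the IPv4Network override above makes `in`
# cover net-in-net containment as well as address membership.
def _example_parse_cidr():
    net = parse_cidr('10.0.0.0/16')
    subnet = parse_cidr('10.0.1.0/24')
    addr = parse_cidr('10.0.1.5')
    return subnet in net, addr in net  # (True, True)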
worker_log = logging.getLogger('c7n.worker')
def worker(f):
"""Generic wrapper to log uncaught exceptions in a function.
When we cross concurrent.futures executor boundaries we lose our
traceback information, and when doing bulk operations we may tolerate
    transient failures on a partial subset. However, we still want a
    full accounting of the error in the logs, in a format that our error
    collection (cwl subscription) can still pick up.
"""
def _f(*args, **kw):
try:
return f(*args, **kw)
        except Exception:
worker_log.exception(
'Error invoking %s',
"%s.%s" % (f.__module__, f.__name__))
raise
functools.update_wrapper(_f, f)
return _f
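# Illustrative sketch: functions handed to an executor can be wrapped with
# @worker so any uncaught exception is logged before it propagates.
@worker
def _example_worker_task(resource_id):
    if not resource_id:
        raise ValueError("missing resource id")
    return resource_id.upper()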
def reformat_schema(model):
""" Reformat schema to be in a more displayable format. """
if not hasattr(model, 'schema'):
return "Model '{}' does not have a schema".format(model)
if 'properties' not in model.schema:
return "Schema in unexpected format."
ret = copy.deepcopy(model.schema['properties'])
if 'type' in ret:
del(ret['type'])
for key in model.schema.get('required', []):
if key in ret:
ret[key]['required'] = True
return ret
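# Illustrative sketch (FakeModel is a stand-in, not a real resource model):
# reformat_schema copies the JSON-schema properties, drops the 'type' entry,
# and marks required keys inline.
def _example_reformat_schema():
    class FakeModel(object):
        schema = {
            'type': 'object',
            'required': ['key'],
            'properties': {
                'type': {'enum': ['tag']},
                'key': {'type': 'string'},
            },
        }
    # -> {'key': {'type': 'string', 'required': True}}
    return reformat_schema(FakeModel)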
_profile_session = None
def get_profile_session(options):
global _profile_session
if _profile_session:
return _profile_session
profile = getattr(options, 'profile', None)
_profile_session = boto3.Session(profile_name=profile)
return _profile_session
# Copyright (c) 2016-2020, Neil Booth
# Copyright (c) 2017, the ElectrumX authors
#
# All rights reserved.
#
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.
'''Interface to the blockchain database.'''
import array
import ast
import os
import time
from bisect import bisect_right
from collections import namedtuple
from glob import glob
from struct import Struct
import attr
from aiorpcx import run_in_thread, sleep
import electrumx.lib.util as util
from electrumx.lib.hash import hash_to_hex_str
from electrumx.lib.merkle import Merkle, MerkleCache
from electrumx.lib.util import (
formatted_time, pack_be_uint16, pack_be_uint32, pack_le_uint64, pack_le_uint32,
unpack_le_uint32, unpack_be_uint32, unpack_le_uint64
)
from electrumx.server.storage import db_class
from electrumx.server.history import History
UTXO = namedtuple("UTXO", "tx_num tx_pos tx_hash height value")
@attr.s(slots=True)
class FlushData(object):
height = attr.ib()
tx_count = attr.ib()
headers = attr.ib()
block_tx_hashes = attr.ib()
# The following are flushed to the UTXO DB if undo_infos is not None
undo_infos = attr.ib()
adds = attr.ib()
deletes = attr.ib()
tip = attr.ib()
class DB(object):
'''Simple wrapper of the backend database for querying.
Performs no DB update, though the DB will be cleaned on opening if
it was shutdown uncleanly.
'''
DB_VERSIONS = [6, 7, 8]
class DBError(Exception):
'''Raised on general DB errors generally indicating corruption.'''
def __init__(self, env):
self.logger = util.class_logger(__name__, self.__class__.__name__)
self.env = env
self.coin = env.coin
# Setup block header size handlers
if self.coin.STATIC_BLOCK_HEADERS:
self.header_offset = self.coin.static_header_offset
self.header_len = self.coin.static_header_len
else:
self.header_offset = self.dynamic_header_offset
self.header_len = self.dynamic_header_len
self.logger.info(f'switching current directory to {env.db_dir}')
os.chdir(env.db_dir)
self.db_class = db_class(self.env.db_engine)
self.history = History()
self.utxo_db = None
self.utxo_flush_count = 0
self.fs_height = -1
self.fs_tx_count = 0
self.db_height = -1
self.db_tx_count = 0
self.db_tip = None
self.tx_counts = None
self.last_flush = time.time()
self.last_flush_tx_count = 0
self.wall_time = 0
self.first_sync = True
self.db_version = -1
self.logger.info(f'using {self.env.db_engine} for DB backend')
# Header merkle cache
self.merkle = Merkle()
self.header_mc = MerkleCache(self.merkle, self.fs_block_hashes)
self.headers_file = util.LogicalFile('meta/headers', 2, 16000000)
self.tx_counts_file = util.LogicalFile('meta/txcounts', 2, 2000000)
self.hashes_file = util.LogicalFile('meta/hashes', 4, 16000000)
if not self.coin.STATIC_BLOCK_HEADERS:
self.headers_offsets_file = util.LogicalFile(
'meta/headers_offsets', 2, 16000000)
async def _read_tx_counts(self):
if self.tx_counts is not None:
return
# tx_counts[N] has the cumulative number of txs at the end of
# height N. So tx_counts[0] is 1 - the genesis coinbase
size = (self.db_height + 1) * 8
tx_counts = self.tx_counts_file.read(0, size)
assert len(tx_counts) == size
self.tx_counts = array.array('Q', tx_counts)
if self.tx_counts:
assert self.db_tx_count == self.tx_counts[-1]
else:
assert self.db_tx_count == 0
async def _open_dbs(self, for_sync, compacting):
assert self.utxo_db is None
# First UTXO DB
self.utxo_db = self.db_class('utxo', for_sync)
if self.utxo_db.is_new:
self.logger.info('created new database')
self.logger.info('creating metadata directory')
os.mkdir('meta')
with util.open_file('COIN', create=True) as f:
f.write(f'ElectrumX databases and metadata for '
f'{self.coin.NAME} {self.coin.NET}'.encode())
if not self.coin.STATIC_BLOCK_HEADERS:
self.headers_offsets_file.write(0, bytes(8))
else:
self.logger.info(f'opened UTXO DB (for sync: {for_sync})')
self.read_utxo_state()
# Then history DB
self.utxo_flush_count = self.history.open_db(self.db_class, for_sync,
self.utxo_flush_count,
compacting)
self.clear_excess_undo_info()
# Read TX counts (requires meta directory)
await self._read_tx_counts()
async def open_for_compacting(self):
await self._open_dbs(True, True)
async def open_for_sync(self):
'''Open the databases to sync to the daemon.
When syncing we want to reserve a lot of open files for the
synchronization. When serving clients we want the open files for
serving network connections.
'''
await self._open_dbs(True, False)
async def open_for_serving(self):
'''Open the databases for serving. If they are already open they are
closed first.
'''
if self.utxo_db:
self.logger.info('closing DBs to re-open for serving')
self.utxo_db.close()
self.history.close_db()
self.utxo_db = None
await self._open_dbs(False, False)
# Header merkle cache
async def populate_header_merkle_cache(self):
self.logger.info('populating header merkle cache...')
length = max(1, self.db_height - self.env.reorg_limit)
start = time.time()
await self.header_mc.initialize(length)
elapsed = time.time() - start
self.logger.info(f'header merkle cache populated in {elapsed:.1f}s')
async def header_branch_and_root(self, length, height):
return await self.header_mc.branch_and_root(length, height)
# Flushing
def assert_flushed(self, flush_data):
'''Asserts state is fully flushed.'''
assert flush_data.tx_count == self.fs_tx_count == self.db_tx_count
assert flush_data.height == self.fs_height == self.db_height
assert flush_data.tip == self.db_tip
assert not flush_data.headers
assert not flush_data.block_tx_hashes
assert not flush_data.adds
assert not flush_data.deletes
assert not flush_data.undo_infos
self.history.assert_flushed()
def flush_dbs(self, flush_data, flush_utxos, estimate_txs_remaining):
'''Flush out cached state. History is always flushed; UTXOs are
flushed if flush_utxos.'''
if flush_data.height == self.db_height:
self.assert_flushed(flush_data)
return
start_time = time.time()
prior_flush = self.last_flush
tx_delta = flush_data.tx_count - self.last_flush_tx_count
# Flush to file system
self.flush_fs(flush_data)
# Then history
self.flush_history()
# Flush state last as it reads the wall time.
with self.utxo_db.write_batch() as batch:
if flush_utxos:
self.flush_utxo_db(batch, flush_data)
self.flush_state(batch)
# Update and put the wall time again - otherwise we drop the
# time it took to commit the batch
self.flush_state(self.utxo_db)
elapsed = self.last_flush - start_time
self.logger.info(f'flush #{self.history.flush_count:,d} took '
f'{elapsed:.1f}s. Height {flush_data.height:,d} '
f'txs: {flush_data.tx_count:,d} ({tx_delta:+,d})')
# Catch-up stats
if self.utxo_db.for_sync:
flush_interval = self.last_flush - prior_flush
tx_per_sec_gen = int(flush_data.tx_count / self.wall_time)
tx_per_sec_last = 1 + int(tx_delta / flush_interval)
eta = estimate_txs_remaining() / tx_per_sec_last
self.logger.info(f'tx/sec since genesis: {tx_per_sec_gen:,d}, '
f'since last flush: {tx_per_sec_last:,d}')
self.logger.info(f'sync time: {formatted_time(self.wall_time)} '
f'ETA: {formatted_time(eta)}')
def flush_fs(self, flush_data):
'''Write headers, tx counts and block tx hashes to the filesystem.
The first height to write is self.fs_height + 1. The FS
metadata is all append-only, so in a crash we just pick up
again from the height stored in the DB.
'''
prior_tx_count = (self.tx_counts[self.fs_height]
if self.fs_height >= 0 else 0)
assert len(flush_data.block_tx_hashes) == len(flush_data.headers)
assert flush_data.height == self.fs_height + len(flush_data.headers)
assert flush_data.tx_count == (self.tx_counts[-1] if self.tx_counts
else 0)
assert len(self.tx_counts) == flush_data.height + 1
hashes = b''.join(flush_data.block_tx_hashes)
flush_data.block_tx_hashes.clear()
assert len(hashes) % 32 == 0
assert len(hashes) // 32 == flush_data.tx_count - prior_tx_count
# Write the headers, tx counts, and tx hashes
start_time = time.time()
height_start = self.fs_height + 1
offset = self.header_offset(height_start)
self.headers_file.write(offset, b''.join(flush_data.headers))
self.fs_update_header_offsets(offset, height_start, flush_data.headers)
flush_data.headers.clear()
offset = height_start * self.tx_counts.itemsize
self.tx_counts_file.write(offset,
self.tx_counts[height_start:].tobytes())
offset = prior_tx_count * 32
self.hashes_file.write(offset, hashes)
self.fs_height = flush_data.height
self.fs_tx_count = flush_data.tx_count
if self.utxo_db.for_sync:
elapsed = time.time() - start_time
self.logger.info(f'flushed filesystem data in {elapsed:.2f}s')
def flush_history(self):
self.history.flush()
def flush_utxo_db(self, batch, flush_data):
'''Flush the cached DB writes and UTXO set to the batch.'''
# Care is needed because the writes generated by flushing the
# UTXO state may have keys in common with our write cache or
# may be in the DB already.
start_time = time.time()
add_count = len(flush_data.adds)
spend_count = len(flush_data.deletes) // 2
# Spends
batch_delete = batch.delete
for key in sorted(flush_data.deletes):
batch_delete(key)
flush_data.deletes.clear()
# New UTXOs
batch_put = batch.put
for key, value in flush_data.adds.items():
# suffix = tx_idx + tx_num
hashX = value[:-13]
suffix = key[-4:] + value[-13:-8]
batch_put(b'h' + key[:4] + suffix, hashX)
batch_put(b'u' + hashX + suffix, value[-8:])
flush_data.adds.clear()
# New undo information
self.flush_undo_infos(batch_put, flush_data.undo_infos)
flush_data.undo_infos.clear()
if self.utxo_db.for_sync:
block_count = flush_data.height - self.db_height
tx_count = flush_data.tx_count - self.db_tx_count
elapsed = time.time() - start_time
self.logger.info(f'flushed {block_count:,d} blocks with '
f'{tx_count:,d} txs, {add_count:,d} UTXO adds, '
f'{spend_count:,d} spends in '
f'{elapsed:.1f}s, committing...')
self.utxo_flush_count = self.history.flush_count
self.db_height = flush_data.height
self.db_tx_count = flush_data.tx_count
self.db_tip = flush_data.tip
def flush_state(self, batch):
'''Flush chain state to the batch.'''
now = time.time()
self.wall_time += now - self.last_flush
self.last_flush = now
self.last_flush_tx_count = self.fs_tx_count
self.write_utxo_state(batch)
def flush_backup(self, flush_data, touched):
'''Like flush_dbs() but when backing up. All UTXOs are flushed.'''
assert not flush_data.headers
assert not flush_data.block_tx_hashes
assert flush_data.height < self.db_height
self.history.assert_flushed()
start_time = time.time()
tx_delta = flush_data.tx_count - self.last_flush_tx_count
self.backup_fs(flush_data.height, flush_data.tx_count)
self.history.backup(touched, flush_data.tx_count)
with self.utxo_db.write_batch() as batch:
self.flush_utxo_db(batch, flush_data)
# Flush state last as it reads the wall time.
self.flush_state(batch)
elapsed = self.last_flush - start_time
self.logger.info(f'backup flush #{self.history.flush_count:,d} took '
f'{elapsed:.1f}s. Height {flush_data.height:,d} '
f'txs: {flush_data.tx_count:,d} ({tx_delta:+,d})')
def fs_update_header_offsets(self, offset_start, height_start, headers):
if self.coin.STATIC_BLOCK_HEADERS:
return
offset = offset_start
offsets = []
for h in headers:
offset += len(h)
offsets.append(pack_le_uint64(offset))
# For each header we get the offset of the next header, hence we
# start writing from the next height
pos = (height_start + 1) * 8
self.headers_offsets_file.write(pos, b''.join(offsets))
def dynamic_header_offset(self, height):
assert not self.coin.STATIC_BLOCK_HEADERS
offset, = unpack_le_uint64(self.headers_offsets_file.read(height * 8, 8))
return offset
def dynamic_header_len(self, height):
return self.dynamic_header_offset(height + 1)\
- self.dynamic_header_offset(height)
def backup_fs(self, height, tx_count):
'''Back up during a reorg. This just updates our pointers.'''
self.fs_height = height
self.fs_tx_count = tx_count
# Truncate header_mc: header count is 1 more than the height.
self.header_mc.truncate(height + 1)
async def raw_header(self, height):
'''Return the binary header at the given height.'''
header, n = await self.read_headers(height, 1)
if n != 1:
raise IndexError(f'height {height:,d} out of range')
return header
async def read_headers(self, start_height, count):
'''Requires start_height >= 0, count >= 0. Reads as many headers as
are available starting at start_height up to count. This
would be zero if start_height is beyond self.db_height, for
example.
Returns a (binary, n) pair where binary is the concatenated
binary headers, and n is the count of headers returned.
'''
if start_height < 0 or count < 0:
raise self.DBError(f'{count:,d} headers starting at '
f'{start_height:,d} not on disk')
def read_headers():
# Read some from disk
disk_count = max(0, min(count, self.db_height + 1 - start_height))
if disk_count:
offset = self.header_offset(start_height)
size = self.header_offset(start_height + disk_count) - offset
return self.headers_file.read(offset, size), disk_count
return b'', 0
return await run_in_thread(read_headers)
def fs_tx_hash(self, tx_num):
'''Return a pair (tx_hash, tx_height) for the given tx number.
If the tx_height is not on disk, returns (None, tx_height).'''
tx_height = bisect_right(self.tx_counts, tx_num)
if tx_height > self.db_height:
tx_hash = None
else:
tx_hash = self.hashes_file.read(tx_num * 32, 32)
return tx_hash, tx_height
def fs_tx_hashes_at_blockheight(self, block_height):
'''Return a list of tx_hashes at given block height,
in the same order as in the block.
'''
if block_height > self.db_height:
raise self.DBError(f'block {block_height:,d} not on disk (>{self.db_height:,d})')
assert block_height >= 0
if block_height > 0:
first_tx_num = self.tx_counts[block_height - 1]
else:
first_tx_num = 0
num_txs_in_block = self.tx_counts[block_height] - first_tx_num
tx_hashes = self.hashes_file.read(first_tx_num * 32, num_txs_in_block * 32)
assert num_txs_in_block == len(tx_hashes) // 32
return [tx_hashes[idx * 32: (idx+1) * 32] for idx in range(num_txs_in_block)]
async def tx_hashes_at_blockheight(self, block_height):
return await run_in_thread(self.fs_tx_hashes_at_blockheight, block_height)
async def fs_block_hashes(self, height, count):
headers_concat, headers_count = await self.read_headers(height, count)
if headers_count != count:
raise self.DBError('only got {:,d} headers starting at {:,d}, not '
'{:,d}'.format(headers_count, height, count))
offset = 0
headers = []
for n in range(count):
hlen = self.header_len(height + n)
headers.append(headers_concat[offset:offset + hlen])
offset += hlen
return [self.coin.header_hash(header) for header in headers]
async def limited_history(self, hashX, *, limit=1000):
'''Return an unpruned, sorted list of (tx_hash, height) tuples of
confirmed transactions that touched the address, earliest in
the blockchain first. Includes both spending and receiving
transactions. By default returns at most 1000 entries. Set
limit to None to get them all.
'''
def read_history():
tx_nums = list(self.history.get_txnums(hashX, limit))
fs_tx_hash = self.fs_tx_hash
return [fs_tx_hash(tx_num) for tx_num in tx_nums]
while True:
history = await run_in_thread(read_history)
if all(hash is not None for hash, height in history):
return history
self.logger.warning(f'limited_history: tx hash '
f'not found (reorg?), retrying...')
await sleep(0.25)
# -- Undo information
def min_undo_height(self, max_height):
'''Returns a height from which we should store undo info.'''
return max_height - self.env.reorg_limit + 1
def undo_key(self, height):
'''DB key for undo information at the given height.'''
return b'U' + pack_be_uint32(height)
def read_undo_info(self, height):
'''Read undo information from a file for the current height.'''
return self.utxo_db.get(self.undo_key(height))
def flush_undo_infos(self, batch_put, undo_infos):
'''undo_infos is a list of (undo_info, height) pairs.'''
for undo_info, height in undo_infos:
batch_put(self.undo_key(height), b''.join(undo_info))
def raw_block_prefix(self):
return 'meta/block'
def raw_block_path(self, height):
return f'{self.raw_block_prefix()}{height:d}'
def read_raw_block(self, height):
'''Returns a raw block read from disk. Raises FileNotFoundError
if the block isn't on-disk.'''
with util.open_file(self.raw_block_path(height)) as f:
return f.read(-1)
def write_raw_block(self, block, height):
'''Write a raw block to disk.'''
with util.open_truncate(self.raw_block_path(height)) as f:
f.write(block)
# Delete old blocks to prevent them accumulating
try:
del_height = self.min_undo_height(height) - 1
os.remove(self.raw_block_path(del_height))
except FileNotFoundError:
pass
def clear_excess_undo_info(self):
'''Clear excess undo info. Only most recent N are kept.'''
prefix = b'U'
min_height = self.min_undo_height(self.db_height)
keys = []
for key, _hist in self.utxo_db.iterator(prefix=prefix):
height, = unpack_be_uint32(key[-4:])
if height >= min_height:
break
keys.append(key)
if keys:
with self.utxo_db.write_batch() as batch:
for key in keys:
batch.delete(key)
self.logger.info(f'deleted {len(keys):,d} stale undo entries')
# delete old block files
prefix = self.raw_block_prefix()
paths = [path for path in glob(f'{prefix}[0-9]*')
if len(path) > len(prefix)
and int(path[len(prefix):]) < min_height]
if paths:
for path in paths:
try:
os.remove(path)
except FileNotFoundError:
pass
self.logger.info(f'deleted {len(paths):,d} stale block files')
# -- UTXO database
def read_utxo_state(self):
state = self.utxo_db.get(b'state')
if not state:
self.db_height = -1
self.db_tx_count = 0
self.db_tip = b'\0' * 32
self.db_version = max(self.DB_VERSIONS)
self.utxo_flush_count = 0
self.wall_time = 0
self.first_sync = True
else:
state = ast.literal_eval(state.decode())
if not isinstance(state, dict):
raise self.DBError('failed reading state from DB')
self.db_version = state['db_version']
if self.db_version not in self.DB_VERSIONS:
raise self.DBError('your UTXO DB version is {} but this '
'software only handles versions {}'
.format(self.db_version, self.DB_VERSIONS))
# backwards compat
genesis_hash = state['genesis']
if isinstance(genesis_hash, bytes):
genesis_hash = genesis_hash.decode()
if genesis_hash != self.coin.GENESIS_HASH:
raise self.DBError('DB genesis hash {} does not match coin {}'
.format(genesis_hash,
self.coin.GENESIS_HASH))
self.db_height = state['height']
self.db_tx_count = state['tx_count']
self.db_tip = state['tip']
self.utxo_flush_count = state['utxo_flush_count']
self.wall_time = state['wall_time']
self.first_sync = state['first_sync']
# These are our state as we move ahead of DB state
self.fs_height = self.db_height
self.fs_tx_count = self.db_tx_count
self.last_flush_tx_count = self.fs_tx_count
# Upgrade DB
if self.db_version != max(self.DB_VERSIONS):
self.upgrade_db()
# Log some stats
self.logger.info('UTXO DB version: {:d}'.format(self.db_version))
self.logger.info('coin: {}'.format(self.coin.NAME))
self.logger.info('network: {}'.format(self.coin.NET))
self.logger.info('height: {:,d}'.format(self.db_height))
self.logger.info('tip: {}'.format(hash_to_hex_str(self.db_tip)))
self.logger.info('tx count: {:,d}'.format(self.db_tx_count))
if self.utxo_db.for_sync:
self.logger.info(f'flushing DB cache at {self.env.cache_MB:,d} MB')
if self.first_sync:
self.logger.info('sync time so far: {}'
.format(util.formatted_time(self.wall_time)))
def upgrade_db(self):
self.logger.info(f'UTXO DB version: {self.db_version}')
self.logger.info('Upgrading your DB; this can take some time...')
def upgrade_u_prefix(prefix):
count = 0
with self.utxo_db.write_batch() as batch:
batch_delete = batch.delete
batch_put = batch.put
# Key: b'u' + address_hashX + tx_idx + tx_num
for db_key, db_value in self.utxo_db.iterator(prefix=prefix):
if len(db_key) == 21:
                        # Already upgraded for this prefix; nothing to count.
                        return 0
break
if self.db_version == 6:
for db_key, db_value in self.utxo_db.iterator(prefix=prefix):
count += 1
batch_delete(db_key)
batch_put(db_key[:14] + b'\0\0' + db_key[14:] + b'\0', db_value)
else:
for db_key, db_value in self.utxo_db.iterator(prefix=prefix):
count += 1
batch_delete(db_key)
batch_put(db_key + b'\0', db_value)
return count
last = time.time()
count = 0
for cursor in range(65536):
prefix = b'u' + pack_be_uint16(cursor)
count += upgrade_u_prefix(prefix)
now = time.time()
if now > last + 10:
last = now
self.logger.info(f'DB 1 of 3: {count:,d} entries updated, '
f'{cursor * 100 / 65536:.1f}% complete')
self.logger.info('DB 1 of 3 upgraded successfully')
def upgrade_h_prefix(prefix):
count = 0
with self.utxo_db.write_batch() as batch:
batch_delete = batch.delete
batch_put = batch.put
# Key: b'h' + compressed_tx_hash + tx_idx + tx_num
for db_key, db_value in self.utxo_db.iterator(prefix=prefix):
if len(db_key) == 14:
                        # Already upgraded for this prefix; nothing to count.
                        return 0
break
if self.db_version == 6:
for db_key, db_value in self.utxo_db.iterator(prefix=prefix):
count += 1
batch_delete(db_key)
batch_put(db_key[:7] + b'\0\0' + db_key[7:] + b'\0', db_value)
else:
for db_key, db_value in self.utxo_db.iterator(prefix=prefix):
count += 1
batch_delete(db_key)
batch_put(db_key + b'\0', db_value)
return count
last = time.time()
count = 0
for cursor in range(65536):
prefix = b'h' + pack_be_uint16(cursor)
count += upgrade_h_prefix(prefix)
now = time.time()
if now > last + 10:
last = now
self.logger.info(f'DB 2 of 3: {count:,d} entries updated, '
f'{cursor * 100 / 65536:.1f}% complete')
        self.logger.info('DB 2 of 3 upgraded successfully')
        # Upgrade tx_counts file
size = (self.db_height + 1) * 8
tx_counts = self.tx_counts_file.read(0, size)
if len(tx_counts) == (self.db_height + 1) * 4:
tx_counts = array.array('I', tx_counts)
tx_counts = array.array('Q', tx_counts)
self.tx_counts_file.write(0, tx_counts.tobytes())
self.db_version = max(self.DB_VERSIONS)
with self.utxo_db.write_batch() as batch:
self.write_utxo_state(batch)
        self.logger.info('DB 3 of 3 upgraded successfully')
def write_utxo_state(self, batch):
'''Write (UTXO) state to the batch.'''
state = {
'genesis': self.coin.GENESIS_HASH,
'height': self.db_height,
'tx_count': self.db_tx_count,
'tip': self.db_tip,
'utxo_flush_count': self.utxo_flush_count,
'wall_time': self.wall_time,
'first_sync': self.first_sync,
'db_version': self.db_version,
}
batch.put(b'state', repr(state).encode())
def set_flush_count(self, count):
self.utxo_flush_count = count
with self.utxo_db.write_batch() as batch:
self.write_utxo_state(batch)
async def all_utxos(self, hashX):
'''Return all UTXOs for an address sorted in no particular order.'''
def read_utxos():
utxos = []
utxos_append = utxos.append
# Key: b'u' + address_hashX + tx_idx + tx_num
# Value: the UTXO value as a 64-bit unsigned integer
prefix = b'u' + hashX
for db_key, db_value in self.utxo_db.iterator(prefix=prefix):
tx_pos, = unpack_le_uint32(db_key[-9:-5])
tx_num, = unpack_le_uint64(db_key[-5:] + bytes(3))
value, = unpack_le_uint64(db_value)
tx_hash, height = self.fs_tx_hash(tx_num)
utxos_append(UTXO(tx_num, tx_pos, tx_hash, height, value))
return utxos
while True:
utxos = await run_in_thread(read_utxos)
if all(utxo.tx_hash is not None for utxo in utxos):
return utxos
self.logger.warning(f'all_utxos: tx hash not '
f'found (reorg?), retrying...')
await sleep(0.25)
async def lookup_utxos(self, prevouts):
'''For each prevout, lookup it up in the DB and return a (hashX,
value) pair or None if not found.
Used by the mempool code.
'''
def lookup_hashXs():
'''Return (hashX, suffix) pairs, or None if not found,
for each prevout.
'''
def lookup_hashX(tx_hash, tx_idx):
idx_packed = pack_le_uint32(tx_idx)
# Key: b'h' + compressed_tx_hash + tx_idx + tx_num
# Value: hashX
prefix = b'h' + tx_hash[:4] + idx_packed
# Find which entry, if any, the TX_HASH matches.
for db_key, hashX in self.utxo_db.iterator(prefix=prefix):
tx_num_packed = db_key[-5:]
tx_num, = unpack_le_uint64(tx_num_packed + bytes(3))
hash, _height = self.fs_tx_hash(tx_num)
if hash == tx_hash:
return hashX, idx_packed + tx_num_packed
return None, None
return [lookup_hashX(*prevout) for prevout in prevouts]
def lookup_utxos(hashX_pairs):
def lookup_utxo(hashX, suffix):
if not hashX:
# This can happen when the daemon is a block ahead
# of us and has mempool txs spending outputs from
# that new block
return None
# Key: b'u' + address_hashX + tx_idx + tx_num
# Value: the UTXO value as a 64-bit unsigned integer
key = b'u' + hashX + suffix
db_value = self.utxo_db.get(key)
if not db_value:
# This can happen if the DB was updated between
# getting the hashXs and getting the UTXOs
return None
value, = unpack_le_uint64(db_value)
return hashX, value
return [lookup_utxo(*hashX_pair) for hashX_pair in hashX_pairs]
hashX_pairs = await run_in_thread(lookup_hashXs)
return await run_in_thread(lookup_utxos, hashX_pairs)
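# Illustrative sketch (standalone helper, not part of the original class; the
# layout and field sizes are taken from the comments above): a UTXO DB entry
# is keyed by b'u' + hashX + tx_idx (4-byte LE) + tx_num (5-byte LE), and its
# value is the output amount as an 8-byte LE integer.
def _example_utxo_db_entry(hashX, tx_idx, tx_num, value):
    key = b'u' + hashX + pack_le_uint32(tx_idx) + pack_le_uint64(tx_num)[:5]
    db_value = pack_le_uint64(value)
    return key, db_value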
#!/usr/bin/env python
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import apply_edits
def _FindPHB(filepath):
return apply_edits._FindPrimaryHeaderBasename(filepath)
class FindPrimaryHeaderBasenameTest(unittest.TestCase):
def testNoOpOnHeader(self):
self.assertIsNone(_FindPHB('bar.h'))
self.assertIsNone(_FindPHB('foo/bar.h'))
def testStripDirectories(self):
self.assertEqual('bar', _FindPHB('foo/bar.cc'))
def testStripPlatformSuffix(self):
self.assertEqual('bar', _FindPHB('bar_posix.cc'))
self.assertEqual('bar', _FindPHB('bar_unittest.cc'))
def testStripTestSuffix(self):
self.assertEqual('bar', _FindPHB('bar_browsertest.cc'))
self.assertEqual('bar', _FindPHB('bar_unittest.cc'))
def testStripPlatformAndTestSuffix(self):
self.assertEqual('bar', _FindPHB('bar_uitest_aura.cc'))
self.assertEqual('bar', _FindPHB('bar_linux_unittest.cc'))
def testNoSuffixStrippingWithoutUnderscore(self):
self.assertEqual('barunittest', _FindPHB('barunittest.cc'))
def _ApplyEdit(old_contents_string,
edit,
contents_filepath="some_file.cc",
last_edit=None):
if last_edit is not None:
assert (last_edit > edit) # Test or prod caller should ensure.
ba = bytearray()
ba.extend(old_contents_string.encode('ASCII'))
apply_edits._ApplySingleEdit(contents_filepath, ba, edit, last_edit)
return ba.decode('ASCII')
def _InsertHeader(old_contents,
contents_filepath='foo/impl.cc',
new_header_path='new/header.h'):
edit = apply_edits.Edit('include-user-header', -1, -1, new_header_path)
return _ApplyEdit(old_contents, edit, contents_filepath=contents_filepath)
class InsertIncludeHeaderTest(unittest.TestCase):
def _assertEqualContents(self, expected, actual):
if expected != actual:
print("####################### EXPECTED:")
print(expected)
print("####################### ACTUAL:")
print(actual)
print("####################### END.")
self.assertEqual(expected, actual)
def testSkippingCppComments(self):
old_contents = '''
// Copyright info here.
#include "old/header.h"
'''
expected_new_contents = '''
// Copyright info here.
#include "new/header.h"
#include "old/header.h"
'''
new_header_line = '#include "new/header.h'
self._assertEqualContents(expected_new_contents,
_InsertHeader(old_contents))
def testSkippingCppComments_DocCommentForStruct(self):
""" This is a regression test for https://crbug.com/1175684 """
old_contents = '''
// Copyright blah blah...
#ifndef SANDBOX_LINUX_SYSTEM_HEADERS_LINUX_FILTER_H_
#define SANDBOX_LINUX_SYSTEM_HEADERS_LINUX_FILTER_H_
#include <stdint.h>
// Doc comment for a struct.
// Multiline.
struct sock_filter {
uint16_t code;
};
'''
expected_new_contents = '''
// Copyright blah blah...
#ifndef SANDBOX_LINUX_SYSTEM_HEADERS_LINUX_FILTER_H_
#define SANDBOX_LINUX_SYSTEM_HEADERS_LINUX_FILTER_H_
#include <stdint.h>
#include "new/header.h"
// Doc comment for a struct.
// Multiline.
struct sock_filter {
uint16_t code;
};
'''
new_header_line = '#include "new/header.h'
self._assertEqualContents(expected_new_contents,
_InsertHeader(old_contents))
def testSkippingCppComments_DocCommentForStruct2(self):
""" This is a regression test for https://crbug.com/1175684 """
old_contents = '''
// Copyright blah blah...
// Doc comment for a struct.
struct sock_filter {
uint16_t code;
};
'''
expected_new_contents = '''
// Copyright blah blah...
#include "new/header.h"
// Doc comment for a struct.
struct sock_filter {
uint16_t code;
};
'''
new_header_line = '#include "new/header.h'
self._assertEqualContents(expected_new_contents,
_InsertHeader(old_contents))
def testSkippingCppComments_DocCommentForStruct3(self):
""" This is a regression test for https://crbug.com/1175684 """
old_contents = '''
// Doc comment for a struct.
struct sock_filter {
uint16_t code;
};
'''
expected_new_contents = '''
#include "new/header.h"
// Doc comment for a struct.
struct sock_filter {
uint16_t code;
};
'''
new_header_line = '#include "new/header.h'
self._assertEqualContents(expected_new_contents,
_InsertHeader(old_contents))
def testSkippingCppComments_DocCommentForInclude(self):
""" This is a regression test for https://crbug.com/1175684 """
old_contents = '''
// Copyright blah blah...
// System includes.
#include <stdint.h>
// Doc comment for a struct.
struct sock_filter {
uint16_t code;
};
'''
expected_new_contents = '''
// Copyright blah blah...
// System includes.
#include <stdint.h>
#include "new/header.h"
// Doc comment for a struct.
struct sock_filter {
uint16_t code;
};
'''
new_header_line = '#include "new/header.h'
self._assertEqualContents(expected_new_contents,
_InsertHeader(old_contents))
def testSkippingCppComments_DocCommentForWholeFile(self):
""" This is a regression test for https://crbug.com/1175684 """
old_contents = '''
// Copyright blah blah...
// Doc comment for the whole file.
struct sock_filter {
uint16_t code;
};
'''
expected_new_contents = '''
// Copyright blah blah...
// Doc comment for the whole file.
#include "new/header.h"
struct sock_filter {
uint16_t code;
};
'''
new_header_line = '#include "new/header.h'
self._assertEqualContents(expected_new_contents,
_InsertHeader(old_contents))
def testSkippingOldStyleComments(self):
old_contents = '''
/* Copyright
* info here.
*/
#include "old/header.h"
'''
expected_new_contents = '''
/* Copyright
* info here.
*/
#include "new/header.h"
#include "old/header.h"
'''
self._assertEqualContents(expected_new_contents,
_InsertHeader(old_contents))
def testSkippingOldStyleComments_NoWhitespaceAtLineStart(self):
old_contents = '''
/* Copyright
* info here.
*/
#include "old/header.h"
'''
expected_new_contents = '''
/* Copyright
* info here.
*/
#include "new/header.h"
#include "old/header.h"
'''
self._assertEqualContents(expected_new_contents,
_InsertHeader(old_contents))
def testSkippingSystemHeaders(self):
old_contents = '''
#include <string>
#include <vector> // blah
#include "old/header.h"
'''
expected_new_contents = '''
#include <string>
#include <vector> // blah
#include "new/header.h"
#include "old/header.h"
'''
self._assertEqualContents(expected_new_contents,
_InsertHeader(old_contents))
def testSkippingPrimaryHeader(self):
old_contents = '''
// Copyright info here.
#include "foo/impl.h"
#include "old/header.h"
'''
expected_new_contents = '''
// Copyright info here.
#include "foo/impl.h"
#include "new/header.h"
#include "old/header.h"
'''
self._assertEqualContents(expected_new_contents,
_InsertHeader(old_contents))
def testSimilarNonPrimaryHeader_WithPrimaryHeader(self):
old_contents = '''
// Copyright info here.
#include "primary/impl.h" // This is the primary header.
#include "unrelated/impl.h" // This is *not* the primary header.
#include "zzz/foo.h"
'''
expected_new_contents = '''
// Copyright info here.
#include "primary/impl.h" // This is the primary header.
#include "unrelated/impl.h" // This is *not* the primary header.
#include "new/header.h"
#include "zzz/foo.h"
'''
self._assertEqualContents(expected_new_contents,
_InsertHeader(old_contents))
def testSimilarNonPrimaryHeader_NoPrimaryHeader(self):
old_contents = '''
// Copyright info here.
#include "unrelated/impl.h" // This is *not* the primary header.
#include "zzz/foo.h"
'''
expected_new_contents = '''
// Copyright info here.
#include "unrelated/impl.h" // This is *not* the primary header.
#include "new/header.h"
#include "zzz/foo.h"
'''
self._assertEqualContents(expected_new_contents,
_InsertHeader(old_contents))
def testSkippingIncludeGuards(self):
old_contents = '''
#ifndef FOO_IMPL_H_
#define FOO_IMPL_H_
#include "old/header.h"
#endif FOO_IMPL_H_
'''
expected_new_contents = '''
#ifndef FOO_IMPL_H_
#define FOO_IMPL_H_
#include "new/header.h"
#include "old/header.h"
#endif FOO_IMPL_H_
'''
self._assertEqualContents(
expected_new_contents,
_InsertHeader(old_contents, 'foo/impl.h', 'new/header.h'))
def testSkippingIncludeGuards2(self):
# This test is based on base/third_party/valgrind/memcheck.h
old_contents = '''
#ifndef __MEMCHECK_H
#define __MEMCHECK_H
#include "old/header.h"
#endif
'''
expected_new_contents = '''
#ifndef __MEMCHECK_H
#define __MEMCHECK_H
#include "new/header.h"
#include "old/header.h"
#endif
'''
self._assertEqualContents(expected_new_contents,
_InsertHeader(old_contents))
def testSkippingIncludeGuards3(self):
# This test is based on base/third_party/xdg_mime/xdgmime.h
old_contents = '''
#ifndef __XDG_MIME_H__
#define __XDG_MIME_H__
#include "old/header.h"
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
typedef void (*XdgMimeCallback) (void *user_data);
#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif /* __XDG_MIME_H__ */
'''
expected_new_contents = '''
#ifndef __XDG_MIME_H__
#define __XDG_MIME_H__
#include "new/header.h"
#include "old/header.h"
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
typedef void (*XdgMimeCallback) (void *user_data);
#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif /* __XDG_MIME_H__ */
'''
self._assertEqualContents(expected_new_contents,
_InsertHeader(old_contents))
def testSkippingIncludeGuards4(self):
# This test is based on ash/first_run/desktop_cleaner.h and/or
# components/subresource_filter/core/common/scoped_timers.h and/or
# device/gamepad/abstract_haptic_gamepad.h
old_contents = '''
#ifndef ASH_FIRST_RUN_DESKTOP_CLEANER_
#define ASH_FIRST_RUN_DESKTOP_CLEANER_
#include "old/header.h"
namespace ash {
} // namespace ash
#endif // ASH_FIRST_RUN_DESKTOP_CLEANER_
'''
expected_new_contents = '''
#ifndef ASH_FIRST_RUN_DESKTOP_CLEANER_
#define ASH_FIRST_RUN_DESKTOP_CLEANER_
#include "new/header.h"
#include "old/header.h"
namespace ash {
} // namespace ash
#endif // ASH_FIRST_RUN_DESKTOP_CLEANER_
'''
self._assertEqualContents(expected_new_contents,
_InsertHeader(old_contents))
def testSkippingIncludeGuards5(self):
# This test is based on third_party/weston/include/GLES2/gl2.h (the |extern
# "C"| part has been removed to make the test trickier to handle right -
# otherwise it is easy to see that the header has to be included before the
# |extern "C"| part).
#
# The tricky parts below include:
# 1. upper + lower case characters allowed in the guard name
# 2. Having to recognize that GL_APIENTRYP is *not* a guard
old_contents = '''
#ifndef __gles2_gl2_h_
#define __gles2_gl2_h_ 1
#include <GLES2/gl2platform.h>
#ifndef GL_APIENTRYP
#define GL_APIENTRYP GL_APIENTRY*
#endif
#endif
'''
expected_new_contents = '''
#ifndef __gles2_gl2_h_
#define __gles2_gl2_h_ 1
#include <GLES2/gl2platform.h>
#include "new/header.h"
#ifndef GL_APIENTRYP
#define GL_APIENTRYP GL_APIENTRY*
#endif
#endif
'''
self._assertEqualContents(expected_new_contents,
_InsertHeader(old_contents))
def testSkippingIncludeGuards6(self):
# This test is based on ios/third_party/blink/src/html_token.h
old_contents = '''
#ifndef HTMLToken_h
#define HTMLToken_h
#include <stddef.h>
#include <vector>
// ...
#endif
'''
expected_new_contents = '''
#ifndef HTMLToken_h
#define HTMLToken_h
#include <stddef.h>
#include <vector>
#include "new/header.h"
// ...
#endif
'''
self._assertEqualContents(expected_new_contents,
_InsertHeader(old_contents))
def testNoOpIfAlreadyPresent(self):
# This tests that the new header won't be inserted (and duplicated)
# if it is already included.
old_contents = '''
// Copyright info here.
#include "old/header.h"
#include "new/header.h"
#include "new/header2.h"
'''
expected_new_contents = '''
// Copyright info here.
#include "old/header.h"
#include "new/header.h"
#include "new/header2.h"
'''
self._assertEqualContents(expected_new_contents,
_InsertHeader(old_contents))
def testNoOpIfAlreadyPresent_WithTrailingComment(self):
# This tests that the new header won't be inserted (and duplicated)
# if it is already included.
old_contents = '''
// Copyright info here.
#include "old/header.h"
#include "new/header.h" // blah
#include "new/header2.h"
'''
expected_new_contents = '''
// Copyright info here.
#include "old/header.h"
#include "new/header.h" // blah
#include "new/header2.h"
'''
self._assertEqualContents(expected_new_contents,
_InsertHeader(old_contents))
def testNoOldHeaders(self):
# This tests that an extra new line is inserted after the new header
# when there are no old headers immediately below.
old_contents = '''
#include <vector>
struct S {};
'''
expected_new_contents = '''
#include <vector>
#include "new/header.h"
struct S {};
'''
self._assertEqualContents(expected_new_contents,
_InsertHeader(old_contents))
def testPlatformIfDefs(self):
# This test is based on
# //base/third_party/double_conversion/double-conversion/utils.h
# We need to insert the new header in a non-conditional part.
old_contents = '''
#ifndef DOUBLE_CONVERSION_UTILS_H_
#define DOUBLE_CONVERSION_UTILS_H_
#include <cstdlib>
#include <cstring>
#ifndef DOUBLE_CONVERSION_UNREACHABLE
#ifdef _MSC_VER
void DOUBLE_CONVERSION_NO_RETURN abort_noreturn();
inline void abort_noreturn() { abort(); }
#define DOUBLE_CONVERSION_UNREACHABLE() (abort_noreturn())
#else
#define DOUBLE_CONVERSION_UNREACHABLE() (abort())
#endif
#endif
namespace double_conversion {
'''
expected_new_contents = '''
#ifndef DOUBLE_CONVERSION_UTILS_H_
#define DOUBLE_CONVERSION_UTILS_H_
#include <cstdlib>
#include <cstring>
#include "new/header.h"
#ifndef DOUBLE_CONVERSION_UNREACHABLE
#ifdef _MSC_VER
void DOUBLE_CONVERSION_NO_RETURN abort_noreturn();
inline void abort_noreturn() { abort(); }
#define DOUBLE_CONVERSION_UNREACHABLE() (abort_noreturn())
#else
#define DOUBLE_CONVERSION_UNREACHABLE() (abort())
#endif
#endif
namespace double_conversion {
'''
self._assertEqualContents(expected_new_contents,
_InsertHeader(old_contents))
def testNoOldIncludesAndIfDefs(self):
# Artificial test: no old #includes + some #ifdefs. The main focus of the
# test is ensuring that the new header will be inserted into the
# unconditional part of the file.
old_contents = '''
#ifndef NDEBUG
#include "base/logging.h"
#endif
void foo();
'''
expected_new_contents = '''
#include "new/header.h"
#ifndef NDEBUG
#include "base/logging.h"
#endif
void foo();
'''
self._assertEqualContents(expected_new_contents,
_InsertHeader(old_contents))
def testNoOldIncludesAndIfDefs2(self):
# Artificial test: no old #includes + some #ifdefs. The main focus of the
# test is ensuring that the new header will be inserted into the
# unconditional part of the file.
old_contents = '''
#if defined(OS_WIN)
#include "foo_win.h"
#endif
void foo();
'''
expected_new_contents = '''
#include "new/header.h"
#if defined(OS_WIN)
#include "foo_win.h"
#endif
void foo();
'''
self._assertEqualContents(expected_new_contents,
_InsertHeader(old_contents))
def testUtf8BomMarker(self):
# Test based on
# //chrome/browser/ui/views/payments/payment_sheet_view_controller.cc
# which at some point began as follows:
# 00000000: efbb bf2f 2f20 436f 7079 7269 6768 7420 ...// Copyright
#
# Previous versions of apply_edits.py would not skip the BOM marker when
# figuring out where to insert the new include header.
old_contents = u'''\ufeff// Copyright
#include "old/header.h"
'''
expected_new_contents = u'''\ufeff// Copyright
#include "new/header.h"
#include "old/header.h"
'''
actual = bytearray()
actual.extend(old_contents.encode('utf-8'))
expected = bytearray()
expected.extend(expected_new_contents.encode('utf-8'))
# Test sanity check (i.e. not an assertion about code under test).
utf8_bom = [0xef, 0xbb, 0xbf]
self._assertEqualContents(list(actual[0:3]), utf8_bom)
self._assertEqualContents(list(expected[0:3]), utf8_bom)
# Actual test.
edit = apply_edits.Edit('include-user-header', -1, -1, "new/header.h")
apply_edits._ApplySingleEdit("foo/impl.cc", actual, edit, None)
self._assertEqualContents(expected, actual)
def _CreateReplacement(content_string, old_substring, new_substring):
""" Test helper for creating an Edit object with the right offset, etc. """
offset = content_string.find(old_substring)
return apply_edits.Edit('r', offset, len(old_substring), new_substring)
class ApplyReplacementTest(unittest.TestCase):
def testBasics(self):
old_text = "123 456 789"
r = _CreateReplacement(old_text, "456", "foo")
new_text = _ApplyEdit(old_text, r)
self.assertEqual("123 foo 789", new_text)
def testMiddleListElementRemoval(self):
old_text = "(123, 456, 789) // foobar"
r = _CreateReplacement(old_text, "456", "")
new_text = _ApplyEdit(old_text, r)
self.assertEqual("(123, 789) // foobar", new_text)
def testFinalElementRemoval(self):
old_text = "(123, 456, 789) // foobar"
r = _CreateReplacement(old_text, "789", "")
new_text = _ApplyEdit(old_text, r)
self.assertEqual("(123, 456) // foobar", new_text)
def testConflictingReplacement(self):
old_text = "123 456 789"
last = _CreateReplacement(old_text, "456", "foo")
edit = _CreateReplacement(old_text, "456", "bar")
expected_msg_regex = 'Conflicting replacement text'
expected_msg_regex += '.*some_file.cc at offset 4, length 3'
expected_msg_regex += '.*"bar" != "foo"'
with self.assertRaisesRegexp(ValueError, expected_msg_regex):
_ApplyEdit(old_text, edit, last_edit=last)
def testUnrecognizedEditDirective(self):
old_text = "123 456 789"
edit = apply_edits.Edit('unknown_directive', 123, 456, "foo")
expected_msg_regex = 'Unrecognized edit directive "unknown_directive"'
expected_msg_regex += '.*some_file.cc'
with self.assertRaisesRegexp(ValueError, expected_msg_regex):
_ApplyEdit(old_text, edit)
def testOverlappingReplacement(self):
old_text = "123 456 789"
last = _CreateReplacement(old_text, "456 789", "foo")
edit = _CreateReplacement(old_text, "123 456", "bar")
expected_msg_regex = 'Overlapping replacements'
expected_msg_regex += '.*some_file.cc'
expected_msg_regex += '.*offset 0, length 7.*"bar"'
expected_msg_regex += '.*offset 4, length 7.*"foo"'
with self.assertRaisesRegexp(ValueError, expected_msg_regex):
_ApplyEdit(old_text, edit, last_edit=last)
if __name__ == '__main__':
unittest.main()