import web
from nailgun.extensions import get_all_extensions
from nailgun.api.v1.handlers.assignment import NodeAssignmentHandler
from nailgun.api.v1.handlers.assignment import NodeUnassignmentHandler
from nailgun.api.v1.handlers.capacity import CapacityLogCsvHandler
from nailgun.api.v1.handlers.capacity import CapacityLogHandler
from nailgun.api.v1.handlers.cluster import ClusterAttributesDefaultsHandler
from nailgun.api.v1.handlers.cluster import ClusterAttributesHandler
from nailgun.api.v1.handlers.cluster import ClusterChangesHandler
from nailgun.api.v1.handlers.cluster import ClusterCollectionHandler
from nailgun.api.v1.handlers.cluster import ClusterDeploymentTasksHandler
from nailgun.api.v1.handlers.cluster import ClusterGeneratedData
from nailgun.api.v1.handlers.cluster import ClusterHandler
from nailgun.api.v1.handlers.cluster import ClusterResetHandler
from nailgun.api.v1.handlers.cluster import ClusterStopDeploymentHandler
from nailgun.api.v1.handlers.cluster import ClusterUpdateHandler
from nailgun.api.v1.handlers.cluster import VmwareAttributesDefaultsHandler
from nailgun.api.v1.handlers.cluster import VmwareAttributesHandler
from nailgun.api.v1.handlers.logs import LogEntryCollectionHandler
from nailgun.api.v1.handlers.logs import LogPackageDefaultConfig
from nailgun.api.v1.handlers.logs import LogPackageHandler
from nailgun.api.v1.handlers.logs import LogSourceByNodeCollectionHandler
from nailgun.api.v1.handlers.logs import LogSourceCollectionHandler
from nailgun.api.v1.handlers.logs import SnapshotDownloadHandler
from nailgun.api.v1.handlers.network_group import NetworkGroupCollectionHandler
from nailgun.api.v1.handlers.network_group import NetworkGroupHandler
from nailgun.api.v1.handlers.node_group import NodeGroupCollectionHandler
from nailgun.api.v1.handlers.node_group import NodeGroupHandler
from nailgun.api.v1.handlers.network_configuration \
import NeutronNetworkConfigurationHandler
from nailgun.api.v1.handlers.network_configuration \
import NeutronNetworkConfigurationVerifyHandler
from nailgun.api.v1.handlers.network_configuration \
import NovaNetworkConfigurationHandler
from nailgun.api.v1.handlers.network_configuration \
import NovaNetworkConfigurationVerifyHandler
from nailgun.api.v1.handlers.network_configuration \
import TemplateNetworkConfigurationHandler
from nailgun.api.v1.handlers.node import NodeAgentHandler
from nailgun.api.v1.handlers.node import NodeCollectionHandler
from nailgun.api.v1.handlers.node import NodeHandler
from nailgun.api.v1.handlers.node import NodesAllocationStatsHandler
from nailgun.api.v1.handlers.plugin import PluginCollectionHandler
from nailgun.api.v1.handlers.plugin import PluginHandler
from nailgun.api.v1.handlers.plugin import PluginSyncHandler
from nailgun.api.v1.handlers.node import NodeCollectionNICsDefaultHandler
from nailgun.api.v1.handlers.node import NodeCollectionNICsHandler
from nailgun.api.v1.handlers.node import NodeNICsDefaultHandler
from nailgun.api.v1.handlers.node import NodeNICsHandler
from nailgun.api.v1.handlers.notifications import NotificationCollectionHandler
from nailgun.api.v1.handlers.notifications import NotificationHandler
from nailgun.api.v1.handlers.orchestrator import DefaultDeploymentInfo
from nailgun.api.v1.handlers.orchestrator import DefaultPostPluginsHooksInfo
from nailgun.api.v1.handlers.orchestrator import DefaultPrePluginsHooksInfo
from nailgun.api.v1.handlers.orchestrator import DefaultProvisioningInfo
from nailgun.api.v1.handlers.orchestrator import DeploymentInfo
from nailgun.api.v1.handlers.orchestrator import DeploySelectedNodes
from nailgun.api.v1.handlers.orchestrator import DeploySelectedNodesWithTasks
from nailgun.api.v1.handlers.orchestrator import ProvisioningInfo
from nailgun.api.v1.handlers.orchestrator import ProvisionSelectedNodes
from nailgun.api.v1.handlers.orchestrator import TaskDeployGraph
from nailgun.api.v1.handlers.registration import FuelLoginForm
from nailgun.api.v1.handlers.registration import FuelRegistrationForm
from nailgun.api.v1.handlers.registration import FuelRestorePasswordForm
from nailgun.api.v1.handlers.release import ReleaseCollectionHandler
from nailgun.api.v1.handlers.release import ReleaseDeploymentTasksHandler
from nailgun.api.v1.handlers.release import ReleaseHandler
from nailgun.api.v1.handlers.release import ReleaseNetworksHandler
from nailgun.api.v1.handlers.role import ClusterRolesCollectionHandler
from nailgun.api.v1.handlers.role import ClusterRolesHandler
from nailgun.api.v1.handlers.role import RoleCollectionHandler
from nailgun.api.v1.handlers.role import RoleHandler
from nailgun.api.v1.handlers.tasks import TaskCollectionHandler
from nailgun.api.v1.handlers.tasks import TaskHandler
from nailgun.api.v1.handlers.version import VersionHandler
from nailgun.api.v1.handlers.vms import NodeVMsHandler
from nailgun.api.v1.handlers.vms import SpawnVmsHandler
from nailgun.api.v1.handlers.removed import RemovedIn51RedHatAccountHandler
from nailgun.api.v1.handlers.removed import RemovedIn51RedHatSetupHandler
from nailgun.api.v1.handlers.master_node_settings \
import MasterNodeSettingsHandler
from nailgun.settings import settings
urls = (
r'/releases/?$',
ReleaseCollectionHandler,
r'/releases/(?P<obj_id>\d+)/?$',
ReleaseHandler,
r'/releases/(?P<obj_id>\d+)/networks/?$',
ReleaseNetworksHandler,
r'/releases/(?P<obj_id>\d+)/deployment_tasks/?$',
ReleaseDeploymentTasksHandler,
r'/releases/(?P<release_id>\d+)/roles/?$',
RoleCollectionHandler,
r'/releases/(?P<release_id>\d+)/roles/(?P<role_name>[a-zA-Z-_]+)/?$',
RoleHandler,
r'/clusters/(?P<cluster_id>\d+)/roles/?$',
ClusterRolesCollectionHandler,
r'/clusters/(?P<cluster_id>\d+)/roles/(?P<role_name>[a-zA-Z-_]+)/?$',
ClusterRolesHandler,
r'/clusters/?$',
ClusterCollectionHandler,
r'/clusters/(?P<obj_id>\d+)/?$',
ClusterHandler,
r'/clusters/(?P<cluster_id>\d+)/changes/?$',
ClusterChangesHandler,
r'/clusters/(?P<cluster_id>\d+)/attributes/?$',
ClusterAttributesHandler,
r'/clusters/(?P<cluster_id>\d+)/attributes/defaults/?$',
ClusterAttributesDefaultsHandler,
# nova network-related
r'/clusters/(?P<cluster_id>\d+)/network_configuration/nova_network/?$',
NovaNetworkConfigurationHandler,
r'/clusters/(?P<cluster_id>\d+)/network_configuration/'
'nova_network/verify/?$',
NovaNetworkConfigurationVerifyHandler,
# neutron-related
r'/clusters/(?P<cluster_id>\d+)/network_configuration/neutron/?$',
NeutronNetworkConfigurationHandler,
r'/clusters/(?P<cluster_id>\d+)/network_configuration/'
'neutron/verify/?$',
NeutronNetworkConfigurationVerifyHandler,
r'/clusters/(?P<cluster_id>\d+)/network_configuration/template/?$',
TemplateNetworkConfigurationHandler,
r'/clusters/(?P<cluster_id>\d+)/orchestrator/deployment/?$',
DeploymentInfo,
r'/clusters/(?P<cluster_id>\d+)/orchestrator/deployment/defaults/?$',
DefaultDeploymentInfo,
r'/clusters/(?P<cluster_id>\d+)/orchestrator/provisioning/?$',
ProvisioningInfo,
r'/clusters/(?P<cluster_id>\d+)/orchestrator/provisioning/defaults/?$',
DefaultProvisioningInfo,
r'/clusters/(?P<cluster_id>\d+)/generated/?$',
ClusterGeneratedData,
r'/clusters/(?P<cluster_id>\d+)/orchestrator/plugins_pre_hooks/?$',
DefaultPrePluginsHooksInfo,
r'/clusters/(?P<cluster_id>\d+)/orchestrator/plugins_post_hooks/?$',
DefaultPostPluginsHooksInfo,
r'/clusters/(?P<cluster_id>\d+)/provision/?$',
ProvisionSelectedNodes,
r'/clusters/(?P<cluster_id>\d+)/deploy/?$',
DeploySelectedNodes,
r'/clusters/(?P<cluster_id>\d+)/deploy_tasks/?$',
DeploySelectedNodesWithTasks,
r'/clusters/(?P<cluster_id>\d+)/deploy_tasks/graph.gv$',
TaskDeployGraph,
r'/clusters/(?P<cluster_id>\d+)/stop_deployment/?$',
ClusterStopDeploymentHandler,
r'/clusters/(?P<cluster_id>\d+)/reset/?$',
ClusterResetHandler,
r'/clusters/(?P<cluster_id>\d+)/update/?$',
ClusterUpdateHandler,
r'/clusters/(?P<obj_id>\d+)/deployment_tasks/?$',
ClusterDeploymentTasksHandler,
r'/networks/?$',
NetworkGroupCollectionHandler,
r'/networks/(?P<obj_id>\d+)/?$',
NetworkGroupHandler,
r'/clusters/(?P<cluster_id>\d+)/assignment/?$',
NodeAssignmentHandler,
r'/clusters/(?P<cluster_id>\d+)/unassignment/?$',
NodeUnassignmentHandler,
r'/clusters/(?P<cluster_id>\d+)/vmware_attributes/?$',
VmwareAttributesHandler,
r'/clusters/(?P<cluster_id>\d+)/vmware_attributes/defaults/?$',
VmwareAttributesDefaultsHandler,
r'/nodegroups/?$',
NodeGroupCollectionHandler,
r'/nodegroups/(?P<obj_id>\d+)/?$',
NodeGroupHandler,
r'/nodes/?$',
NodeCollectionHandler,
r'/nodes/agent/?$',
NodeAgentHandler,
r'/nodes/(?P<obj_id>\d+)/?$',
NodeHandler,
r'/nodes/interfaces/?$',
NodeCollectionNICsHandler,
r'/nodes/interfaces/default_assignment/?$',
NodeCollectionNICsDefaultHandler,
r'/nodes/(?P<node_id>\d+)/interfaces/?$',
NodeNICsHandler,
r'/nodes/(?P<node_id>\d+)/interfaces/default_assignment/?$',
NodeNICsDefaultHandler,
r'/nodes/allocation/stats/?$',
NodesAllocationStatsHandler,
r'/tasks/?$',
TaskCollectionHandler,
r'/tasks/(?P<obj_id>\d+)/?$',
TaskHandler,
r'/plugins/(?P<obj_id>\d+)/?$',
PluginHandler,
r'/plugins/?$',
PluginCollectionHandler,
r'/plugins/sync/?$',
PluginSyncHandler,
r'/notifications/?$',
NotificationCollectionHandler,
r'/notifications/(?P<obj_id>\d+)/?$',
NotificationHandler,
r'/dump/(?P<snapshot_name>[A-Za-z0-9-_.]+)$',
SnapshotDownloadHandler,
r'/logs/?$',
LogEntryCollectionHandler,
r'/logs/package/?$',
LogPackageHandler,
r'/logs/package/config/default/?$',
LogPackageDefaultConfig,
r'/logs/sources/?$',
LogSourceCollectionHandler,
r'/logs/sources/nodes/(?P<node_id>\d+)/?$',
LogSourceByNodeCollectionHandler,
r'/tracking/registration/?$',
FuelRegistrationForm,
r'/tracking/login/?$',
FuelLoginForm,
r'/tracking/restore_password/?$',
FuelRestorePasswordForm,
r'/version/?$',
VersionHandler,
r'/capacity/?$',
CapacityLogHandler,
r'/capacity/csv/?$',
CapacityLogCsvHandler,
r'/redhat/account/?$',
RemovedIn51RedHatAccountHandler,
r'/redhat/setup/?$',
RemovedIn51RedHatSetupHandler,
r'/settings/?$',
MasterNodeSettingsHandler,
)
feature_groups_urls = {
'advanced': (
r'/clusters/(?P<cluster_id>\d+)/spawn_vms/?$',
SpawnVmsHandler,
r'/nodes/(?P<node_id>\d+)/vms_conf/?$',
NodeVMsHandler,
)
}
urls = [i if isinstance(i, str) else i.__name__ for i in urls]
_locals = locals()
def get_extensions_urls():
"""Method is used to retrieve the data about
handlers and urls from extensions and convert
them into web.py consumable format.
:returns: dict in the next format:
{'urls': (r'/url/', 'ClassName'),
'handlers': [{
'class': ClassName,
'name': 'ClassName'}]}
"""
urls = []
handlers = []
for extension in get_all_extensions():
for url in extension.urls:
# TODO(eli): handler name should be extension specific
# not to have problems when several extensions use
# the same name for handler classes.
# Should be done as a part of blueprint:
# https://blueprints.launchpad.net/fuel/+spec
# /volume-manager-refactoring
handler_name = url['handler'].__name__
handlers.append({
'class': url['handler'],
'name': handler_name})
urls.extend((url['uri'], handler_name))
return {'urls': urls, 'handlers': handlers}
def get_feature_groups_urls():
"""Method is used to retrieve urls depended on feature groups like
'experimental' or 'advanced' which should be enable only for this modes.
:returns: list of urls
"""
urls = []
for feature in settings.VERSION['feature_groups']:
urls.extend([i if isinstance(i, str) else i.__name__ for i in
feature_groups_urls.get(feature, [])])
return urls
def get_all_urls():
"""Merges urls and handlers from core with
urls and handlers from extensions
"""
ext_urls = get_extensions_urls()
all_urls = list(urls)
all_urls.extend(get_feature_groups_urls())
all_urls.extend(ext_urls['urls'])
for handler in ext_urls['handlers']:
_locals[handler['name']] = handler['class']
return [all_urls, _locals]
def app():
return web.application(*get_all_urls())
def public_urls():
return {
r'/nodes/?$': ['POST'],
r'/nodes/agent/?$': ['PUT'],
r'/version/?$': ['GET']
}
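# --- Illustrative sketch (not part of the original module) ---
# get_all_urls() returns a flat list that alternates url patterns with
# handler class names, plus a locals() mapping that resolves those names
# to classes; web.application() consumes exactly that pair. A minimal
# standalone equivalent (HelloHandler and its route are hypothetical):
class HelloHandler(object):
    def GET(self):
        return 'hello'
demo_urls = (r'/hello/?$', 'HelloHandler')
demo_app = web.application(demo_urls, {'HelloHandler': HelloHandler})
if __name__ == '__main__':
    demo_app.run()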
| {
"content_hash": "4638a4594dfc637d9adc25bb774dde06",
"timestamp": "",
"source": "github",
"line_count": 354,
"max_line_length": 79,
"avg_line_length": 36.49152542372882,
"alnum_prop": 0.7243381328379006,
"repo_name": "SmartInfrastructures/fuel-web-dev",
"id": "61701af8fe81122250ff4feaad0d922683230ee6",
"size": "13553",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nailgun/nailgun/api/v1/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "91131"
},
{
"name": "HTML",
"bytes": "7949"
},
{
"name": "JavaScript",
"bytes": "945307"
},
{
"name": "Mako",
"bytes": "1943"
},
{
"name": "Python",
"bytes": "3961568"
},
{
"name": "Ruby",
"bytes": "14701"
},
{
"name": "Shell",
"bytes": "24392"
}
],
"symlink_target": ""
} |
"""Helper utilty function for customization."""
import sys
import os
import docutils
import subprocess
if os.environ.get('READTHEDOCS', None) == 'True':
subprocess.call('cd ..; rm -rf recommonmark;' +
'git clone https://github.com/tqchen/recommonmark', shell=True)
sys.path.insert(0, os.path.abspath('../recommonmark/'))
from recommonmark import parser, transform
MarkdownParser = parser.CommonMarkParser
AutoStructify = transform.AutoStructify
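# --- Illustrative sketch (not part of the original file) ---
# MarkdownParser and AutoStructify are typically consumed by a Sphinx
# conf.py roughly as below; the configuration key shown is an assumption
# based on upstream recommonmark usage, not something taken from this repo.
source_suffix = ['.rst', '.md']
source_parsers = {'.md': MarkdownParser}
def setup(app):
    # register recommonmark's AutoStructify transform with Sphinx
    app.add_config_value('recommonmark_config', {'enable_auto_toc_tree': True}, True)
    app.add_transform(AutoStructify)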
| {
"content_hash": "85aa6ad5289df21e173ffed80841eb5e",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 83,
"avg_line_length": 31.466666666666665,
"alnum_prop": 0.7288135593220338,
"repo_name": "mlperf/training_results_v0.6",
"id": "f6a33ffa375dcd96164a20c32970fb4cf29a6c0a",
"size": "496",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/tvm/3rdparty/dmlc-core/doc/sphinx_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Batchfile",
"bytes": "13941"
},
{
"name": "C",
"bytes": "208630"
},
{
"name": "C++",
"bytes": "10999411"
},
{
"name": "CMake",
"bytes": "129712"
},
{
"name": "CSS",
"bytes": "64767"
},
{
"name": "Clojure",
"bytes": "396764"
},
{
"name": "Cuda",
"bytes": "2272433"
},
{
"name": "Dockerfile",
"bytes": "67820"
},
{
"name": "Groovy",
"bytes": "62557"
},
{
"name": "HTML",
"bytes": "19753082"
},
{
"name": "Java",
"bytes": "166294"
},
{
"name": "JavaScript",
"bytes": "71846"
},
{
"name": "Julia",
"bytes": "408765"
},
{
"name": "Jupyter Notebook",
"bytes": "2713169"
},
{
"name": "Lua",
"bytes": "4430"
},
{
"name": "MATLAB",
"bytes": "34903"
},
{
"name": "Makefile",
"bytes": "115694"
},
{
"name": "Perl",
"bytes": "1535873"
},
{
"name": "Perl 6",
"bytes": "7280"
},
{
"name": "PowerShell",
"bytes": "6150"
},
{
"name": "Python",
"bytes": "24905683"
},
{
"name": "R",
"bytes": "351865"
},
{
"name": "Roff",
"bytes": "293052"
},
{
"name": "Scala",
"bytes": "1189019"
},
{
"name": "Shell",
"bytes": "794096"
},
{
"name": "Smalltalk",
"bytes": "3497"
},
{
"name": "TypeScript",
"bytes": "361164"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2017 SONATA-NFV and Paderborn University
ALL RIGHTS RESERVED.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Neither the name of the SONATA-NFV, Paderborn University
nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
This work has been performed in the framework of the SONATA project,
funded by the European Commission under Grant number 671517 through
the Horizon 2020 and 5G-PPP programmes. The authors would like to
acknowledge the contributions of their colleagues of the SONATA
partner consortium (www.sonata-nfv.eu).
"""
from flask_restful import Resource
from flask import Response, request
from emuvim.api.openstack.openstack_dummies.base_openstack_dummy import BaseOpenstackDummy
from emuvim.api.openstack.helper import get_host
import logging
import json
import uuid
from mininet.link import Link
LOG = logging.getLogger("api.openstack.nova")
class NovaDummyApi(BaseOpenstackDummy):
def __init__(self, in_ip, in_port, compute):
super(NovaDummyApi, self).__init__(in_ip, in_port)
self.compute = compute
self.api.add_resource(NovaVersionsList, "/",
resource_class_kwargs={'api': self})
self.api.add_resource(Shutdown, "/shutdown")
self.api.add_resource(NovaVersionShow, "/v2.1/<id>",
resource_class_kwargs={'api': self})
self.api.add_resource(NovaListServersApi, "/v2.1/<id>/servers",
resource_class_kwargs={'api': self})
self.api.add_resource(NovaListServersAndPortsApi, "/v2.1/<id>/servers/andPorts",
resource_class_kwargs={'api': self})
self.api.add_resource(NovaListServersDetailed, "/v2.1/<id>/servers/detail",
resource_class_kwargs={'api': self})
self.api.add_resource(NovaShowServerDetails, "/v2.1/<id>/servers/<serverid>",
resource_class_kwargs={'api': self})
self.api.add_resource(NovaInterfaceToServer, "/v2.1/<id>/servers/<serverid>/os-interface",
resource_class_kwargs={'api': self})
self.api.add_resource(NovaShowAndDeleteInterfaceAtServer, "/v2.1/<id>/servers/<serverid>/os-interface/<port_id>",
resource_class_kwargs={'api': self})
self.api.add_resource(NovaListFlavors, "/v2.1/<id>/flavors", "/v2/<id>/flavors",
resource_class_kwargs={'api': self})
self.api.add_resource(NovaListFlavorsDetails, "/v2.1/<id>/flavors/detail", "/v2/<id>/flavors/detail",
resource_class_kwargs={'api': self})
self.api.add_resource(NovaListFlavorById, "/v2.1/<id>/flavors/<flavorid>", "/v2/<id>/flavors/<flavorid>",
resource_class_kwargs={'api': self})
self.api.add_resource(NovaListImages, "/v2.1/<id>/images",
resource_class_kwargs={'api': self})
self.api.add_resource(NovaListImagesDetails, "/v2.1/<id>/images/detail",
resource_class_kwargs={'api': self})
self.api.add_resource(NovaListImageById, "/v2.1/<id>/images/<imageid>",
resource_class_kwargs={'api': self})
self.api.add_resource(NovaLimits, "/v2.1/<id>/limits",
resource_class_kwargs={'api': self})
def _start_flask(self):
LOG.info("Starting %s endpoint @ http://%s:%d" % ("NovaDummyApi", self.ip, self.port))
# add some flavors for good measure
self.compute.add_flavor('m1.tiny', 1, 512, "MB", 1, "GB")
self.compute.add_flavor('m1.nano', 1, 64, "MB", 0, "GB")
self.compute.add_flavor('m1.micro', 1, 128, "MB", 0, "GB")
self.compute.add_flavor('m1.small', 1, 1024, "MB", 2, "GB")
if self.app is not None:
self.app.before_request(self.dump_playbook)
self.app.run(self.ip, self.port, debug=True, use_reloader=False)
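# --- Illustrative sketch (not part of the original module) ---
# Every endpoint below follows the same flask_restful pattern: the Resource
# subclass receives the NovaDummyApi instance through resource_class_kwargs
# and reads self.api.compute in its handlers. A minimal standalone
# equivalent (EchoResource, its route and the _DemoHolder stand-in are
# hypothetical):
#
#     from flask import Flask
#     from flask_restful import Api, Resource
#
#     class EchoResource(Resource):
#         def __init__(self, api):
#             self.api = api
#         def get(self):
#             return {'port': self.api.port}
#
#     class _DemoHolder(object):   # stands in for a NovaDummyApi instance
#         port = 8774
#
#     demo_app = Flask(__name__)
#     Api(demo_app).add_resource(EchoResource, '/echo',
#                                resource_class_kwargs={'api': _DemoHolder()})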
class Shutdown(Resource):
"""
A get request to /shutdown will shut down this endpoint.
"""
def get(self):
LOG.debug(("%s is beeing shut doen") % (__name__))
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
class NovaVersionsList(Resource):
def __init__(self, api):
self.api = api
def get(self):
"""
Lists API versions.
:return: Returns a json with API versions.
:rtype: :class:`flask.response`
"""
LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
try:
resp = """
{
"versions": [
{
"id": "v2.1",
"links": [
{
"href": "http://%s:%d/v2.1/",
"rel": "self"
}
],
"status": "CURRENT",
"version": "2.38",
"min_version": "2.1",
"updated": "2013-07-23T11:33:21Z"
}
]
}
""" % (get_host(request), self.api.port)
response = Response(resp, status=200, mimetype="application/json")
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
LOG.exception(u"%s: Could not show list of versions." % __name__)
return ex.message, 500
class NovaVersionShow(Resource):
def __init__(self, api):
self.api = api
def get(self, id):
"""
Returns API details.
:param id:
:type id: ``str``
:return: Returns a json with API details.
:rtype: :class:`flask.response`
"""
LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
try:
resp = """
{
"version": {
"id": "v2.1",
"links": [
{
"href": "http://%s:%d/v2.1/",
"rel": "self"
},
{
"href": "http://docs.openstack.org/",
"rel": "describedby",
"type": "text/html"
}
],
"media-types": [
{
"base": "application/json",
"type": "application/vnd.openstack.compute+json;version=2.1"
}
],
"status": "CURRENT",
"version": "2.38",
"min_version": "2.1",
"updated": "2013-07-23T11:33:21Z"
}
}
""" % (get_host(request), self.api.port)
response = Response(resp, status=200, mimetype="application/json")
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
LOG.exception(u"%s: Could not show list of versions." % __name__)
return ex.message, 500
class NovaListServersApi(Resource):
def __init__(self, api):
self.api = api
def get(self, id):
"""
Creates a list with all running servers and their detailed information.
:param id: Used to create an individual link to query further information.
:type id: ``str``
:return: Returns a json response with a dictionary that contains the server information.
:rtype: :class:`flask.response`
"""
LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
try:
resp = dict()
resp['servers'] = list()
for server in self.api.compute.computeUnits.values():
s = server.create_server_dict(self.api.compute)
s['links'] = [{'href': "http://%s:%d/v2.1/%s/servers/%s" % (get_host(request),
self.api.port,
id,
server.id)}]
resp['servers'].append(s)
response = Response(json.dumps(resp), status=200, mimetype="application/json")
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
LOG.exception(u"%s: Could not retrieve the list of servers." % __name__)
return ex.message, 500
def post(self, id):
"""
Creates a server instance.
:param id: tenant id, we ignore this most of the time
:type id: ``str``
:return: Returns a flask response, with detailed information about the just created server.
:rtype: :class:`flask.response`
"""
LOG.debug("API CALL: %s POST" % str(self.__class__.__name__))
try:
server_dict = json.loads(request.data)['server']
networks = server_dict.get('networks', None)
name = str(self.api.compute.dc.label) + "_man_" + server_dict["name"][0:12]
if self.api.compute.find_server_by_name_or_id(name) is not None:
return Response("Server with name %s already exists." % name, status=409)
# TODO: not finished!
resp = dict()
server = self.api.compute.create_server(name)
server.full_name = str(self.api.compute.dc.label) + "_man_" + server_dict["name"]
server.template_name = server_dict["name"]
if "metadata" in server_dict:
server.properties = server_dict["metadata"]
for flavor in self.api.compute.flavors.values():
if flavor.id == server_dict.get('flavorRef', ''):
server.flavor = flavor.name
for image in self.api.compute.images.values():
if image.id in server_dict['imageRef']:
server.image = image.name
if networks is not None:
for net in networks:
port = self.api.compute.find_port_by_name_or_id(net.get('port', ""))
if port is not None:
server.port_names.append(port.name)
else:
return Response("Currently only networking by port is supported.", status=400)
self.api.compute._start_compute(server)
response = NovaShowServerDetails(self.api).get(id, server.id)
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
LOG.exception(u"%s: Could not create the server." % __name__)
return ex.message, 500
class NovaListServersAndPortsApi(Resource):
def __init__(self, api):
self.api = api
def get(self, id):
"""
Creates a list with all running servers and their detailed information. This function also presents all
port information of each server.
:param id: Used to create an individual link to query further information.
:type id: ``str``
:return: Returns a json response with a dictionary that contains the server information.
:rtype: :class:`flask.response`
"""
LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
try:
resp = dict()
resp['servers'] = list()
for server in self.api.compute.computeUnits.values():
s = server.create_server_dict(self.api.compute)
s['links'] = [{'href': "http://%s:%d/v2.1/%s/servers/%s" % (get_host(request),
self.api.port,
id,
server.id)}]
s['ports'] = list()
for port_name in server.port_names:
port = self.api.compute.find_port_by_name_or_id(port_name)
if port is None:
continue
tmp = port.create_port_dict(self.api.compute)
tmp['intf_name'] = port.intf_name
s['ports'].append(tmp)
resp['servers'].append(s)
response = Response(json.dumps(resp), status=200, mimetype="application/json")
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
LOG.exception(u"%s: Could not retrieve the list of servers." % __name__)
return ex.message, 500
class NovaListServersDetailed(Resource):
def __init__(self, api):
self.api = api
def get(self, id):
"""
Like List Servers, this lists all running servers and their details, but it
also includes the flavor and image used by each server.
:param id: tenant id, used for the 'href' link.
:type id: ``str``
:return: Returns a flask response with detailed information about the servers and their flavor and image.
:rtype: :class:`flask.response`
"""
LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
try:
resp = {"servers": list()}
for server in self.api.compute.computeUnits.values():
s = server.create_server_dict(self.api.compute)
s['links'] = [{'href': "http://%s:%d/v2.1/%s/servers/%s" % (get_host(request),
self.api.port,
id,
server.id)}]
flavor = self.api.compute.flavors[server.flavor]
s['flavor'] = {
"id": flavor.id,
"links": [
{
"href": "http://%s:%d/v2.1/%s/flavors/%s" % (get_host(request),
self.api.port,
id,
flavor.id),
"rel": "bookmark"
}
]
}
image = self.api.compute.images[server.image]
s['image'] = {
"id": image.id,
"links": [
{
"href": "http://%s:%d/v2.1/%s/images/%s" % (get_host(request),
self.api.port,
id,
image.id),
"rel": "bookmark"
}
]
}
resp['servers'].append(s)
response = Response(json.dumps(resp), status=200, mimetype="application/json")
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
LOG.exception(u"%s: Could not retrieve the list of servers." % __name__)
return ex.message, 500
class NovaListFlavors(Resource):
def __init__(self, api):
self.api = api
def get(self, id):
"""
Lists all available flavors.
:param id: tenant id, used for the 'href' link
:type id: ``str``
:return: Returns a flask response with a list of all flavors.
:rtype: :class:`flask.response`
"""
LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
try:
resp = dict()
resp['flavors'] = list()
for flavor in self.api.compute.flavors.values():
f = flavor.__dict__.copy()
f['id'] = flavor.id
f['name'] = flavor.name
f['links'] = [{'href': "http://%s:%d/v2.1/%s/flavors/%s" % (get_host(request),
self.api.port,
id,
flavor.id)}]
resp['flavors'].append(f)
response = Response(json.dumps(resp), status=200, mimetype="application/json")
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
LOG.exception(u"%s: Could not retrieve the list of servers." % __name__)
return ex.message, 500
def post(self, id):
LOG.debug("API CALL: %s POST" % str(self.__class__.__name__))
data = json.loads(request.data).get("flavor")
LOG.warning("Create Flavor: %s" % str(data))
# add to internal dict
f = self.api.compute.add_flavor(
data.get("name"),
data.get("vcpus"),
data.get("ram"), "MB",
data.get("disk"), "GB")
# create response based on incoming data
data["id"] = f.id
data["links"] = [{'href': "http://%s:%d/v2.1/%s/flavors/%s" % (get_host(request),
self.api.port,
id,
f.id)}]
resp = {"flavor": data}
return Response(json.dumps(resp), status=200, mimetype="application/json")
class NovaListFlavorsDetails(Resource):
def __init__(self, api):
self.api = api
def get(self, id):
"""
Lists all flavors with additional information like ram and disk space.
:param id: tenant id, used for the 'href' link
:type id: ``str``
:return: Returns a flask response with a list of all flavors with additional information.
:rtype: :class:`flask.response`
"""
LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
try:
resp = dict()
resp['flavors'] = list()
for flavor in self.api.compute.flavors.values():
# use the class dict; it should work fine,
# but use a copy so we don't modify the original
f = flavor.__dict__.copy()
# add additional expected fields to stay OpenStack compatible
f['links'] = [{'href': "http://%s:%d/v2.1/%s/flavors/%s" % (get_host(request),
self.api.port,
id,
flavor.id)}]
f['OS-FLV-DISABLED:disabled'] = False
f['OS-FLV-EXT-DATA:ephemeral'] = 0
f['os-flavor-access:is_public'] = True
f['ram'] = flavor.memory
f['vcpus'] = flavor.cpu
f['swap'] = 0
f['disk'] = flavor.storage
f['rxtx_factor'] = 1.0
resp['flavors'].append(f)
response = Response(json.dumps(resp), status=200, mimetype="application/json")
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
LOG.exception(u"%s: Could not retrieve the list of servers." % __name__)
return ex.message, 500
def post(self, id):
LOG.debug("API CALL: %s POST" % str(self.__class__.__name__))
data = json.loads(request.data).get("flavor")
LOG.warning("Create Flavor: %s" % str(data))
# add to internal dict
f = self.api.compute.add_flavor(
data.get("name"),
data.get("vcpus"),
data.get("ram"), "MB",
data.get("disk"), "GB")
# create response based on incoming data
data["id"] = f.id
data["links"] = [{'href': "http://%s:%d/v2.1/%s/flavors/%s" % (get_host(request),
self.api.port,
id,
f.id)}]
resp = {"flavor": data}
return Response(json.dumps(resp), status=200, mimetype="application/json")
class NovaListFlavorById(Resource):
def __init__(self, api):
self.api = api
def get(self, id, flavorid):
"""
Returns details about one flavor.
:param id: tenant id, used for the 'href' link
:type id: ``str``
:param flavorid: Represents the flavor.
:type flavorid: ``str``
:return: Returns a flask response with detailed information about the flavor.
:rtype: :class:`flask.response`
"""
LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
try:
resp = dict()
resp['flavor'] = dict()
flavor = self.api.compute.flavors.get(flavorid, None)
if flavor is None:
for f in self.api.compute.flavors.values():
if f.id == flavorid:
flavor = f
break
resp['flavor']['id'] = flavor.id
resp['flavor']['name'] = flavor.name
resp['flavor']['links'] = [{'href': "http://%s:%d/v2.1/%s/flavors/%s" % (get_host(request),
self.api.port,
id,
flavor.id)}]
response = Response(json.dumps(resp), status=200, mimetype="application/json")
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
LOG.exception(u"%s: Could not retrieve flavor with id %s" % (__name__, flavorid))
return ex.message, 500
def delete(self, id, flavorid):
"""
Removes the given flavor.
Does not really remove anything from the machine, just fakes an OK.
"""
LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
return Response("{}", status=204, mimetype="application/json")
class NovaListImages(Resource):
def __init__(self, api):
self.api = api
def get(self, id):
"""
Creates a list of all usable images.
:param id: tenant id, used for the 'href' link
:type id: ``str``
:return: Returns a flask response with a list of available images.
:rtype: :class:`flask.response`
"""
LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
try:
resp = dict()
resp['images'] = list()
for image in self.api.compute.images.values():
f = dict()
f['id'] = image.id
f['name'] = str(image.name).replace(":latest", "")
f['links'] = [{'href': "http://%s:%d/v2.1/%s/images/%s" % (get_host(request),
self.api.port,
id,
image.id)}]
resp['images'].append(f)
response = Response(json.dumps(resp), status=200, mimetype="application/json")
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
LOG.exception(u"%s: Could not retrieve the list of images." % __name__)
return ex.message, 500
class NovaListImagesDetails(Resource):
def __init__(self, api):
self.api = api
def get(self, id):
"""
As List Images but with additional metadata.
:param id: tenant id, used for the 'href' link
:type id: ``str``
:return: Returns a flask response with a list of images and their metadata.
:rtype: :class:`flask.response`
"""
LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
try:
resp = dict()
resp['images'] = list()
for image in self.api.compute.images.values():
# use the class dict; it should work fine,
# but use a copy so we don't modify the original
f = image.__dict__.copy()
# add additional expected fields to stay OpenStack compatible
f['name'] = str(image.name).replace(":latest", "")
f['links'] = [{'href': "http://%s:%d/v2.1/%s/images/%s" % (get_host(request),
self.api.port,
id,
image.id)}]
f['metadata'] = {
"architecture": "x86_64",
"auto_disk_config": "True",
"kernel_id": "nokernel",
"ramdisk_id": "nokernel"
}
resp['images'].append(f)
response = Response(json.dumps(resp), status=200, mimetype="application/json")
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
LOG.exception(u"%s: Could not retrieve the list of images." % __name__)
return ex.message, 500
class NovaListImageById(Resource):
def __init__(self, api):
self.api = api
def get(self, id, imageid):
"""
Gets an image by id from the emulator with openstack nova compliant return values.
:param id: tenantid, we ignore this most of the time
:type id: ``str``
:param imageid: id of the image. If it is 1 the dummy CREATE-IMAGE is returned
:type imageid: ``str``
:return: Returns a flask response with the information about one image.
:rtype: :class:`flask.response`
"""
LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
try:
resp = dict()
i = resp['image'] = dict()
for image in self.api.compute.images.values():
if image.id == imageid or image.name == imageid:
i['id'] = image.id
i['name'] = image.name
return Response(json.dumps(resp), status=200, mimetype="application/json")
response = Response("Image with id or name %s does not exists." % imageid, status=404)
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
LOG.exception(u"%s: Could not retrieve image with id %s." % (__name__, imageid))
return ex.message, 500
def delete(self, id, imageid):
"""
Removes the given image.
Does not really remove anything from the machine, just fakes an OK.
"""
LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
return Response("{}", status=204, mimetype="application/json")
class NovaShowServerDetails(Resource):
def __init__(self, api):
self.api = api
def get(self, id, serverid):
"""
Returns detailed information about the specified server.
:param id: tenant id, used for the 'href' link
:type id: ``str``
:param serverid: Specifies the requested server.
:type serverid: ``str``
:return: Returns a flask response with details about the server.
:rtype: :class:`flask.response`
"""
LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
try:
server = self.api.compute.find_server_by_name_or_id(serverid)
if server is None:
return Response("Server with id or name %s does not exists." % serverid, status=404)
s = server.create_server_dict()
s['links'] = [{'href': "http://%s:%d/v2.1/%s/servers/%s" % (get_host(request),
self.api.port,
id,
server.id)}]
flavor = self.api.compute.flavors[server.flavor]
s['flavor'] = {
"id": flavor.id,
"links": [
{
"href": "http://%s:%d/v2.1/%s/flavors/%s" % (get_host(request),
self.api.port,
id,
flavor.id),
"rel": "bookmark"
}
]
}
image = self.api.compute.images[server.image]
s['image'] = {
"id": image.id,
"links": [
{
"href": "http://%s:%d/v2.1/%s/images/%s" % (get_host(request),
self.api.port,
id,
image.id),
"rel": "bookmark"
}
]
}
response = Response(json.dumps({'server': s}), status=200, mimetype="application/json")
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
LOG.exception(u"%s: Could not retrieve the server details." % __name__)
return ex.message, 500
def delete(self, id, serverid):
"""
Delete a server instance.
:param id: tenant id, we ignore this most of the time
:type id: ``str``
:param serverid: The UUID of the server
:type serverid: ``str``
:return: Returns 200 if everything is fine.
:rtype: :class:`flask.response`
"""
LOG.debug("API CALL: %s POST" % str(self.__class__.__name__))
try:
server = self.api.compute.find_server_by_name_or_id(serverid)
if server is None:
return Response('Could not find server.', status=404, mimetype="application/json")
self.api.compute.stop_compute(server)
response = Response('Server deleted.', status=204, mimetype="application/json")
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
LOG.exception(u"%s: Could not create the server." % __name__)
return ex.message, 500
class NovaInterfaceToServer(Resource):
def __init__(self, api):
self.api = api
def post(self, id, serverid):
"""
Add an interface to the specified server.
:param id: tenant id, we ignore this most of the time
:type id: ``str``
:param serverid: Specifies the server.
:type serverid: ``str``
:return: Returns a flask response with information about the attached interface.
:rtype: :class:`flask.response`
"""
LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
try:
server = self.api.compute.find_server_by_name_or_id(serverid)
if server is None:
return Response("Server with id or name %s does not exists." % serverid, status=404)
if server.emulator_compute is None:
LOG.error("The targeted container does not exist.")
return Response("The targeted container of %s does not exist." % serverid, status=404)
data = json.loads(request.data).get("interfaceAttachment")
resp = dict()
port = data.get("port_id", None)
net = data.get("net_id", None)
dc = self.api.compute.dc
network_dict = dict()
network = None
if net is not None and port is not None:
port = self.api.compute.find_port_by_name_or_id(port)
network = self.api.compute.find_network_by_name_or_id(net)
network_dict['id'] = port.intf_name
network_dict['ip'] = port.ip_address
network_dict[network_dict['id']] = network.name
elif net is not None:
network = self.api.compute.find_network_by_name_or_id(net)
if network is None:
return Response("Network with id or name %s does not exists." % net, status=404)
port = self.api.compute.create_port("port:cp%s:fl:%s" %
(len(self.api.compute.ports), str(uuid.uuid4())))
port.net_name = network.name
port.ip_address = network.get_new_ip_address(port.name)
network_dict['id'] = port.intf_name
network_dict['ip'] = port.ip_address
network_dict[network_dict['id']] = network.name
elif port is not None:
port = self.api.compute.find_port_by_name_or_id(port)
network_dict['id'] = port.intf_name
network_dict['ip'] = port.ip_address
network = self.api.compute.find_network_by_name_or_id(port.net_name)
network_dict[network_dict['id']] = network.name
else:
raise Exception("You can only attach interfaces by port or network at the moment")
if network == self.api.manage.floating_network:
dc.net.addLink(server.emulator_compute, self.api.manage.floating_switch,
params1=network_dict, cls=Link, intfName1=port.intf_name)
else:
dc.net.addLink(server.emulator_compute, dc.switch,
params1=network_dict, cls=Link, intfName1=port.intf_name)
resp["port_state"] = "ACTIVE"
resp["port_id"] = port.id
resp["net_id"] = self.api.compute.find_network_by_name_or_id(port.net_name).id
resp["mac_addr"] = port.mac_address
resp["fixed_ips"] = list()
fixed_ips = dict()
fixed_ips["ip_address"] = port.ip_address
fixed_ips["subnet_id"] = network.subnet_name
resp["fixed_ips"].append(fixed_ips)
response = Response(json.dumps({"interfaceAttachment": resp}), status=202, mimetype="application/json")
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
LOG.exception(u"%s: Could not add interface to the server." % __name__)
return ex.message, 500
class NovaShowAndDeleteInterfaceAtServer(Resource):
def __init__(self, api):
self.api = api
def delete(self, id, serverid, port_id):
"""
Deletes an existing interface.
:param id: tenant id, we ignore this most of the time
:type id: ``str``
:param serverid: Specifies the server, where the interface will be deleted.
:type serverid: ``str``
:param port_id: Specifies the port of the interface.
:type port_id: ``str``
:return: Returns a flask response with 202 if everything worked out. Otherwise it will return 404 and an
error message.
:rtype: :class:`flask.response`
"""
LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
try:
server = self.api.compute.find_server_by_name_or_id(serverid)
if server is None:
return Response("Server with id or name %s does not exists." % serverid, status=404)
port = self.api.compute.find_port_by_name_or_id(port_id)
if port is None:
return Response("Port with id or name %s does not exists." % port_id, status=404)
for link in self.api.compute.dc.net.links:
if str(link.intf1) == port.intf_name and \
str(link.intf1.ip) == port.ip_address.split('/')[0]:
self.api.compute.dc.net.removeLink(link)
break
response = Response("", status=202, mimetype="application/json")
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
LOG.exception(u"%s: Could not detach interface from the server." % __name__)
return ex.message, 500
class NovaLimits(Resource):
def __init__(self, api):
self.api = api
def get(self, id):
"""
Returns the resource limits of the emulated cloud.
https://developer.openstack.org/api-ref/compute/?expanded=show-rate-and-absolute-limits-detail#limits-limits
TODO: For now we only return fixed limits, not based on the real deployment.
:param id: tenant id, used for the 'href' link
:type id: ``str``
:return: Returns the resource limits.
:rtype: :class:`flask.response`
"""
LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
try:
resp = {
"limits": {
"absolute": {
"maxImageMeta": 12800,
"maxPersonality": 500,
"maxPersonalitySize": 1024000,
"maxSecurityGroupRules": 2000,
"maxSecurityGroups": 1000,
"maxServerMeta": 12800,
"maxTotalCores": 2000,
"maxTotalFloatingIps": 1000,
"maxTotalInstances": 1000,
"maxTotalKeypairs": 1000,
"maxTotalRAMSize": 5120000,
"maxServerGroups": 1000,
"maxServerGroupMembers": 1000,
"totalCoresUsed": 0,
"totalInstancesUsed": 0,
"totalRAMUsed": 0,
"totalSecurityGroupsUsed": 0,
"totalFloatingIpsUsed": 0,
"totalServerGroupsUsed": 0
},
"rate": []
}
}
response = Response(json.dumps(resp), status=200, mimetype="application/json")
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
LOG.exception(u"%s: Could not retrieve the list of images." % __name__)
return ex.message, 500
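# --- Illustrative usage sketch (not part of the original module) ---
# Once an endpoint instance is running, it can be exercised over plain HTTP.
# The host, port and tenant id below are assumptions made for this example,
# not values taken from this file.
if __name__ == '__main__':
    import requests  # assumes the requests library is available
    base = 'http://127.0.0.1:8774'  # hypothetical host/port of a running NovaDummyApi
    print(requests.get(base + '/').json())                            # NovaVersionsList
    print(requests.get(base + '/v2.1/fakeid/flavors/detail').json())  # NovaListFlavorsDetails
    print(requests.get(base + '/v2.1/fakeid/servers').json())         # NovaListServersApi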
| {
"content_hash": "e1dc6c0f8ad4a9bcea62f6e466d38b2f",
"timestamp": "",
"source": "github",
"line_count": 948,
"max_line_length": 121,
"avg_line_length": 42.67299578059072,
"alnum_prop": 0.48573688634004053,
"repo_name": "stevenvanrossem/son-emu",
"id": "7495076e481c59236c8b7106cd2b64a8c8a3633b",
"size": "40454",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/emuvim/api/openstack/openstack_dummies/nova_dummy_api.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "625"
},
{
"name": "Dockerfile",
"bytes": "2644"
},
{
"name": "HTML",
"bytes": "6268"
},
{
"name": "JavaScript",
"bytes": "13979"
},
{
"name": "Python",
"bytes": "792173"
},
{
"name": "Shell",
"bytes": "5708"
}
],
"symlink_target": ""
} |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def ProfileChangedEvent(vim, *args, **kwargs):
'''This event records that the profile has been edited'''
obj = vim.client.factory.create('ns0:ProfileChangedEvent')
# do some validation checking...
if (len(args) + len(kwargs)) < 5:
raise IndexError('Expected at least 5 arguments, got: %d' % (len(args) + len(kwargs)))
required = [ 'profile', 'chainId', 'createdTime', 'key', 'userName' ]
optional = [ 'changeTag', 'computeResource', 'datacenter', 'ds', 'dvs',
'fullFormattedMessage', 'host', 'net', 'vm', 'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
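# --- Illustrative usage sketch (not part of the original module) ---
# "vim" must be an already-connected pyvisdk service instance; the argument
# values below are placeholders, not values taken from this repository.
#
#     event = ProfileChangedEvent(
#         vim,
#         profile=profile_ref,                   # hypothetical profile reference
#         chainId=1,
#         createdTime='2017-01-01T00:00:00Z',
#         key=1,
#         userName='administrator',
#     )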
| {
"content_hash": "58454f65cf6e72954153929792383409",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 124,
"avg_line_length": 35.06060606060606,
"alnum_prop": 0.5972342264477096,
"repo_name": "xuru/pyvisdk",
"id": "095290ecefff761a98ce433a0da418d63405bdaa",
"size": "1158",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyvisdk/do/profile_changed_event.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "369"
},
{
"name": "Python",
"bytes": "3037849"
},
{
"name": "Shell",
"bytes": "4517"
}
],
"symlink_target": ""
} |
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class sslcertkey_sslvserver_binding(base_resource) :
""" Binding class showing the sslvserver that can be bound to sslcertkey.
"""
def __init__(self) :
self._servername = ""
self._data = 0
self._version = 0
self._certkey = ""
self._vservername = ""
self._vserver = False
self._ca = False
self._crlcheck = ""
self.___count = 0
@property
def vserver(self) :
ur"""Specify this option to bind the certificate to an SSL virtual server.
Note: The default option is -vServer.
"""
try :
return self._vserver
except Exception as e:
raise e
@vserver.setter
def vserver(self, vserver) :
ur"""Specify this option to bind the certificate to an SSL virtual server.
Note: The default option is -vServer.
"""
try :
self._vserver = vserver
except Exception as e:
raise e
@property
def crlcheck(self) :
ur"""The rule for use of CRL corresponding to this CA certificate during client authentication. If crlCheck is set to Mandatory, the system will deny all SSL clients if the CRL is missing, expired - NextUpdate date is in the past, or is incomplete with remote CRL refresh enabled. If crlCheck is set to optional, the system will allow SSL clients in the above error cases.However, in any case if the client certificate is revoked in the CRL, the SSL client will be denied access.<br/>Default value: CRLCHECK_OPTIONAL<br/>Possible values = Mandatory, Optional.
"""
try :
return self._crlcheck
except Exception as e:
raise e
@crlcheck.setter
def crlcheck(self, crlcheck) :
ur"""The rule for use of CRL corresponding to this CA certificate during client authentication. If crlCheck is set to Mandatory, the system will deny all SSL clients if the CRL is missing, expired - NextUpdate date is in the past, or is incomplete with remote CRL refresh enabled. If crlCheck is set to optional, the system will allow SSL clients in the above error cases.However, in any case if the client certificate is revoked in the CRL, the SSL client will be denied access.<br/>Default value: CRLCHECK_OPTIONAL<br/>Possible values = Mandatory, Optional
"""
try :
self._crlcheck = crlcheck
except Exception as e:
raise e
@property
def ca(self) :
ur"""If this option is specified, it indicates that the certificate-key pair being bound to the SSL virtual server is a CA certificate. If this option is not specified, the certificate-key pair is bound as a normal server certificate.
Note: In case of a normal server certificate, the certificate-key pair should consist of both the certificate and the private-key.
"""
try :
return self._ca
except Exception as e:
raise e
@ca.setter
def ca(self, ca) :
ur"""If this option is specified, it indicates that the certificate-key pair being bound to the SSL virtual server is a CA certificate. If this option is not specified, the certificate-key pair is bound as a normal server certificate.
Note: In case of a normal server certificate, the certificate-key pair should consist of both the certificate and the private-key.
"""
try :
self._ca = ca
except Exception as e:
raise e
@property
def vservername(self) :
ur"""The name of the SSL virtual server name to which the certificate-key pair needs to be bound.
"""
try :
return self._vservername
except Exception as e:
raise e
@vservername.setter
def vservername(self, vservername) :
ur"""The name of the SSL virtual server name to which the certificate-key pair needs to be bound.
"""
try :
self._vservername = vservername
except Exception as e:
raise e
@property
def servername(self) :
ur"""Vserver name to which the certificate key pair is bound.
"""
try :
return self._servername
except Exception as e:
raise e
@servername.setter
def servername(self, servername) :
ur"""Vserver name to which the certificate key pair is bound.
"""
try :
self._servername = servername
except Exception as e:
raise e
@property
def certkey(self) :
ur"""Name of the certificate-key pair.<br/>Minimum length = 1.
"""
try :
return self._certkey
except Exception as e:
raise e
@certkey.setter
def certkey(self, certkey) :
ur"""Name of the certificate-key pair.<br/>Minimum length = 1
"""
try :
self._certkey = certkey
except Exception as e:
raise e
@property
def version(self) :
ur"""Version.
"""
try :
return self._version
except Exception as e:
raise e
@property
def data(self) :
ur"""Vserver Id.
"""
try :
return self._data
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(sslcertkey_sslvserver_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.sslcertkey_sslvserver_binding
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.certkey is not None :
return str(self.certkey)
return None
except Exception as e :
raise e
@classmethod
def get(cls, service, certkey) :
ur""" Use this API to fetch sslcertkey_sslvserver_binding resources.
"""
try :
obj = sslcertkey_sslvserver_binding()
obj.certkey = certkey
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, certkey, filter_) :
ur""" Use this API to fetch filtered set of sslcertkey_sslvserver_binding resources.
Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
"""
try :
obj = sslcertkey_sslvserver_binding()
obj.certkey = certkey
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, certkey) :
ur""" Use this API to count sslcertkey_sslvserver_binding resources configued on NetScaler.
"""
try :
obj = sslcertkey_sslvserver_binding()
obj.certkey = certkey
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, certkey, filter_) :
ur""" Use this API to count the filtered set of sslcertkey_sslvserver_binding resources.
Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
"""
try :
obj = sslcertkey_sslvserver_binding()
obj.certkey = certkey
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Crlcheck:
Mandatory = "Mandatory"
Optional = "Optional"
class sslcertkey_sslvserver_binding_response(base_response) :
def __init__(self, length=1) :
self.sslcertkey_sslvserver_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.sslcertkey_sslvserver_binding = [sslcertkey_sslvserver_binding() for _ in range(length)]
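# --- Illustrative usage sketch (not part of the original module) ---
# Fetching the vservers bound to a certificate-key pair requires an
# authenticated nitro_service session; the NetScaler address, credentials
# and certkey name below are placeholders, not values from this repository.
#
#     from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
#
#     client = nitro_service("10.0.0.1", "http")
#     client.login("nsroot", "nsroot")
#     for binding in sslcertkey_sslvserver_binding.get(client, "my-certkey"):
#         print(binding.vservername)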
| {
"content_hash": "0b02de028f61ad7a2fc7d2ecfda7c87b",
"timestamp": "",
"source": "github",
"line_count": 254,
"max_line_length": 561,
"avg_line_length": 31.614173228346456,
"alnum_prop": 0.712079701120797,
"repo_name": "benfinke/ns_python",
"id": "4f58dd9d5cb6df26ba9c00d10e2447999d512107",
"size": "8644",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "build/lib/nssrc/com/citrix/netscaler/nitro/resource/config/ssl/sslcertkey_sslvserver_binding.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "21836782"
},
{
"name": "Shell",
"bytes": "513"
}
],
"symlink_target": ""
} |
from conans.model.conan_file import ConanFile
from conans.model.options import Options
from conans.model.settings import Settings
from conans.client.cmake import CMake
from conans.client.gcc import GCC
from conans.client.configure_environment import ConfigureEnvironment
from conans.util.files import load
import os
# complex_search: With ORs and not filtering by not restricted settings
COMPLEX_SEARCH_CAPABILITY = "complex_search"
SERVER_CAPABILITIES = [COMPLEX_SEARCH_CAPABILITY, ]
__version__ = '0.18.0-dev'
| {
"content_hash": "df69d86fb56e753f9b1f3cdd0b24ed95",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 71,
"avg_line_length": 34.333333333333336,
"alnum_prop": 0.8135922330097087,
"repo_name": "Xaltotun/conan",
"id": "8670f1b582d3f8a5fd6953e74ce01b9de2911a9b",
"size": "581",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "conans/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1565663"
},
{
"name": "Shell",
"bytes": "1148"
}
],
"symlink_target": ""
} |
"""Utility for reading / writing command-line flag files on device(s)."""
from __future__ import print_function
import argparse
import logging
import sys
import devil_chromium
from devil.android import device_errors
from devil.android import device_utils
from devil.android import flag_changer
from devil.android.tools import script_common
from devil.utils import cmd_helper
from devil.utils import logging_common
def CheckBuildTypeSupportsFlags(device, command_line_flags_file):
is_webview = command_line_flags_file == 'webview-command-line'
if device.IsUserBuild() and is_webview:
raise device_errors.CommandFailedError(
'WebView only respects flags on a userdebug or eng device, yours '
'is a user build.', device)
if device.IsUserBuild():
logging.warning(
'Your device (%s) is a user build; Chrome may or may not pick up '
'your commandline flags. Check your '
'"command_line_on_non_rooted_enabled" preference, or switch '
'devices.', device)
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.usage = '''%(prog)s --name FILENAME [--device SERIAL] [flags...]
No flags: Prints existing command-line file.
Empty string: Deletes command-line file.
Otherwise: Writes command-line file.
'''
parser.add_argument('--name', required=True,
help='Name of file where to store flags on the device.')
parser.add_argument('-e', '--executable', dest='executable', default='chrome',
help='(deprecated) No longer used.')
script_common.AddEnvironmentArguments(parser)
script_common.AddDeviceArguments(parser)
logging_common.AddLoggingArguments(parser)
args, remote_args = parser.parse_known_args()
devil_chromium.Initialize(adb_path=args.adb_path)
logging_common.InitializeLogging(args)
devices = device_utils.DeviceUtils.HealthyDevices(device_arg=args.devices,
default_retries=0)
all_devices = device_utils.DeviceUtils.parallel(devices)
if not remote_args:
# No args == do not update, just print flags.
remote_args = None
action = ''
elif len(remote_args) == 1 and not remote_args[0]:
# Single empty string arg == delete flags
remote_args = []
action = 'Deleted command line file. '
else:
action = 'Wrote command line file. '
def update_flags(device):
CheckBuildTypeSupportsFlags(device, args.name)
changer = flag_changer.FlagChanger(device, args.name)
if remote_args is not None:
flags = changer.ReplaceFlags(remote_args)
else:
flags = changer.GetCurrentFlags()
return (device, device.build_description, flags)
updated_values = all_devices.pMap(update_flags).pGet(None)
print('%sCurrent flags (in %s):' % (action, args.name))
for d, desc, flags in updated_values:
if flags:
# Shell-quote flags for easy copy/paste as new args on the terminal.
quoted_flags = ' '.join(cmd_helper.SingleQuote(f) for f in sorted(flags))
else:
quoted_flags = '( empty )'
print(' %s (%s): %s' % (d, desc, quoted_flags))
return 0
if __name__ == '__main__':
sys.exit(main())
| {
"content_hash": "a64b90eb62405e4623e5c3bf966940d4",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 80,
"avg_line_length": 34.096774193548384,
"alnum_prop": 0.6830652790917692,
"repo_name": "ric2b/Vivaldi-browser",
"id": "3fd16db83ed5b1fcd73d7142af68dc9c823352d3",
"size": "3358",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "chromium/build/android/adb_command_line.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""Test LayoutJson"""
# pylint: disable=R1710
# standard library
import json
import os
import shutil
from pathlib import Path
# third-party
from deepdiff import DeepDiff
# first-party
from tcex.app_config.install_json import InstallJson
from tcex.app_config.layout_json import LayoutJson
from tcex.app_config.models.layout_json_model import OutputsModel, ParametersModel
class TestLayoutJson:
"""App Config LayoutJson testing."""
# @staticmethod
# def test_dev_testing():
# """."""
# fqfn = Path('tests/app_config/layout_json_samples/tcpb/tcpb-example1-layout.json')
# try:
# lj = LayoutJson(filename=fqfn.name, path=fqfn.parent)
# except Exception as ex:
# assert False, f'Failed parsing file {fqfn.name} ({ex})'
# # ij = InstallJson(
# # filename='tcpb_-_blackberry_optics-install.json',
# # path='tests/app_config/install_json_samples/tcpb',
# # )
# print('\nfilename', filename)
# # lj.create(inputs=ij.model.params, outputs=ij.model.playbook.output_variables)
# print('lj.model.inputs', lj.model.inputs)
# # print('lj.model.outputs', lj.model.outputs)
@staticmethod
def ij(app_name: str = 'app_1', app_type: str = 'tcpb'):
"""Return install.json instance."""
# reset singleton
# InstallJson._instances = {}
tcex_test_dir = os.getenv('TCEX_TEST_DIR')
ij_fqfn = os.path.join(
tcex_test_dir, 'app_config', 'apps', app_type, app_name, 'install.json'
)
fqfn = Path(ij_fqfn)
try:
return InstallJson(filename=fqfn.name, path=fqfn.parent)
except Exception as ex:
assert False, f'Failed parsing file {fqfn.name} ({ex})'
@staticmethod
def lj(app_name: str = 'app_1', app_type: str = 'tcpb'):
"""Return layout.json instance."""
# reset singleton
LayoutJson._instances = {}
tcex_test_dir = os.getenv('TCEX_TEST_DIR')
lj_fqfn = os.path.join(
tcex_test_dir, 'app_config', 'apps', app_type, app_name, 'layout.json'
)
fqfn = Path(lj_fqfn)
try:
return LayoutJson(filename=fqfn.name, path=fqfn.parent)
except Exception as ex:
assert False, f'Failed parsing file {fqfn.name} ({ex})'
@staticmethod
def lj_bad(app_name: str = 'app_bad_layout_json', app_type: str = 'tcpb'):
"""Return layout.json instance with "bad" file."""
# reset singleton
LayoutJson._instances = {}
tcex_test_dir = os.getenv('TCEX_TEST_DIR')
base_fqpn = os.path.join(tcex_test_dir, 'app_config', 'apps', app_type, app_name)
shutil.copy2(
os.path.join(base_fqpn, 'layout-template.json'),
os.path.join(base_fqpn, 'layout.json'),
)
fqfn = Path(os.path.join(base_fqpn, 'layout.json'))
try:
return LayoutJson(filename=fqfn.name, path=fqfn.parent)
except Exception as ex:
assert False, f'Failed parsing file {fqfn.name} ({ex})'
@staticmethod
def model_validate(path: str):
"""Validate input model in and out."""
tcex_test_dir = os.getenv('TCEX_TEST_DIR')
lj_path = Path(os.path.join(tcex_test_dir, path))
for fqfn in sorted(lj_path.glob('**/*layout.json')):
# reset singleton
LayoutJson._instances = {}
fqfn = Path(fqfn)
with fqfn.open() as fh:
json_dict = json.load(fh)
try:
lj = LayoutJson(filename=fqfn.name, path=fqfn.parent)
# lj.update.multiple()
except Exception as ex:
assert False, f'Failed parsing file {fqfn.name} ({ex})'
ddiff = DeepDiff(
json_dict,
# template requires json dump to serialize certain fields
json.loads(lj.model.json(by_alias=True, exclude_defaults=True, exclude_none=True)),
ignore_order=True,
)
assert not ddiff, f'Failed validation of file {fqfn.name}'
def test_create(self):
"""Test method"""
ij = self.ij(app_type='tcpb')
lj = self.lj(app_name='app_create_layout', app_type='tcpb')
lj.create(inputs=ij.model.params, outputs=ij.model.playbook.output_variables)
assert lj.fqfn.is_file()
# remove temp file
lj.fqfn.unlink()
def test_has_layout(self):
"""Test method"""
assert self.lj().has_layout
def test_model_get_param(self):
"""Test method"""
assert isinstance(self.lj().model.get_param('tc_action'), ParametersModel)
def test_model_get_output(self):
"""Test method"""
assert isinstance(self.lj().model.get_output('action_1.binary.output1'), OutputsModel)
def test_model_output_(self):
"""Test method"""
assert isinstance(self.lj().model.outputs_, dict)
def test_model_param_names(self):
"""Test method"""
assert isinstance(self.lj().model.param_names, list)
def test_update(self):
"""Test method"""
ij = self.lj_bad()
try:
ij.update.multiple()
assert True
except Exception as ex:
assert False, f'Failed to update install.json file ({ex}).'
finally:
# cleanup temp file
ij.fqfn.unlink()
def test_tcpb_support(self):
"""Validate layout.json files."""
self.model_validate('tests/app_config/app/tcpb')
def test_tcvc_support(self):
"""Validate layout.json files."""
self.model_validate('tests/app_config/app/tcvc')
| {
"content_hash": "1e305db17c319e1bc414a3baccf8de69",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 99,
"avg_line_length": 34.80487804878049,
"alnum_prop": 0.5828661527680449,
"repo_name": "ThreatConnect-Inc/tcex",
"id": "5298fae647ae94c0ade1198014c745be75c18109",
"size": "5708",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/app_config/test_layout_json_model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2735042"
}
],
"symlink_target": ""
} |
import sys
import csv
import datetime
from operator import itemgetter
from optparse import OptionParser, OptionGroup
VERSION = "0.0.3"
class MaskGen:
def __init__(self):
# Masks collections with meta data
self.masks = dict()
self.target_time = None
self.output_file = None
self.minlength = None
self.maxlength = None
self.mintime = None
self.maxtime = None
self.mincomplexity = None
self.maxcomplexity = None
self.minoccurrence = None
self.maxoccurrence = None
self.customcharset1len = None
self.customcharset2len = None
self.customcharset3len = None
self.customcharset4len = None
# PPS (Passwords per Second) Cracking Speed
self.pps = 1000000000
self.showmasks = False
# Counter for total masks coverage
self.total_occurrence = 0
def getcomplexity(self, mask):
""" Return mask complexity. """
count = 1
for char in mask[1:].split("?"):
if char == "l": count *= 26
elif char == "u": count *= 26
elif char == "d": count *= 10
elif char == "s": count *= 33
elif char == "a": count *= 95
elif char == "b": count *= 256
elif char == "h": count *= 16
elif char == "H": count *= 16
elif char == "1" and self.customcharset1len: count *= self.customcharset1len
elif char == "2" and self.customcharset2len: count *= self.customcharset2len
elif char == "3" and self.customcharset3len: count *= self.customcharset3len
elif char == "4" and self.customcharset4len: count *= self.customcharset4len
else: print "[!] Error, unknown mask ?%s in a mask %s" % (char,mask)
return count
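# Worked example of the complexity estimate above (counts per placeholder: ?u=26, ?l=26, ?d=10):
#   getcomplexity("?u?l?l?d") == 26 * 26 * 26 * 10 == 175760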
def loadmasks(self, filename):
""" Load masks and apply filters. """
maskReader = csv.reader(open(filename, 'r'), delimiter=',', quotechar='"')
for (mask,occurrence) in maskReader:
if mask == "": continue
mask_occurrence = int(occurrence)
mask_length = len(mask)/2
mask_complexity = self.getcomplexity(mask)
mask_time = mask_complexity/self.pps
self.total_occurrence += mask_occurrence
# Apply filters based on occurrence, length, complexity and time
if (self.minoccurrence == None or mask_occurrence >= self.minoccurrence) and \
(self.maxoccurrence == None or mask_occurrence <= self.maxoccurrence) and \
(self.mincomplexity == None or mask_complexity >= self.mincomplexity) and \
(self.maxcomplexity == None or mask_complexity <= self.maxcomplexity) and \
(self.mintime == None or mask_time >= self.mintime) and \
(self.maxtime == None or mask_time <= self.maxtime) and \
(self.maxlength == None or mask_length <= self.maxlength) and \
(self.minlength == None or mask_length >= self.minlength):
self.masks[mask] = dict()
self.masks[mask]['length'] = mask_length
self.masks[mask]['occurrence'] = mask_occurrence
self.masks[mask]['complexity'] = 1 - mask_complexity
self.masks[mask]['time'] = mask_time
self.masks[mask]['optindex'] = 1 - mask_complexity/mask_occurrence
def generate_masks(self,sorting_mode):
""" Generate optimal password masks sorted by occurrence, complexity or optindex """
sample_count = 0
sample_time = 0
sample_occurrence = 0
# TODO Group by time here 1 minutes, 1 hour, 1 day, 1 month, 1 year....
# Group by length 1,2,3,4,5,6,7,8,9,10....
# Group by occurrence 10%, 20%, 30%, 40%, 50%....
if self.showmasks: print "[L:] Mask: [ Occ: ] [ Time: ]"
for mask in sorted(self.masks.keys(), key=lambda mask: self.masks[mask][sorting_mode], reverse=True):
if self.showmasks:
time_human = ">1 year" if self.masks[mask]['time'] > 60*60*24*365 else str(datetime.timedelta(seconds=self.masks[mask]['time']))
print "[{:>2}] {:<30} [{:<7}] [{:>8}] ".format(self.masks[mask]['length'], mask, self.masks[mask]['occurrence'], time_human)
if self.output_file:
self.output_file.write("%s\n" % mask)
sample_occurrence += self.masks[mask]['occurrence']
sample_time += self.masks[mask]['time']
sample_count += 1
if self.target_time and sample_time > self.target_time:
print "[!] Target time exceeded."
break
print "[*] Finished generating masks:"
print " Masks generated: %s" % sample_count
print " Masks coverage: %d%% (%d/%d)" % (sample_occurrence*100/self.total_occurrence,sample_occurrence,self.total_occurrence)
time_human = ">1 year" if sample_time > 60*60*24*365 else str(datetime.timedelta(seconds=sample_time))
print " Masks runtime: %s" % time_human
def getmaskscoverage(self, checkmasks):
sample_count = 0
sample_occurrence = 0
total_complexity = 0
if self.showmasks: print "[L:] Mask: [ Occ: ] [ Time: ]"
for mask in checkmasks:
mask = mask.strip()
mask_complexity = self.getcomplexity(mask)
total_complexity += mask_complexity
if mask in self.masks:
if self.showmasks:
time_human = ">1 year" if self.masks[mask]['time'] > 60*60*24*365 else str(datetime.timedelta(seconds=self.masks[mask]['time']))
print "[{:>2}] {:<30} [{:<7}] [{:>8}] ".format(self.masks[mask]['length'], mask, self.masks[mask]['occurrence'], time_human)
if self.output_file:
self.output_file.write("%s\n" % mask)
sample_occurrence += self.masks[mask]['occurrence']
sample_count += 1
if self.target_time and total_complexity/self.pps > self.target_time:
print "[!] Target time exceeded."
break
# TODO: Something wrong here, complexity and time doesn't match with estimated from policygen
total_time = total_complexity/self.pps
time_human = ">1 year" if total_time > 60*60*24*365 else str(datetime.timedelta(seconds=total_time))
print "[*] Finished matching masks:"
print " Masks matched: %s" % sample_count
print " Masks coverage: %d%% (%d/%d)" % (sample_occurrence*100/self.total_occurrence,sample_occurrence,self.total_occurrence)
print " Masks runtime: %s" % time_human
if __name__ == "__main__":
header = " _ \n"
header += " MaskGen %s | |\n" % VERSION
header += " _ __ __ _ ___| | _\n"
header += " | '_ \ / _` |/ __| |/ /\n"
header += " | |_) | (_| | (__| < \n"
header += " | .__/ \__,_|\___|_|\_\\\n"
header += " | | \n"
header += " |_| iphelix@thesprawl.org\n"
header += "\n"
parser = OptionParser("%prog pass0.masks [pass1.masks ...] [options]", version="%prog "+VERSION)
parser.add_option("-t", "--targettime", dest="target_time", type="int", metavar="86400", help="Target time of all masks (seconds)")
parser.add_option("-o", "--outputmasks", dest="output_masks", metavar="masks.hcmask", help="Save masks to a file")
filters = OptionGroup(parser, "Individual Mask Filter Options")
filters.add_option("--minlength", dest="minlength", type="int", metavar="8", help="Minimum password length")
filters.add_option("--maxlength", dest="maxlength", type="int", metavar="8", help="Maximum password length")
filters.add_option("--mintime", dest="mintime", type="int", metavar="3600", help="Minimum mask runtime (seconds)")
filters.add_option("--maxtime", dest="maxtime", type="int", metavar="3600", help="Maximum mask runtime (seconds)")
filters.add_option("--mincomplexity", dest="mincomplexity", type="int", metavar="1", help="Minimum complexity")
filters.add_option("--maxcomplexity", dest="maxcomplexity", type="int", metavar="100", help="Maximum complexity")
filters.add_option("--minoccurrence", dest="minoccurrence", type="int", metavar="1", help="Minimum occurrence")
filters.add_option("--maxoccurrence", dest="maxoccurrence", type="int", metavar="100", help="Maximum occurrence")
parser.add_option_group(filters)
sorting = OptionGroup(parser, "Mask Sorting Options")
sorting.add_option("--optindex", action="store_true", dest="optindex", help="sort by mask optindex (default)", default=False)
sorting.add_option("--occurrence", action="store_true", dest="occurrence", help="sort by mask occurrence", default=False)
sorting.add_option("--complexity", action="store_true", dest="complexity", help="sort by mask complexity", default=False)
parser.add_option_group(sorting)
coverage = OptionGroup(parser, "Check mask coverage")
coverage.add_option("--checkmasks", dest="checkmasks", help="check mask coverage", metavar="?u?l?l?l?l?l?d,?l?l?l?l?l?d?d")
coverage.add_option("--checkmasksfile", dest="checkmasks_file", help="check mask coverage in a file", metavar="masks.hcmask")
parser.add_option_group(coverage)
parser.add_option("--showmasks", dest="showmasks",help="Show matching masks", action="store_true", default=False)
custom = OptionGroup(parser, "Custom character set options")
custom.add_option("--custom-charset1-len", dest="customcharset1len", type="int", metavar="26", help="Length of cutom character set 1")
custom.add_option("--custom-charset2-len", dest="customcharset2len", type="int", metavar="26", help="Length of cutom character set 2")
custom.add_option("--custom-charset3-len", dest="customcharset3len", type="int", metavar="26", help="Length of cutom character set 3")
custom.add_option("--custom-charset4-len", dest="customcharset4len", type="int", metavar="26", help="Length of cutom character set 4")
parser.add_option_group(custom)
misc = OptionGroup(parser, "Miscellaneous options")
misc.add_option("--pps", dest="pps",help="Passwords per Second", type="int", metavar="1000000000")
misc.add_option("-q", "--quiet", action="store_true", dest="quiet", default=False, help="Don't show headers.")
parser.add_option_group(misc)
(options, args) = parser.parse_args()
# Print program header
if not options.quiet:
print header
if len(args) < 1:
parser.error("no masks file specified! Please provide statsgen output.")
exit(1)
print "[*] Analyzing masks in [%s]" % args[0]
maskgen = MaskGen()
# Settings
if options.target_time: maskgen.target_time = options.target_time
if options.output_masks:
print "[*] Saving generated masks to [%s]" % options.output_masks
maskgen.output_file = open(options.output_masks, 'w')
# Filters
if options.minlength: maskgen.minlength = options.minlength
if options.maxlength: maskgen.maxlength = options.maxlength
if options.mintime: maskgen.mintime = options.mintime
if options.maxtime: maskgen.maxtime = options.maxtime
if options.mincomplexity: maskgen.mincomplexity = options.mincomplexity
if options.maxcomplexity: maskgen.maxcomplexity = options.maxcomplexity
if options.minoccurrence: maskgen.minoccurrence = options.minoccurrence
if options.maxoccurrence: maskgen.maxoccurrence = options.maxoccurrence
# Custom
if options.customcharset1len: maskgen.customcharset1len = options.customcharset1len
if options.customcharset2len: maskgen.customcharset2len = options.customcharset2len
if options.customcharset3len: maskgen.customcharset3len = options.customcharset3len
if options.customcharset4len: maskgen.customcharset4len = options.customcharset4len
# Misc
if options.pps: maskgen.pps = options.pps
if options.showmasks: maskgen.showmasks = options.showmasks
print "[*] Using {:,d} keys/sec for calculations.".format(maskgen.pps)
# Load masks
for arg in args:
maskgen.loadmasks(arg)
# Matching masks from the command-line
if options.checkmasks:
checkmasks = [m.strip() for m in options.checkmasks.split(',')]
print "[*] Checking coverage of the these masks [%s]" % ", ".join(checkmasks)
maskgen.getmaskscoverage(checkmasks)
# Matching masks from a file
elif options.checkmasks_file:
checkmasks_file = open(options.checkmasks_file, 'r')
print "[*] Checking coverage of masks in [%s]" % options.checkmasks_file
maskgen.getmaskscoverage(checkmasks_file)
# Printing masks in a file
else:
# Process masks according to specified sorting algorithm
if options.occurrence:
sorting_mode = "occurrence"
elif options.complexity:
sorting_mode = "complexity"
else:
sorting_mode = "optindex"
print "[*] Sorting masks by their [%s]." % sorting_mode
maskgen.generate_masks(sorting_mode)
| {
"content_hash": "7f5b9c0e671c497a2635ef660b0c87aa",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 148,
"avg_line_length": 47.03859649122807,
"alnum_prop": 0.6098761748470833,
"repo_name": "iphelix/pack",
"id": "30aef4dd5003a7579518b5efb693f0b745998f53",
"size": "13690",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "maskgen.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "84582"
}
],
"symlink_target": ""
} |
"""
Image interface.
"""
from six.moves.urllib import parse
from novaclient import base
class Image(base.Resource):
"""
An image is a collection of files used to create or rebuild a server.
"""
HUMAN_ID = True
def __repr__(self):
return "<Image: %s>" % self.name
def delete(self):
"""
Delete this image.
"""
self.manager.delete(self)
class ImageManager(base.ManagerWithFind):
"""
Manage :class:`Image` resources.
"""
resource_class = Image
def get(self, image):
"""
Get an image.
:param image: The ID of the image to get.
:rtype: :class:`Image`
"""
return self._get("/images/%s" % base.getid(image), "image")
def list(self, detailed=True, limit=None, marker=None):
"""
Get a list of all images.
:rtype: list of :class:`Image`
:param limit: maximum number of images to return.
:param marker: Begin returning images that appear later in the image
list than that represented by this image id (optional).
"""
params = {}
detail = ''
if detailed:
detail = '/detail'
if limit:
params['limit'] = int(limit)
if marker:
params['marker'] = str(marker)
params = sorted(params.items(), key=lambda x: x[0])
query = '?%s' % parse.urlencode(params) if params else ''
return self._list('/images%s%s' % (detail, query), 'images')
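# Illustrative call (assumes an authenticated novaclient `client` object):
#   client.images.list(limit=10)  ->  GET /images/detail?limit=10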
def delete(self, image):
"""
Delete an image.
It should go without saying that you can't delete an image
that you didn't create.
:param image: The :class:`Image` (or its ID) to delete.
"""
self._delete("/images/%s" % base.getid(image))
def set_meta(self, image, metadata):
"""
Set an image's metadata
:param image: The :class:`Image` to add metadata to
:param metadata: A dict of metadata to add to the image
"""
body = {'metadata': metadata}
return self._create("/images/%s/metadata" % base.getid(image),
body, "metadata")
def delete_meta(self, image, keys):
"""
Delete metadata from an image
:param image: The :class:`Image` to delete metadata from
:param keys: A list of metadata keys to delete from the image
"""
for k in keys:
self._delete("/images/%s/metadata/%s" % (base.getid(image), k))
| {
"content_hash": "d85fa3af0d6d41d3fddf0868e056e453",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 78,
"avg_line_length": 27.619565217391305,
"alnum_prop": 0.5521448248720976,
"repo_name": "mat128/python-novaclient",
"id": "32001b465b43095414d6905b81822d08d642efde",
"size": "3151",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "novaclient/v2/images.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1104287"
},
{
"name": "Shell",
"bytes": "6319"
}
],
"symlink_target": ""
} |
import httplib
import urllib
import socket
class ToolProtocolHTTP(object):
"""
HTTP/HTTPS client for the TEMA MBT protocol. Communicates with the TEMA test engine.
"""
# is client connected to the server
isConnected = False
def __init__(self):
self.host = "localhost"
self.port = 80
self.php_file = "temagui_http_proxy.php"
socket.setdefaulttimeout(1800)
def __del__(self):
if self.isConnected:
http_params = urllib.urlencode({"User" : self.username, "Message" : 'CLOSE', "Parameter" : 'Empty'})
http_data = self.__requestreply(http_params)
def __requestreply(self,message ):
""" One http(s) request/reply.
message: The message string to send.
Returns: Reply string.
"""
http_data = ''
try:
http_connection = None
if self.protocol == "HTTP":
http_connection = httplib.HTTPConnection(self.host, self.port)
elif self.protocol == "HTTPS":
http_connection = httplib.HTTPSConnection(self.host, self.port)
else:
return ''
http_connection.connect()
http_connection.request("POST", self.php_file, message , self.http_headers)
http_response = http_connection.getresponse()
http_data = http_response.read()
http_response.close()
http_connection.close()
except Exception, e:
http_data = ''
return http_data
def init(self, host, path, port, username, protocol):
""" Initialises connection. Sends HELO.
host: Server hostname.
path: path to http proxy in server.
port: port
username: wwwgui username
protocol: http/https
returns: Reply to ACK. On error returns ''
"""
self.http_headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
self.host = host
self.php_file = "/".join(["",path,"temagui_http_proxy.php"])
self.port = port
self.username = username
self.protocol = protocol.upper()
try:
# SEND HELO
http_params = urllib.urlencode({"User" : username, "Message" : 'HELO', "Parameter" : 'Empty'})
http_data = self.__requestreply(http_params)
self.isConnected = True
lines = http_data.splitlines()
if lines != []:
message = lines.pop()
if message == "CLOSE":
http_data = ''
self.isConnected = False
except Exception, e:
self.isConnected = False
return ''
return http_data
def getKeyword(self):
""" Gets keyword from testserver.
Sends GET to testserver and waits for reply.
Returns: Reply to GET. On error return ''
"""
http_data = ''
try:
http_params = urllib.urlencode({"User" : self.username, "Message" : 'GET', "Parameter" : 'Empty'})
http_data = self.__requestreply(http_params)
lines = http_data.splitlines()
if lines != []:
message = lines.pop()
if message == "CLOSE":
self.isConnected = False
return 'ERROR'
if message == 'ERR':
# TODO: don't send ack.
http_data = self.__requestreply(http_params)
http_params = urllib.urlencode({"User" : self.username, "Message" : 'ACK', "Parameter" : 'Empty'})
http_data = self.__requestreply(http_params)
self.isConnected = False
return 'ERROR'
if not http_data.startswith("ACK"):
print http_data
return "ERROR"
else:
#http_data = http_data.partition("ACK")[2].strip()
http_data = http_data.split("ACK")[1].strip()
if http_data == '' or http_data == None:
http_data = ''
self.isConnected = False
except Exception, e:
self.isConnected = False
return http_data
def putResult(self, result):
""" Puts result to testserver.
result: True/False
returns: Reply message to PUT
"""
try:
if result:
http_params = urllib.urlencode({"User" : self.username, "Message" : 'PUT', "Parameter" : 'true'})
else:
http_params = urllib.urlencode({"User" : self.username, "Message" : 'PUT', "Parameter" : 'false'})
except Exception, e:
self.isConnected = False
return ''
try:
http_data = self.__requestreply(http_params)
lines = http_data.splitlines()
if lines != []:
message = lines.pop()
if message == "CLOSE":
self.isConnected = False
return ''
if http_data == '':
self.isConnected = False
except Exception, e:
self.isConnected = False
http_data = ''
return http_data
def log(self, msg):
""" Sends log message to testserver
returns: Reply to message.
"""
http_data = ''
try:
http_params = urllib.urlencode({"User" : self.username, "Message" : 'LOG', "Parameter" : msg })
http_data = self.__requestreply(http_params)
lines = http_data.splitlines()
if lines != []:
message = lines.pop()
if message == "CLOSE":
self.isConnected = False
return ''
if http_data == '':
self.isConnected = False
except Exception, e:
self.isConnected = False
http_data = ''
return http_data
def bye(self):
""" Sends message BYE to testserver. """
http_data = ''
try:
http_params = urllib.urlencode({"User" : self.username, "Message" : 'BYE', "Parameter" : 'None'})
http_data = self.__requestreply(http_params)
self.isConnected = False
except Exception, e:
self.isConnected = False
return ''
def hasConnection(self):
return self.isConnected
if __name__ == "__main__":
# Simple manual smoke test; the connection values below are placeholders only.
c = ToolProtocolHTTP()
print "init -> " + c.init("localhost", "tema", 80, "testuser", "http")
print "getKeyword -> " + c.getKeyword()
print "putResult -> " + c.putResult(True)
print "getKeyword -> " + c.getKeyword()
print "putResult -> " + c.putResult(False)
print "invalid -> " + c.invalid()
print "bye -> " + c.bye()
| {
"content_hash": "c6a390602ab8ce8e1a6602548dbbdcb9",
"timestamp": "",
"source": "github",
"line_count": 240,
"max_line_length": 118,
"avg_line_length": 30.675,
"alnum_prop": 0.47324096712849767,
"repo_name": "tema-mbt/tema-adapterlib",
"id": "0bb8ab7a7e536f796cfe3d7e218e345017e9a8cb",
"size": "8508",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "adapterlib/ToolProtocolHTTP.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "61160"
}
],
"symlink_target": ""
} |
from CIM15.IEC61970.OperationalLimits.OperationalLimit import OperationalLimit
class CurrentLimit(OperationalLimit):
"""Operational limit on current.Operational limit on current.
"""
def __init__(self, value=0.0, CurrentLimitSet=None, *args, **kw_args):
"""Initialises a new 'CurrentLimit' instance.
@param value: Limit on current flow.
@param CurrentLimitSet:
"""
#: Limit on current flow.
self.value = value
self._CurrentLimitSet = None
self.CurrentLimitSet = CurrentLimitSet
super(CurrentLimit, self).__init__(*args, **kw_args)
_attrs = ["value"]
_attr_types = {"value": float}
_defaults = {"value": 0.0}
_enums = {}
_refs = ["CurrentLimitSet"]
_many_refs = []
def getCurrentLimitSet(self):
return self._CurrentLimitSet
def setCurrentLimitSet(self, value):
if self._CurrentLimitSet is not None:
filtered = [x for x in self.CurrentLimitSet.CurrentLimits if x != self]
self._CurrentLimitSet._CurrentLimits = filtered
self._CurrentLimitSet = value
if self._CurrentLimitSet is not None:
if self not in self._CurrentLimitSet._CurrentLimits:
self._CurrentLimitSet._CurrentLimits.append(self)
CurrentLimitSet = property(getCurrentLimitSet, setCurrentLimitSet)
| {
"content_hash": "43a7fa529965583064e7cbda24ddfe26",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 83,
"avg_line_length": 32.116279069767444,
"alnum_prop": 0.6422881969587255,
"repo_name": "rwl/PyCIM",
"id": "0e1248c2595d3afe98c2f8b5632dfc93285c49dd",
"size": "2481",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CIM15/IEC61970/OperationalLimits/CurrentLimit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7420564"
}
],
"symlink_target": ""
} |
import os
import sys
import unittest
try:
sys.path.insert(0, os.path.abspath('..')) #Works for local
from sheetmaker import html_builder
from data import test_html_constants
except:
sys.path.insert(0, os.path.abspath('.')) #Works for Travis CI
from sheetmaker import html_builder
from data import test_html_constants
class HtmlBuilderTestCase(unittest.TestCase):
"""Tests for `html_builder.py`."""
def test_create_empty_sheet(self):
"""Is empty sheet html created succesfully?"""
self.title = "title"
empty_sheet = html_builder.HtmlSheet(self.title, None)
test_html = empty_sheet.create_empty_sheet()
self.assertEqual(test_html[0], test_html_constants.TEST_EMPTY_SHEET)
self.assertEqual(test_html[1], None)
def test_set_style(self):
"""Is color style created succesfully?"""
self.title = "title"
empty_sheet = html_builder.HtmlSheet(self.title, None)
test_html = empty_sheet.set_style(1)
self.assertEqual(test_html[0], test_html_constants.TEST_COLOR_STYLE)
self.assertEqual(test_html[1], "<!-- css -->")
def test_build_columns(self):
"""Are columns created succesfully?"""
self.title = "title"
empty_sheet = html_builder.HtmlSheet(self.title, None)
test_html = empty_sheet.build_columns(3)
self.assertEqual(test_html[0], test_html_constants.TEST_COLUMNS)
self.assertEqual(test_html[1], "<!-- columns -->")
def test_build_header(self):
"""Is header created succesfully?"""
self.title = "title"
empty_sheet = html_builder.HtmlSheet(self.title, None)
test_html = empty_sheet.build_header("author")
self.assertEqual(test_html[0], test_html_constants.TEST_HEADER)
self.assertEqual(test_html[1], "<!-- header -->")
def test_build_footer(self):
"""Is footer created succesfully?"""
self.title = "test"
empty_sheet = html_builder.HtmlSheet(self.title, None, "author")
test_html = empty_sheet.build_footer("author.png", "http://author.com", "sponsor", "http://sponsor.com")
self.assertEqual(test_html[0], test_html_constants.TEST_FOOTER)
self.assertEqual(test_html[1], "<!-- footer -->")
def test_build_rows_block(self):
"""Is rows block created succesfully?"""
self.title = "test"
empty_sheet = html_builder.HtmlSheet(self.title, None)
test_html = empty_sheet.build_rows_block(1, "block title", 2, ["row1", "row2"])
self.assertEqual(test_html[0], test_html_constants.TEST_ROWS_BLOCK)
self.assertEqual(test_html[1], "<!-- column1 -->")
def test_build_text_block(self):
"""Is text block created succesfully?"""
self.title = "test"
empty_sheet = html_builder.HtmlSheet(self.title, None)
test_html = empty_sheet.build_text_block(2, "block title", "text text text")
self.assertEqual(test_html[0], test_html_constants.TEST_TEXT_BLOCK)
self.assertEqual(test_html[1], "<!-- column2 -->")
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "f69baadccf542f40915c5cf82c448132",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 112,
"avg_line_length": 37.285714285714285,
"alnum_prop": 0.6331417624521073,
"repo_name": "cosme12/cheatsheet-maker",
"id": "15b9e4d15e60bf8d726b8a8005bf1269fb8964cc",
"size": "3132",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_html_builder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "10173"
},
{
"name": "Python",
"bytes": "39464"
}
],
"symlink_target": ""
} |
from sklearn2sql_heroku.tests.classification import generic as class_gen
class_gen.test_model("SVC_poly" , "BinaryClass_100" , "mssql")
| {
"content_hash": "332b6b33443d10efcceff4d308f2547b",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 72,
"avg_line_length": 34.5,
"alnum_prop": 0.7681159420289855,
"repo_name": "antoinecarme/sklearn2sql_heroku",
"id": "e519b3be4097bf01b7f7f33318c4d16875af3ac6",
"size": "138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/classification/BinaryClass_100/ws_BinaryClass_100_SVC_poly_mssql_code_gen.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "507043"
},
{
"name": "Procfile",
"bytes": "37"
},
{
"name": "Python",
"bytes": "1021137"
},
{
"name": "R",
"bytes": "2521"
}
],
"symlink_target": ""
} |
"""
pbm.views
"""
import logging
import json
import pytz
from datetime import datetime, timedelta
from django.db.models import Count, Sum
from django.shortcuts import render_to_response, render
from django.template import RequestContext, loader
from django.http import HttpResponse
from django.template.loader import get_template
from django.core.serializers.json import DjangoJSONEncoder
from .models import DailyLog
from .utils import CATEGORY_LABELS, PLOT_TITLES, PLOT_UNITS, COLORS, \
defaultDatetimeFormat, configure, configure_plot, \
prepare_data_for_piechart, prepare_colors_for_piechart, \
data_plot_groupby_category, plot
from core.common.models import Pandalog
collectorDatetimeFormat = "%Y-%m-%dT%H:%M:%S"
#collectorDateFormat = "%Y-%m-%d"
#collectorDateFormat = collectorDatetimeFormat
collectorTimeFormat = "%Y-%m-%d %H:%M:%S"
_logger = logging.getLogger('bigpandamon-pbm')
def index(request):
"""
index -- pbm's default page
:param request: Django's HTTP request
:type request: django.http.HttpRequest
"""
### configure time interval for queries
startdate, enddate, ndays, errors_GET = configure(request.GET)
### start the query parameters
query={}
### filter logdate__range
query['logdate__range'] = [startdate, enddate]
### filter category__in
query['category__in'] = ['A', 'B', 'C']
### User selected a site/User selected a cloud/Panda Brokerage decision
### Plot 1: [User selected a site/User selected a cloud/Panda Brokerage decision] on Jobs
data01, colors01, title01, unit01 = plot('01', query)
### Plot 2: [User selected a site/User selected a cloud/Panda Brokerage decision] on jobDef
data02, colors02, title02, unit02 = plot('02', query)
### Plot 3: [User selected a site/User selected a cloud/Panda Brokerage decision] on jobSet
data03, colors03, title03, unit03 = plot('03', query)
### User selected a site - Top sites > 1 %
query = {}
### filter logdate__range
query['logdate__range'] = [startdate, enddate]
### filter category == 'A'
query['category'] = 'A'
### Plot 4: [User selected a site] on Jobs - Top sites > 1 %
data04, colors04, title04, unit04 = plot('04', query)
### Plot 5: [User selected a site] on jobDef - Top sites > 1 %
data05, colors05, title05, unit05 = plot('05', query)
### Plot 6: [User selected a site] on jobSet - Top sites > 1 %
data06, colors06, title06, unit06 = plot('06', query)
# ### User selected a site - Per cloud
query = {}
### filter logdate__range
query['logdate__range'] = [startdate, enddate]
### filter category == 'A'
query['category'] = 'A'
### Plot 7: [User selected a site] on Jobs - Per cloud
data07, colors07, title07, unit07 = plot('07', query)
### Plot 8: [User selected a site] on jobDef - Per cloud
data08, colors08, title08, unit08 = plot('08', query)
### Plot 9: [User selected a site] on jobSet - Per cloud
data09, colors09, title09, unit09 = plot('09', query)
### User selected a cloud - Per cloud
query = {}
### filter logdate__range
query['logdate__range'] = [startdate, enddate]
### filter category == 'B'
query['category'] = 'B'
### Plot 13: [User selected a cloud] on Jobs - Per cloud
data13, colors13, title13, unit13 = plot('13', query)
### Plot 14: [User selected a cloud] on jobDef - Per cloud
data14, colors14, title14, unit14 = plot('14', query)
### Plot 15: [User selected a cloud] on jobSet - Per cloud
data15, colors15, title15, unit15 = plot('15', query)
### PanDA Brokerage decision - Top sites with share > 1 %
query = {}
### filter logdate__range
query['logdate__range'] = [startdate, enddate]
### filter category == 'B'
query['category'] = 'C'
### Plot 16: PanDA Brokerage decision on Jobs - Top sites with share > 1 %
data16, colors16, title16, unit16 = plot('16', query)
### Plot 17: PanDA Brokerage decision on JobDefs - Top sites with share > 1 %
data17, colors17, title17, unit17 = plot('17', query)
### PanDA Brokerage decision - Per cloud
query = {}
### filter logdate__range
query['logdate__range'] = [startdate, enddate]
### filter category == 'C'
query['category'] = 'C'
### Plot 18: PanDA Brokerage decision on Jobs - Per cloud
data18, colors18, title18, unit18 = plot('18', query)
### Plot 19: PanDA Brokerage decision on jobDef - Per cloud
data19, colors19, title19, unit19 = plot('19', query)
### User excluded a site on distinct jobSet - With exclude / Without exclude
query = {}
### filter logdate__range
query['logdate__range'] = [startdate, enddate]
### filter category__in
query['category__in'] = ['A', 'B', 'C', 'E']
### Plot 20: User excluded a site on distinct jobSet - With exclude / Without exclude
data20, colors20, title20, unit20 = plot('20', query)
### User excluded a site on jobSet - Top sites with share > 1 %
query = {}
### filter logdate__range
query['logdate__range'] = [startdate, enddate]
### filter category__in
query['category__in'] = ['E']
### Plot 21: User excluded a site on jobSet - Top sites with share > 1 %
data21, colors21, title21, unit21 = plot('21', query)
### Plot 22: User excluded a site on distinct DnUser - Top sites with share > 1 %
data22, colors22, title22, unit22 = plot('22', query)
### User excluded a site on jobSet - Per cloud
query = {}
### filter logdate__range
query['logdate__range'] = [startdate, enddate]
### filter category__in
query['category__in'] = ['E']
### Plot 23: User excluded a site on jobSet - Per cloud
data23, colors23, title23, unit23 = plot('23', query)
### Plot 24: User excluded a site on distinct DnUser - Per cloud
data24, colors24, title24, unit24 = plot('24', query)
### Submitted by Country (from UserDN)
query = {}
### filter logdate__range
query['logdate__range'] = [startdate, enddate]
### filter category__in
query['category__in'] = ['A', 'B', 'C', 'E']
### Plot 25: Jobs submitted by Country
data25, colors25, title25, unit25 = plot('25', query)
### Plot 26: JobDefs submitted by Country
data26, colors26, title26, unit26 = plot('26', query)
### Plot 27: JobSets submitted by Country
data27, colors27, title27, unit27 = plot('27', query)
### set request response data
data = { \
'errors_GET': errors_GET,
'startdate': startdate,
'enddate': enddate,
'ndays': ndays,
'viewParams': {'MON_VO': 'ATLAS'},
'data01': prepare_data_for_piechart(data=data01, unit=unit01),
'title01': title01,
'colors01': colors01,
'data02': prepare_data_for_piechart(data=data02, unit=unit02),
'title02': title02,
'colors02': colors02,
'data03': prepare_data_for_piechart(data=data03, unit=unit03),
'title03': title03,
'colors03': colors03,
'data04': prepare_data_for_piechart(data=data04, unit=unit04, cutoff=1.0),
'title04': title04,
'colors04': colors04,
'data05': prepare_data_for_piechart(data=data05, unit=unit05),
'title05': title05,
'colors05': colors05,
'data06': prepare_data_for_piechart(data=data06, unit=unit06),
'title06': title06,
'colors06': colors06,
'data07': prepare_data_for_piechart(data=data07, unit=unit07),
'title07': title07,
'colors07': colors07,
'data08': prepare_data_for_piechart(data=data08, unit=unit08),
'title08': title08,
'colors08': colors08,
'data09': prepare_data_for_piechart(data=data09, unit=unit09),
'title09': title09,
'colors09': colors09,
'data13': prepare_data_for_piechart(data=data13, unit=unit13),
'title13': title13,
'colors13': colors13,
'data14': prepare_data_for_piechart(data=data14, unit=unit14),
'title14': title14,
'colors14': colors14,
'data15': prepare_data_for_piechart(data=data15, unit=unit15),
'title15': title15,
'colors15': colors15,
'data16': prepare_data_for_piechart(data=data16, unit=unit16, cutoff=1.0),
'title16': title16,
'colors16': colors16,
'data17': prepare_data_for_piechart(data=data17, unit=unit17, cutoff=1.0),
'title17': title17,
'colors17': colors17,
'data18': prepare_data_for_piechart(data=data18, unit=unit18),
'title18': title18,
'colors18': colors18,
'data19': prepare_data_for_piechart(data=data19, unit=unit19),
'title19': title19,
'colors19': colors19,
'data20': prepare_data_for_piechart(data=data20, unit=unit20),
'title20': title20,
'colors20': colors20,
'data21': prepare_data_for_piechart(data=data21, unit=unit21, cutoff=1.0),
'title21': title21,
'colors21': colors21,
'data22': prepare_data_for_piechart(data=data22, unit=unit22, cutoff=1.0),
'title22': title22,
'colors22': colors22,
'data23': prepare_data_for_piechart(data=data23, unit=unit23),
'title23': title23,
'colors23': colors23,
'data24': prepare_data_for_piechart(data=data24, unit=unit24),
'title24': title24,
'colors24': colors24,
'data25': prepare_data_for_piechart(data=data25, unit=unit25, cutoff=1.0),
'title25': title25,
'colors25': colors25,
'data26': prepare_data_for_piechart(data=data26, unit=unit26, cutoff=1.0),
'title26': title26,
'colors26': colors26,
'data27': prepare_data_for_piechart(data=data27, unit=unit27, cutoff=1.0),
'title27': title27,
'colors27': colors27,
}
return render_to_response('pbm/index.html', data, RequestContext(request))
def single_plot(request):
"""
single_plot -- pbm's page to view 1 plot
:param request: Django's HTTP request
:type request: django.http.HttpRequest
"""
### configure time interval for queries
startdate, enddate, ndays, errors_GET = configure(request.GET)
plotid = configure_plot(request.GET)
### start the query parameters
query = {}
### filter logdate__range
query['logdate__range'] = [startdate, enddate]
dataX, colorsX, titleX, unitX = plot(plotid, query)
### set request response data
data = { \
'errors_GET': errors_GET,
'startdate': startdate,
'enddate': enddate,
'ndays': ndays,
'viewParams': {'MON_VO': 'ATLAS'},
'dataX': prepare_data_for_piechart(data=dataX, unit=unitX),
'titleX': titleX,
'colorsX': colorsX,
'plotid': plotid,
}
return render_to_response('pbm/plot.html', data, RequestContext(request))
def single_table(request):
"""
single_table -- pbm's page to view tabular data of a plot
:param request: Django's HTTP request
:type request: django.http.HttpRequest
"""
### configure time interval for queries
startdate, enddate, ndays, errors_GET = configure(request.GET)
plotid = configure_plot(request.GET)
### start the query parameters
query = {}
### filter logdate__range
query['logdate__range'] = [startdate, enddate]
dataX, colorsX, titleX, unitX = plot(plotid, query)
### set request response data
data = { \
'errors_GET': errors_GET,
'startdate': startdate,
'enddate': enddate,
'ndays': ndays,
'viewParams': {'MON_VO': 'ATLAS'},
'dataX': prepare_data_for_piechart(data=dataX, unit=unitX),
'titleX': titleX,
'colorsX': colorsX,
'plotid': plotid,
}
return render_to_response('pbm/table.html', data, RequestContext(request))
def detail(request):
"""
detail -- pbm's page to view tabular data + a plot
:param request: Django's HTTP request
:type request: django.http.HttpRequest
"""
### configure time interval for queries
startdate, enddate, ndays, errors_GET = configure(request.GET)
plotid = configure_plot(request.GET)
### start the query parameters
query = {}
### filter logdate__range
query['logdate__range'] = [startdate, enddate]
dataX, colorsX, titleX, unitX = plot(plotid, query)
### set request response data
data = { \
'errors_GET': errors_GET,
'startdate': startdate,
'enddate': enddate,
'ndays': ndays,
'viewParams': {'MON_VO': 'ATLAS'},
'dataX': prepare_data_for_piechart(data=dataX, unit=unitX),
'titleX': titleX,
'colorsX': colorsX,
'plotid': plotid,
}
return render_to_response('pbm/detail.html', data, RequestContext(request))
def api_pbm_collector(request):
"""
api_pbm_collector -- return json with Pandalog data for specified GET parameters
?type ... Pandalog flavour, e.g. 'pd2p', 'brokerage', 'analy_brokerage'
?nhours ... date range of how many hours in past
?starttime ... datetime from, format %Y-%m-%dT%H:%M:%S
?endtime ... datetime to, format %Y-%m-%dT%H:%M:%S
nhours has higher priority than starttime, endtime
if nhours is specified, starttime&endtime are not taken into account.
:param request: Django's HTTP request
:type request: django.http.HttpRequest
"""
errors = {}
warnings = {}
### GET parameters
GET_parameters = {}
for p in request.GET:
GET_parameters[p] = str(request.GET[p])
### check that all expected parameters are in URL
expectedFields = ['type']
for expectedField in expectedFields:
try:
if len(request.GET[expectedField]) < 1:
msg = 'Missing expected GET parameter %s. ' % expectedField
if 'missingparameter' not in errors.keys():
errors['missingparameter'] = ''
errors['missingparameter'] += msg
except:
msg = 'Missing expected GET parameter %s. ' % expectedField
_logger.error(msg)
if 'missingparameter' not in errors.keys():
errors['missingparameter'] = ''
errors['missingparameter'] += msg
### time range from request.GET
optionalFields = ['starttime', 'endtime', 'nhours']
for optionalField in optionalFields:
try:
if len(request.GET[optionalField]) < 1:
msg = 'Missing optional GET parameter %s. ' % optionalField
if 'missingoptionalparameter' not in warnings.keys():
warnings['missingoptionalparameter'] = ''
warnings['missingoptionalparameter'] += msg
except:
msg = 'Missing optional GET parameter %s. ' % optionalField
_logger.warning(msg)
if 'missingoptionalparameter' not in warnings.keys():
warnings['missingoptionalparameter'] = ''
warnings['missingoptionalparameter'] += msg
### get values for optional timerange parameters
nhours = 6
starttime = None
endtime = None
startdate = None
enddate = None
if 'nhours' in request.GET:
try:
nhours = int(request.GET['nhours'])
except:
nhours = 6
starttime = (datetime.utcnow() - timedelta(hours=nhours)).strftime(collectorTimeFormat)
endtime = datetime.utcnow().strftime(collectorTimeFormat)
startdate = starttime
enddate = endtime
else:
if 'starttime' in request.GET:
try:
starttime = datetime.strptime(request.GET['starttime'], collectorDatetimeFormat).strftime(collectorTimeFormat)
startdate = starttime
except:
starttime = (datetime.utcnow() - timedelta(hours=nhours)).strftime(collectorTimeFormat)
startdate = starttime
else:
starttime = (datetime.utcnow() - timedelta(hours=nhours)).strftime(collectorTimeFormat)
startdate = starttime
if 'endtime' in request.GET:
try:
endtime = datetime.strptime(request.GET['endtime'], collectorDatetimeFormat).strftime(collectorTimeFormat)
enddate = endtime
except:
endtime = datetime.utcnow().strftime(collectorTimeFormat)
enddate = endtime
else:
endtime = datetime.utcnow().strftime(collectorTimeFormat)
enddate = endtime
### if all expected GET parameters are present, execute log lookup
query = {}
logtype = None
try:
if 'type' in request.GET and len(request.GET['type']):
logtype = request.GET['type']
except:
logtype = None
query['type'] = logtype
query['bintime__range'] = [startdate, enddate]
query['time__range'] = [starttime, endtime]
log_records = []
try:
log_records = Pandalog.objects.filter(**query).values()
except:
pass
frm_log_records = []
if not len(log_records):
if 'lookup' not in errors:
errors['lookup'] = ''
errors['lookup'] += 'Log record for parameters has not been found. query=%s' % query
### return the json data
else:
frm_log_records = [ {'name': x['name'], \
'bintime': x['bintime'].isoformat(), \
'module': x['module'], \
'loguser': x['loguser'], \
'type': x['type'], \
'pid': x['pid'], \
'loglevel': x['loglevel'], \
'levelname': x['levelname'], \
'filename': x['filename'], \
'line': x['line'], \
'time': x['time'], \
'message': x['message'] \
} \
for x in log_records ]
data = { \
'timestamp': datetime.utcnow().isoformat(), \
'errors': errors, \
'warnings': warnings, \
'query': query, \
'GET_parameters': GET_parameters, \
'nrecords': len(log_records), \
'data': frm_log_records \
}
if not len(errors):
### set request response data
# return render_to_response('pbm/api_pbm_collector.html', {'data': data}, RequestContext(request))
return HttpResponse(json.dumps(data), mimetype='application/json')
elif 'type' not in request.GET.keys() or logtype == None:
# t = get_template('pbm/api_pbm_collector.html')
# context = RequestContext(request, {'data':data})
# return HttpResponse(t.render(context), status=400)
return HttpResponse(json.dumps(data), mimetype='application/json', status=400)
elif not len(log_records):
# t = get_template('pbm/api_pbm_collector.html')
# context = RequestContext(request, {'data':data})
# return HttpResponse(t.render(context), status=404)
return HttpResponse(json.dumps(data), mimetype='application/json', status=404)
else:
# t = get_template('pbm/api_pbm_collector.html')
# context = RequestContext(request, {'data':data})
# return HttpResponse(t.render(context), status=400)
return HttpResponse(json.dumps(data), mimetype='application/json', status=400)
| {
"content_hash": "8b6d58d89887237ad6a2a0da60de975c",
"timestamp": "",
"source": "github",
"line_count": 554,
"max_line_length": 126,
"avg_line_length": 35.72924187725632,
"alnum_prop": 0.5939678690512277,
"repo_name": "SergeyPod/tmpCore",
"id": "9280ce71a484221588301ef4ea7b1dfe942f2136",
"size": "19794",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "core/pbm/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "406916"
},
{
"name": "HTML",
"bytes": "372938"
},
{
"name": "JavaScript",
"bytes": "1045553"
},
{
"name": "Python",
"bytes": "777230"
}
],
"symlink_target": ""
} |
"""AR-specific Form helpers."""
from __future__ import unicode_literals
from django.forms import ValidationError
from django.forms.fields import CharField, RegexField, Select
from django.utils.translation import ugettext_lazy as _
from localflavor.compat import EmptyValueCompatMixin
from .ar_provinces import PROVINCE_CHOICES
class ARProvinceSelect(Select):
"""A Select widget that uses a list of Argentinean provinces/autonomous cities as its choices."""
def __init__(self, attrs=None):
super(ARProvinceSelect, self).__init__(attrs, choices=PROVINCE_CHOICES)
class ARPostalCodeField(EmptyValueCompatMixin, RegexField):
"""
A field that accepts a 'classic' NNNN Postal Code or a CPA.
See:
http://www.correoargentino.com.ar/cpa/que_es
http://www.correoargentino.com.ar/cpa/como_escribirlo
"""
default_error_messages = {
'invalid': _("Enter a postal code in the format NNNN or ANNNNAAA."),
}
def __init__(self, max_length=8, min_length=4, *args, **kwargs):
super(ARPostalCodeField, self).__init__(r'^\d{4}$|^[A-HJ-NP-Za-hj-np-z]\d{4}\D{3}$',
max_length, min_length,
*args, **kwargs)
def clean(self, value):
value = super(ARPostalCodeField, self).clean(value)
if value in self.empty_values:
return self.empty_value
if len(value) not in (4, 8):
raise ValidationError(self.error_messages['invalid'])
if len(value) == 8:
return '%s%s%s' % (value[0].upper(), value[1:5], value[5:].upper())
return value
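# Illustrative results of clean(): '1425' is returned as-is, while a CPA such as
# 'c1425dqb' is normalised to 'C1425DQB'.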
class ARDNIField(EmptyValueCompatMixin, CharField):
"""A field that validates 'Documento Nacional de Identidad' (DNI) numbers."""
default_error_messages = {
'invalid': _("This field requires only numbers."),
'max_digits': _("This field requires 7 or 8 digits."),
}
def __init__(self, max_length=10, min_length=7, *args, **kwargs):
super(ARDNIField, self).__init__(max_length, min_length,
*args, **kwargs)
def clean(self, value):
"""Value can be a string either in the [X]X.XXX.XXX or [X]XXXXXXX formats."""
value = super(ARDNIField, self).clean(value)
if value in self.empty_values:
return self.empty_value
if not value.isdigit():
value = value.replace('.', '')
if not value.isdigit():
raise ValidationError(self.error_messages['invalid'])
if len(value) not in (7, 8):
raise ValidationError(self.error_messages['max_digits'])
return value
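# Illustrative results of clean(): '20.123.456' and '20123456' both normalise to '20123456'.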
class ARCUITField(EmptyValueCompatMixin, RegexField):
"""
This field validates a CUIT (Código Único de Identificación Tributaria).
A CUIT is of the form XX-XXXXXXXX-V. The last digit is a check digit.
More info:
http://es.wikipedia.org/wiki/Clave_%C3%9Anica_de_Identificaci%C3%B3n_Tributaria
English info:
http://www.justlanded.com/english/Argentina/Argentina-Guide/Visas-Permits/Other-Legal-Documents
"""
default_error_messages = {
'invalid': _('Enter a valid CUIT in XX-XXXXXXXX-X or XXXXXXXXXXXX format.'),
'checksum': _("Invalid CUIT."),
'legal_type': _('Invalid legal type. Type must be 27, 20, 30, 23, 24 or 33.'),
}
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
super(ARCUITField, self).__init__(r'^\d{2}-?\d{8}-?\d$',
max_length, min_length, *args, **kwargs)
def clean(self, value):
"""Value can be either a string in the format XX-XXXXXXXX-X or an 11-digit number."""
value = super(ARCUITField, self).clean(value)
if value in self.empty_values:
return self.empty_value
value, cd = self._canon(value)
if not value[:2] in ['27', '20', '30', '23', '24', '33']:
raise ValidationError(self.error_messages['legal_type'])
if self._calc_cd(value) != cd:
raise ValidationError(self.error_messages['checksum'])
return self._format(value, cd)
def _canon(self, cuit):
cuit = cuit.replace('-', '')
return cuit[:-1], cuit[-1]
def _calc_cd(self, cuit):
# Calculation code based on:
# http://es.wikipedia.org/wiki/C%C3%B3digo_%C3%9Anico_de_Identificaci%C3%B3n_Tributaria
mults = (5, 4, 3, 2, 7, 6, 5, 4, 3, 2)
tmp = sum([m * int(cuit[idx]) for idx, m in enumerate(mults)])
result = 11 - (tmp % 11)
if result == 11:
result = 0
elif result == 10:
result = 9
return str(result)
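# Worked example (digits chosen for illustration only): for cuit '2012345678' the weighted
# sum is 2*5 + 0*4 + 1*3 + 2*2 + 3*7 + 4*6 + 5*5 + 6*4 + 7*3 + 8*2 = 148, 148 % 11 == 5,
# so the check digit is 11 - 5 == 6 and the formatted value is '20-12345678-6'.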
def _format(self, cuit, check_digit=None):
if check_digit is None:
check_digit = cuit[-1]
cuit = cuit[:-1]
return '%s-%s-%s' % (cuit[:2], cuit[2:], check_digit)
class ARCBUField(EmptyValueCompatMixin, CharField):
"""
This field validates a CBU (Clave Bancaria Uniforme).
A CBU is a 22-digit number. The first 8 digits denote bank and branch number,
plus a verifying digit. The remaining 14 digits denote an account number, plus a verifying digit.
More info:
https://es.wikipedia.org/wiki/Clave_Bancaria_Uniforme
.. versionadded:: 1.3
"""
default_error_messages = {
'invalid': _('Enter a valid CBU in XXXXXXXXXXXXXXXXXXXXXX format.'),
'max_length': _('CBU must be exactly 22 digits long.'),
'min_length': _('CBU must be exactly 22 digits long.'),
'checksum': _('Invalid CBU.'),
}
def __init__(self, *args, **kwargs):
kwargs['min_length'] = kwargs['max_length'] = 22
super(ARCBUField, self).__init__(*args, **kwargs)
def _valid_block(self, block, ponderator):
number = block[:-1]
v_digit = int(block[-1])
block_sum = sum(x * int(y) for x, y in zip(ponderator, number))
remainder = block_sum % 10
# The verification digit and the result of the calculation must be the same.
# In the edge case that the remainder is 0, the verification digit must be 0 too.
if remainder == 0:
return v_digit == remainder
return v_digit == (10 - remainder)
def _checksum(self, value):
block_1 = value[0:8]
block_2 = value[8:22]
ponderator_1 = (9, 7, 1, 3, 9, 7, 1, 3)
ponderator_2 = (3, 9, 7, 1, 3, 9, 7, 1, 3, 9, 7, 1, 3)
is_valid_1 = self._valid_block(block_1, ponderator_1)
is_valid_2 = self._valid_block(block_2, ponderator_2)
return is_valid_1 and is_valid_2
def clean(self, value):
"""Value must be a 22 digits long number."""
value = super(ARCBUField, self).clean(value)
if value in self.empty_values:
return self.empty_value
if not value.isdigit():
raise ValidationError(self.error_messages['invalid'])
if not self._checksum(value):
raise ValidationError(self.error_messages['checksum'])
return value
| {
"content_hash": "ac8e011f58d79609e13f5ea8f73fb582",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 101,
"avg_line_length": 36.7720207253886,
"alnum_prop": 0.5956037762434832,
"repo_name": "thor/django-localflavor",
"id": "f30cb9200d6435d127f844a260511591f478165d",
"size": "7124",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "localflavor/ar/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "924448"
}
],
"symlink_target": ""
} |
"""
Kubernetes Job wrapper for Luigi.
From the Kubernetes website:
Kubernetes is an open-source system for automating deployment, scaling,
and management of containerized applications.
For more information about Kubernetes Jobs: http://kubernetes.io/docs/user-guide/jobs/
Requires:
- pykube: ``pip install pykube``
Written and maintained by Marco Capuccini (@mcapuccini).
"""
import logging
import time
import uuid
from datetime import datetime
import luigi
logger = logging.getLogger('luigi-interface')
try:
from pykube.config import KubeConfig
from pykube.http import HTTPClient
from pykube.objects import Job, Pod
except ImportError:
logger.warning('pykube is not installed. KubernetesJobTask requires pykube.')
class kubernetes(luigi.Config):
auth_method = luigi.Parameter(
default="kubeconfig",
description="Authorization method to access the cluster")
kubeconfig_path = luigi.Parameter(
default="~/.kube/config",
description="Path to kubeconfig file for cluster authentication")
max_retrials = luigi.IntParameter(
default=0,
description="Max retrials in event of job failure")
kubernetes_namespace = luigi.OptionalParameter(
default=None,
description="K8s namespace in which the job will run")
class KubernetesJobTask(luigi.Task):
__POLL_TIME = 5 # see __track_job
_kubernetes_config = None # Needs to be loaded at runtime
def _init_kubernetes(self):
self.__logger = logger
self.__logger.debug("Kubernetes auth method: " + self.auth_method)
if self.auth_method == "kubeconfig":
self.__kube_api = HTTPClient(KubeConfig.from_file(self.kubeconfig_path))
elif self.auth_method == "service-account":
self.__kube_api = HTTPClient(KubeConfig.from_service_account())
else:
raise ValueError("Illegal auth_method")
self.job_uuid = str(uuid.uuid4().hex)
now = datetime.utcnow()
self.uu_name = "%s-%s-%s" % (self.name, now.strftime('%Y%m%d%H%M%S'), self.job_uuid[:16])
@property
def auth_method(self):
"""
This can be set to ``kubeconfig`` or ``service-account``.
It defaults to ``kubeconfig``.
For more details, please refer to:
- kubeconfig: http://kubernetes.io/docs/user-guide/kubeconfig-file
- service-account: http://kubernetes.io/docs/user-guide/service-accounts
"""
return self.kubernetes_config.auth_method
@property
def kubeconfig_path(self):
"""
Path to kubeconfig file used for cluster authentication.
It defaults to "~/.kube/config", which is the default location
when using minikube (http://kubernetes.io/docs/getting-started-guides/minikube).
When auth_method is ``service-account`` this property is ignored.
**WARNING**: For Python versions < 3.5 kubeconfig must point to a Kubernetes API
hostname, and NOT to an IP address.
For more details, please refer to:
http://kubernetes.io/docs/user-guide/kubeconfig-file
"""
return self.kubernetes_config.kubeconfig_path
@property
def kubernetes_namespace(self):
"""
Namespace in Kubernetes where the job will run.
        It defaults to the default namespace in Kubernetes.
For more details, please refer to:
https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
"""
return self.kubernetes_config.kubernetes_namespace
@property
def name(self):
"""
        A name for this job. This task will automatically append a UUID to the
        name before submitting it to Kubernetes.
"""
raise NotImplementedError("subclass must define name")
@property
def labels(self):
"""
Return custom labels for kubernetes job.
example::
``{"run_dt": datetime.date.today().strftime('%F')}``
"""
return {}
@property
def spec_schema(self):
"""
Kubernetes Job spec schema in JSON format, an example follows.
.. code-block:: javascript
{
"containers": [{
"name": "pi",
"image": "perl",
"command": ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
}],
"restartPolicy": "Never"
}
**restartPolicy**
- If restartPolicy is not defined, it will be set to "Never" by default.
- **Warning**: restartPolicy=OnFailure will bypass max_retrials, and restart
the container until success, with the risk of blocking the Luigi task.
        For more information, please refer to:
http://kubernetes.io/docs/user-guide/pods/multi-container/#the-spec-schema
"""
raise NotImplementedError("subclass must define spec_schema")
@property
def max_retrials(self):
"""
Maximum number of retrials in case of failure.
"""
return self.kubernetes_config.max_retrials
@property
def backoff_limit(self):
"""
Maximum number of retries before considering the job as failed.
See: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#pod-backoff-failure-policy
"""
return 6
@property
def delete_on_success(self):
"""
Delete the Kubernetes workload if the job has ended successfully.
"""
return True
@property
def print_pod_logs_on_exit(self):
"""
Fetch and print the pod logs once the job is completed.
"""
return False
@property
def active_deadline_seconds(self):
"""
Time allowed to successfully schedule pods.
See: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#job-termination-and-cleanup
"""
return None
@property
def kubernetes_config(self):
if not self._kubernetes_config:
self._kubernetes_config = kubernetes()
return self._kubernetes_config
def __track_job(self):
"""Poll job status while active"""
while not self.__verify_job_has_started():
time.sleep(self.__POLL_TIME)
self.__logger.debug("Waiting for Kubernetes job " + self.uu_name + " to start")
self.__print_kubectl_hints()
status = self.__get_job_status()
while status == "RUNNING":
self.__logger.debug("Kubernetes job " + self.uu_name + " is running")
time.sleep(self.__POLL_TIME)
status = self.__get_job_status()
assert status != "FAILED", "Kubernetes job " + self.uu_name + " failed"
# status == "SUCCEEDED"
self.__logger.info("Kubernetes job " + self.uu_name + " succeeded")
self.signal_complete()
def signal_complete(self):
"""Signal job completion for scheduler and dependent tasks.
Touching a system file is an easy way to signal completion. example::
.. code-block:: python
with self.output().open('w') as output_file:
output_file.write('')
"""
pass
def __get_pods(self):
pod_objs = Pod.objects(self.__kube_api, namespace=self.kubernetes_namespace) \
.filter(selector="job-name=" + self.uu_name) \
.response['items']
return [Pod(self.__kube_api, p) for p in pod_objs]
def __get_job(self):
jobs = Job.objects(self.__kube_api, namespace=self.kubernetes_namespace) \
.filter(selector="luigi_task_id=" + self.job_uuid) \
.response['items']
assert len(jobs) == 1, "Kubernetes job " + self.uu_name + " not found"
return Job(self.__kube_api, jobs[0])
def __print_pod_logs(self):
for pod in self.__get_pods():
logs = pod.logs(timestamps=True).strip()
self.__logger.info("Fetching logs from " + pod.name)
if len(logs) > 0:
for l in logs.split('\n'):
self.__logger.info(l)
def __print_kubectl_hints(self):
self.__logger.info("To stream Pod logs, use:")
for pod in self.__get_pods():
self.__logger.info("`kubectl logs -f pod/%s`" % pod.name)
def __verify_job_has_started(self):
"""Asserts that the job has successfully started"""
# Verify that the job started
self.__get_job()
# Verify that the pod started
pods = self.__get_pods()
assert len(pods) > 0, "No pod scheduled by " + self.uu_name
for pod in pods:
status = pod.obj['status']
for cont_stats in status.get('containerStatuses', []):
if 'terminated' in cont_stats['state']:
t = cont_stats['state']['terminated']
err_msg = "Pod %s %s (exit code %d). Logs: `kubectl logs pod/%s`" % (
pod.name, t['reason'], t['exitCode'], pod.name)
assert t['exitCode'] == 0, err_msg
if 'waiting' in cont_stats['state']:
wr = cont_stats['state']['waiting']['reason']
assert wr == 'ContainerCreating', "Pod %s %s. Logs: `kubectl logs pod/%s`" % (
pod.name, wr, pod.name)
for cond in status.get('conditions', []):
if 'message' in cond:
if cond['reason'] == 'ContainersNotReady':
return False
assert cond['status'] != 'False', \
"[ERROR] %s - %s" % (cond['reason'], cond['message'])
return True
def __get_job_status(self):
"""Return the Kubernetes job status"""
# Figure out status and return it
job = self.__get_job()
if "succeeded" in job.obj["status"] and job.obj["status"]["succeeded"] > 0:
job.scale(replicas=0)
if self.print_pod_logs_on_exit:
self.__print_pod_logs()
if self.delete_on_success:
self.__delete_job_cascade(job)
return "SUCCEEDED"
if "failed" in job.obj["status"]:
failed_cnt = job.obj["status"]["failed"]
self.__logger.debug("Kubernetes job " + self.uu_name
+ " status.failed: " + str(failed_cnt))
if self.print_pod_logs_on_exit:
self.__print_pod_logs()
if failed_cnt > self.max_retrials:
job.scale(replicas=0) # avoid more retrials
return "FAILED"
return "RUNNING"
def __delete_job_cascade(self, job):
delete_options_cascade = {
"kind": "DeleteOptions",
"apiVersion": "v1",
"propagationPolicy": "Background"
}
r = self.__kube_api.delete(json=delete_options_cascade, **job.api_kwargs())
if r.status_code != 200:
self.__kube_api.raise_for_status(r)
def run(self):
self._init_kubernetes()
# Render job
job_json = {
"apiVersion": "batch/v1",
"kind": "Job",
"metadata": {
"name": self.uu_name,
"labels": {
"spawned_by": "luigi",
"luigi_task_id": self.job_uuid
}
},
"spec": {
"backoffLimit": self.backoff_limit,
"template": {
"metadata": {
"name": self.uu_name
},
"spec": self.spec_schema
}
}
}
if self.kubernetes_namespace is not None:
job_json['metadata']['namespace'] = self.kubernetes_namespace
if self.active_deadline_seconds is not None:
job_json['spec']['activeDeadlineSeconds'] = \
self.active_deadline_seconds
# Update user labels
job_json['metadata']['labels'].update(self.labels)
# Add default restartPolicy if not specified
if "restartPolicy" not in self.spec_schema:
job_json["spec"]["template"]["spec"]["restartPolicy"] = "Never"
# Submit job
self.__logger.info("Submitting Kubernetes Job: " + self.uu_name)
job = Job(self.__kube_api, job_json)
job.create()
# Track the Job (wait while active)
self.__logger.info("Start tracking Kubernetes Job: " + self.uu_name)
self.__track_job()
def output(self):
"""
An output target is necessary for checking job completion unless
an alternative complete method is defined.
Example::
return luigi.LocalTarget(os.path.join('/tmp', 'example'))
"""
pass
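# A minimal, hypothetical subclass sketch (class name, image and output path
# are illustrative assumptions, not part of this module): it runs a single
# perl container and marks the Luigi task complete by touching a local file.
class _ExamplePerlPi(KubernetesJobTask):
    name = "pi"
    max_retrials = 3
    spec_schema = {
        "containers": [{
            "name": "pi",
            "image": "perl",
            "command": ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
        }]
    }
    def output(self):
        return luigi.LocalTarget("/tmp/example-perl-pi")
    def signal_complete(self):
        # touching the output target is enough for Luigi to consider it done
        with self.output().open('w') as output_file:
            output_file.write('')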
| {
"content_hash": "857939ceccdb8a95ccec054e5648008d",
"timestamp": "",
"source": "github",
"line_count": 365,
"max_line_length": 122,
"avg_line_length": 35.224657534246575,
"alnum_prop": 0.5704285603173369,
"repo_name": "soxofaan/luigi",
"id": "1b8bd90fe658dad668231655b17b9035c76dd7ed",
"size": "13463",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "luigi/contrib/kubernetes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5051"
},
{
"name": "HTML",
"bytes": "41976"
},
{
"name": "JavaScript",
"bytes": "171172"
},
{
"name": "Python",
"bytes": "2072173"
},
{
"name": "Shell",
"bytes": "2901"
}
],
"symlink_target": ""
} |
"""
sphinx.pycode
~~~~~~~~~~~~~
    Utilities for parsing and analyzing Python code.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import re
import sys
from os import path
from six import iteritems, text_type, BytesIO, StringIO
from sphinx import package_dir
from sphinx.errors import PycodeError
from sphinx.pycode import nodes
from sphinx.pycode.pgen2 import driver, token, tokenize, parse, literals
from sphinx.util import get_module_source, detect_encoding
from sphinx.util.pycompat import TextIOWrapper
from sphinx.util.docstrings import prepare_docstring, prepare_commentdoc
# load the Python grammar
_grammarfile = path.join(package_dir, 'pycode',
'Grammar-py%d.txt' % sys.version_info[0])
pygrammar = driver.load_grammar(_grammarfile)
pydriver = driver.Driver(pygrammar, convert=nodes.convert)
# an object with attributes corresponding to token and symbol names
class sym:
pass
for k, v in iteritems(pygrammar.symbol2number):
setattr(sym, k, v)
for k, v in iteritems(token.tok_name):
setattr(sym, v, k)
# a dict mapping terminal and nonterminal numbers to their names
number2name = pygrammar.number2symbol.copy()
number2name.update(token.tok_name)
_eq = nodes.Leaf(token.EQUAL, '=')
emptyline_re = re.compile(r'^\s*(#.*)?$')
class AttrDocVisitor(nodes.NodeVisitor):
"""
Visitor that collects docstrings for attribute assignments on toplevel and
in classes (class attributes and attributes set in __init__).
The docstrings can either be in special '#:' comments before the assignment
or in a docstring after it.
"""
def init(self, scope, encoding):
self.scope = scope
self.in_init = 0
self.encoding = encoding
self.namespace = []
self.collected = {}
self.tagnumber = 0
self.tagorder = {}
def add_tag(self, name):
name = '.'.join(self.namespace + [name])
self.tagorder[name] = self.tagnumber
self.tagnumber += 1
def visit_classdef(self, node):
"""Visit a class."""
self.add_tag(node[1].value)
self.namespace.append(node[1].value)
self.generic_visit(node)
self.namespace.pop()
def visit_funcdef(self, node):
"""Visit a function (or method)."""
# usually, don't descend into functions -- nothing interesting there
self.add_tag(node[1].value)
if node[1].value == '__init__':
# however, collect attributes set in __init__ methods
self.in_init += 1
self.generic_visit(node)
self.in_init -= 1
def visit_expr_stmt(self, node):
"""Visit an assignment which may have a special comment before (or
after) it.
"""
if _eq not in node.children:
# not an assignment (we don't care for augmented assignments)
return
# look *after* the node; there may be a comment prefixing the NEWLINE
# of the simple_stmt
parent = node.parent
idx = parent.children.index(node) + 1
while idx < len(parent):
if parent[idx].type == sym.SEMI:
idx += 1
continue # skip over semicolon
if parent[idx].type == sym.NEWLINE:
prefix = parent[idx].get_prefix()
if not isinstance(prefix, text_type):
prefix = prefix.decode(self.encoding)
docstring = prepare_commentdoc(prefix)
if docstring:
self.add_docstring(node, docstring)
return # don't allow docstrings both before and after
break
# now look *before* the node
pnode = node[0]
prefix = pnode.get_prefix()
# if the assignment is the first statement on a new indentation
# level, its preceding whitespace and comments are not assigned
# to that token, but the first INDENT or DEDENT token
while not prefix:
pnode = pnode.get_prev_leaf()
if not pnode or pnode.type not in (token.INDENT, token.DEDENT):
break
prefix = pnode.get_prefix()
if not isinstance(prefix, text_type):
prefix = prefix.decode(self.encoding)
docstring = prepare_commentdoc(prefix)
self.add_docstring(node, docstring)
def visit_simple_stmt(self, node):
"""Visit a docstring statement which may have an assignment before."""
if node[0].type != token.STRING:
# not a docstring; but still need to visit children
return self.generic_visit(node)
prev = node.get_prev_sibling()
if not prev:
return
if prev.type == sym.simple_stmt and \
prev[0].type == sym.expr_stmt and _eq in prev[0].children:
# need to "eval" the string because it's returned in its
# original form
docstring = literals.evalString(node[0].value, self.encoding)
docstring = prepare_docstring(docstring)
self.add_docstring(prev[0], docstring)
def add_docstring(self, node, docstring):
# add an item for each assignment target
for i in range(0, len(node) - 1, 2):
target = node[i]
if self.in_init and self.number2name[target.type] == 'power':
# maybe an attribute assignment -- check necessary conditions
if ( # node must have two children
len(target) != 2 or
# first child must be "self"
target[0].type != token.NAME or target[0].value != 'self' or
# second child must be a "trailer" with two children
self.number2name[target[1].type] != 'trailer' or
len(target[1]) != 2 or
# first child must be a dot, second child a name
target[1][0].type != token.DOT or
target[1][1].type != token.NAME):
continue
name = target[1][1].value
elif target.type != token.NAME:
# don't care about other complex targets
continue
else:
name = target.value
self.add_tag(name)
if docstring:
namespace = '.'.join(self.namespace)
if namespace.startswith(self.scope):
self.collected[namespace, name] = docstring
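# Illustrative only (an assumption, not part of sphinx): the two attribute
# documentation forms collected by AttrDocVisitor look like this in source:
_EXAMPLE_ATTR_DOC_SOURCE = '''\
#: docstring for ``limit`` given in a special comment before the assignment
limit = 10
timeout = 30
"""Docstring for ``timeout`` given in a string right after the assignment."""
'''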
class ModuleAnalyzer(object):
# cache for analyzer objects -- caches both by module and file name
cache = {}
@classmethod
def for_string(cls, string, modname, srcname='<string>'):
if isinstance(string, bytes):
return cls(BytesIO(string), modname, srcname)
return cls(StringIO(string), modname, srcname, decoded=True)
@classmethod
def for_file(cls, filename, modname):
if ('file', filename) in cls.cache:
return cls.cache['file', filename]
try:
fileobj = open(filename, 'rb')
except Exception as err:
raise PycodeError('error opening %r' % filename, err)
obj = cls(fileobj, modname, filename)
cls.cache['file', filename] = obj
return obj
@classmethod
def for_module(cls, modname):
if ('module', modname) in cls.cache:
entry = cls.cache['module', modname]
if isinstance(entry, PycodeError):
raise entry
return entry
try:
type, source = get_module_source(modname)
if type == 'string':
obj = cls.for_string(source, modname)
else:
obj = cls.for_file(source, modname)
except PycodeError as err:
cls.cache['module', modname] = err
raise
cls.cache['module', modname] = obj
return obj
def __init__(self, source, modname, srcname, decoded=False):
# name of the module
self.modname = modname
# name of the source file
self.srcname = srcname
# file-like object yielding source lines
self.source = source
# cache the source code as well
pos = self.source.tell()
if not decoded:
self.encoding = detect_encoding(self.source.readline)
self.source.seek(pos)
self.code = self.source.read().decode(self.encoding)
self.source.seek(pos)
self.source = TextIOWrapper(self.source, self.encoding)
else:
self.encoding = None
self.code = self.source.read()
self.source.seek(pos)
# will be filled by tokenize()
self.tokens = None
# will be filled by parse()
self.parsetree = None
# will be filled by find_attr_docs()
self.attr_docs = None
self.tagorder = None
# will be filled by find_tags()
self.tags = None
def tokenize(self):
"""Generate tokens from the source."""
if self.tokens is not None:
return
try:
self.tokens = list(tokenize.generate_tokens(self.source.readline))
except tokenize.TokenError as err:
raise PycodeError('tokenizing failed', err)
self.source.close()
def parse(self):
"""Parse the generated source tokens."""
if self.parsetree is not None:
return
self.tokenize()
try:
self.parsetree = pydriver.parse_tokens(self.tokens)
except parse.ParseError as err:
raise PycodeError('parsing failed', err)
def find_attr_docs(self, scope=''):
"""Find class and module-level attributes and their documentation."""
if self.attr_docs is not None:
return self.attr_docs
self.parse()
attr_visitor = AttrDocVisitor(number2name, scope, self.encoding)
attr_visitor.visit(self.parsetree)
self.attr_docs = attr_visitor.collected
self.tagorder = attr_visitor.tagorder
# now that we found everything we could in the tree, throw it away
# (it takes quite a bit of memory for large modules)
self.parsetree = None
return attr_visitor.collected
def find_tags(self):
"""Find class, function and method definitions and their location."""
if self.tags is not None:
return self.tags
self.tokenize()
result = {}
namespace = []
stack = []
indent = 0
defline = False
expect_indent = False
emptylines = 0
def tokeniter(ignore = (token.COMMENT,)):
for tokentup in self.tokens:
if tokentup[0] not in ignore:
yield tokentup
tokeniter = tokeniter()
for type, tok, spos, epos, line in tokeniter:
if expect_indent and type != token.NL:
if type != token.INDENT:
# no suite -- one-line definition
assert stack
dtype, fullname, startline, _ = stack.pop()
endline = epos[0]
namespace.pop()
result[fullname] = (dtype, startline, endline - emptylines)
expect_indent = False
if tok in ('def', 'class'):
name = next(tokeniter)[1]
namespace.append(name)
fullname = '.'.join(namespace)
stack.append((tok, fullname, spos[0], indent))
defline = True
elif type == token.INDENT:
expect_indent = False
indent += 1
elif type == token.DEDENT:
indent -= 1
# if the stacklevel is the same as it was before the last
# def/class block, this dedent closes that block
if stack and indent == stack[-1][3]:
dtype, fullname, startline, _ = stack.pop()
endline = spos[0]
namespace.pop()
result[fullname] = (dtype, startline, endline - emptylines)
elif type == token.NEWLINE:
# if this line contained a definition, expect an INDENT
# to start the suite; if there is no such INDENT
# it's a one-line definition
if defline:
defline = False
expect_indent = True
emptylines = 0
elif type == token.NL:
# count up if line is empty or comment only
if emptyline_re.match(line):
emptylines += 1
else:
emptylines = 0
self.tags = result
return result
if __name__ == '__main__':
import time
import pprint
x0 = time.time()
# ma = ModuleAnalyzer.for_file(__file__.rstrip('c'), 'sphinx.builders.html')
ma = ModuleAnalyzer.for_file('sphinx/environment.py',
'sphinx.environment')
ma.tokenize()
x1 = time.time()
ma.parse()
x2 = time.time()
# for (ns, name), doc in iteritems(ma.find_attr_docs()):
# print '>>', ns, name
# print '\n'.join(doc)
pprint.pprint(ma.find_tags())
x3 = time.time()
# print nodes.nice_repr(ma.parsetree, number2name)
print("tokenizing %.4f, parsing %.4f, finding %.4f" % (x1-x0, x2-x1, x3-x2))
| {
"content_hash": "c186739c2964a3b4236e1bc1cec3476e",
"timestamp": "",
"source": "github",
"line_count": 364,
"max_line_length": 84,
"avg_line_length": 37.26648351648352,
"alnum_prop": 0.5643936601548102,
"repo_name": "neerajvashistha/pa-dude",
"id": "3da887d6cf2538610b5c2a137e70d5b6753be925",
"size": "13589",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/sphinx/pycode/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "359307"
},
{
"name": "C++",
"bytes": "5695"
},
{
"name": "CSS",
"bytes": "114504"
},
{
"name": "FORTRAN",
"bytes": "3707"
},
{
"name": "HTML",
"bytes": "216904"
},
{
"name": "JavaScript",
"bytes": "1323680"
},
{
"name": "Makefile",
"bytes": "2299"
},
{
"name": "Python",
"bytes": "31341230"
},
{
"name": "Self",
"bytes": "40307"
},
{
"name": "Shell",
"bytes": "5427"
},
{
"name": "TeX",
"bytes": "96790"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
from collections import namedtuple
import unittest
from six import string_types
from dockermap.functional import lazy, register_type, uses_type_registry, LazyOnceObject, resolve_value, resolve_deep
LOOKUP_DICT = {
'a': '/test/path_a',
'b': '/test/path_b',
'c': ('test_value', 1),
'd': {
'd1': 'test_value_1',
'd2': 'test_value_2',
},
}
class LazyAccessCounting(LazyOnceObject):
def __init__(self, func, *args, **kwargs):
self._actual_func = func
super(LazyAccessCounting, self).__init__(self._count_proxy, *args, **kwargs)
self._run_function_count = 0
def _count_proxy(self, *args, **kwargs):
self._run_function_count += 1
return self._actual_func(*args, **kwargs)
@property
def run_function_count(self):
return self._run_function_count
lazy_once = LazyAccessCounting
CustomType = namedtuple('CustomType', ['arg1', 'arg2'])
def resolve_custom(custom_type):
return LOOKUP_DICT.get(custom_type.arg1, {}).get(custom_type.arg2)
register_type(CustomType, resolve_custom)
class LazyValueResolutionTest(unittest.TestCase):
def test_simple_lazy_lookup(self):
a = lazy(LOOKUP_DICT.get, 'a')
a_val = '/test/path_a'
self.assertTrue(a == a_val)
self.assertEqual(a, a_val)
self.assertEqual(a.get(), a_val)
self.assertEqual(a.value, a_val)
def test_lazy_lookup_only_once(self):
b = lazy_once(LOOKUP_DICT.get, 'b')
self.assertFalse(b.evaluated)
b_val = '/test/path_b'
self.assertEqual(b, b_val)
self.assertTrue(b.evaluated)
self.assertTrue(b == b_val)
self.assertEqual(b.get(), b_val)
self.assertEqual(b.value, b_val)
self.assertEqual(b.run_function_count, 1)
def test_is_type_registered(self):
ct = CustomType('d', 'd1')
self.assertTrue(uses_type_registry(ct))
def test_resolve_lazy_once(self):
l = lazy_once(LOOKUP_DICT.get, 'c')
l_val = ('test_value', 1)
self.assertEqual(resolve_value(l), l_val)
self.assertEqual(len(resolve_value(l)), 2)
self.assertEqual(l.run_function_count, 1)
def test_resolve_custom_type(self):
ct = CustomType('d', 'd1')
self.assertEqual(resolve_value(ct), 'test_value_1')
def test_resolve_deep(self):
res_data = {
'a': lazy_once(LOOKUP_DICT.get, 'a'),
'b': CustomType('d', 'd2'),
'c': {
'a': lazy_once(LOOKUP_DICT.get, 'a'),
'b': lazy_once(LOOKUP_DICT.get, 'b'),
},
'd': [
lazy_once(LOOKUP_DICT.get, 'a'),
'b',
{
'a': lazy_once(LOOKUP_DICT.get, 'a'),
'b': lazy_once(LOOKUP_DICT.get, 'b'),
},
lazy_once(LOOKUP_DICT.get, 'd'),
],
CustomType('d', 'd2'): 'e',
}
data = resolve_deep(res_data, max_depth=2)
# Original structures should be preserved
self.assertIsInstance(data['a'], string_types)
self.assertIsInstance(data['d'], list)
self.assertIsInstance(data['d'][2], dict)
# Nested dictionary should be resolved.
self.assertEqual(data['c'], dict(a='/test/path_a', b='/test/path_b'))
self.assertEqual(data['d'][3], dict(d1='test_value_1', d2='test_value_2'))
# Values below max_depth should not be substituted or evaluated.
self.assertIsInstance(data['d'][2]['a'], lazy_once)
self.assertFalse(data['d'][2]['a'].evaluated)
# Placing functions as dictionary keys may not be a good idea, but should work at least for tuples.
self.assertEqual(data.get('test_value_2'), 'e')
| {
"content_hash": "7cd67e1b12f50f1944b1967b702e4036",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 117,
"avg_line_length": 33.64035087719298,
"alnum_prop": 0.5783572359843546,
"repo_name": "merll/docker-map",
"id": "ca18b7b33d8321e60fff577fd1d198fa2392bdf8",
"size": "3859",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_value_resolution.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "556108"
}
],
"symlink_target": ""
} |
from collections import defaultdict
from collections import OrderedDict
import copy
import logging
import types
import warnings
from django.conf import settings
from django.core import urlresolvers
from django import shortcuts
from django.template.loader import render_to_string # noqa
from django.utils.functional import Promise # noqa
from django.utils.http import urlencode # noqa
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
import six
from horizon import messages
from horizon.utils import functions
from horizon.utils import html
LOG = logging.getLogger(__name__)
# For Bootstrap integration; can be overridden in settings.
ACTION_CSS_CLASSES = ()
STRING_SEPARATOR = "__"
class BaseActionMetaClass(type):
"""Metaclass for adding all actions options from inheritance tree
to action.
This way actions can inherit from each other but still use
the class attributes DSL. Meaning, all attributes of Actions are
defined as class attributes, but in the background, it will be used as
parameters for the initializer of the object. The object is then
initialized clean way. Similar principle is used in DataTableMetaclass.
"""
def __new__(mcs, name, bases, attrs):
# Options of action are set as class attributes, loading them.
options = {}
if attrs:
options = attrs
# Iterate in reverse to preserve final order
for base in bases[::-1]:
            # It actually throws all superclasses away except the immediate
            # superclass. But that's fine: the immediate superclass's
            # base_options includes everything, because the superclasses were
            # also created by this metaclass. The same principle is used in
            # DataTableMetaclass.
if hasattr(base, 'base_options') and base.base_options:
base_options = {}
# Updating options by superclasses.
base_options.update(base.base_options)
# Updating superclass options by actual class options.
base_options.update(options)
options = base_options
        # Save all options to a class attribute; this will be used for
        # instantiation of the specific Action.
attrs['base_options'] = options
return type.__new__(mcs, name, bases, attrs)
def __call__(cls, *args, **kwargs):
cls.base_options.update(kwargs)
# Adding cls.base_options to each init call.
klass = super(BaseActionMetaClass, cls).__call__(
*args, **cls.base_options)
return klass
@six.add_metaclass(BaseActionMetaClass)
class BaseAction(html.HTMLElement):
"""Common base class for all ``Action`` classes."""
def __init__(self, **kwargs):
super(BaseAction, self).__init__()
self.datum = kwargs.get('datum', None)
self.table = kwargs.get('table', None)
self.handles_multiple = kwargs.get('handles_multiple', False)
self.requires_input = kwargs.get('requires_input', False)
self.preempt = kwargs.get('preempt', False)
self.policy_rules = kwargs.get('policy_rules', None)
self.action_type = kwargs.get('action_type', 'default')
def data_type_matched(self, datum):
"""Method to see if the action is allowed for a certain type of data.
Only affects mixed data type tables.
"""
if datum:
action_data_types = getattr(self, "allowed_data_types", [])
# If the data types of this action is empty, we assume it accepts
# all kinds of data and this method will return True.
if action_data_types:
datum_type = getattr(datum, self.table._meta.data_type_name,
None)
if datum_type and (datum_type not in action_data_types):
return False
return True
def get_policy_target(self, request, datum):
"""Provide the target for a policy request.
This method is meant to be overridden to return target details when
one of the policy checks requires them. E.g., {"user_id": datum.id}
"""
return {}
def allowed(self, request, datum):
"""Determine whether this action is allowed for the current request.
This method is meant to be overridden with more specific checks.
"""
return True
def _allowed(self, request, datum):
policy_check = getattr(settings, "POLICY_CHECK_FUNCTION", None)
if policy_check and self.policy_rules:
target = self.get_policy_target(request, datum)
return (policy_check(self.policy_rules, request, target) and
self.allowed(request, datum))
return self.allowed(request, datum)
def update(self, request, datum):
"""Allows per-action customization based on current conditions.
This is particularly useful when you wish to create a "toggle"
action that will be rendered differently based on the value of an
attribute on the current row's data.
By default this method is a no-op.
"""
pass
def get_default_classes(self):
"""Returns a list of the default classes for the action. Defaults to
``["btn", "btn-default", "btn-sm"]``.
"""
return getattr(settings, "ACTION_CSS_CLASSES", ACTION_CSS_CLASSES)
def get_default_attrs(self):
"""Returns a list of the default HTML attributes for the action.
Defaults to returning an ``id`` attribute with the value
``{{ table.name }}__action_{{ action.name }}__{{ creation counter }}``.
"""
if self.datum is not None:
bits = (self.table.name,
"row_%s" % self.table.get_object_id(self.datum),
"action_%s" % self.name)
else:
bits = (self.table.name, "action_%s" % self.name)
return {"id": STRING_SEPARATOR.join(bits)}
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.name)
def associate_with_table(self, table):
self.table = table
class Action(BaseAction):
"""Represents an action which can be taken on this table's data.
.. attribute:: name
Required. The short name or "slug" representing this
action. This name should not be changed at runtime.
.. attribute:: verbose_name
A descriptive name used for display purposes. Defaults to the
value of ``name`` with the first letter of each word capitalized.
.. attribute:: verbose_name_plural
Used like ``verbose_name`` in cases where ``handles_multiple`` is
``True``. Defaults to ``verbose_name`` with the letter "s" appended.
.. attribute:: method
The HTTP method for this action. Defaults to ``POST``. Other methods
may or may not succeed currently.
.. attribute:: requires_input
Boolean value indicating whether or not this action can be taken
without any additional input (e.g. an object id). Defaults to ``True``.
.. attribute:: preempt
Boolean value indicating whether this action should be evaluated in
the period after the table is instantiated but before the data has
been loaded.
This can allow actions which don't need access to the full table data
to bypass any API calls and processing which would otherwise be
required to load the table.
.. attribute:: allowed_data_types
A list that contains the allowed data types of the action. If the
datum's type is in this list, the action will be shown on the row
for the datum.
        Defaults to an empty list (``[]``). When set to empty, the action
will accept any kind of data.
.. attribute:: policy_rules
list of scope and rule tuples to do policy checks on, the
composition of which is (scope, rule)
scope: service type managing the policy for action
rule: string representing the action to be checked
for a policy that requires a single rule check:
policy_rules should look like
"(("compute", "compute:create_instance"),)"
for a policy that requires multiple rule checks:
rules should look like
"(("identity", "identity:list_users"),
("identity", "identity:list_roles"))"
At least one of the following methods must be defined:
.. method:: single(self, data_table, request, object_id)
Handler for a single-object action.
.. method:: multiple(self, data_table, request, object_ids)
Handler for multi-object actions.
.. method:: handle(self, data_table, request, object_ids)
If a single function can work for both single-object and
multi-object cases then simply providing a ``handle`` function
will internally route both ``single`` and ``multiple`` requests
to ``handle`` with the calls from ``single`` being transformed
into a list containing only the single object id.
"""
def __init__(self, single_func=None, multiple_func=None, handle_func=None,
attrs=None, **kwargs):
super(Action, self).__init__(**kwargs)
self.method = kwargs.get('method', "POST")
self.requires_input = kwargs.get('requires_input', True)
self.verbose_name = kwargs.get('verbose_name', self.name.title())
self.verbose_name_plural = kwargs.get('verbose_name_plural',
"%ss" % self.verbose_name)
self.allowed_data_types = kwargs.get('allowed_data_types', [])
self.icon = kwargs.get('icon', None)
if attrs:
self.attrs.update(attrs)
# Don't set these if they're None
if single_func:
self.single = single_func
if multiple_func:
self.multiple = multiple_func
if handle_func:
self.handle = handle_func
# Ensure we have the appropriate methods
has_handler = hasattr(self, 'handle') and callable(self.handle)
has_single = hasattr(self, 'single') and callable(self.single)
has_multiple = hasattr(self, 'multiple') and callable(self.multiple)
if has_handler or has_multiple:
self.handles_multiple = True
if not has_handler and (not has_single or has_multiple):
cls_name = self.__class__.__name__
raise NotImplementedError('You must define either a "handle" '
'method or a "single" or "multiple" '
'method on %s.' % cls_name)
if not has_single:
def single(self, data_table, request, object_id):
return self.handle(data_table, request, [object_id])
self.single = types.MethodType(single, self)
if not has_multiple and self.handles_multiple:
def multiple(self, data_table, request, object_ids):
return self.handle(data_table, request, object_ids)
self.multiple = types.MethodType(multiple, self)
def get_param_name(self):
"""Returns the full POST parameter name for this action.
Defaults to
``{{ table.name }}__{{ action.name }}``.
"""
return "__".join([self.table.name, self.name])
class LinkAction(BaseAction):
"""A table action which is simply a link rather than a form POST.
.. attribute:: name
Required. The short name or "slug" representing this
action. This name should not be changed at runtime.
.. attribute:: verbose_name
A string which will be rendered as the link text. (Required)
.. attribute:: url
A string or a callable which resolves to a url to be used as the link
target. You must either define the ``url`` attribute or override
the ``get_link_url`` method on the class.
.. attribute:: allowed_data_types
A list that contains the allowed data types of the action. If the
datum's type is in this list, the action will be shown on the row
for the datum.
        Defaults to an empty list (``[]``). When set to empty, the action
will accept any kind of data.
"""
# class attribute name is used for ordering of Actions in table
name = "link"
ajax = False
def __init__(self, attrs=None, **kwargs):
super(LinkAction, self).__init__(**kwargs)
self.method = kwargs.get('method', "GET")
self.bound_url = kwargs.get('bound_url', None)
self.name = kwargs.get('name', self.name)
self.verbose_name = kwargs.get('verbose_name', self.name.title())
self.url = kwargs.get('url', None)
self.allowed_data_types = kwargs.get('allowed_data_types', [])
self.icon = kwargs.get('icon', None)
self.kwargs = kwargs
self.action_type = kwargs.get('action_type', 'default')
if not kwargs.get('verbose_name', None):
raise NotImplementedError('A LinkAction object must have a '
'verbose_name attribute.')
if attrs:
self.attrs.update(attrs)
if self.ajax:
self.classes = list(self.classes) + ['ajax-update']
def get_ajax_update_url(self):
table_url = self.table.get_absolute_url()
params = urlencode(
OrderedDict([("action", self.name), ("table", self.table.name)])
)
return "%s?%s" % (table_url, params)
def render(self, **kwargs):
action_dict = copy.copy(kwargs)
action_dict.update({"action": self, "is_single": True})
return render_to_string("horizon/common/_data_table_action.html",
action_dict)
def associate_with_table(self, table):
super(LinkAction, self).associate_with_table(table)
if self.ajax:
self.attrs['data-update-url'] = self.get_ajax_update_url()
def get_link_url(self, datum=None):
"""Returns the final URL based on the value of ``url``.
If ``url`` is callable it will call the function.
If not, it will then try to call ``reverse`` on ``url``.
Failing that, it will simply return the value of ``url`` as-is.
When called for a row action, the current row data object will be
passed as the first parameter.
"""
if not self.url:
raise NotImplementedError('A LinkAction class must have a '
'url attribute or define its own '
'get_link_url method.')
if callable(self.url):
return self.url(datum, **self.kwargs)
try:
if datum:
obj_id = self.table.get_object_id(datum)
return urlresolvers.reverse(self.url, args=(obj_id,))
else:
return urlresolvers.reverse(self.url)
except urlresolvers.NoReverseMatch as ex:
LOG.info('No reverse found for "%s": %s' % (self.url, ex))
return self.url
class FilterAction(BaseAction):
"""A base class representing a filter action for a table.
.. attribute:: name
The short name or "slug" representing this action. Defaults to
``"filter"``.
.. attribute:: verbose_name
A descriptive name used for display purposes. Defaults to the
value of ``name`` with the first letter of each word capitalized.
.. attribute:: param_name
A string representing the name of the request parameter used for the
search term. Default: ``"q"``.
.. attribute:: filter_type
A string representing the type of this filter. If this is set to
``"server"`` then ``filter_choices`` must also be provided.
Default: ``"query"``.
.. attribute:: filter_choices
Required for server type filters. A tuple of tuples representing the
filter options. Tuple composition should evaluate to (string, string,
boolean), representing the filter parameter, display value, and whether
or not it should be applied to the API request as an API query
attribute. API type filters do not need to be accounted for in the
filter method since the API will do the filtering. However, server
type filters in general will need to be performed in the filter method.
By default this attribute is not provided.
.. attribute:: needs_preloading
If True, the filter function will be called for the initial
GET request with an empty ``filter_string``, regardless of the
value of ``method``.
"""
# TODO(gabriel): The method for a filter action should be a GET,
# but given the form structure of the table that's currently impossible.
# At some future date this needs to be reworked to get the filter action
# separated from the table's POST form.
# class attribute name is used for ordering of Actions in table
name = "filter"
def __init__(self, **kwargs):
super(FilterAction, self).__init__(**kwargs)
self.method = kwargs.get('method', "POST")
self.name = kwargs.get('name', self.name)
self.verbose_name = kwargs.get('verbose_name', _("Filter"))
self.filter_type = kwargs.get('filter_type', "query")
self.filter_choices = kwargs.get('filter_choices')
self.needs_preloading = kwargs.get('needs_preloading', False)
self.param_name = kwargs.get('param_name', 'q')
self.icon = "search"
if self.filter_type == 'server' and self.filter_choices is None:
raise NotImplementedError(
'A FilterAction object with the '
'filter_type attribute set to "server" must also have a '
'filter_choices attribute.')
def get_param_name(self):
"""Returns the full query parameter name for this action.
Defaults to
``{{ table.name }}__{{ action.name }}__{{ action.param_name }}``.
"""
return "__".join([self.table.name, self.name, self.param_name])
def assign_type_string(self, table, data, type_string):
for datum in data:
setattr(datum, table._meta.data_type_name, type_string)
def data_type_filter(self, table, data, filter_string):
filtered_data = []
for data_type in table._meta.data_types:
func_name = "filter_%s_data" % data_type
filter_func = getattr(self, func_name, None)
if not filter_func and not callable(filter_func):
# The check of filter function implementation should happen
# in the __init__. However, the current workflow of DataTable
# and actions won't allow it. Need to be fixed in the future.
cls_name = self.__class__.__name__
raise NotImplementedError("You must define a %s method "
"for %s data type in %s." %
(func_name, data_type, cls_name))
_data = filter_func(table, data, filter_string)
self.assign_type_string(table, _data, data_type)
filtered_data.extend(_data)
return filtered_data
def filter(self, table, data, filter_string):
"""Provides the actual filtering logic.
This method must be overridden by subclasses and return
the filtered data.
"""
return data
def is_api_filter(self, filter_field):
"""Determine if the given filter field should be used as an
API filter.
"""
if self.filter_type == 'server':
for choice in self.filter_choices:
if (choice[0] == filter_field and len(choice) > 2 and
choice[2] is True):
return True
return False
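# A hypothetical sketch (not part of horizon): a "server" type filter whose
# choices are passed straight through to the API request, as described above.
class _ExampleStatusFilterAction(FilterAction):
    filter_type = "server"
    # (filter parameter, display value, pass the value to the API request?)
    filter_choices = (
        ("active", _("Active"), True),
        ("error", _("Error"), True),
    )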
class NameFilterAction(FilterAction):
"""A filter action for name property."""
def filter(self, table, items, filter_string):
"""Naive case-insensitive search."""
query = filter_string.lower()
return [item for item in items
if query in item.name.lower()]
class FixedFilterAction(FilterAction):
"""A filter action with fixed buttons."""
def __init__(self, **kwargs):
super(FixedFilterAction, self).__init__(**kwargs)
self.filter_type = kwargs.get('filter_type', "fixed")
self.needs_preloading = kwargs.get('needs_preloading', True)
self.fixed_buttons = self.get_fixed_buttons()
self.filter_string = ''
def filter(self, table, images, filter_string):
self.filter_string = filter_string
categories = self.categorize(table, images)
self.categories = defaultdict(list, categories)
for button in self.fixed_buttons:
button['count'] = len(self.categories[button['value']])
if not filter_string:
return images
return self.categories[filter_string]
def get_fixed_buttons(self):
"""Returns a list of dictionaries describing the fixed buttons
to use for filtering.
Each list item should be a dict with the following keys:
* ``text``: Text to display on the button
* ``icon``: Icon class for icon element (inserted before text).
* ``value``: Value returned when the button is clicked. This value is
passed to ``filter()`` as ``filter_string``.
"""
return []
def categorize(self, table, images):
"""Override to separate images into categories.
Return a dict with a key for the value of each fixed button,
and a value that is a list of images in that category.
"""
return {}
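# A hypothetical sketch (not part of horizon): two fixed buttons that split
# the rows into categories via get_fixed_buttons/categorize described above.
class _ExampleOwnerFilterAction(FixedFilterAction):
    def get_fixed_buttons(self):
        return [
            {'text': _("Mine"), 'icon': 'fa-user', 'value': 'mine'},
            {'text': _("Shared"), 'icon': 'fa-users', 'value': 'shared'},
        ]
    def categorize(self, table, images):
        mine = [i for i in images if getattr(i, 'is_mine', False)]
        shared = [i for i in images if not getattr(i, 'is_mine', False)]
        return {'mine': mine, 'shared': shared}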
class BatchAction(Action):
"""A table action which takes batch action on one or more
objects. This action should not require user input on a
per-object basis.
.. attribute:: name
An internal name for this action.
.. method:: action_present
Method accepting an integer/long parameter and returning the display
forms of the name properly pluralised (depending on the integer) and
translated in a string or tuple/list.
.. attribute:: action_present (Deprecated)
String or tuple/list. The display forms of the name.
Should be a transitive verb, capitalized and translated. ("Delete",
"Rotate", etc.) If tuple or list - then setting
self.current_present_action = n will set the current active item
from the list(action_present[n])
You can pass a complete action name including 'data_type' by specifying
'%(data_type)s' substitution in action_present ("Delete %(data_type)s").
Otherwise a complete action name is a format of "<action> <data_type>".
<data_type> is determined based on the number of items.
By passing a complete action name you allow translators to control
the order of words as they want.
NOTE: action_present attribute is bad for translations and should be
avoided. Please use the action_present method instead.
This form is kept for legacy.
.. method:: action_past
Method accepting an integer/long parameter and returning the display
forms of the name properly pluralised (depending on the integer) and
translated in a string or tuple/list.
.. attribute:: action_past (Deprecated)
String or tuple/list. The past tense of action_present. ("Deleted",
"Rotated", etc.) If tuple or list - then
setting self.current_past_action = n will set the current active item
from the list(action_past[n])
NOTE: action_past attribute is bad for translations and should be
avoided. Please use the action_past method instead.
This form is kept for legacy.
.. attribute:: data_type_singular (Deprecated)
Optional display name (if the data_type method is not defined) for the
type of data that receives the action. ("Key Pair", "Floating IP", etc.)
.. attribute:: data_type_plural (Deprecated)
Optional plural word (if the data_type method is not defined) for the
type of data being acted on. Defaults to appending 's'. Relying on the
default is bad for translations and should not be done, so it's absence
will raise a DeprecationWarning. It is currently kept as optional for
legacy code.
NOTE: data_type_singular and data_type_plural attributes are bad for
translations and should not be used. Please use the action_present and
action_past methods. This form is kept temporarily for legacy code but
will be removed.
.. attribute:: success_url
Optional location to redirect after completion of the delete
action. Defaults to the current page.
.. attribute:: help_text
Optional message for providing an appropriate help text for
the horizon user.
"""
help_text = _("This action cannot be undone.")
def __init__(self, **kwargs):
super(BatchAction, self).__init__(**kwargs)
action_present_method = False
if hasattr(self, 'action_present'):
if callable(self.action_present):
action_present_method = True
else:
warnings.warn(DeprecationWarning(
'The %s BatchAction class must have an action_present '
'method instead of attribute.' % self.__class__.__name__
))
action_past_method = False
if hasattr(self, 'action_past'):
if callable(self.action_past):
action_past_method = True
else:
warnings.warn(DeprecationWarning(
'The %s BatchAction class must have an action_past '
'method instead of attribute.' % self.__class__.__name__
))
action_methods = action_present_method and action_past_method
has_action_method = action_present_method or action_past_method
if has_action_method and not action_methods:
raise NotImplementedError(
                'The %s BatchAction class must have both action_past and '
'action_present methods.' % self.__class__.__name__
)
if not action_methods:
if not kwargs.get('data_type_singular'):
raise NotImplementedError(
'The %s BatchAction class must have a data_type_singular '
'attribute when action_past and action_present attributes '
'are used.' % self.__class__.__name__
)
self.data_type_singular = kwargs.get('data_type_singular')
self.data_type_plural = kwargs.get('data_type_plural',
self.data_type_singular + 's')
# TODO(ygbo): get rid of self.use_action_method once action_present and
# action_past are changed to methods handling plurals.
self.use_action_method = action_methods
self.success_url = kwargs.get('success_url', None)
# If setting a default name, don't initialize it too early
self.verbose_name = kwargs.get('verbose_name', self._get_action_name)
self.verbose_name_plural = kwargs.get(
'verbose_name_plural',
lambda: self._get_action_name('plural'))
self.current_present_action = 0
self.current_past_action = 0
# Keep record of successfully handled objects
self.success_ids = []
self.help_text = kwargs.get('help_text', self.help_text)
def _allowed(self, request, datum=None):
# Override the default internal action method to prevent batch
# actions from appearing on tables with no data.
if not self.table.data and not datum:
return False
return super(BatchAction, self)._allowed(request, datum)
def _get_action_name(self, items=None, past=False):
"""Builds combinations like 'Delete Object' and 'Deleted
Objects' based on the number of items and `past` flag.
:param items:
A list or tuple of items (or container with a __len__ method) to
count the number of concerned items for which this method is
called.
When this method is called for a single item (by the BatchAction
itself), this parameter can be omitted and the number of items
will be considered as "one".
If we want to evaluate to "zero" this parameter must not be omitted
(and should be an empty container).
:param past:
Boolean flag indicating if the action took place in the past.
By default a present action is considered.
"""
action_type = "past" if past else "present"
if items is None:
# Called without items parameter (by a single instance.)
count = 1
else:
count = len(items)
# TODO(ygbo): get rid of self.use_action_method once action_present and
# action_past are changed to methods handling plurals.
action_attr = getattr(self, "action_%s" % action_type)
if self.use_action_method:
action_attr = action_attr(count)
if isinstance(action_attr, (six.string_types, Promise)):
action = action_attr
else:
toggle_selection = getattr(self, "current_%s_action" % action_type)
action = action_attr[toggle_selection]
if self.use_action_method:
return action
        # TODO(ygbo): get rid of all this below once action_present and
# action_past are changed to methods handling plurals.
data_type = ungettext_lazy(
self.data_type_singular,
self.data_type_plural,
count
)
if '%(data_type)s' in action:
# If full action string is specified, use action as format string.
msgstr = action
else:
if action_type == "past":
msgstr = pgettext_lazy(u"past", "%(action)s %(data_type)s")
else:
msgstr = pgettext_lazy(u"present", "%(action)s %(data_type)s")
return msgstr % {'action': action, 'data_type': data_type}
def action(self, request, datum_id):
"""Required. Accepts a single object id and performs the specific
action.
Return values are discarded, errors raised are caught and logged.
"""
def update(self, request, datum):
"""Switches the action verbose name, if needed."""
if getattr(self, 'action_present', False):
self.verbose_name = self._get_action_name()
self.verbose_name_plural = self._get_action_name('plural')
def get_success_url(self, request=None):
"""Returns the URL to redirect to after a successful action."""
if self.success_url:
return self.success_url
return request.get_full_path()
def get_default_attrs(self):
"""Returns a list of the default HTML attributes for the action."""
attrs = super(BatchAction, self).get_default_attrs()
attrs.update({'data-batch-action': 'true'})
return attrs
def handle(self, table, request, obj_ids):
action_success = []
action_failure = []
action_not_allowed = []
for datum_id in obj_ids:
datum = table.get_object_by_id(datum_id)
datum_display = table.get_object_display(datum) or datum_id
if not table._filter_action(self, request, datum):
action_not_allowed.append(datum_display)
LOG.warning(u'Permission denied to %s: "%s"' %
(self._get_action_name(past=True).lower(),
datum_display))
continue
try:
self.action(request, datum_id)
# Call update to invoke changes if needed
self.update(request, datum)
action_success.append(datum_display)
self.success_ids.append(datum_id)
LOG.info(u'%s: "%s"' %
(self._get_action_name(past=True), datum_display))
except Exception as ex:
# Handle the exception but silence it since we'll display
# an aggregate error message later. Otherwise we'd get
# multiple error messages displayed to the user.
action_failure.append(datum_display)
action_description = (
self._get_action_name(past=True).lower(), datum_display)
LOG.warning(
'Action %(action)s Failed for %(reason)s', {
'action': action_description, 'reason': ex})
# Begin with success message class, downgrade to info if problems.
success_message_level = messages.success
if action_not_allowed:
msg = _('You are not allowed to %(action)s: %(objs)s')
params = {"action":
self._get_action_name(action_not_allowed).lower(),
"objs": functions.lazy_join(", ", action_not_allowed)}
messages.error(request, msg % params)
success_message_level = messages.info
if action_failure:
msg = _('Unable to %(action)s: %(objs)s')
params = {"action": self._get_action_name(action_failure).lower(),
"objs": functions.lazy_join(", ", action_failure)}
messages.error(request, msg % params)
success_message_level = messages.info
if action_success:
msg = _('%(action)s: %(objs)s')
params = {"action":
self._get_action_name(action_success, past=True),
"objs": functions.lazy_join(", ", action_success)}
success_message_level(request, msg % params)
return shortcuts.redirect(self.get_success_url(request))
class DeleteAction(BatchAction):
"""A table action used to perform delete operations on table data.
.. attribute:: name
A short name or "slug" representing this action.
Defaults to 'delete'
.. method:: action_present
Method accepting an integer/long parameter and returning the display
forms of the name properly pluralised (depending on the integer) and
translated in a string or tuple/list.
.. attribute:: action_present (Deprecated)
A string containing the transitive verb describing the delete action.
Defaults to 'Delete'
NOTE: action_present attribute is bad for translations and should be
avoided. Please use the action_present method instead.
This form is kept for legacy.
.. method:: action_past
Method accepting an integer/long parameter and returning the display
forms of the name properly pluralised (depending on the integer) and
translated in a string or tuple/list.
.. attribute:: action_past (Deprecated)
A string set to the past tense of action_present.
Defaults to 'Deleted'
NOTE: action_past attribute is bad for translations and should be
avoided. Please use the action_past method instead.
This form is kept for legacy.
.. attribute:: data_type_singular (Deprecated)
A string used to name the data to be deleted.
.. attribute:: data_type_plural (Deprecated)
Optional. Plural of ``data_type_singular``.
Defaults to ``data_type_singular`` appended with an 's'. Relying on
the default is bad for translations and should not be done, so it's
absence will raise a DeprecationWarning. It is currently kept as
optional for legacy code.
NOTE: data_type_singular and data_type_plural attributes are bad for
translations and should not be used. Please use the action_present and
action_past methods. This form is kept temporarily for legacy code but
will be removed.
"""
name = "delete"
def __init__(self, **kwargs):
super(DeleteAction, self).__init__(**kwargs)
self.name = kwargs.get('name', self.name)
if not hasattr(self, "action_present"):
self.action_present = kwargs.get('action_present', _("Delete"))
if not hasattr(self, "action_past"):
self.action_past = kwargs.get('action_past', _("Deleted"))
self.icon = "trash"
self.action_type = "danger"
def action(self, request, obj_id):
"""Action entry point. Overrides base class' action method.
Accepts a single object id passing it over to the delete method
responsible for the object's destruction.
"""
return self.delete(request, obj_id)
def delete(self, request, obj_id):
"""Required. Deletes an object referenced by obj_id.
Override to provide delete functionality specific to your data.
"""
class UpdateAction(object):
"""A table action for cell updates by inline editing."""
name = "update"
def action(self, request, datum, obj_id, cell_name, new_cell_value):
self.update_cell(request, datum, obj_id, cell_name, new_cell_value)
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Update Item",
u"Update Items",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Updated Item",
u"Updated Items",
count
)
def update_cell(self, request, datum, obj_id, cell_name, new_cell_value):
"""Method for saving data of the cell.
        This method must implement the saving logic of the inline edited table
cell.
"""
def allowed(self, request, datum, cell):
"""Determine whether updating is allowed for the current request.
This method is meant to be overridden with more specific checks.
Data of the row and of the cell are passed to the method.
"""
return True
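# A hypothetical sketch (not part of horizon): inline editing of a "name"
# cell, persisting the new value through an assumed API helper.
class _ExampleNameUpdateAction(UpdateAction):
    def update_cell(self, request, datum, obj_id, cell_name, new_cell_value):
        # e.g. call an API helper such as
        # api.update_object(request, obj_id, **{cell_name: new_cell_value});
        # ``api.update_object`` is an assumption, not a real horizon call
        pass
    def allowed(self, request, datum, cell):
        # illustrative rule: only rows that are not locked may be edited
        return not getattr(datum, "locked", False)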
| {
"content_hash": "b21275e583679f42c37007efe4b1c3be",
"timestamp": "",
"source": "github",
"line_count": 974,
"max_line_length": 79,
"avg_line_length": 39.43839835728953,
"alnum_prop": 0.6114075963866399,
"repo_name": "Mirantis/mos-horizon",
"id": "7d89cd53260b1a7e23d0b2681335f29273bee8f3",
"size": "39018",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "horizon/tables/actions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "90441"
},
{
"name": "HTML",
"bytes": "502807"
},
{
"name": "JavaScript",
"bytes": "1571234"
},
{
"name": "Makefile",
"bytes": "588"
},
{
"name": "Python",
"bytes": "5170850"
},
{
"name": "Shell",
"bytes": "19380"
}
],
"symlink_target": ""
} |
"""
Unified interfaces to minimization algorithms.
Functions
---------
- minimize : minimization of a function of several variables.
- minimize_scalar : minimization of a function of one variable.
"""
from __future__ import division, print_function, absolute_import
__all__ = ['minimize', 'minimize_scalar']
from warnings import warn
import numpy as np
from scipy._lib.six import callable
# unconstrained minimization
from .optimize import (_minimize_neldermead, _minimize_powell, _minimize_cg,
_minimize_bfgs, _minimize_newtoncg,
_minimize_scalar_brent, _minimize_scalar_bounded,
_minimize_scalar_golden, MemoizeJac)
from ._trustregion_dogleg import _minimize_dogleg
from ._trustregion_ncg import _minimize_trust_ncg
# constrained minimization
from .lbfgsb import _minimize_lbfgsb
from .tnc import _minimize_tnc
from .cobyla import _minimize_cobyla
from .slsqp import _minimize_slsqp
def minimize(fun, x0, args=(), method=None, jac=None, hess=None,
hessp=None, bounds=None, constraints=(), tol=None,
callback=None, options=None):
"""Minimization of scalar function of one or more variables.
In general, the optimization problems are of the form::
minimize f(x) subject to
g_i(x) >= 0, i = 1,...,m
h_j(x) = 0, j = 1,...,p
where x is a vector of one or more variables.
``g_i(x)`` are the inequality constraints.
``h_j(x)`` are the equality constraints.
Optionally, the lower and upper bounds for each element in x can also be
specified using the `bounds` argument.
Parameters
----------
fun : callable
Objective function.
x0 : ndarray
Initial guess.
args : tuple, optional
Extra arguments passed to the objective function and its
derivatives (Jacobian, Hessian).
method : str or callable, optional
Type of solver. Should be one of
- 'Nelder-Mead' :ref:`(see here) <optimize.minimize-neldermead>`
- 'Powell' :ref:`(see here) <optimize.minimize-powell>`
- 'CG' :ref:`(see here) <optimize.minimize-cg>`
- 'BFGS' :ref:`(see here) <optimize.minimize-bfgs>`
- 'Newton-CG' :ref:`(see here) <optimize.minimize-newtoncg>`
- 'L-BFGS-B' :ref:`(see here) <optimize.minimize-lbfgsb>`
- 'TNC' :ref:`(see here) <optimize.minimize-tnc>`
- 'COBYLA' :ref:`(see here) <optimize.minimize-cobyla>`
- 'SLSQP' :ref:`(see here) <optimize.minimize-slsqp>`
- 'dogleg' :ref:`(see here) <optimize.minimize-dogleg>`
- 'trust-ncg' :ref:`(see here) <optimize.minimize-trustncg>`
- custom - a callable object (added in version 0.14.0),
see below for description.
If not given, chosen to be one of ``BFGS``, ``L-BFGS-B``, ``SLSQP``,
depending on whether the problem has constraints or bounds.
jac : bool or callable, optional
Jacobian (gradient) of objective function. Only for CG, BFGS,
Newton-CG, L-BFGS-B, TNC, SLSQP, dogleg, trust-ncg.
If `jac` is a Boolean and is True, `fun` is assumed to return the
gradient along with the objective function. If False, the
gradient will be estimated numerically.
`jac` can also be a callable returning the gradient of the
objective. In this case, it must accept the same arguments as `fun`.
hess, hessp : callable, optional
Hessian (matrix of second-order derivatives) of objective function or
Hessian of objective function times an arbitrary vector p. Only for
Newton-CG, dogleg, trust-ncg.
Only one of `hessp` or `hess` needs to be given. If `hess` is
provided, then `hessp` will be ignored. If neither `hess` nor
`hessp` is provided, then the Hessian product will be approximated
using finite differences on `jac`. `hessp` must compute the Hessian
times an arbitrary vector.
bounds : sequence, optional
Bounds for variables (only for L-BFGS-B, TNC and SLSQP).
``(min, max)`` pairs for each element in ``x``, defining
the bounds on that parameter. Use None for one of ``min`` or
``max`` when there is no bound in that direction.
constraints : dict or sequence of dict, optional
Constraints definition (only for COBYLA and SLSQP).
Each constraint is defined in a dictionary with fields:
type : str
Constraint type: 'eq' for equality, 'ineq' for inequality.
fun : callable
The function defining the constraint.
jac : callable, optional
The Jacobian of `fun` (only for SLSQP).
args : sequence, optional
Extra arguments to be passed to the function and Jacobian.
Equality constraint means that the constraint function result is to
be zero whereas inequality means that it is to be non-negative.
Note that COBYLA only supports inequality constraints.
tol : float, optional
Tolerance for termination. For detailed control, use solver-specific
options.
options : dict, optional
A dictionary of solver options. All methods accept the following
generic options:
maxiter : int
Maximum number of iterations to perform.
disp : bool
Set to True to print convergence messages.
For method-specific options, see :func:`show_options()`.
callback : callable, optional
Called after each iteration, as ``callback(xk)``, where ``xk`` is the
current parameter vector.
Returns
-------
res : OptimizeResult
The optimization result represented as an ``OptimizeResult`` object.
Important attributes are: ``x`` the solution array, ``success`` a
Boolean flag indicating if the optimizer exited successfully and
``message`` which describes the cause of the termination. See
`OptimizeResult` for a description of other attributes.
See also
--------
minimize_scalar : Interface to minimization algorithms for scalar
univariate functions
show_options : Additional options accepted by the solvers
Notes
-----
This section describes the available solvers that can be selected by the
'method' parameter. The default method is *BFGS*.
**Unconstrained minimization**
Method :ref:`Nelder-Mead <optimize.minimize-neldermead>` uses the
Simplex algorithm [1]_, [2]_. This algorithm is robust in many
applications. However, if numerical computation of derivative can be
trusted, other algorithms using the first and/or second derivatives
information might be preferred for their better performance in
general.
Method :ref:`Powell <optimize.minimize-powell>` is a modification
of Powell's method [3]_, [4]_ which is a conjugate direction
method. It performs sequential one-dimensional minimizations along
each vector of the directions set (`direc` field in `options` and
`info`), which is updated at each iteration of the main
minimization loop. The function need not be differentiable, and no
derivatives are taken.
Method :ref:`CG <optimize.minimize-cg>` uses a nonlinear conjugate
gradient algorithm by Polak and Ribiere, a variant of the
Fletcher-Reeves method described in [5]_ pp. 120-122. Only the
first derivatives are used.
Method :ref:`BFGS <optimize.minimize-bfgs>` uses the quasi-Newton
method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) [5]_
pp. 136. It uses the first derivatives only. BFGS has proven good
performance even for non-smooth optimizations. This method also
returns an approximation of the Hessian inverse, stored as
`hess_inv` in the OptimizeResult object.
Method :ref:`Newton-CG <optimize.minimize-newtoncg>` uses a
Newton-CG algorithm [5]_ pp. 168 (also known as the truncated
Newton method). It uses a CG method to compute the search
direction. See also *TNC* method for a box-constrained
minimization with a similar algorithm.
Method :ref:`dogleg <optimize.minimize-dogleg>` uses the dog-leg
trust-region algorithm [5]_ for unconstrained minimization. This
algorithm requires the gradient and Hessian; furthermore the
Hessian is required to be positive definite.
Method :ref:`trust-ncg <optimize.minimize-trustncg>` uses the
Newton conjugate gradient trust-region algorithm [5]_ for
unconstrained minimization. This algorithm requires the gradient
and either the Hessian or a function that computes the product of
the Hessian with a given vector.
**Constrained minimization**
Method :ref:`L-BFGS-B <optimize.minimize-lbfgsb>` uses the L-BFGS-B
algorithm [6]_, [7]_ for bound constrained minimization.
Method :ref:`TNC <optimize.minimize-tnc>` uses a truncated Newton
algorithm [5]_, [8]_ to minimize a function with variables subject
to bounds. This algorithm uses gradient information; it is also
called Newton Conjugate-Gradient. It differs from the *Newton-CG*
method described above as it wraps a C implementation and allows
each variable to be given upper and lower bounds.
Method :ref:`COBYLA <optimize.minimize-cobyla>` uses the
Constrained Optimization BY Linear Approximation (COBYLA) method
[9]_, [10]_, [11]_. The algorithm is based on linear
approximations to the objective function and each constraint. The
method wraps a FORTRAN implementation of the algorithm. The
constraint functions 'fun' may return either a single number
or an array or list of numbers.
Method :ref:`SLSQP <optimize.minimize-slsqp>` uses Sequential
Least SQuares Programming to minimize a function of several
variables with any combination of bounds, equality and inequality
constraints. The method wraps the SLSQP Optimization subroutine
originally implemented by Dieter Kraft [12]_. Note that the
wrapper handles infinite values in bounds by converting them into
large floating values.
**Custom minimizers**
It may be useful to pass a custom minimization method, for example
when using a frontend to this method such as `scipy.optimize.basinhopping`
or a different library. You can simply pass a callable as the ``method``
parameter.
The callable is called as ``method(fun, x0, args, **kwargs, **options)``
where ``kwargs`` corresponds to any other parameters passed to `minimize`
(such as `callback`, `hess`, etc.), except the `options` dict, which has
its contents also passed as `method` parameters pair by pair. Also, if
`jac` has been passed as a bool type, `jac` and `fun` are mangled so that
`fun` returns just the function values and `jac` is converted to a function
returning the Jacobian. The method shall return an ``OptimizeResult``
object.
The provided `method` callable must be able to accept (and possibly ignore)
arbitrary parameters; the set of parameters accepted by `minimize` may
expand in future versions and then these parameters will be passed to
the method. You can find an example in the scipy.optimize tutorial.
.. versionadded:: 0.11.0
References
----------
.. [1] Nelder, J A, and R Mead. 1965. A Simplex Method for Function
Minimization. The Computer Journal 7: 308-13.
.. [2] Wright M H. 1996. Direct search methods: Once scorned, now
respectable, in Numerical Analysis 1995: Proceedings of the 1995
Dundee Biennial Conference in Numerical Analysis (Eds. D F
Griffiths and G A Watson). Addison Wesley Longman, Harlow, UK.
191-208.
.. [3] Powell, M J D. 1964. An efficient method for finding the minimum of
a function of several variables without calculating derivatives. The
Computer Journal 7: 155-162.
.. [4] Press W, S A Teukolsky, W T Vetterling and B P Flannery.
Numerical Recipes (any edition), Cambridge University Press.
.. [5] Nocedal, J, and S J Wright. 2006. Numerical Optimization.
Springer New York.
.. [6] Byrd, R H and P Lu and J. Nocedal. 1995. A Limited Memory
Algorithm for Bound Constrained Optimization. SIAM Journal on
Scientific and Statistical Computing 16 (5): 1190-1208.
.. [7] Zhu, C and R H Byrd and J Nocedal. 1997. L-BFGS-B: Algorithm
778: L-BFGS-B, FORTRAN routines for large scale bound constrained
optimization. ACM Transactions on Mathematical Software 23 (4):
550-560.
.. [8] Nash, S G. Newton-Type Minimization Via the Lanczos Method.
1984. SIAM Journal of Numerical Analysis 21: 770-778.
.. [9] Powell, M J D. A direct search optimization method that models
the objective and constraint functions by linear interpolation.
1994. Advances in Optimization and Numerical Analysis, eds. S. Gomez
and J-P Hennart, Kluwer Academic (Dordrecht), 51-67.
.. [10] Powell M J D. Direct search algorithms for optimization
calculations. 1998. Acta Numerica 7: 287-336.
.. [11] Powell M J D. A view of algorithms for optimization without
derivatives. 2007. Cambridge University Technical Report DAMTP
2007/NA03
.. [12] Kraft, D. A software package for sequential quadratic
programming. 1988. Tech. Rep. DFVLR-FB 88-28, DLR German Aerospace
Center -- Institute for Flight Mechanics, Koln, Germany.
Examples
--------
Let us consider the problem of minimizing the Rosenbrock function. This
function (and its respective derivatives) is implemented in `rosen`
(resp. `rosen_der`, `rosen_hess`) in the `scipy.optimize`.
>>> from scipy.optimize import minimize, rosen, rosen_der
A simple application of the *Nelder-Mead* method is:
>>> x0 = [1.3, 0.7, 0.8, 1.9, 1.2]
>>> res = minimize(rosen, x0, method='Nelder-Mead', tol=1e-6)
>>> res.x
array([ 1., 1., 1., 1., 1.])
Now using the *BFGS* algorithm, using the first derivative and a few
options:
>>> res = minimize(rosen, x0, method='BFGS', jac=rosen_der,
... options={'gtol': 1e-6, 'disp': True})
Optimization terminated successfully.
Current function value: 0.000000
Iterations: 26
Function evaluations: 31
Gradient evaluations: 31
>>> res.x
array([ 1., 1., 1., 1., 1.])
>>> print(res.message)
Optimization terminated successfully.
>>> res.hess_inv
array([[ 0.00749589, 0.01255155, 0.02396251, 0.04750988, 0.09495377], # may vary
[ 0.01255155, 0.02510441, 0.04794055, 0.09502834, 0.18996269],
[ 0.02396251, 0.04794055, 0.09631614, 0.19092151, 0.38165151],
[ 0.04750988, 0.09502834, 0.19092151, 0.38341252, 0.7664427 ],
[ 0.09495377, 0.18996269, 0.38165151, 0.7664427, 1.53713523]])
Next, consider a minimization problem with several constraints (namely
Example 16.4 from [5]_). The objective function is:
>>> fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2
There are three constraints defined as:
>>> cons = ({'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2},
... {'type': 'ineq', 'fun': lambda x: -x[0] - 2 * x[1] + 6},
... {'type': 'ineq', 'fun': lambda x: -x[0] + 2 * x[1] + 2})
And variables must be positive, hence the following bounds:
>>> bnds = ((0, None), (0, None))
The optimization problem is solved using the SLSQP method as:
>>> res = minimize(fun, (2, 0), method='SLSQP', bounds=bnds,
... constraints=cons)
It should converge to the theoretical solution (1.4, 1.7).
"""
x0 = np.asarray(x0)
if x0.dtype.kind in np.typecodes["AllInteger"]:
x0 = np.asarray(x0, dtype=float)
if not isinstance(args, tuple):
args = (args,)
if method is None:
# Select automatically
if constraints:
method = 'SLSQP'
elif bounds is not None:
method = 'L-BFGS-B'
else:
method = 'BFGS'
if callable(method):
meth = "_custom"
else:
meth = method.lower()
if options is None:
options = {}
# check if optional parameters are supported by the selected method
# - jac
if meth in ['nelder-mead', 'powell', 'cobyla'] and bool(jac):
warn('Method %s does not use gradient information (jac).' % method,
RuntimeWarning)
# - hess
if meth not in ('newton-cg', 'dogleg', 'trust-ncg', '_custom') and hess is not None:
warn('Method %s does not use Hessian information (hess).' % method,
RuntimeWarning)
# - hessp
if meth not in ('newton-cg', 'dogleg', 'trust-ncg', '_custom') and hessp is not None:
warn('Method %s does not use Hessian-vector product '
'information (hessp).' % method, RuntimeWarning)
# - constraints or bounds
if (meth in ['nelder-mead', 'powell', 'cg', 'bfgs', 'newton-cg', 'dogleg',
'trust-ncg'] and (bounds is not None or np.any(constraints))):
warn('Method %s cannot handle constraints nor bounds.' % method,
RuntimeWarning)
if meth in ['l-bfgs-b', 'tnc'] and np.any(constraints):
warn('Method %s cannot handle constraints.' % method,
RuntimeWarning)
if meth == 'cobyla' and bounds is not None:
warn('Method %s cannot handle bounds.' % method,
RuntimeWarning)
# - callback
if (meth in ['cobyla'] and callback is not None):
warn('Method %s does not support callback.' % method, RuntimeWarning)
# - return_all
if (meth in ['l-bfgs-b', 'tnc', 'cobyla', 'slsqp'] and
options.get('return_all', False)):
warn('Method %s does not support the return_all option.' % method,
RuntimeWarning)
# fun also returns the jacobian
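    # (MemoizeJac wraps a ``fun`` that returns ``(f, grad)`` so that calling it
    # yields only ``f`` while the cached gradient is exposed as ``.derivative``;
    # this avoids evaluating the objective twice.)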
if not callable(jac):
if bool(jac):
fun = MemoizeJac(fun)
jac = fun.derivative
else:
jac = None
# set default tolerances
if tol is not None:
options = dict(options)
if meth == 'nelder-mead':
options.setdefault('xatol', tol)
options.setdefault('fatol', tol)
if meth in ['newton-cg', 'powell', 'tnc']:
options.setdefault('xtol', tol)
if meth in ['powell', 'l-bfgs-b', 'tnc', 'slsqp']:
options.setdefault('ftol', tol)
if meth in ['bfgs', 'cg', 'l-bfgs-b', 'tnc', 'dogleg', 'trust-ncg']:
options.setdefault('gtol', tol)
if meth in ['cobyla', '_custom']:
options.setdefault('tol', tol)
if meth == '_custom':
return method(fun, x0, args=args, jac=jac, hess=hess, hessp=hessp,
bounds=bounds, constraints=constraints,
callback=callback, **options)
elif meth == 'nelder-mead':
return _minimize_neldermead(fun, x0, args, callback, **options)
elif meth == 'powell':
return _minimize_powell(fun, x0, args, callback, **options)
elif meth == 'cg':
return _minimize_cg(fun, x0, args, jac, callback, **options)
elif meth == 'bfgs':
return _minimize_bfgs(fun, x0, args, jac, callback, **options)
elif meth == 'newton-cg':
return _minimize_newtoncg(fun, x0, args, jac, hess, hessp, callback,
**options)
elif meth == 'l-bfgs-b':
return _minimize_lbfgsb(fun, x0, args, jac, bounds,
callback=callback, **options)
elif meth == 'tnc':
return _minimize_tnc(fun, x0, args, jac, bounds, callback=callback,
**options)
elif meth == 'cobyla':
return _minimize_cobyla(fun, x0, args, constraints, **options)
elif meth == 'slsqp':
return _minimize_slsqp(fun, x0, args, jac, bounds,
constraints, callback=callback, **options)
elif meth == 'dogleg':
return _minimize_dogleg(fun, x0, args, jac, hess,
callback=callback, **options)
elif meth == 'trust-ncg':
return _minimize_trust_ncg(fun, x0, args, jac, hess, hessp,
callback=callback, **options)
else:
raise ValueError('Unknown solver %s' % method)
def minimize_scalar(fun, bracket=None, bounds=None, args=(),
method='brent', tol=None, options=None):
"""Minimization of scalar function of one variable.
Parameters
----------
fun : callable
Objective function.
Scalar function, must return a scalar.
bracket : sequence, optional
For methods 'brent' and 'golden', `bracket` defines the bracketing
interval and can either have three items ``(a, b, c)`` so that
``a < b < c`` and ``fun(b) < fun(a), fun(c)`` or two items ``a`` and
``c`` which are assumed to be a starting interval for a downhill
bracket search (see `bracket`); it doesn't always mean that the
obtained solution will satisfy ``a <= x <= c``.
bounds : sequence, optional
For method 'bounded', `bounds` is mandatory and must have two items
corresponding to the optimization bounds.
args : tuple, optional
Extra arguments passed to the objective function.
method : str or callable, optional
Type of solver. Should be one of:
- 'Brent' :ref:`(see here) <optimize.minimize_scalar-brent>`
- 'Bounded' :ref:`(see here) <optimize.minimize_scalar-bounded>`
- 'Golden' :ref:`(see here) <optimize.minimize_scalar-golden>`
- custom - a callable object (added in version 0.14.0), see below
tol : float, optional
Tolerance for termination. For detailed control, use solver-specific
options.
options : dict, optional
A dictionary of solver options.
maxiter : int
Maximum number of iterations to perform.
disp : bool
Set to True to print convergence messages.
See :func:`show_options()` for solver-specific options.
Returns
-------
res : OptimizeResult
The optimization result represented as an ``OptimizeResult`` object.
Important attributes are: ``x`` the solution array, ``success`` a
Boolean flag indicating if the optimizer exited successfully and
``message`` which describes the cause of the termination. See
`OptimizeResult` for a description of other attributes.
See also
--------
minimize : Interface to minimization algorithms for scalar multivariate
functions
show_options : Additional options accepted by the solvers
Notes
-----
This section describes the available solvers that can be selected by the
'method' parameter. The default method is *Brent*.
Method :ref:`Brent <optimize.minimize_scalar-brent>` uses Brent's
algorithm to find a local minimum. The algorithm uses inverse
parabolic interpolation when possible to speed up convergence of
the golden section method.
Method :ref:`Golden <optimize.minimize_scalar-golden>` uses the
golden section search technique. It uses an analog of the bisection
method to decrease the bracketed interval. It is usually
preferable to use the *Brent* method.
Method :ref:`Bounded <optimize.minimize_scalar-bounded>` can
perform bounded minimization. It uses the Brent method to find a
local minimum in the interval x1 < xopt < x2.
**Custom minimizers**
It may be useful to pass a custom minimization method, for example
when using some library frontend to minimize_scalar. You can simply
pass a callable as the ``method`` parameter.
The callable is called as ``method(fun, args, **kwargs, **options)``
where ``kwargs`` corresponds to any other parameters passed to `minimize`
(such as `bracket`, `tol`, etc.), except the `options` dict, which has
its contents also passed as `method` parameters pair by pair. The method
shall return an ``OptimizeResult`` object.
The provided `method` callable must be able to accept (and possibly ignore)
arbitrary parameters; the set of parameters accepted by `minimize` may
expand in future versions and then these parameters will be passed to
the method. You can find an example in the scipy.optimize tutorial.
.. versionadded:: 0.11.0
Examples
--------
Consider the problem of minimizing the following function.
>>> def f(x):
... return (x - 2) * x * (x + 2)**2
Using the *Brent* method, we find the local minimum as:
>>> from scipy.optimize import minimize_scalar
>>> res = minimize_scalar(f)
>>> res.x
1.28077640403
Using the *Bounded* method, we find a local minimum with specified
bounds as:
>>> res = minimize_scalar(f, bounds=(-3, -1), method='bounded')
>>> res.x
-2.0000002026
"""
if not isinstance(args, tuple):
args = (args,)
if callable(method):
meth = "_custom"
else:
meth = method.lower()
if options is None:
options = {}
if tol is not None:
options = dict(options)
if meth == 'bounded' and 'xatol' not in options:
warn("Method 'bounded' does not support relative tolerance in x; "
"defaulting to absolute tolerance.", RuntimeWarning)
options['xatol'] = tol
elif meth == '_custom':
options.setdefault('tol', tol)
else:
options.setdefault('xtol', tol)
if meth == '_custom':
return method(fun, args=args, bracket=bracket, bounds=bounds, **options)
elif meth == 'brent':
return _minimize_scalar_brent(fun, bracket, args, **options)
elif meth == 'bounded':
if bounds is None:
raise ValueError('The `bounds` parameter is mandatory for '
'method `bounded`.')
return _minimize_scalar_bounded(fun, bounds, args, **options)
elif meth == 'golden':
return _minimize_scalar_golden(fun, bracket, args, **options)
else:
raise ValueError('Unknown solver %s' % method)
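# --- Editorial sketch (not part of scipy) -------------------------------------
# The "Custom minimizers" notes in ``minimize`` describe passing a callable as
# ``method``.  The toy coordinate-search minimizer below only illustrates the
# calling convention; it is not a serious optimizer.  Extra keyword arguments
# forwarded by ``minimize`` (``jac``, ``bounds``, ...) are swallowed by
# ``**unknown_options``.  The ``__main__`` guard keeps imports of this module
# unchanged; the module is normally exercised via ``scipy.optimize``.
if __name__ == '__main__':
    from scipy.optimize import OptimizeResult

    def toy_minimizer(fun, x0, args=(), maxiter=100, step=0.1, callback=None,
                      **unknown_options):
        x = np.asarray(x0, dtype=float)
        for _ in range(maxiter):
            for i in range(x.size):
                for delta in (step, -step):
                    trial = x.copy()
                    trial[i] += delta
                    if fun(trial, *args) < fun(x, *args):
                        x = trial
            if callback is not None:
                callback(x)
        return OptimizeResult(x=x, fun=fun(x, *args), success=True,
                              nit=maxiter)

    res = minimize(lambda v: (v[0] - 1.0) ** 2 + (v[1] + 2.0) ** 2,
                   [0.0, 0.0], method=toy_minimizer,
                   options={'maxiter': 50})
    print(res.x)  # expected to end up near [1., -2.]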
| {
"content_hash": "ff9a86322d01459c1ca372902ab8420c",
"timestamp": "",
"source": "github",
"line_count": 619,
"max_line_length": 89,
"avg_line_length": 42.7075928917609,
"alnum_prop": 0.6385988803147223,
"repo_name": "kalvdans/scipy",
"id": "7cb054466f522748bef751483052457165185925",
"size": "26436",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "scipy/optimize/_minimize.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4127403"
},
{
"name": "C++",
"bytes": "503114"
},
{
"name": "Fortran",
"bytes": "5574493"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "11190581"
},
{
"name": "Shell",
"bytes": "2226"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
} |
from time import sleep
from threading import Thread, Barrier
# Barrier for 2 parties: each wait() call blocks until the one other party
# (parties - 1 = 1) has also reached the barrier.
b = Barrier(2)
def task1(b):
sleep(2)
print('first')
b.wait()
print('first done')
def task2(b):
b.wait()
print('second')
Thread(target=task1, args=(b,)).start()
task2(b)
print('finished!')
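# Editorial note (derived from the code above): "first" appears after roughly
# two seconds; once both parties reach the barrier, "first done" (worker
# thread) and "second" followed by "finished!" (main thread) interleave in a
# nondeterministic order.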
| {
"content_hash": "377d584d7e7ac690b9bd06fdb376d383",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 39,
"avg_line_length": 15.95,
"alnum_prop": 0.6269592476489029,
"repo_name": "sevaivanov/various",
"id": "c2a9757a3f7aff3640dc88e80f4600e0a7d275f4",
"size": "339",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python/barrier.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "AspectJ",
"bytes": "4619"
},
{
"name": "C++",
"bytes": "6759"
},
{
"name": "CSS",
"bytes": "6690"
},
{
"name": "Common Lisp",
"bytes": "4008"
},
{
"name": "HTML",
"bytes": "16673"
},
{
"name": "Java",
"bytes": "4004"
},
{
"name": "Makefile",
"bytes": "387"
},
{
"name": "Prolog",
"bytes": "3709"
},
{
"name": "Python",
"bytes": "10473"
},
{
"name": "Shell",
"bytes": "2262"
},
{
"name": "TeX",
"bytes": "2779"
}
],
"symlink_target": ""
} |
""" This example demonstrates rendering a color volume.
This example demonstrates two render styles. Note that
all render styles are capable of rendering color data.
"""
import numpy as np
import visvis as vv
app = vv.use()
# Use vv.aVolume to create random bars for each color plane
N = 64
vol = np.empty((N,N,N,3), dtype='float32')
for i in range(3):
vol[:,:,:,i] = vv.aVolume(10,N)
# Show
vv.figure()
a1 = vv.subplot(121)
t1 = vv.volshow(vol[:,:,:,:], renderStyle = 'mip')
vv.title('color MIP render')
a2 = vv.subplot(122)
t2 = vv.volshow(vol[:,:,:,:], renderStyle = 'iso')
t2.isoThreshold = 0.5
vv.title('color ISO-surface render')
# Share cameras
a1.camera = a2.camera
# Run app
app.Run()
| {
"content_hash": "9129a24f6aa4b32c639bf28de51244be",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 59,
"avg_line_length": 23.566666666666666,
"alnum_prop": 0.6775106082036775,
"repo_name": "almarklein/visvis.dev",
"id": "0040c58b5dcbe5a978a2bb28652ac8b198772471",
"size": "729",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "examples/colorVolume.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1121423"
}
],
"symlink_target": ""
} |
import cherrypy
from cherrypy.lib import auth_digest
from cherrypy.test import helper
class DigestAuthTest(helper.CPWebCase):
def setup_server():
class Root:
def index(self):
return "This is public."
index.exposed = True
class DigestProtected:
def index(self):
return "Hello %s, you've been authorized." % (
cherrypy.request.login)
index.exposed = True
def fetch_users():
return {'test': 'test'}
get_ha1 = cherrypy.lib.auth_digest.get_ha1_dict_plain(fetch_users())
conf = {'/digest': {'tools.auth_digest.on': True,
'tools.auth_digest.realm': 'localhost',
'tools.auth_digest.get_ha1': get_ha1,
'tools.auth_digest.key': 'a565c27146791cfb',
'tools.auth_digest.debug': 'True'}}
root = Root()
root.digest = DigestProtected()
cherrypy.tree.mount(root, config=conf)
setup_server = staticmethod(setup_server)
def testPublic(self):
self.getPage("/")
self.assertStatus('200 OK')
self.assertHeader('Content-Type', 'text/html;charset=utf-8')
self.assertBody('This is public.')
def testDigest(self):
self.getPage("/digest/")
self.assertStatus(401)
value = None
for k, v in self.headers:
if k.lower() == "www-authenticate":
if v.startswith("Digest"):
value = v
break
if value is None:
self._handlewebError(
"Digest authentification scheme was not found")
value = value[7:]
items = value.split(', ')
tokens = {}
for item in items:
key, value = item.split('=')
tokens[key.lower()] = value
missing_msg = "%s is missing"
bad_value_msg = "'%s' was expecting '%s' but found '%s'"
nonce = None
if 'realm' not in tokens:
self._handlewebError(missing_msg % 'realm')
elif tokens['realm'] != '"localhost"':
self._handlewebError(bad_value_msg %
('realm', '"localhost"', tokens['realm']))
if 'nonce' not in tokens:
self._handlewebError(missing_msg % 'nonce')
else:
nonce = tokens['nonce'].strip('"')
if 'algorithm' not in tokens:
self._handlewebError(missing_msg % 'algorithm')
elif tokens['algorithm'] != '"MD5"':
self._handlewebError(bad_value_msg %
('algorithm', '"MD5"', tokens['algorithm']))
if 'qop' not in tokens:
self._handlewebError(missing_msg % 'qop')
elif tokens['qop'] != '"auth"':
self._handlewebError(bad_value_msg %
('qop', '"auth"', tokens['qop']))
get_ha1 = auth_digest.get_ha1_dict_plain({'test': 'test'})
# Test user agent response with a wrong value for 'realm'
base_auth = ('Digest username="test", '
'realm="wrong realm", '
'nonce="%s", '
'uri="/digest/", '
'algorithm=MD5, '
'response="%s", '
'qop=auth, '
'nc=%s, '
'cnonce="1522e61005789929"')
auth_header = base_auth % (
nonce, '11111111111111111111111111111111', '00000001')
auth = auth_digest.HttpDigestAuthorization(auth_header, 'GET')
# calculate the response digest
ha1 = get_ha1(auth.realm, 'test')
response = auth.request_digest(ha1)
# send response with correct response digest, but wrong realm
auth_header = base_auth % (nonce, response, '00000001')
self.getPage('/digest/', [('Authorization', auth_header)])
self.assertStatus(401)
# Test that must pass
base_auth = ('Digest username="test", '
'realm="localhost", '
'nonce="%s", '
'uri="/digest/", '
'algorithm=MD5, '
'response="%s", '
'qop=auth, '
'nc=%s, '
'cnonce="1522e61005789929"')
auth_header = base_auth % (
nonce, '11111111111111111111111111111111', '00000001')
auth = auth_digest.HttpDigestAuthorization(auth_header, 'GET')
# calculate the response digest
ha1 = get_ha1('localhost', 'test')
response = auth.request_digest(ha1)
# send response with correct response digest
auth_header = base_auth % (nonce, response, '00000001')
self.getPage('/digest/', [('Authorization', auth_header)])
self.assertStatus('200 OK')
self.assertBody("Hello test, you've been authorized.")
| {
"content_hash": "bb57ac395a10350b7dff35d1c54c2251",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 77,
"avg_line_length": 37.1044776119403,
"alnum_prop": 0.5104585679806919,
"repo_name": "clausqr/HTPC-Manager",
"id": "b46698d94ccd237b0c6a2007ebef40acf27df6d3",
"size": "5102",
"binary": false,
"copies": "21",
"ref": "refs/heads/master2",
"path": "libs/cherrypy/test/test_auth_digest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "352"
},
{
"name": "CSS",
"bytes": "48317"
},
{
"name": "HTML",
"bytes": "159360"
},
{
"name": "JavaScript",
"bytes": "453328"
},
{
"name": "Python",
"bytes": "4671193"
},
{
"name": "R",
"bytes": "2187"
},
{
"name": "Shell",
"bytes": "3746"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from prompt_toolkit.contrib.regular_languages.lexer import GrammarLexer
from prompt_toolkit.lexers import PygmentsLexer, SimpleLexer
from pygments.lexers import BashLexer
from .grammar import COMMAND_GRAMMAR
__all__ = (
'create_command_lexer',
)
def create_command_lexer():
"""
Lexer for highlighting of the command line.
"""
return GrammarLexer(COMMAND_GRAMMAR, lexers={
'command': SimpleLexer('class:commandline.command'),
'location': SimpleLexer('class:commandline.location'),
'shell_command': PygmentsLexer(BashLexer),
})
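# Editorial note: GrammarLexer maps grammar variable names to sub-lexers, so an
# additional variable defined in COMMAND_GRAMMAR could be highlighted by adding
# another entry, e.g. 'filename': SimpleLexer('class:commandline.filename').
# (The 'filename' variable and its style class are hypothetical examples.)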
| {
"content_hash": "c4e20de17890ce390f1faa5c329f8a2b",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 71,
"avg_line_length": 28.136363636363637,
"alnum_prop": 0.7189014539579968,
"repo_name": "jonathanslenders/pyvim",
"id": "2dd780771dcdfdbdfc44537c348c50a0685f4a06",
"size": "619",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyvim/commands/lexer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "115709"
}
],
"symlink_target": ""
} |
"""
This is awesome. And needs more documentation.
To bring some light in the big number of classes in this file:
First there are:
* ``SuperForm``
* ``SuperModelForm``
They are the forms that you probably want to use in your own code. They are
direct base classes of ``django.forms.Form`` and ``django.forms.ModelForm``
and have the formset functionallity of this module backed in. They are ready
to use. Subclass them and be happy.
Then there are:
* ``SuperFormMixin``
* ``SuperModelFormMixin``
These are the mixins you can use if you don't want to subclass from
``django.forms.Form`` for whatever reason. The ones with Base at the beginning
don't have a metaclass attached. The ones without the Base in the name have
the relevant metaclass in place that handles the search for
``FormSetField``s.
Here is an example on how you can use this module::
from django import forms
from django_superform import SuperModelForm, FormSetField
from .forms import CommentFormSet
class PostForm(SuperModelForm):
title = forms.CharField()
text = forms.CharField()
comments = FormSetField(CommentFormSet)
# Now you can use the form in the view:
def post_form(request):
if request.method == 'POST':
form = PostForm(request.POST, request.FILES)
if form.is_valid():
obj = form.save()
return HttpResponseRedirect(obj.get_absolute_url())
else:
form = PostForm()
return render_to_response('post_form.html', {
'form',
}, context_instance=RequestContext(request))
And yes, thanks for asking, the ``form.is_valid()`` and ``form.save()`` calls
transparently propagate to the defined comments formset and call its
``is_valid()`` and ``save()`` methods. So you don't have to do anything
special in your view!
Now to how you can access the instantiated formsets::
>>> form = PostForm()
>>> form.formsets['comments']
<CommentFormSet: ...>
Or in the template::
{{ form.as_p }}
{{ form.formsets.comments.management_form }}
{% for fieldset_form in form.formsets.comments %}
{{ fieldset_form.as_p }}
{% endfor %}
You're welcome.
"""
from functools import reduce
from django import forms
from django.forms.forms import DeclarativeFieldsMetaclass, ErrorDict, ErrorList
from django.forms.models import ModelFormMetaclass
from django.utils import six
import copy
from .fields import CompositeField
try:
from collections import OrderedDict
except ImportError:
from django.utils.datastructures import SortedDict as OrderedDict
class DeclerativeCompositeFieldsMetaclass(type):
"""
Metaclass that converts FormField and FormSetField attributes to a
dictionary called `composite_fields`. It will also include all composite
fields from parent classes.
"""
def __new__(mcs, name, bases, attrs):
# Collect composite fields from current class.
current_fields = []
for key, value in list(attrs.items()):
if isinstance(value, CompositeField):
current_fields.append((key, value))
attrs.pop(key)
current_fields.sort(key=lambda x: x[1].creation_counter)
attrs['declared_composite_fields'] = OrderedDict(current_fields)
new_class = super(DeclerativeCompositeFieldsMetaclass, mcs).__new__(
mcs, name, bases, attrs)
# Walk through the MRO.
declared_fields = OrderedDict()
for base in reversed(new_class.__mro__):
# Collect fields from base class.
if hasattr(base, 'declared_composite_fields'):
declared_fields.update(base.declared_composite_fields)
# Field shadowing.
for attr, value in base.__dict__.items():
if value is None and attr in declared_fields:
declared_fields.pop(attr)
new_class.base_composite_fields = declared_fields
new_class.declared_composite_fields = declared_fields
return new_class
class SuperFormMetaclass(
DeclerativeCompositeFieldsMetaclass,
DeclarativeFieldsMetaclass):
"""
Metaclass for :class:`~django_superform.forms.SuperForm`.
"""
class SuperModelFormMetaclass(
DeclerativeCompositeFieldsMetaclass,
ModelFormMetaclass):
"""
Metaclass for :class:`~django_superform.forms.SuperModelForm`.
"""
class SuperFormMixin(object):
"""
The base class for all super forms. It does not inherit from any other
classes, so you are free to mix it into any custom form class you have. You
need to use it together with ``SuperFormMetaclass``, like this:
.. code:: python
from django_superform import SuperFormMixin
from django_superform import SuperFormMetaclass
import six
class MySuperForm(six.with_metaclass(
SuperFormMetaclass,
SuperFormMixin,
MyCustomForm)):
pass
The goal of a superform is to behave just like a normal django form but is
able to take composite fields, like
:class:`~django_superform.fields.FormField` and
:class:`~django_superform.fields.FormSetField`.
Cleaning, validation, etc. should work totally transparently. See the
:ref:`Quickstart Guide <quickstart>` for how superforms are used.
"""
def __init__(self, *args, **kwargs):
super(SuperFormMixin, self).__init__(*args, **kwargs)
self._init_composite_fields()
def __getitem__(self, name):
"""
Returns a ``django.forms.BoundField`` for the given field name. It also
returns :class:`~django_superform.boundfield.CompositeBoundField`
instances for composite fields.
"""
if name not in self.fields and name in self.composite_fields:
field = self.composite_fields[name]
return field.get_bound_field(self, name)
return super(SuperFormMixin, self).__getitem__(name)
def add_composite_field(self, name, field):
"""
Add a dynamic composite field to the already existing ones and
initialize it appropriately.
"""
self.composite_fields[name] = field
self._init_composite_field(name, field)
def get_composite_field_value(self, name):
"""
Return the form/formset instance for the given field name.
"""
field = self.composite_fields[name]
if hasattr(field, 'get_form'):
return self.forms[name]
if hasattr(field, 'get_formset'):
return self.formsets[name]
def _init_composite_field(self, name, field):
if hasattr(field, 'get_form'):
form = field.get_form(self, name)
self.forms[name] = form
if hasattr(field, 'get_formset'):
formset = field.get_formset(self, name)
self.formsets[name] = formset
def _init_composite_fields(self):
"""
Setup the forms and formsets.
"""
# The base_composite_fields class attribute is the *class-wide*
# definition of fields. Because a particular *instance* of the class
# might want to alter self.composite_fields, we create
# self.composite_fields here by copying base_composite_fields.
# Instances should always modify self.composite_fields; they should not
# modify base_composite_fields.
self.composite_fields = copy.deepcopy(self.base_composite_fields)
self.forms = OrderedDict()
self.formsets = OrderedDict()
for name, field in self.composite_fields.items():
self._init_composite_field(name, field)
def full_clean(self):
"""
Clean the form, including all formsets and add formset errors to the
errors dict. Errors of nested forms and formsets are only included if
they actually contain errors.
"""
super(SuperFormMixin, self).full_clean()
for field_name, composite in self.forms.items():
composite.full_clean()
if not composite.is_valid() and composite._errors:
self._errors[field_name] = ErrorDict(composite._errors)
for field_name, composite in self.formsets.items():
composite.full_clean()
if not composite.is_valid() and composite._errors:
self._errors[field_name] = ErrorList(composite._errors)
@property
def media(self):
"""
Incorporate the composite fields' media.
"""
media_list = []
media_list.append(super(SuperFormMixin, self).media)
for composite_name in self.composite_fields.keys():
form = self.get_composite_field_value(composite_name)
media_list.append(form.media)
return reduce(lambda a, b: a + b, media_list)
class SuperModelFormMixin(SuperFormMixin):
"""
Can be used in with your custom form subclasses like this:
.. code:: python
from django_superform import SuperModelFormMixin
from django_superform import SuperModelFormMetaclass
import six
class MySuperForm(six.with_metaclass(
SuperModelFormMetaclass,
SuperModelFormMixin,
MyCustomModelForm)):
pass
"""
def save(self, commit=True):
"""
When saving a super model form, the nested forms and formsets will be
saved as well.
The implementation of ``.save()`` looks like this:
.. code:: python
saved_obj = self.save_form()
self.save_forms()
self.save_formsets()
return saved_obj
That makes it easy to override it in order to change the order in which
things are saved.
The ``.save()`` method will return only a single model instance even if
nested forms are saved as well. That keeps the API similar to what
Django's model forms are offering.
If ``commit=False`` django's modelform implementation will attach a
``save_m2m`` method to the form instance, so that you can call it
manually later. When you call ``save_m2m``, the ``save_forms`` and
``save_formsets`` methods will be executed as well so again all nested
forms are taken care of transparently.
"""
saved_obj = self.save_form(commit=commit)
self.save_forms(commit=commit)
self.save_formsets(commit=commit)
return saved_obj
def _extend_save_m2m(self, name, composites):
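        # Editorial comment: collect the ``save_m2m`` callbacks of the nested
        # composites and chain them onto this form's own ``save_m2m`` so that
        # a later ``form.save_m2m()`` call also saves the nested m2m data.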
additional_save_m2m = []
for composite in composites:
if hasattr(composite, 'save_m2m'):
additional_save_m2m.append(composite.save_m2m)
if not additional_save_m2m:
return
def additional_saves():
for save_m2m in additional_save_m2m:
save_m2m()
# The save() method was called before save_forms()/save_formsets(), so
# we will already have save_m2m() available.
if hasattr(self, 'save_m2m'):
_original_save_m2m = self.save_m2m
else:
def _original_save_m2m():
return None
def augmented_save_m2m():
_original_save_m2m()
additional_saves()
self.save_m2m = augmented_save_m2m
setattr(self, name, additional_saves)
def save_form(self, commit=True):
"""
This calls Django's ``ModelForm.save()``. It only takes care of
saving this actual form, and leaves the nested forms and formsets
alone.
We separate this out of the
:meth:`~django_superform.forms.SuperModelForm.save` method to make
extensibility easier.
"""
return super(SuperModelFormMixin, self).save(commit=commit)
def save_forms(self, commit=True):
saved_composites = []
for name, composite in self.forms.items():
field = self.composite_fields[name]
if hasattr(field, 'save'):
field.save(self, name, composite, commit=commit)
saved_composites.append(composite)
self._extend_save_m2m('save_forms_m2m', saved_composites)
def save_formsets(self, commit=True):
"""
Save all formsets. If ``commit=False``, it will modify the form's
``save_m2m()`` so that it also calls the formsets' ``save_m2m()``
methods.
"""
saved_composites = []
for name, composite in self.formsets.items():
field = self.composite_fields[name]
if hasattr(field, 'save'):
field.save(self, name, composite, commit=commit)
saved_composites.append(composite)
self._extend_save_m2m('save_formsets_m2m', saved_composites)
class SuperModelForm(six.with_metaclass(SuperModelFormMetaclass,
SuperModelFormMixin, forms.ModelForm)):
"""
The ``SuperModelForm`` works like a Django ``ModelForm`` but has the
capabilities of nesting like :class:`~django_superform.forms.SuperForm`.
Saving a ``SuperModelForm`` will also save all nested model forms as well.
"""
class SuperForm(six.with_metaclass(SuperFormMetaclass,
SuperFormMixin, forms.Form)):
"""
The base class for all super forms. The goal of a superform is to behave
just like a normal django form but is able to take composite fields, like
:class:`~django_superform.fields.FormField` and
:class:`~django_superform.fields.FormSetField`.
Cleaning, validation, etc. should work totally transparently. See the
:ref:`Quickstart Guide <quickstart>` for how superforms are used.
"""
| {
"content_hash": "3d026e2d2ebdd0c59dbbf4d6745be8c2",
"timestamp": "",
"source": "github",
"line_count": 395,
"max_line_length": 79,
"avg_line_length": 34.86582278481013,
"alnum_prop": 0.6363636363636364,
"repo_name": "gregmuellegger/django-superform",
"id": "13fe99e026ac37434b98a9bf272bb0df646667d3",
"size": "13772",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_superform/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "106"
},
{
"name": "Python",
"bytes": "63560"
}
],
"symlink_target": ""
} |
"""
Plugin responsible for managing SSL options
"""
import os
from OpenSSL import crypto
from socket import gethostname
from packstack.installer import basedefs
from packstack.installer import utils
from packstack.installer import validators
from packstack.modules.documentation import update_params_usage
# ------------- SSL Packstack Plugin Initialization --------------
PLUGIN_NAME = "SSL"
PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
def initConfig(controller):
params = {
"SSL": [
{"CMD_OPTION": "ssl-cacert-file",
"PROMPT": ("Enter the filename of the SSL CAcertificate, if the"
" CONFIG_SSL_CACERT_SELFSIGN is set to y the path "
"will be CONFIG_SSL_CERT_DIR/certs/selfcert.crt"),
"OPTION_LIST": [],
"VALIDATORS": [validators.validate_not_empty],
"DEFAULT_VALUE": "/etc/pki/tls/certs/selfcert.crt",
"MASK_INPUT": False,
"LOOSE_VALIDATION": True,
"CONF_NAME": "CONFIG_SSL_CACERT_FILE",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "ssl-cacert-key-file",
"PROMPT": ("Enter the filename of the SSL CAcertificate Key file"
", if the CONFIG_SSL_CACERT_SELFSIGN is set to y the "
"path will be CONFIG_SSL_CERT_DIR/keys/selfkey.key"),
"OPTION_LIST": [],
"VALIDATORS": [validators.validate_not_empty],
"DEFAULT_VALUE": "/etc/pki/tls/private/selfkey.key",
"MASK_INPUT": False,
"LOOSE_VALIDATION": True,
"CONF_NAME": "CONFIG_SSL_CACERT_KEY_FILE",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "ssl-cert-dir",
"PROMPT": ("Enter the path to use to store generated SSL certificates in"),
"OPTION_LIST": [],
"VALIDATORS": [validators.validate_not_empty,
validators.validate_writeable_directory],
"DEFAULT_VALUE": "~/packstackca/",
"MASK_INPUT": False,
"LOOSE_VALIDATION": True,
"CONF_NAME": "CONFIG_SSL_CERT_DIR",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "ssl-cacert-selfsign",
"PROMPT": "Should packstack use selfsigned CAcert.",
"OPTION_LIST": ["y", "n"],
"VALIDATORS": [validators.validate_options],
"DEFAULT_VALUE": "y",
"MASK_INPUT": False,
"LOOSE_VALIDATION": False,
"CONF_NAME": 'CONFIG_SSL_CACERT_SELFSIGN',
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "ssl-cert-subject-country",
"PROMPT": "Enter the ssl certificates subject country.",
"OPTION_LIST": [],
"VALIDATORS": [validators.validate_not_empty],
"DEFAULT_VALUE": "--",
"MASK_INPUT": False,
"LOOSE_VALIDATION": False,
"CONF_NAME": 'CONFIG_SSL_CERT_SUBJECT_C',
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False,
"DEPRECATES": ['CONFIG_SELFSIGN_CACERT_SUBJECT_C']},
{"CMD_OPTION": "ssl-cert-subject-state",
"PROMPT": "Enter the ssl certificates subject state.",
"OPTION_LIST": [],
"VALIDATORS": [validators.validate_not_empty],
"DEFAULT_VALUE": "State",
"MASK_INPUT": False,
"LOOSE_VALIDATION": False,
"CONF_NAME": 'CONFIG_SSL_CERT_SUBJECT_ST',
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False,
"DEPRECATES": ['CONFIG_SELFSIGN_CACERT_SUBJECT_ST']},
{"CMD_OPTION": "ssl-cert-subject-location",
"PROMPT": "Enter the ssl certificate subject location.",
"OPTION_LIST": [],
"VALIDATORS": [validators.validate_not_empty],
"DEFAULT_VALUE": "City",
"MASK_INPUT": False,
"LOOSE_VALIDATION": False,
"CONF_NAME": 'CONFIG_SSL_CERT_SUBJECT_L',
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False,
"DEPRECATES": ['CONFIG_SELFSIGN_CACERT_SUBJECT_L']},
{"CMD_OPTION": "ssl-cert-subject-organization",
"PROMPT": "Enter the ssl certificate subject organization.",
"OPTION_LIST": [],
"VALIDATORS": [validators.validate_not_empty],
"DEFAULT_VALUE": "openstack",
"MASK_INPUT": False,
"LOOSE_VALIDATION": False,
"CONF_NAME": 'CONFIG_SSL_CERT_SUBJECT_O',
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False,
"DEPRECATES": ['CONFIG_SELFSIGN_CACERT_SUBJECT_O']},
{"CMD_OPTION": "ssl-cert-subject-organizational-unit",
"PROMPT": "Enter the ssl certificate subject organizational unit.",
"OPTION_LIST": [],
"VALIDATORS": [validators.validate_not_empty],
"DEFAULT_VALUE": "packstack",
"MASK_INPUT": False,
"LOOSE_VALIDATION": False,
"CONF_NAME": 'CONFIG_SSL_CERT_SUBJECT_OU',
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False,
"DEPRECATES": ['CONFIG_SELFSIGN_CACERT_SUBJECT_OU']},
{"CMD_OPTION": "ssl-cert-subject-common-name",
"PROMPT": "Enter the ssl certificaate subject common name.",
"OPTION_LIST": [],
"VALIDATORS": [validators.validate_not_empty],
"DEFAULT_VALUE": gethostname(),
"MASK_INPUT": False,
"LOOSE_VALIDATION": False,
"CONF_NAME": 'CONFIG_SSL_CERT_SUBJECT_CN',
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False,
"DEPRECATES": ['CONFIG_SELFSIGN_CACERT_SUBJECT_CN']},
{"CMD_OPTION": "ssl-cert-subject-email",
"PROMPT": "Enter the ssl certificate subject admin email.",
"OPTION_LIST": [],
"VALIDATORS": [validators.validate_not_empty],
"DEFAULT_VALUE": "admin@%s" % gethostname(),
"MASK_INPUT": False,
"LOOSE_VALIDATION": False,
"CONF_NAME": 'CONFIG_SSL_CERT_SUBJECT_MAIL',
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False,
"DEPRECATES": ['CONFIG_SELFSIGN_CACERT_SUBJECT_MAIL']},
]
}
update_params_usage(basedefs.PACKSTACK_DOC, params)
groups = [
{"GROUP_NAME": "SSL",
"DESCRIPTION": "SSL Config parameters",
"PRE_CONDITION": lambda x: 'yes',
"PRE_CONDITION_MATCH": "yes",
"POST_CONDITION": False,
"POST_CONDITION_MATCH": True},
]
for group in groups:
controller.addGroup(group, params[group['GROUP_NAME']])
def initSequences(controller):
ssl_steps = [
{'title': 'Setting up CACERT',
'functions': [create_self_signed_cert]}
]
controller.addSequence("Setting up SSL", [], [],
ssl_steps)
# ------------------------- helper functions -------------------------
def create_self_signed_cert(config, messages):
"""
OpenSSL wrapper to create a self-signed CA.
"""
# For now, a hardcoded location where the CA cert file lands on the servers
config['CONFIG_SSL_CACERT'] = '/etc/pki/tls/certs/packstack_cacert.crt'
if (config['CONFIG_AMQP_ENABLE_SSL'] != 'y' and
config["CONFIG_HORIZON_SSL"] != 'y'):
return
config['CONFIG_SSL_CERT_DIR'] = os.path.expanduser(
config['CONFIG_SSL_CERT_DIR']
)
if not os.path.isdir(config['CONFIG_SSL_CERT_DIR']):
os.mkdir(config['CONFIG_SSL_CERT_DIR'])
certs = os.path.join(config['CONFIG_SSL_CERT_DIR'], 'certs')
if not os.path.isdir(certs):
os.mkdir(certs)
keys = os.path.join(config['CONFIG_SSL_CERT_DIR'], 'keys')
if not os.path.isdir(keys):
os.mkdir(keys)
if config['CONFIG_SSL_CACERT_SELFSIGN'] != 'y':
return
CERT_FILE = config['CONFIG_SSL_CACERT_FILE'] = (
'%s/certs/selfcert.crt' % config['CONFIG_SSL_CERT_DIR']
)
KEY_FILE = config['CONFIG_SSL_CACERT_KEY_FILE'] = (
'%s/keys/selfcert.crt' % config['CONFIG_SSL_CERT_DIR']
)
if not os.path.exists(CERT_FILE) or not os.path.exists(KEY_FILE):
# create a key pair
k = crypto.PKey()
k.generate_key(crypto.TYPE_RSA, 4096)
# create a self-signed cert
mail = config['CONFIG_SSL_CERT_SUBJECT_MAIL']
cert = crypto.X509()
subject = cert.get_subject()
subject.C = config['CONFIG_SSL_CERT_SUBJECT_C']
subject.ST = config['CONFIG_SSL_CERT_SUBJECT_ST']
subject.L = config['CONFIG_SSL_CERT_SUBJECT_L']
subject.O = config['CONFIG_SSL_CERT_SUBJECT_O'] # noqa: E741
subject.OU = config['CONFIG_SSL_CERT_SUBJECT_OU']
subject.CN = config['CONFIG_SSL_CERT_SUBJECT_CN']
subject.emailAddress = mail
cert.set_serial_number(1000)
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(10 * 365 * 24 * 60 * 60)
cert.set_issuer(cert.get_subject())
cert.set_pubkey(k)
# CA extensions
cert.add_extensions([
crypto.X509Extension("basicConstraints".encode('ascii'), False,
"CA:TRUE".encode('ascii')),
crypto.X509Extension("keyUsage".encode('ascii'), False,
"keyCertSign, cRLSign".encode('ascii')),
crypto.X509Extension("subjectKeyIdentifier".encode('ascii'), False,
"hash".encode('ascii'),
subject=cert),
])
cert.add_extensions([
crypto.X509Extension(
"authorityKeyIdentifier".encode('ascii'), False,
"keyid:always".encode('ascii'), issuer=cert)
])
cert.sign(k, 'sha256')
open((CERT_FILE), "w").write(
crypto.dump_certificate(crypto.FILETYPE_PEM, cert).decode())
open((KEY_FILE), "w").write(
crypto.dump_privatekey(crypto.FILETYPE_PEM, k).decode())
messages.append(
"%sNOTE%s : A selfsigned CA certificate was generated to be used "
"for ssl, you should still change it do subordinate CA cert. In "
"any case please save the contents of %s."
% (utils.COLORS['red'], utils.COLORS['nocolor'],
config['CONFIG_SSL_CERT_DIR']))
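# --- Editorial sketch (not part of packstack) ---------------------------------
# Illustrative helper showing how the CA certificate written by
# create_self_signed_cert() could be loaded back and inspected with pyOpenSSL.
# It is not called anywhere in the plugin.
def _inspect_cacert(cert_path):
    """Return (subject CN, notAfter) of the PEM certificate at ``cert_path``."""
    with open(cert_path, 'rb') as cert_file:
        cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_file.read())
    return cert.get_subject().CN, cert.get_notAfter()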
| {
"content_hash": "204c94461f7a298b3cc042dc65157107",
"timestamp": "",
"source": "github",
"line_count": 279,
"max_line_length": 88,
"avg_line_length": 39.40860215053763,
"alnum_prop": 0.5396089131423374,
"repo_name": "mahak/packstack",
"id": "31ce2e621a91c23b3596ac71577d2454a70b511f",
"size": "11565",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "packstack/plugins/ssl_001.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Puppet",
"bytes": "170058"
},
{
"name": "Python",
"bytes": "511156"
},
{
"name": "Ruby",
"bytes": "11197"
},
{
"name": "Shell",
"bytes": "27842"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import mock
from datetime import datetime
from bokeh.core.properties import field, value
from bokeh.core.validation import check_integrity
from bokeh.models.annotations import (
Legend, LegendItem, ColorBar, Arrow, BoxAnnotation, Span, LabelSet, Label,
Title, Band, Whisker, Slope
)
from bokeh.models import (
ColumnDataSource, ArrowHead, BasicTicker, BasicTickFormatter, GlyphRenderer
)
from .utils.property_utils import (
FILL, LINE, TEXT, ANGLE, prefix,
check_properties_existence, check_fill_properties,
check_line_properties, check_text_properties
)
def test_Legend():
legend = Legend()
assert legend.plot is None
assert legend.location == 'top_right'
assert legend.label_standoff == 5
assert legend.label_height == 20
assert legend.label_width == 20
assert legend.glyph_height == 20
assert legend.glyph_width == 20
assert legend.padding == 10
assert legend.spacing == 3
assert legend.margin == 10
assert legend.items == []
check_line_properties(legend, "border_", "#e5e5e5", 1.0, 0.5)
check_text_properties(legend, "label_", "10pt", "middle")
check_fill_properties(legend, "background_", "#ffffff", 0.95)
check_properties_existence(legend, [
"plot",
"visible",
"location",
"orientation",
"label_standoff",
"label_height",
"label_width",
"glyph_height",
"glyph_width",
"margin",
"padding",
"spacing",
"items",
"level",
"click_policy"],
prefix('label_', TEXT),
prefix('border_', LINE),
prefix('background_', FILL),
prefix('inactive_', FILL))
def test_ColorBar():
color_bar = ColorBar()
assert color_bar.plot is None
assert color_bar.location == 'top_right'
assert color_bar.orientation == 'vertical'
assert color_bar.height == 'auto'
assert color_bar.width == 'auto'
assert color_bar.scale_alpha == 1.0
assert color_bar.title is None
assert color_bar.title_standoff == 2
assert isinstance(color_bar.ticker, BasicTicker)
assert isinstance(color_bar.formatter, BasicTickFormatter)
assert color_bar.color_mapper is None
assert color_bar.margin == 30
assert color_bar.padding == 10
assert color_bar.label_standoff == 5
assert color_bar.major_tick_in == 5
assert color_bar.major_tick_out == 0
assert color_bar.minor_tick_in == 0
assert color_bar.minor_tick_out == 0
check_text_properties(color_bar, "title_", "10pt", "bottom", "italic")
check_text_properties(color_bar, "major_label_", "8pt", "middle", "normal", "center")
check_line_properties(color_bar, "major_tick_", "#ffffff")
check_line_properties(color_bar, "minor_tick_", None)
check_line_properties(color_bar, "bar_", None)
check_line_properties(color_bar, "border_", None)
check_fill_properties(color_bar, "background_", "#ffffff", 0.95)
check_properties_existence(color_bar, [
"plot",
"level",
"visible",
"location",
"orientation",
"height",
"width",
"scale_alpha",
"title",
"title_standoff",
"ticker",
"formatter",
"color_mapper",
"margin",
"padding",
"label_standoff",
"major_tick_in",
"major_tick_out",
"minor_tick_in",
"minor_tick_out",
"major_label_overrides"],
prefix('title_', TEXT),
prefix('major_label_', TEXT),
prefix('major_tick_', LINE),
prefix('minor_tick_', LINE),
prefix('bar_', LINE),
prefix('border_', LINE),
prefix('background_', FILL)
)
def test_Arrow():
arrow = Arrow()
assert arrow.plot is None
assert arrow.x_start is None
assert arrow.y_start is None
assert arrow.start_units == 'data'
assert arrow.start is None
assert arrow.x_end is None
assert arrow.y_end is None
assert arrow.end_units == 'data'
assert isinstance(arrow.end, ArrowHead)
assert arrow.source is None
assert arrow.x_range_name == "default"
assert arrow.y_range_name == "default"
check_line_properties(arrow)
check_properties_existence(arrow, [
"plot",
"level",
"visible",
"x_start",
"y_start",
"start_units",
"start",
"x_end",
"y_end",
"end_units",
"end",
"source",
"x_range_name",
"y_range_name"],
LINE)
def test_BoxAnnotation():
box = BoxAnnotation()
assert box.plot is None
assert box.left is None
assert box.left_units == 'data'
assert box.right is None
assert box.right_units == 'data'
assert box.bottom is None
assert box.bottom_units == 'data'
assert box.top is None
assert box.top_units == 'data'
assert box.x_range_name == 'default'
assert box.y_range_name == 'default'
assert box.level == 'annotation'
check_line_properties(box, "", '#cccccc', 1, 0.3)
check_fill_properties(box, "", "#fff9ba", 0.4)
check_properties_existence(box, [
"render_mode",
"plot",
"visible",
"left",
"left_units",
"right",
"right_units",
"bottom",
"bottom_units",
"top",
"top_units",
"x_range_name",
"y_range_name",
"level",
], LINE, FILL)
def test_Band():
band = Band()
assert band.plot is None
assert band.level == 'annotation'
assert band.lower is None
assert band.lower_units == 'data'
assert band.upper is None
assert band.upper_units == 'data'
assert band.base is None
assert band.dimension == 'height'
assert isinstance(band.source, ColumnDataSource)
assert band.x_range_name == 'default'
assert band.y_range_name == 'default'
check_line_properties(band, "", "#cccccc", 1.0, 0.3)
check_fill_properties(band, "", "#fff9ba", 0.4)
check_properties_existence(band, [
"plot",
"visible",
"level",
"lower",
"lower_units",
"upper",
"upper_units",
"base",
"base_units",
"dimension",
"source",
"x_range_name",
"y_range_name",
], LINE, FILL)
def test_Label():
label = Label()
assert label.plot is None
assert label.level == 'annotation'
assert label.x is None
assert label.y is None
assert label.x_units == 'data'
assert label.y_units == 'data'
assert label.text is None
assert label.angle == 0
assert label.angle_units == 'rad'
assert label.x_offset == 0
assert label.y_offset == 0
assert label.render_mode == 'canvas'
assert label.x_range_name == 'default'
assert label.y_range_name == 'default'
check_text_properties(label)
check_fill_properties(label, "background_", None, 1.0)
check_line_properties(label, "border_", None, 1.0, 1.0)
check_properties_existence(label, [
"plot",
"level",
"visible",
"x",
"y",
"x_units",
"y_units",
"text",
"angle",
"angle_units",
"x_offset",
"y_offset",
"render_mode",
"x_range_name",
"y_range_name"],
TEXT,
prefix('border_', LINE),
prefix('background_', FILL))
def test_Label_accepts_datetime_xy():
obj = Label(x = datetime(2018,8,7,0,0),
y = datetime(2018,8,7,0,0))
assert obj.x == 1533600000000.0
assert obj.y == 1533600000000.0
def test_LabelSet():
label_set = LabelSet()
assert label_set.plot is None
assert label_set.level == 'annotation'
assert label_set.x is None
assert label_set.y is None
assert label_set.x_units == 'data'
assert label_set.y_units == 'data'
assert label_set.text == 'text'
assert label_set.angle == 0
assert label_set.angle_units == 'rad'
assert label_set.x_offset == 0
assert label_set.y_offset == 0
assert label_set.render_mode == 'canvas'
assert label_set.x_range_name == 'default'
assert label_set.y_range_name == 'default'
assert isinstance(label_set.source, ColumnDataSource)
assert label_set.source.data == {}
check_text_properties(label_set)
check_fill_properties(label_set, "background_", None, 1.0)
check_line_properties(label_set, "border_", None, 1.0, 1.0)
check_properties_existence(label_set, [
"plot",
"visible",
"level",
"x",
"y",
"x_units",
"y_units",
"text",
"angle",
"angle_units",
"x_offset",
"y_offset",
"render_mode",
"x_range_name",
"y_range_name",
"source"],
TEXT,
ANGLE,
prefix('border_', LINE),
prefix('background_', FILL))
def test_Slope():
slope = Slope()
assert slope.plot is None
assert slope.gradient is None
assert slope.y_intercept is None
assert slope.x_range_name == 'default'
assert slope.y_range_name == 'default'
assert slope.level == 'annotation'
check_line_properties(slope, "", 'black', 1.0)
check_properties_existence(slope, [
"plot",
"visible",
"gradient",
"y_intercept",
"x_range_name",
"y_range_name",
"level",
], LINE)
def test_Span():
line = Span()
assert line.plot is None
assert line.location is None
assert line.location_units == 'data'
assert line.dimension == 'width'
assert line.x_range_name == 'default'
assert line.y_range_name == 'default'
assert line.level == 'annotation'
assert line.render_mode == 'canvas'
check_line_properties(line, "", 'black', 1.0)
check_properties_existence(line, [
"plot",
"visible",
"location",
"location_units",
"dimension",
"x_range_name",
"y_range_name",
"level",
"render_mode"
], LINE)
def test_Span_accepts_datetime_location():
obj = Span(location = datetime(2018,8,7,0,0))
assert obj.location == 1533600000000.0
def test_Title():
title = Title()
assert title.plot is None
assert title.level == 'annotation'
assert title.text is None
assert title.vertical_align == 'bottom'
assert title.align == 'left'
assert title.offset == 0
assert title.text_font == 'helvetica'
assert title.text_font_size == {'value': '10pt'}
assert title.text_font_style == 'bold'
assert title.text_color == '#444444'
assert title.text_alpha == 1.0
check_fill_properties(title, "background_", None, 1.0)
check_line_properties(title, "border_", None, 1.0, 1.0)
check_properties_existence(title, [
"plot",
"visible",
"level",
"text",
"vertical_align",
"align",
"offset",
"text_font",
"text_font_size",
"text_font_style",
"text_color",
"text_alpha",
"render_mode"],
prefix('border_', LINE),
prefix('background_', FILL))
def test_Whisker():
whisker = Whisker()
assert whisker.plot is None
assert whisker.level == 'underlay'
assert whisker.lower is None
assert whisker.lower_units == 'data'
assert isinstance(whisker.lower_head, ArrowHead)
assert whisker.lower_head.size == 10
assert whisker.lower_head.level == 'underlay'
assert whisker.upper is None
assert whisker.upper_units == 'data'
assert isinstance(whisker.upper_head, ArrowHead)
assert whisker.upper_head.size == 10
assert whisker.upper_head.level == 'underlay'
assert whisker.base is None
assert whisker.dimension == 'height'
assert isinstance(whisker.source, ColumnDataSource)
assert whisker.x_range_name == 'default'
assert whisker.y_range_name == 'default'
check_line_properties(whisker, "")
check_properties_existence(whisker, [
"plot",
"visible",
"level",
"lower",
"lower_units",
"lower_head",
"upper",
"upper_units",
"upper_head",
"base",
"base_units",
"dimension",
"source",
"x_range_name",
"y_range_name"],
LINE)
def test_can_add_multiple_glyph_renderers_to_legend_item():
legend_item = LegendItem()
gr_1 = GlyphRenderer()
gr_2 = GlyphRenderer()
legend_item.renderers = [gr_1, gr_2]
with mock.patch('bokeh.core.validation.check.log') as mock_logger:
check_integrity([legend_item])
assert mock_logger.error.call_count == 0
def test_legend_item_with_field_label_and_different_data_sources_raises_a_validation_error():
legend_item = LegendItem()
gr_1 = GlyphRenderer(data_source=ColumnDataSource(data={'label': [1]}))
gr_2 = GlyphRenderer(data_source=ColumnDataSource(data={'label': [1]}))
legend_item.label = field('label')
legend_item.renderers = [gr_1, gr_2]
with mock.patch('bokeh.core.validation.check.log') as mock_logger:
check_integrity([legend_item])
assert mock_logger.error.call_count == 1
def test_legend_item_with_value_label_and_different_data_sources_does_not_raise_a_validation_error():
legend_item = LegendItem()
gr_1 = GlyphRenderer(data_source=ColumnDataSource())
gr_2 = GlyphRenderer(data_source=ColumnDataSource())
legend_item.label = value('label')
legend_item.renderers = [gr_1, gr_2]
with mock.patch('bokeh.core.validation.check.log') as mock_logger:
check_integrity([legend_item])
assert mock_logger.error.call_count == 0
def test_legend_item_with_field_label_raises_error_if_field_not_in_cds():
legend_item = LegendItem()
gr_1 = GlyphRenderer(data_source=ColumnDataSource())
legend_item.label = field('label')
legend_item.renderers = [gr_1]
with mock.patch('bokeh.core.validation.check.log') as mock_logger:
check_integrity([legend_item])
assert mock_logger.error.call_count == 1
| {
"content_hash": "aedc194612f0f129194bc6fdac108d0c",
"timestamp": "",
"source": "github",
"line_count": 464,
"max_line_length": 101,
"avg_line_length": 30.28448275862069,
"alnum_prop": 0.5982066609735269,
"repo_name": "mindriot101/bokeh",
"id": "ab95e41c5fa31a2a2a760d5731785867be778c72",
"size": "14052",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "bokeh/models/tests/test_annotations.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5455"
},
{
"name": "CSS",
"bytes": "413395"
},
{
"name": "CoffeeScript",
"bytes": "1995470"
},
{
"name": "HTML",
"bytes": "1545838"
},
{
"name": "JavaScript",
"bytes": "4747"
},
{
"name": "Makefile",
"bytes": "5785"
},
{
"name": "Python",
"bytes": "1381168"
},
{
"name": "Shell",
"bytes": "13857"
}
],
"symlink_target": ""
} |
import os
from setuptools import setup, find_packages
__version__ = "0.6.5"
NAME = 'tensorpac'
AUTHOR = "Etienne Combrisson"
MAINTAINER = "Etienne Combrisson"
EMAIL = 'e.combrisson@gmail.com'
KEYWORDS = "phase-amplitude coupling pac tensor oscillation meg eeg python"
DESCRIPTION = "Tensor-based Phase-Amplitude Coupling"
URL = 'http://etiennecmb.github.io/tensorpac/'
DOWNLOAD_URL = ("https://github.com/EtienneCmb/tensorpac/archive/v" +
__version__ + ".tar.gz")
# Data path :
PACKAGE_DATA = {}
def read(fname):
"""Read README and LICENSE."""
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name=NAME,
version=__version__,
packages=find_packages(),
package_dir={'tensorpac': 'tensorpac'},
package_data=PACKAGE_DATA,
include_package_data=True,
description=DESCRIPTION,
long_description=read('README.rst'),
platforms='any',
setup_requires=['numpy', 'joblib'],
install_requires=[
"numpy",
"scipy",
"joblib"
],
dependency_links=[],
author=AUTHOR,
maintainer=MAINTAINER,
author_email=EMAIL,
url=URL,
download_url=DOWNLOAD_URL,
license="BSD 3-Clause License",
keywords=KEYWORDS,
classifiers=["Development Status :: 5 - Production/Stable",
'Intended Audience :: Science/Research',
'Intended Audience :: Education',
'Intended Audience :: Developers',
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.7"
])
| {
"content_hash": "42fb40acd3efc7f529b4f51e7e3b2b6e",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 75,
"avg_line_length": 30.574074074074073,
"alnum_prop": 0.6165960024227741,
"repo_name": "EtienneCmb/tensorpac",
"id": "10924be6163900534aa4a20480bfe2dfa5b2e538",
"size": "1721",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "354"
},
{
"name": "Python",
"bytes": "140014"
},
{
"name": "Shell",
"bytes": "60"
}
],
"symlink_target": ""
} |
import logging
from os import path
from typing import List, Optional
from dogebuild.dogefile_internals.dependencies import Dependency
from .common import DOGE_FILE
def resolve_dependency_tree(dependencies: List[Dependency], parents: List[str] = None) -> List[Dependency]:
if not parents:
parents = []
for d in dependencies:
id, version = d.get_id()
if id in parents:
raise Exception("Circular dependency")
use_version = _resolve_version_(id, version)
if version != use_version:
d.original_version = d.version
d.version = use_version
logging.info("Acquiring {} ...".format(d))
d.acquire_dependency()
deps, _ = load_doge_file(path.join(d.get_doge_file_folder(), DOGE_FILE)) # noqa
d.dependencies = resolve_dependency_tree(deps, parents + [id])
return dependencies
VERSIONS = {}
def _resolve_version_(id: str, version: Optional[str]) -> Optional[str]:
if not version:
return None
# simple conflict resolving strategy
saved_version = VERSIONS.get(id)
if not saved_version:
VERSIONS[id] = version
return version
else:
return saved_version
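# A quick, doctest-style illustration (added; not in the original source) of
# the first-wins strategy above:
#
#   >>> _resolve_version_('lib-a', '1.0')
#   '1.0'
#   >>> _resolve_version_('lib-a', '2.0')   # conflict: the saved version wins
#   '1.0'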
| {
"content_hash": "3445b01fb9ff955c7690ba3124f9ae59",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 107,
"avg_line_length": 28.976190476190474,
"alnum_prop": 0.6425636811832375,
"repo_name": "dogebuild/dogebuild",
"id": "c26a4231b9f9fab2a3d5442529f2ff40432b8515",
"size": "1217",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dogebuild/dependencies_functions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18341"
}
],
"symlink_target": ""
} |
"""
Created on 25.02.2011
@author: vda
"""
import os
import datetime
import zipfile
from PyQt4 import QtCore, QtGui
from ui.gui.radiancebackupdialog import Ui_RBackupDialog
from settings import settings
from logic.misc.pyfs import recursive_glob
class RBackupDialog(QtGui.QDialog):
def __init__(self, parent=None):
QtGui.QDialog.__init__(self, parent)
self.ui = Ui_RBackupDialog()
self.ui.setupUi(self)
self.__init()
def __init(self):
# Signals
self.ui.mainDatabaseCheckBox.stateChanged.connect(self._process_form)
self.ui.reportDatabaseCheckBox.stateChanged.connect(self._process_form)
self.ui.templateFilesCheckBox.stateChanged.connect(self._process_form)
def _process_form(self):
        enabled_state = self.ui.mainDatabaseCheckBox.isChecked() \
                        or self.ui.reportDatabaseCheckBox.isChecked() \
                        or self.ui.templateFilesCheckBox.isChecked()
self.ui.saveButton.setEnabled(enabled_state)
self.ui.archiveTypeComboBox.setEnabled(enabled_state)
@QtCore.pyqtSlot()
def on_saveButton_clicked(self):
if settings.get('locale') == 'ru_RU':
header = u'Сохранение резервной копии'
extensions = u'Файлы резервного копирования (*.zip)'
else:
header = u'Save backup'
extensions = u'Backup files (*.zip)'
date = datetime.datetime.now().strftime('%d_%m_%Y___%H_%M_%S')
filepath = os.path.join(os.path.expanduser('~'), 'Radiance_%s' % date)
filename = QtGui.QFileDialog.getSaveFileName(self, header, filepath, extensions)
if not filename.isEmpty():
try:
files = []
if self.ui.mainDatabaseCheckBox.isChecked():
files.append('./radiance.db')
if self.ui.reportDatabaseCheckBox.isChecked():
files.append('./reports.db')
if self.ui.templateFilesCheckBox.isChecked():
files.extend(list(recursive_glob(settings.get('template_path'), '*')))
files = [os.path.normpath(path) for path in files]
filename = unicode(os.path.normpath('%s.zip' % filename))
archive = zipfile.ZipFile(filename, 'w', zipfile.ZIP_DEFLATED)
for file in files:
archive.write(file, file)
archive.close()
except IOError:
raise
else:
                self.close()
 | {
"content_hash": "eb53c06484864856af72d5af1676c24b",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 90,
"avg_line_length": 36.026315789473685,
"alnum_prop": 0.5682980277574872,
"repo_name": "tetra5/radiance",
"id": "d589d598cae503872c85a8c91e118f9b20033d21",
"size": "2814",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ui/windows/backupdialog.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2953"
},
{
"name": "Python",
"bytes": "465629"
},
{
"name": "Shell",
"bytes": "635"
},
{
"name": "TypeScript",
"bytes": "37354"
}
],
"symlink_target": ""
} |
import matplotlib.pyplot as pp
import math, numpy as np
import roslib; roslib.load_manifest('hrl_lib')
import tf
import hrl_lib.util as ut
import hrl_lib.transforms as tr
import hrl_lib.matplotlib_util as mpu
from mpl_toolkits.mplot3d import Axes3D
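# Added note (not in the original source): both helpers below assume the
# linear wrist model  stretch = 1 + beta * angle  (angle in radians,
# beta = 0.1674), so the inverse is angle = (stretch - 1) / beta.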
def cody_stretch_to_angle(medium_stretch, full_stretch):
cody_wrist_beta = 0.1674
ang_medium = float(medium_stretch-1)/float(cody_wrist_beta)
ang_full = float(full_stretch-1)/float(cody_wrist_beta)
print 'medium:', np.degrees(ang_medium)
print 'full:', np.degrees(ang_full)
def cody_angles_to_stretch(angles):
cody_wrist_beta = 0.1674
stretch = []
for ang in angles:
stretch.append(1 + cody_wrist_beta*np.radians(ang))
print 'stretch:', stretch
if __name__ == '__main__':
if True:
medium_stretch = 1.2985
full_stretch = 1.5971
angles = np.arange(0,55.5,5)
cody_stretch_to_angle(medium_stretch, full_stretch)
cody_angles_to_stretch(angles)
pp.show()
| {
"content_hash": "dc7b90370943bef1ab0559aa2b11c0a9",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 63,
"avg_line_length": 24.023255813953487,
"alnum_prop": 0.6505324298160697,
"repo_name": "tapomayukh/projects_in_python",
"id": "c9bf1514584261c477f7930158825046a8e49b0e",
"size": "1034",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modeling_new_tactile_skin/using_stretch_model_in_paper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Matlab",
"bytes": "4903"
},
{
"name": "Python",
"bytes": "4451912"
}
],
"symlink_target": ""
} |
"""AdaMatch
"""
import os
import sys
from typing import Callable
import jax
import jax.numpy as jn
import objax
from absl import app
from absl import flags
from absl.flags import FLAGS
from objax.functional import stop_gradient
from objax.typing import JaxArray
from domain_adaptation.lib.data import MixData, CTAData
from domain_adaptation.lib.train import TrainableDAModule
from shared.data.fsl import DATASETS as FSL_DATASETS
from shared.train import ScheduleCos, ScheduleCosPhases
from shared.util import setup_tf, MyParallel
from shared.zoo.models import network, ARCHS
class AdaMatchDistAlignRMM(TrainableDAModule):
def __init__(self, nclass: int, model: Callable, **kwargs):
super().__init__(nclass, kwargs)
self.model: objax.Module = model(colors=3, nclass=nclass, **kwargs)
self.model_ema = objax.optimizer.ExponentialMovingAverageModule(self.model, momentum=0.999)
if FLAGS.arch.endswith('pretrain'):
# Initialize weights of EMA with pretrained model's weights.
self.model_ema.ema.momentum = 0
self.model_ema.update_ema()
self.model_ema.ema.momentum = 0.999
self.stats = objax.Module()
self.stats.keygen = objax.random.DEFAULT_GENERATOR
self.stats.p_data = objax.nn.ExponentialMovingAverage((nclass,), init_value=1 / nclass)
self.stats.p_unlabeled = objax.nn.MovingAverage((nclass,), buffer_size=128, init_value=1 / nclass)
train_vars = self.model.vars() + self.stats.vars()
self.opt = objax.optimizer.Momentum(train_vars)
self.wu = ScheduleCosPhases(1, [(0.5, 1), (1, self.params.wu)], start_value=0)
self.lr = ScheduleCos(self.params.lr, self.params.lr_decay)
@objax.Function.with_vars(self.model_ema.vars())
def eval_op(x: JaxArray, domain: int) -> JaxArray:
return objax.functional.softmax(self.model_ema(x, training=False, domain=domain))
def loss_function(sx, sy, tu, progress):
c, h, w = sx.shape[-3:]
saved_vars = self.model.vars().tensors()
logit_bn_x = self.model(sx.reshape((-1, c, h, w)), training=True)
self.model.vars().assign(saved_vars)
xu = jn.concatenate((sx, tu)).reshape((-1, c, h, w))
logit = self.model(xu, training=True)
logit_sx, logit_tu = jn.split(logit, (2 * sx.shape[0],))
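            # Random logit interpolation: mix the source logits computed with
            # source-only batch-norm statistics (logit_bn_x) and with the
            # combined source+target statistics (logit_sx), using per-element
            # uniform weights.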
logit_sx += (logit_bn_x - logit_sx) * objax.random.uniform(logit_sx.shape)
logit_sx_weak, logit_sx_strong = logit_sx[::2], logit_sx[1::2]
logit_tu_weak, logit_tu_strong = logit_tu[::2], logit_tu[1::2]
if self.params.use_cr:
real_confidence = objax.functional.softmax(stop_gradient(logit_sx_weak))
confidence_ratio = real_confidence.max(1).mean(0) * self.params.confidence
else:
confidence_ratio = self.params.confidence
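            # Distribution alignment: rescale target pseudo-labels by the
            # ratio of the running source label distribution to the running
            # pseudo-label distribution, renormalize, and keep only target
            # examples whose top pseudo-label probability clears the
            # confidence threshold computed above.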
pseudo_labels = objax.functional.softmax(logit_tu_weak)
p_data = self.stats.p_data(sy.mean(0))
p_unlabeled = self.stats.p_unlabeled(pseudo_labels.mean(0))
pseudo_labels *= (1e-6 + p_data) / (1e-6 + p_unlabeled)
pseudo_labels = stop_gradient(pseudo_labels / pseudo_labels.sum(1, keepdims=True))
pseudo_mask = (pseudo_labels.max(axis=1) >= confidence_ratio).astype(pseudo_labels.dtype)
xe = 0.5 * (objax.functional.loss.cross_entropy_logits(logit_sx_weak, sy).mean() +
objax.functional.loss.cross_entropy_logits(logit_sx_strong, sy).mean())
xeu = objax.functional.loss.cross_entropy_logits_sparse(logit_tu_strong, pseudo_labels.argmax(axis=1))
xeu = (xeu * pseudo_mask).mean()
wd = 0.5 * sum((v.value ** 2).sum() for k, v in train_vars.items() if k.endswith('.w'))
loss = xe + self.wu(progress) * xeu + self.params.wd * wd
return loss, {'losses/xe': xe,
'losses/xeu': xeu,
'losses/wd': wd,
'losses/hregbn': jn.square(logit_sx - logit_bn_x).mean(),
'monitors/confidence_ratio': confidence_ratio,
'monitors/wu': self.wu(progress),
'monitors/mask': pseudo_mask.mean(),
'monitors/klmodel': objax.functional.divergence.kl(p_data, p_unlabeled)}
gv = objax.GradValues(loss_function, train_vars)
@objax.Function.with_vars(self.vars())
def train_op(step, sx, sy, tu, probe=None):
y_probe = eval_op(probe, 1) if probe is not None else None
p = step / (FLAGS.train_mimg << 20)
lr = self.lr(p)
g, v = gv(sx, sy, tu, p)
self.opt(lr, objax.functional.parallel.pmean(g))
self.model_ema.update_ema()
return objax.functional.parallel.pmean({'monitors/lr': lr, **v[1]}), y_probe
self.train_op = MyParallel(train_op, reduce=lambda x: x)
self.eval_op = MyParallel(eval_op, static_argnums=(1,))
def main(argv):
del argv
print('JAX host: %d / %d' % (jax.host_id(), jax.host_count()))
print('JAX devices:\n%s' % '\n'.join(str(d) for d in jax.devices()), flush=True)
setup_tf()
source = FSL_DATASETS()[f'{FLAGS.dataset}_{FLAGS.source}-0']()
target = FSL_DATASETS()[f'{FLAGS.dataset}_{FLAGS.target}-0']()
testsets = [target.test, source.test] # Ordered by domain (target always first)
module = AdaMatchDistAlignRMM(source.nclass, network(FLAGS.arch),
lr=FLAGS.lr,
lr_decay=FLAGS.lr_decay,
wd=FLAGS.wd,
arch=FLAGS.arch,
batch=FLAGS.batch,
wu=FLAGS.wu,
confidence=FLAGS.confidence,
use_cr=FLAGS.use_cr,
uratio=FLAGS.uratio)
logdir = f'DA/{FLAGS.dataset}/{FLAGS.source}/{FLAGS.target}/{FLAGS.augment}/{module.__class__.__name__}/%s' % (
'_'.join(sorted('%s%s' % k for k in module.params.items())))
logdir = os.path.join(FLAGS.logdir, logdir)
test = {}
for domain, testset in enumerate(testsets):
test.update((f'{FLAGS.source}_to_{k}',
v.parse().batch(FLAGS.batch).nchw().map(lambda d: {**d, 'domain': domain}).prefetch(16))
for k, v in testset.items())
if FLAGS.augment.startswith('('):
train = MixData(source.train, target.train, source.nclass, FLAGS.batch, FLAGS.uratio)
elif FLAGS.augment.startswith('CTA('):
train = CTAData(source.train, target.train, source.nclass, FLAGS.batch, FLAGS.uratio)
else:
raise ValueError(f'Augment flag value {FLAGS.augment} not supported.')
module.train(FLAGS.train_mimg << 10, FLAGS.report_kimg, train, test, logdir, FLAGS.keep_ckpts)
train.stop()
if __name__ == '__main__':
flags.DEFINE_enum('arch', 'wrn28-2', ARCHS, 'Model architecture.')
flags.DEFINE_bool('use_cr', True, 'Make confidence threshold proportional to real data.')
flags.DEFINE_float('confidence', 0.9, 'Confidence threshold.')
flags.DEFINE_float('lr', 0.03, 'Learning rate.')
flags.DEFINE_float('lr_decay', 0.25, 'Learning rate decay.')
flags.DEFINE_float('wd', 0.001, 'Weight decay.')
flags.DEFINE_float('wu', 1, 'Unlabeled loss weight.')
flags.DEFINE_integer('batch', 64, 'Batch size')
flags.DEFINE_integer('uratio', 3, 'Unlabeled batch size ratio')
flags.DEFINE_integer('report_kimg', 64, 'Reporting period in kibi-images.')
flags.DEFINE_integer('train_mimg', 8, 'Training duration in mega-images.')
flags.DEFINE_integer('keep_ckpts', 5, 'Number of checkpoints to keep (0 for all).')
flags.DEFINE_string('logdir', 'experiments', 'Directory where to save checkpoints and tensorboard data.')
flags.DEFINE_string('dataset', 'domainnet32', 'Source data to train on.')
flags.DEFINE_string('source', 'clipart', 'Source data to train on.')
flags.DEFINE_string('target', 'infograph', 'Target data to train on.')
FLAGS.set_default('augment', 'CTA(sm,sm)')
FLAGS.set_default('para_augment', 8)
app.run(main)
| {
"content_hash": "f80c3d2f53ea0a00138e05e6af8f0d45",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 115,
"avg_line_length": 52.22641509433962,
"alnum_prop": 0.6040462427745664,
"repo_name": "google-research/adamatch",
"id": "8f486ddd06b4173f89411bf15bdd73d8369130dc",
"size": "8879",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "ablation/adamatch_distalign_remixmatch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import posixpath
import re
from telemetry.timeline import event as timeline_event
class MmapCategory(object):
_DEFAULT_CATEGORY = None
def __init__(self, name, file_pattern, children=None):
"""A (sub)category for classifying memory maps.
Args:
name: A string to identify the category.
file_pattern: A regex pattern, the category will aggregate memory usage
for all mapped files matching this pattern.
children: A list of MmapCategory objects, used to sub-categorize memory
usage.
"""
self.name = name
self._file_pattern = re.compile(file_pattern) if file_pattern else None
self._children = list(children) if children else None
@classmethod
def DefaultCategory(cls):
"""An implicit 'Others' match-all category with no children."""
if cls._DEFAULT_CATEGORY is None:
cls._DEFAULT_CATEGORY = cls('Others', None)
return cls._DEFAULT_CATEGORY
def Match(self, mapped_file):
"""Test whether a mapped file matches this category."""
return (self._file_pattern is None
or bool(self._file_pattern.search(mapped_file)))
def GetMatchingChild(self, mapped_file):
"""Get the first matching sub-category for a given mapped file.
Returns None if the category has no children, or the DefaultCategory if
it does have children but none of them match.
"""
if not self._children:
return None
for child in self._children:
if child.Match(mapped_file):
return child
return type(self).DefaultCategory()
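# Added illustration (not part of the original source): with the category tree
# below, a mapped file such as '/data/app/base.apk' is classified under
# 'Files' and then its 'apk' child, while a file that matches a category but
# none of that category's children falls back to the implicit 'Others'
# default category.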
ROOT_CATEGORY = MmapCategory('/', None, [
MmapCategory('Android', r'^\/dev\/ashmem(?!\/libc malloc)', [
MmapCategory('Java runtime', r'^\/dev\/ashmem\/dalvik-', [
MmapCategory('Spaces', r'\/dalvik-(alloc|main|large'
r' object|non moving|zygote) space', [
MmapCategory('Normal', r'\/dalvik-(alloc|main)'),
MmapCategory('Large', r'\/dalvik-large object'),
MmapCategory('Zygote', r'\/dalvik-zygote'),
MmapCategory('Non-moving', r'\/dalvik-non moving')
]),
MmapCategory('Linear Alloc', r'\/dalvik-LinearAlloc'),
MmapCategory('Indirect Reference Table', r'\/dalvik-indirect.ref'),
MmapCategory('Cache', r'\/dalvik-jit-code-cache'),
MmapCategory('Accounting', None)
]),
MmapCategory('Cursor', r'\/CursorWindow'),
MmapCategory('Ashmem', None)
]),
MmapCategory('Native heap',
r'^((\[heap\])|(\[anon:)|(\/dev\/ashmem\/libc malloc)|$)'),
MmapCategory('Stack', r'^\[stack'),
MmapCategory('Files',
r'\.((((so)|(jar)|(apk)|(ttf)|(odex)|(oat)|(art))$)|(dex))', [
MmapCategory('so', r'\.so$'),
MmapCategory('jar', r'\.jar$'),
MmapCategory('apk', r'\.apk$'),
MmapCategory('ttf', r'\.ttf$'),
MmapCategory('dex', r'\.((dex)|(odex$))'),
MmapCategory('oat', r'\.oat$'),
MmapCategory('art', r'\.art$'),
]),
MmapCategory('Devices', r'(^\/dev\/)|(anon_inode:dmabuf)', [
MmapCategory('GPU', r'\/((nv)|(mali)|(kgsl))'),
MmapCategory('DMA', r'anon_inode:dmabuf'),
]),
MmapCategory('Discounted tracing overhead',
r'\[discounted tracing overhead\]')
])
# Map long descriptive attribute names, as understood by MemoryBucket.GetValue,
# to the short keys used by events in raw json traces.
BUCKET_ATTRS = {
'proportional_resident': 'pss',
'private_dirty_resident': 'pd',
'private_clean_resident': 'pc',
'shared_dirty_resident': 'sd',
'shared_clean_resident': 'sc',
'swapped': 'sw'}
# Map of {memory_key: (category_path, discount_tracing), ...}.
# When discount_tracing is True, we have to discount the resident_size of the
# tracing allocator to get the correct value for that key.
MMAPS_METRICS = {
'mmaps_overall_pss': ('/.proportional_resident', True),
'mmaps_private_dirty' : ('/.private_dirty_resident', True),
'mmaps_java_heap': ('/Android/Java runtime/Spaces.proportional_resident',
False),
'mmaps_ashmem': ('/Android/Ashmem.proportional_resident', False),
'mmaps_native_heap': ('/Native heap.proportional_resident', True)}
class MemoryBucket(object):
"""Simple object to hold and aggregate memory values."""
def __init__(self):
self._bucket = dict.fromkeys(BUCKET_ATTRS.iterkeys(), 0)
def __repr__(self):
values = ', '.join('%s=%d' % (src_key, self._bucket[dst_key])
for dst_key, src_key
in sorted(BUCKET_ATTRS.iteritems()))
return '%s[%s]' % (type(self).__name__, values)
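  # Added note (not in the original source): vm_region byte stats arrive as
  # hexadecimal strings (e.g. {'pss': '1a2b'}), hence the base-16 int()
  # conversion below.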
def AddRegion(self, byte_stats):
for dst_key, src_key in BUCKET_ATTRS.iteritems():
self._bucket[dst_key] += int(byte_stats.get(src_key, '0'), 16)
def GetValue(self, name):
return self._bucket[name]
class ProcessMemoryDumpEvent(timeline_event.TimelineEvent):
"""A memory dump event belonging to a single timeline.Process object.
It's a subclass of telemetry's TimelineEvent so it can be included in
the stream of events contained in timeline.model objects, and have its
timing correlated with that of other events in the model.
Args:
process: The Process object associated with the memory dump.
dump_events: A list of dump events of the process with the same dump id.
Properties:
dump_id: A string to identify events belonging to the same global dump.
process: The timeline.Process object that owns this memory dump event.
has_mmaps: True if the memory dump has mmaps information. If False then
GetMemoryUsage will report all zeros.
"""
def __init__(self, process, dump_events):
assert dump_events
start_time = min(event['ts'] for event in dump_events) / 1000.0
duration = max(event['ts'] for event in dump_events) / 1000.0 - start_time
super(ProcessMemoryDumpEvent, self).__init__('memory', 'memory_dump',
start_time, duration)
self.process = process
self.dump_id = dump_events[0]['id']
allocator_dumps = {}
vm_regions = []
for event in dump_events:
assert (event['ph'] == 'v' and self.process.pid == event['pid'] and
self.dump_id == event['id'])
try:
allocator_dumps.update(event['args']['dumps']['allocators'])
except KeyError:
pass # It's ok if any of those keys are not present.
try:
value = event['args']['dumps']['process_mmaps']['vm_regions']
assert not vm_regions
vm_regions = value
except KeyError:
pass # It's ok if any of those keys are not present.
self._allocators = {}
parent_path = ''
parent_has_size = False
for allocator_name, size_values in sorted(allocator_dumps.iteritems()):
if ((allocator_name.startswith(parent_path) and parent_has_size) or
allocator_name.startswith('global/')):
continue
parent_path = allocator_name + '/'
parent_has_size = 'size' in size_values['attrs']
name_parts = allocator_name.split('/')
allocator_name = name_parts[0]
# For 'gpu/android_memtrack/*' we want to keep track of individual
# components. E.g. 'gpu/android_memtrack/gl' will be stored as
# 'android_memtrack_gl' in the allocators dict.
if (len(name_parts) == 3 and allocator_name == 'gpu' and
name_parts[1] == 'android_memtrack'):
allocator_name = '_'.join(name_parts[1:3])
allocator = self._allocators.setdefault(allocator_name, {})
for size_key, size_value in size_values['attrs'].iteritems():
if size_value['units'] == 'bytes':
allocator[size_key] = (allocator.get(size_key, 0)
+ int(size_value['value'], 16))
# we need to discount tracing from malloc size.
try:
self._allocators['malloc']['size'] -= self._allocators['tracing']['size']
except KeyError:
pass # It's ok if any of those keys are not present.
self.has_mmaps = bool(vm_regions)
self._buckets = {}
for vm_region in vm_regions:
self._AddRegion(vm_region)
@property
def process_name(self):
return self.process.name
def _AddRegion(self, vm_region):
path = ''
category = ROOT_CATEGORY
while category:
path = posixpath.join(path, category.name)
self.GetMemoryBucket(path).AddRegion(vm_region['bs'])
mapped_file = vm_region['mf']
category = category.GetMatchingChild(mapped_file)
def __repr__(self):
values = ['pid=%d' % self.process.pid]
for key, value in sorted(self.GetMemoryUsage().iteritems()):
values.append('%s=%d' % (key, value))
values = ', '.join(values)
return '%s[%s]' % (type(self).__name__, values)
def GetMemoryBucket(self, path):
"""Return the MemoryBucket associated with a category path.
An empty bucket will be created if the path does not already exist.
path: A string with path in the classification tree, e.g.
'/Android/Java runtime/Cache'. Note: no trailing slash, except for
the root path '/'.
"""
if not path in self._buckets:
self._buckets[path] = MemoryBucket()
return self._buckets[path]
def GetMemoryValue(self, category_path, discount_tracing=False):
"""Return a specific value from within a MemoryBucket.
category_path: A string composed of a path in the classification tree,
followed by a '.', followed by a specific bucket value, e.g.
'/Android/Java runtime/Cache.private_dirty_resident'.
discount_tracing: A boolean indicating whether the returned value should
be discounted by the resident size of the tracing allocator.
"""
path, name = category_path.rsplit('.', 1)
value = self.GetMemoryBucket(path).GetValue(name)
if discount_tracing and 'tracing' in self._allocators:
value -= self._allocators['tracing'].get('resident_size', 0)
return value
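  # Added usage sketch (not in the original source): for a dump event `d`,
  # the 'mmaps_java_heap' entry of MMAPS_METRICS is equivalent to
  #   d.GetMemoryValue('/Android/Java runtime/Spaces.proportional_resident')
  # and entries flagged with discount_tracing=True additionally subtract the
  # tracing allocator's resident_size.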
def GetMemoryUsage(self):
"""Get a dictionary with the memory usage of this process."""
usage = {}
for name, values in self._allocators.iteritems():
# If you wish to track more attributes here, make sure they are correctly
# calculated by the ProcessMemoryDumpEvent method. All dumps whose parent
# has "size" attribute are ignored to avoid double counting. So, the
# other attributes are totals of only top level dumps.
if 'size' in values:
usage['allocator_%s' % name] = values['size']
if 'allocated_objects_size' in values:
usage['allocated_objects_%s' % name] = values['allocated_objects_size']
if 'memtrack_pss' in values:
usage[name] = values['memtrack_pss']
if self.has_mmaps:
usage.update((key, self.GetMemoryValue(*value))
for key, value in MMAPS_METRICS.iteritems())
return usage
class GlobalMemoryDump(object):
"""Object to aggregate individual process dumps with the same dump id.
Args:
process_dumps: A sequence of ProcessMemoryDumpEvent objects, all sharing
the same global dump id.
Attributes:
dump_id: A string identifying this dump.
has_mmaps: True if the memory dump has mmaps information. If False then
GetMemoryUsage will report all zeros.
"""
def __init__(self, process_dumps):
assert process_dumps
# Keep dumps sorted in chronological order.
self._process_dumps = sorted(process_dumps, key=lambda dump: dump.start)
# All process dump events should have the same dump id.
dump_ids = set(dump.dump_id for dump in self._process_dumps)
assert len(dump_ids) == 1
self.dump_id = dump_ids.pop()
# Either all processes have mmaps or none of them do.
have_mmaps = set(dump.has_mmaps for dump in self._process_dumps)
assert len(have_mmaps) == 1
self.has_mmaps = have_mmaps.pop()
@property
def start(self):
return self._process_dumps[0].start
@property
def end(self):
return max(dump.end for dump in self._process_dumps)
@property
def duration(self):
return self.end - self.start
def IterProcessMemoryDumps(self):
return iter(self._process_dumps)
def CountProcessMemoryDumps(self):
return len(self._process_dumps)
def __repr__(self):
values = ['id=%s' % self.dump_id]
for key, value in sorted(self.GetMemoryUsage().iteritems()):
values.append('%s=%d' % (key, value))
values = ', '.join(values)
return '%s[%s]' % (type(self).__name__, values)
def GetMemoryUsage(self):
"""Get the aggregated memory usage over all processes in this dump."""
result = {}
for dump in self._process_dumps:
for key, value in dump.GetMemoryUsage().iteritems():
result[key] = result.get(key, 0) + value
return result
| {
"content_hash": "87d8af35a7e513e252345ed5c79608a5",
"timestamp": "",
"source": "github",
"line_count": 335,
"max_line_length": 79,
"avg_line_length": 37.70746268656716,
"alnum_prop": 0.6451076630778974,
"repo_name": "SummerLW/Perf-Insight-Report",
"id": "4e416f3129335db7f62ca49b8e591e7ca99e88cf",
"size": "12795",
"binary": false,
"copies": "1",
"ref": "refs/heads/test",
"path": "telemetry/telemetry/timeline/memory_dump_event.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "3598"
},
{
"name": "C++",
"bytes": "6411"
},
{
"name": "CSS",
"bytes": "14952"
},
{
"name": "HTML",
"bytes": "27508823"
},
{
"name": "JavaScript",
"bytes": "75587"
},
{
"name": "Python",
"bytes": "4638631"
},
{
"name": "Shell",
"bytes": "2124"
}
],
"symlink_target": ""
} |
from pycket import impersonators as imp
from pycket import values
from pycket import vector as values_vector
from pycket.cont import continuation, label, loop_label
from pycket.error import SchemeException
from pycket.prims.expose import unsafe, default, expose, subclass_unsafe
from rpython.rlib import jit
@expose("vector")
def vector(args):
return values_vector.W_Vector.fromelements(args)
@expose("flvector")
def flvector(args):
return values_vector.W_FlVector.fromelements(args)
@expose("extflvector?", [values.W_Object])
def extflvector(obj):
return values.w_false
# FIXME: immutable
@expose("vector-immutable")
def vector_immutable(args):
return values_vector.W_Vector.fromelements(args, immutable=True)
@expose("make-vector", [values.W_Fixnum, default(values.W_Object, values.W_Fixnum.ZERO)])
def make_vector(w_size, w_val):
size = w_size.value
if size < 0:
raise SchemeException("make-vector: expected a positive fixnum")
return values_vector.W_Vector.fromelement(w_val, size)
@expose("make-flvector", [values.W_Fixnum, default(values.W_Flonum, values.W_Flonum.ZERO)])
def make_flvector(w_size, w_val):
size = w_size.value
if size < 0:
raise SchemeException("make-flvector: expected a positive fixnum")
return values_vector.W_FlVector.fromelement(w_val, size)
@expose("vector-length", [values_vector.W_MVector])
def vector_length(v):
return values.W_Fixnum(v.length())
@expose("flvector-length", [values_vector.W_FlVector])
def flvector_length(v):
return values.W_Fixnum(v.length())
@expose("vector-ref", [values.W_MVector, values.W_Fixnum], simple=False)
def vector_ref(v, i, env, cont):
idx = i.value
if not (0 <= idx < v.length()):
raise SchemeException("vector-ref: index out of bounds")
return v.vector_ref(idx, env, cont)
@expose("flvector-ref", [values_vector.W_FlVector, values.W_Fixnum], simple=False)
def flvector_ref(v, i, env, cont):
idx = i.value
if not (0 <= idx < v.length()):
raise SchemeException("vector-ref: index out of bounds")
return v.vector_ref(idx, env, cont)
@expose("vector-set!", [values.W_MVector, values.W_Fixnum, values.W_Object], simple=False)
def vector_set(v, i, new, env, cont):
if v.immutable():
raise SchemeException("vector-set!: given immutable vector")
idx = i.value
if not (0 <= idx < v.length()):
raise SchemeException("vector-set!: index out of bounds")
return v.vector_set(idx, new, env, cont)
@expose("flvector-set!", [values_vector.W_FlVector, values.W_Fixnum, values.W_Flonum], simple=False)
def flvector_set(v, i, new, env, cont):
idx = i.value
if not (0 <= idx < v.length()):
raise SchemeException("flvector-set!: index out of bounds")
return v.vector_set(idx, new, env, cont)
def copy_vector(v, env, cont):
from pycket.interpreter import return_value
if isinstance(v, values_vector.W_Vector):
return return_value(v._make_copy(immutable=True), env, cont)
len = v.length()
data = [None] * len
return copy_vector_loop(v, data, len, 0, env, cont)
@loop_label
def copy_vector_loop(v, data, len, idx, env, cont):
from pycket.interpreter import return_value
if idx >= len:
vector = values_vector.W_Vector.fromelements(data, immutable=True)
return return_value(vector, env, cont)
return v.vector_ref(idx, env,
copy_vector_ref_cont(v, data, len, idx, env, cont))
@continuation
def copy_vector_ref_cont(v, data, len, idx, env, cont, _vals):
from pycket.interpreter import check_one_val
val = check_one_val(_vals)
data[idx] = val
return copy_vector_loop(v, data, len, idx + 1, env, cont)
@expose("vector->immutable-vector", [values_vector.W_MVector], simple=False)
def vector2immutablevector(v, env, cont):
from pycket.interpreter import return_value
if v.immutable():
return return_value(v, env, cont)
return copy_vector(v, env, cont)
@expose("vector-copy!",
[values.W_MVector, values.W_Fixnum, values.W_MVector,
default(values.W_Fixnum, None), default(values.W_Fixnum, None)],
simple=False)
def vector_copy(dest, _dest_start, src, _src_start, _src_end, env, cont):
if dest.immutable():
raise SchemeException("vector-copy!: given an immutable destination")
src_start = _src_start.value if _src_start is not None else 0
src_end = _src_end.value if _src_end is not None else src.length()
dest_start = _dest_start.value
src_range = src_end - src_start
dest_range = dest.length() - dest_start
if not (0 <= dest_start < dest.length()):
raise SchemeException("vector-copy!: destination start out of bounds")
    if not (0 <= src_start <= src.length()) or not (0 <= src_end <= src.length()):
raise SchemeException("vector-copy!: source start/end out of bounds")
if dest_range < src_range:
raise SchemeException("vector-copy!: not enough room in target vector")
return vector_copy_loop(src, src_start, src_end,
dest, dest_start, 0, env, cont)
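# The copy itself is written in continuation-passing style: vector_copy_loop
# reads src[src_start + i], vector_copy_cont_get writes the value to
# dest[dest_start + i], and goto_vector_copy_loop re-enters the loop with
# i + 1 until src_end is reached.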
@loop_label
def vector_copy_loop(src, src_start, src_end, dest, dest_start, i, env, cont):
from pycket.interpreter import return_value
src_idx = i + src_start
if src_idx >= src_end:
return return_value(values.w_void, env, cont)
return src.vector_ref(src_idx, env,
vector_copy_cont_get(src, src_start, src_end, dest,
dest_start, i, env, cont))
@continuation
def goto_vector_copy_loop(src, src_start, src_end, dest, dest_start, next, env, cont, _vals):
return vector_copy_loop(
src, src_start, src_end, dest, dest_start, next, env, cont)
@continuation
def vector_copy_cont_get(src, src_start, src_end, dest, dest_start, i, env, cont, _vals):
from pycket.interpreter import check_one_val
val = check_one_val(_vals)
idx = i + dest_start
next = i + 1
return dest.vector_set(idx, val, env,
goto_vector_copy_loop(src, src_start, src_end,
dest, dest_start, next, env, cont))
# FIXME: Chaperones
@expose("unsafe-vector-ref", [subclass_unsafe(values.W_MVector), unsafe(values.W_Fixnum)], simple=False)
def unsafe_vector_ref(v, i, env, cont):
from pycket.interpreter import return_value
if isinstance(v, imp.W_ImpVector) or isinstance(v, imp.W_ChpVector):
return v.vector_ref(i.value, env, cont)
else:
assert type(v) is values_vector.W_Vector
val = i.value
assert val >= 0
return return_value(v.unsafe_ref(val), env, cont)
@expose("unsafe-flvector-ref", [unsafe(values_vector.W_FlVector), unsafe(values.W_Fixnum)])
def unsafe_flvector_ref(v, i):
return v.unsafe_ref(i.value)
@expose("unsafe-vector*-ref", [unsafe(values_vector.W_Vector), unsafe(values.W_Fixnum)])
def unsafe_vector_star_ref(v, i):
return v.unsafe_ref(i.value)
# FIXME: Chaperones
@expose("unsafe-vector-set!",
[subclass_unsafe(values.W_MVector), unsafe(values.W_Fixnum), values.W_Object],
simple=False)
def unsafe_vector_set(v, i, new, env, cont):
from pycket.interpreter import return_value
if isinstance(v, imp.W_ImpVector) or isinstance(v, imp.W_ChpVector):
return v.vector_set(i.value, new, env, cont)
else:
assert type(v) is values_vector.W_Vector
v.unsafe_set(i.value, new)
return return_value(values.w_void, env, cont)
@expose("unsafe-vector*-set!",
[unsafe(values_vector.W_Vector), unsafe(values.W_Fixnum), values.W_Object])
def unsafe_vector_star_set(v, i, new):
return v.unsafe_set(i.value, new)
@expose("unsafe-flvector-set!",
[unsafe(values_vector.W_FlVector), unsafe(values.W_Fixnum), unsafe(values.W_Flonum)])
def unsafe_flvector_set(v, i, new):
return v.unsafe_set(i.value, new)
@expose("unsafe-vector-length", [subclass_unsafe(values.W_MVector)])
def unsafe_vector_length(v):
return values.W_Fixnum(v.length())
@expose("unsafe-vector*-length", [unsafe(values_vector.W_Vector)])
def unsafe_vector_star_length(v):
return values.W_Fixnum(v.length())
@expose("unsafe-flvector-length", [unsafe(values_vector.W_FlVector)])
def unsafe_flvector_length(v):
return values.W_Fixnum(v.length())
| {
"content_hash": "f4f87efad6eb160f7a7be28f69e1e4e6",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 104,
"avg_line_length": 38.84905660377358,
"alnum_prop": 0.6741136474016513,
"repo_name": "vishesh/pycket",
"id": "d0bcab96830e15c26d17165a4d8dfce5d360de84",
"size": "8284",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pycket/prims/vector.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "654"
},
{
"name": "Eagle",
"bytes": "137124"
},
{
"name": "KiCad",
"bytes": "241342"
},
{
"name": "Makefile",
"bytes": "1857"
},
{
"name": "Python",
"bytes": "856891"
},
{
"name": "Racket",
"bytes": "587887"
},
{
"name": "Scheme",
"bytes": "215"
},
{
"name": "Shell",
"bytes": "8539"
}
],
"symlink_target": ""
} |
"""Implements natural language parsers.
This module provides two types of natural language parsers:
1. ConceptualParser, a conceptual, frame-based parser in the style of
Charles Martin's Direct Memory Access Parser.
2. IndexedConceptParser, a less strict parser based on the parser in
Will Fitzgerald's thesis 'Building Embedded Conceptual Parsers'.
"""
from energid_nlp import logic
from energid_nlp import fdl
from energid_nlp import utils
from energid_nlp import stemmer
import logging
import re
import string
import sys
from copy import copy
import math
import StringIO
import pprint
import getopt
# We'll be using $constraint internally.
CONSTRAINT_EXPR = logic.expr('$constraint')
def tokenize(text):
"""Tokenizes a string. Tokens consist of characters that are
letters, digits or underscores. This function is primarily intended
for text typed directly by users or from a speech recognizer.
"""
regex = re.compile(r'\W+')
tokens = regex.split(text.lower())
tokens = [token for token in tokens if token]
return tokens
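# A quick, doctest-style illustration (added; not in the original source):
#
#   >>> tokenize("Pick up the RED ball, please!")
#   ['pick', 'up', 'the', 'red', 'ball', 'please']
#
# Case is folded and runs of non-word characters become token boundaries.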
class Error(Exception):
pass
class ParserBase:
"""A simple base class for the ConceptualParser and the
IndexedConceptParser. Really just provides a tokenize method and a
parse method.
"""
def __init__(self):
self.debug = 0
def tokenize(self, text):
"""Tokenizes a string."""
tokens = tokenize(text.lower())
if self.debug > 0:
print 'Tokens: %s' % (tokens,)
return tokens
def parse_tokens(self, text, debug=0):
raise NotImplementedError
def parse(self, text, debug=0):
"""Parses a string. Returns the list of valid parses."""
if not isinstance(text, basestring):
raise TypeError('%r is not a string.' % (text,))
results = self.parse_tokens(self.tokenize(text), debug=debug)
if len(results) > 1:
results = utils.remove_duplicates(results)
for result in results:
result.text = text
return results
Cardinals = {
'zero': 0, 'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5,
'six': 6, 'seven': 7, 'eight': 8, 'nine': 9, 'niner': 9, 'ten': 10
}
# ----------------------------------------
# Conceptual Parser
# ----------------------------------------
class ConceptualParser(ParserBase):
"""A DMAP-inspired conceptual memory parser."""
def __init__(self, kb, stem=False):
ParserBase.__init__(self)
self.kb = kb
self.kb.define_fluent(CONSTRAINT_EXPR, inherited=True)
self.syntax_functions = {}
self.preparsers = []
self.anytime_predictions = {}
self.dynamic_predictions = {}
self.phrasal_patterns = {}
self.phrasal_pattern_objects = {}
self.references = []
self.position = 0
self.reference_callback = None
    # Stemming in the ConceptualParser is not well tested; it's really
# only used when we're a servant of the ICP.
self.stem = stem
if stem:
self.stemmer = stemmer.PorterStemmer()
self.install_syntax_functions()
self.install_preparsers()
self.reset()
def install_syntax_functions(self):
"""Installs the standard syntax directives."""
self.syntax_functions[':head'] = head_prediction_generator
self.syntax_functions[':optional'] = optional_prediction_generator
self.syntax_functions[':sequence'] = sequence_prediction_generator
self.syntax_functions[':any'] = any_prediction_generator
def install_preparsers(self):
def parse_c_number(token):
return logic.Description(logic.expr('c-number'),
{logic.expr('value'): logic.expr(int(token))})
self.add_preparser('[0-9]+', parse_c_number)
def parse_cardinal(token):
return logic.Description(
logic.expr('c-digit'),
{logic.expr('value'): logic.expr(Cardinals[token])})
self.add_preparser('one|two|three|four|five|six|seven|eight|nine|ten|zero',
parse_cardinal)
def parse_mgrs_square(token):
return logic.Description(logic.expr('c-mgrs-square'),
{logic.expr('letters'): logic.expr(token)})
self.add_preparser('[a-z][a-z]', parse_mgrs_square)
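  # Added example (not in the original source): with the preparsers installed
  # above, the token '42' preparses to a c-number description whose 'value'
  # slot is 42, and 'seven' to a c-digit whose 'value' slot is 7; tokens that
  # match no preparser pass through unchanged.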
def add_preparser(self, regex, function):
matcher = re.compile(regex)
self.preparsers.append([matcher, function])
def check_preparsers(self, token):
"""Checks to see if any preparsers would like to handle the token.
If not, None is returned, otherwise the result of preparsing the
token is returned.
"""
for [matcher, function] in self.preparsers:
match = matcher.match(token)
if match is None or match.end() != len(token):
pass
else:
return function(token)
return None
def preparse(self, token):
"""Runs the token through any relevant preparser."""
result = self.check_preparsers(token)
if result is None:
return token
else:
return result
def reset(self):
"""Resets the parser state as if it hadn't yet parsed anything."""
self.dynamic_predictions = {}
self.position = 0
self.references = []
def add_phrasal_pattern(self, base, phrasal_pattern):
"""Adds a phrasal pattern to a class. The phrasal_pattern
argument is a string using the phrasal pattern syntax,
e.g. '<action> the ?:[dang|darn] <object>' (see the
PhrasalPatternParser class).
"""
if not base in self.phrasal_patterns:
self.phrasal_patterns[base] = [phrasal_pattern]
else:
self.phrasal_patterns[base].append(phrasal_pattern)
pattern_parser = PhrasalPatternParser(stem=self.stem)
pp_obj = pattern_parser.parse(phrasal_pattern)
self.add_phrasal_pattern_object(base, pp_obj)
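  # Added usage sketch (illustrative names, not from the original source):
  # assuming a KB that defines a frame c-pickup-action whose 'object' slot is
  # constrained, something like
  #
  #   parser.add_phrasal_pattern(logic.expr('c-pickup-action'),
  #                              'pick up ?:the <object>')
  #
  # lets parse('pick up the ball') return a c-pickup-action description with
  # its 'object' slot filled by the frame the KB recognizes for 'ball'.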
def add_phrasal_pattern_object(self, base, phrasal_pattern_obj):
if not base in self.phrasal_pattern_objects:
self.phrasal_pattern_objects[base] = [phrasal_pattern_obj]
else:
self.phrasal_pattern_objects[base].append(phrasal_pattern_obj)
for pred in self.generate_predictions(
base, [phrasal_pattern_obj], None, None, {}, 0.0):
self.index_anytime_prediction(pred)
def index_prediction(self, table, prediction):
target = self.prediction_target(prediction)
if target in table:
table[target].append(prediction)
else:
table[target] = [prediction]
def index_anytime_prediction(self, prediction):
"""Adds a prediction to the set of anytime predictions."""
if self.debug > 1:
print 'Indexing anytime prediction %s' % (prediction,)
self.index_prediction(self.anytime_predictions, prediction)
def index_dynamic_prediction(self, prediction):
"""Adds a prediction to the set of dynamic predictions."""
if self.debug > 1:
print 'Indexing dynamic prediction %s' % (prediction,)
self.index_prediction(self.dynamic_predictions, prediction)
def predictions_on(self, item):
preds = (predictions_on(self.dynamic_predictions, item) +
predictions_on(self.anytime_predictions, item))
if self.debug > 1:
print 'Predictions on %s are %s' % (item, preds)
return preds
def clear_predictions(self):
self.anytime_predictions = {}
self.dynamic_predictions = {}
def pparse(self, text, debug=0):
"""Parses a string and pretty-prints the results."""
pprint.pprint(map(logic.Description.dictify,
self.parse(text, debug)))
def parse_tokens(self, tokens, debug=0):
"""Parses a sequence of tokens. Returns the list of valid parses."""
self.reset()
self.debug = debug
for position, token in enumerate(tokens):
if self.stem:
token = self.stemmer.stem(token)
if not isinstance(token, basestring):
raise TypeError(
'Only string tokens are allowed; %s is not a string.' % (token,))
self.reference(token, self.position, self.position, 0.0)
preparse = self.check_preparsers(token)
if preparse:
self.reference(preparse, self.position, self.position, 0.0)
self.position = position + 1
return self.complete_parses(len(tokens))
def complete_parses(self, pos):
"""Returns a list of complete parses given the current parser
state.
"""
parses = []
for [item, start, end, unused_value] in self.references:
if start == 0 and end == pos - 1 and isinstance(item, logic.Description):
parses.append(item)
return parses
def reference(self, item, start, end, value):
"""References an item (a token string or a class)."""
assert isinstance(item, basestring) or isinstance(item, logic.Description)
if self.debug > 0:
print 'referencing %s' % ((item, start, end),)
self.references.append([item, start, end, value])
for abst in self.all_abstractions(item):
if self.reference_callback:
apply(self.reference_callback, [abst, start, end, value])
for prediction in self.predictions_on(abst):
self.advance_prediction(prediction, item, start, end)
def advance_prediction(self, prediction, item, start, end):
"""Advances a prediction."""
if self.debug > 2:
print 'Advancing prediction %s' % ((prediction, item, start, end),)
if prediction.next is None or prediction.next == start:
phrasal_pattern = prediction.phrasal_pattern[1:]
if prediction.start is not None:
start = prediction.start
if is_head(prediction.phrasal_pattern[0]):
base = item.base
try:
slots = self.merge_slots(prediction.slots, item.slots)
except DuplicateSlotError:
return
else:
base = prediction.base
slots = self.extend_slots(prediction, item)
if phrasal_pattern == []:
# Prediction has been used up.
self.reference(
self.find_frame(base, slots), start, end, prediction.value)
else:
for prediction in self.generate_predictions(
base, phrasal_pattern, start, self.position + 1, slots,
prediction.value):
if len(prediction.phrasal_pattern) > 0:
self.index_dynamic_prediction(prediction)
else:
self.reference(
self.find_frame(prediction.base, slots), start, end,
prediction.value)
def generate_predictions(self, base, phrasal_pattern, start, position, slots,
value):
predictions = list(self.generate_predictions2(
base, phrasal_pattern, start, position, slots, value))
return predictions
def generate_predictions2(self, base, phrasal_pattern, start, position,
slots, value):
# If there's no syntax directive, it's an implicit sequence. Make
# explicit what's implicit.
if not self.is_syntax_directive(phrasal_pattern[0]):
phrasal_pattern = [[':sequence'] + phrasal_pattern]
new_predictions = apply(self.syntax_functions[phrasal_pattern[0][0]],
[base, phrasal_pattern, start, position, slots])
for pred in new_predictions:
pred.value = pred.value + value
if (len(pred.phrasal_pattern) > 0 and
self.is_syntax_directive(pred.phrasal_pattern[0])):
for p in self.generate_predictions2(base, pred.phrasal_pattern,
start, position, slots, value):
yield p
else:
yield pred
def is_syntax_directive(self, term):
"""Checks whether a term in a phrasal pattern is a syntax
directive, e.g. [':optional' ...].
"""
if isinstance(term, list):
if term[0] in self.syntax_functions:
return True
raise Error('%s is not a valid syntax function.' % (term[0],))
else:
return False
def merge_slots(self, pred_slots, item_slots):
"""Merges two sets of slots into one superduper collection of slots."""
for slot in pred_slots:
if slot in item_slots:
raise DuplicateSlotError('Slot %s already has the value %s.' % (
slot, item_slots[slot]))
slots = {}
for slot in pred_slots:
slots[slot] = pred_slots[slot]
for slot in item_slots:
slots[slot] = item_slots[slot]
return slots
def find_frame(self, base, slots):
"""Creates a description with the specified base class and slots."""
return logic.Description(base, slots)
def extend_slots(self, prediction, item):
"""If the prediction is waiting for a slot-filler, and the item we
saw can fill the slot, add the slot with filler to the predictions
slots.
"""
spec = prediction.phrasal_pattern[0]
slots = prediction.slots
if is_role_specifier(spec):
new_slots = copy(slots)
new_slot = self.role_specifier(spec)
if new_slot in new_slots:
raise DuplicateSlotError('Slot %s already exists in %s.' % (
new_slot, prediction))
new_slots[new_slot] = item
return new_slots
else:
return slots
def prediction_target(self, prediction):
spec = prediction.phrasal_pattern[0]
if self.is_syntax_directive(spec):
raise Error('Cannot index on syntax directive %s.' % (spec,))
if is_role_specifier(spec):
base = prediction.base
value = self.slot_constraint(base, self.role_specifier(spec))
if value is not None:
return value
else:
raise Error('%s has no constraint in %s.' % (spec, base))
elif is_head(spec):
return prediction.base
else:
return spec
def all_abstractions(self, item):
if isinstance(item, basestring):
return [item]
elif isinstance(item, logic.Expr):
return self.kb.all_parents(item)
elif isinstance(item, logic.Description):
return self.kb.all_parents(logic.expr(item.base))
else:
raise Error('%s must be a string or Expr.' % (repr(item,)))
def slot_constraint(self, item, role_spec):
"""Looks up the constraint on the specified slot for item."""
return self.kb.slot_value(
logic.expr(item),
CONSTRAINT_EXPR,
logic.expr(role_spec))
def role_specifier(self, item):
return logic.expr(item[1:])
class Prediction:
"""Represents a prediction the parser has made about what the next
token might be and what frame it is part of.
"""
def __init__(self, base, phrasal_pattern, start, next_pos, slots, value):
self.base = base
self.phrasal_pattern = phrasal_pattern
self.start = start
self.next = next_pos
self.slots = slots
self.value = value
def __repr__(self):
return '<%s base: %s start: %s next: %s slots: %s pat: %s value: %s>' % (
self.__class__.__name__, repr(self.base), self.start, self.next,
repr(self.slots), self.phrasal_pattern, self.value)
# We make phrasal_pattern a somewhat fancy attribute of this class;
# When it's set to a sequence, we automatically tokenize the first
# element.
#
# pred.phrasal_pattern = ['how are you', 'john?']
#
# pred.phrasal_pattern -> ['how', 'are', 'you', 'john?']
#
# FIXME: This doesn't feel like the best place to do this.
def __setattr__(self, name, value):
if name == 'phrasal_pattern':
if (len(value) > 0 and isinstance(value[0], basestring) and
value[0][0] != ':' and value[0][0] != '?'):
tokens = self.tokenize(value[0])
self.__dict__[name] = tokens + value[1:]
else:
self.__dict__[name] = value
else:
self.__dict__[name] = value
def __getattr__(self, name):
if name == 'phrasal_pattern':
return self._phrasal_pattern
else:
raise AttributeError(name)
def tokenize(self, text):
return tokenize(text)
def predictions_on(prediction_table, item):
if item in prediction_table:
predictions = prediction_table[item]
else:
predictions = []
return predictions
# These *_prediction_generator functions are used for syntax directive
# processing. They return a list of new predictions.
def head_prediction_generator(base, phrasal_pattern, start, position, slots):
"""Generates predictions for :head."""
  # Prediction requires a value argument; 0.0 is assumed here so the call
  # matches the six-argument constructor used by the other generators.
  return [Prediction(
      base, [':head'] + phrasal_pattern[1:], start, position, slots, 0.0)]
def sequence_prediction_generator(base, phrasal_pattern, start, position,
slots):
"""Generates predictions for :sequence."""
return [Prediction(base, phrasal_pattern[0][1:] + phrasal_pattern[1:],
start, position, slots, 1.0)]
def optional_prediction_generator(base, phrasal_pattern, start, position,
slots):
"""Generates predictions for :optional."""
return [Prediction(base, phrasal_pattern[1:], start, position, slots, 0.2),
Prediction(base, phrasal_pattern[0][1:] + phrasal_pattern[1:],
start, position, slots, 0.0)]
def any_prediction_generator(base, phrasal_pattern, start, position, slots):
"""Generates predictions for :any."""
preds = map(lambda pat: Prediction(base, [pat] + phrasal_pattern[1:],
start, position, slots, 0.0),
phrasal_pattern[0][1:])
return preds
class DuplicateSlotError(Error):
pass
def is_role_specifier(item):
return item[0] == '?'
def is_head(item):
return item == ':head'
class PhrasalPatternParser:
"""Parses phrasal patterns.
<color> is a reference to a slot named color.
<:head> is special, and refers to the phrase head.
?:thing means that thing is optional.
[thing-a|thing-b] means that either thing-a or thing-b is acceptable.
Examples:
'pick up ?:the <object>'
'?:<name>, ?:[please|would you] clean my <object>'
"""
def __init__(self, stem=False):
# We do the stemming of literal tokens in this class. Is that
# weird?
if stem:
self.stemmer = stemmer.PorterStemmer()
else:
self.stemmer = False
# We do this thing where we parse into an intermediate
# representation (as returned by parse_tree), then convert that into
# the final form. I don't remember how that came about and it
# should perhaps be examined in the future.
#
# Pattern string:
# '?:[let's|let us] <action>'
#
# Intermediate representation:
# [':sequence',
# [':optional',
# [':any',
# [':symbol', 'let's'],
# [':sequence', [':symbol', 'let'], [':symbol', 'us']]]],
# [':slotref', [':symbol', 'action']]]
#
# Final form:
# [':sequence',
# [':optional',
# [':any', [':sequence', 'let', 's'], [':sequence', 'let', 'us']]],
# '?action']
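#
# A hedged end-to-end sketch (assuming tokenize() simply splits literal
# text into word tokens, e.g. tokenize('pick') -> ['pick']):
#
#   parser = PhrasalPatternParser()
#   parser.parse('pick up ?:the <object>')
#   # -> [':sequence', 'pick', 'up', [':optional', 'the'], '?object']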
def parse(self, pattern):
"""Parses a string containing a phrasal pattern into a tree
representation.
"""
phrasal_pattern = self.convert_parse_tree_to_phrasal_pattern(
self.parse_tree(pattern))
return phrasal_pattern
def parse_tree(self, input_str):
[obj, unused_position] = self.read_sequence(input_str, 0)
return obj
def read(self, input_str, position):
position = self.skip_whitespace(input_str, position)
if position >= len(input_str):
return [None, position]
char = input_str[position]
if char == '<':
return self.read_slot(input_str, position + 1)
elif char == '{':
return self.read_slot(input_str, position + 1, '{', '}')
elif char == '?' and input_str[position + 1] == ':':
return self.read_optional(input_str, position + 2)
elif char == '[':
return self.read_choice(input_str, position + 1)
elif self.is_symbol_char(char):
return self.read_token(input_str, position)
else:
raise Error(
'Illegal character %r at position %s in %r.' % (
char, position, repr(input_str)))
def read_sequence(self, input_str, position, terminators=''):
objects = []
[obj, position] = self.read(input_str, position)
if obj is not None:
objects.append(obj)
while (obj is not None and
(position >= len(input_str) or
not input_str[position] in terminators)):
[obj, position] = self.read(input_str, position)
if obj is not None:
objects.append(obj)
return [self.make_sequence(objects), position]
def read_slot(self, input_str, position, slot_char='<', terminator='>'):
[symbol, position] = self.read_symbol(input_str, position)
position = self.skip_whitespace(input_str, position)
if not position < len(input_str):
raise Error(
'Unterminated %r in phrasal pattern %r.' % (
slot_char, input_str))
if input_str[position] != terminator:
raise Error(
('Unexpected character %r in slot reference in phrasal '
'pattern %r') % (
input_str[position], input_str))
return [self.make_slot(symbol), position + 1]
def read_optional(self, input_str, position):
[obj, position] = self.read(input_str, position)
return [self.make_optional(obj), position]
def read_choice(self, input_str, position):
choices = []
while input_str[position] != ']':
[obj, position] = self.read_sequence(input_str, position, '|]')
position = self.skip_whitespace(input_str, position)
if position >= len(input_str):
raise Error("Unterminated '[' in %r." % (input_str,))
if not (input_str[position] == ']' or input_str[position] == '|'):
raise Error('Illegal character %r in %r.' % (
input_str[position], input_str))
if input_str[position] == '|':
position = position + 1
choices.append(obj)
return [self.make_choice(choices), position + 1]
def read_symbol(self, input_str, position):
position = self.skip_whitespace(input_str, position)
start_position = position
while (position < len(input_str) and
self.is_symbol_char(input_str[position])):
position = position + 1
return [self.make_symbol(input_str[start_position:position]), position]
def read_token(self, input_str, position):
position = self.skip_whitespace(input_str, position)
start_position = position
while (position < len(input_str) and
self.is_symbol_char(input_str[position])):
position = position + 1
symbol = self.make_symbol(
self.maybe_stem(input_str[start_position:position]))
return symbol, position
def make_symbol(self, s):
return [':symbol', s]
def make_sequence(self, objects):
if len(objects) == 1:
return objects[0]
else:
return [':sequence'] + objects
def make_optional(self, obj):
return [':optional', obj]
def make_choice(self, objects):
if len(objects) == 1:
return objects[0]
else:
return [':any'] + objects
def make_slot(self, symbol):
return [':slotref', symbol]
def skip_whitespace(self, input_str, position):
while (position < len(input_str) and
(input_str[position] == ' ' or input_str[position] == '\n')):
position = position + 1
return position
def is_symbol_char(self, char):
return char in string.digits or char in string.letters or char in "-'?:"
def convert_parse_tree_to_phrasal_pattern(self, tree):
node_type = tree[0]
if node_type == ':sequence':
return [':sequence'] + map(
self.convert_parse_tree_to_phrasal_pattern, tree[1:])
elif node_type == ':symbol':
if tree[1][0] == '?' and tree[1][1] in string.letters:
return tree[1]
else:
symbols = tokenize(tree[1])
if len(symbols) == 1:
return symbols[0]
else:
return [':sequence'] + symbols
elif node_type == ':slotref':
symbol_str = tree[1][1]
if symbol_str == ':head':
return ':head'
else:
return '?' + symbol_str
elif node_type == ':optional':
return [':optional'] + map(
self.convert_parse_tree_to_phrasal_pattern, tree[1:])
elif node_type == ':any':
return [':any'] + map(
self.convert_parse_tree_to_phrasal_pattern, tree[1:])
else:
raise Error('Unknown element %s. (%s)' % (node_type, tree))
def maybe_stem(self, token):
if self.stemmer:
return self.stemmer.stem(token)
else:
return token
class FrameHandler(fdl.BaseFrameHandler):
def __init__(self, kb, cp, icp):
fdl.BaseFrameHandler.__init__(self, kb, cp, icp)
self.constraints = {}
def handle_constraints(self, frame, constraints):
fdl.BaseFrameHandler.handle_constraints(self, frame, constraints)
if len(constraints) > 0:
self.constraints[frame['class_name']] = constraints
class InteractiveParserApp:
"""Lets you interactively play with a ConceptualParser."""
def __init__(self, argv):
self.kb = None
self.cp_parser = None
self.icp_parser = None
self.fdl_handler = None
self.fdl_parser = None
self.debug = 0
self.run_tests = False
self.transcript_path = None
self.test_classes = []
optlist, args = getopt.getopt(argv[1:], 'td:f:c:')
for o, v in optlist:
if o == '-d':
self.debug = v
elif o == '-t':
self.run_tests = True
elif o == '-c':
self.test_classes = v.split(',')
elif o == '-f':
self.transcript_path = v
self.fdl_file = args[0]
def run(self):
self.kb = logic.PropKB()
self.cp_parser = ConceptualParser(self.kb)
self.icp_parser = IndexedConceptParser(self.kb)
self.fdl_handler = FrameHandler(self.kb, self.cp_parser, self.icp_parser)
self.fdl_parser = fdl.FDLParser(self.fdl_handler)
self.fdl_parser.parse_fdl_file(self.fdl_file, self.debug)
if self.run_tests:
self.check_constraints()
self.fdl_parser.run_test_phrases(
self.test_classes, self.cp_parser, self.icp_parser)
if self.transcript_path:
for line in open(self.transcript_path):
line = line[0:-1]
if len(line) > 0:
print '\n*** %s' % (line,)
parses = self.cp_parser.parse(line)
print ' %s:' % (len(parses),)
pprint.pprint(parses)
if not self.run_tests and not self.transcript_path:
self.do_parse_loop()
def check_constraints(self):
def can_be_constraint(concept):
for c in self.kb.all_children(logic.expr(concept)):
if c in self.cp_parser.phrasal_patterns:
return True
return False
for class_name in self.fdl_handler.constraints:
constraints = self.fdl_handler.constraints[class_name]
for name in constraints:
req_type = constraints[name]
if not can_be_constraint(req_type):
logging.warning(
"%s has constraint '%s IS-A %s' which has no phrasal patterns",
class_name, name, req_type)
sv = self.kb.slot_value(logic.expr(class_name), logic.expr(name))
if sv:
if not self.kb.isa(sv, logic.expr(req_type)):
logging.warning(
("%s has constraint '%s IS-A %s' which is not consistent "
'with slot value %s'),
class_name, name, req_type, sv)
def do_parse_loop(self):
while True:
sys.stdout.write('? ')
input_str = sys.stdin.readline()
if len(input_str) == 0:
break
if input_str[0] == '#':
print 'CP: ' + self.cp_parser.predictions_on(eval(input_str[1:]))
#print 'ICP: ' + self.icp_parser.predictions_on(eval(input_str[1:]))
elif input_str[0] == '%':
print 'CP: ' + self.cp_parser.anytime_predictions
#print 'ICP: ' + self.icp_parser.anytime_predictions
else:
cp_results = self.cp_parser.parse(input_str, debug=self.debug)
if cp_results:
print 'CP:'
pprint.pprint(map(lambda d: d.dictify(), cp_results))
icp_results = self.icp_parser.parse(input_str, debug=self.debug)
if icp_results:
print 'ICP:'
pprint.pprint(map(lambda d: d.dictify(), icp_results))
# --------------------------------------------------
# Indexed Concept Parser
# --------------------------------------------------
# Only parse results with a score greater than this will be returned.
CUTOFF_ICP_SCORE = -1000000
MIN_PROBABILITY = -100.0 / CUTOFF_ICP_SCORE
MIN_INFORMATION_VALUE = -100.0 / CUTOFF_ICP_SCORE
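# (With CUTOFF_ICP_SCORE = -1000000, both of the floors above work out to
# -100.0 / -1000000 = 0.0001.)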
class IndexedConceptParser(ParserBase):
"""A Will Fitzgerald-style Indexed Concept Parser."""
def __init__(self, kb):
ParserBase.__init__(self)
self.debug = 0
self.kb = kb
self.cp_parser = ConceptualParser(kb, stem=True)
self.index_sets = {}
self.target_concepts = {}
self.appraisers = []
self.total_appraiser_votes = 0
self.stemmer = stemmer.PorterStemmer()
self.index_set_pattern_parser = IndexSetPatternParser(kb)
self.install_standard_appraisers()
self.unique_target_concepts = {}
def add_appraiser(self, appraiser, votes):
self.appraisers.append([appraiser, votes])
self.total_appraiser_votes = self.total_appraiser_votes + votes
def install_standard_appraisers(self):
self.add_appraiser(PredictedScore(self), 1)
self.add_appraiser(UnpredictedScore(self), 1)
self.add_appraiser(UnseenScore(self), 1)
self.add_appraiser(RequiredScore(self), 10)
def stem(self, token):
if isinstance(token, basestring):
return self.stemmer.stem(token)
else:
return token
def parse_tokens(self, tokens, debug=0):
self.debug = debug
self.cp_parser.debug = debug
indices = self.find_indices(tokens, self.match_function)
if debug > 0:
print 'ICP parsing tokens %s' % (tokens,)
if debug > 1:
print 'ICP found indices %s' % (indices,)
results = self.score_index_sets(indices)
results.sort(key=lambda x: x.score, reverse=True)
results = [result for result in results if result.score > CUTOFF_ICP_SCORE]
results = utils.remove_duplicates(
results,
lambda r1, r2: r1.target_concept == r2.target_concept)
if debug > 0:
print 'ICP results: %s' % (results,)
return results
def find_indices(self, tokens, match_fn):
return match_fn(tokens)
def match_function(self, tokens):
"""Uses the CP to parse tokens and returns a list of the tokens
and any concepts that were referenced.
"""
items = []
def add_ref(item, unused_start, unused_end, unused_value):
if isinstance(item, logic.Description):
items.append(logic.expr(item))
else:
items.append(item)
self.cp_parser.reference_callback = add_ref
self.cp_parser.parse_tokens(tokens, debug=self.debug)
return items
def score_index_sets(self, found_indices):
results = []
for index_set in self.candidate_index_sets(found_indices):
result = ICPResult(None,
self.index_set_score(index_set, found_indices),
index_set.target_concept,
index_set.indices,
self.extract_slot_fillers(index_set, found_indices))
results.append(result)
return results
def extract_slot_fillers(self, index_set, found_indices):
# found_indices may be something like ['big', c-big, c-size], in
# which case we want the most specific concept (c-big) to fill our
# slot.
def maybe_use_filler(indices, current_filler, candidate_filler):
if current_filler is None:
return candidate_filler
elif self.kb.isa(logic.expr(indices[candidate_filler]),
logic.expr(indices[current_filler])):
return candidate_filler
else:
return current_filler
result_slots = {}
for [slot, constraint] in index_set.slots:
filler = None
for i, index in enumerate(found_indices):
if (isinstance(index, logic.Expr) and
self.kb.isa(index, constraint) and
not (i in result_slots.values())):
filler = maybe_use_filler(found_indices, filler, i)
if filler is not None:
result_slots[slot] = filler
for k, unused_v in result_slots.items():
result_slots[k] = found_indices[result_slots[k]]
return result_slots
def candidate_index_sets(self, found_indices):
candidates = []
abstractions = []
for index in found_indices:
abstractions = abstractions + self.all_abstractions(index)
for index in abstractions:
candidates = candidates + self.index_sets.get(index, [])
# print 'candidates for %s are %s' % (found_indices, candidates)
return utils.remove_duplicates(candidates)
def all_abstractions(self, item):
if isinstance(item, basestring):
return [item]
elif isinstance(item, logic.Expr):
return self.kb.all_parents(item)
elif isinstance(item, logic.Description):
return self.kb.all_parents(logic.expr(item.base))
else:
raise Error('%s must be a string or Expr.' % (repr(item,)))
def install(self, index_set):
"""Installs an index set."""
index_set.indices = map(self.stem, index_set.indices)
index_set.required_indices = map(self.stem, index_set.required_indices)
self.unique_target_concepts[index_set.target_concept] = True
for index in index_set.indices:
if not index in self.target_concepts.get(index, []):
self.target_concepts[index] = ([index_set.target_concept] +
self.target_concepts.get(index, []))
if not index_set in self.index_sets.get(index, []):
self.index_sets[index] = [index_set] + self.index_sets.get(index, [])
def add_phrasal_pattern(self, base, phrasal_pattern):
# We keep track of indexsets while we let the CP keep track of
# phrasal patterns.
self.cp_parser.add_phrasal_pattern(base, phrasal_pattern)
def add_index_set(self, target_concept, indexsetpattern):
"""Adds an index set to the target concept. The indexsetpattern
must be a string containing an indexset pattern (see
IndexSetPatternParser).
"""
indexset = self.index_set_pattern_parser.parse(
logic.expr(target_concept), indexsetpattern)
self.install(indexset)
def index_set_score(self, index_set, found_indices):
score = 0
for (appraiser, votes) in self.appraisers:
if votes > 0:
appraiser_score = self.call_appraiser(
appraiser,
votes / float(self.total_appraiser_votes),
index_set,
found_indices)
score = score + appraiser_score
return score
def call_appraiser(self, appraiser, weight, index_set, found_indices):
score = weight * appraiser.score(index_set, found_indices)
return score
def probability_of_index(self, index):
cardinality = self.target_concept_cardinality(index)
if cardinality == 0:
# Very small, but > 0
return MIN_PROBABILITY
else:
return float(cardinality) / len(self.unique_target_concepts)
def target_concept_cardinality(self, index):
return len(self.target_concepts.get(index, []))
def summed_value(self, unused_base, predicted_set):
if_sum = 0.0
for item in predicted_set:
if_sum += self.information_value(item)
return if_sum
def information_value(self, index):
value = -math.log(self.probability_of_index(index), 2)
if value == 0.0:
value = MIN_INFORMATION_VALUE
return value
class IndexSet:
"""Represents a set of indices for the IndexedConceptParser.
Includes the target concept, the indices, and required indices.
"""
def __init__(self, target=None, indices=None, required_indices=None,
slots=None):
def cond(test, a, b):
if test:
return a
else:
return b
if isinstance(target, basestring):
self.target_concept = logic.expr(target)
else:
self.target_concept = target
self.indices = cond(indices is None, [], indices)
self.required_indices = cond(
required_indices is None, [], required_indices)
self.slots = cond(slots is None, [], slots)
def __repr__(self):
s = StringIO.StringIO()
s.write('<%s target: %s indices: %s' % (
self.__class__.__name__, self.target_concept, self.indices))
if len(self.required_indices) > 0:
s.write(' required: %s' % (self.required_indices,))
if len(self.slots) > 0:
s.write(' slots: %s' % (self.slots,))
s.write('>')
return s.getvalue()
def __cmp__(self, other):
if (other is self) or (isinstance(other, IndexSet) and
self.target_concept == other.target_concept and
self.indices == other.indices and
self.required_indices == other.required_indices):
return 0
else:
return -1
class ICPResult(logic.Description):
"""Holds an Indexed Concept Parser parse result, which consists of
the target concept, the score, the index concepts and the input
text.
"""
def __init__(self, text, score, target_concept, index_concepts, slots):
logic.Description.__init__(self, target_concept, slots)
self.text = text
self.score = score
self.index_concepts = index_concepts
self.target_concept = target_concept
def __repr__(self):
return '<%s score: %s target: %s slots: %s>' % \
(self.__class__.__name__, self.score, self.target_concept,
self.slots)
# --------------------
# ICP Appraisers
# --------------------
class PredictedScore:
"""ICP appraiser that scores up for indices that we've seen that are
in the indexset.
"""
def __init__(self, parser):
self.parser = parser
def score(self, index_set, found_indices):
predicted = index_set.indices
pred_items = predicted_items(self.parser.kb, found_indices, predicted)
score = (self.parser.summed_value(index_set.target_concept, pred_items) /
self.parser.summed_value(index_set.target_concept, predicted))
return score
class UnpredictedScore:
"""ICP appraiser that penalizes for indices that we've seen that
were not part of the indexset.
"""
def __init__(self, parser):
self.parser = parser
def score(self, index_set, found_indices):
predicted = index_set.indices
unpred_items = unpredicted_items(self.parser.kb, found_indices, predicted)
unpredicted_score = self.parser.summed_value(
index_set.target_concept, unpred_items)
seen_score = self.parser.summed_value(
index_set.target_concept, found_indices)
score = 1.0 - (unpredicted_score / seen_score)
return score
class UnseenScore:
"""ICP appraiser that penalizes for indices we wanted to see but
didn't.
"""
def __init__(self, parser):
self.parser = parser
def score(self, index_set, found_indices):
predicted = index_set.indices
unseed_items = unseen_items(self.parser.kb, found_indices, predicted)
unseed_score = self.parser.summed_value(
index_set.target_concept, unseed_items)
seed_score = self.parser.summed_value(
index_set.target_concept, found_indices)
score = 1.0 - (unseed_score / seed_score)
return score
class RequiredScore:
"""ICP appraiser that nukes your score if there are required indices
that were not seen.
"""
def __init__(self, parser):
self.parser = parser
def score(self, index_set, found_indices):
# Make a copy.
found_indices = found_indices[:]
for requirement in index_set.required_indices:
if not requirement in found_indices:
# Return something nice and low.
return CUTOFF_ICP_SCORE * 10
else:
# Don't want to use a single index to satisfy multiple
# requirements.
del found_indices[found_indices.index(requirement)]
return 0.0
# --------------------
# ICP utilities
# --------------------
def is_concept(thing):
return isinstance(thing, logic.Description) or isinstance(thing, logic.Expr)
def abst_or_whole_of(kb, big, small):
if is_concept(big) and is_concept(small):
return kb.isa(big, small)
else:
return big == small
def spec_or_part_of(kb, big, small):
if is_concept(big) and is_concept(small):
return kb.isa(small, big)
else:
return big == small
def predicted_items(kb, seen_set, predicted_set):
return utils.intersection(predicted_set,
seen_set,
lambda e1, e2: abst_or_whole_of(kb, e1, e2))
def unpredicted_items(kb, seen_set, predicted_set):
return utils.set_difference(seen_set,
predicted_set,
lambda e1, e2: spec_or_part_of(kb, e1, e2))
def unseen_items(kb, seen_set, predicted_set):
return utils.set_difference(predicted_set,
seen_set,
lambda e1, e2: spec_or_part_of(kb, e1, e2))
class IndexSetPatternParser:
"""Parses indexset patterns.
word is a literal token.
$concept is a concept reference.
!thing is a required element.
{slot} is a slot reference.
Examples:
'big $c-dog'
'!big !$c-dog'
'{size} !{color}'
"""
def __init__(self, kb):
self.kb = kb
def parse(self, target, pattern):
"""Parses a string containing a indexset pattern and returns an
IndexSet.
"""
indexset = IndexSet(target)
return self.read(indexset, pattern, 0)
def read(self, indexset, input_str, position):
[unused_index, position] = self.read_one(indexset, input_str, position)
while position < len(input_str):
[unused_index, position] = self.read_one(indexset, input_str, position)
return indexset
def read_one(self, indexset, input_str, position):
position = self.skip_whitespace(input_str, position)
if position >= len(input_str):
return [None, position]
char = input_str[position]
if char == '$':
return self.parse_concept(indexset, input_str, position)
elif char == '!':
return self.parse_required(indexset, input_str, position)
elif char == '{':
return self.parse_slot(indexset, input_str, position)
elif self.is_symbol_char(char):
return self.parse_token(indexset, input_str, position)
else:
raise Error(
'Illegal character %r at position %s in indexset %r.' % (
(char, position, input_str)))
def parse_token(self, indexset, input_str, position):
# -- Token
[index, position] = self.read_symbol(input_str, position)
indexset.indices = indexset.indices + [index]
return [index, position]
def parse_concept(self, indexset, input_str, position):
[index, position] = self.read_concept(input_str, position + 1)
if index is None:
raise Error('Lone $ in indexset %s.' % (repr(input_str),))
indexset.indices = indexset.indices + [index]
return [index, position]
def parse_required(self, indexset, input_str, position):
# -- Required
[index, position] = self.read_one(indexset, input_str, position + 1)
if index is None:
raise Error('Lone ! in indexset %s.' % (repr(input_str),))
indexset.required_indices = indexset.required_indices + [index]
return [index, position]
def parse_slot(self, indexset, input_str, position):
# -- Slot reference
[slot_name, position] = self.read_slot(input_str, position + 1)
if slot_name is None:
raise Error('Empty slot reference in indexset %s.' % (repr(input_str),))
if slot_name in [slot_ref[0] for slot_ref in indexset.slots]:
raise Error(
'Duplicate slot reference %s in indexset %s.' % (
slot_name, repr(input_str)))
slot_type = self.slot_constraint(indexset.target_concept, slot_name)
indexset.slots.append([slot_name, slot_type])
indexset.indices = indexset.indices + [slot_type]
return [slot_type, position]
def read_symbol(self, input_str, position):
position = self.skip_whitespace(input_str, position)
start_position = position
while (position < len(input_str) and
self.is_symbol_char(input_str[position])):
position += 1
return [input_str[start_position:position], position]
def read_concept(self, input_str, position):
[symbol, position] = self.read_symbol(input_str, position)
return [logic.expr(symbol), position]
def read_slot(self, input_str, position):
[symbol, position] = self.read_symbol(input_str, position)
position = self.skip_whitespace(input_str, position)
if not position < len(input_str):
raise Error("Unterminated '{' in indexset %r" % (input_str,))
if input_str[position] != '}':
raise Error(
"Unexpected character '%s' in slot reference in indexset %r." % (
input_str[position], input_str))
return [symbol, position + 1]
def skip_whitespace(self, input_str, position):
while (position < len(input_str) and
(input_str[position] == ' ' or input_str[position] == '\n')):
position += 1
return position
def is_symbol_char(self, char):
return char in string.digits or char in string.letters or char in "-'?:"
def slot_constraint(self, item, slot):
item = logic.expr(item)
slot = logic.expr(slot)
return self.kb.slot_value(item, CONSTRAINT_EXPR, slot)
def main():
p = InteractiveParserApp(sys.argv)
p.run()
if __name__ == '__main__':
main()
| {
"content_hash": "b1a0e0fd1d5c1e0d7b8164f89d8dcc3b",
"timestamp": "",
"source": "github",
"line_count": 1324,
"max_line_length": 79,
"avg_line_length": 33.840634441087616,
"alnum_prop": 0.6336569579288026,
"repo_name": "wiseman/energid_nlp",
"id": "4909ca1341b67eeb4a3189cabb97f212beaa8330",
"size": "44870",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "energid_nlp/parser.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "203850"
},
{
"name": "Shell",
"bytes": "243"
}
],
"symlink_target": ""
} |
"""
Rheostatic - A Static File Server with options.
MIT License
Copyright (c) 2016 Waylan Limberg
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from unittest import TestCase
from rheostatic.__main__ import parse_args
class TestCli(TestCase):
def test_default_args(self):
self.assertEqual(
parse_args([]),
(
('localhost', 8000),
'.',
{
'index_file': 'index.html',
'default_type': 'application/octet-stream',
'encoding': 'utf-8'
}
)
)
def test_root_arg(self):
self.assertEqual(
parse_args(['some/path/']),
(
('localhost', 8000),
'some/path/',
{
'index_file': 'index.html',
'default_type': 'application/octet-stream',
'encoding': 'utf-8'
}
)
)
def test_host_arg(self):
self.assertEqual(
parse_args(['--host', '0.0.0.0']),
(
('0.0.0.0', 8000),
'.',
{
'index_file': 'index.html',
'default_type': 'application/octet-stream',
'encoding': 'utf-8'
}
)
)
def test_port_arg(self):
self.assertEqual(
parse_args(['--port', '80']),
(
('localhost', 80),
'.',
{
'index_file': 'index.html',
'default_type': 'application/octet-stream',
'encoding': 'utf-8'
}
)
)
def test_index_file_arg(self):
self.assertEqual(
parse_args(['--index-file', 'README']),
(
('localhost', 8000),
'.',
{
'index_file': 'README',
'default_type': 'application/octet-stream',
'encoding': 'utf-8'
}
)
)
def test_default_type_arg(self):
self.assertEqual(
parse_args(['--default-type', 'text/plain']),
(
('localhost', 8000),
'.',
{
'index_file': 'index.html',
'default_type': 'text/plain',
'encoding': 'utf-8'
}
)
)
def test_encoding_arg(self):
self.assertEqual(
parse_args(['--encoding', 'ASCII']),
(
('localhost', 8000),
'.',
{
'index_file': 'index.html',
'default_type': 'application/octet-stream',
'encoding': 'ASCII'
}
)
)
def test_default_extension_arg(self):
self.assertEqual(
parse_args(['--default-extension', '.html']),
(
('localhost', 8000),
'.',
{
'index_file': 'index.html',
'default_type': 'application/octet-stream',
'encoding': 'utf-8',
'default_extension': '.html'
}
)
)
| {
"content_hash": "8587b6782a6597622d57997ce8004226",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 78,
"avg_line_length": 30.317241379310346,
"alnum_prop": 0.46678798908098273,
"repo_name": "waylan/rheostatic",
"id": "1fdf7deaaf144143a52be4f6ae52678cc615eb69",
"size": "4396",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rheostatic/tests/test_cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40813"
}
],
"symlink_target": ""
} |
"""
The collection of classes to describe the model's meta-parameters.
A *parameter* is a value that influences the whole model's behavior
in some way. Each parameter has a default value, which you can then
customize per experiment or change interactively using
``Bridge``.
There are two types of parameters in Xentica:
Non-interactive
are constant during a single experiment run. Changing such a
parameter is impossible without rebuilding the whole model's source
code. The engine makes sure those params are correctly defined
globally with the ``#define`` directive. So even if you change their
values at runtime, it doesn't affect the model in any
way.
Interactive
can be effectively changed at runtime, since the engine treats them
as extra arguments to CUDA kernels. That means, as soon as you
set a new value for an interactive parameter, it will be passed
into the kernel(s) at the next timestep. Be warned though: every
parameter declared as interactive will degrade the model's
performance further.
The example of parameters' usage::
from xentica import core
from xentica.tools.rules import LifeLike
from examples.game_of_life import GameOfLife, GOLExperiment
class LifelikeCA(GameOfLife):
rule = core.Parameter(
default=LifeLike.golly2int("B3/S23"),
interactive=True,
)
def absorb(self):
# parent's clone with parameter instead of hardcoded rule
neighbors_alive = core.IntegerVariable()
for i in range(len(self.buffers)):
neighbors_alive += self.neighbors[i].buffer.state
is_born = (self.rule >> neighbors_alive) & 1
is_sustain = (self.rule >> 9 >> neighbors_alive) & 1
self.main.state = is_born | is_sustain & self.main.state
class DiamoebaExperiment(GOLExperiment):
rule = LifeLike.golly2int("B35678/S5678")
model = LifelikeCA(DiamoebaExperiment)
"""
import numpy as np
from xentica.core.variables import Constant
from xentica.core.mixins import BscaDetectorMixin
from xentica.core.expressions import DeferredExpression
class Parameter(BscaDetectorMixin):
"""
The implementation of Xentica meta-parameter.
:param default:
The default value for the parameter to use when it's omitted in
the experiment class.
:param interactive:
``True`` if the parameter could be safely changed at runtime
(more details above in the module description).
"""
def __init__(self, default=0, interactive=False):
"""Initialize the parameter."""
self._value = default
self._interactive = interactive
self._declared = False
self._name = "param" + str(id(self))
self._ctypes = {
int: 'int',
float: 'float',
bool: 'bool',
}
self._dtypes = {
int: np.int32,
float: np.float32,
bool: bool,
}
@property
def value(self):
"""Get the parameter's value directly."""
return self._value
@property
def name(self):
"""Get the parameter's name."""
return self._name
@name.setter
def name(self, val):
"""Set the parameter's name."""
self._name = val
@property
def ctype(self):
"""Get the parameter's C type."""
return self._ctypes.get(type(self._value), 'int32')
@property
def dtype(self):
"""Get the parameter's NumPy type."""
return self._dtypes.get(type(self._value), np.int32)
def _declare_interactive(self):
"""Declare an interactive parameter."""
if self.bsca.is_parameter(self.name):
return
self.bsca.define_parameter(self)
def _declare_once(self):
"""Declare the parameter when it's mentioned."""
if self._interactive:
self._declare_interactive()
return
if self._declared:
return
self._declared = True
self.bsca.define_constant(Constant(self.name, self._value))
def __get__(self, obj, objtype):
"""Implement custom logic when param is get as class descriptor."""
self._declare_once()
if self._interactive:
return DeferredExpression(self._name)
return self._value
def __set__(self, obj, value):
"""Implement custom logic when param is set as class descriptor."""
self._value = value
| {
"content_hash": "490e96f679d58e452dc5be522efd1cea",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 75,
"avg_line_length": 31.337931034482757,
"alnum_prop": 0.6355633802816901,
"repo_name": "a5kin/hecate",
"id": "8a312e5ea3c73871eb40fc68fd97999776b90927",
"size": "4544",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xentica/core/parameters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "57804"
},
{
"name": "Shell",
"bytes": "259"
}
],
"symlink_target": ""
} |
import re
import datetime
import time
#niru's git commit
while True:
#open the file for reading (and close it when done)
with open("test.txt") as file:
    content = file.read()
#Get timestamp
ts = time.time()
ist = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
#open file for read and close it neatly(wrap code in try/except)
#with open('test.txt', 'r') as r:
#content = r.read()
#print content
#Search the entire content for '@' and replace it with time stamp.
new_content = re.sub(r'@.*', ist, content)
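#e.g. a line like 'last run: @whatever' becomes 'last run: 2016-01-01 12:00:00'
#(everything from the '@' to the end of the line is replaced by the timestamp)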
print new_content
#open file for write and close it neatly(wrap code in try/except)
with open('test.txt', 'w') as f:
f.write(new_content)
print "torpid loop complete"
time.sleep(5)
| {
"content_hash": "1b67761a57581d07f4bcc831c752c513",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 75,
"avg_line_length": 26.79310344827586,
"alnum_prop": 0.6164736164736164,
"repo_name": "cloud-engineering/Torpid",
"id": "d2da865d8268b2d0dc8eb4e8b1ccafa7f8841c7c",
"size": "777",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7265"
}
],
"symlink_target": ""
} |
FILE_TO_SORT = 'addresses.txt'
# ==========================
import sys
from pathlib import Path
file_to_sort = Path(__file__) / '..' / FILE_TO_SORT
file_to_sort = file_to_sort.resolve()
print('Gonna be sorting file', file_to_sort)
if not file_to_sort.exists():
sys.exit(f"ERROR! Can't find the file for sorting: {file_to_sort}")
contents = file_to_sort.read_text()
fragments = [fragment.strip() for fragment in contents.split('\n\n')]
cleaned_fragments = (fragment for fragment in fragments if fragment)
sorted_fragments = sorted(cleaned_fragments)
sorted_text = '\n\n'.join(sorted_fragments)
file_to_sort.write_text(sorted_text)
print('Sorting done')
| {
"content_hash": "057907e2fbdd07d6481c841d6043386b",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 71,
"avg_line_length": 26.6,
"alnum_prop": 0.6827067669172933,
"repo_name": "butla/experiments",
"id": "c67611b93d0e39ee414d7be0055342c889b035b4",
"size": "665",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tricks/text_file_block_sorting/text_file_sorter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "18716"
},
{
"name": "CSS",
"bytes": "848"
},
{
"name": "Dockerfile",
"bytes": "1065"
},
{
"name": "Go",
"bytes": "245"
},
{
"name": "HTML",
"bytes": "359777"
},
{
"name": "Java",
"bytes": "2439"
},
{
"name": "JavaScript",
"bytes": "22263"
},
{
"name": "Jupyter Notebook",
"bytes": "487037"
},
{
"name": "Lua",
"bytes": "684"
},
{
"name": "Makefile",
"bytes": "4177"
},
{
"name": "PLpgSQL",
"bytes": "4461"
},
{
"name": "Python",
"bytes": "193267"
},
{
"name": "Rust",
"bytes": "1075"
},
{
"name": "SCSS",
"bytes": "102440"
},
{
"name": "Scala",
"bytes": "4797"
},
{
"name": "Shell",
"bytes": "6079"
}
],
"symlink_target": ""
} |
from .resource import Resource
class RouteTable(Resource):
"""Route table resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource Identifier.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param routes: Collection of routes contained within a route table.
:type routes: list[~azure.mgmt.network.v2015_06_15.models.Route]
:param subnets: A collection of references to subnets.
:type subnets: list[~azure.mgmt.network.v2015_06_15.models.Subnet]
:param provisioning_state: The provisioning state of the resource.
Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param etag: Gets a unique read-only string that changes whenever the
resource is updated.
:type etag: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'routes': {'key': 'properties.routes', 'type': '[Route]'},
'subnets': {'key': 'properties.subnets', 'type': '[Subnet]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, **kwargs):
super(RouteTable, self).__init__(**kwargs)
self.routes = kwargs.get('routes', None)
self.subnets = kwargs.get('subnets', None)
self.provisioning_state = kwargs.get('provisioning_state', None)
self.etag = kwargs.get('etag', None)
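# A minimal construction sketch (illustrative values only, not taken from the
# Azure SDK docs; the keyword names mirror the attribute map above):
#
#   table = RouteTable(location='westus', tags={'env': 'test'}, routes=[])
#   table.provisioning_state   # None until populated from a service response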
| {
"content_hash": "897af7370e12ea60c392f9b028bdcdda",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 85,
"avg_line_length": 36.94444444444444,
"alnum_prop": 0.6025062656641604,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "2ef803a48d80a5fec6ffd9ca7517e4ba51e9a2fb",
"size": "2469",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2015_06_15/models/route_table.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
} |
from pythonwarrior.config import Config
import time
class UI(object):
@staticmethod
def puts(msg):
if Config.out_stream:
return Config.out_stream.write(msg + "\n")
@staticmethod
def puts_with_delay(msg):
result = UI.puts(msg)
if Config.delay:
time.sleep(Config.delay)
return result
@staticmethod
def write(msg):
if Config.out_stream:
return Config.out_stream.write(msg)
@staticmethod
def gets():
if Config.in_stream:
return Config.in_stream.readline()
else:
return ''
@staticmethod
def request(msg):
UI.write(msg)
return UI.gets().rstrip()
@staticmethod
def ask(msg):
return UI.request("%s [yn] " % msg) == 'y'
@staticmethod
def choose(item, options):
if len(options) == 1:
response = options[0]
else:
for idx, option in enumerate(options):
if type(option) == list:
UI.puts("[%d] %s" % (idx+1, option[-1]))
else:
UI.puts("[%d] %s" % (idx+1, option))
choice = UI.request("Choose %s by typing the number: " % item)
response = options[int(choice)-1]
if type(response) == list:
return response[0]
else:
return response
| {
"content_hash": "984702b7eef285d2d1cf49f00ba8a674",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 74,
"avg_line_length": 25.833333333333332,
"alnum_prop": 0.5211469534050179,
"repo_name": "arbylee/python-warrior",
"id": "d65764c3dba7b7931d73c14cdfa1d573a9f78d26",
"size": "1395",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pythonwarrior/ui.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "123534"
}
],
"symlink_target": ""
} |
import factory
from factory.fuzzy import FuzzyInteger, FuzzyChoice
from pycroft.model.traffic import TrafficVolume
from tests.factories import IPFactory
from .base import BaseFactory
from .user import UserFactory
class TrafficDataFactory(BaseFactory):
class Meta:
abstract = True
amount = FuzzyInteger(0, 60 * 1024 ** 3)
timestamp = factory.Faker('date_time')
user = factory.SubFactory(UserFactory)
class TrafficVolumeFactory(TrafficDataFactory):
class Meta:
model = TrafficVolume
ip = factory.SubFactory(IPFactory)
type = FuzzyChoice(['Ingress', 'Egress'])
packets = FuzzyInteger(0, 5000)
class TrafficVolumeLastWeekFactory(TrafficVolumeFactory):
timestamp = factory.Faker('date_time_between', start_date='-1w', end_date='now')
| {
"content_hash": "e4d1c27e53220b98be3695a06fc5230f",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 84,
"avg_line_length": 27.24137931034483,
"alnum_prop": 0.740506329113924,
"repo_name": "agdsn/pycroft",
"id": "df0ce1c263b9993fcb79c66ea969091403bf1fae",
"size": "998",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/factories/traffic.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "10320"
},
{
"name": "Dockerfile",
"bytes": "3341"
},
{
"name": "HTML",
"bytes": "124781"
},
{
"name": "JavaScript",
"bytes": "74707"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "1172012"
},
{
"name": "Shell",
"bytes": "13660"
},
{
"name": "TypeScript",
"bytes": "5231"
}
],
"symlink_target": ""
} |
import logging
import os
_log = logging.getLogger("antd.plugin")
_plugins = []
class Plugin(object):
"""
A plugin receives notifications when new data
is available; it can consume the data or transform it.
TCX file generation and Garmin Connect upload are
both implementations of Plugin. You can implement
your own to produce new file formats or upload somewhere.
"""
def data_available(self, device_sn, format, files):
"""
Notification that data is available. This could
be raw packet data from the device, or higher-level
data generated by other plugins, e.g. TCX.
Return: files which were successfully processed.
"""
pass
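# A minimal sketch of a custom plugin (hypothetical, not part of antd): it
# just logs whatever files it is given and reports them all as processed,
# so publish_data() has nothing to requeue. It would be registered with
# register_plugins(LoggingPlugin()).
class LoggingPlugin(Plugin):
    def data_available(self, device_sn, format, files):
        for f in files:
            _log.info("device %s produced %s data: %s", device_sn, format, f)
        return files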
class PluginQueue(object):
"""
File-based queue representing unprocessed
files which were not handled by a plugin.
"""
def __init__(self, plugin):
try: self.queue_file_name = plugin.cache
except AttributeError: self.queue_file_name = None
self.queue = []
def load_queue(self):
if self.queue_file_name and os.path.isfile(self.queue_file_name):
with open(self.queue_file_name, "r") as file:
lines = file.read().splitlines()
self.queue = []
for line in lines:
device_sn, format, file = line.split(",")
if os.path.isfile(file):
self.queue.append((int(device_sn), format, file))
else:
_log.warning("File pending processing, but disappeared. %s", file)
def save_queue(self):
if self.queue_file_name and self.queue:
with open(self.queue_file_name, "w") as file:
file.writelines("%d,%s,%s\n" % e for e in self.queue)
elif self.queue_file_name and os.path.isfile(self.queue_file_name):
os.unlink(self.queue_file_name)
def add_to_queue(self, device_sn, format, files):
for file in files:
self.queue.append((device_sn, format, file))
def register_plugins(*plugins):
_plugins.extend(p for p in plugins if p is not None)
for plugin in plugins:
try: plugin and recover_and_publish_data(plugin)
except Exception: _log.warning("Plugin failed. %s", plugin, exc_info=True)
def recover_and_publish_data(plugin):
q = PluginQueue(plugin)
q.load_queue()
if q.queue:
try:
_log.debug("Attempting to reprocess failed files.")
for device_sn, format, file in list(q.queue):
if plugin.data_available(device_sn, format, [file]):
q.queue.remove((device_sn, format, file))
except Exception:
_log.warning("Plugin failed. %s", plugin, exc_info=True)
finally:
q.save_queue()
def publish_data(device_sn, format, files):
for plugin in _plugins:
try:
processed = plugin.data_available(device_sn, format, files)
not_processed = [f for f in files if f not in processed]
except Exception:
processed = []
not_processed = files
_log.warning("Plugin failed. %s", plugin, exc_info=True)
finally:
q = PluginQueue(plugin)
q.load_queue()
q.add_to_queue(device_sn, format, not_processed)
q.save_queue()
# vim: ts=4 sts=4 et
| {
"content_hash": "948d4084b0054791b2bb4d4529b07c56",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 86,
"avg_line_length": 34.81443298969072,
"alnum_prop": 0.5928338762214984,
"repo_name": "matt-leach/python-ant-downloader",
"id": "439cc443fb4ff83d56a778327a7c8215c6efcc60",
"size": "4750",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "antd/plugin.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "143990"
},
{
"name": "Shell",
"bytes": "1138"
}
],
"symlink_target": ""
} |
from abc import abstractmethod, ABCMeta
import threading
import time
import six
from sifr.hll import HLLCounter
try:
from collections import Counter
except ImportError: # pragma: no cover
from .backports.counter import Counter # pragma: no cover
@six.add_metaclass(ABCMeta)
class Storage(object):
@abstractmethod
def incr(self, span, amount=1):
raise NotImplementedError
@abstractmethod
def incr_multi(self, spans, amount=1):
raise NotImplementedError
@abstractmethod
def incr_unique(self, span, identifier):
raise NotImplementedError
@abstractmethod
def incr_unique_multi(self, spans, identifier):
raise NotImplementedError
@abstractmethod
def track(self, span, identifier):
raise NotImplementedError
@abstractmethod
def track_multi(self, spans, identifier):
raise NotImplementedError
@abstractmethod
def count(self, span):
raise NotImplementedError
@abstractmethod
def cardinality(self, span):
raise NotImplementedError
@abstractmethod
def uniques(self, span):
raise NotImplementedError
class MemoryStorage(Storage):
def __init__(self):
self.lock = threading.RLock()
self.unique_counter = HLLCounter()
self.counter = Counter()
self.tracker = {}
self.expirations = {}
self.timer = threading.Timer(0.01, self.__expire_events)
self.timer.start()
super(MemoryStorage, self).__init__()
def __expire_events(self):
with self.lock:
for key in list(self.expirations.keys()):
self.__check_expiry(key)
def __schedule_expiry(self):
if not self.timer.is_alive():
self.timer = threading.Timer(0.01, self.__expire_events)
self.timer.start()
def __check_expiry(self, key):
with self.lock:
if (
key in self.expirations
and self.expirations[key] <= time.time()
):
self.counter.pop(key, None)
self.unique_counter.pop(key)
self.tracker.pop(key, None)
self.expirations.pop(key, None)
def uniques(self, span):
self.__check_expiry(span.key)
if span.key not in self.tracker:
return set()
else:
return self.tracker.get(span.key)
def count(self, span):
self.__check_expiry(span.key)
return self.counter.get(span.key, 0)
def cardinality(self, span):
self.__check_expiry(span.key)
return self.unique_counter.get(span.key)
def track(self, span, identifier):
if span.expiry is not None:
self.expirations[span.key] = span.expiry
self.tracker.setdefault(span.key, set())
self.tracker[span.key].add(identifier)
def track_multi(self, spans, identifier):
for span in spans:
self.track(span, identifier)
def incr(self, span, amount=1):
with self.lock:
self.count(span)
self.__schedule_expiry()
if span.expiry is not None:
self.expirations[span.key] = span.expiry
self.counter[span.key] += amount
def incr_multi(self, spans, amount=1):
for span in spans:
self.incr(span)
def incr_unique(self, span, identifier):
self.cardinality(span)
self.__schedule_expiry()
if span.expiry is not None:
self.expirations[span.key] = span.expiry
self.unique_counter.add(span.key, identifier)
def incr_unique_multi(self, spans, identifier):
for span in spans:
self.incr_unique(span, identifier)
class RedisStorage(Storage):
def __init__(self, redis):
self.redis = redis
def track(self, span, identifier):
with self.redis.pipeline(transaction=False) as pipeline:
pipeline.sadd(span.key + ":t", identifier)
pipeline.expire(span.key + ":t",
int(span.expiry) - int(time.time()))
pipeline.execute()
def track_multi(self, spans, identifier):
with self.redis.pipeline(transaction=False) as pipeline:
for span in spans:
pipeline.sadd(span.key + ":t", identifier)
if span.expiry is not None:
pipeline.expire(
span.key + ":t",
int(span.expiry) - int(time.time())
)
pipeline.execute()
def uniques(self, span):
return self.redis.smembers(span.key + ":t") or set()
def count(self, span):
value = self.redis.get(span.key + ":c")
return int(value) if value is not None else 0
def incr_unique(self, span, identifier):
with self.redis.pipeline(transaction=False) as pipeline:
pipeline.pfadd(span.key + ":u", identifier)
pipeline.expire(span.key + ":u",
int(span.expiry) - int(time.time()))
pipeline.execute()
def incr_unique_multi(self, spans, identifier):
with self.redis.pipeline(transaction=False) as pipeline:
for span in spans:
pipeline.pfadd(span.key + ":u", identifier)
if span.expiry is not None:
pipeline.expire(
span.key + ":u",
int(span.expiry) - int(time.time())
)
pipeline.execute()
def incr(self, span, amount=1):
with self.redis.pipeline(transaction=False) as pipeline:
pipeline.incr(span.key + ":c")
if span.expiry is not None:
pipeline.expire(
span.key + ":c",
int(span.expiry) - int(time.time())
)
pipeline.execute()
def incr_multi(self, spans, amount=1):
with self.redis.pipeline(transaction=False) as pipeline:
for span in spans:
pipeline.incr(span.key + ":c")
if span.expiry is not None:
pipeline.expire(
span.key + ":c",
int(span.expiry) - int(time.time())
)
pipeline.execute()
def cardinality(self, span):
value = self.redis.pfcount(span.key + ":u")
return int(value) if value is not None else 0
class RiakStorage(Storage):
def __init__(self, riak):
self.riak = riak
self.counter_bucket = self.riak.bucket_type(
"maps"
).bucket("sifr_counter")
self.unique_counters_bucket = self.riak.bucket_type(
"maps"
).bucket("sifr_unique_counter")
self.uniques_bucket = self.riak.bucket_type(
"maps"
).bucket("sifr_uniques")
def count(self, span):
map = self.counter_bucket.get(span.namespace)
counter = map.counters.get(span.timestamp)
return counter.value
def incr(self, span, amount=1):
map = self.counter_bucket.new(span.namespace)
counter = map.counters.get(span.timestamp)
counter.increment()
map.store()
def get_maps(self, bucket, spans, create=False):
maps = {}
namespaces = set(span.namespace for span in spans)
for namespace in namespaces:
if create:
maps[namespace] = bucket.new(namespace)
else:
maps[namespace] = bucket.get(namespace)
return maps
def track_multi(self, spans, identifier):
maps = self.get_maps(self.uniques_bucket, spans, True)
for span in spans:
riak_set = maps[span.namespace].sets.get(span.timestamp)
riak_set.add(str(identifier))
for map in maps.values():
map.store()
def incr_unique_multi(self, spans, identifier):
maps = self.get_maps(self.unique_counters_bucket, spans, True)
for span in spans:
counter = maps[span.namespace].sets.get(span.timestamp)
counter.add(str(identifier))
for map in maps.values():
map.store()
def track(self, span, identifier):
map = self.uniques_bucket.new(span.namespace)
riak_set = map.sets.get(span.timestamp)
riak_set.add(str(identifier))
map.store()
def incr_unique(self, span, identifier):
map = self.unique_counters_bucket.new(span.namespace)
riak_set = map.sets.get(span.timestamp)
riak_set.add(str(identifier))
map.store()
def incr_multi(self, spans, amount=1):
maps = self.get_maps(self.counter_bucket, spans, True)
for span in spans:
counter = maps[span.namespace].counters.get(span.timestamp)
counter.increment(amount)
for map in maps.values():
map.store()
def cardinality(self, span):
map = self.unique_counters_bucket.get(span.namespace)
riak_set = map.sets.get(span.timestamp)
return len(riak_set)
def uniques(self, span):
map = self.uniques_bucket.get(span.namespace)
riak_set = map.sets.get(span.timestamp)
return riak_set.value
| {
"content_hash": "a53ffe1aec8e4b800d7f1ca27956e7ef",
"timestamp": "",
"source": "github",
"line_count": 286,
"max_line_length": 71,
"avg_line_length": 32.27972027972028,
"alnum_prop": 0.575498266897747,
"repo_name": "alisaifee/sifr",
"id": "d231c746955309f8ef8ba25901ecc03fc283b86f",
"size": "9232",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sifr/storage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "77952"
},
{
"name": "Shell",
"bytes": "1507"
}
],
"symlink_target": ""
} |
"""Tests for the private `_RestructuredDataset` transformation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
# TODO(b/117581999): Add eager specific test.
class RestructuredDatasetTest(test_base.DatasetTestBase):
@test_util.run_deprecated_v1
def testRestructureDataset(self):
components = (array_ops.placeholder(dtypes.int32),
(array_ops.placeholder(dtypes.int32, shape=[None]),
array_ops.placeholder(dtypes.int32, shape=[20, 30])))
dataset = dataset_ops.Dataset.from_tensors(components)
i32 = dtypes.int32
test_cases = [((i32, i32, i32), None),
(((i32, i32), i32), None),
((i32, i32, i32), (None, None, None)),
((i32, i32, i32), ([17], [17], [20, 30]))]
for new_types, new_shape_lists in test_cases:
# pylint: disable=protected-access
new = batching._RestructuredDataset(dataset, new_types, new_shape_lists)
# pylint: enable=protected-access
self.assertEqual(new_types, dataset_ops.get_legacy_output_types(new))
if new_shape_lists is not None:
for expected_shape_list, shape in zip(
nest.flatten(new_shape_lists),
nest.flatten(dataset_ops.get_legacy_output_shapes(new))):
if expected_shape_list is None:
self.assertIs(None, shape.ndims)
else:
self.assertEqual(expected_shape_list, shape.as_list())
fail_cases = [((i32, dtypes.int64, i32), None),
((i32, i32, i32, i32), None),
((i32, i32, i32), ((None, None), None)),
((i32, i32, i32), (None, None, None, None)),
((i32, i32, i32), (None, [None], [21, 30]))]
for new_types, new_shape_lists in fail_cases:
with self.assertRaises(ValueError):
# pylint: disable=protected-access
new = batching._RestructuredDataset(dataset, new_types, new_shape_lists)
# pylint: enable=protected-access
if __name__ == "__main__":
test.main()
| {
"content_hash": "bf008c16d3062e97e6eb880f5bfbddb4",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 80,
"avg_line_length": 40.68852459016394,
"alnum_prop": 0.6434327155519742,
"repo_name": "ageron/tensorflow",
"id": "88c14f0a6ead8e4c07487264bc17af0b6f81cc7b",
"size": "3171",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/python/data/experimental/kernel_tests/restructured_dataset_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3560"
},
{
"name": "Batchfile",
"bytes": "14734"
},
{
"name": "C",
"bytes": "644380"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "59281238"
},
{
"name": "CMake",
"bytes": "207169"
},
{
"name": "Dockerfile",
"bytes": "75509"
},
{
"name": "Go",
"bytes": "1501606"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "908340"
},
{
"name": "Jupyter Notebook",
"bytes": "2510253"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "94466"
},
{
"name": "Objective-C",
"bytes": "60069"
},
{
"name": "Objective-C++",
"bytes": "118322"
},
{
"name": "PHP",
"bytes": "15024"
},
{
"name": "Pascal",
"bytes": "617"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "46230508"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "838"
},
{
"name": "Shell",
"bytes": "481859"
},
{
"name": "Smarty",
"bytes": "27249"
},
{
"name": "Swift",
"bytes": "53109"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from .models import Order, OrderItem
class OrderItemInline(admin.TabularInline):
model = OrderItem
raw_id_fields = ['product']
class OrderAdmin(admin.ModelAdmin):
list_display = ['id', 'first_name', 'last_name', 'email', 'address', 'postal_code', 'city', 'paid', 'created', 'updated']
list_filter = ['paid', 'created', 'updated']
inlines = [OrderItemInline]
admin.site.register(Order, OrderAdmin)
| {
"content_hash": "417aebef1b92249c12f15cfd581813ed",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 125,
"avg_line_length": 30.6,
"alnum_prop": 0.6840958605664488,
"repo_name": "janusnic/dj-21v",
"id": "8ce1eb2c0ff843aee9e8462e08d79d0141b90fce",
"size": "459",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "unit_14/mysite/orders/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "607197"
},
{
"name": "HTML",
"bytes": "352620"
},
{
"name": "JavaScript",
"bytes": "4098502"
},
{
"name": "Python",
"bytes": "1906453"
}
],
"symlink_target": ""
} |
from __future__ import print_function, unicode_literals
from werkzeug.utils import secure_filename
from flask import request, render_template, current_app
from weblab.core.wl import weblab_api
@weblab_api.route_webclient('/visir.html')
def visir():
return render_template("webclient/visir.html")
@weblab_api.route_webclient('/fonts/<path:path>')
def fonts(path):
blueprint = current_app.blueprints['core_webclient']
return blueprint.send_static_file("gen/fonts/{}".format(secure_filename(path)))
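# Note (not in the original source): secure_filename() above strips path
# separators and ".." components, so a request such as
# /fonts/../../etc/passwd cannot escape the gen/fonts directory.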
| {
"content_hash": "500056715f02771e1ed46f2fddb6e306",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 83,
"avg_line_length": 34.13333333333333,
"alnum_prop": 0.755859375,
"repo_name": "weblabdeusto/weblabdeusto",
"id": "601a64837efc003a280b96b5f300e3b64abe8f42",
"size": "512",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "server/src/weblab/core/webclient/view_visir.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "ASP.NET",
"bytes": "4785"
},
{
"name": "ActionScript",
"bytes": "8508"
},
{
"name": "Batchfile",
"bytes": "7753"
},
{
"name": "C",
"bytes": "19456"
},
{
"name": "C#",
"bytes": "315160"
},
{
"name": "C++",
"bytes": "9547"
},
{
"name": "CSS",
"bytes": "202991"
},
{
"name": "CoffeeScript",
"bytes": "39146"
},
{
"name": "Go",
"bytes": "7076"
},
{
"name": "HTML",
"bytes": "620835"
},
{
"name": "Java",
"bytes": "856300"
},
{
"name": "JavaScript",
"bytes": "1606001"
},
{
"name": "Less",
"bytes": "13422"
},
{
"name": "Makefile",
"bytes": "24995"
},
{
"name": "Mako",
"bytes": "1236"
},
{
"name": "PHP",
"bytes": "159985"
},
{
"name": "Python",
"bytes": "3739523"
},
{
"name": "Shell",
"bytes": "7880"
},
{
"name": "Smarty",
"bytes": "42585"
},
{
"name": "VHDL",
"bytes": "5874"
}
],
"symlink_target": ""
} |
import sys
import json
import re
import doekbase.data_api
from doekbase.data_api.annotation.genome_annotation.api import GenomeAnnotationAPI , GenomeAnnotationClientAPI
from doekbase.data_api.sequence.assembly.api import AssemblyAPI , AssemblyClientAPI
ga = GenomeAnnotationAPI({
'workspace_service_url' : sys.argv[1],
'shock_service_url' : sys.argv[2],
'handle_service_url' : sys.argv[3]
},token = sys.argv[4],ref = sys.argv[5]);
gto = {
'id' : sys.argv[6],
'scientific_name' : "Unknown species",
'domain' : "Unknown",
'genetic_code' : 11,
'dna_size' : 0,
'num_contigs' : 0,
'contig_lengths' : [],
'contig_ids' : [],
'source' : "KBase",
'source_id' : sys.argv[6],
# 'md5' : "",
'taxonomy' : "Unknown",
'gc_content' : 0.5,
'complete' : 1,
'features' : []
};
taxon = {};
success = 0;
try:
taxon = ga.get_taxon();
success = 1;
except Exception, e:
success = 0
if success == 1:
try:
gto['scientific_name'] = taxon.get_scientific_name()
except Exception, e:
success = 0
try:
gto['domain'] = taxon.get_domain()
except Exception, e:
success = 0
try:
gto['genetic_code'] = taxon.get_genetic_code()
except Exception, e:
success = 0
try:
gto['taxonomy'] = ",".join(taxon.get_scientific_lineage())
except Exception, e:
success = 0
assemb = {};
success = 0;
try:
assemb = ga.get_assembly();
success = 1;
except Exception, e:
success = 0
if success == 1:
gto['contigobj'] = {
'id' : sys.argv[6],
'name' : sys.argv[6],
'source' : 'KBase',
'source_id' : sys.argv[6],
'md5' : "",
'type' : "Genome",
'contigs' : []
};
contigdata = assemb.get_contigs();
for contigid in contigdata.keys():
newcontig = {
'id' : contigdata[contigid]['contig_id'],
'length' : contigdata[contigid]['length'],
'md5' : contigdata[contigid]['md5'],
'sequence' : contigdata[contigid]['sequence'],
'genetic_code' : sys.argv[7],
'replicon_type' : "linear",
'replicon_geometry' : "linear",
'name' : contigdata[contigid]['contig_id'],
'description' : contigdata[contigid]['description'],
'complete' : 1
};
if contigdata[contigid]['is_circular'] == 1:
newcontig['replicon_type'] = "circular";
newcontig['replicon_geometry'] = "circular";
gto['contigobj']['contigs'].append(newcontig);
try:
gto['dna_size'] = taxon.get_dna_size()
except Exception, e:
success = 0
try:
gto['num_contigs'] = taxon.get_number_contigs()
except Exception, e:
success = 0
try:
gto['contig_lengths'] = taxon.get_contig_lengths()
except Exception, e:
success = 0
try:
gto['contig_ids'] = taxon.get_contig_ids()
except Exception, e:
success = 0
try:
gto['gc_content'] = assemb.get_gc_content()
except Exception, e:
success = 0
try:
extsource = assemb.get_external_source_info()
gto['contigobj']['source'] = extsource["external_source"]
gto['contigobj']['source_id'] = extsource["external_source_id"]
gto['contigobj']['name'] = extsource["external_source_id"]
gto['source'] = extsource["external_source"]
gto['source_id'] = extsource["external_source_id"]
except Exception, e:
success = 0
features = [];
success = 0;
try:
features = ga.get_features();
success = 1
except Exception, e:
success = 0
prot = ga.get_proteins();
if success == 1:
for ftrid in features.keys():
ftrdata = features[ftrid]
if 'feature_type' in ftrdata.keys():
newfeature = {'id' : ftrid,'type' : ftrdata['feature_type'],'function' : "Unknown",'location' : []}
array = ftrid.split("_");
protid = 'protein_'+array[1];
if array[0] == 'CDS' and protid in prot.keys():
newfeature['protein_translation'] = prot[protid]['protein_amino_acid_sequence']
if 'feature_ontology_terms' in ftrdata.keys():
newfeature['ontology_terms'] = ftrdata['feature_ontology_terms']
if 'feature_function' in ftrdata.keys():
newfeature['function'] = ftrdata['feature_function']
if 'feature_dna_sequence' in ftrdata.keys():
newfeature['dna_sequence'] = ftrdata['feature_dna_sequence']
if 'feature_locations' in ftrdata.keys():
for loc in ftrdata['feature_locations']:
newfeature['location'].append([loc['contig_id'],loc['start'],loc['strand'],loc['length']])
#if 'feature_aliases' in ftrdata.keys():
#newfeature['protein_translation'] = ftrdata['feature_aliases']
if 'feature_md5' in ftrdata.keys():
if len(ftrdata['feature_md5']) > 0:
newfeature['md5'] = ftrdata['feature_md5']
if 'feature_dna_sequence_length' in ftrdata.keys():
newfeature['dna_sequence_length'] = ftrdata['feature_dna_sequence_length']
gto['features'].append(newfeature);
#print json.dumps(prot, ensure_ascii=False)
print json.dumps(gto, ensure_ascii=False)
print "SUCCESS" | {
"content_hash": "3857562cb73c5947229b032ab12ece64",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 110,
"avg_line_length": 28.30909090909091,
"alnum_prop": 0.654035538428602,
"repo_name": "mdejongh/MEModeling",
"id": "459085d272d3916e16b2c7932f8ef75bec3190b1",
"size": "4764",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pybin/get_genome.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "12840"
},
{
"name": "JavaScript",
"bytes": "3887"
},
{
"name": "Makefile",
"bytes": "2924"
},
{
"name": "Perl",
"bytes": "504728"
},
{
"name": "Python",
"bytes": "14073"
},
{
"name": "Ruby",
"bytes": "435"
},
{
"name": "Shell",
"bytes": "1670"
}
],
"symlink_target": ""
} |
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("app_rename_column", "0001_initial")]
operations = [
migrations.RenameField(model_name="a", old_name="field", new_name="renamed")
]
| {
"content_hash": "ef874833f188fdc8839311dc70558eb7",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 84,
"avg_line_length": 24.5,
"alnum_prop": 0.6775510204081633,
"repo_name": "3YOURMIND/django-migration-linter",
"id": "b7a0a4c9c60873eb9eb450726a8edf3771eb39f8",
"size": "297",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_project/app_rename_column/migrations/0002_auto_20190414_1502.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "143944"
}
],
"symlink_target": ""
} |
import smtplib
from email.mime.text import MIMEText
#
# A basic Mailtrap send_mail implementation.
#
def send_mail(from_email, to_emails, subject, plain_body, html_body):
"""
Feel free to override me!
:param from_email: The sender's email address. NOTE that this may
be flat-out incorrect, but it is what Vaultier thinks it is.
:param to_emails: List of the recipients' email addresses. Unlike
from_email, this should be correct.
:param subject: The email subject
:param plain_body: Plain text version of the email body
:param html_body: HTML version of the email body
"""
# Mailtrap username and password goes here
username = 'CHANGE ME'
password = 'CHANGE ME'
# Fix incorrect site url in message. You can discard this code
# and write your own cleaning code if you wish.
wrong_domain = 'example.com'
wrong_url = 'http://' + wrong_domain
real_domain = 'example.com' # your domain here
real_url = 'https://' + real_domain
plain_body = plain_body.replace(wrong_url, real_url)
plain_body = plain_body.replace(wrong_domain, real_domain)
html_body = html_body.replace(wrong_url, real_url)
html_body = html_body.replace(wrong_domain, real_domain)
from_email = from_email.replace(wrong_domain, real_domain)
# Build message
msg = MIMEText(plain_body)
msg['Subject'] = subject
msg['From'] = from_email
msg['To'] = ', '.join(to_emails)
# The actual mail send
server = smtplib.SMTP('mailtrap.io', 2525)
server.starttls()
server.login(username, password)
server.sendmail(from_email, to_emails, msg.as_string())
server.quit()
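# --- Usage sketch (not part of the original module) ---
# A minimal, hypothetical example of calling send_mail() directly; every
# address, subject and body below is a placeholder, and the Mailtrap
# credentials above must be filled in before this would actually deliver.
if __name__ == "__main__":
    send_mail(
        from_email="noreply@example.com",
        to_emails=["user@example.com"],
        subject="Vaultier notification",
        plain_body="Hello from Vaultier (plain text).",
        html_body="<p>Hello from <b>Vaultier</b> (HTML).</p>",
    )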
| {
"content_hash": "f3f08678179a9d3da9703bbdd0bd5025",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 70,
"avg_line_length": 34.916666666666664,
"alnum_prop": 0.6730310262529833,
"repo_name": "tannerntannern/docker-vaultier",
"id": "308c8dc23bfcdfcd70f40efc02b714763524ef02",
"size": "1676",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "send_mail_examples/send_mail.mailtrap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9208"
},
{
"name": "Shell",
"bytes": "1254"
}
],
"symlink_target": ""
} |
import sys
sys.path.append("./")
# For JS
import getopt
import jsmin, mergejs
# For CSS
import re
# For file moves
import shutil
import os
def mergeCSS(inputFilenames, outputFilename):
output = ""
for inputFilename in inputFilenames:
output += file(inputFilename, "r").read()
file(outputFilename, "w").write(output)
return outputFilename
def cleanline(theLine):
""" Kills line breaks, tabs, and double spaces """
p = re.compile("(\n|\r|\t|\f|\v)+")
m = p.sub("", theLine)
# Kills double spaces
p = re.compile("( )+")
m = p.sub(" ", m)
# Removes last semicolon before }
p = re.compile("(; }|;})+")
m = p.sub("}", m)
# Removes space before {
p = re.compile("({ )+")
m = p.sub("{", m)
# Removes all comments
p = re.compile("/\*([^*]|[\r\n]|(\*+([^*/]|[\r\n])))*\*+/")
m = p.sub("", m)
# Strip off the Charset
p = re.compile("@CHARSET .*;")
m = p.sub("", m)
# Strip spaces before the {
p = re.compile(" {")
m = p.sub("{", m)
# Strip space after :
p = re.compile(": ")
m = p.sub(":", m)
# Strip space after ,
p = re.compile(", ")
m = p.sub(",", m)
# Strip space after ;
p = re.compile("; ")
m = p.sub(";", m)
return m
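# --- Illustrative example (not part of the original script) ---
# A quick sanity check of cleanline() on a hypothetical CSS fragment; under
# Python 2 this prints "h1{color:red}": line breaks and tabs are stripped,
# the space before "{" and after ":" is removed, and the ";" before "}" is
# dropped.
#
#   print cleanline("h1 {\n\tcolor: red;\n}\n")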
def compressCSS(inputFilename, outputFilename):
theFile = file(inputFilename, "r").read()
output = ""
for line in theFile:
output = output + cleanline(line)
# Once more, clean the entire file string
_output = cleanline(output)
file(outputFilename, "w").write(_output)
return
def dojs(dogis = False, warnings = True):
""" Minifies the JavaScript """
# Do we have local version of the Closure Compiler available?
use_compressor = "jsmin" # Fallback
try:
import closure
use_compressor = "closure"
print "using local Closure Compiler"
except Exception, E:
print "No closure (%s)" % E
print "Download from http://closure-compiler.googlecode.com/files/compiler-latest.zip"
try:
import closure_ws
use_compressor = "closure_ws"
print "Using Closure via Web Service - limited to files < 1Mb!"
except ImportError:
print "No closure_ws"
if use_compressor == "closure":
if not warnings:
closure.extra_params = "--warning_level QUIET"
minimize = closure.minimize
elif use_compressor == "closure_ws":
minimize = closure_ws.minimize
elif use_compressor == "jsmin":
minimize = jsmin.jsmin
sourceDirectory = ".."
configFilename = "sahana.js.cfg"
outputFilename = "S3.min.js"
sourceDirectorydataTables = "../S3"
configFilenamedataTables = "sahana.js.dataTables.cfg"
outputFilenamedataTables = "s3.dataTables.min.js"
# Merge JS files
print "Merging Core libraries."
merged = mergejs.run(sourceDirectory, None, configFilename)
# Compress JS files
print "Compressing - JS"
minimized = minimize(merged)
# Add license
print "Adding license file."
minimized = file("license.txt").read() + minimized
# Print to output files
print "Writing to %s." % outputFilename
file(outputFilename, "w").write(minimized)
# Remove old JS files
print "Deleting %s." % outputFilename
try:
os.remove("../S3/%s" % outputFilename)
except:
pass
# Move new JS files
print "Moving new JS files"
shutil.move(outputFilename, "../S3")
# Also do dataTables
print "Compressing dataTables"
mergeddataTables = mergejs.run(sourceDirectorydataTables,
None,
configFilenamedataTables)
minimizeddataTables = minimize(mergeddataTables)
file(outputFilenamedataTables, "w").write(minimizeddataTables)
try:
os.remove("../S3/%s" % outputFilenamedataTables)
except:
pass
shutil.move(outputFilenamedataTables, "../S3")
# Also do s3.embed_component.js
print "Compressing s3.embed_component.js"
inputFilename = os.path.join("..", "S3", "s3.embed_component.js")
outputFilename = "s3.embed_component.min.js"
input = file(inputFilename, "r").read()
minimized = minimize(input)
file(outputFilename, "w").write(minimized)
try:
os.remove("../S3/%s" % outputFilename)
except:
pass
shutil.move(outputFilename, "../S3")
# Also do s3.report.js
print "Compressing s3.report.js"
inputFilename = os.path.join("..", "S3", "s3.report.js")
outputFilename = "s3.report.min.js"
input = file(inputFilename, "r").read()
minimized = minimize(input)
file(outputFilename, "w").write(minimized)
try:
os.remove("../S3/%s" % outputFilename)
except:
pass
shutil.move(outputFilename, "../S3")
# Also do s3.search.js
print "Compressing s3.search.js"
inputFilename = os.path.join("..", "S3", "s3.search.js")
outputFilename = "s3.search.min.js"
input = file(inputFilename, "r").read()
minimized = minimize(input)
file(outputFilename, "w").write(minimized)
try:
os.remove("../S3/%s" % outputFilename)
except:
pass
shutil.move(outputFilename, "../S3")
# Also do s3.select_person.js
print "Compressing s3.select_person.js"
inputFilename = os.path.join("..", "S3", "s3.select_person.js")
outputFilename = "s3.select_person.min.js"
input = file(inputFilename, "r").read()
minimized = minimize(input)
file(outputFilename, "w").write(minimized)
try:
os.remove("../S3/%s" % outputFilename)
except:
pass
shutil.move(outputFilename, "../S3")
# Also do s3.timeline.js
print "Compressing s3.timeline.js"
inputFilename = os.path.join("..", "S3", "s3.timeline.js")
outputFilename = "s3.timeline.min.js"
input = file(inputFilename, "r").read()
minimized = minimize(input)
file(outputFilename, "w").write(minimized)
try:
os.remove("../S3/%s" % outputFilename)
except:
pass
shutil.move(outputFilename, "../S3")
if dogis:
sourceDirectoryGIS = "../S3"
sourceDirectoryOpenLayers = "../gis/openlayers/lib"
sourceDirectoryOpenLayersExten = "../gis"
sourceDirectoryMGRS = "../gis"
sourceDirectoryGeoExt = "../gis/GeoExt/lib"
sourceDirectoryGeoExtux = "../gis/GeoExt/ux"
sourceDirectoryGxp = "../gis/gxp"
#sourceDirectoryGeoExplorer = "../gis/GeoExplorer"
configFilenameGIS = "sahana.js.gis.cfg"
configFilenameOpenLayers = "sahana.js.ol.cfg"
configFilenameOpenLayersExten = "sahana.js.ol_exten.cfg"
configFilenameMGRS = "sahana.js.mgrs.cfg"
configFilenameGeoExt = "sahana.js.geoext.cfg"
configFilenameGeoExtux = "sahana.js.geoextux.cfg"
configFilenameGxpMin = "sahana.js.gxp.cfg"
configFilenameGxpFull = "sahana.js.gxpfull.cfg"
#configFilenameGeoExplorer = "sahana.js.geoexplorer.cfg"
outputFilenameGIS = "s3.gis.min.js"
outputFilenameOpenLayers = "OpenLayers.js"
outputFilenameMGRS = "MGRS.min.js"
outputFilenameGeoExt = "GeoExt.js"
outputFilenameGxp = "gxp.js"
#outputFilenameGeoExplorer = "GeoExplorer.js"
# Merge GIS JS Files
print "Merging GIS scripts."
mergedGIS = mergejs.run(sourceDirectoryGIS,
None,
configFilenameGIS)
print "Merging OpenLayers libraries."
mergedOpenLayers = mergejs.run(sourceDirectoryOpenLayers,
None,
configFilenameOpenLayers)
mergedOpenLayersExten = mergejs.run(sourceDirectoryOpenLayersExten,
None,
configFilenameOpenLayersExten)
print "Merging MGRS libraries."
mergedMGRS = mergejs.run(sourceDirectoryMGRS,
None,
configFilenameMGRS)
print "Merging GeoExt libraries."
mergedGeoExt = mergejs.run(sourceDirectoryGeoExt,
None,
configFilenameGeoExt)
mergedGeoExtux = mergejs.run(sourceDirectoryGeoExtux,
None,
configFilenameGeoExtux)
print "Merging gxp libraries."
mergedGxpMin = mergejs.run(sourceDirectoryGxp,
None,
configFilenameGxpMin)
mergedGxpFull = mergejs.run(sourceDirectoryGxp,
None,
configFilenameGxpFull)
#print "Merging GeoExplorer libraries."
#mergedGeoExplorer = mergejs.run(sourceDirectoryGeoExplorer,
# None,
# configFilenameGeoExplorer)
# Compress JS files
print "Compressing - GIS JS"
minimizedGIS = minimize(mergedGIS)
print "Compressing - OpenLayers JS"
if use_compressor == "closure_ws":
# Limited to files < 1Mb!
minimizedOpenLayers = jsmin.jsmin("%s\n%s" % (mergedOpenLayers,
mergedOpenLayersExten))
else:
minimizedOpenLayers = minimize("%s\n%s" % (mergedOpenLayers,
mergedOpenLayersExten))
print "Compressing - MGRS JS"
minimizedMGRS = minimize(mergedMGRS)
print "Compressing - GeoExt JS"
minimizedGeoExt = minimize("%s\n%s\n%s" % (mergedGeoExt,
mergedGeoExtux,
mergedGxpMin))
print "Compressing - gxp JS"
minimizedGxp = minimize(mergedGxpFull)
#print "Compressing - GeoExplorer JS"
#minimizedGeoExplorer = minimize(mergedGeoExplorer)
# Add license
#minimizedGIS = file("license.gis.txt").read() + minimizedGIS
# Print to output files
print "Writing to %s." % outputFilenameGIS
file(outputFilenameGIS, "w").write(minimizedGIS)
print "Writing to %s." % outputFilenameOpenLayers
file(outputFilenameOpenLayers, "w").write(minimizedOpenLayers)
print "Writing to %s." % outputFilenameMGRS
file(outputFilenameMGRS, "w").write(minimizedMGRS)
print "Writing to %s." % outputFilenameGeoExt
file(outputFilenameGeoExt, "w").write(minimizedGeoExt)
print "Writing to %s." % outputFilenameGxp
file(outputFilenameGxp, "w").write(minimizedGxp)
#print "Writing to %s." % outputFilenameGeoExplorer
#file(outputFilenameGeoExplorer, "w").write(minimizedGeoExplorer)
# Move new JS files
print "Deleting %s." % outputFilenameGIS
try:
os.remove("../S3/%s" % outputFilenameGIS)
except:
pass
print "Moving new GIS JS files"
shutil.move(outputFilenameGIS, "../S3")
print "Deleting %s." % outputFilenameOpenLayers
try:
os.remove("../gis/%s" % outputFilenameOpenLayers)
except:
pass
print "Moving new OpenLayers JS files"
shutil.move(outputFilenameOpenLayers, "../gis")
print "Deleting %s." % outputFilenameMGRS
try:
os.remove("../gis/%s" % outputFilenameMGRS)
except:
pass
print "Moving new MGRS JS files"
shutil.move(outputFilenameMGRS, "../gis")
print "Deleting %s." % outputFilenameGeoExt
try:
os.remove("../gis/%s" % outputFilenameGeoExt)
except:
pass
print "Moving new GeoExt JS files"
shutil.move(outputFilenameGeoExt, "../gis")
print "Deleting %s." % outputFilenameGxp
try:
os.remove("../gis/%s" % outputFilenameGxp)
except:
pass
print "Moving new gxp JS files"
shutil.move(outputFilenameGxp, "../gis")
#print "Deleting %s." % outputFilenameGeoExplorer
#try:
# os.remove("../gis/%s" % outputFilenameGeoExplorer)
#except:
# pass
#print "Moving new GeoExplorer JS files"
#shutil.move(outputFilenameGeoExplorer, "../gis")
def docss():
""" Compresses the CSS files """
listCSS = []
f = open("sahana.css.cfg", "r")
files = f.readlines()
f.close()
for file in files[:-1]:
p = re.compile("(\n|\r|\t|\f|\v)+")
file = p.sub("", file)
listCSS.append("../../styles/%s" % file)
outputFilenameCSS = "sahana.min.css"
# Merge CSS files
print "Merging Core styles."
mergedCSS = mergeCSS(listCSS, outputFilenameCSS)
# Compress CSS files
print "Writing to %s." % outputFilenameCSS
compressCSS(mergedCSS, outputFilenameCSS)
# Move files to correct locations
print "Deleting %s." % outputFilenameCSS
try:
os.remove("../../styles/S3/%s" % outputFilenameCSS)
except:
pass
print "Moving new %s." % outputFilenameCSS
shutil.move(outputFilenameCSS, "../../styles/S3")
def main(argv):
try:
parameter1 = argv[0]
except:
parameter1 = "ALL"
try:
if(argv[1] == "DOGIS"):
parameter2 = True
else:
parameter2 = False
except:
parameter2 = True
closure_warnings = True
if "NOWARN" in argv:
closure_warnings = False
if parameter1 in ("ALL", "NOWARN"):
dojs(warnings=closure_warnings)
docss()
else:
if parameter1 == "CSS":
docss()
else:
dojs(parameter2, warnings=closure_warnings)
print "Done."
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
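# --- Usage sketch (not part of the original script) ---
# Hypothetical command lines, run from this script's own directory so the
# relative "../S3" and "../gis" paths resolve:
#
#   python build.sahana.py             # minify the core JS bundles and the CSS
#   python build.sahana.py CSS         # only compress the CSS
#   python build.sahana.py gis         # any other first argument: core + GIS JS, no CSS
#   python build.sahana.py ALL NOWARN  # as the default, with Closure warnings suppressed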
| {
"content_hash": "0c9821d9d00cdec33eaf5e8627a0b665",
"timestamp": "",
"source": "github",
"line_count": 442,
"max_line_length": 94,
"avg_line_length": 31.721719457013574,
"alnum_prop": 0.5753512588260467,
"repo_name": "flavour/ssf",
"id": "50d80ddb0cafa253224ee221bb75f4b25c84643e",
"size": "14144",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "static/scripts/tools/build.sahana.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "9763120"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Python",
"bytes": "21558751"
},
{
"name": "Shell",
"bytes": "1171"
}
],
"symlink_target": ""
} |
"""A class that performs TLS-SNI-01 challenges for Nginx"""
import itertools
import logging
import os
from certbot import errors
from certbot.plugins import common
from certbot_nginx import obj
from certbot_nginx import nginxparser
logger = logging.getLogger(__name__)
class NginxTlsSni01(common.TLSSNI01):
"""TLS-SNI-01 authenticator for Nginx
:ivar configurator: NginxConfigurator object
:type configurator: :class:`~nginx.configurator.NginxConfigurator`
:ivar list achalls: Annotated
class:`~certbot.achallenges.KeyAuthorizationAnnotatedChallenge`
challenges
:param list indices: Meant to hold indices of challenges in a
larger array. NginxTlsSni01 is capable of solving many challenges
        at once, which causes an indexing issue within NginxConfigurator,
        which must return all responses in order. Imagine NginxConfigurator
maintaining state about where all of the http-01 Challenges,
TLS-SNI-01 Challenges belong in the response array. This is an
optional utility.
:param str challenge_conf: location of the challenge config file
"""
def perform(self):
"""Perform a challenge on Nginx.
:returns: list of :class:`certbot.acme.challenges.TLSSNI01Response`
:rtype: list
"""
if not self.achalls:
return []
addresses = []
default_addr = "{0} default_server ssl".format(
self.configurator.config.tls_sni_01_port)
for achall in self.achalls:
vhost = self.configurator.choose_vhost(achall.domain)
if vhost is None:
logger.error(
"No nginx vhost exists with server_name matching: %s. "
"Please specify server_names in the Nginx config.",
achall.domain)
return None
for addr in vhost.addrs:
if addr.default:
addresses.append([obj.Addr.fromstring(default_addr)])
break
else:
addresses.append(list(vhost.addrs))
# Create challenge certs
responses = [self._setup_challenge_cert(x) for x in self.achalls]
# Set up the configuration
self._mod_config(addresses)
# Save reversible changes
self.configurator.save("SNI Challenge", True)
return responses
def _mod_config(self, ll_addrs):
"""Modifies Nginx config to include challenge server blocks.
:param list ll_addrs: list of lists of
:class:`certbot_nginx.obj.Addr` to apply
:raises .MisconfigurationError:
Unable to find a suitable HTTP block in which to include
authenticator hosts.
"""
# Add the 'include' statement for the challenges if it doesn't exist
# already in the main config
included = False
include_directive = ['\n', 'include', ' ', self.challenge_conf]
root = self.configurator.parser.loc["root"]
bucket_directive = ['\n', 'server_names_hash_bucket_size', ' ', '128']
main = self.configurator.parser.parsed[root]
for key, body in main:
if key == ['http']:
found_bucket = False
for k, _ in body:
if k == bucket_directive[1]:
found_bucket = True
if not found_bucket:
body.insert(0, bucket_directive)
if include_directive not in body:
body.insert(0, include_directive)
included = True
break
if not included:
raise errors.MisconfigurationError(
'LetsEncrypt could not find an HTTP block to include '
'TLS-SNI-01 challenges in %s.' % root)
config = [self._make_server_block(pair[0], pair[1])
for pair in itertools.izip(self.achalls, ll_addrs)]
config = nginxparser.UnspacedList(config)
self.configurator.reverter.register_file_creation(
True, self.challenge_conf)
with open(self.challenge_conf, "w") as new_conf:
nginxparser.dump(config, new_conf)
def _make_server_block(self, achall, addrs):
"""Creates a server block for a challenge.
:param achall: Annotated TLS-SNI-01 challenge
:type achall:
:class:`certbot.achallenges.KeyAuthorizationAnnotatedChallenge`
:param list addrs: addresses of challenged domain
:class:`list` of type :class:`~nginx.obj.Addr`
:returns: server block for the challenge host
:rtype: list
"""
document_root = os.path.join(
self.configurator.config.work_dir, "tls_sni_01_page")
block = [['listen', ' ', str(addr)] for addr in addrs]
block.extend([['server_name', ' ',
achall.response(achall.account_key).z_domain],
# access and error logs necessary for
# integration testing (non-root)
['access_log', ' ', os.path.join(
self.configurator.config.work_dir, 'access.log')],
['error_log', ' ', os.path.join(
self.configurator.config.work_dir, 'error.log')],
['ssl_certificate', ' ', self.get_cert_path(achall)],
['ssl_certificate_key', ' ', self.get_key_path(achall)],
[['location', ' ', '/'], [['root', ' ', document_root]]]] +
self.configurator.parser.loc["ssl_options"])
return [['server'], block]
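# --- Illustrative output (not part of the original module) ---
# Roughly, the nested list returned by _make_server_block() is rendered by
# nginxparser.dump() into a server block of this shape; the port, domain and
# paths below are hypothetical placeholders:
#
#   server {
#       listen 443 default_server ssl;
#       server_name abc123.acme.invalid;
#       access_log /var/lib/letsencrypt/access.log;
#       error_log /var/lib/letsencrypt/error.log;
#       ssl_certificate /var/lib/letsencrypt/cert.pem;
#       ssl_certificate_key /var/lib/letsencrypt/key.pem;
#       location / { root /var/lib/letsencrypt/tls_sni_01_page; }
#       # ...plus any configured ssl_options
#   }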
| {
"content_hash": "2828b7f9c8db7ccef189940cfda05e03",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 81,
"avg_line_length": 35.77987421383648,
"alnum_prop": 0.5814730181051151,
"repo_name": "jtl999/certbot",
"id": "0543000ea0e83494ed694110f0ae1d808824b348",
"size": "5689",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "certbot-nginx/certbot_nginx/tls_sni_01.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "62302"
},
{
"name": "Augeas",
"bytes": "5245"
},
{
"name": "Batchfile",
"bytes": "35005"
},
{
"name": "DIGITAL Command Language",
"bytes": "133"
},
{
"name": "Groff",
"bytes": "222"
},
{
"name": "Makefile",
"bytes": "37245"
},
{
"name": "Nginx",
"bytes": "118585"
},
{
"name": "Python",
"bytes": "1477643"
},
{
"name": "Shell",
"bytes": "176838"
},
{
"name": "Standard ML",
"bytes": "256"
}
],
"symlink_target": ""
} |
class WebKitFinder(object):
def __init__(self, filesystem):
self._filesystem = filesystem
self._webkit_base = None
def webkit_base(self):
"""Returns the absolute path to the top of the WebKit tree.
Raises an AssertionError if the top dir can't be determined."""
# Note: This code somewhat duplicates the code in
# scm.find_checkout_root(). However, that code only works if the top
# of the SCM repository also matches the top of the WebKit tree. Some SVN users
# (the chromium test bots, for example), might only check out subdirectories like
# Tools/Scripts. This code will also work if there is no SCM system at all.
if not self._webkit_base:
module_path = self._filesystem.path_to_module(self.__module__)
tools_index = module_path.find('Tools')
assert tools_index != -1, "could not find location of this checkout from %s" % module_path
self._webkit_base = self._filesystem.normpath(module_path[0:tools_index - 1])
return self._webkit_base
def path_from_webkit_base(self, *comps):
return self._filesystem.join(self.webkit_base(), *comps)
def path_to_script(self, script_name):
"""Returns the relative path to the script from the top of the WebKit tree."""
# This is intentionally relative in order to force callers to consider what
# their current working directory is (and change to the top of the tree if necessary).
return self._filesystem.join("Tools", "Scripts", script_name)
def layout_tests_dir(self):
return self.path_from_webkit_base('LayoutTests')
def perf_tests_dir(self):
return self.path_from_webkit_base('PerformanceTests')
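# --- Usage sketch (not part of the original module) ---
# A minimal, hypothetical example of how webkitpy code typically wires this
# class up with its FileSystem abstraction; the import path reflects the
# usual webkitpy layout and may differ between checkouts.
#
#   from webkitpy.common.system.filesystem import FileSystem
#   finder = WebKitFinder(FileSystem())
#   finder.layout_tests_dir()                  # <checkout>/LayoutTests (absolute)
#   finder.path_to_script("run-webkit-tests")  # Tools/Scripts/run-webkit-tests (relative)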
| {
"content_hash": "3c1031f9e51c1770d83697a775f359cc",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 102,
"avg_line_length": 50.333333333333336,
"alnum_prop": 0.6589403973509934,
"repo_name": "leighpauls/k2cro4",
"id": "3705ef37faf7cb048add2bd2c642b01356366b1c",
"size": "3343",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "third_party/WebKit/Tools/Scripts/webkitpy/common/webkit_finder.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "3062"
},
{
"name": "AppleScript",
"bytes": "25392"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "68131038"
},
{
"name": "C",
"bytes": "242794338"
},
{
"name": "C#",
"bytes": "11024"
},
{
"name": "C++",
"bytes": "353525184"
},
{
"name": "Common Lisp",
"bytes": "3721"
},
{
"name": "D",
"bytes": "1931"
},
{
"name": "Emacs Lisp",
"bytes": "1639"
},
{
"name": "F#",
"bytes": "4992"
},
{
"name": "FORTRAN",
"bytes": "10404"
},
{
"name": "Java",
"bytes": "3845159"
},
{
"name": "JavaScript",
"bytes": "39146656"
},
{
"name": "Lua",
"bytes": "13768"
},
{
"name": "Matlab",
"bytes": "22373"
},
{
"name": "Objective-C",
"bytes": "21887598"
},
{
"name": "PHP",
"bytes": "2344144"
},
{
"name": "Perl",
"bytes": "49033099"
},
{
"name": "Prolog",
"bytes": "2926122"
},
{
"name": "Python",
"bytes": "39863959"
},
{
"name": "R",
"bytes": "262"
},
{
"name": "Racket",
"bytes": "359"
},
{
"name": "Ruby",
"bytes": "304063"
},
{
"name": "Scheme",
"bytes": "14853"
},
{
"name": "Shell",
"bytes": "9195117"
},
{
"name": "Tcl",
"bytes": "1919771"
},
{
"name": "Verilog",
"bytes": "3092"
},
{
"name": "Visual Basic",
"bytes": "1430"
},
{
"name": "eC",
"bytes": "5079"
}
],
"symlink_target": ""
} |
"""Support for Buienradar.nl weather service."""
from __future__ import annotations
import logging
from buienradar.constants import (
ATTRIBUTION,
CONDCODE,
CONDITION,
DETAILED,
EXACT,
EXACTNL,
FORECAST,
IMAGE,
MEASURED,
PRECIPITATION_FORECAST,
STATIONNAME,
TIMEFRAME,
VISIBILITY,
WINDGUST,
WINDSPEED,
)
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntity,
SensorEntityDescription,
SensorStateClass,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_ATTRIBUTION,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_NAME,
DEGREE,
IRRADIATION_WATTS_PER_SQUARE_METER,
LENGTH_KILOMETERS,
LENGTH_MILLIMETERS,
PERCENTAGE,
PRESSURE_HPA,
SPEED_KILOMETERS_PER_HOUR,
TEMP_CELSIUS,
UnitOfVolumetricFlux,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.util import dt as dt_util
from .const import CONF_TIMEFRAME, DEFAULT_TIMEFRAME
from .util import BrData
_LOGGER = logging.getLogger(__name__)
MEASURED_LABEL = "Measured"
TIMEFRAME_LABEL = "Timeframe"
SYMBOL = "symbol"
# Schedule next call after (minutes):
SCHEDULE_OK = 10
# When an error occurs, schedule the next call after (minutes):
SCHEDULE_NOK = 2
STATIONNAME_LABEL = "Stationname"
SENSOR_TYPES: tuple[SensorEntityDescription, ...] = (
SensorEntityDescription(
key="stationname",
name=STATIONNAME_LABEL,
),
# new in json api (>1.0.0):
SensorEntityDescription(
key="barometerfc",
name="Barometer value",
icon="mdi:gauge",
),
# new in json api (>1.0.0):
SensorEntityDescription(
key="barometerfcname",
name="Barometer",
icon="mdi:gauge",
),
# new in json api (>1.0.0):
SensorEntityDescription(
key="barometerfcnamenl",
name="Barometer",
icon="mdi:gauge",
),
SensorEntityDescription(
key="condition",
name="Condition",
),
SensorEntityDescription(
key="conditioncode",
name="Condition code",
),
SensorEntityDescription(
key="conditiondetailed",
name="Detailed condition",
),
SensorEntityDescription(
key="conditionexact",
name="Full condition",
),
SensorEntityDescription(
key="symbol",
name="Symbol",
),
# new in json api (>1.0.0):
SensorEntityDescription(
key="feeltemperature",
name="Feel temperature",
native_unit_of_measurement=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
),
SensorEntityDescription(
key="humidity",
name="Humidity",
native_unit_of_measurement=PERCENTAGE,
icon="mdi:water-percent",
state_class=SensorStateClass.MEASUREMENT,
),
SensorEntityDescription(
key="temperature",
name="Temperature",
native_unit_of_measurement=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
state_class=SensorStateClass.MEASUREMENT,
),
SensorEntityDescription(
key="groundtemperature",
name="Ground temperature",
native_unit_of_measurement=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
state_class=SensorStateClass.MEASUREMENT,
),
SensorEntityDescription(
key="windspeed",
name="Wind speed",
native_unit_of_measurement=SPEED_KILOMETERS_PER_HOUR,
device_class=SensorDeviceClass.SPEED,
icon="mdi:weather-windy",
state_class=SensorStateClass.MEASUREMENT,
),
SensorEntityDescription(
key="windforce",
name="Wind force",
native_unit_of_measurement="Bft",
icon="mdi:weather-windy",
),
SensorEntityDescription(
key="winddirection",
name="Wind direction",
icon="mdi:compass-outline",
),
SensorEntityDescription(
key="windazimuth",
name="Wind direction azimuth",
native_unit_of_measurement=DEGREE,
icon="mdi:compass-outline",
),
SensorEntityDescription(
key="pressure",
name="Pressure",
native_unit_of_measurement=PRESSURE_HPA,
icon="mdi:gauge",
state_class=SensorStateClass.MEASUREMENT,
),
SensorEntityDescription(
key="visibility",
name="Visibility",
native_unit_of_measurement=LENGTH_KILOMETERS,
device_class=SensorDeviceClass.DISTANCE,
state_class=SensorStateClass.MEASUREMENT,
),
SensorEntityDescription(
key="windgust",
name="Wind gust",
native_unit_of_measurement=SPEED_KILOMETERS_PER_HOUR,
device_class=SensorDeviceClass.SPEED,
icon="mdi:weather-windy",
),
SensorEntityDescription(
key="precipitation",
name="Precipitation",
native_unit_of_measurement=UnitOfVolumetricFlux.MILLIMETERS_PER_HOUR,
state_class=SensorStateClass.MEASUREMENT,
device_class=SensorDeviceClass.PRECIPITATION_INTENSITY,
),
SensorEntityDescription(
key="irradiance",
name="Irradiance",
native_unit_of_measurement=IRRADIATION_WATTS_PER_SQUARE_METER,
icon="mdi:sunglasses",
state_class=SensorStateClass.MEASUREMENT,
),
SensorEntityDescription(
key="precipitation_forecast_average",
name="Precipitation forecast average",
native_unit_of_measurement=UnitOfVolumetricFlux.MILLIMETERS_PER_HOUR,
device_class=SensorDeviceClass.PRECIPITATION_INTENSITY,
),
SensorEntityDescription(
key="precipitation_forecast_total",
name="Precipitation forecast total",
native_unit_of_measurement=LENGTH_MILLIMETERS,
icon="mdi:weather-pouring",
),
# new in json api (>1.0.0):
SensorEntityDescription(
key="rainlast24hour",
name="Rain last 24h",
native_unit_of_measurement=LENGTH_MILLIMETERS,
icon="mdi:weather-pouring",
),
# new in json api (>1.0.0):
SensorEntityDescription(
key="rainlasthour",
name="Rain last hour",
native_unit_of_measurement=LENGTH_MILLIMETERS,
icon="mdi:weather-pouring",
),
SensorEntityDescription(
key="temperature_1d",
name="Temperature 1d",
native_unit_of_measurement=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
),
SensorEntityDescription(
key="temperature_2d",
name="Temperature 2d",
native_unit_of_measurement=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
),
SensorEntityDescription(
key="temperature_3d",
name="Temperature 3d",
native_unit_of_measurement=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
),
SensorEntityDescription(
key="temperature_4d",
name="Temperature 4d",
native_unit_of_measurement=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
),
SensorEntityDescription(
key="temperature_5d",
name="Temperature 5d",
native_unit_of_measurement=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
),
SensorEntityDescription(
key="mintemp_1d",
name="Minimum temperature 1d",
native_unit_of_measurement=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
),
SensorEntityDescription(
key="mintemp_2d",
name="Minimum temperature 2d",
native_unit_of_measurement=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
),
SensorEntityDescription(
key="mintemp_3d",
name="Minimum temperature 3d",
native_unit_of_measurement=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
),
SensorEntityDescription(
key="mintemp_4d",
name="Minimum temperature 4d",
native_unit_of_measurement=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
),
SensorEntityDescription(
key="mintemp_5d",
name="Minimum temperature 5d",
native_unit_of_measurement=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
),
SensorEntityDescription(
key="rain_1d",
name="Rain 1d",
native_unit_of_measurement=LENGTH_MILLIMETERS,
icon="mdi:weather-pouring",
),
SensorEntityDescription(
key="rain_2d",
name="Rain 2d",
native_unit_of_measurement=LENGTH_MILLIMETERS,
icon="mdi:weather-pouring",
),
SensorEntityDescription(
key="rain_3d",
name="Rain 3d",
native_unit_of_measurement=LENGTH_MILLIMETERS,
icon="mdi:weather-pouring",
),
SensorEntityDescription(
key="rain_4d",
name="Rain 4d",
native_unit_of_measurement=LENGTH_MILLIMETERS,
icon="mdi:weather-pouring",
),
SensorEntityDescription(
key="rain_5d",
name="Rain 5d",
native_unit_of_measurement=LENGTH_MILLIMETERS,
icon="mdi:weather-pouring",
),
# new in json api (>1.0.0):
SensorEntityDescription(
key="minrain_1d",
name="Minimum rain 1d",
native_unit_of_measurement=LENGTH_MILLIMETERS,
icon="mdi:weather-pouring",
),
SensorEntityDescription(
key="minrain_2d",
name="Minimum rain 2d",
native_unit_of_measurement=LENGTH_MILLIMETERS,
icon="mdi:weather-pouring",
),
SensorEntityDescription(
key="minrain_3d",
name="Minimum rain 3d",
native_unit_of_measurement=LENGTH_MILLIMETERS,
icon="mdi:weather-pouring",
),
SensorEntityDescription(
key="minrain_4d",
name="Minimum rain 4d",
native_unit_of_measurement=LENGTH_MILLIMETERS,
icon="mdi:weather-pouring",
),
SensorEntityDescription(
key="minrain_5d",
name="Minimum rain 5d",
native_unit_of_measurement=LENGTH_MILLIMETERS,
icon="mdi:weather-pouring",
),
# new in json api (>1.0.0):
SensorEntityDescription(
key="maxrain_1d",
name="Maximum rain 1d",
native_unit_of_measurement=LENGTH_MILLIMETERS,
icon="mdi:weather-pouring",
),
SensorEntityDescription(
key="maxrain_2d",
name="Maximum rain 2d",
native_unit_of_measurement=LENGTH_MILLIMETERS,
icon="mdi:weather-pouring",
),
SensorEntityDescription(
key="maxrain_3d",
name="Maximum rain 3d",
native_unit_of_measurement=LENGTH_MILLIMETERS,
icon="mdi:weather-pouring",
),
SensorEntityDescription(
key="maxrain_4d",
name="Maximum rain 4d",
native_unit_of_measurement=LENGTH_MILLIMETERS,
icon="mdi:weather-pouring",
),
SensorEntityDescription(
key="maxrain_5d",
name="Maximum rain 5d",
native_unit_of_measurement=LENGTH_MILLIMETERS,
icon="mdi:weather-pouring",
),
SensorEntityDescription(
key="rainchance_1d",
name="Rainchance 1d",
native_unit_of_measurement=PERCENTAGE,
icon="mdi:weather-pouring",
),
SensorEntityDescription(
key="rainchance_2d",
name="Rainchance 2d",
native_unit_of_measurement=PERCENTAGE,
icon="mdi:weather-pouring",
),
SensorEntityDescription(
key="rainchance_3d",
name="Rainchance 3d",
native_unit_of_measurement=PERCENTAGE,
icon="mdi:weather-pouring",
),
SensorEntityDescription(
key="rainchance_4d",
name="Rainchance 4d",
native_unit_of_measurement=PERCENTAGE,
icon="mdi:weather-pouring",
),
SensorEntityDescription(
key="rainchance_5d",
name="Rainchance 5d",
native_unit_of_measurement=PERCENTAGE,
icon="mdi:weather-pouring",
),
SensorEntityDescription(
key="sunchance_1d",
name="Sunchance 1d",
native_unit_of_measurement=PERCENTAGE,
icon="mdi:weather-partly-cloudy",
),
SensorEntityDescription(
key="sunchance_2d",
name="Sunchance 2d",
native_unit_of_measurement=PERCENTAGE,
icon="mdi:weather-partly-cloudy",
),
SensorEntityDescription(
key="sunchance_3d",
name="Sunchance 3d",
native_unit_of_measurement=PERCENTAGE,
icon="mdi:weather-partly-cloudy",
),
SensorEntityDescription(
key="sunchance_4d",
name="Sunchance 4d",
native_unit_of_measurement=PERCENTAGE,
icon="mdi:weather-partly-cloudy",
),
SensorEntityDescription(
key="sunchance_5d",
name="Sunchance 5d",
native_unit_of_measurement=PERCENTAGE,
icon="mdi:weather-partly-cloudy",
),
SensorEntityDescription(
key="windforce_1d",
name="Wind force 1d",
native_unit_of_measurement="Bft",
icon="mdi:weather-windy",
),
SensorEntityDescription(
key="windforce_2d",
name="Wind force 2d",
native_unit_of_measurement="Bft",
icon="mdi:weather-windy",
),
SensorEntityDescription(
key="windforce_3d",
name="Wind force 3d",
native_unit_of_measurement="Bft",
icon="mdi:weather-windy",
),
SensorEntityDescription(
key="windforce_4d",
name="Wind force 4d",
native_unit_of_measurement="Bft",
icon="mdi:weather-windy",
),
SensorEntityDescription(
key="windforce_5d",
name="Wind force 5d",
native_unit_of_measurement="Bft",
icon="mdi:weather-windy",
),
SensorEntityDescription(
key="windspeed_1d",
name="Wind speed 1d",
native_unit_of_measurement=SPEED_KILOMETERS_PER_HOUR,
device_class=SensorDeviceClass.SPEED,
icon="mdi:weather-windy",
),
SensorEntityDescription(
key="windspeed_2d",
name="Wind speed 2d",
native_unit_of_measurement=SPEED_KILOMETERS_PER_HOUR,
device_class=SensorDeviceClass.SPEED,
icon="mdi:weather-windy",
),
SensorEntityDescription(
key="windspeed_3d",
name="Wind speed 3d",
native_unit_of_measurement=SPEED_KILOMETERS_PER_HOUR,
device_class=SensorDeviceClass.SPEED,
icon="mdi:weather-windy",
),
SensorEntityDescription(
key="windspeed_4d",
name="Wind speed 4d",
native_unit_of_measurement=SPEED_KILOMETERS_PER_HOUR,
device_class=SensorDeviceClass.SPEED,
icon="mdi:weather-windy",
),
SensorEntityDescription(
key="windspeed_5d",
name="Wind speed 5d",
native_unit_of_measurement=SPEED_KILOMETERS_PER_HOUR,
device_class=SensorDeviceClass.SPEED,
icon="mdi:weather-windy",
),
SensorEntityDescription(
key="winddirection_1d",
name="Wind direction 1d",
icon="mdi:compass-outline",
),
SensorEntityDescription(
key="winddirection_2d",
name="Wind direction 2d",
icon="mdi:compass-outline",
),
SensorEntityDescription(
key="winddirection_3d",
name="Wind direction 3d",
icon="mdi:compass-outline",
),
SensorEntityDescription(
key="winddirection_4d",
name="Wind direction 4d",
icon="mdi:compass-outline",
),
SensorEntityDescription(
key="winddirection_5d",
name="Wind direction 5d",
icon="mdi:compass-outline",
),
SensorEntityDescription(
key="windazimuth_1d",
name="Wind direction azimuth 1d",
native_unit_of_measurement=DEGREE,
icon="mdi:compass-outline",
),
SensorEntityDescription(
key="windazimuth_2d",
name="Wind direction azimuth 2d",
native_unit_of_measurement=DEGREE,
icon="mdi:compass-outline",
),
SensorEntityDescription(
key="windazimuth_3d",
name="Wind direction azimuth 3d",
native_unit_of_measurement=DEGREE,
icon="mdi:compass-outline",
),
SensorEntityDescription(
key="windazimuth_4d",
name="Wind direction azimuth 4d",
native_unit_of_measurement=DEGREE,
icon="mdi:compass-outline",
),
SensorEntityDescription(
key="windazimuth_5d",
name="Wind direction azimuth 5d",
native_unit_of_measurement=DEGREE,
icon="mdi:compass-outline",
),
SensorEntityDescription(
key="condition_1d",
name="Condition 1d",
),
SensorEntityDescription(
key="condition_2d",
name="Condition 2d",
),
SensorEntityDescription(
key="condition_3d",
name="Condition 3d",
),
SensorEntityDescription(
key="condition_4d",
name="Condition 4d",
),
SensorEntityDescription(
key="condition_5d",
name="Condition 5d",
),
SensorEntityDescription(
key="conditioncode_1d",
name="Condition code 1d",
),
SensorEntityDescription(
key="conditioncode_2d",
name="Condition code 2d",
),
SensorEntityDescription(
key="conditioncode_3d",
name="Condition code 3d",
),
SensorEntityDescription(
key="conditioncode_4d",
name="Condition code 4d",
),
SensorEntityDescription(
key="conditioncode_5d",
name="Condition code 5d",
),
SensorEntityDescription(
key="conditiondetailed_1d",
name="Detailed condition 1d",
),
SensorEntityDescription(
key="conditiondetailed_2d",
name="Detailed condition 2d",
),
SensorEntityDescription(
key="conditiondetailed_3d",
name="Detailed condition 3d",
),
SensorEntityDescription(
key="conditiondetailed_4d",
name="Detailed condition 4d",
),
SensorEntityDescription(
key="conditiondetailed_5d",
name="Detailed condition 5d",
),
SensorEntityDescription(
key="conditionexact_1d",
name="Full condition 1d",
),
SensorEntityDescription(
key="conditionexact_2d",
name="Full condition 2d",
),
SensorEntityDescription(
key="conditionexact_3d",
name="Full condition 3d",
),
SensorEntityDescription(
key="conditionexact_4d",
name="Full condition 4d",
),
SensorEntityDescription(
key="conditionexact_5d",
name="Full condition 5d",
),
SensorEntityDescription(
key="symbol_1d",
name="Symbol 1d",
),
SensorEntityDescription(
key="symbol_2d",
name="Symbol 2d",
),
SensorEntityDescription(
key="symbol_3d",
name="Symbol 3d",
),
SensorEntityDescription(
key="symbol_4d",
name="Symbol 4d",
),
SensorEntityDescription(
key="symbol_5d",
name="Symbol 5d",
),
)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Create the buienradar sensor."""
config = entry.data
options = entry.options
latitude = config.get(CONF_LATITUDE, hass.config.latitude)
longitude = config.get(CONF_LONGITUDE, hass.config.longitude)
timeframe = options.get(
CONF_TIMEFRAME, config.get(CONF_TIMEFRAME, DEFAULT_TIMEFRAME)
)
if None in (latitude, longitude):
_LOGGER.error("Latitude or longitude not set in Home Assistant config")
return
coordinates = {CONF_LATITUDE: float(latitude), CONF_LONGITUDE: float(longitude)}
_LOGGER.debug(
"Initializing buienradar sensor coordinate %s, timeframe %s",
coordinates,
timeframe,
)
entities = [
BrSensor(config.get(CONF_NAME, "Buienradar"), coordinates, description)
for description in SENSOR_TYPES
]
async_add_entities(entities)
data = BrData(hass, coordinates, timeframe, entities)
# schedule the first update in 1 minute from now:
await data.schedule_update(1)
class BrSensor(SensorEntity):
"""Representation of an Buienradar sensor."""
_attr_entity_registry_enabled_default = False
_attr_should_poll = False
def __init__(self, client_name, coordinates, description: SensorEntityDescription):
"""Initialize the sensor."""
self.entity_description = description
self._attr_name = f"{client_name} {description.name}"
self._measured = None
self._attr_unique_id = "{:2.6f}{:2.6f}{}".format(
coordinates[CONF_LATITUDE], coordinates[CONF_LONGITUDE], description.key
)
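        # For example (illustrative values, not from the source): latitude
        # 52.101667, longitude 5.177778 and key "temperature" yield the
        # unique_id "52.1016675.177778temperature".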
# All continuous sensors should be forced to be updated
self._attr_force_update = (
description.key != SYMBOL and not description.key.startswith(CONDITION)
)
if description.key.startswith(PRECIPITATION_FORECAST):
self._timeframe = None
@callback
def data_updated(self, data):
"""Update data."""
if self.hass and self._load_data(data):
self.async_write_ha_state()
@callback
def _load_data(self, data): # noqa: C901
"""Load the sensor with relevant data."""
# Find sensor
# Check if we have a new measurement,
# otherwise we do not have to update the sensor
if self._measured == data.get(MEASURED):
return False
self._measured = data.get(MEASURED)
sensor_type = self.entity_description.key
if (
sensor_type.endswith("_1d")
or sensor_type.endswith("_2d")
or sensor_type.endswith("_3d")
or sensor_type.endswith("_4d")
or sensor_type.endswith("_5d")
):
# update forecasting sensors:
fcday = 0
if sensor_type.endswith("_2d"):
fcday = 1
if sensor_type.endswith("_3d"):
fcday = 2
if sensor_type.endswith("_4d"):
fcday = 3
if sensor_type.endswith("_5d"):
fcday = 4
# update weather symbol & status text
if sensor_type.startswith(SYMBOL) or sensor_type.startswith(CONDITION):
try:
condition = data.get(FORECAST)[fcday].get(CONDITION)
except IndexError:
_LOGGER.warning("No forecast for fcday=%s", fcday)
return False
if condition:
new_state = condition.get(CONDITION)
if sensor_type.startswith(SYMBOL):
new_state = condition.get(EXACTNL)
if sensor_type.startswith("conditioncode"):
new_state = condition.get(CONDCODE)
if sensor_type.startswith("conditiondetailed"):
new_state = condition.get(DETAILED)
if sensor_type.startswith("conditionexact"):
new_state = condition.get(EXACT)
img = condition.get(IMAGE)
if new_state != self.state or img != self.entity_picture:
self._attr_native_value = new_state
self._attr_entity_picture = img
return True
return False
if sensor_type.startswith(WINDSPEED):
# hass wants windspeeds in km/h not m/s, so convert:
try:
self._attr_native_value = data.get(FORECAST)[fcday].get(
sensor_type[:-3]
)
if self.state is not None:
self._attr_native_value = round(self.state * 3.6, 1)
return True
except IndexError:
_LOGGER.warning("No forecast for fcday=%s", fcday)
return False
# update all other sensors
try:
self._attr_native_value = data.get(FORECAST)[fcday].get(
sensor_type[:-3]
)
return True
except IndexError:
_LOGGER.warning("No forecast for fcday=%s", fcday)
return False
if sensor_type == SYMBOL or sensor_type.startswith(CONDITION):
# update weather symbol & status text
if condition := data.get(CONDITION):
if sensor_type == SYMBOL:
new_state = condition.get(EXACTNL)
if sensor_type == CONDITION:
new_state = condition.get(CONDITION)
if sensor_type == "conditioncode":
new_state = condition.get(CONDCODE)
if sensor_type == "conditiondetailed":
new_state = condition.get(DETAILED)
if sensor_type == "conditionexact":
new_state = condition.get(EXACT)
img = condition.get(IMAGE)
if new_state != self.state or img != self.entity_picture:
self._attr_native_value = new_state
self._attr_entity_picture = img
return True
return False
if sensor_type.startswith(PRECIPITATION_FORECAST):
# update nested precipitation forecast sensors
nested = data.get(PRECIPITATION_FORECAST)
self._timeframe = nested.get(TIMEFRAME)
self._attr_native_value = nested.get(
sensor_type[len(PRECIPITATION_FORECAST) + 1 :]
)
return True
if sensor_type in [WINDSPEED, WINDGUST]:
# hass wants windspeeds in km/h not m/s, so convert:
self._attr_native_value = data.get(sensor_type)
if self.state is not None:
self._attr_native_value = round(data.get(sensor_type) * 3.6, 1)
return True
if sensor_type == VISIBILITY:
# hass wants visibility in km (not m), so convert:
self._attr_native_value = data.get(sensor_type)
if self.state is not None:
self._attr_native_value = round(self.state / 1000, 1)
return True
# update all other sensors
self._attr_native_value = data.get(sensor_type)
if sensor_type.startswith(PRECIPITATION_FORECAST):
result = {ATTR_ATTRIBUTION: data.get(ATTRIBUTION)}
if self._timeframe is not None:
result[TIMEFRAME_LABEL] = "%d min" % (self._timeframe)
self._attr_extra_state_attributes = result
result = {
ATTR_ATTRIBUTION: data.get(ATTRIBUTION),
STATIONNAME_LABEL: data.get(STATIONNAME),
}
if self._measured is not None:
# convert datetime (Europe/Amsterdam) into local datetime
local_dt = dt_util.as_local(self._measured)
result[MEASURED_LABEL] = local_dt.strftime("%c")
self._attr_extra_state_attributes = result
return True
| {
"content_hash": "1b0e54a9835ac0d831d9d09245c6ab7b",
"timestamp": "",
"source": "github",
"line_count": 872,
"max_line_length": 87,
"avg_line_length": 31.128440366972477,
"alnum_prop": 0.6010167992926614,
"repo_name": "mezz64/home-assistant",
"id": "bf44c884147e87598a80b223a5e3fcc796bb00e1",
"size": "27144",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/buienradar/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52481895"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
"""Tests for TPU outside compilation."""
import os
import tempfile
from absl.testing import parameterized
import numpy as np
from tensorboard.plugins.histogram import summary_v2 as histogram_summary_v2
from tensorboard.plugins.image import summary_v2 as image_summary_v2
from tensorboard.plugins.scalar import summary_v2 as scalar_summary_v2
from tensorflow.core.util import event_pb2
from tensorflow.python.distribute import tpu_strategy as tpu_lib
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.eager import def_function
from tensorflow.python.eager import remote
from tensorflow.python.eager import test
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.lib.io import tf_record
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import summary_ops_v2 as summary
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.platform import flags
from tensorflow.python.platform import gfile
from tensorflow.python.tpu import functional as tpu_functional
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu import tpu_strategy_util
from tensorflow.python.tpu.ops import tpu_ops
FLAGS = flags.FLAGS
flags.DEFINE_string("tpu", "", "Name of TPU to connect to.")
flags.DEFINE_string("project", None, "Name of GCP project with TPU.")
flags.DEFINE_string("zone", None, "Name of GCP zone with TPU.")
def get_tpu_cluster_resolver():
resolver = tpu_cluster_resolver.TPUClusterResolver(
tpu=FLAGS.tpu,
zone=FLAGS.zone,
project=FLAGS.project,
)
return resolver
def get_tpu_strategy():
resolver = get_tpu_cluster_resolver()
remote.connect_to_cluster(resolver)
tpu_strategy_util.initialize_tpu_system(resolver)
return tpu_lib.TPUStrategyV2(resolver)
def computation_with_string_ops(x):
output = string_ops.string_format("1{}", x)
return string_ops.string_to_number(output)
def _events_from_logdir(test_case, logdir):
"""Reads summary events from log directory."""
test_case.assertTrue(gfile.Exists(logdir))
files = gfile.ListDirectory(logdir)
test_case.assertLen(files, 1)
records = list(tf_record.tf_record_iterator(os.path.join(logdir, files[0])))
result = []
for r in records:
event = event_pb2.Event()
event.ParseFromString(r)
result.append(event)
return result
def _rewrite_func_wrapper(tf_func):
def tpu_fn(*args, **kwargs):
# tpu.rewrite only accepts list of tensors as input. We need to flatten
# keyword arguments to meet this requirement.
concrete = tf_func.get_concrete_function(*(list(args) +
list(kwargs.values())))
return tpu.rewrite(concrete.__call__, list(args) + list(kwargs.values()))
return def_function.function(tpu_fn)
def _tpu_partitioned_call_wrapper(tf_func):
"""Wrap a tensorflow Function with TPUPartitionedCall."""
def inner_func(*args, **kwargs):
concrete = tf_func.get_concrete_function(*args, **kwargs)
# TPUPartitionedCall only accepts list of tensors as input args.
# Flatten keyword arguments and do some basic ordering:
# Positional args + Flattened keyword args + Captured args.
op_args = list(args) + list(kwargs.values()) + concrete.captured_inputs
return tpu_functional.TPUPartitionedCall(
args=op_args,
device_ordinal=tpu_ops.tpu_ordinal_selector(),
Tout=[o.type for o in concrete.function_def.signature.output_arg],
f=concrete)
return def_function.function(inner_func)
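# --- Usage sketch (not part of the original test) ---
# A hypothetical illustration of the two wrappers above: each takes a
# tf.function and returns a new tf.function that routes the call through
# tpu.rewrite or TPUPartitionedCall, respectively, when invoked on a TPU
# worker.
#
#   @def_function.function
#   def add_one(x):
#     return x + 1.0
#
#   rewritten = _rewrite_func_wrapper(add_one)            # via tpu.rewrite
#   partitioned = _tpu_partitioned_call_wrapper(add_one)  # via TPUPartitionedCall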
class TpuOutsideCompilationTest(test.TestCase, parameterized.TestCase):
def setUp(self):
super(TpuOutsideCompilationTest, self).setUp()
config.set_soft_device_placement(False)
def testHostNoInput(self):
strategy = get_tpu_strategy()
def outside_fn():
logging_ops.print_v2("Outside compiled")
@def_function.function
def train_step():
def tpu_fn(x):
x2 = x + 5.0
tpu.outside_compilation(outside_fn)
return x2 + 5.0
return strategy.run(tpu_fn, args=(25.0,))
self.assertAllEqual(
strategy.experimental_local_results(train_step()),
constant_op.constant(35., shape=(strategy.num_replicas_in_sync)))
def testHostInputOnly(self):
strategy = get_tpu_strategy()
def outside_fn(x):
logging_ops.print_v2("Outside compiled", x)
@def_function.function
def train_step():
def tpu_fn(x):
x2 = x + 5.0
tpu.outside_compilation(outside_fn, x2)
return x2 + 5.0
return strategy.run(tpu_fn, args=(25.0,))
self.assertAllEqual(
strategy.experimental_local_results(train_step()),
constant_op.constant(35., shape=(strategy.num_replicas_in_sync)))
def testHostInputOutput(self):
strategy = get_tpu_strategy()
def outside_fn(x):
logging_ops.print_v2("Outside compiled", x)
return x + 6.0
@def_function.function
def train_step():
def tpu_fn(x):
x2 = x + 5.0
output = tpu.outside_compilation(outside_fn, x2)
return output
return strategy.run(tpu_fn, args=(25.0,))
self.assertAllEqual(
strategy.experimental_local_results(train_step()),
constant_op.constant(36., shape=(strategy.num_replicas_in_sync)))
def testHostMultipleInputs(self):
strategy = get_tpu_strategy()
val0 = np.arange(6).reshape((2, 3)).astype(np.float32)
val1 = np.arange(6).reshape((3, 2)).astype(np.float32)
def outside_fn(arg0, arg1):
tmp = array_ops.reshape(arg1, array_ops.shape(arg0))
ret0 = arg0 + tmp
ret1 = math_ops.matmul(arg0, arg1)
ret2 = array_ops.concat([arg0, tmp], 0)
return ret0, ret1, ret2
@def_function.function
def train_step():
def tpu_fn(x, y):
a = x + 7.0
b = y * 2.0
c, d, e = tpu.outside_compilation(outside_fn, a, b)
return (math_ops.reduce_max(c) + math_ops.reduce_min(d) +
math_ops.reduce_sum(e))
return strategy.run(tpu_fn, args=(val0, val1))
self.assertAllEqual(
strategy.experimental_local_results(train_step()),
constant_op.constant(213., shape=(strategy.num_replicas_in_sync)))
def testMultipleClusters(self):
strategy = get_tpu_strategy()
def outside_fn1(x):
logging_ops.print_v2("Outside compiled", x)
return x + 6.0
def outside_fn2(x):
logging_ops.print_v2("Outside compiled", x)
return x - 18.0
@def_function.function
def train_step():
def tpu_fn(x):
x2 = x + 5.0
output1 = tpu.outside_compilation(outside_fn1, x2)
x3 = output1 + 3.0
output2 = tpu.outside_compilation(outside_fn2, x3)
return output2
return strategy.run(tpu_fn, args=(25.0,))
self.assertAllEqual(
strategy.experimental_local_results(train_step()),
constant_op.constant(21., shape=(strategy.num_replicas_in_sync)))
@parameterized.parameters((True), (False))
def testOutsideCompilationControlFlowIf(self, take_true_branch):
strategy = get_tpu_strategy()
def outside_fn(x):
logging_ops.print_v2("Outside compiled", x)
return x + 6.0
input_value = 51.0 if take_true_branch else 25.0
@def_function.function
def train_step():
def tpu_fn(x):
x2 = x + 5.0
if x < 50.0:
return tpu.outside_compilation(outside_fn, x2)
else:
return x2
return strategy.run(tpu_fn, args=(input_value,))
output_value = 36.0
if take_true_branch:
output_value = 56.0
self.assertAllEqual(
strategy.experimental_local_results(train_step()),
constant_op.constant(
output_value, shape=(strategy.num_replicas_in_sync)))
def testOutsideCompilationControlFlowWhile(self):
strategy = get_tpu_strategy()
def outside_fn(x):
logging_ops.print_v2("Outside compiled", x)
return x + 6.0
@def_function.function
def train_step():
def tpu_fn(x):
x2 = x + 5.0
while x2 < 50.0:
x2 = tpu.outside_compilation(outside_fn, x2)
return x2 + 4.0
return strategy.run(tpu_fn, args=(25.0,))
self.assertAllEqual(
strategy.experimental_local_results(train_step()),
constant_op.constant(58., shape=(strategy.num_replicas_in_sync)))
def testOutsideCompilationHostControlFlow(self):
"""Tests that control flow on host for outside_compilation works."""
strategy = get_tpu_strategy()
def outside_fn(x):
n = 0
while n < 4:
x = x + 6.0
n = n + 1
return x
@def_function.function
def train_step():
def tpu_fn(x):
x2 = x + 5.0
x2 = tpu.outside_compilation(outside_fn, x2)
return x2 + 4.0
return strategy.run(tpu_fn, args=(25.0,))
self.assertAllEqual(
strategy.experimental_local_results(train_step()),
constant_op.constant(58., shape=(strategy.num_replicas_in_sync)))
def testSummary(self):
strategy = get_tpu_strategy()
def host_computation(x):
scalar_summary_v2.scalar("x", x, step=0)
return x * 2.0
@def_function.function
def step():
def computation(x):
x = x + 1.0
y = tpu.outside_compilation(host_computation, x)
y = tpu.outside_compilation(host_computation, x)
return y + 1.0
return strategy.run(computation, args=(2.0,))
summary_writer = summary.create_file_writer(
os.path.join(os.getenv("TEST_TMPDIR", "/tmp")), flush_millis=10000)
with summary_writer.as_default(), summary.always_record_summaries():
self.assertAllEqual(
strategy.experimental_local_results(step()),
constant_op.constant(7., shape=(strategy.num_replicas_in_sync)))
@parameterized.parameters((True), (False))
def testSummaryInCond(self, take_true_branch):
strategy = get_tpu_strategy()
def host_computation(x):
scalar_summary_v2.scalar("x", x, step=0)
return x * 2.0
@def_function.function
def step(take_true_branch):
def computation(x):
x = x + 1.0
if x < 5.0:
y = tpu.outside_compilation(host_computation, x)
y = tpu.outside_compilation(host_computation, x)
x = y
return x + 1.0
if take_true_branch:
return strategy.run(computation, args=(2.0,))
else:
return strategy.run(computation, args=(10.0,))
summary_writer = summary.create_file_writer(
os.path.join(os.getenv("TEST_TMPDIR", "/tmp")), flush_millis=10000)
output_value = 12.
if take_true_branch:
output_value = 7.
with summary_writer.as_default(), summary.always_record_summaries():
self.assertAllEqual(
strategy.experimental_local_results(step(take_true_branch)),
constant_op.constant(
output_value, shape=(strategy.num_replicas_in_sync)))
def testSummaryInWhile(self):
strategy = get_tpu_strategy()
def host_computation(x):
scalar_summary_v2.scalar("x", x, step=0)
return x * 2.0
@def_function.function
def step():
def computation(x):
n = 0
while n < 3:
x = x + 1.0
y = tpu.outside_compilation(host_computation, x)
y = tpu.outside_compilation(host_computation, x)
x = y
n = n + 1
return y + 1.0
return strategy.run(computation, args=(2.0,))
summary_writer = summary.create_file_writer(
os.path.join(os.getenv("TEST_TMPDIR", "/tmp")), flush_millis=10000)
with summary_writer.as_default(), summary.always_record_summaries():
self.assertAllEqual(
strategy.experimental_local_results(step()),
constant_op.constant(31., shape=(strategy.num_replicas_in_sync)))
def testOutsideCompilationAtHeadAndTail(self):
"""Tests that outside_compilation at head/tail of TPU computation works."""
strategy = get_tpu_strategy()
def host_computation(x):
return x * 2.0
@def_function.function
def train_step():
def computation(x):
w = tpu.outside_compilation(host_computation, x)
y = w + 1.0
z = tpu.outside_compilation(host_computation, y)
return z + 5.0
return strategy.run(computation, args=(2.0,))
self.assertAllEqual(
strategy.experimental_local_results(train_step()),
constant_op.constant(15., shape=(strategy.num_replicas_in_sync)))
def testGradientAcrossOutsideCompilation(self):
"""Tests compiled gradients can contain host computations."""
strategy = get_tpu_strategy()
def host_computation(a):
b = a * a
c = b * b
return c
@def_function.function
def train_step():
def computation(x, y):
a = x + 7.0
b = tpu.outside_compilation(host_computation, a)
c = b * y
d = gradients_impl.gradients(
[c], [x], colocate_gradients_with_ops=True)[0]
return d
return strategy.run(computation, args=(2.0, 3.0))
self.assertAllEqual(
strategy.experimental_local_results(train_step()),
constant_op.constant(8748., shape=(strategy.num_replicas_in_sync)))
def testGradientOfGradientAcrossOutsideCompilation(self):
"""Tests compiled gradients of gradients can contain host computations."""
strategy = get_tpu_strategy()
def host_computation(a):
b = a * a
c = b * b
return c
@def_function.function
def train_step():
def computation(x, y):
a = x + 7.0
b = tpu.outside_compilation(host_computation, a)
c = b * y
d = gradients_impl.gradients(
[c], [x], colocate_gradients_with_ops=True)[0]
e = gradients_impl.gradients(
[d], [x], colocate_gradients_with_ops=True)[0]
return e
return strategy.run(computation, args=(2.0, 3.0))
self.assertAllEqual(
strategy.experimental_local_results(train_step()),
constant_op.constant(2916., shape=(strategy.num_replicas_in_sync)))
def testColocateGradientWithOutsideCompiledOp(self):
strategy = get_tpu_strategy()
@def_function.function
def train_step():
@def_function.function
def tpu_fn(x):
x1 = tpu.outside_compilation(math_ops.sqrt, x)
grad = gradients_impl.gradients([x1], [x],
colocate_gradients_with_ops=True)[0]
sqrt = [
op for op in ops.get_default_graph().get_operations()
if op.type == "Sqrt"
][0]
sqrt_grad = [
op for op in ops.get_default_graph().get_operations()
if op.type == "SqrtGrad"
][0]
assert sqrt.get_attr(tpu._OUTSIDE_COMPILATION_ATTR) == b"0"
assert (sqrt_grad.get_attr(
tpu._OUTSIDE_COMPILATION_ATTR) == b"0.gradients/uid")
return grad
return strategy.run(tpu_fn, args=(25.0,))
self.assertAllEqual(
strategy.experimental_local_results(train_step()),
constant_op.constant(.1, shape=(strategy.num_replicas_in_sync)))
class OutsideCompilationOnUnsupportedOpTest(test.TestCase,
parameterized.TestCase):
def setUp(self):
super(OutsideCompilationOnUnsupportedOpTest, self).setUp()
config.set_soft_device_placement(True)
def testStringOpWithManualOutsideCompilation(self):
strategy = get_tpu_strategy()
@def_function.function
def train_step(x):
def computation(x):
return tpu.outside_compilation(computation_with_string_ops, x)
return strategy.run(computation, args=(x,))
self.assertAllEqual(
strategy.experimental_local_results(train_step(0)),
constant_op.constant(10, shape=(strategy.num_replicas_in_sync)))
def testStringOpWithAutoOutsideCompilation(self):
strategy = get_tpu_strategy()
@def_function.function
def train_step(x):
def computation(x):
return computation_with_string_ops(x)
return strategy.run(computation, args=(x,))
self.assertAllEqual(
strategy.experimental_local_results(train_step(0)),
constant_op.constant(10, shape=(strategy.num_replicas_in_sync)))
# Regression test for b/180509859.
def testImageSummary(self):
strategy = get_tpu_strategy()
def run():
@def_function.function
def sample_sequence():
bsz = 3
max_length = 32 * 32
def f():
def body(step, tokens):
next_token = random_ops.random_uniform([bsz])
tokens = tokens.write(step, next_token)
return (step + 1, tokens)
def cond(step, tokens):
del tokens
return math_ops.less(step, max_length)
tokens_var = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
size=max_length,
dynamic_size=False,
clear_after_read=False,
element_shape=(bsz,),
name="tokens_accumulator",
)
step = constant_op.constant(0)
step, tokens_var = control_flow_ops.while_loop(
cond, body, [step, tokens_var])
image_flat = array_ops.transpose(tokens_var.stack(), [1, 0])
image = array_ops.tile(
array_ops.reshape(image_flat, [bsz, 32, 32, 1]), [1, 1, 1, 3])
image_summary_v2.image("image_sample", image,
constant_op.constant(5, dtype=dtypes.int64))
return strategy.run(f)
sample_sequence()
logdir = tempfile.mkdtemp()
summary_writer = summary.create_file_writer(logdir, flush_millis=10000)
with summary_writer.as_default(), summary.always_record_summaries():
run()
events = _events_from_logdir(self, logdir)
decoded_image = image_ops.decode_png(
events[1].summary.value[0].tensor.string_val[2]).numpy()
# Ensure that non-zero values were written to the image summary.
self.assertNotAllEqual(
array_ops.zeros((3072,), dtype=dtypes.float32),
list(decoded_image.flat))
def testSummaryWithAutoOutsideCompilation(self):
strategy = get_tpu_strategy()
def host_computation(x):
scalar_summary_v2.scalar("x", x, step=0)
return x * 2.0
@def_function.function
def step():
def computation(x):
x = x + 1.0
y = host_computation(x)
return y + 1.0
return strategy.run(computation, args=(2.0,))
logdir = tempfile.mkdtemp()
summary_writer = summary.create_file_writer(logdir, flush_millis=10000)
with summary_writer.as_default(), summary.always_record_summaries():
self.assertAllEqual(
strategy.experimental_local_results(step()),
constant_op.constant(7., shape=(strategy.num_replicas_in_sync)))
events = _events_from_logdir(self, logdir)
# There will be 2 entries: 1 summary file header entry, and 1 entry
# written by host.
self.assertLen(events, 2)
self.assertEqual(events[1].summary.value[0].tag, "x")
def testNestedFunctionScalarSummary(self):
strategy = get_tpu_strategy()
def host_computation(x):
scalar_summary_v2.scalar("x", x, step=0)
return x * 2.0
@def_function.function
def step():
@def_function.function
def computation(x):
x = x + 1.0
y = host_computation(x)
return y + 1.0
return strategy.run(computation, args=(2.0,))
logdir = tempfile.mkdtemp()
summary_writer = summary.create_file_writer(logdir, flush_millis=10000)
with summary_writer.as_default(), summary.always_record_summaries():
self.assertAllEqual(
strategy.experimental_local_results(step()),
constant_op.constant(7., shape=(strategy.num_replicas_in_sync)))
events = _events_from_logdir(self, logdir)
# There will be 2 entries: 1 summary file header entry, and 1 entry
# written by host.
self.assertLen(events, 2)
self.assertEqual(events[1].summary.value[0].tag, "x")
def testHistogramSummaryWithAutoOutsideCompilation(self):
strategy = get_tpu_strategy()
def host_computation(x):
histogram_summary_v2.histogram("x", x, step=0)
return x * 2.0
@def_function.function
def step():
def computation(x):
x = x + 1.0
y = host_computation(x)
return y + 1.0
return strategy.run(computation, args=(2.0,))
logdir = tempfile.mkdtemp()
summary_writer = summary.create_file_writer(logdir, flush_millis=10000)
with summary_writer.as_default(), summary.always_record_summaries():
self.assertAllEqual(
strategy.experimental_local_results(step()),
constant_op.constant(7., shape=(strategy.num_replicas_in_sync)))
events = _events_from_logdir(self, logdir)
# There will be 2 entries: 1 summary file header entry, and 1 entry
# written by host.
self.assertLen(events, 2)
self.assertEqual(events[1].summary.value[0].tag, "x")
@parameterized.parameters((True), (False))
def testSummaryControlFlowIfWithAutoOutsideCompilation(
self, take_true_branch):
strategy = get_tpu_strategy()
@def_function.function
def step():
def computation(x):
x = x + 1.0
if x < 5:
scalar_summary_v2.scalar("x", x, step=0)
x = x * 2.0
return x + 1.0
if take_true_branch:
return strategy.run(computation, args=(2.0,))
else:
return strategy.run(computation, args=(10.0,))
logdir = tempfile.mkdtemp()
summary_writer = summary.create_file_writer(logdir, flush_millis=10000)
output_value = 12.
if take_true_branch:
output_value = 7.
with summary_writer.as_default(), summary.always_record_summaries():
self.assertAllEqual(
strategy.experimental_local_results(step()),
constant_op.constant(
output_value, shape=(strategy.num_replicas_in_sync)))
if take_true_branch:
events = _events_from_logdir(self, logdir)
# There will be 2 entries: 1 summary file header entry, and 1 entry
# written by host.
#
self.assertLen(events, 2)
self.assertEqual(events[1].summary.value[0].tag, "cond/x")
def testAutoOutsideCompilationWithFunctionalNodes(self):
strategy = get_tpu_strategy()
@def_function.function
def train_step(a, b):
def fn(a, b):
fn1 = lambda: computation_with_string_ops(a * 100)
fn2 = lambda: computation_with_string_ops(a)
pred = math_ops.greater_equal(a, b)
result = array_ops.identity(
control_flow_ops.cond(pred, fn1, fn2),
name="uncompilable_control_flow")
return result
return strategy.run(fn, args=(a, b))
self.assertAllEqual(
strategy.experimental_local_results(train_step(0.0, -1.0)),
constant_op.constant(10, shape=(strategy.num_replicas_in_sync)))
def testRandomOpsWithAutoOutsideCompilation(self):
strategy = get_tpu_strategy()
@def_function.function
def train_step():
def computation():
return random_ops.random_normal(shape=[1, 2, 3])
return strategy.run(computation, args=())
self.assertAllEqual(
strategy.experimental_local_results(train_step())[0].shape, [1, 2, 3])
def testOutsideCompilationWithTPUPartitionedCallOp(self):
"""Tests that control flow with TPUPartitionedCall including outside_compilation works."""
get_tpu_strategy()
def host_computation(x):
return x + 1
@def_function.function()
def train_step(x):
x2 = x + 5.0
logging_ops.print_v2(x2)
x2 = tpu.outside_compilation(host_computation, x2)
return x2 + 4.0
tpu_fn = _rewrite_func_wrapper(train_step)
partitioned_tpu_fn = _tpu_partitioned_call_wrapper(tpu_fn)
concrete = partitioned_tpu_fn.get_concrete_function(
x=tensor_spec.TensorSpec(
shape=(1), dtype=dtypes.float32, name="input_tensor"))
self.assertIsInstance(
concrete(array_ops.ones((1), dtype=dtypes.float32))[0], ops.Tensor)
if __name__ == "__main__":
test.main()
| {
"content_hash": "7606e7c16c8e20b36527384d3baa89c7",
"timestamp": "",
"source": "github",
"line_count": 784,
"max_line_length": 94,
"avg_line_length": 31.375,
"alnum_prop": 0.6436295633791365,
"repo_name": "paolodedios/tensorflow",
"id": "697ff9ef7c55584b4e7686756564650800b72890",
"size": "25287",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "tensorflow/python/tpu/tpu_outside_compilation_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "36962"
},
{
"name": "C",
"bytes": "1387968"
},
{
"name": "C#",
"bytes": "13584"
},
{
"name": "C++",
"bytes": "125994873"
},
{
"name": "CMake",
"bytes": "182324"
},
{
"name": "Cython",
"bytes": "5003"
},
{
"name": "Dockerfile",
"bytes": "416133"
},
{
"name": "Go",
"bytes": "2129888"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "1074438"
},
{
"name": "Jupyter Notebook",
"bytes": "792906"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "11402294"
},
{
"name": "Makefile",
"bytes": "2760"
},
{
"name": "Objective-C",
"bytes": "172666"
},
{
"name": "Objective-C++",
"bytes": "300208"
},
{
"name": "Pawn",
"bytes": "5552"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "42775737"
},
{
"name": "Roff",
"bytes": "5034"
},
{
"name": "Ruby",
"bytes": "9199"
},
{
"name": "Shell",
"bytes": "621520"
},
{
"name": "Smarty",
"bytes": "89545"
},
{
"name": "SourcePawn",
"bytes": "14625"
},
{
"name": "Starlark",
"bytes": "7727119"
},
{
"name": "Swift",
"bytes": "78435"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
import argparse
import re
def main():
parser = argparse.ArgumentParser(
description="Extract selected stanzas from ncbitaxon.owl"
)
parser.add_argument("input", type=str, help="The input ncbitaxon.owl file")
parser.add_argument("taxa", type=str, help="The TSV file listing taxa to extract")
parser.add_argument("output", type=str, help="The output OWL file")
args = parser.parse_args()
taxa = set()
with open(args.taxa) as taxalist:
for line in taxalist:
taxa.add(line.split()[0])
digits = re.compile(r"\d+")
with open(args.output, "w") as output:
with open(args.input) as ncbitaxon:
keep = True
post_rank = False
skip_to_end = False
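# keep: whether the current stanza is copied to the output.
# post_rank: set once the NCBITaxon#_taxonomic_rank class header is seen.
# skip_to_end: after that class closes, skip the remaining (merged-node)
# annotation stanzas until the closing </rdf:RDF> tag.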
for line in ncbitaxon:
# Skip over annotations for merged nodes that were deleted
if "</rdf:RDF>" in line:
skip_to_end = False
if skip_to_end:
continue
if "NCBITaxon#_taxonomic_rank -->" in line:
post_rank = True
if post_rank and "</owl:Class>" in line:
skip_to_end = True
if "<!-- http://purl.obolibrary.org/obo/NCBITaxon_" in line:
matches = digits.search(line)
if not matches:
keep = True
elif matches[0] in taxa:
keep = True
else:
keep = False
if keep:
output.write(line)
if __name__ == "__main__":
main()
| {
"content_hash": "576e9b9fc929c68eba9727b5c9d58f1f",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 86,
"avg_line_length": 33.53061224489796,
"alnum_prop": 0.4996956786366403,
"repo_name": "obophenotype/ncbitaxon",
"id": "dc58725c7156a3e2f52ebeda0d4bc6c29b2eaae0",
"size": "1667",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/extract_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "4493"
},
{
"name": "Perl",
"bytes": "629"
},
{
"name": "Python",
"bytes": "20784"
}
],
"symlink_target": ""
} |
from unittest import main
from json import loads
from qiita_pet.test.tornado_test_base import TestHandlerBase
class TestNewPrepTemplateAjax(TestHandlerBase):
def test_get(self):
response = self.get('/study/new_prep_template/', {'study_id': '1'})
self.assertEqual(response.code, 200)
class TestPrepTemplateGraphAJAX(TestHandlerBase):
def test_get(self):
response = self.get('/prep/graph/', {'prep_id': 1})
self.assertEqual(response.code, 200)
exp = {"status": "success",
"node_labels": [[1, "Raw data 1 - FASTQ"],
[3, "Demultiplexed 2 - Demultiplexed"],
[2, "Demultiplexed 1 - Demultiplexed"],
[4, "BIOM - BIOM"],
[5, "BIOM - BIOM"],
[6, "BIOM - BIOM"]],
"message": "",
"edge_list": [[1, 3], [1, 2], [2, 4], [2, 5], [2, 6]]}
obs = loads(response.body)
self.assertEqual(obs['status'], exp['status'])
self.assertEqual(obs['message'], exp['message'])
self.assertItemsEqual(obs['node_labels'], exp['node_labels'])
self.assertItemsEqual(obs['edge_list'], exp['edge_list'])
class TestPrepTemplateAJAXReadOnly(TestHandlerBase):
def test_get(self):
response = self.get('/study/description/prep_template/',
{'prep_id': 1, 'study_id': 1})
self.assertEqual(response.code, 200)
self.assertNotEqual(response.body, '')
class TestPrepFilesHandler(TestHandlerBase):
def test_get_files_not_allowed(self):
response = self.post(
'/study/prep_files/',
{'type': 'BIOM', 'prep_file': 'uploaded_file.txt', 'study_id': 1})
self.assertEqual(response.code, 405)
if __name__ == "__main__":
main()
| {
"content_hash": "e0477d8c9e15c403b7cd4296e4d9a144",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 78,
"avg_line_length": 38.183673469387756,
"alnum_prop": 0.549973276322822,
"repo_name": "squirrelo/qiita",
"id": "dee37894820b6c8bde7365d0007cfecda66d80d5",
"size": "2221",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qiita_pet/handlers/study_handlers/tests/test_prep_template.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1692"
},
{
"name": "HTML",
"bytes": "449930"
},
{
"name": "JavaScript",
"bytes": "5876"
},
{
"name": "Makefile",
"bytes": "6838"
},
{
"name": "PLSQL",
"bytes": "2359"
},
{
"name": "PLpgSQL",
"bytes": "45311"
},
{
"name": "Python",
"bytes": "1696427"
},
{
"name": "SQLPL",
"bytes": "6192"
},
{
"name": "Shell",
"bytes": "3062"
}
],
"symlink_target": ""
} |
import urllib2
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.management.base import BaseCommand, CommandError
from django.core.urlresolvers import reverse
from django.utils import translation
from django.utils.html import escape
from optparse import make_option
from BeautifulSoup import BeautifulSoup
from haystack_static_pages.models import StaticPage
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('-p', '--port', action='store', dest='port', default=None,
help='The port number to use for internal urls.'),
make_option('-l', '--language', action='store', dest='language', default=None,
help='The language to use when requesting the page'),
)
help = 'Setup static pages defined in HAYSTACK_STATIC_PAGES for indexing by Haystack'
cmd = 'crawl_static_pages [-p PORT] [-l LANG]'
def handle(self, *args, **options):
if args:
raise CommandError('Usage is: %s' % self.cmd)
self.port = options.get('port')
if self.port:
if not self.port.isdigit():
raise CommandError('%r is not a valid port number.' % self.port)
else:
self.port = int(self.port)
count = 0
self.language = options.get('language')
if self.language:
translation.activate(self.language)
for url in settings.HAYSTACK_STATIC_PAGES:
if not url.startswith('http://'):
if self.port:
url = 'http://%s:%r%s' % (Site.objects.get_current().domain, self.port, reverse(url))
else:
url = 'http://%s%s' % (Site.objects.get_current().domain, reverse(url))
print 'Analyzing %s...' % url
try:
page = StaticPage.objects.get(url=url)
print '%s already exists in the index, updating...' % url
except StaticPage.DoesNotExist:
print '%s is new, adding...' % url
page = StaticPage(url=url)
pass
try:
html = urllib2.urlopen(url)
except urllib2.URLError:
print "Error while reading '%s'" % url
continue
soup = BeautifulSoup(html)
try:
page.title = escape(soup.head.title.string)
except AttributeError:
page.title = 'Untitled'
meta = soup.find('meta', attrs={'name': 'description'})
if meta:
page.description = meta.get('content', '')
else:
page.description = ''
page.language = soup.html.get('lang', 'en')
page.content = soup.prettify()
page.save()
count += 1
print 'Crawled %d static pages' % count
| {
"content_hash": "20476cd7b35bbb0f40ac25a30252299b",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 105,
"avg_line_length": 35.55421686746988,
"alnum_prop": 0.5523551338529312,
"repo_name": "trapeze/haystack-static-pages",
"id": "9c53148f3c60d1c8cae01ef1a209f5c68646c0cb",
"size": "2951",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "haystack_static_pages/management/commands/crawl_static_pages.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "5325"
}
],
"symlink_target": ""
} |
from task import Task
from trello import Card
# from abc import ABCMeta #todo make this class abstract
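# A Task backed by a remote source (e.g. a Trello board or a Things list);
# the remote source object is responsible for creating, caching, updating
# and pushing changes for the task.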
class RemoteTask(Task):
@staticmethod
def from_task(existing_task, remote_source):
result = remote_source.add_task(existing_task)
task_id = result[0]
data = result[1]
new_task = remote_source.add_to_cache(task_id, data)
print "Created new from task "
return newTask
def __init__(self, task_id, data, remote_source):
'''inits the remote task from an object representing some remote source'''
Task.__init__(self)
self._uid = task_id
self.remote_source = remote_source
self._data = data
self.update(fetch=False)
def get_uid(self):
return self._uid
def update(self, fetch=False):
'''Every remote updates differently.
For example, Things tasks don't know their list, so check all of the
lists for their uid to determine which one they belong to.'''
self.remote_source.update_task(self, fetch=fetch)
def push_changes(self):
self.remote_source.push_changes(self)
def __eq__(self, other):
if isinstance(other, Card): # "raw" trello card, from py-trello class
return other.id == self._uid
elif isinstance(other, RemoteTask):
return other.get_uid() == self.get_uid()
else:
return False
# todo: determine if instance should be comp'd
| {
"content_hash": "c85ecf92a7151a2679bb7967c2892c3d",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 114,
"avg_line_length": 28.86,
"alnum_prop": 0.6188496188496189,
"repo_name": "bhylak/trello_things3_sync",
"id": "e5ec0ba76e1fd87b4aa62969f74ef73e45bce2b2",
"size": "1443",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tasks/remote_task.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20176"
}
],
"symlink_target": ""
} |
"""
Volume driver for NetApp iSCSI storage systems.
This driver requires NetApp Clustered Data ONTAP or 7-mode
storage systems with installed iSCSI licenses.
"""
import copy
import math
import sys
import time
import uuid
import six
from cinder import exception
from cinder.i18n import _
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
from cinder.openstack.common import units
from cinder import utils
from cinder.volume import driver
from cinder.volume.drivers.netapp.api import NaApiError
from cinder.volume.drivers.netapp.api import NaElement
from cinder.volume.drivers.netapp.api import NaServer
from cinder.volume.drivers.netapp.options import netapp_7mode_opts
from cinder.volume.drivers.netapp.options import netapp_basicauth_opts
from cinder.volume.drivers.netapp.options import netapp_cluster_opts
from cinder.volume.drivers.netapp.options import netapp_connection_opts
from cinder.volume.drivers.netapp.options import netapp_provisioning_opts
from cinder.volume.drivers.netapp.options import netapp_transport_opts
from cinder.volume.drivers.netapp import ssc_utils
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume.drivers.netapp.utils import get_volume_extra_specs
from cinder.volume.drivers.netapp.utils import round_down
from cinder.volume.drivers.netapp.utils import set_safe_attr
from cinder.volume.drivers.netapp.utils import validate_instantiation
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
class NetAppLun(object):
"""Represents a LUN on NetApp storage."""
def __init__(self, handle, name, size, metadata_dict):
self.handle = handle
self.name = name
self.size = size
self.metadata = metadata_dict or {}
def get_metadata_property(self, prop):
"""Get the metadata property of a LUN."""
if prop in self.metadata:
return self.metadata[prop]
name = self.name
msg = _("No metadata property %(prop)s defined for the"
" LUN %(name)s")
msg_fmt = {'prop': prop, 'name': name}
LOG.debug(msg % msg_fmt)
def __str__(self, *args, **kwargs):
return 'NetApp Lun[handle:%s, name:%s, size:%s, metadata:%s]'\
% (self.handle, self.name, self.size, self.metadata)
class NetAppDirectISCSIDriver(driver.ISCSIDriver):
"""NetApp Direct iSCSI volume driver."""
# do not increment this as it may be used in volume type definitions
VERSION = "1.0.0"
IGROUP_PREFIX = 'openstack-'
required_flags = ['netapp_login', 'netapp_password',
'netapp_server_hostname']
def __init__(self, *args, **kwargs):
self._app_version = kwargs.pop("app_version", "unknown")
super(NetAppDirectISCSIDriver, self).__init__(*args, **kwargs)
validate_instantiation(**kwargs)
self.configuration.append_config_values(netapp_connection_opts)
self.configuration.append_config_values(netapp_basicauth_opts)
self.configuration.append_config_values(netapp_transport_opts)
self.configuration.append_config_values(netapp_provisioning_opts)
self.lun_table = {}
def _create_client(self, **kwargs):
"""Instantiate a client for NetApp server.
This method creates NetApp server client for api communication.
"""
host_filer = kwargs['hostname']
LOG.debug('Using NetApp filer: %s' % host_filer)
self.client = NaServer(host=host_filer,
server_type=NaServer.SERVER_TYPE_FILER,
transport_type=kwargs['transport_type'],
style=NaServer.STYLE_LOGIN_PASSWORD,
username=kwargs['login'],
password=kwargs['password'])
if kwargs['port'] is not None:
self.client.set_port(kwargs['port'])
def _do_custom_setup(self):
"""Does custom setup depending on the type of filer."""
raise NotImplementedError()
def _check_flags(self):
"""Ensure that the flags we care about are set."""
required_flags = self.required_flags
for flag in required_flags:
if not getattr(self.configuration, flag, None):
msg = _('%s is not set') % flag
raise exception.InvalidInput(reason=msg)
def do_setup(self, context):
"""Setup the NetApp Volume driver.
Called one time by the manager after the driver is loaded.
Validate the flags we care about and setup NetApp
client.
"""
self._check_flags()
self._create_client(
transport_type=self.configuration.netapp_transport_type,
login=self.configuration.netapp_login,
password=self.configuration.netapp_password,
hostname=self.configuration.netapp_server_hostname,
port=self.configuration.netapp_server_port)
self._do_custom_setup()
def check_for_setup_error(self):
"""Check that the driver is working and can communicate.
Discovers the LUNs on the NetApp server.
"""
self.lun_table = {}
self._get_lun_list()
LOG.debug("Success getting LUN list from server")
def get_pool(self, volume):
"""Return pool name where volume resides.
:param volume: The volume hosted by the driver.
:return: Name of the pool where given volume is hosted.
"""
name = volume['name']
metadata = self._get_lun_attr(name, 'metadata') or dict()
return metadata.get('Volume', None)
def create_volume(self, volume):
"""Driver entry point for creating a new volume (aka ONTAP LUN)."""
LOG.debug('create_volume on %s' % volume['host'])
# get ONTAP volume name as pool name
ontap_volume_name = volume_utils.extract_host(volume['host'],
level='pool')
if ontap_volume_name is None:
msg = _("Pool is not available in the volume host field.")
raise exception.InvalidHost(reason=msg)
lun_name = volume['name']
# start with default size, get requested size
default_size = units.Mi * 100 # 100 MB
size = default_size if not int(volume['size'])\
else int(volume['size']) * units.Gi
metadata = {'OsType': 'linux', 'SpaceReserved': 'true'}
extra_specs = get_volume_extra_specs(volume)
qos_policy_group = extra_specs.pop('netapp:qos_policy_group', None) \
if extra_specs else None
# warn on obsolete extra specs
na_utils.log_extra_spec_warnings(extra_specs)
self.create_lun(ontap_volume_name, lun_name, size,
metadata, qos_policy_group)
LOG.debug('Created LUN with name %s' % lun_name)
metadata['Path'] = '/vol/%s/%s' % (ontap_volume_name, lun_name)
metadata['Volume'] = ontap_volume_name
metadata['Qtree'] = None
handle = self._create_lun_handle(metadata)
self._add_lun_to_table(NetAppLun(handle, lun_name, size, metadata))
def delete_volume(self, volume):
"""Driver entry point for destroying existing volumes."""
name = volume['name']
metadata = self._get_lun_attr(name, 'metadata')
if not metadata:
msg = _("No entry in LUN table for volume/snapshot %(name)s.")
msg_fmt = {'name': name}
LOG.warn(msg % msg_fmt)
return
self._destroy_lun(metadata['Path'])
self.lun_table.pop(name)
def _destroy_lun(self, path, force=True):
"""Destroys the lun at the path."""
lun_destroy = NaElement.create_node_with_children(
'lun-destroy',
**{'path': path})
if force:
lun_destroy.add_new_child('force', 'true')
self.client.invoke_successfully(lun_destroy, True)
seg = path.split("/")
LOG.debug("Destroyed LUN %s" % seg[-1])
def ensure_export(self, context, volume):
"""Driver entry point to get the export info for an existing volume."""
handle = self._get_lun_attr(volume['name'], 'handle')
return {'provider_location': handle}
def create_export(self, context, volume):
"""Driver entry point to get the export info for a new volume."""
handle = self._get_lun_attr(volume['name'], 'handle')
return {'provider_location': handle}
def remove_export(self, context, volume):
"""Driver entry point to remove an export for a volume.
Since exporting is idempotent in this driver, we have nothing
to do for unexporting.
"""
pass
def initialize_connection(self, volume, connector):
"""Driver entry point to attach a volume to an instance.
Do the LUN masking on the storage system so the initiator can access
the LUN on the target. Also return the iSCSI properties so the
initiator can find the LUN. This implementation does not call
_get_iscsi_properties() to get the properties because we cannot store the
LUN number in the database. We only find out what the LUN number will
be during this method call so we construct the properties dictionary
ourselves.
"""
initiator_name = connector['initiator']
name = volume['name']
lun_id = self._map_lun(name, initiator_name, 'iscsi', None)
msg = _("Mapped LUN %(name)s to the initiator %(initiator_name)s")
msg_fmt = {'name': name, 'initiator_name': initiator_name}
LOG.debug(msg % msg_fmt)
target_list = self._get_target_details()
if not target_list:
msg = _('No iscsi target details were found for LUN %s')
raise exception.VolumeBackendAPIException(data=msg % name)
msg = _("Successfully fetched target details for LUN %(name)s and "
"initiator %(initiator_name)s")
msg_fmt = {'name': name, 'initiator_name': initiator_name}
LOG.debug(msg % msg_fmt)
preferred_target = self._get_preferred_target_from_list(
target_list)
if preferred_target is None:
msg = _('Failed to get target portal for the LUN %s')
raise exception.VolumeBackendAPIException(data=msg % name)
(address, port) = (preferred_target['address'],
preferred_target['port'])
iqn = self._get_iscsi_service_details()
if not iqn:
msg = _('Failed to get target IQN for the LUN %s')
raise exception.VolumeBackendAPIException(data=msg % name)
return na_utils.get_iscsi_connection_properties(address, port, iqn,
lun_id, volume)
def _get_preferred_target_from_list(self, target_details_list,
filter=None):
preferred_target = None
for target in target_details_list:
if filter and target['address'] not in filter:
continue
if target.get('interface-enabled', 'true') == 'true':
preferred_target = target
break
if preferred_target is None and len(target_details_list) > 0:
preferred_target = target_details_list[0]
return preferred_target
def create_snapshot(self, snapshot):
"""Driver entry point for creating a snapshot.
This driver implements snapshots by using efficient single-file
(LUN) cloning.
"""
vol_name = snapshot['volume_name']
snapshot_name = snapshot['name']
lun = self._get_lun_from_table(vol_name)
self._clone_lun(lun.name, snapshot_name, 'false')
def delete_snapshot(self, snapshot):
"""Driver entry point for deleting a snapshot."""
self.delete_volume(snapshot)
LOG.debug("Snapshot %s deletion successful" % snapshot['name'])
def create_volume_from_snapshot(self, volume, snapshot):
"""Driver entry point for creating a new volume from a snapshot.
Many would call this "cloning" and in fact we use cloning to implement
this feature.
"""
vol_size = volume['size']
snap_size = snapshot['volume_size']
snapshot_name = snapshot['name']
new_name = volume['name']
self._clone_lun(snapshot_name, new_name, 'true')
if vol_size != snap_size:
try:
self.extend_volume(volume, volume['size'])
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(
_("Resizing %s failed. Cleaning volume."), new_name)
self.delete_volume(volume)
def terminate_connection(self, volume, connector, **kwargs):
"""Driver entry point to unattach a volume from an instance.
Unmask the LUN on the storage system so the given initiator can no
longer access it.
"""
initiator_name = connector['initiator']
name = volume['name']
metadata = self._get_lun_attr(name, 'metadata')
path = metadata['Path']
self._unmap_lun(path, initiator_name)
msg = _("Unmapped LUN %(name)s from the initiator "
"%(initiator_name)s")
msg_fmt = {'name': name, 'initiator_name': initiator_name}
LOG.debug(msg % msg_fmt)
def _get_ontapi_version(self):
"""Gets the supported ontapi version."""
ontapi_version = NaElement('system-get-ontapi-version')
res = self.client.invoke_successfully(ontapi_version, False)
major = res.get_child_content('major-version')
minor = res.get_child_content('minor-version')
return (major, minor)
def create_lun(self, volume_name, lun_name, size,
metadata, qos_policy_group=None):
"""Issues API request for creating LUN on volume."""
path = '/vol/%s/%s' % (volume_name, lun_name)
lun_create = NaElement.create_node_with_children(
'lun-create-by-size',
**{'path': path, 'size': six.text_type(size),
'ostype': metadata['OsType'],
'space-reservation-enabled': metadata['SpaceReserved']})
if qos_policy_group:
lun_create.add_new_child('qos-policy-group', qos_policy_group)
try:
self.client.invoke_successfully(lun_create, True)
except NaApiError as ex:
with excutils.save_and_reraise_exception():
msg = _("Error provisioning volume %(lun_name)s on "
"%(volume_name)s. Details: %(ex)s")
msg_args = {'lun_name': lun_name,
'volume_name': volume_name,
'ex': six.text_type(ex)}
LOG.error(msg % msg_args)
def _get_iscsi_service_details(self):
"""Returns iscsi iqn."""
raise NotImplementedError()
def _get_target_details(self):
"""Gets the target portal details."""
raise NotImplementedError()
def _create_lun_handle(self, metadata):
"""Returns lun handle based on filer type."""
raise NotImplementedError()
def _get_lun_list(self):
"""Gets the list of luns on filer."""
raise NotImplementedError()
def _extract_and_populate_luns(self, api_luns):
"""Extracts the luns from api.
Populates in the lun table.
"""
for lun in api_luns:
meta_dict = self._create_lun_meta(lun)
path = lun.get_child_content('path')
(rest, splitter, name) = path.rpartition('/')
handle = self._create_lun_handle(meta_dict)
size = lun.get_child_content('size')
discovered_lun = NetAppLun(handle, name,
size, meta_dict)
self._add_lun_to_table(discovered_lun)
def _is_naelement(self, elem):
"""Checks if element is NetApp element."""
if not isinstance(elem, NaElement):
raise ValueError('Expects NaElement')
def _map_lun(self, name, initiator, initiator_type='iscsi', lun_id=None):
"""Maps lun to the initiator and returns lun id assigned."""
metadata = self._get_lun_attr(name, 'metadata')
os = metadata['OsType']
path = metadata['Path']
if self._check_allowed_os(os):
os = os
else:
os = 'default'
igroup_name = self._get_or_create_igroup(initiator,
initiator_type, os)
lun_map = NaElement.create_node_with_children(
'lun-map', **{'path': path,
'initiator-group': igroup_name})
if lun_id:
lun_map.add_new_child('lun-id', lun_id)
try:
result = self.client.invoke_successfully(lun_map, True)
return result.get_child_content('lun-id-assigned')
except NaApiError as e:
code = e.code
message = e.message
msg = _('Error mapping lun. Code :%(code)s, Message:%(message)s')
msg_fmt = {'code': code, 'message': message}
exc_info = sys.exc_info()
LOG.warn(msg % msg_fmt)
(igroup, lun_id) = self._find_mapped_lun_igroup(path, initiator)
if lun_id is not None:
return lun_id
else:
raise exc_info[0], exc_info[1], exc_info[2]
def _unmap_lun(self, path, initiator):
"""Unmaps a lun from given initiator."""
(igroup_name, lun_id) = self._find_mapped_lun_igroup(path, initiator)
lun_unmap = NaElement.create_node_with_children(
'lun-unmap',
**{'path': path, 'initiator-group': igroup_name})
try:
self.client.invoke_successfully(lun_unmap, True)
except NaApiError as e:
msg = _("Error unmapping lun. Code :%(code)s,"
" Message:%(message)s")
msg_fmt = {'code': e.code, 'message': e.message}
exc_info = sys.exc_info()
LOG.warn(msg % msg_fmt)
# if the lun is already unmapped
if e.code == '13115' or e.code == '9016':
pass
else:
raise exc_info[0], exc_info[1], exc_info[2]
def _find_mapped_lun_igroup(self, path, initiator, os=None):
"""Find the igroup for mapped lun with initiator."""
raise NotImplementedError()
def _get_or_create_igroup(self, initiator, initiator_type='iscsi',
os='default'):
"""Checks for an igroup for an initiator.
Creates igroup if not found.
"""
igroups = self._get_igroup_by_initiator(initiator=initiator)
igroup_name = None
for igroup in igroups:
if igroup['initiator-group-os-type'] == os:
if igroup['initiator-group-type'] == initiator_type or \
igroup['initiator-group-type'] == 'mixed':
if igroup['initiator-group-name'].startswith(
self.IGROUP_PREFIX):
igroup_name = igroup['initiator-group-name']
break
if not igroup_name:
igroup_name = self.IGROUP_PREFIX + str(uuid.uuid4())
self._create_igroup(igroup_name, initiator_type, os)
self._add_igroup_initiator(igroup_name, initiator)
return igroup_name
def _get_igroup_by_initiator(self, initiator):
"""Get igroups by initiator."""
raise NotImplementedError()
def _check_allowed_os(self, os):
"""Checks if the os type supplied is NetApp supported."""
if os in ['linux', 'aix', 'hpux', 'windows', 'solaris',
'netware', 'vmware', 'openvms', 'xen', 'hyper_v']:
return True
else:
return False
def _create_igroup(self, igroup, igroup_type='iscsi', os_type='default'):
"""Creates igroup with specified args."""
igroup_create = NaElement.create_node_with_children(
'igroup-create',
**{'initiator-group-name': igroup,
'initiator-group-type': igroup_type,
'os-type': os_type})
self.client.invoke_successfully(igroup_create, True)
def _add_igroup_initiator(self, igroup, initiator):
"""Adds initiators to the specified igroup."""
igroup_add = NaElement.create_node_with_children(
'igroup-add',
**{'initiator-group-name': igroup,
'initiator': initiator})
self.client.invoke_successfully(igroup_add, True)
def _add_lun_to_table(self, lun):
"""Adds LUN to cache table."""
if not isinstance(lun, NetAppLun):
msg = _("Object is not a NetApp LUN.")
raise exception.VolumeBackendAPIException(data=msg)
self.lun_table[lun.name] = lun
def _get_lun_from_table(self, name):
"""Gets LUN from cache table.
Refreshes cache if lun not found in cache.
"""
lun = self.lun_table.get(name)
if lun is None:
self._get_lun_list()
lun = self.lun_table.get(name)
if lun is None:
raise exception.VolumeNotFound(volume_id=name)
return lun
def _clone_lun(self, name, new_name, space_reserved='true',
src_block=0, dest_block=0, block_count=0):
"""Clone LUN with the given name to the new name."""
raise NotImplementedError()
def _get_lun_by_args(self, **args):
"""Retrieves luns with specified args."""
raise NotImplementedError()
def _get_lun_attr(self, name, attr):
"""Get the lun attribute if found else None."""
try:
attr = getattr(self._get_lun_from_table(name), attr)
return attr
except exception.VolumeNotFound as e:
LOG.error(_("Message: %s"), e.msg)
except Exception as e:
LOG.error(_("Error getting lun attribute. Exception: %s"),
e.__str__())
return None
def _create_lun_meta(self, lun):
raise NotImplementedError()
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
vol_size = volume['size']
src_vol = self._get_lun_from_table(src_vref['name'])
src_vol_size = src_vref['size']
new_name = volume['name']
self._clone_lun(src_vol.name, new_name, 'true')
if vol_size != src_vol_size:
try:
self.extend_volume(volume, volume['size'])
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(
_("Resizing %s failed. Cleaning volume."), new_name)
self.delete_volume(volume)
def get_volume_stats(self, refresh=False):
"""Get volume stats.
If 'refresh' is True, update the stats first.
"""
if refresh:
self._update_volume_stats()
return self._stats
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
raise NotImplementedError()
def extend_volume(self, volume, new_size):
"""Extend an existing volume to the new size."""
name = volume['name']
lun = self._get_lun_from_table(name)
path = lun.metadata['Path']
curr_size_bytes = str(lun.size)
new_size_bytes = str(int(new_size) * units.Gi)
# Reused by clone scenarios.
# Hence comparing the stored size.
if curr_size_bytes != new_size_bytes:
lun_geometry = self._get_lun_geometry(path)
if (lun_geometry and lun_geometry.get("max_resize")
and int(lun_geometry.get("max_resize")) >=
int(new_size_bytes)):
self._do_direct_resize(path, new_size_bytes)
else:
self._do_sub_clone_resize(path, new_size_bytes)
self.lun_table[name].size = new_size_bytes
else:
LOG.info(_("No need to extend volume %s"
" as it is already the requested new size."), name)
def _do_direct_resize(self, path, new_size_bytes, force=True):
"""Uses the resize api to resize the lun."""
seg = path.split("/")
LOG.info(_("Resizing lun %s directly to new size."), seg[-1])
lun_resize = NaElement("lun-resize")
lun_resize.add_new_child('path', path)
lun_resize.add_new_child('size', new_size_bytes)
if force:
lun_resize.add_new_child('force', 'true')
self.client.invoke_successfully(lun_resize, True)
def _get_lun_geometry(self, path):
"""Gets the lun geometry."""
geometry = {}
lun_geo = NaElement("lun-get-geometry")
lun_geo.add_new_child('path', path)
try:
result = self.client.invoke_successfully(lun_geo, True)
geometry['size'] = result.get_child_content("size")
geometry['bytes_per_sector'] =\
result.get_child_content("bytes-per-sector")
geometry['sectors_per_track'] =\
result.get_child_content("sectors-per-track")
geometry['tracks_per_cylinder'] =\
result.get_child_content("tracks-per-cylinder")
geometry['cylinders'] =\
result.get_child_content("cylinders")
geometry['max_resize'] =\
result.get_child_content("max-resize-size")
except Exception as e:
LOG.error(_("Lun %(path)s geometry failed. Message - %(msg)s")
% {'path': path, 'msg': e.message})
return geometry
def _get_volume_options(self, volume_name):
"""Get the value for the volume option."""
opts = []
vol_option_list = NaElement("volume-options-list-info")
vol_option_list.add_new_child('volume', volume_name)
result = self.client.invoke_successfully(vol_option_list, True)
options = result.get_child_by_name("options")
if options:
opts = options.get_children()
return opts
def _get_vol_option(self, volume_name, option_name):
"""Get the value for the volume option."""
value = None
options = self._get_volume_options(volume_name)
for opt in options:
if opt.get_child_content('name') == option_name:
value = opt.get_child_content('value')
break
return value
def _move_lun(self, path, new_path):
"""Moves the lun at path to new path."""
seg = path.split("/")
new_seg = new_path.split("/")
LOG.debug("Moving lun %(name)s to %(new_name)s."
% {'name': seg[-1], 'new_name': new_seg[-1]})
lun_move = NaElement("lun-move")
lun_move.add_new_child("path", path)
lun_move.add_new_child("new-path", new_path)
self.client.invoke_successfully(lun_move, True)
def _do_sub_clone_resize(self, path, new_size_bytes):
"""Does sub lun clone after verification.
Clones the block ranges, swaps the luns, and deletes the
older lun after a successful clone.
"""
seg = path.split("/")
LOG.info(_("Resizing lun %s using sub clone to new size."), seg[-1])
name = seg[-1]
vol_name = seg[2]
lun = self._get_lun_from_table(name)
metadata = lun.metadata
compression = self._get_vol_option(vol_name, 'compression')
if compression == "on":
msg = _('%s cannot be sub clone resized'
' as it is hosted on compressed volume')
raise exception.VolumeBackendAPIException(data=msg % name)
else:
block_count = self._get_lun_block_count(path)
if block_count == 0:
msg = _('%s cannot be sub clone resized'
' as it contains no blocks.')
raise exception.VolumeBackendAPIException(data=msg % name)
new_lun = 'new-%s' % (name)
self.create_lun(vol_name, new_lun, new_size_bytes, metadata)
try:
self._clone_lun(name, new_lun, block_count=block_count)
self._post_sub_clone_resize(path)
except Exception:
with excutils.save_and_reraise_exception():
new_path = '/vol/%s/%s' % (vol_name, new_lun)
self._destroy_lun(new_path)
def _post_sub_clone_resize(self, path):
"""Try post sub clone resize in a transactional manner."""
st_tm_mv, st_nw_mv, st_del_old = None, None, None
seg = path.split("/")
LOG.info(_("Post clone resize lun %s"), seg[-1])
new_lun = 'new-%s' % (seg[-1])
tmp_lun = 'tmp-%s' % (seg[-1])
tmp_path = "/vol/%s/%s" % (seg[2], tmp_lun)
new_path = "/vol/%s/%s" % (seg[2], new_lun)
try:
st_tm_mv = self._move_lun(path, tmp_path)
st_nw_mv = self._move_lun(new_path, path)
st_del_old = self._destroy_lun(tmp_path)
except Exception as e:
if st_tm_mv is None:
msg = _("Failure staging lun %s to tmp.")
raise exception.VolumeBackendAPIException(data=msg % (seg[-1]))
else:
if st_nw_mv is None:
self._move_lun(tmp_path, path)
msg = _("Failure moving new cloned lun to %s.")
raise exception.VolumeBackendAPIException(
data=msg % (seg[-1]))
elif st_del_old is None:
LOG.error(_("Failure deleting staged tmp lun %s."),
tmp_lun)
else:
LOG.error(_("Unknown exception in"
" post clone resize lun %s."), seg[-1])
LOG.error(_("Exception details: %s") % (e.__str__()))
def _get_lun_block_count(self, path):
"""Gets block counts for the lun."""
LOG.debug("Getting lun block count.")
block_count = 0
lun_infos = self._get_lun_by_args(path=path)
if not lun_infos:
seg = path.split('/')
msg = _('Failure getting lun info for %s.')
raise exception.VolumeBackendAPIException(data=msg % seg[-1])
lun_info = lun_infos[-1]
bs = int(lun_info.get_child_content('block-size'))
ls = int(lun_info.get_child_content('size'))
block_count = ls / bs
return block_count
class NetAppDirectCmodeISCSIDriver(NetAppDirectISCSIDriver):
"""NetApp C-mode iSCSI volume driver."""
DEFAULT_VS = 'openstack'
def __init__(self, *args, **kwargs):
super(NetAppDirectCmodeISCSIDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(netapp_cluster_opts)
def _do_custom_setup(self):
"""Does custom setup for ontap cluster."""
self.vserver = self.configuration.netapp_vserver
self.vserver = self.vserver if self.vserver else self.DEFAULT_VS
# We set vserver in client permanently.
# To use tunneling enable_tunneling while invoking api
self.client.set_vserver(self.vserver)
# Default values to run first api
self.client.set_api_version(1, 15)
(major, minor) = self._get_ontapi_version()
self.client.set_api_version(major, minor)
self.ssc_vols = None
self.stale_vols = set()
def check_for_setup_error(self):
"""Check that the driver is working and can communicate."""
ssc_utils.check_ssc_api_permissions(self.client)
super(NetAppDirectCmodeISCSIDriver, self).check_for_setup_error()
def create_lun(self, volume_name, lun_name, size,
metadata, qos_policy_group=None):
"""Creates a LUN, handling ONTAP differences as needed."""
super(NetAppDirectCmodeISCSIDriver, self).create_lun(
volume_name, lun_name, size, metadata, qos_policy_group)
self._update_stale_vols(
volume=ssc_utils.NetAppVolume(volume_name, self.vserver))
def _get_target_details(self):
"""Gets the target portal details."""
iscsi_if_iter = NaElement('iscsi-interface-get-iter')
result = self.client.invoke_successfully(iscsi_if_iter, True)
tgt_list = []
if result.get_child_content('num-records')\
and int(result.get_child_content('num-records')) >= 1:
attr_list = result.get_child_by_name('attributes-list')
iscsi_if_list = attr_list.get_children()
for iscsi_if in iscsi_if_list:
d = dict()
d['address'] = iscsi_if.get_child_content('ip-address')
d['port'] = iscsi_if.get_child_content('ip-port')
d['tpgroup-tag'] = iscsi_if.get_child_content('tpgroup-tag')
d['interface-enabled'] = iscsi_if.get_child_content(
'is-interface-enabled')
tgt_list.append(d)
return tgt_list
def _get_iscsi_service_details(self):
"""Returns iscsi iqn."""
iscsi_service_iter = NaElement('iscsi-service-get-iter')
result = self.client.invoke_successfully(iscsi_service_iter, True)
if result.get_child_content('num-records') and\
int(result.get_child_content('num-records')) >= 1:
attr_list = result.get_child_by_name('attributes-list')
iscsi_service = attr_list.get_child_by_name('iscsi-service-info')
return iscsi_service.get_child_content('node-name')
LOG.debug('No iscsi service found for vserver %s' % (self.vserver))
return None
def _create_lun_handle(self, metadata):
"""Returns lun handle based on filer type."""
return '%s:%s' % (self.vserver, metadata['Path'])
def _get_lun_list(self):
"""Gets the list of luns on filer.
Gets the luns from cluster with vserver.
"""
tag = None
while True:
api = NaElement('lun-get-iter')
api.add_new_child('max-records', '100')
if tag:
api.add_new_child('tag', tag, True)
lun_info = NaElement('lun-info')
lun_info.add_new_child('vserver', self.vserver)
query = NaElement('query')
query.add_child_elem(lun_info)
api.add_child_elem(query)
result = self.client.invoke_successfully(api)
if result.get_child_by_name('num-records') and\
int(result.get_child_content('num-records')) >= 1:
attr_list = result.get_child_by_name('attributes-list')
self._extract_and_populate_luns(attr_list.get_children())
tag = result.get_child_content('next-tag')
if tag is None:
break
def _find_mapped_lun_igroup(self, path, initiator, os=None):
"""Find the igroup for mapped lun with initiator."""
initiator_igroups = self._get_igroup_by_initiator(initiator=initiator)
lun_maps = self._get_lun_map(path)
if initiator_igroups and lun_maps:
for igroup in initiator_igroups:
igroup_name = igroup['initiator-group-name']
if igroup_name.startswith(self.IGROUP_PREFIX):
for lun_map in lun_maps:
if lun_map['initiator-group'] == igroup_name:
return (igroup_name, lun_map['lun-id'])
return (None, None)
def _get_lun_map(self, path):
"""Gets the lun map by lun path."""
tag = None
map_list = []
while True:
lun_map_iter = NaElement('lun-map-get-iter')
lun_map_iter.add_new_child('max-records', '100')
if tag:
lun_map_iter.add_new_child('tag', tag, True)
query = NaElement('query')
lun_map_iter.add_child_elem(query)
query.add_node_with_children('lun-map-info', **{'path': path})
result = self.client.invoke_successfully(lun_map_iter, True)
tag = result.get_child_content('next-tag')
if result.get_child_content('num-records') and \
int(result.get_child_content('num-records')) >= 1:
attr_list = result.get_child_by_name('attributes-list')
lun_maps = attr_list.get_children()
for lun_map in lun_maps:
lun_m = dict()
lun_m['initiator-group'] = lun_map.get_child_content(
'initiator-group')
lun_m['lun-id'] = lun_map.get_child_content('lun-id')
lun_m['vserver'] = lun_map.get_child_content('vserver')
map_list.append(lun_m)
if tag is None:
break
return map_list
def _get_igroup_by_initiator(self, initiator):
"""Get igroups by initiator."""
tag = None
igroup_list = []
while True:
igroup_iter = NaElement('igroup-get-iter')
igroup_iter.add_new_child('max-records', '100')
if tag:
igroup_iter.add_new_child('tag', tag, True)
query = NaElement('query')
igroup_iter.add_child_elem(query)
igroup_info = NaElement('initiator-group-info')
query.add_child_elem(igroup_info)
igroup_info.add_new_child('vserver', self.vserver)
initiators = NaElement('initiators')
igroup_info.add_child_elem(initiators)
initiators.add_node_with_children('initiator-info',
**{'initiator-name': initiator})
des_attrs = NaElement('desired-attributes')
des_ig_info = NaElement('initiator-group-info')
des_attrs.add_child_elem(des_ig_info)
des_ig_info.add_node_with_children('initiators',
**{'initiator-info': None})
des_ig_info.add_new_child('vserver', None)
des_ig_info.add_new_child('initiator-group-name', None)
des_ig_info.add_new_child('initiator-group-type', None)
des_ig_info.add_new_child('initiator-group-os-type', None)
igroup_iter.add_child_elem(des_attrs)
result = self.client.invoke_successfully(igroup_iter, False)
tag = result.get_child_content('next-tag')
if result.get_child_content('num-records') and\
int(result.get_child_content('num-records')) > 0:
attr_list = result.get_child_by_name('attributes-list')
igroups = attr_list.get_children()
for igroup in igroups:
ig = dict()
ig['initiator-group-os-type'] = igroup.get_child_content(
'initiator-group-os-type')
ig['initiator-group-type'] = igroup.get_child_content(
'initiator-group-type')
ig['initiator-group-name'] = igroup.get_child_content(
'initiator-group-name')
igroup_list.append(ig)
if tag is None:
break
return igroup_list
def _clone_lun(self, name, new_name, space_reserved='true',
src_block=0, dest_block=0, block_count=0):
"""Clone LUN with the given handle to the new name."""
metadata = self._get_lun_attr(name, 'metadata')
volume = metadata['Volume']
# zAPI can only handle 2^24 blocks per range
bc_limit = 2 ** 24 # 8GB
# zAPI can only handle 32 block ranges per call
br_limit = 32
z_limit = br_limit * bc_limit # 256 GB
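# Each clone-create call can therefore cover at most br_limit * bc_limit
# blocks; larger clones are split across multiple calls in the loop below.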
z_calls = int(math.ceil(block_count / float(z_limit)))
zbc = block_count
if z_calls == 0:
z_calls = 1
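        # Illustrative split (assumed numbers): block_count = 3 * 2**28 blocks
        # (~384 GB at 512-byte blocks) exceeds z_limit = 2**29, so two
        # clone-create calls are issued below: the first covers 2**29 blocks,
        # the second the remaining 2**28.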
for call in range(0, z_calls):
if zbc > z_limit:
block_count = z_limit
zbc -= z_limit
else:
block_count = zbc
clone_create = NaElement.create_node_with_children(
'clone-create',
**{'volume': volume, 'source-path': name,
'destination-path': new_name,
'space-reserve': space_reserved})
if block_count > 0:
block_ranges = NaElement("block-ranges")
segments = int(math.ceil(block_count / float(bc_limit)))
bc = block_count
for segment in range(0, segments):
if bc > bc_limit:
block_count = bc_limit
bc -= bc_limit
else:
block_count = bc
block_range = NaElement.create_node_with_children(
'block-range',
**{'source-block-number': str(src_block),
'destination-block-number': str(dest_block),
'block-count': str(block_count)})
block_ranges.add_child_elem(block_range)
src_block += int(block_count)
dest_block += int(block_count)
clone_create.add_child_elem(block_ranges)
self.client.invoke_successfully(clone_create, True)
LOG.debug("Cloned LUN with new name %s" % new_name)
lun = self._get_lun_by_args(vserver=self.vserver, path='/vol/%s/%s'
% (volume, new_name))
if len(lun) == 0:
msg = _("No cloned lun named %s found on the filer")
raise exception.VolumeBackendAPIException(data=msg % (new_name))
clone_meta = self._create_lun_meta(lun[0])
self._add_lun_to_table(NetAppLun('%s:%s' % (clone_meta['Vserver'],
clone_meta['Path']),
new_name,
lun[0].get_child_content('size'),
clone_meta))
self._update_stale_vols(
volume=ssc_utils.NetAppVolume(volume, self.vserver))
def _get_lun_by_args(self, **args):
"""Retrieves lun with specified args."""
lun_iter = NaElement('lun-get-iter')
lun_iter.add_new_child('max-records', '100')
query = NaElement('query')
lun_iter.add_child_elem(query)
query.add_node_with_children('lun-info', **args)
luns = self.client.invoke_successfully(lun_iter)
attr_list = luns.get_child_by_name('attributes-list')
return attr_list.get_children()
def _create_lun_meta(self, lun):
"""Creates lun metadata dictionary."""
self._is_naelement(lun)
meta_dict = {}
meta_dict['Vserver'] = lun.get_child_content('vserver')
meta_dict['Volume'] = lun.get_child_content('volume')
meta_dict['Qtree'] = lun.get_child_content('qtree')
meta_dict['Path'] = lun.get_child_content('path')
meta_dict['OsType'] = lun.get_child_content('multiprotocol-type')
meta_dict['SpaceReserved'] = \
lun.get_child_content('is-space-reservation-enabled')
return meta_dict
def _configure_tunneling(self, do_tunneling=False):
"""Configures tunneling for ontap cluster."""
if do_tunneling:
self.client.set_vserver(self.vserver)
else:
self.client.set_vserver(None)
def _update_volume_stats(self):
"""Retrieve stats info from vserver."""
sync = True if self.ssc_vols is None else False
ssc_utils.refresh_cluster_ssc(self, self.client,
self.vserver, synchronous=sync)
LOG.debug('Updating volume stats')
data = {}
netapp_backend = 'NetApp_iSCSI_Cluster_direct'
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or netapp_backend
data['vendor_name'] = 'NetApp'
data['driver_version'] = self.VERSION
data['storage_protocol'] = 'iSCSI'
data['pools'] = self._get_pool_stats()
na_utils.provide_ems(self, self.client, netapp_backend,
self._app_version)
self._stats = data
def _get_pool_stats(self):
"""Retrieve pool (i.e. ONTAP volume) stats info from SSC volumes."""
pools = []
if not self.ssc_vols:
return pools
for vol in self.ssc_vols['all']:
pool = dict()
pool['pool_name'] = vol.id['name']
pool['QoS_support'] = False
pool['reserved_percentage'] = 0
# convert sizes to GB and de-rate by NetApp multiplier
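            # Illustrative sizing (assumed values): a volume reporting 100 GiB
            # total with netapp_size_multiplier = 1.2 is published as
            # total_capacity_gb = round_down(100 / 1.2, '0.01') = 83.33.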
total = float(vol.space['size_total_bytes'])
total /= self.configuration.netapp_size_multiplier
total /= units.Gi
pool['total_capacity_gb'] = round_down(total, '0.01')
free = float(vol.space['size_avl_bytes'])
free /= self.configuration.netapp_size_multiplier
free /= units.Gi
pool['free_capacity_gb'] = round_down(free, '0.01')
pool['netapp_raid_type'] = vol.aggr['raid_type']
pool['netapp_disk_type'] = vol.aggr['disk_type']
mirrored = vol in self.ssc_vols['mirrored']
pool['netapp_mirrored'] = six.text_type(mirrored).lower()
pool['netapp_unmirrored'] = six.text_type(not mirrored).lower()
dedup = vol in self.ssc_vols['dedup']
pool['netapp_dedup'] = six.text_type(dedup).lower()
pool['netapp_nodedup'] = six.text_type(not dedup).lower()
compression = vol in self.ssc_vols['compression']
pool['netapp_compression'] = six.text_type(compression).lower()
pool['netapp_nocompression'] = six.text_type(
not compression).lower()
thin = vol in self.ssc_vols['thin']
pool['netapp_thin_provisioned'] = six.text_type(thin).lower()
pool['netapp_thick_provisioned'] = six.text_type(not thin).lower()
pools.append(pool)
return pools
@utils.synchronized('update_stale')
def _update_stale_vols(self, volume=None, reset=False):
"""Populates stale vols with vol and returns set copy if reset."""
if volume:
self.stale_vols.add(volume)
if reset:
set_copy = copy.deepcopy(self.stale_vols)
self.stale_vols.clear()
return set_copy
@utils.synchronized("refresh_ssc_vols")
def refresh_ssc_vols(self, vols):
"""Refreshes ssc_vols with latest entries."""
self.ssc_vols = vols
def delete_volume(self, volume):
"""Driver entry point for destroying existing volumes."""
try:
lun = self._get_lun_from_table(volume['name'])
except exception.VolumeNotFound:
lun = None
netapp_vol = None
if lun:
netapp_vol = lun.get_metadata_property('Volume')
super(NetAppDirectCmodeISCSIDriver, self).delete_volume(volume)
if netapp_vol:
self._update_stale_vols(
volume=ssc_utils.NetAppVolume(netapp_vol, self.vserver))
def _get_preferred_target_from_list(self, target_details_list):
# cDOT iSCSI LIFs do not migrate from controller to controller
# in failover. Rather, an iSCSI LIF must be configured on each
# controller and the initiator has to take responsibility for
# using a LIF that is UP. In failover, the iSCSI LIF on the
# downed controller goes DOWN until the controller comes back up.
#
# Currently Nova only accepts a single target when obtaining
# target details from Cinder, so we pass back the first portal
# with an UP iSCSI LIF. There are plans to have Nova accept
# and try multiple targets. When that happens, we can and should
# remove this filter and return all targets since their operational
# state could change between the time we test here and the time
# Nova uses the target.
operational_addresses = (
self._get_operational_network_interface_addresses())
return (super(NetAppDirectCmodeISCSIDriver, self)
._get_preferred_target_from_list(target_details_list,
filter=operational_addresses))
def _get_operational_network_interface_addresses(self):
"""Gets the IP addresses of operational LIFs on the vserver."""
api_args = {
'query': {
'net-interface-info': {
'operational-status': 'up'
}
},
'desired-attributes': {
'net-interface-info': {
'address': None,
}
}
}
result = self.client.send_request('net-interface-get-iter', api_args)
lif_info_list = result.get_child_by_name(
'attributes-list') or NaElement('none')
return [lif_info.get_child_content('address') for lif_info in
lif_info_list.get_children()]
class NetAppDirect7modeISCSIDriver(NetAppDirectISCSIDriver):
"""NetApp 7-mode iSCSI volume driver."""
def __init__(self, *args, **kwargs):
super(NetAppDirect7modeISCSIDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(netapp_7mode_opts)
def _do_custom_setup(self):
"""Does custom setup depending on the type of filer."""
self.vfiler = self.configuration.netapp_vfiler
self.volume_list = self.configuration.netapp_volume_list
if self.volume_list:
self.volume_list = self.volume_list.split(',')
self.volume_list = [el.strip() for el in self.volume_list]
(major, minor) = self._get_ontapi_version()
self.client.set_api_version(major, minor)
if self.vfiler:
self.client.set_vfiler(self.vfiler)
self.vol_refresh_time = None
self.vol_refresh_interval = 1800
self.vol_refresh_running = False
self.vol_refresh_voluntary = False
self.root_volume_name = self._get_root_volume_name()
def check_for_setup_error(self):
"""Check that the driver is working and can communicate."""
api_version = self.client.get_api_version()
if api_version:
major, minor = api_version
if major == 1 and minor < 9:
msg = _("Unsupported ONTAP version."
" ONTAP version 7.3.1 and above is supported.")
raise exception.VolumeBackendAPIException(data=msg)
else:
msg = _("Api version could not be determined.")
raise exception.VolumeBackendAPIException(data=msg)
super(NetAppDirect7modeISCSIDriver, self).check_for_setup_error()
def create_lun(self, volume_name, lun_name, size,
metadata, qos_policy_group=None):
"""Creates a LUN, handling ONTAP differences as needed."""
super(NetAppDirect7modeISCSIDriver, self).create_lun(
volume_name, lun_name, size, metadata, qos_policy_group)
self.vol_refresh_voluntary = True
def _get_filer_volumes(self, volume=None):
"""Returns list of filer volumes in api format."""
vol_request = NaElement('volume-list-info')
if volume:
vol_request.add_new_child('volume', volume)
res = self.client.invoke_successfully(vol_request, True)
volumes = res.get_child_by_name('volumes')
if volumes:
return volumes.get_children()
return []
def _get_root_volume_name(self):
# switch to volume-get-root-name API when possible
vols = self._get_filer_volumes()
for vol in vols:
volume_name = vol.get_child_content('name')
if self._get_vol_option(volume_name, 'root') == 'true':
return volume_name
LOG.warn(_('Could not determine root volume name '
'on %s.') % self._get_owner())
return None
def _get_igroup_by_initiator(self, initiator):
"""Get igroups by initiator."""
igroup_list = NaElement('igroup-list-info')
result = self.client.invoke_successfully(igroup_list, True)
igroups = []
igs = result.get_child_by_name('initiator-groups')
if igs:
ig_infos = igs.get_children()
if ig_infos:
for info in ig_infos:
initiators = info.get_child_by_name('initiators')
init_infos = initiators.get_children()
if init_infos:
for init in init_infos:
if init.get_child_content('initiator-name')\
== initiator:
d = dict()
d['initiator-group-os-type'] = \
info.get_child_content(
'initiator-group-os-type')
d['initiator-group-type'] = \
info.get_child_content(
'initiator-group-type')
d['initiator-group-name'] = \
info.get_child_content(
'initiator-group-name')
igroups.append(d)
return igroups
def _get_target_details(self):
"""Gets the target portal details."""
iscsi_if_iter = NaElement('iscsi-portal-list-info')
result = self.client.invoke_successfully(iscsi_if_iter, True)
tgt_list = []
portal_list_entries = result.get_child_by_name(
'iscsi-portal-list-entries')
if portal_list_entries:
portal_list = portal_list_entries.get_children()
for iscsi_if in portal_list:
d = dict()
d['address'] = iscsi_if.get_child_content('ip-address')
d['port'] = iscsi_if.get_child_content('ip-port')
d['tpgroup-tag'] = iscsi_if.get_child_content('tpgroup-tag')
tgt_list.append(d)
return tgt_list
def _get_iscsi_service_details(self):
"""Returns iscsi iqn."""
iscsi_service_iter = NaElement('iscsi-node-get-name')
result = self.client.invoke_successfully(iscsi_service_iter, True)
return result.get_child_content('node-name')
def _get_owner(self):
if self.vfiler:
owner = '%s:%s' % (self.configuration.netapp_server_hostname,
self.vfiler)
else:
owner = self.configuration.netapp_server_hostname
return owner
def _create_lun_handle(self, metadata):
"""Returns lun handle based on filer type."""
owner = self._get_owner()
return '%s:%s' % (owner, metadata['Path'])
def _get_lun_list(self):
"""Gets the list of luns on filer."""
lun_list = []
if self.volume_list:
for vol in self.volume_list:
try:
luns = self._get_vol_luns(vol)
if luns:
lun_list.extend(luns)
except NaApiError:
LOG.warn(_("Error finding luns for volume %s."
" Verify volume exists.") % (vol))
else:
luns = self._get_vol_luns(None)
lun_list.extend(luns)
self._extract_and_populate_luns(lun_list)
def _get_vol_luns(self, vol_name):
"""Gets the luns for a volume."""
api = NaElement('lun-list-info')
if vol_name:
api.add_new_child('volume-name', vol_name)
result = self.client.invoke_successfully(api, True)
luns = result.get_child_by_name('luns')
return luns.get_children()
def _find_mapped_lun_igroup(self, path, initiator, os=None):
"""Find the igroup for mapped lun with initiator."""
lun_map_list = NaElement.create_node_with_children(
'lun-map-list-info',
**{'path': path})
result = self.client.invoke_successfully(lun_map_list, True)
igroups = result.get_child_by_name('initiator-groups')
if igroups:
igroup = None
lun_id = None
found = False
igroup_infs = igroups.get_children()
for ig in igroup_infs:
initiators = ig.get_child_by_name('initiators')
init_infs = initiators.get_children()
for info in init_infs:
if info.get_child_content('initiator-name') == initiator:
found = True
igroup = ig.get_child_content('initiator-group-name')
lun_id = ig.get_child_content('lun-id')
break
if found:
break
return (igroup, lun_id)
def _clone_lun(self, name, new_name, space_reserved='true',
src_block=0, dest_block=0, block_count=0):
"""Clone LUN with the given handle to the new name."""
metadata = self._get_lun_attr(name, 'metadata')
path = metadata['Path']
(parent, splitter, name) = path.rpartition('/')
clone_path = '%s/%s' % (parent, new_name)
# zAPI can only handle 2^24 blocks per range
bc_limit = 2 ** 24 # 8GB
# zAPI can only handle 32 block ranges per call
br_limit = 32
z_limit = br_limit * bc_limit # 256 GB
z_calls = int(math.ceil(block_count / float(z_limit)))
zbc = block_count
if z_calls == 0:
z_calls = 1
for call in range(0, z_calls):
if zbc > z_limit:
block_count = z_limit
zbc -= z_limit
else:
block_count = zbc
clone_start = NaElement.create_node_with_children(
'clone-start', **{'source-path': path,
'destination-path': clone_path,
'no-snap': 'true'})
if block_count > 0:
block_ranges = NaElement("block-ranges")
                # zAPI can only handle 2^24 blocks per range
bc_limit = 2 ** 24 # 8GB
segments = int(math.ceil(block_count / float(bc_limit)))
bc = block_count
for segment in range(0, segments):
if bc > bc_limit:
block_count = bc_limit
bc -= bc_limit
else:
block_count = bc
block_range = NaElement.create_node_with_children(
'block-range',
**{'source-block-number': str(src_block),
'destination-block-number': str(dest_block),
'block-count': str(block_count)})
block_ranges.add_child_elem(block_range)
src_block += int(block_count)
dest_block += int(block_count)
clone_start.add_child_elem(block_ranges)
result = self.client.invoke_successfully(clone_start, True)
clone_id_el = result.get_child_by_name('clone-id')
cl_id_info = clone_id_el.get_child_by_name('clone-id-info')
vol_uuid = cl_id_info.get_child_content('volume-uuid')
clone_id = cl_id_info.get_child_content('clone-op-id')
if vol_uuid:
self._check_clone_status(clone_id, vol_uuid, name, new_name)
self.vol_refresh_voluntary = True
luns = self._get_lun_by_args(path=clone_path)
if luns:
cloned_lun = luns[0]
self._set_space_reserve(clone_path, space_reserved)
clone_meta = self._create_lun_meta(cloned_lun)
handle = self._create_lun_handle(clone_meta)
self._add_lun_to_table(
NetAppLun(handle, new_name,
cloned_lun.get_child_content('size'),
clone_meta))
else:
raise NaApiError('ENOLUNENTRY', 'No Lun entry found on the filer')
def _set_space_reserve(self, path, enable):
"""Sets the space reserve info."""
space_res = NaElement.create_node_with_children(
'lun-set-space-reservation-info',
**{'path': path, 'enable': enable})
self.client.invoke_successfully(space_res, True)
def _check_clone_status(self, clone_id, vol_uuid, name, new_name):
"""Checks for the job till completed."""
clone_status = NaElement('clone-list-status')
cl_id = NaElement('clone-id')
clone_status.add_child_elem(cl_id)
cl_id.add_node_with_children(
'clone-id-info',
**{'clone-op-id': clone_id, 'volume-uuid': vol_uuid})
running = True
clone_ops_info = None
while running:
result = self.client.invoke_successfully(clone_status, True)
status = result.get_child_by_name('status')
ops_info = status.get_children()
if ops_info:
for info in ops_info:
if info.get_child_content('clone-state') == 'running':
time.sleep(1)
break
else:
running = False
clone_ops_info = info
break
else:
if clone_ops_info:
fmt = {'name': name, 'new_name': new_name}
if clone_ops_info.get_child_content('clone-state')\
== 'completed':
LOG.debug("Clone operation with src %(name)s"
" and dest %(new_name)s completed" % fmt)
else:
LOG.debug("Clone operation with src %(name)s"
" and dest %(new_name)s failed" % fmt)
raise NaApiError(
clone_ops_info.get_child_content('error'),
clone_ops_info.get_child_content('reason'))
def _get_lun_by_args(self, **args):
"""Retrieves luns with specified args."""
lun_info = NaElement.create_node_with_children('lun-list-info', **args)
result = self.client.invoke_successfully(lun_info, True)
luns = result.get_child_by_name('luns')
return luns.get_children()
def _create_lun_meta(self, lun):
"""Creates lun metadata dictionary."""
self._is_naelement(lun)
meta_dict = {}
meta_dict['Path'] = lun.get_child_content('path')
meta_dict['Volume'] = lun.get_child_content('path').split('/')[2]
meta_dict['OsType'] = lun.get_child_content('multiprotocol-type')
meta_dict['SpaceReserved'] = lun.get_child_content(
'is-space-reservation-enabled')
return meta_dict
def _update_volume_stats(self):
"""Retrieve stats info from filer."""
# ensure we get current data
self.vol_refresh_voluntary = True
self._refresh_volume_info()
LOG.debug('Updating volume stats')
data = {}
netapp_backend = 'NetApp_iSCSI_7mode_direct'
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or netapp_backend
data['vendor_name'] = 'NetApp'
data['driver_version'] = self.VERSION
data['storage_protocol'] = 'iSCSI'
data['pools'] = self._get_pool_stats()
na_utils.provide_ems(self, self.client, netapp_backend,
self._app_version, server_type='7mode')
self._stats = data
def _get_pool_stats(self):
"""Retrieve pool (i.e. ONTAP volume) stats info from volumes."""
pools = []
if not self.vols:
return pools
for vol in self.vols:
# omit volumes not specified in the config
volume_name = vol.get_child_content('name')
if self.volume_list and volume_name not in self.volume_list:
continue
# omit root volume
if volume_name == self.root_volume_name:
continue
# ensure good volume state
state = vol.get_child_content('state')
inconsistent = vol.get_child_content('is-inconsistent')
invalid = vol.get_child_content('is-invalid')
if (state != 'online' or
inconsistent != 'false' or
invalid != 'false'):
continue
pool = dict()
pool['pool_name'] = volume_name
pool['QoS_support'] = False
pool['reserved_percentage'] = 0
# convert sizes to GB and de-rate by NetApp multiplier
total = float(vol.get_child_content('size-total') or 0)
total /= self.configuration.netapp_size_multiplier
total /= units.Gi
pool['total_capacity_gb'] = round_down(total, '0.01')
free = float(vol.get_child_content('size-available') or 0)
free /= self.configuration.netapp_size_multiplier
free /= units.Gi
pool['free_capacity_gb'] = round_down(free, '0.01')
pools.append(pool)
return pools
def _get_lun_block_count(self, path):
"""Gets block counts for the lun."""
bs = super(
NetAppDirect7modeISCSIDriver, self)._get_lun_block_count(path)
api_version = self.client.get_api_version()
if api_version:
major = api_version[0]
minor = api_version[1]
if major == 1 and minor < 15:
bs = bs - 1
return bs
def _refresh_volume_info(self):
"""Saves the volume information for the filer."""
if (self.vol_refresh_time is None or self.vol_refresh_voluntary or
timeutils.is_newer_than(self.vol_refresh_time,
self.vol_refresh_interval)):
try:
job_set = set_safe_attr(self, 'vol_refresh_running', True)
if not job_set:
LOG.warn(
_("Volume refresh job already running. Returning..."))
return
self.vol_refresh_voluntary = False
self.vols = self._get_filer_volumes()
self.vol_refresh_time = timeutils.utcnow()
except Exception as e:
LOG.warn(_("Error refreshing volume info. Message: %s"),
six.text_type(e))
finally:
set_safe_attr(self, 'vol_refresh_running', False)
def delete_volume(self, volume):
"""Driver entry point for destroying existing volumes."""
super(NetAppDirect7modeISCSIDriver, self).delete_volume(volume)
self.vol_refresh_voluntary = True
def _get_preferred_target_from_list(self, target_details_list):
# 7-mode iSCSI LIFs migrate from controller to controller
# in failover and flap operational state in transit, so
# we don't filter these on operational state.
return (super(NetAppDirect7modeISCSIDriver, self)
._get_preferred_target_from_list(target_details_list,
filter=None))
| {
"content_hash": "8576c5f68c55e3ba879c12f5b5c1987f",
"timestamp": "",
"source": "github",
"line_count": 1623,
"max_line_length": 79,
"avg_line_length": 41.96734442390635,
"alnum_prop": 0.5579992071998003,
"repo_name": "redhat-openstack/cinder",
"id": "d9bafbb18d7442cdf150ab751e209ec855ebb901",
"size": "68786",
"binary": false,
"copies": "1",
"ref": "refs/heads/f22-patches",
"path": "cinder/volume/drivers/netapp/iscsi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PLpgSQL",
"bytes": "9824"
},
{
"name": "Python",
"bytes": "8537790"
},
{
"name": "Shell",
"bytes": "8429"
}
],
"symlink_target": ""
} |
"""Client-side fork interop tests as a unit test."""
import six
import subprocess
import sys
import threading
import unittest
from grpc._cython import cygrpc
from tests.fork import methods
# A new instance of multiprocessing.Process using fork without exec can and will
# hang if the Python process has any other threads running. This includes the
# additional thread spawned by our _runner.py class. So in order to test our
# compatibility with multiprocessing, we first fork+exec a new process to ensure
# we don't have any conflicting background threads.
_CLIENT_FORK_SCRIPT_TEMPLATE = """if True:
import os
import sys
from grpc._cython import cygrpc
from tests.fork import methods
cygrpc._GRPC_ENABLE_FORK_SUPPORT = True
os.environ['GRPC_POLL_STRATEGY'] = 'epoll1'
methods.TestCase.%s.run_test({
'server_host': 'localhost',
'server_port': %d,
'use_tls': False
})
"""
_SUBPROCESS_TIMEOUT_S = 30
@unittest.skipUnless(
sys.platform.startswith("linux"),
"not supported on windows, and fork+exec networking blocked on mac")
@unittest.skipUnless(six.PY2, "https://github.com/grpc/grpc/issues/18075")
class ForkInteropTest(unittest.TestCase):
def setUp(self):
start_server_script = """if True:
import sys
import time
import grpc
from src.proto.grpc.testing import test_pb2_grpc
from tests.interop import service as interop_service
from tests.unit import test_common
server = test_common.test_server()
test_pb2_grpc.add_TestServiceServicer_to_server(
interop_service.TestService(), server)
port = server.add_insecure_port('[::]:0')
server.start()
print(port)
sys.stdout.flush()
while True:
time.sleep(1)
"""
self._server_process = subprocess.Popen(
[sys.executable, '-c', start_server_script],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
timer = threading.Timer(_SUBPROCESS_TIMEOUT_S,
self._server_process.kill)
try:
timer.start()
self._port = int(self._server_process.stdout.readline())
except ValueError:
raise Exception('Failed to get port from server')
finally:
timer.cancel()
def testConnectivityWatch(self):
self._verifyTestCase(methods.TestCase.CONNECTIVITY_WATCH)
def testCloseChannelBeforeFork(self):
self._verifyTestCase(methods.TestCase.CLOSE_CHANNEL_BEFORE_FORK)
def testAsyncUnarySameChannel(self):
self._verifyTestCase(methods.TestCase.ASYNC_UNARY_SAME_CHANNEL)
def testAsyncUnaryNewChannel(self):
self._verifyTestCase(methods.TestCase.ASYNC_UNARY_NEW_CHANNEL)
def testBlockingUnarySameChannel(self):
self._verifyTestCase(methods.TestCase.BLOCKING_UNARY_SAME_CHANNEL)
def testBlockingUnaryNewChannel(self):
self._verifyTestCase(methods.TestCase.BLOCKING_UNARY_NEW_CHANNEL)
def testInProgressBidiContinueCall(self):
self._verifyTestCase(methods.TestCase.IN_PROGRESS_BIDI_CONTINUE_CALL)
def testInProgressBidiSameChannelAsyncCall(self):
self._verifyTestCase(
methods.TestCase.IN_PROGRESS_BIDI_SAME_CHANNEL_ASYNC_CALL)
def testInProgressBidiSameChannelBlockingCall(self):
self._verifyTestCase(
methods.TestCase.IN_PROGRESS_BIDI_SAME_CHANNEL_BLOCKING_CALL)
def testInProgressBidiNewChannelAsyncCall(self):
self._verifyTestCase(
methods.TestCase.IN_PROGRESS_BIDI_NEW_CHANNEL_ASYNC_CALL)
def testInProgressBidiNewChannelBlockingCall(self):
self._verifyTestCase(
methods.TestCase.IN_PROGRESS_BIDI_NEW_CHANNEL_BLOCKING_CALL)
def tearDown(self):
self._server_process.kill()
def _verifyTestCase(self, test_case):
script = _CLIENT_FORK_SCRIPT_TEMPLATE % (test_case.name, self._port)
process = subprocess.Popen(
[sys.executable, '-c', script],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
timer = threading.Timer(_SUBPROCESS_TIMEOUT_S, process.kill)
try:
timer.start()
try:
out, err = process.communicate(timeout=_SUBPROCESS_TIMEOUT_S)
except TypeError:
# The timeout parameter was added in Python 3.3.
out, err = process.communicate()
except subprocess.TimeoutExpired:
process.kill()
raise RuntimeError('Process failed to terminate')
finally:
timer.cancel()
self.assertEqual(
0, process.returncode,
'process failed with exit code %d (stdout: %s, stderr: %s)' %
(process.returncode, out, err))
if __name__ == '__main__':
unittest.main(verbosity=2)
| {
"content_hash": "8fbc615b038087965d34627fb27d3555",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 80,
"avg_line_length": 35.568345323741006,
"alnum_prop": 0.6490695792880259,
"repo_name": "sreecha/grpc",
"id": "602786c5e0d1e47d977aec5183adffec0c8ec580",
"size": "5521",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/python/grpcio_tests/tests/fork/_fork_interop_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "34038"
},
{
"name": "C",
"bytes": "2461432"
},
{
"name": "C#",
"bytes": "2017000"
},
{
"name": "C++",
"bytes": "31371388"
},
{
"name": "CMake",
"bytes": "653774"
},
{
"name": "CSS",
"bytes": "1519"
},
{
"name": "DTrace",
"bytes": "147"
},
{
"name": "Dockerfile",
"bytes": "156890"
},
{
"name": "Go",
"bytes": "34791"
},
{
"name": "HTML",
"bytes": "14"
},
{
"name": "Java",
"bytes": "6907"
},
{
"name": "JavaScript",
"bytes": "56944"
},
{
"name": "M4",
"bytes": "47783"
},
{
"name": "Makefile",
"bytes": "1005023"
},
{
"name": "Mako",
"bytes": "6211"
},
{
"name": "Objective-C",
"bytes": "561529"
},
{
"name": "Objective-C++",
"bytes": "50927"
},
{
"name": "PHP",
"bytes": "474349"
},
{
"name": "PowerShell",
"bytes": "621"
},
{
"name": "Python",
"bytes": "2838169"
},
{
"name": "Ruby",
"bytes": "1007743"
},
{
"name": "Shell",
"bytes": "472679"
},
{
"name": "Swift",
"bytes": "3516"
},
{
"name": "TSQL",
"bytes": "4901"
},
{
"name": "XSLT",
"bytes": "9673"
}
],
"symlink_target": ""
} |
from chalice.extensions import db
from chalice import init_app
if __name__ == '__main__':
app = init_app()
with app.test_request_context():
db.drop_all()
print 'Dropped the db using the following connection string: %s' % app.config['SQLALCHEMY_DATABASE_URI']
| {
"content_hash": "0ded0afe141b2f98194afff8d1851696",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 108,
"avg_line_length": 31.22222222222222,
"alnum_prop": 0.6690391459074733,
"repo_name": "andrimarjonsson/chalice",
"id": "6cdb18d464592e0d48633d515eb682f6e42df2a6",
"size": "281",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dropdb.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "JavaScript",
"bytes": "24554"
},
{
"name": "Python",
"bytes": "10791"
}
],
"symlink_target": ""
} |
import collections.abc
import inspect
import warnings
from math import ceil
from django.utils.functional import cached_property
from django.utils.inspect import method_has_no_args
from django.utils.translation import gettext_lazy as _
class UnorderedObjectListWarning(RuntimeWarning):
pass
class InvalidPage(Exception):
pass
class PageNotAnInteger(InvalidPage):
pass
class EmptyPage(InvalidPage):
pass
class Paginator:
# Translators: String used to replace omitted page numbers in elided page
# range generated by paginators, e.g. [1, 2, '…', 5, 6, 7, '…', 9, 10].
ELLIPSIS = _("…")
def __init__(self, object_list, per_page, orphans=0, allow_empty_first_page=True):
self.object_list = object_list
self._check_object_list_is_ordered()
self.per_page = int(per_page)
self.orphans = int(orphans)
self.allow_empty_first_page = allow_empty_first_page
def __iter__(self):
for page_number in self.page_range:
yield self.page(page_number)
def validate_number(self, number):
"""Validate the given 1-based page number."""
try:
if isinstance(number, float) and not number.is_integer():
raise ValueError
number = int(number)
except (TypeError, ValueError):
raise PageNotAnInteger(_("That page number is not an integer"))
if number < 1:
raise EmptyPage(_("That page number is less than 1"))
if number > self.num_pages:
raise EmptyPage(_("That page contains no results"))
return number
def get_page(self, number):
"""
Return a valid page, even if the page argument isn't a number or isn't
in range.
"""
try:
number = self.validate_number(number)
except PageNotAnInteger:
number = 1
except EmptyPage:
number = self.num_pages
return self.page(number)
def page(self, number):
"""Return a Page object for the given 1-based page number."""
number = self.validate_number(number)
bottom = (number - 1) * self.per_page
top = bottom + self.per_page
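        # Illustrative slice (assumed numbers): number=2, per_page=10, count=23,
        # orphans=3 -> bottom=10, top=20; since 20 + 3 >= 23 the orphans are
        # absorbed and top becomes 23, so page 2 holds objects 10..22.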
if top + self.orphans >= self.count:
top = self.count
return self._get_page(self.object_list[bottom:top], number, self)
def _get_page(self, *args, **kwargs):
"""
Return an instance of a single page.
This hook can be used by subclasses to use an alternative to the
standard :cls:`Page` object.
"""
return Page(*args, **kwargs)
@cached_property
def count(self):
"""Return the total number of objects, across all pages."""
c = getattr(self.object_list, "count", None)
if callable(c) and not inspect.isbuiltin(c) and method_has_no_args(c):
return c()
return len(self.object_list)
@cached_property
def num_pages(self):
"""Return the total number of pages."""
if self.count == 0 and not self.allow_empty_first_page:
return 0
hits = max(1, self.count - self.orphans)
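        # Illustrative worked example (assumed numbers): count=23, per_page=10,
        # orphans=3 -> hits = max(1, 20) = 20 and num_pages = ceil(20 / 10) = 2.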
return ceil(hits / self.per_page)
@property
def page_range(self):
"""
Return a 1-based range of pages for iterating through within
a template for loop.
"""
return range(1, self.num_pages + 1)
def _check_object_list_is_ordered(self):
"""
Warn if self.object_list is unordered (typically a QuerySet).
"""
ordered = getattr(self.object_list, "ordered", None)
if ordered is not None and not ordered:
obj_list_repr = (
"{} {}".format(
self.object_list.model, self.object_list.__class__.__name__
)
if hasattr(self.object_list, "model")
else "{!r}".format(self.object_list)
)
warnings.warn(
"Pagination may yield inconsistent results with an unordered "
"object_list: {}.".format(obj_list_repr),
UnorderedObjectListWarning,
stacklevel=3,
)
def get_elided_page_range(self, number=1, *, on_each_side=3, on_ends=2):
"""
Return a 1-based range of pages with some values elided.
If the page range is larger than a given size, the whole range is not
provided and a compact form is returned instead, e.g. for a paginator
with 50 pages, if page 43 were the current page, the output, with the
default arguments, would be:
1, 2, …, 40, 41, 42, 43, 44, 45, 46, …, 49, 50.
"""
number = self.validate_number(number)
if self.num_pages <= (on_each_side + on_ends) * 2:
yield from self.page_range
return
if number > (1 + on_each_side + on_ends) + 1:
yield from range(1, on_ends + 1)
yield self.ELLIPSIS
yield from range(number - on_each_side, number + 1)
else:
yield from range(1, number + 1)
if number < (self.num_pages - on_each_side - on_ends) - 1:
yield from range(number + 1, number + on_each_side + 1)
yield self.ELLIPSIS
yield from range(self.num_pages - on_ends + 1, self.num_pages + 1)
else:
yield from range(number + 1, self.num_pages + 1)
class Page(collections.abc.Sequence):
def __init__(self, object_list, number, paginator):
self.object_list = object_list
self.number = number
self.paginator = paginator
def __repr__(self):
return "<Page %s of %s>" % (self.number, self.paginator.num_pages)
def __len__(self):
return len(self.object_list)
def __getitem__(self, index):
if not isinstance(index, (int, slice)):
raise TypeError(
"Page indices must be integers or slices, not %s."
% type(index).__name__
)
# The object_list is converted to a list so that if it was a QuerySet
# it won't be a database hit per __getitem__.
if not isinstance(self.object_list, list):
self.object_list = list(self.object_list)
return self.object_list[index]
def has_next(self):
return self.number < self.paginator.num_pages
def has_previous(self):
return self.number > 1
def has_other_pages(self):
return self.has_previous() or self.has_next()
def next_page_number(self):
return self.paginator.validate_number(self.number + 1)
def previous_page_number(self):
return self.paginator.validate_number(self.number - 1)
def start_index(self):
"""
Return the 1-based index of the first object on this page,
relative to total objects in the paginator.
"""
# Special case, return zero if no items.
if self.paginator.count == 0:
return 0
return (self.paginator.per_page * (self.number - 1)) + 1
def end_index(self):
"""
Return the 1-based index of the last object on this page,
relative to total objects found (hits).
"""
# Special case for the last page because there can be orphans.
if self.number == self.paginator.num_pages:
return self.paginator.count
return self.number * self.paginator.per_page
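# A minimal usage sketch (assumed object_list, not part of this module):
#   paginator = Paginator(['a', 'b', 'c', 'd'], per_page=2)
#   page1 = paginator.page(1)   # <Page 1 of 2>
#   page1.object_list           # ['a', 'b']
#   page1.has_next()            # True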
| {
"content_hash": "8940fdf1dabd82ebdaa9318d8f6b40ca",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 86,
"avg_line_length": 33.61538461538461,
"alnum_prop": 0.5856777493606138,
"repo_name": "django/django",
"id": "131ea0f81121c6f8aab0c7ebf2837b0413749467",
"size": "7439",
"binary": false,
"copies": "8",
"ref": "refs/heads/main",
"path": "django/core/paginator.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "91986"
},
{
"name": "HTML",
"bytes": "238949"
},
{
"name": "JavaScript",
"bytes": "157441"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Python",
"bytes": "16195279"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "392"
}
],
"symlink_target": ""
} |
import datetime as dt
import logging
import os
import re
import traceback
from collections import defaultdict
from typing import List, Optional, Union
import aiohttp
import asyncpg
import discord
from discord.ext import commands
import cogs.utils.context
from cogs.utils import config
from cogs.utils import safe_delete_message
from cogs.utils.database import get_server_property
from cogs.utils.tibia import populate_worlds, tibia_worlds
initial_cogs = {
"cogs.core",
"cogs.serverlog",
"cogs.tracking",
"cogs.owner",
"cogs.mod",
"cogs.admin",
"cogs.tibia",
"cogs.general",
"cogs.loot",
"cogs.tibiawiki",
"cogs.roles",
"cogs.info",
"cogs.calculators",
"cogs.timers"
}
log = logging.getLogger("nabbot")
async def _prefix_callable(bot, msg):
user_id = bot.user.id
base = [f'<@!{user_id}> ', f'<@{user_id}> ']
if msg.guild is None:
base.extend(bot.config.command_prefix)
else:
prefixes = bot.prefixes[msg.guild.id]
base.extend(prefixes)
base = sorted(base, reverse=True)
return base
class NabBot(commands.AutoShardedBot):
def __init__(self):
super().__init__(command_prefix=_prefix_callable, case_insensitive=True, fetch_offline_members=True,
description="Discord bot with functions for the MMORPG Tibia.")
# Remove default help command to implement custom one
self.remove_command("help")
self.users_servers = defaultdict(list)
self.config: config.Config = None
self.pool: asyncpg.pool.Pool = None
self.start_time = dt.datetime.utcnow()
self.session = aiohttp.ClientSession(loop=self.loop)
# Dictionary of worlds tracked by nabbot, key:value = server_id:world
# Dictionary is populated from database
# A list version is created from the dictionary
self.tracked_worlds = {}
self.tracked_worlds_list = []
self.prefixes = defaultdict()
self.__version__ = "2.4.0"
async def on_ready(self):
"""Called when the bot is ready."""
print('Logged in as')
print(self.user)
print(self.user.id)
print(f"Version {self.__version__}")
print('------')
        # Populating members' guild lists
self.users_servers.clear()
for guild in self.guilds:
for member in guild.members:
self.users_servers[member.id].append(guild.id)
async with self.pool.acquire() as conn:
async with conn.transaction():
records = [(user.id, guild.id) for guild in self.guilds for user in guild.members]
await conn.execute("TRUNCATE user_server")
await conn.copy_records_to_table("user_server", columns=["user_id", "server_id"], records=records)
log.info("Bot is online and ready")
async def on_message(self, message: discord.Message):
"""Called every time a message is sent on a visible channel."""
# Ignore if message is from any bot
if message.author.bot:
return
ctx = await self.get_context(message, cls=cogs.utils.context.NabCtx)
if ctx.command is not None:
return await self.invoke(ctx)
# This is a PM, no further info needed
if message.guild is None:
return
if message.content.strip() == f"<@{self.user.id}>":
prefixes = list(self.config.command_prefix)
if ctx.guild:
prefixes = self.prefixes[message.guild.id]
if prefixes:
prefixes_str = ", ".join(f"`{p}`" for p in prefixes)
return await ctx.send(f"My command prefixes are: {prefixes_str}, and mentions. "
f"To see my commands, try: `{prefixes[0]}help.`", delete_after=10)
else:
return await ctx.send(f"My command prefix is mentions. "
f"To see my commands, try: `@{self.user.name} help.`", delete_after=10)
server_delete = await get_server_property(ctx.pool, message.guild.id, "commandsonly")
global_delete = self.config.ask_channel_delete
if (server_delete is None and global_delete or server_delete) and await ctx.is_askchannel():
await safe_delete_message(message)
# ------------ Utility methods ------------
def get_member(self, argument: Union[str, int], guild: Union[discord.Guild, List[discord.Guild]] = None) \
-> Union[discord.Member, discord.User]:
"""Returns a member matching the arguments provided.
If a guild or guild list is specified, then only members from those guilds will be searched. If no guild is
specified, the first member instance will be returned.
        :param argument: The argument to search for; can be an id, name#discriminator, nickname or name
:param guild: The guild or list of guilds that limit the search.
:return: The member found or None.
"""
id_regex = re.compile(r'([0-9]{15,21})$')
mention_regex = re.compile(r'<@!?([0-9]+)>$')
match = id_regex.match(str(argument)) or mention_regex.match(str(argument))
if match is None:
return self.get_member_named(argument, guild)
else:
user_id = int(match.group(1))
if guild is None:
return self.get_user(user_id)
if isinstance(guild, list) and len(guild) == 1:
guild = guild[0]
if isinstance(guild, list) and len(guild) > 0:
members = [m for ml in [g.members for g in guild] for m in ml]
return discord.utils.find(lambda m: m.id == user_id, members)
return guild.get_member(user_id)
def get_member_named(self, name: str, guild: Union[discord.Guild, List[discord.Guild]] = None) -> discord.Member:
"""Returns a member matching the name
If a guild or guild list is specified, then only members from those guilds will be searched. If no guild is
specified, the first member instance will be returned.
:param name: The name, nickname or name#discriminator of the member
:param guild: The guild or list of guilds to limit the search
:return: The member found or none
"""
name = str(name)
members = self.get_all_members()
if isinstance(guild, list) and len(guild) == 1:
guild = guild[0]
if type(guild) is discord.Guild:
members = guild.members
if isinstance(guild, list) and len(guild) > 0:
members = [m for ml in [g.members for g in guild] for m in ml]
if len(name) > 5 and name[-5] == '#':
potential_discriminator = name[-4:]
result = discord.utils.get(members, name=name[:-5], discriminator=potential_discriminator)
if result is not None:
return result
        return discord.utils.find(lambda m: m.display_name.lower() == name.lower() or m.name.lower() == name.lower(),
members)
def get_user_guilds(self, user_id: int) -> List[discord.Guild]:
"""Returns a list of the user's shared guilds with the bot"""
try:
return [self.get_guild(gid) for gid in self.users_servers[user_id]]
except KeyError:
return []
def get_guilds_worlds(self, guild_list: List[discord.Guild]) -> List[str]:
"""Returns a list of all tracked worlds found in a list of guilds."""
return list(set([world for guild, world in self.tracked_worlds.items() if guild in [g.id for g in guild_list]]))
def get_user_worlds(self, user_id: int) -> List[str]:
"""Returns a list of all the tibia worlds the user is tracked in.
This is based on the tracked world of each guild the user belongs to.
        The guild list is taken from the guilds the user shares with the bot."""
guild_list = self.get_user_guilds(user_id)
return self.get_guilds_worlds(guild_list)
def get_channel_or_top(self, guild: discord.Guild, channel_id: int) -> discord.TextChannel:
"""Returns a guild's channel by id, returns none if channel doesn't exist
It also checks if the bot has permissions on that channel, if not, it will return the top channel too."""
if channel_id is None:
return self.get_top_channel(guild)
channel = guild.get_channel(int(channel_id))
if channel is None:
return self.get_top_channel(guild)
permissions = channel.permissions_for(guild.me)
if not permissions.read_messages or not permissions.send_messages:
return self.get_top_channel(guild)
return channel
async def send_log_message(self, guild: discord.Guild, content=None, *, embed: discord.Embed = None):
"""Sends a message on the server-log channel
If the channel doesn't exist, it doesn't send anything or give of any warnings as it meant to be an optional
feature."""
ask_channel_id = await get_server_property(self.pool, guild.id, "serverlog")
channel = None
if ask_channel_id:
channel = guild.get_channel(ask_channel_id)
if channel is None:
channel = self.get_channel_by_name(self.config.log_channel_name, guild)
if channel is None:
return
try:
await channel.send(content=content, embed=embed)
return True
except discord.HTTPException:
return False
def get_channel_by_name(self, name: str, guild: discord.Guild) -> discord.TextChannel:
"""Finds a channel by name on all the servers the bot is in.
If guild is specified, only channels in that guild will be searched"""
if guild is None:
            channel = discord.utils.find(lambda m: m.name == name and not isinstance(m, discord.VoiceChannel),
                                         self.get_all_channels())
        else:
            channel = discord.utils.find(lambda m: m.name == name and not isinstance(m, discord.VoiceChannel),
                                         guild.channels)
return channel
def get_guild_by_name(self, name: str) -> discord.Guild:
"""Returns a guild by its name"""
guild = discord.utils.find(lambda m: m.name.lower() == name.lower(), self.guilds)
return guild
@staticmethod
def get_top_channel(guild: discord.Guild) -> Optional[discord.TextChannel]:
"""Returns the highest text channel on the list.
If writeable_only is set, the first channel where the bot can write is returned
If None it returned, the guild has no channels or the bot can't write on any channel"""
if guild is None:
return None
for channel in guild.text_channels:
if channel.permissions_for(guild.me).send_messages:
return channel
return None
async def reload_worlds(self):
"""Refresh the world list from the database
This is used to avoid reading the database every time the world list is needed.
A global variable holding the world list is loaded on startup and refreshed only when worlds are modified"""
tibia_servers_dict_temp = {}
rows = await self.pool.fetch("SELECT server_id, value FROM server_property WHERE key = $1 ORDER BY value ASC",
"world")
del self.tracked_worlds_list[:]
if len(rows) > 0:
for row in rows:
value = row["value"]
if value not in self.tracked_worlds_list:
self.tracked_worlds_list.append(value)
tibia_servers_dict_temp[int(row["server_id"])] = value
self.tracked_worlds.clear()
self.tracked_worlds.update(tibia_servers_dict_temp)
async def load_prefixes(self):
"""Populates the prefix mapping."""
rows = await self.pool.fetch("SELECT server_id, prefixes FROM server_prefixes")
for row in rows:
self.prefixes[row['server_id']] = row['prefixes']
def run(self):
print("Loading config...")
config.parse()
self.config = config
self.prefixes = defaultdict(lambda: list(config.command_prefix))
# List of tracked worlds for NabBot
self.loop.run_until_complete(self.reload_worlds())
# List of all Tibia worlds
self.loop.run_until_complete(populate_worlds())
# Load prefixes
self.loop.run_until_complete(self.load_prefixes())
if len(tibia_worlds) == 0:
print("Critical information was not available: NabBot can not start without the World List.")
quit()
token = get_token()
print("Loading cogs...")
for cog in initial_cogs:
try:
self.load_extension(cog)
print(f"Cog {cog} loaded successfully.")
except ModuleNotFoundError:
print(f"Could not find cog: {cog}")
except Exception:
print(f'Cog {cog} failed to load:')
traceback.print_exc(limit=-1)
log.exception(f'Cog {cog} failed to load')
for extra in config.extra_cogs:
try:
self.load_extension(extra)
print(f"Extra cog {extra} loaded successfully.")
except ModuleNotFoundError:
print(f"Could not find extra cog: {extra}")
except Exception:
print(f'Extra cog {extra} failed to load:')
traceback.print_exc(limit=-1)
log.exception(f'Extra cog {extra} failed to load:')
try:
print("Attempting login...")
super().run(token)
except discord.errors.LoginFailure:
print("Invalid token. Edit token.txt to fix it.")
input("\nPress any key to continue...")
quit()
def get_token():
"""When the bot is run without a login.py file, it prompts the user for login info"""
if not os.path.isfile("token.txt"):
print("This seems to be the first time NabBot is ran (or token.txt is missing)")
print("To run your own instance of NabBot you need to create a new bot account to get a bot token")
print("https://discordapp.com/developers/applications/me")
print("Enter the token:")
token = input(">>")
if len(token) < 50:
input("What you entered isn't a token. Restart NabBot to retry.")
quit()
with open("token.txt", "w+") as f:
f.write(token)
print("Token has been saved to token.txt, you can edit this file later to change it.")
input("Press any key to start NabBot now...")
return token
else:
with open("token.txt") as f:
return f.read().strip()
if __name__ == "__main__":
print("NabBot can't be started from this file anymore. Use launcher.py.")
| {
"content_hash": "70aaa61b9e44714f92b3228dbed458ae",
"timestamp": "",
"source": "github",
"line_count": 356,
"max_line_length": 120,
"avg_line_length": 42.33988764044944,
"alnum_prop": 0.6043256153386851,
"repo_name": "Galarzaa90/NabBot",
"id": "51f98ecbb1fd40f5567b36bf0b29c6a9ccdce039",
"size": "15657",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nabbot.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "616815"
}
],
"symlink_target": ""
} |
"""
Some utility functions that can be used in this project.
Currently, we only provide one function:
- L{dice_coefficient}
"""
import re
def dice_coefficient(a, b, ignore_case=True):
"""
Calculate dice coefficient
Downloaded from
U{http://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Dice's_coefficient#Python}.
And then extended to add ignore_case parameter.
@param a: First string
@param b: Second string
@param ignore_case: Ignore case in calculation
@return: Coefficient (0 < coef < 1). The higher the closer.
"""
if not len(a) or not len(b):
return 0.0
if ignore_case:
a = a.lower()
b = b.lower()
if len(a) == 1:
a = a + u'.'
if len(b) == 1:
b = b + u'.'
a_bigram_list = []
for i in range(len(a) - 1):
a_bigram_list.append(a[i:i + 2])
b_bigram_list = []
for i in range(len(b) - 1):
b_bigram_list.append(b[i:i + 2])
a_bigrams = set(a_bigram_list)
b_bigrams = set(b_bigram_list)
overlap = len(a_bigrams & b_bigrams)
dice_coeff = overlap * 2.0 / (len(a_bigrams) + len(b_bigrams))
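    # Illustrative call (assumed strings): dice_coefficient("night", "nacht")
    # shares one bigram ('ht') out of 4 + 4, giving 2 * 1 / 8 = 0.25.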
return dice_coeff
def strings_contained(complete_name, reduced_name):
"""
Look for strings in reduced_name in complete_name
The idea is to search for all the substrings of reduced_name inside
    complete_name. Substrings of reduced_name are calculated by splitting
    on each non-word character.
    If all substrings are found, it's quite a good sign that the name is close
@param complete_name: Name of the string to look in (most likely bigger
than the real string, with junk)
@param reduced_name: Name to look for, is it in the string
@return: Percentage of word found
@rtype: float
"""
    substrings = re.split(r'\W+', reduced_name)
return (len([1 for string in substrings if string in complete_name]) /
len(substrings))
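# Illustrative call (assumed strings): strings_contained("The.Matrix.1999.720p",
# "The Matrix") splits the reduced name into ['The', 'Matrix']; both substrings
# occur in the complete name, so every word is found (2 out of 2).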
| {
"content_hash": "d1fb8c9e2b53224b3746a82d45cdbc60",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 95,
"avg_line_length": 28.115942028985508,
"alnum_prop": 0.6345360824742268,
"repo_name": "apelisse/subgetter",
"id": "ff974b1498c775848c5c2948cef7ecedba84fef7",
"size": "1965",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "misc.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "69892"
}
],
"symlink_target": ""
} |
"Default tags used by the template system, available to all templates."
from django.template import Node, NodeList, Template, Context, resolve_variable
from django.template import TemplateSyntaxError, VariableDoesNotExist, BLOCK_TAG_START, BLOCK_TAG_END, VARIABLE_TAG_START, VARIABLE_TAG_END, SINGLE_BRACE_START, SINGLE_BRACE_END, COMMENT_TAG_START, COMMENT_TAG_END
from django.template import get_library, Library, InvalidTemplateLibrary
from django.conf import settings
import sys
register = Library()
class CommentNode(Node):
def render(self, context):
return ''
class CycleNode(Node):
def __init__(self, cyclevars, variable_name=None):
self.cyclevars = cyclevars
self.cyclevars_len = len(cyclevars)
self.counter = -1
self.variable_name = variable_name
def render(self, context):
self.counter += 1
value = self.cyclevars[self.counter % self.cyclevars_len]
if self.variable_name:
context[self.variable_name] = value
return value
class DebugNode(Node):
def render(self, context):
from pprint import pformat
output = [pformat(val) for val in context]
output.append('\n\n')
output.append(pformat(sys.modules))
return ''.join(output)
class FilterNode(Node):
def __init__(self, filter_expr, nodelist):
self.filter_expr, self.nodelist = filter_expr, nodelist
def render(self, context):
output = self.nodelist.render(context)
# apply filters
context.update({'var': output})
filtered = self.filter_expr.resolve(context)
context.pop()
return filtered
class FirstOfNode(Node):
def __init__(self, vars):
self.vars = vars
def render(self, context):
for var in self.vars:
try:
value = resolve_variable(var, context)
except VariableDoesNotExist:
continue
if value:
return str(value)
return ''
class ForNode(Node):
def __init__(self, loopvar, sequence, reversed, nodelist_loop):
self.loopvar, self.sequence = loopvar, sequence
self.reversed = reversed
self.nodelist_loop = nodelist_loop
def __repr__(self):
if self.reversed:
reversed = ' reversed'
else:
reversed = ''
return "<For Node: for %s in %s, tail_len: %d%s>" % \
(self.loopvar, self.sequence, len(self.nodelist_loop), reversed)
def __iter__(self):
for node in self.nodelist_loop:
yield node
def get_nodes_by_type(self, nodetype):
nodes = []
if isinstance(self, nodetype):
nodes.append(self)
nodes.extend(self.nodelist_loop.get_nodes_by_type(nodetype))
return nodes
def render(self, context):
nodelist = NodeList()
if 'forloop' in context:
parentloop = context['forloop']
else:
parentloop = {}
context.push()
try:
values = self.sequence.resolve(context, True)
except VariableDoesNotExist:
values = []
if values is None:
values = []
if not hasattr(values, '__len__'):
values = list(values)
len_values = len(values)
if self.reversed:
# From http://www.python.org/doc/current/tut/node11.html
def reverse(data):
for index in range(len(data)-1, -1, -1):
yield data[index]
values = reverse(values)
for i, item in enumerate(values):
context['forloop'] = {
# shortcuts for current loop iteration number
'counter0': i,
'counter': i+1,
# reverse counter iteration numbers
'revcounter': len_values - i,
'revcounter0': len_values - i - 1,
# boolean values designating first and last times through loop
'first': (i == 0),
'last': (i == len_values - 1),
'parentloop': parentloop,
}
context[self.loopvar] = item
for node in self.nodelist_loop:
nodelist.append(node.render(context))
context.pop()
return nodelist.render(context)
class IfChangedNode(Node):
def __init__(self, nodelist, *varlist):
self.nodelist = nodelist
self._last_seen = None
self._varlist = varlist
def render(self, context):
if 'forloop' in context and context['forloop']['first']:
self._last_seen = None
try:
if self._varlist:
# Consider multiple parameters.
                # This automatically behaves like an OR evaluation of the multiple variables.
compare_to = [resolve_variable(var, context) for var in self._varlist]
else:
compare_to = self.nodelist.render(context)
except VariableDoesNotExist:
compare_to = None
if compare_to != self._last_seen:
firstloop = (self._last_seen == None)
self._last_seen = compare_to
context.push()
context['ifchanged'] = {'firstloop': firstloop}
content = self.nodelist.render(context)
context.pop()
return content
else:
return ''
class IfEqualNode(Node):
def __init__(self, var1, var2, nodelist_true, nodelist_false, negate):
self.var1, self.var2 = var1, var2
self.nodelist_true, self.nodelist_false = nodelist_true, nodelist_false
self.negate = negate
def __repr__(self):
return "<IfEqualNode>"
def render(self, context):
try:
val1 = resolve_variable(self.var1, context)
except VariableDoesNotExist:
val1 = None
try:
val2 = resolve_variable(self.var2, context)
except VariableDoesNotExist:
val2 = None
if (self.negate and val1 != val2) or (not self.negate and val1 == val2):
return self.nodelist_true.render(context)
return self.nodelist_false.render(context)
class IfNode(Node):
def __init__(self, bool_exprs, nodelist_true, nodelist_false, link_type):
self.bool_exprs = bool_exprs
self.nodelist_true, self.nodelist_false = nodelist_true, nodelist_false
self.link_type = link_type
def __repr__(self):
return "<If node>"
def __iter__(self):
for node in self.nodelist_true:
yield node
for node in self.nodelist_false:
yield node
def get_nodes_by_type(self, nodetype):
nodes = []
if isinstance(self, nodetype):
nodes.append(self)
nodes.extend(self.nodelist_true.get_nodes_by_type(nodetype))
nodes.extend(self.nodelist_false.get_nodes_by_type(nodetype))
return nodes
def render(self, context):
if self.link_type == IfNode.LinkTypes.or_:
for ifnot, bool_expr in self.bool_exprs:
try:
value = bool_expr.resolve(context, True)
except VariableDoesNotExist:
value = None
if (value and not ifnot) or (ifnot and not value):
return self.nodelist_true.render(context)
return self.nodelist_false.render(context)
else:
for ifnot, bool_expr in self.bool_exprs:
try:
value = bool_expr.resolve(context, True)
except VariableDoesNotExist:
value = None
if not ((value and not ifnot) or (ifnot and not value)):
return self.nodelist_false.render(context)
return self.nodelist_true.render(context)
class LinkTypes:
and_ = 0,
or_ = 1
class RegroupNode(Node):
def __init__(self, target, expression, var_name):
self.target, self.expression = target, expression
self.var_name = var_name
def render(self, context):
obj_list = self.target.resolve(context, True)
if obj_list == None: # target_var wasn't found in context; fail silently
context[self.var_name] = []
return ''
output = [] # list of dictionaries in the format {'grouper': 'key', 'list': [list of contents]}
for obj in obj_list:
grouper = self.expression.resolve(obj, True)
# TODO: Is this a sensible way to determine equality?
if output and repr(output[-1]['grouper']) == repr(grouper):
output[-1]['list'].append(obj)
else:
output.append({'grouper': grouper, 'list': [obj]})
context[self.var_name] = output
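        # Illustrative result shape (assumed input): regrouping people by a
        # 'gender' expression leaves context[self.var_name] looking like
        # [{'grouper': 'Male', 'list': [...]}, {'grouper': 'Female', 'list': [...]}].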
return ''
def include_is_allowed(filepath):
for root in settings.ALLOWED_INCLUDE_ROOTS:
if filepath.startswith(root):
return True
return False
class SsiNode(Node):
def __init__(self, filepath, parsed):
self.filepath, self.parsed = filepath, parsed
def render(self, context):
if not include_is_allowed(self.filepath):
if settings.DEBUG:
return "[Didn't have permission to include file]"
else:
return '' # Fail silently for invalid includes.
try:
fp = open(self.filepath, 'r')
output = fp.read()
fp.close()
except IOError:
output = ''
if self.parsed:
try:
t = Template(output, name=self.filepath)
return t.render(context)
except TemplateSyntaxError, e:
if settings.DEBUG:
return "[Included template had syntax error: %s]" % e
else:
return '' # Fail silently for invalid included templates.
return output
class LoadNode(Node):
def render(self, context):
return ''
class NowNode(Node):
def __init__(self, format_string):
self.format_string = format_string
def render(self, context):
from datetime import datetime
from django.utils.dateformat import DateFormat
df = DateFormat(datetime.now())
return df.format(self.format_string)
class SpacelessNode(Node):
def __init__(self, nodelist):
self.nodelist = nodelist
def render(self, context):
from django.utils.html import strip_spaces_between_tags
return strip_spaces_between_tags(self.nodelist.render(context).strip())
class TemplateTagNode(Node):
mapping = {'openblock': BLOCK_TAG_START,
'closeblock': BLOCK_TAG_END,
'openvariable': VARIABLE_TAG_START,
'closevariable': VARIABLE_TAG_END,
'openbrace': SINGLE_BRACE_START,
'closebrace': SINGLE_BRACE_END,
'opencomment': COMMENT_TAG_START,
'closecomment': COMMENT_TAG_END,
}
def __init__(self, tagtype):
self.tagtype = tagtype
def render(self, context):
return self.mapping.get(self.tagtype, '')
class URLNode(Node):
def __init__(self, view_name, args, kwargs):
self.view_name = view_name
self.args = args
self.kwargs = kwargs
def render(self, context):
from django.core.urlresolvers import reverse, NoReverseMatch
args = [arg.resolve(context) for arg in self.args]
kwargs = dict([(k, v.resolve(context)) for k, v in self.kwargs.items()])
try:
return reverse(self.view_name, args=args, kwargs=kwargs)
except NoReverseMatch:
try:
project_name = settings.SETTINGS_MODULE.split('.')[0]
return reverse(project_name + '.' + self.view_name, args=args, kwargs=kwargs)
except NoReverseMatch:
return ''
class WidthRatioNode(Node):
def __init__(self, val_expr, max_expr, max_width):
self.val_expr = val_expr
self.max_expr = max_expr
self.max_width = max_width
def render(self, context):
try:
value = self.val_expr.resolve(context)
maxvalue = self.max_expr.resolve(context)
except VariableDoesNotExist:
return ''
try:
value = float(value)
maxvalue = float(maxvalue)
ratio = (value / maxvalue) * int(self.max_width)
except (ValueError, ZeroDivisionError):
return ''
return str(int(round(ratio)))
class WithNode(Node):
def __init__(self, var, name, nodelist):
self.var = var
self.name = name
self.nodelist = nodelist
def __repr__(self):
return "<WithNode>"
def render(self, context):
val = self.var.resolve(context)
context.push()
context[self.name] = val
output = self.nodelist.render(context)
context.pop()
return output
#@register.tag
def comment(parser, token):
"""
Ignore everything between ``{% comment %}`` and ``{% endcomment %}``
"""
parser.skip_past('endcomment')
return CommentNode()
comment = register.tag(comment)
#@register.tag
def cycle(parser, token):
"""
Cycle among the given strings each time this tag is encountered
Within a loop, cycles among the given strings each time through
the loop::
{% for o in some_list %}
<tr class="{% cycle row1,row2 %}">
...
</tr>
{% endfor %}
Outside of a loop, give the values a unique name the first time you call
    it, then use that name each successive time through::
<tr class="{% cycle row1,row2,row3 as rowcolors %}">...</tr>
<tr class="{% cycle rowcolors %}">...</tr>
<tr class="{% cycle rowcolors %}">...</tr>
    You can use any number of values, separated by commas. Make sure not to
put spaces between the values -- only commas.
"""
# Note: This returns the exact same node on each {% cycle name %} call; that
# is, the node object returned from {% cycle a,b,c as name %} and the one
# returned from {% cycle name %} are the exact same object. This shouldn't
# cause problems (heh), but if it does, now you know.
#
# Ugly hack warning: this stuffs the named template dict into parser so
# that names are only unique within each template (as opposed to using
# a global variable, which would make cycle names have to be unique across
# *all* templates.
args = token.contents.split()
if len(args) < 2:
raise TemplateSyntaxError("'Cycle' statement requires at least two arguments")
elif len(args) == 2 and "," in args[1]:
# {% cycle a,b,c %}
cyclevars = [v for v in args[1].split(",") if v] # split and kill blanks
return CycleNode(cyclevars)
# {% cycle name %}
elif len(args) == 2:
name = args[1]
if not hasattr(parser, '_namedCycleNodes'):
raise TemplateSyntaxError("No named cycles in template: '%s' is not defined" % name)
if name not in parser._namedCycleNodes:
raise TemplateSyntaxError("Named cycle '%s' does not exist" % name)
return parser._namedCycleNodes[name]
elif len(args) == 4:
# {% cycle a,b,c as name %}
if args[2] != 'as':
raise TemplateSyntaxError("Second 'cycle' argument must be 'as'")
cyclevars = [v for v in args[1].split(",") if v] # split and kill blanks
name = args[3]
node = CycleNode(cyclevars, name)
if not hasattr(parser, '_namedCycleNodes'):
parser._namedCycleNodes = {}
parser._namedCycleNodes[name] = node
return node
else:
raise TemplateSyntaxError("Invalid arguments to 'cycle': %s" % args)
cycle = register.tag(cycle)
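# A minimal sketch of the named-cycle sharing described in the comment above:
# {% cycle a,b as name %} and a later {% cycle name %} resolve to the very same
# CycleNode, so the sequence keeps advancing across both tags. This assumes a
# configured DJANGO_SETTINGS_MODULE and the public django.template API.
def _sketch_named_cycle_sharing():
    from django.template import Template, Context
    t = Template("{% cycle odd,even as parity %} {% cycle parity %} {% cycle parity %}")
    return t.render(Context({}))  # expected to alternate, e.g. "odd even odd"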
def debug(parser, token):
"""
Output a whole load of debugging information, including the current context and imported modules.
Sample usage::
<pre>
{% debug %}
</pre>
"""
return DebugNode()
debug = register.tag(debug)
#@register.tag(name="filter")
def do_filter(parser, token):
"""
    Filter the contents of the block through variable filters.
Filters can also be piped through each other, and they can have
arguments -- just like in variable syntax.
Sample usage::
{% filter escape|lower %}
This text will be HTML-escaped, and will appear in lowercase.
{% endfilter %}
"""
_, rest = token.contents.split(None, 1)
filter_expr = parser.compile_filter("var|%s" % (rest))
nodelist = parser.parse(('endfilter',))
parser.delete_first_token()
return FilterNode(filter_expr, nodelist)
filter = register.tag("filter", do_filter)
#@register.tag
def firstof(parser, token):
"""
Outputs the first variable passed that is not False.
Outputs nothing if all the passed variables are False.
Sample usage::
{% firstof var1 var2 var3 %}
This is equivalent to::
{% if var1 %}
{{ var1 }}
{% else %}{% if var2 %}
{{ var2 }}
{% else %}{% if var3 %}
{{ var3 }}
{% endif %}{% endif %}{% endif %}
but obviously much cleaner!
"""
bits = token.contents.split()[1:]
if len(bits) < 1:
raise TemplateSyntaxError, "'firstof' statement requires at least one argument"
return FirstOfNode(bits)
firstof = register.tag(firstof)
#@register.tag(name="for")
def do_for(parser, token):
"""
Loop over each item in an array.
For example, to display a list of athletes given ``athlete_list``::
<ul>
{% for athlete in athlete_list %}
<li>{{ athlete.name }}</li>
{% endfor %}
</ul>
You can also loop over a list in reverse by using
``{% for obj in list reversed %}``.
The for loop sets a number of variables available within the loop:
========================== ================================================
Variable Description
========================== ================================================
``forloop.counter`` The current iteration of the loop (1-indexed)
``forloop.counter0`` The current iteration of the loop (0-indexed)
``forloop.revcounter`` The number of iterations from the end of the
loop (1-indexed)
``forloop.revcounter0`` The number of iterations from the end of the
loop (0-indexed)
``forloop.first`` True if this is the first time through the loop
``forloop.last`` True if this is the last time through the loop
``forloop.parentloop`` For nested loops, this is the loop "above" the
current one
========================== ================================================
"""
bits = token.contents.split()
if len(bits) == 5 and bits[4] != 'reversed':
raise TemplateSyntaxError, "'for' statements with five words should end in 'reversed': %s" % token.contents
if len(bits) not in (4, 5):
raise TemplateSyntaxError, "'for' statements should have either four or five words: %s" % token.contents
if bits[2] != 'in':
raise TemplateSyntaxError, "'for' statement must contain 'in' as the second word: %s" % token.contents
loopvar = bits[1]
sequence = parser.compile_filter(bits[3])
reversed = (len(bits) == 5)
nodelist_loop = parser.parse(('endfor',))
parser.delete_first_token()
return ForNode(loopvar, sequence, reversed, nodelist_loop)
do_for = register.tag("for", do_for)
def do_ifequal(parser, token, negate):
bits = list(token.split_contents())
if len(bits) != 3:
raise TemplateSyntaxError, "%r takes two arguments" % bits[0]
end_tag = 'end' + bits[0]
nodelist_true = parser.parse(('else', end_tag))
token = parser.next_token()
if token.contents == 'else':
nodelist_false = parser.parse((end_tag,))
parser.delete_first_token()
else:
nodelist_false = NodeList()
return IfEqualNode(bits[1], bits[2], nodelist_true, nodelist_false, negate)
#@register.tag
def ifequal(parser, token):
"""
Output the contents of the block if the two arguments equal each other.
Examples::
{% ifequal user.id comment.user_id %}
...
{% endifequal %}
{% ifnotequal user.id comment.user_id %}
...
{% else %}
...
{% endifnotequal %}
"""
return do_ifequal(parser, token, False)
ifequal = register.tag(ifequal)
#@register.tag
def ifnotequal(parser, token):
"""Output the contents of the block if the two arguments are not equal. See ifequal."""
return do_ifequal(parser, token, True)
ifnotequal = register.tag(ifnotequal)
#@register.tag(name="if")
def do_if(parser, token):
"""
The ``{% if %}`` tag evaluates a variable, and if that variable is "true"
(i.e. exists, is not empty, and is not a false boolean value) the contents
of the block are output:
::
{% if athlete_list %}
Number of athletes: {{ athlete_list|count }}
{% else %}
No athletes.
{% endif %}
In the above, if ``athlete_list`` is not empty, the number of athletes will
be displayed by the ``{{ athlete_list|count }}`` variable.
    As you can see, the ``if`` tag can take an optional ``{% else %}`` clause that
will be displayed if the test fails.
``if`` tags may use ``or`` or ``not`` to test a number of variables or to
negate a given variable::
{% if not athlete_list %}
There are no athletes.
{% endif %}
{% if athlete_list or coach_list %}
There are some athletes or some coaches.
{% endif %}
{% if not athlete_list or coach_list %}
There are no athletes, or there are some coaches.
{% endif %}
For simplicity, ``if`` tags do not allow ``and`` clauses. Use nested ``if``
tags instead::
{% if athlete_list %}
{% if coach_list %}
Number of athletes: {{ athlete_list|count }}.
Number of coaches: {{ coach_list|count }}.
{% endif %}
{% endif %}
"""
bits = token.contents.split()
del bits[0]
if not bits:
raise TemplateSyntaxError, "'if' statement requires at least one argument"
# bits now looks something like this: ['a', 'or', 'not', 'b', 'or', 'c.d']
bitstr = ' '.join(bits)
boolpairs = bitstr.split(' and ')
boolvars = []
if len(boolpairs) == 1:
link_type = IfNode.LinkTypes.or_
boolpairs = bitstr.split(' or ')
else:
link_type = IfNode.LinkTypes.and_
if ' or ' in bitstr:
raise TemplateSyntaxError, "'if' tags can't mix 'and' and 'or'"
for boolpair in boolpairs:
if ' ' in boolpair:
try:
not_, boolvar = boolpair.split()
except ValueError:
raise TemplateSyntaxError, "'if' statement improperly formatted"
if not_ != 'not':
raise TemplateSyntaxError, "Expected 'not' in if statement"
boolvars.append((True, parser.compile_filter(boolvar)))
else:
boolvars.append((False, parser.compile_filter(boolpair)))
nodelist_true = parser.parse(('else', 'endif'))
token = parser.next_token()
if token.contents == 'else':
nodelist_false = parser.parse(('endif',))
parser.delete_first_token()
else:
nodelist_false = NodeList()
return IfNode(boolvars, nodelist_true, nodelist_false, link_type)
do_if = register.tag("if", do_if)
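# Worked sketch of the argument-splitting rule in do_if() above: the expression
# is split on ' and ' first; if that produces a single chunk it is re-split on
# ' or ', which is why 'and' and 'or' can never be mixed in one {% if %} tag.
def _sketch_if_argument_splitting():
    bitstr = ' '.join(['not', 'athlete_list', 'or', 'coach_list'])
    assert bitstr.split(' and ') == ['not athlete_list or coach_list']
    assert bitstr.split(' or ') == ['not athlete_list', 'coach_list']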
#@register.tag
def ifchanged(parser, token):
"""
Check if a value has changed from the last iteration of a loop.
The 'ifchanged' block tag is used within a loop. It has two possible uses.
1. Checks its own rendered contents against its previous state and only
displays the content if it has changed. For example, this displays a list of
days, only displaying the month if it changes::
<h1>Archive for {{ year }}</h1>
{% for date in days %}
{% ifchanged %}<h3>{{ date|date:"F" }}</h3>{% endifchanged %}
<a href="{{ date|date:"M/d"|lower }}/">{{ date|date:"j" }}</a>
{% endfor %}
2. If given a variable, check whether that variable has changed. For example, the
following shows the date every time it changes, but only shows the hour if both
the hour and the date have changed::
{% for date in days %}
{% ifchanged date.date %} {{ date.date }} {% endifchanged %}
{% ifchanged date.hour date.date %}
{{ date.hour }}
{% endifchanged %}
{% endfor %}
"""
bits = token.contents.split()
nodelist = parser.parse(('endifchanged',))
parser.delete_first_token()
return IfChangedNode(nodelist, *bits[1:])
ifchanged = register.tag(ifchanged)
#@register.tag
def ssi(parser, token):
"""
Output the contents of a given file into the page.
Like a simple "include" tag, the ``ssi`` tag includes the contents
    of another file -- which must be specified using an absolute path --
in the current page::
{% ssi /home/html/ljworld.com/includes/right_generic.html %}
If the optional "parsed" parameter is given, the contents of the included
file are evaluated as template code, with the current context::
{% ssi /home/html/ljworld.com/includes/right_generic.html parsed %}
"""
bits = token.contents.split()
parsed = False
if len(bits) not in (2, 3):
raise TemplateSyntaxError, "'ssi' tag takes one argument: the path to the file to be included"
if len(bits) == 3:
if bits[2] == 'parsed':
parsed = True
else:
raise TemplateSyntaxError, "Second (optional) argument to %s tag must be 'parsed'" % bits[0]
return SsiNode(bits[1], parsed)
ssi = register.tag(ssi)
#@register.tag
def load(parser, token):
"""
Load a custom template tag set.
For example, to load the template tags in ``django/templatetags/news/photos.py``::
{% load news.photos %}
"""
bits = token.contents.split()
for taglib in bits[1:]:
# add the library to the parser
try:
lib = get_library("django.templatetags.%s" % taglib.split('.')[-1])
parser.add_library(lib)
except InvalidTemplateLibrary, e:
raise TemplateSyntaxError, "'%s' is not a valid tag library: %s" % (taglib, e)
return LoadNode()
load = register.tag(load)
#@register.tag
def now(parser, token):
"""
Display the date, formatted according to the given string.
Uses the same format as PHP's ``date()`` function; see http://php.net/date
for all the possible values.
Sample usage::
It is {% now "jS F Y H:i" %}
"""
bits = token.contents.split('"')
if len(bits) != 3:
raise TemplateSyntaxError, "'now' statement takes one argument"
format_string = bits[1]
return NowNode(format_string)
now = register.tag(now)
#@register.tag
def regroup(parser, token):
"""
Regroup a list of alike objects by a common attribute.
This complex tag is best illustrated by use of an example: say that
``people`` is a list of ``Person`` objects that have ``first_name``,
``last_name``, and ``gender`` attributes, and you'd like to display a list
that looks like:
* Male:
* George Bush
* Bill Clinton
* Female:
* Margaret Thatcher
            * Condoleezza Rice
* Unknown:
* Pat Smith
The following snippet of template code would accomplish this dubious task::
{% regroup people by gender as grouped %}
<ul>
{% for group in grouped %}
<li>{{ group.grouper }}
<ul>
{% for item in group.list %}
<li>{{ item }}</li>
{% endfor %}
</ul>
{% endfor %}
</ul>
As you can see, ``{% regroup %}`` populates a variable with a list of
objects with ``grouper`` and ``list`` attributes. ``grouper`` contains the
item that was grouped by; ``list`` contains the list of objects that share
that ``grouper``. In this case, ``grouper`` would be ``Male``, ``Female``
and ``Unknown``, and ``list`` is the list of people with those genders.
    Note that ``{% regroup %}`` does not work when the list to be grouped is not
sorted by the key you are grouping by! This means that if your list of
people was not sorted by gender, you'd need to make sure it is sorted before
using it, i.e.::
{% regroup people|dictsort:"gender" by gender as grouped %}
"""
firstbits = token.contents.split(None, 3)
if len(firstbits) != 4:
raise TemplateSyntaxError, "'regroup' tag takes five arguments"
target = parser.compile_filter(firstbits[1])
if firstbits[2] != 'by':
raise TemplateSyntaxError, "second argument to 'regroup' tag must be 'by'"
lastbits_reversed = firstbits[3][::-1].split(None, 2)
if lastbits_reversed[1][::-1] != 'as':
raise TemplateSyntaxError, "next-to-last argument to 'regroup' tag must be 'as'"
expression = parser.compile_filter(lastbits_reversed[2][::-1])
var_name = lastbits_reversed[0][::-1]
return RegroupNode(target, expression, var_name)
regroup = register.tag(regroup)
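# Worked sketch of the reversed-split trick in regroup() above, which peels the
# trailing "... as <var>" off an otherwise free-form grouping expression.
def _sketch_regroup_parsing():
    tail = 'gender as grouped'  # firstbits[3] for {% regroup people by gender as grouped %}
    lastbits_reversed = tail[::-1].split(None, 2)
    assert lastbits_reversed[1][::-1] == 'as'
    assert lastbits_reversed[2][::-1] == 'gender'   # expression to group by
    assert lastbits_reversed[0][::-1] == 'grouped'  # target variable name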
def spaceless(parser, token):
"""
Removes whitespace between HTML tags. This includes tab
characters and newlines.
Example usage::
{% spaceless %}
<p>
<a href="foo/">Foo</a>
</p>
{% endspaceless %}
This example would return this HTML::
<p><a href="foo/">Foo</a></p>
Only space between *tags* is normalized -- not space between tags and text. In
this example, the space around ``Hello`` won't be stripped::
{% spaceless %}
<strong>
Hello
</strong>
{% endspaceless %}
"""
nodelist = parser.parse(('endspaceless',))
parser.delete_first_token()
return SpacelessNode(nodelist)
spaceless = register.tag(spaceless)
#@register.tag
def templatetag(parser, token):
"""
Output one of the bits used to compose template tags.
Since the template system has no concept of "escaping", to display one of
the bits used in template tags, you must use the ``{% templatetag %}`` tag.
The argument tells which template bit to output:
================== =======
Argument Outputs
================== =======
``openblock`` ``{%``
``closeblock`` ``%}``
``openvariable`` ``{{``
``closevariable`` ``}}``
``openbrace`` ``{``
``closebrace`` ``}``
``opencomment`` ``{#``
``closecomment`` ``#}``
================== =======
"""
bits = token.contents.split()
if len(bits) != 2:
raise TemplateSyntaxError, "'templatetag' statement takes one argument"
tag = bits[1]
if tag not in TemplateTagNode.mapping:
raise TemplateSyntaxError, "Invalid templatetag argument: '%s'. Must be one of: %s" % \
(tag, TemplateTagNode.mapping.keys())
return TemplateTagNode(tag)
templatetag = register.tag(templatetag)
def url(parser, token):
"""
Returns an absolute URL matching given view with its parameters.
This is a way to define links that aren't tied to a particular URL configuration::
{% url path.to.some_view arg1,arg2,name1=value1 %}
The first argument is a path to a view. It can be an absolute python path
or just ``app_name.view_name`` without the project name if the view is
located inside the project. Other arguments are comma-separated values
that will be filled in place of positional and keyword arguments in the
URL. All arguments for the URL should be present.
For example if you have a view ``app_name.client`` taking client's id and
the corresponding line in a URLconf looks like this::
('^client/(\d+)/$', 'app_name.client')
and this app's URLconf is included into the project's URLconf under some
path::
('^clients/', include('project_name.app_name.urls'))
then in a template you can create a link for a certain client like this::
{% url app_name.client client.id %}
The URL will look like ``/clients/client/123/``.
"""
bits = token.contents.split(' ', 2)
if len(bits) < 2:
raise TemplateSyntaxError, "'%s' takes at least one argument (path to a view)" % bits[0]
args = []
kwargs = {}
if len(bits) > 2:
for arg in bits[2].split(','):
if '=' in arg:
k, v = arg.split('=', 1)
k = k.strip()
kwargs[k] = parser.compile_filter(v)
else:
args.append(parser.compile_filter(arg))
return URLNode(bits[1], args, kwargs)
url = register.tag(url)
#@register.tag
def widthratio(parser, token):
"""
For creating bar charts and such, this tag calculates the ratio of a given
value to a maximum value, and then applies that ratio to a constant.
For example::
<img src='bar.gif' height='10' width='{% widthratio this_value max_value 100 %}' />
    Above, if ``this_value`` is 175 and ``max_value`` is 200, the image in
the above example will be 88 pixels wide (because 175/200 = .875; .875 *
100 = 87.5 which is rounded up to 88).
"""
bits = token.contents.split()
if len(bits) != 4:
raise TemplateSyntaxError("widthratio takes three arguments")
tag, this_value_expr, max_value_expr, max_width = bits
try:
max_width = int(max_width)
except ValueError:
raise TemplateSyntaxError("widthratio final argument must be an integer")
return WidthRatioNode(parser.compile_filter(this_value_expr),
parser.compile_filter(max_value_expr), max_width)
widthratio = register.tag(widthratio)
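# Quick arithmetic check of the widthratio docstring example above, using the
# same formula as WidthRatioNode.render(): 175/200 * 100 = 87.5, rounded to 88.
def _sketch_widthratio_arithmetic():
    assert str(int(round((175.0 / 200.0) * 100))) == '88'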
#@register.tag
def do_with(parser, token):
"""
Add a value to the context (inside of this block) for caching and easy
access.
For example::
{% with person.some_sql_method as total %}
{{ total }} object{{ total|pluralize }}
{% endwith %}
"""
bits = list(token.split_contents())
if len(bits) != 4 or bits[2] != "as":
raise TemplateSyntaxError, "%r expected format is 'value as name'" % bits[0]
var = parser.compile_filter(bits[1])
name = bits[3]
nodelist = parser.parse(('endwith',))
parser.delete_first_token()
return WithNode(var, name, nodelist)
do_with = register.tag('with', do_with)
| {
"content_hash": "5f270e7c39ab79bc3f4a57719872780d",
"timestamp": "",
"source": "github",
"line_count": 1012,
"max_line_length": 213,
"avg_line_length": 34.705533596837945,
"alnum_prop": 0.5829394681396276,
"repo_name": "jonaustin/advisoryscan",
"id": "371b57e430b66658505d94f04482c17d15aca80a",
"size": "35122",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/django/template/defaulttags.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "63725"
},
{
"name": "JavaScript",
"bytes": "159708"
},
{
"name": "Perl",
"bytes": "89271"
},
{
"name": "Python",
"bytes": "2194026"
},
{
"name": "Shell",
"bytes": "3612"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('game', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='user',
name='id',
field=models.AutoField(primary_key=True, serialize=False),
),
]
| {
"content_hash": "a4e2cad4a10e9694055e17ed872efd90",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 70,
"avg_line_length": 21,
"alnum_prop": 0.5873015873015873,
"repo_name": "sdu14SoftwareEngineering/GameOfLife_WEB",
"id": "bf5481d017bea153556b5d6341a2ec1cd3d35921",
"size": "451",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "game/migrations/0002_auto_20161114_1639.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "53069"
},
{
"name": "JavaScript",
"bytes": "2035954"
},
{
"name": "Python",
"bytes": "45199"
}
],
"symlink_target": ""
} |
from oslo_log import log as logging
import oslo_messaging
from oslo_utils import excutils
from neutron.common import constants as n_const
from neutron.common import utils as n_utils
from neutron.i18n import _LE, _LI, _LW
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants
LOG = logging.getLogger(__name__)
# A class to represent a DVR-hosted subnet including vif_ports resident on
# that subnet
class LocalDVRSubnetMapping(object):
def __init__(self, subnet, csnat_ofport=constants.OFPORT_INVALID):
        # set of compute ports on this dvr subnet
self.compute_ports = {}
self.subnet = subnet
self.csnat_ofport = csnat_ofport
self.dvr_owned = False
def __str__(self):
return ("subnet = %s compute_ports = %s csnat_port = %s"
" is_dvr_owned = %s" %
(self.subnet, self.get_compute_ofports(),
self.get_csnat_ofport(), self.is_dvr_owned()))
def get_subnet_info(self):
return self.subnet
def set_dvr_owned(self, owned):
self.dvr_owned = owned
def is_dvr_owned(self):
return self.dvr_owned
def add_compute_ofport(self, vif_id, ofport):
self.compute_ports[vif_id] = ofport
def remove_compute_ofport(self, vif_id):
self.compute_ports.pop(vif_id, 0)
def remove_all_compute_ofports(self):
self.compute_ports.clear()
def get_compute_ofports(self):
return self.compute_ports
def set_csnat_ofport(self, ofport):
self.csnat_ofport = ofport
def get_csnat_ofport(self):
return self.csnat_ofport
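# Minimal usage sketch (illustrative values only) of the bookkeeping container
# above: the agent keeps one LocalDVRSubnetMapping per DVR-hosted subnet and
# hangs the compute-port ofports and the csnat ofport off it.
def _sketch_local_dvr_subnet_mapping():
    ldm = LocalDVRSubnetMapping({'id': 'subnet-1', 'gateway_ip': '10.0.0.1'})
    ldm.set_dvr_owned(True)
    ldm.add_compute_ofport('vif-1', 5)
    return ldm.get_compute_ofports(), ldm.get_csnat_ofport()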
class OVSPort(object):
def __init__(self, id, ofport, mac, device_owner):
self.id = id
self.mac = mac
self.ofport = ofport
self.subnets = set()
self.device_owner = device_owner
def __str__(self):
return ("OVSPort: id = %s, ofport = %s, mac = %s, "
"device_owner = %s, subnets = %s" %
(self.id, self.ofport, self.mac,
self.device_owner, self.subnets))
def add_subnet(self, subnet_id):
self.subnets.add(subnet_id)
def remove_subnet(self, subnet_id):
self.subnets.remove(subnet_id)
def remove_all_subnets(self):
self.subnets.clear()
def get_subnets(self):
return self.subnets
def get_device_owner(self):
return self.device_owner
def get_mac(self):
return self.mac
def get_ofport(self):
return self.ofport
class OVSDVRNeutronAgent(object):
'''
    Implements OVS-based DVR (Distributed Virtual Router) for overlay networks.
'''
# history
# 1.0 Initial version
def __init__(self, context, plugin_rpc, integ_br, tun_br,
bridge_mappings, phys_brs, int_ofports, phys_ofports,
patch_int_ofport=constants.OFPORT_INVALID,
patch_tun_ofport=constants.OFPORT_INVALID,
host=None, enable_tunneling=False,
enable_distributed_routing=False):
self.context = context
self.plugin_rpc = plugin_rpc
self.host = host
self.enable_tunneling = enable_tunneling
self.enable_distributed_routing = enable_distributed_routing
self.bridge_mappings = bridge_mappings
self.phys_brs = phys_brs
self.int_ofports = int_ofports
self.phys_ofports = phys_ofports
self.reset_ovs_parameters(integ_br, tun_br,
patch_int_ofport, patch_tun_ofport)
self.reset_dvr_parameters()
self.dvr_mac_address = None
if self.enable_distributed_routing:
self.get_dvr_mac_address()
def setup_dvr_flows(self):
self.setup_dvr_flows_on_integ_br()
self.setup_dvr_flows_on_tun_br()
self.setup_dvr_flows_on_phys_br()
self.setup_dvr_mac_flows_on_all_brs()
def reset_ovs_parameters(self, integ_br, tun_br,
patch_int_ofport, patch_tun_ofport):
'''Reset the openvswitch parameters'''
self.int_br = integ_br
self.tun_br = tun_br
self.patch_int_ofport = patch_int_ofport
self.patch_tun_ofport = patch_tun_ofport
def reset_dvr_parameters(self):
'''Reset the DVR parameters'''
self.local_dvr_map = {}
self.local_csnat_map = {}
self.local_ports = {}
self.registered_dvr_macs = set()
def get_dvr_mac_address(self):
try:
self.get_dvr_mac_address_with_retry()
except oslo_messaging.RemoteError as e:
LOG.warning(_LW('L2 agent could not get DVR MAC address at '
'startup due to RPC error. It happens when the '
'server does not support this RPC API. Detailed '
'message: %s'), e)
except oslo_messaging.MessagingTimeout:
LOG.error(_LE('DVR: Failed to obtain a valid local '
'DVR MAC address - L2 Agent operating '
'in Non-DVR Mode'))
if not self.in_distributed_mode():
# switch all traffic using L2 learning
# REVISIT(yamamoto): why to install the same flow as
# setup_integration_br?
self.int_br.install_normal()
def get_dvr_mac_address_with_retry(self):
# Get the local DVR MAC Address from the Neutron Server.
# This is the first place where we contact the server on startup
# so retry in case it's not ready to respond
for retry_count in reversed(range(5)):
try:
details = self.plugin_rpc.get_dvr_mac_address_by_host(
self.context, self.host)
except oslo_messaging.MessagingTimeout as e:
with excutils.save_and_reraise_exception() as ctx:
if retry_count > 0:
ctx.reraise = False
LOG.warning(_LW('L2 agent could not get DVR MAC '
'address from server. Retrying. '
'Detailed message: %s'), e)
else:
LOG.debug("L2 Agent DVR: Received response for "
"get_dvr_mac_address_by_host() from "
"plugin: %r", details)
self.dvr_mac_address = details['mac_address']
return
def setup_dvr_flows_on_integ_br(self):
        '''Set up initial dvr flows into br-int'''
if not self.in_distributed_mode():
return
LOG.info(_LI("L2 Agent operating in DVR Mode with MAC %s"),
self.dvr_mac_address)
# Remove existing flows in integration bridge
self.int_br.delete_flows()
# Add a canary flow to int_br to track OVS restarts
self.int_br.setup_canary_table()
# Insert 'drop' action as the default for Table DVR_TO_SRC_MAC
self.int_br.install_drop(table_id=constants.DVR_TO_SRC_MAC, priority=1)
self.int_br.install_drop(table_id=constants.DVR_TO_SRC_MAC_VLAN,
priority=1)
# Insert 'normal' action as the default for Table LOCAL_SWITCHING
self.int_br.install_normal(table_id=constants.LOCAL_SWITCHING,
priority=1)
for physical_network in self.bridge_mappings:
self.int_br.install_drop(table_id=constants.LOCAL_SWITCHING,
priority=2,
in_port=self.int_ofports[
physical_network])
def setup_dvr_flows_on_tun_br(self):
        '''Set up initial dvr flows into br-tun'''
if not self.enable_tunneling or not self.in_distributed_mode():
return
self.tun_br.install_goto(dest_table_id=constants.DVR_PROCESS,
priority=1,
in_port=self.patch_int_ofport)
# table-miss should be sent to learning table
self.tun_br.install_goto(table_id=constants.DVR_NOT_LEARN,
dest_table_id=constants.LEARN_FROM_TUN)
self.tun_br.install_goto(table_id=constants.DVR_PROCESS,
dest_table_id=constants.PATCH_LV_TO_TUN)
def setup_dvr_flows_on_phys_br(self):
        '''Set up initial dvr flows into br-phys'''
if not self.in_distributed_mode():
return
for physical_network in self.bridge_mappings:
self.phys_brs[physical_network].install_goto(
in_port=self.phys_ofports[physical_network],
priority=2,
dest_table_id=constants.DVR_PROCESS_VLAN)
self.phys_brs[physical_network].install_goto(
priority=1,
dest_table_id=constants.DVR_NOT_LEARN_VLAN)
self.phys_brs[physical_network].install_goto(
table_id=constants.DVR_PROCESS_VLAN,
priority=0,
dest_table_id=constants.LOCAL_VLAN_TRANSLATION)
self.phys_brs[physical_network].install_drop(
table_id=constants.LOCAL_VLAN_TRANSLATION,
in_port=self.phys_ofports[physical_network],
priority=2)
self.phys_brs[physical_network].install_normal(
table_id=constants.DVR_NOT_LEARN_VLAN,
priority=1)
def _add_dvr_mac_for_phys_br(self, physical_network, mac):
self.int_br.add_dvr_mac_vlan(mac=mac,
port=self.int_ofports[physical_network])
phys_br = self.phys_brs[physical_network]
phys_br.add_dvr_mac_vlan(mac=mac,
port=self.phys_ofports[physical_network])
def _remove_dvr_mac_for_phys_br(self, physical_network, mac):
# REVISIT(yamamoto): match in_port as well?
self.int_br.remove_dvr_mac_vlan(mac=mac)
phys_br = self.phys_brs[physical_network]
# REVISIT(yamamoto): match in_port as well?
phys_br.remove_dvr_mac_vlan(mac=mac)
def _add_dvr_mac_for_tun_br(self, mac):
self.int_br.add_dvr_mac_tun(mac=mac, port=self.patch_tun_ofport)
self.tun_br.add_dvr_mac_tun(mac=mac, port=self.patch_int_ofport)
def _remove_dvr_mac_for_tun_br(self, mac):
self.int_br.remove_dvr_mac_tun(mac=mac, port=self.patch_tun_ofport)
# REVISIT(yamamoto): match in_port as well?
self.tun_br.remove_dvr_mac_tun(mac=mac)
def _add_dvr_mac(self, mac):
for physical_network in self.bridge_mappings:
self._add_dvr_mac_for_phys_br(physical_network, mac)
if self.enable_tunneling:
self._add_dvr_mac_for_tun_br(mac)
LOG.debug("Added DVR MAC flow for %s", mac)
self.registered_dvr_macs.add(mac)
def _remove_dvr_mac(self, mac):
for physical_network in self.bridge_mappings:
self._remove_dvr_mac_for_phys_br(physical_network, mac)
if self.enable_tunneling:
self._remove_dvr_mac_for_tun_br(mac)
LOG.debug("Removed DVR MAC flow for %s", mac)
self.registered_dvr_macs.remove(mac)
def setup_dvr_mac_flows_on_all_brs(self):
if not self.in_distributed_mode():
LOG.debug("Not in distributed mode, ignoring invocation "
"of get_dvr_mac_address_list() ")
return
dvr_macs = self.plugin_rpc.get_dvr_mac_address_list(self.context)
LOG.debug("L2 Agent DVR: Received these MACs: %r", dvr_macs)
for mac in dvr_macs:
if mac['mac_address'] == self.dvr_mac_address:
continue
self._add_dvr_mac(mac['mac_address'])
def dvr_mac_address_update(self, dvr_macs):
if not self.dvr_mac_address:
LOG.debug("Self mac unknown, ignoring this "
"dvr_mac_address_update() ")
return
dvr_host_macs = set()
for entry in dvr_macs:
if entry['mac_address'] == self.dvr_mac_address:
continue
dvr_host_macs.add(entry['mac_address'])
if dvr_host_macs == self.registered_dvr_macs:
LOG.debug("DVR Mac address already up to date")
return
dvr_macs_added = dvr_host_macs - self.registered_dvr_macs
dvr_macs_removed = self.registered_dvr_macs - dvr_host_macs
for oldmac in dvr_macs_removed:
self._remove_dvr_mac(oldmac)
for newmac in dvr_macs_added:
self._add_dvr_mac(newmac)
def in_distributed_mode(self):
return self.dvr_mac_address is not None
def is_dvr_router_interface(self, device_owner):
return device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE
def process_tunneled_network(self, network_type, lvid, segmentation_id):
self.tun_br.provision_local_vlan(
network_type=network_type,
lvid=lvid,
segmentation_id=segmentation_id,
distributed=self.in_distributed_mode())
def _bind_distributed_router_interface_port(self, port, lvm,
fixed_ips, device_owner):
# since distributed router port must have only one fixed
# IP, directly use fixed_ips[0]
fixed_ip = fixed_ips[0]
subnet_uuid = fixed_ip['subnet_id']
csnat_ofport = constants.OFPORT_INVALID
ldm = None
if subnet_uuid in self.local_dvr_map:
ldm = self.local_dvr_map[subnet_uuid]
csnat_ofport = ldm.get_csnat_ofport()
if csnat_ofport == constants.OFPORT_INVALID:
LOG.error(_LE("DVR: Duplicate DVR router interface detected "
"for subnet %s"), subnet_uuid)
return
else:
# set up LocalDVRSubnetMapping available for this subnet
subnet_info = self.plugin_rpc.get_subnet_for_dvr(self.context,
subnet_uuid)
if not subnet_info:
LOG.error(_LE("DVR: Unable to retrieve subnet information "
"for subnet_id %s"), subnet_uuid)
return
LOG.debug("get_subnet_for_dvr for subnet %(uuid)s "
"returned with %(info)s",
{"uuid": subnet_uuid, "info": subnet_info})
ldm = LocalDVRSubnetMapping(subnet_info)
self.local_dvr_map[subnet_uuid] = ldm
# DVR takes over
ldm.set_dvr_owned(True)
vlan_to_use = lvm.vlan
if lvm.network_type == p_const.TYPE_VLAN:
vlan_to_use = lvm.segmentation_id
subnet_info = ldm.get_subnet_info()
ip_version = subnet_info['ip_version']
local_compute_ports = (
self.plugin_rpc.get_ports_on_host_by_subnet(
self.context, self.host, subnet_uuid))
LOG.debug("DVR: List of ports received from "
"get_ports_on_host_by_subnet %s",
local_compute_ports)
vif_by_id = self.int_br.get_vifs_by_ids(
[prt['id'] for prt in local_compute_ports])
for prt in local_compute_ports:
vif = vif_by_id.get(prt['id'])
if not vif:
continue
ldm.add_compute_ofport(vif.vif_id, vif.ofport)
if vif.vif_id in self.local_ports:
                # this compute port is already tracked (possibly on a
                # different dvr-routed subnet), so just record this
                # subnet on its existing OVSPort entry
comp_ovsport = self.local_ports[vif.vif_id]
comp_ovsport.add_subnet(subnet_uuid)
else:
                # first time this compute port is seen on a dvr-routed subnet,
                # so create its OVSPort entry and record this subnet on it
comp_ovsport = OVSPort(vif.vif_id, vif.ofport,
vif.vif_mac, prt['device_owner'])
comp_ovsport.add_subnet(subnet_uuid)
self.local_ports[vif.vif_id] = comp_ovsport
# create rule for just this vm port
self.int_br.install_dvr_to_src_mac(
network_type=lvm.network_type,
vlan_tag=vlan_to_use,
gateway_mac=subnet_info['gateway_mac'],
dst_mac=comp_ovsport.get_mac(),
dst_port=comp_ovsport.get_ofport())
if lvm.network_type == p_const.TYPE_VLAN:
# TODO(vivek) remove the IPv6 related flows once SNAT is not
# used for IPv6 DVR.
br = self.phys_brs[lvm.physical_network]
if lvm.network_type in constants.TUNNEL_NETWORK_TYPES:
br = self.tun_br
# TODO(vivek) remove the IPv6 related flows once SNAT is not
# used for IPv6 DVR.
if ip_version == 4:
br.install_dvr_process_ipv4(
vlan_tag=lvm.vlan, gateway_ip=subnet_info['gateway_ip'])
else:
br.install_dvr_process_ipv6(
vlan_tag=lvm.vlan, gateway_mac=subnet_info['gateway_mac'])
br.install_dvr_process(
vlan_tag=lvm.vlan, vif_mac=port.vif_mac,
dvr_mac_address=self.dvr_mac_address)
# the dvr router interface is itself a port, so capture it
# queue this subnet to that port. A subnet appears only once as
# a router interface on any given router
ovsport = OVSPort(port.vif_id, port.ofport,
port.vif_mac, device_owner)
ovsport.add_subnet(subnet_uuid)
self.local_ports[port.vif_id] = ovsport
def _bind_port_on_dvr_subnet(self, port, lvm, fixed_ips,
device_owner):
# Handle new compute port added use-case
subnet_uuid = None
for ips in fixed_ips:
if ips['subnet_id'] not in self.local_dvr_map:
continue
subnet_uuid = ips['subnet_id']
ldm = self.local_dvr_map[subnet_uuid]
if not ldm.is_dvr_owned():
# well this is CSNAT stuff, let dvr come in
# and do plumbing for this vm later
continue
# This confirms that this compute port belongs
# to a dvr hosted subnet.
# Accommodate this VM Port into the existing rule in
# the integration bridge
LOG.debug("DVR: Plumbing compute port %s", port.vif_id)
subnet_info = ldm.get_subnet_info()
ldm.add_compute_ofport(port.vif_id, port.ofport)
if port.vif_id in self.local_ports:
            # this compute port is already tracked (possibly on a different
            # dvr-routed subnet), so just record this subnet on its
            # existing OVSPort entry
ovsport = self.local_ports[port.vif_id]
ovsport.add_subnet(subnet_uuid)
else:
            # first time this compute port is seen on a dvr-routed subnet,
            # so create its OVSPort entry and record this subnet on it
ovsport = OVSPort(port.vif_id, port.ofport,
port.vif_mac, device_owner)
ovsport.add_subnet(subnet_uuid)
self.local_ports[port.vif_id] = ovsport
vlan_to_use = lvm.vlan
if lvm.network_type == p_const.TYPE_VLAN:
vlan_to_use = lvm.segmentation_id
# create a rule for this vm port
self.int_br.install_dvr_to_src_mac(
network_type=lvm.network_type,
vlan_tag=vlan_to_use,
gateway_mac=subnet_info['gateway_mac'],
dst_mac=ovsport.get_mac(),
dst_port=ovsport.get_ofport())
def _bind_centralized_snat_port_on_dvr_subnet(self, port, lvm,
fixed_ips, device_owner):
# since centralized-SNAT (CSNAT) port must have only one fixed
# IP, directly use fixed_ips[0]
fixed_ip = fixed_ips[0]
if port.vif_id in self.local_ports:
# throw an error if CSNAT port is already on a different
# dvr routed subnet
ovsport = self.local_ports[port.vif_id]
subs = list(ovsport.get_subnets())
if subs[0] == fixed_ip['subnet_id']:
return
LOG.error(_LE("Centralized-SNAT port %(port)s on subnet "
"%(port_subnet)s already seen on a different "
"subnet %(orig_subnet)s"), {
"port": port.vif_id,
"port_subnet": fixed_ip['subnet_id'],
"orig_subnet": subs[0],
})
return
subnet_uuid = fixed_ip['subnet_id']
ldm = None
subnet_info = None
if subnet_uuid not in self.local_dvr_map:
# no csnat ports seen on this subnet - create csnat state
# for this subnet
subnet_info = self.plugin_rpc.get_subnet_for_dvr(self.context,
subnet_uuid)
ldm = LocalDVRSubnetMapping(subnet_info, port.ofport)
self.local_dvr_map[subnet_uuid] = ldm
else:
ldm = self.local_dvr_map[subnet_uuid]
subnet_info = ldm.get_subnet_info()
# Store csnat OF Port in the existing DVRSubnetMap
ldm.set_csnat_ofport(port.ofport)
# create ovsPort footprint for csnat port
ovsport = OVSPort(port.vif_id, port.ofport,
port.vif_mac, device_owner)
ovsport.add_subnet(subnet_uuid)
self.local_ports[port.vif_id] = ovsport
vlan_to_use = lvm.vlan
if lvm.network_type == p_const.TYPE_VLAN:
vlan_to_use = lvm.segmentation_id
self.int_br.install_dvr_to_src_mac(
network_type=lvm.network_type,
vlan_tag=vlan_to_use,
gateway_mac=subnet_info['gateway_mac'],
dst_mac=ovsport.get_mac(),
dst_port=ovsport.get_ofport())
def bind_port_to_dvr(self, port, local_vlan_map,
fixed_ips, device_owner):
if not self.in_distributed_mode():
return
if local_vlan_map.network_type not in (constants.TUNNEL_NETWORK_TYPES
+ [p_const.TYPE_VLAN]):
LOG.debug("DVR: Port %s is with network_type %s not supported"
" for dvr plumbing" % (port.vif_id,
local_vlan_map.network_type))
return
if device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE:
self._bind_distributed_router_interface_port(port,
local_vlan_map,
fixed_ips,
device_owner)
if device_owner and n_utils.is_dvr_serviced(device_owner):
self._bind_port_on_dvr_subnet(port, local_vlan_map,
fixed_ips,
device_owner)
if device_owner == n_const.DEVICE_OWNER_ROUTER_SNAT:
self._bind_centralized_snat_port_on_dvr_subnet(port,
local_vlan_map,
fixed_ips,
device_owner)
def _unbind_distributed_router_interface_port(self, port, lvm):
ovsport = self.local_ports[port.vif_id]
# removal of distributed router interface
subnet_ids = ovsport.get_subnets()
subnet_set = set(subnet_ids)
network_type = lvm.network_type
physical_network = lvm.physical_network
vlan_to_use = lvm.vlan
if network_type == p_const.TYPE_VLAN:
vlan_to_use = lvm.segmentation_id
# ensure we process for all the subnets laid on this removed port
for sub_uuid in subnet_set:
if sub_uuid not in self.local_dvr_map:
continue
ldm = self.local_dvr_map[sub_uuid]
subnet_info = ldm.get_subnet_info()
ip_version = subnet_info['ip_version']
# DVR is no more owner
ldm.set_dvr_owned(False)
# remove all vm rules for this dvr subnet
# clear of compute_ports altogether
compute_ports = ldm.get_compute_ofports()
for vif_id in compute_ports:
comp_port = self.local_ports[vif_id]
self.int_br.delete_dvr_to_src_mac(
network_type=network_type,
vlan_tag=vlan_to_use, dst_mac=comp_port.get_mac())
ldm.remove_all_compute_ofports()
if ldm.get_csnat_ofport() == constants.OFPORT_INVALID:
# if there is no csnat port for this subnet, remove
# this subnet from local_dvr_map, as no dvr (or) csnat
# ports available on this agent anymore
self.local_dvr_map.pop(sub_uuid, None)
if network_type == p_const.TYPE_VLAN:
                br = self.phys_brs[physical_network]
if network_type in constants.TUNNEL_NETWORK_TYPES:
br = self.tun_br
if ip_version == 4:
br.delete_dvr_process_ipv4(
vlan_tag=lvm.vlan, gateway_ip=subnet_info['gateway_ip'])
else:
br.delete_dvr_process_ipv6(
vlan_tag=lvm.vlan, gateway_mac=subnet_info['gateway_mac'])
ovsport.remove_subnet(sub_uuid)
if lvm.network_type == p_const.TYPE_VLAN:
            br = self.phys_brs[physical_network]
if lvm.network_type in constants.TUNNEL_NETWORK_TYPES:
br = self.tun_br
br.delete_dvr_process(vlan_tag=lvm.vlan, vif_mac=port.vif_mac)
# release port state
self.local_ports.pop(port.vif_id, None)
def _unbind_port_on_dvr_subnet(self, port, lvm):
ovsport = self.local_ports[port.vif_id]
# This confirms that this compute port being removed belonged
# to a dvr hosted subnet.
LOG.debug("DVR: Removing plumbing for compute port %s", port)
subnet_ids = ovsport.get_subnets()
# ensure we process for all the subnets laid on this port
for sub_uuid in subnet_ids:
if sub_uuid not in self.local_dvr_map:
continue
ldm = self.local_dvr_map[sub_uuid]
ldm.remove_compute_ofport(port.vif_id)
vlan_to_use = lvm.vlan
if lvm.network_type == p_const.TYPE_VLAN:
vlan_to_use = lvm.segmentation_id
# first remove this vm port rule
self.int_br.delete_dvr_to_src_mac(
network_type=lvm.network_type,
vlan_tag=vlan_to_use, dst_mac=ovsport.get_mac())
# release port state
self.local_ports.pop(port.vif_id, None)
def _unbind_centralized_snat_port_on_dvr_subnet(self, port, lvm):
ovsport = self.local_ports[port.vif_id]
        # This confirms that this csnat port being removed belonged
        # to a dvr hosted subnet.
LOG.debug("DVR: Removing plumbing for csnat port %s", port)
sub_uuid = list(ovsport.get_subnets())[0]
# ensure we process for all the subnets laid on this port
if sub_uuid not in self.local_dvr_map:
return
ldm = self.local_dvr_map[sub_uuid]
ldm.set_csnat_ofport(constants.OFPORT_INVALID)
vlan_to_use = lvm.vlan
if lvm.network_type == p_const.TYPE_VLAN:
vlan_to_use = lvm.segmentation_id
# then remove csnat port rule
self.int_br.delete_dvr_to_src_mac(
network_type=lvm.network_type,
vlan_tag=vlan_to_use, dst_mac=ovsport.get_mac())
if not ldm.is_dvr_owned():
# if not owned by DVR (only used for csnat), remove this
# subnet state altogether
self.local_dvr_map.pop(sub_uuid, None)
# release port state
self.local_ports.pop(port.vif_id, None)
def unbind_port_from_dvr(self, vif_port, local_vlan_map):
if not self.in_distributed_mode():
return
# Handle port removed use-case
if vif_port and vif_port.vif_id not in self.local_ports:
LOG.debug("DVR: Non distributed port, ignoring %s", vif_port)
return
ovsport = self.local_ports[vif_port.vif_id]
device_owner = ovsport.get_device_owner()
if device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE:
self._unbind_distributed_router_interface_port(vif_port,
local_vlan_map)
if device_owner and n_utils.is_dvr_serviced(device_owner):
self._unbind_port_on_dvr_subnet(vif_port,
local_vlan_map)
if device_owner == n_const.DEVICE_OWNER_ROUTER_SNAT:
self._unbind_centralized_snat_port_on_dvr_subnet(vif_port,
local_vlan_map)
| {
"content_hash": "d69ec5a138c9b050ceae28ffd4103ac4",
"timestamp": "",
"source": "github",
"line_count": 690,
"max_line_length": 79,
"avg_line_length": 42.54927536231884,
"alnum_prop": 0.5566606492046732,
"repo_name": "eonpatapon/neutron",
"id": "905c8a8e9e8868010b356c9d2504bbd4a7a9d12e",
"size": "30017",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "980"
},
{
"name": "Python",
"bytes": "7388312"
},
{
"name": "Shell",
"bytes": "12912"
}
],
"symlink_target": ""
} |
"""Resources management functions
"""
import sys
import bigml.api
from bigmler.utils import (dated, get_url, log_message, check_resource,
check_resource_error, log_created_resources)
from bigmler.reports import report
from bigmler.resourcesapi.common import set_basic_batch_args, map_fields, \
update_json_args
from bigmler.resourcesapi.common import FULL_FORMAT
def set_batch_anomaly_score_args(args, fields=None,
dataset_fields=None):
"""Return batch anomaly score args dict
"""
batch_anomaly_score_args = set_basic_batch_args(args, args.name)
if args.fields_map_ and fields is not None:
if dataset_fields is None:
dataset_fields = fields
batch_anomaly_score_args.update({
"fields_map": map_fields(args.fields_map_,
fields, dataset_fields)})
if args.prediction_info == FULL_FORMAT:
batch_anomaly_score_args.update(all_fields=True)
if args.prediction_fields:
batch_anomaly_score_args.update(all_fields=False)
prediction_fields = []
for field in args.prediction_fields.split(args.args_separator):
field = field.strip()
                if field not in dataset_fields.fields:
try:
field = dataset_fields.field_id(field)
except ValueError as exc:
sys.exit(exc)
prediction_fields.append(field)
batch_anomaly_score_args.update(output_fields=prediction_fields)
if 'batch_anomaly_score' in args.json_args:
update_json_args(
batch_anomaly_score_args,
args.json_args.get('batch_anomaly_score'),
fields)
return batch_anomaly_score_args
def create_batch_anomaly_score(anomaly, test_dataset,
batch_anomaly_score_args, args,
api=None, session_file=None,
path=None, log=None):
"""Creates remote batch anomaly score
"""
if api is None:
api = bigml.api.BigML()
message = dated("Creating batch anomaly score.\n")
log_message(message, log_file=session_file, console=args.verbosity)
batch_anomaly_score = api.create_batch_anomaly_score(
anomaly, test_dataset, batch_anomaly_score_args, retries=None)
log_created_resources(
"batch_anomaly_score", path,
bigml.api.get_batch_anomaly_score_id(batch_anomaly_score),
mode='a')
batch_anomaly_score_id = check_resource_error(
batch_anomaly_score, "Failed to create batch prediction: ")
try:
batch_anomaly_score = check_resource(batch_anomaly_score,
api.get_batch_anomaly_score,
raise_on_error=True)
except Exception as exception:
sys.exit("Failed to get a finished batch anomaly score: %s"
% str(exception))
message = dated("Batch anomaly score created: %s\n"
% get_url(batch_anomaly_score))
log_message(message, log_file=session_file, console=args.verbosity)
log_message("%s\n" % batch_anomaly_score_id, log_file=log)
if args.reports:
report(args.reports, path, batch_anomaly_score)
return batch_anomaly_score
| {
"content_hash": "53290837a5771ac91dc69833f015e3f0",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 75,
"avg_line_length": 36.84615384615385,
"alnum_prop": 0.607217417238294,
"repo_name": "jaor/bigmler",
"id": "c7651a092160c7d6f3f09be84d857ba0ef53fdf2",
"size": "3953",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bigmler/resourcesapi/batch_anomaly_scores.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "26465"
},
{
"name": "JavaScript",
"bytes": "73784"
},
{
"name": "Jupyter Notebook",
"bytes": "802"
},
{
"name": "Python",
"bytes": "2081730"
},
{
"name": "R",
"bytes": "71763"
}
],
"symlink_target": ""
} |
"""Support for the for Danfoss Air HRV binary sensors."""
from pydanfossair.commands import ReadCommand
from homeassistant.components.binary_sensor import (
BinarySensorDeviceClass,
BinarySensorEntity,
)
from . import DOMAIN as DANFOSS_AIR_DOMAIN
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the available Danfoss Air sensors etc."""
data = hass.data[DANFOSS_AIR_DOMAIN]
sensors = [
[
"Danfoss Air Bypass Active",
ReadCommand.bypass,
BinarySensorDeviceClass.OPENING,
],
["Danfoss Air Away Mode Active", ReadCommand.away_mode, None],
]
dev = []
for sensor in sensors:
dev.append(DanfossAirBinarySensor(data, sensor[0], sensor[1], sensor[2]))
add_entities(dev, True)
class DanfossAirBinarySensor(BinarySensorEntity):
"""Representation of a Danfoss Air binary sensor."""
def __init__(self, data, name, sensor_type, device_class):
"""Initialize the Danfoss Air binary sensor."""
self._data = data
self._attr_name = name
self._type = sensor_type
self._attr_device_class = device_class
def update(self):
"""Fetch new state data for the sensor."""
self._data.update()
self._attr_is_on = self._data.get_value(self._type)
| {
"content_hash": "3a7f73bf6ff25b5c5cae4fa0367d0e15",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 81,
"avg_line_length": 28.51063829787234,
"alnum_prop": 0.6470149253731343,
"repo_name": "home-assistant/home-assistant",
"id": "379d76ec4c8034a3307da8d4f2ab1c146ab1e2b4",
"size": "1340",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/danfoss_air/binary_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20557383"
},
{
"name": "Shell",
"bytes": "6671"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
"""
test_extractor
----------------------------------
Tests for `extractor` module.
"""
import unittest
from ncgocr import extractor as ex
class TestFunctions(unittest.TestCase):
def test_fit_border(self):
text = 'very tedious'
span = (5, 8)
result = ex._fit_border(text, span)
self.assertEqual(result, False)
text = 'this ted bear'
span = (5, 8)
result = ex._fit_border(text, span)
self.assertEqual(result, True)
class TestFunctions2(unittest.TestCase):
def test_nearest_evidences(self):
positional_index = {'a': {(1, 1), (3, 3)},
'b': {(2, 2), (5, 5)},
'c': {(4, 4)}}
wanted_terms = ['a', 'b']
current_position = 3
result = ex.nearest_evidences(current_position, wanted_terms, positional_index)
wanted = [2, 3]
self.assertEqual(result, wanted)
| {
"content_hash": "86c86f9e251744060c28861d2b2a44e3",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 87,
"avg_line_length": 27.256410256410255,
"alnum_prop": 0.5446848541862653,
"repo_name": "jeroyang/mcgocr",
"id": "492119cb8944a0c2f946d530e57bba428fe80515",
"size": "1110",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_extractor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "85851"
}
],
"symlink_target": ""
} |
import sys
if len(sys.argv) != 3:
print "Usage: spark-submit --files <file> %s <file> <result-destination>" % (sys.argv[0], )
print
print "Example:"
print " spark-submit --files query.sql %s query.sql s3://my-bucket/results/output.csv" % (sys.argv[0], )
print
sys.exit(1)
query_file = sys.argv[1]
result_uri = sys.argv[2]
from pyspark.sql import SparkSession
with open(query_file) as f:
query = f.read()
spark = SparkSession\
.builder\
.appName("spark-sql")\
.getOrCreate()
try:
print >> sys.stderr, 'Running query: %s' % (query_file, )
result = spark.sql(query)
print >> sys.stderr, 'Writing result: %s' % (result_uri, )
result.write.csv(result_uri)
finally:
spark.stop()
| {
"content_hash": "dd0c01d742c2fb1283880b38f0645579",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 109,
"avg_line_length": 23.903225806451612,
"alnum_prop": 0.6261808367071525,
"repo_name": "treasure-data/digdag",
"id": "03360a856a20a6e9490fa3b6f74affb550d9f62d",
"size": "741",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "digdag-standards/src/main/resources/io/digdag/standards/operator/aws/spark-sql-wrapper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1015"
},
{
"name": "CSS",
"bytes": "1834"
},
{
"name": "Dockerfile",
"bytes": "195"
},
{
"name": "HTML",
"bytes": "2429"
},
{
"name": "Java",
"bytes": "4019827"
},
{
"name": "JavaScript",
"bytes": "43354"
},
{
"name": "Less",
"bytes": "1044"
},
{
"name": "Makefile",
"bytes": "980"
},
{
"name": "Python",
"bytes": "16577"
},
{
"name": "Ruby",
"bytes": "8631"
},
{
"name": "Scala",
"bytes": "515"
},
{
"name": "Shell",
"bytes": "11157"
},
{
"name": "TypeScript",
"bytes": "106143"
}
],
"symlink_target": ""
} |
from __future__ import division
from pylab import *
import random
# there already is a sample in the namespace from numpy
from random import sample as smpl
import itertools
import utils
utils.backup(__file__)
import synapses
class AbstractSource(object):
def __init__(self):
"""
Initialize all relevant variables.
"""
raise NotImplementedError
def next(self):
"""
Returns the next input
"""
raise NotImplementedError
def global_range(self):
"""
Returns the maximal global index of all inputs
"""
raise NotImplementedError
def global_index(self):
"""
Returns the current global (unique) index of the current input
"character"
"""
raise NotImplementedError
def generate_connection_e(self,N_e):
"""
Generates connection matrix W_eu from input to the excitatory
population
Parameters:
N_e: int
Number of excitatory units
"""
raise NotImplementedError
def generate_connection_i(self,N_i):
"""
Generates connection matrix W_iu from input to the inhibitory
population
Parameters:
N_i: int
Number of inhibitory units
"""
raise NotImplementedError
def update_W_eu(self,W_eu):
"""
Modifies W_eu if necessary
Called every step in SORN after next() was called but before
W_eu is used
"""
pass
class CountingSource(AbstractSource):
"""
Source for the counting task.
    Different words are presented with individual probabilities.
"""
def __init__(self, words,probs, N_u_e, N_u_i, avoid=False,
permute_ambiguous=False):
"""
Initializes variables.
Parameters:
words: list
The words to present
probs: matrix
The probabilities of transitioning between word i and j
                It is assumed that they sum to 1
N_u: int
Number of active units per step
avoid: bool
Avoid same excitatory units for different words
"""
self.word_index = 0 #Index for word
self.ind = 0 #Index within word
self.words = words #different words
self.probs = probs #Probability of transitioning from i to j
self.N_u_e = int(N_u_e) #Number active per step
self.N_u_i = int(N_u_i)
self.avoid = avoid
self.permute_ambiguous = permute_ambiguous
self.alphabet = unique("".join(words))
self.N_a = len(self.alphabet)
self.lookup = dict(zip(self.alphabet,range(self.N_a)))
self.glob_ind = [0]
self.glob_ind.extend(cumsum(map(len,words)))
self.predict = self.predictability()
self.reset()
@classmethod
def init_simple(cls, N_words, N_letters, word_length, max_fold_prob,
N_u_e, N_u_i, avoiding, words=None, seed=None):
"""
Construct the arguments for the source to make it usable for the
cluster
Parameters:
N_words: int
Number of different words
N_letters: int
Number of letters to generate words from
word_length: list
Range of length (unimodal distribution)
max_fold_prob: float
maximal probability difference between words
N_u_e: int
Number of active excitatory units per step
N_u_i: int
Number of active inhibitory units per step
            avoiding: bool
Avoid same excitatory units for different words
"""
newseed = np.random.randint(0,4000000000)
if seed is not None:
np.random.seed(seed=seed)
letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
assert(N_letters <= len(letters))
letters = array([x for x in letters[:N_letters]])
if words is None:
words = []
for i in range(N_words):
word = letters[np.random.randint(0,N_letters,
np.random.randint(
word_length[0], word_length[1]+1))]
words.append(''.join(word))
else:
assert(N_words == len(words) and
N_letters == len(unique(''.join(words))))
probs = array([np.random.rand(N_words)*(max_fold_prob-1)+1]
*N_words)
# Normalize
probs /= sum(probs,1)
if seed is not None:
np.random.seed(seed=newseed)
return CountingSource(words, probs, N_u_e, N_u_i,avoid=avoiding)
def generate_connection_e(self,N_e):
W = zeros((N_e,self.N_a))
available = set(range(N_e))
for a in range(self.N_a):
temp = random.sample(available,self.N_u_e)
W[temp,a] = 1
if self.avoid:
available = available.difference(temp)
# The underscore has the special property that it doesn't
# activate anything:
if '_' in self.lookup:
W[:,self.lookup['_']] = 0
c = utils.Bunch(use_sparse=False,
lamb=np.inf,
avoid_self_connections=False)
ans = synapses.create_matrix((N_e,self.N_a),c)
ans.W = W
return ans
def generate_connection_i(self,N_i):
c = utils.Bunch(use_sparse=False,
lamb=np.inf,
avoid_self_connections=False)
ans = synapses.create_matrix((N_i,self.N_a),c)
W = zeros((N_i, self.N_a))
if N_i>0:
available = set(range(N_i))
for a in range(self.N_a):
temp = random.sample(available,self.N_u_i)
W[temp,a] = 1
#~ if self.avoid: # N_i is smaller -> broad inhibition?
#~ available = available.difference(temp)
if '_' in self.lookup:
W[:,self.lookup['_']] = 0
ans.W = W
return ans
def char(self):
word = self.words[self.word_index]
return word[self.ind]
def index(self):
character = self.char()
ind = self.lookup[character]
return ind
def next_word(self):
self.ind = 0
w = self.word_index
p = self.probs[w,:]
self.word_index = find(rand()<=cumsum(p))[0]
def next(self):
self.ind = self.ind+1
string = self.words[self.word_index]
if self.ind >= len(string):
self.next_word()
ans = zeros(self.N_a)
ans[self.index()] = 1
return ans
def reset(self):
self.next_word()
self.ind = -1
def global_index(self):
return self.glob_ind[self.word_index]+self.ind
def global_range(self):
return self.glob_ind[-1]
def trial_finished(self):
return self.ind+1 >= len(self.words[self.word_index])
def update_W_eu(self,W_eu):
if not self.permute_ambiguous:
return
# Init
if not hasattr(self,'A_neurons'):
self.A_neurons = where(W_eu.W[:,self.lookup['A']]==1)[0]
self.B_neurons = where(W_eu.W[:,self.lookup['B']]==1)[0]
self.N_u = int(len(self.A_neurons))
assert(self.N_u == len(self.B_neurons))
self.N_A = {}
self.N_B = {}
for letter in [word[0] for word in self.words]:
letter_index = self.lookup[letter]
self.N_A[letter] = sum(W_eu.W[self.A_neurons,letter_index]).round().astype(int)
self.N_B[letter] = sum(W_eu.W[self.B_neurons,letter_index]).round().astype(int)
# When new word presented, permute its input
# careful! simple permutation goes crazy when inputs overlap
if self.ind == 0:
letter = self.char()
if letter in ['A','B']:
return
letter_index = self.index()
# First set both to zero so that the second doesn't set
# units of the first back to zero (overlap case)
W_eu.W[self.A_neurons,letter_index] *= 0
W_eu.W[self.B_neurons,letter_index] *= 0
W_eu.W[self.A_neurons[smpl(xrange(self.N_u),self.N_A[letter])],
letter_index] = 1
W_eu.W[self.B_neurons[smpl(xrange(self.N_u),self.N_B[letter])],
letter_index] = 1
def predictability(self):
temp = self.probs
for n in range(10):
temp = temp.dot(temp)
final = temp[0,:]
#Let's assume that all words have unique initial letters
probs = map(len, self.words)
probs = array(probs)
probs = (probs + self.probs.max(1)-1)/probs
return sum(final*probs)
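# Usage sketch (illustrative only, not executed here): a two-word counting
# task with equal transition probabilities, assuming the numpy/pylab names
# (array, zeros, ...) imported at the top of this module.
#   words = ['ABBBC', 'DBBBE']
#   probs = array([[0.5, 0.5],
#                  [0.5, 0.5]])
#   source = CountingSource(words, probs, N_u_e=10, N_u_i=2, avoid=True)
#   stimulus = source.next()  # one-hot vector of length source.N_a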
class RandomLetterSource(CountingSource):
def __init__(self, N_letters, N_u_e, N_u_i, avoid=False):
letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
assert(N_letters <= len(letters))
words = [x for x in letters[:N_letters]]
probs = ones((N_letters,N_letters))
probs /= sum(probs,1)
super(RandomLetterSource, self).__init__(words, probs, N_u_e,
N_u_i,avoid=avoid)
class TrialSource(AbstractSource):
"""
This source takes any other source and gives it a trial-like
structure with blank periods in between stimulation periods.
The wrapped source has to implement a trial_finished() method that
returns True at the end of a trial.
"""
def __init__(self,source,blank_min_length,blank_var_length,
defaultstim,resetter=None):
assert(hasattr(source,'trial_finished'))
self.source = source
self.blank_min_length = blank_min_length
self.blank_var_length = blank_var_length
self.reset_blank_length()
self.defaultstim = defaultstim
self.resetter = resetter
self._reset_source()
self.blank_step = 0
def reset_blank_length(self):
if self.blank_var_length > 0:
self.blank_length = self.blank_min_length\
+ randint(self.blank_var_length)
else:
self.blank_length = self.blank_min_length
def next(self):
if not self.source.trial_finished():
return self.source.next()
else:
if self.blank_step >= self.blank_length:
self.blank_step = 0
self._reset_source()
self.reset_blank_length()
return self.source.next()
else:
self.blank_step += 1
return self.defaultstim
def _reset_source(self):
if self.resetter is not None:
getattr(self.source,self.resetter)()
def global_range(self):
return self.source.global_range()
def global_index(self):
if self.blank_step > 0:
return -1
return self.source.global_index()
def generate_connection_e(self, N_e):
return self.source.generate_connection_e(N_e)
def generate_connection_i(self, N_i):
return self.source.generate_connection_i(N_i)
def update_W_eu(self,W_eu):
self.source.update_W_eu(W_eu)
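# Usage sketch (illustrative only, not executed here): wrap a CountingSource
# so that each word is followed by a blank period of 5 to 9 steps; 'reset'
# names an existing method of the wrapped source.
#   trial_source = TrialSource(source, blank_min_length=5,
#                              blank_var_length=5,
#                              defaultstim=zeros(source.N_a),
#                              resetter='reset')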
class AndreeaCountingSource(AbstractSource):
"""
This was only for debugging purposes - it resembles her matlab code
perfectly
"""
def __init__(self,sequence,sequence_U,pop,train):
self.pop = pop-1
self.seq = sequence[0]-1
self.seq_u = sequence_U[0]-1
self.t = -1
# change m,n,x to make them identical
if train:
self.seq[self.seq==2] = 80
self.seq[self.seq==3] = 2
self.seq[self.seq==4] = 3
self.seq[self.seq==80] = 4
self.seq_u[self.seq_u>13] = (self.seq_u[self.seq_u>13]%7)+7
self.lookup = {'A':0,'B':1,'M':2,'N':3,'X':4}
else:
self.seq[self.seq==2] = 7
self.seq[self.seq==9] = 2
self.seq[self.seq==3] = 5
self.seq[self.seq==10] = 3
self.seq[self.seq==4] = 6
self.seq[self.seq==11] = 4
self.lookup = {'A':0,'B':1,'X':7,'M':5,'N':6,'C':2,'D':3,
'E':4}
#~ self.lookup = {'A':0,'B':1,'X':2,'M':3,'N':4,'C':9,
#~ 'D':10,'E':11}
self.alphabet = 'ABCDEMNX'
self.words = ['AXXXXXM','BXXXXXN','CXXXXXN','CXXXXXM',
'DXXXXXN','DXXXXXM','EXXXXXN','EXXXXXM']
self.glob_ind = [0]
self.glob_ind.extend(cumsum(map(len,self.words)))
self.N_a = self.seq.max()+1
def next(self):
self.t += 1
tmp = zeros((self.N_a))
tmp[self.seq[self.t]] = 1
return tmp
def global_range(self):
return self.seq_u.max()
def global_index(self):
return self.seq_u[self.t]
def generate_connection(self,N_e):
W = np.zeros((N_e,self.N_a))
for i in range(self.N_a):
if i <= 4: #--> A,B,X,M,N
W[self.pop[:,i],i] = 1
if i == 9:
W[self.pop[0:2,0],i] = 1
W[self.pop[2:10,1],i] = 1
if i == 10:
W[self.pop[0:5,0],i] = 1
W[self.pop[5:10,1],i] = 1
if i == 11:
W[self.pop[0:8,0],i] = 1
W[self.pop[8:10,1],i] = 1
self.W = W
return W
class NoSource(AbstractSource):
"""
No input for the spontaneous conditions
Parameters:
N_u: int
Number of input units
"""
def __init__(self,N_u=1):
self.N_u = N_u
def next(self):
return np.zeros((self.N_u))
def global_range(self):
return 1
def global_index(self):
return -1
def generate_connection_e(self,N_e):
c = utils.Bunch(use_sparse=False,
lamb=np.inf, # cannot be 0
avoid_self_connections=False)
tmpsyn = synapses.create_matrix((N_e,self.N_u),c)
tmpsyn.set_synapses(tmpsyn.get_synapses()*0)
return tmpsyn
def generate_connection_i(self,N_i):
c = utils.Bunch(use_sparse=False,
lamb=np.inf, # cannot be 0
avoid_self_connections=False)
tmpsyn = synapses.create_matrix((N_i,self.N_u),c)
tmpsyn.set_synapses(tmpsyn.get_synapses()*0)
return tmpsyn
class RandomSource(AbstractSource):
"""
Poisson input spike trains.
"""
def __init__(self, firing_rate, N_neurons, connection_density,
eta_stdp):
"""
Initialize the source
Parameters:
firing_rate: double
The firing rate of all input neurons
N_neurons: int
The number of poisson input units
connection_density: double
Density of connections from input to excitatory pop
eta_stdp: double
STDP rate for the W_eu matrix
"""
self.rate = firing_rate
self.N = N_neurons
self.density = connection_density
self.eta_stdp = eta_stdp
def next(self):
return rand(self.N)<=self.rate
def global_range(self):
return 1
def global_index(self):
return 0
def generate_connection(self,N_e):
c = utils.Bunch(use_sparse=False,
lamb=self.density*N_e,
avoid_self_connections=False,
#CHANGE should this be different?
eta_stdp = self.eta_stdp)
tmp = synapses.create_matrix((N_e,self.N),c)
# get correct connection density
noone = True
while(noone):
tmp.set_synapses((rand(N_e,self.N)<self.density).astype(
float))
if sum(tmp.get_synapses()) > 0:
noone = False
return tmp
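# Usage sketch (illustrative only, not executed here): 20 independent Poisson
# units firing with probability 0.05 per step and plastic input synapses.
#   poisson = RandomSource(firing_rate=0.05, N_neurons=20,
#                          connection_density=0.1, eta_stdp=0.001)
#   spikes = poisson.next()  # boolean vector of length 20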
#Useful for turning an index into a direction
mapping = {0:(+1,+0),
1:(-1,+0),
2:(+0,+1),
3:(+0,-1),
4:(+1,+1),
5:(+1,-1),
6:(-1,+1),
7:(-1,-1)}
class DriftingSource(AbstractSource):
"""
One of Philip's sources. Drifting objects in different directions
"""
def __init__(self,c):
self.X = c.X #num horizontal pixels
self.Y = c.Y #num vertical pixels
#number of directions of possible movement
self.num_direction = c.num_direction
self.num_steps = c.num_steps
self.choose_each_step = c.choose_each_step
self.symbol = c.symbol # Number of bits for each symbol
#~ self.N = c.N #number of neurons
self.change_prob = c.change_prob #probability of changing dir
self.direction = random.randrange(0,self.num_direction)
self.all = list(itertools.product(range(c.X),range(c.Y)))
self.active = {}
def get_new_symbols(self,next):
everything = set(self.all)
active = set(next)
remain = list(everything.difference(active))
random.shuffle(remain)
activated = remain[:self.choose_each_step]
return activated
def next(self):
if random.random() < self.change_prob:
#random.randint has different endpoint behaviour from
# np.random.randint!!
self.direction = random.randrange(0,self.num_direction)
temp = {}
(dx,dy) = mapping[self.direction]
for (x,y) in self.active.keys():
count = self.active[(x,y)] - 1
if count <= 0:
continue
nx = (x+dx)%self.X
ny = (y+dy)%self.Y
temp[(nx,ny)] = count
activated = self.get_new_symbols(temp.keys())
for k in activated:
temp[k] = self.num_steps
self.active = temp
# Now convert that into representation
ans = np.zeros((self.X,self.Y))
for (x,y) in self.active.keys():
ans[x,y] = 1
ans.shape = self.X*self.Y
return ans
def generate_connection(self,N):
W = np.zeros((N,self.X,self.Y))
available = set(range(N))
for a in range(self.X):
for b in range(self.Y):
temp = random.sample(available,self.symbol)
W[temp,a,b] = 1
available = available.difference(temp)
W.shape = (N,self.X*self.Y)
c = utils.Bunch(use_sparse=False,
lamb=np.inf,
avoid_self_connections=False)
ans = synapses.create_matrix((N,self.X*self.Y),c)
ans.W = W
return ans
def global_range(self):
return self.num_direction
def global_index(self):
return self.direction
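# Usage sketch (illustrative only, not executed here): a 10x10 pixel field
# with 8 movement directions; the field names mirror the attributes read in
# DriftingSource.__init__.
#   c = utils.Bunch(X=10, Y=10, num_direction=8, num_steps=5,
#                   choose_each_step=2, symbol=3, change_prob=0.05)
#   drift = DriftingSource(c)
#   frame = drift.next()  # flat array of length X*Y with active pixels at 1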
| {
"content_hash": "141f364ba3b72ce45f46f65225e4557c",
"timestamp": "",
"source": "github",
"line_count": 576,
"max_line_length": 95,
"avg_line_length": 33.81597222222222,
"alnum_prop": 0.5202279494814662,
"repo_name": "Saran-nns/SORN",
"id": "224fc026ab91f28720bc1d49f1a4ccb09980f90b",
"size": "19478",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "common/sources.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "2238"
},
{
"name": "Jupyter Notebook",
"bytes": "744836"
},
{
"name": "Matlab",
"bytes": "17838"
},
{
"name": "Objective-C",
"bytes": "580"
},
{
"name": "Python",
"bytes": "430689"
}
],
"symlink_target": ""
} |
import unittest
import sublime
from Vintageous.state import State
from Vintageous.vi.utils import modes
from Vintageous.vi.mappings import Mappings
from Vintageous.vi.mappings import _mappings
from Vintageous.vi.mappings import mapping_status
from Vintageous.tests import set_text
from Vintageous.tests import add_sel
from Vintageous.tests import make_region
from Vintageous.tests import ViewTest
from Vintageous.vi.cmd_base import cmd_types
adding_tests = (
(modes.NORMAL, 'G', 'G_', 'adding to normal mode'),
(modes.VISUAL, 'G', 'G_', 'adding to visual mode'),
(modes.OPERATOR_PENDING, 'G', 'G_', 'adding to operator pending mode'),
(modes.VISUAL_LINE, 'G', 'G_', 'adding to visual line mode'),
(modes.VISUAL_BLOCK, 'G', 'G_', 'adding to visual block mode'),
)
class Test_Mappings_AddingAndRemoving(ViewTest):
def setUp(self):
super().setUp()
self.mappings = Mappings(self.state)
self.mappings.clear()
def testCanAdd(self):
for (i, data) in enumerate(adding_tests):
mode, keys, target, msg = data
self.mappings.add(mode, keys, target)
self.assertEqual(_mappings[mode][keys], {'name': target, 'type': cmd_types.USER}, '{0} [{1}] failed'.format(msg, i))
self.mappings.clear()
def testCanRemove(self):
for (i, data) in enumerate(adding_tests):
mode, keys, target, msg = data
self.mappings.add(mode, keys, target)
self.mappings.remove(mode, keys)
self.assertFalse(_mappings[modes.NORMAL])
self.assertFalse(_mappings[modes.VISUAL])
self.assertFalse(_mappings[modes.VISUAL_LINE])
self.assertFalse(_mappings[modes.VISUAL_BLOCK])
expanding_tests = (
((modes.NORMAL, 'G', 'G_'), ('G', 'G', 'G_', '', 'G', mapping_status.COMPLETE)),
((modes.NORMAL, '<C-m>', 'daw'), ('<C-m>', '<C-m>', 'daw', '', '<C-m>', mapping_status.COMPLETE)),
((modes.NORMAL, '<C-m>', 'daw'), ('<C-m>x', '<C-m>', 'daw', 'x', '<C-m>x', mapping_status.COMPLETE)),
((modes.NORMAL, 'xxA', 'daw'), ('xx', 'xx', '', '', 'xx', mapping_status.INCOMPLETE)),
)
class Test_Mapping_Expanding(ViewTest):
def setUp(self):
super().setUp()
self.mappings = Mappings(self.state)
self.mappings.clear()
def testCanExpand(self):
for (i, data) in enumerate(expanding_tests):
setup_data, test_data = data
mode, keys, new_mapping = setup_data
self.mappings.add(mode, keys, new_mapping)
self.state.mode = modes.NORMAL
seq, expected_head, expected_mapping, expected_tail, expected_full, expected_status = test_data
result = self.mappings.expand_first(seq)
self.assertEqual(result.head, expected_head, '[{0}] head failed'.format(i))
self.assertEqual(result.tail, expected_tail, '[{0}] tail failed'.format(i))
self.assertEqual(result.mapping, expected_mapping, '[{0}] mapping failed'.format(i))
self.assertEqual(result.sequence, expected_full, '[{0}] sequence failed'.format(i))
self.assertEqual(result.status, expected_status, '[{0}] status failed'.format(i))
self.mappings.clear()
| {
"content_hash": "bb5d266058c49d3d85ffaf9c25f320a6",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 128,
"avg_line_length": 40.19277108433735,
"alnum_prop": 0.6064148681055156,
"repo_name": "xushuwei202/Vintageous",
"id": "a4839a80d39f6574c76baa2651080abfb50b9300",
"size": "3336",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "tests/vi/test_mappings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "60409"
},
{
"name": "PowerShell",
"bytes": "4216"
},
{
"name": "Python",
"bytes": "967331"
},
{
"name": "Shell",
"bytes": "431"
}
],
"symlink_target": ""
} |
from __future__ import print_function, unicode_literals
import win32com.client
import codecs
FILE = 1
DRAFT = 4
def login(user, password):
app = win32com.client.Dispatch('NovellGroupWareSession')
account = app.MultiLogin(user, None, password, 1)
# 0 - promptIfNeeded
# 1 - neverPrompt
# 2 - allowPasswordPrompt
return account
def incoming(account):
return account.MailBox.Messages.Find('(mail) and (box_type = incoming)')
def msg_atts(msg):
'att generator'
for att in msg.Attachments:
if att.ObjType == FILE:
fn = att.FileName
if not fn:
continue
elif fn == 'Mime.822':
# email from Thunderbird through smtp
continue
elif fn == 'Header':
# forwarded from Thunderbird through smtp
continue
yield att
def att_save(att, fpath):
if att.AttachmentSize > 0:
att.Save(fpath)
else:
# GW-error workaround, cat > fpath
with open(fpath, 'wb'):
pass
def msg_move(msg, fromFolder, toFolder):
fromFolder.Messages.Move(msg, toFolder.Messages)
def msg_move2(msg, toFolder):
'move from Inbox'
inbox = msg.Parent.MailBox
folders = msg.EnclosingFolders
if inbox in folders:
msg_move(msg, inbox, toFolder)
elif not toFolder in folders:
toFolder.Messages.Add(msg)
class AttStream:
def __init__(self, att):
self.stream = att.Stream
self.size = att.AttachmentSize
def read(self, size = -1):
if size < 0:
size = self.size
data = self.stream.Read(size)
return str(data)
def close(self):
pass
def att_text(att, encoding):
fp = AttStream(att)
return fp.read().decode(encoding)
def att_reader(att, encoding):
'''
with att_reader(att, encoding) as fp:
do_something
'''
fp = AttStream(att)
return codecs.getreader(encoding)(fp)
def create_msg(folder):
return folder.Messages.Add('GW.MESSAGE.MAIL', DRAFT)
def add_recipients(msg, *addrL):
for addr in addrL:
msg.Recipients.Add(addr)
def add_file(msg, fpath, fn = None):
if fn:
msg.Attachments.Add(fpath, FILE, fn)
else:
msg.Attachments.Add(fpath, FILE)
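# Usage sketch (illustrative only; requires the GroupWise COM client on
# Windows). User name, password and file handling are placeholders.
#   account = login('jdoe', 'secret')
#   for msg in incoming(account):
#       for att in msg_atts(msg):
#           att_save(att, att.FileName)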
| {
"content_hash": "6d02013eab7ab7f2ef350247c2b8fb15",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 76,
"avg_line_length": 25.91111111111111,
"alnum_prop": 0.5969125214408233,
"repo_name": "ActiveState/code",
"id": "194bc9473880003affdf3b1763632087be9b103d",
"size": "2332",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/577410_working_with_GroupWise/recipe-577410.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
} |
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'silk'
copyright = u'2014, Michael Ford'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.3'
# The full version, including alpha/beta/rc tags.
release = '0.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'silkdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'silk.tex', u'silk Documentation',
u'Michael Ford', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'silk', u'silk Documentation',
[u'Michael Ford'], 1),
('profiling', 'Profiling', u'Profiling',
[u'Michael Ford'], 2),
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'silk', u'silk Documentation',
u'Michael Ford', 'silk', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| {
"content_hash": "d59ec81c5204a84304c138dbd698cd0a",
"timestamp": "",
"source": "github",
"line_count": 246,
"max_line_length": 79,
"avg_line_length": 31.585365853658537,
"alnum_prop": 0.7021879021879022,
"repo_name": "Alkalit/silk",
"id": "30f4e4ac5246434efd82b1936b0848e56249f9a8",
"size": "8187",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "23911"
},
{
"name": "HTML",
"bytes": "60030"
},
{
"name": "JavaScript",
"bytes": "85804"
},
{
"name": "Python",
"bytes": "218196"
}
],
"symlink_target": ""
} |
"""distutils.ccompiler
Contains CCompiler, an abstract base class that defines the interface
for the Distutils compiler abstraction model."""
# created 1999/07/05, Greg Ward
__revision__ = "$Id: ccompiler.py,v 1.39 2001/02/27 19:13:15 akuchling Exp $"
import sys, os, re
from types import *
from copy import copy
from distutils.errors import *
from distutils.spawn import spawn
from distutils.file_util import move_file
from distutils.dir_util import mkpath
from distutils.dep_util import newer_pairwise, newer_group
from distutils.util import split_quoted, execute
class CCompiler:
"""Abstract base class to define the interface that must be implemented
by real compiler classes. Also has some utility methods used by
several compiler classes.
The basic idea behind a compiler abstraction class is that each
instance can be used for all the compile/link steps in building a
single project. Thus, attributes common to all of those compile and
link steps -- include directories, macros to define, libraries to link
against, etc. -- are attributes of the compiler instance. To allow for
variability in how individual files are treated, most of those
attributes may be varied on a per-compilation or per-link basis.
"""
# 'compiler_type' is a class attribute that identifies this class. It
# keeps code that wants to know what kind of compiler it's dealing with
# from having to import all possible compiler classes just to do an
# 'isinstance'. In concrete CCompiler subclasses, 'compiler_type'
# should really, really be one of the keys of the 'compiler_class'
# dictionary (see below -- used by the 'new_compiler()' factory
# function) -- authors of new compiler interface classes are
# responsible for updating 'compiler_class'!
compiler_type = None
# XXX things not handled by this compiler abstraction model:
# * client can't provide additional options for a compiler,
# e.g. warning, optimization, debugging flags. Perhaps this
# should be the domain of concrete compiler abstraction classes
# (UnixCCompiler, MSVCCompiler, etc.) -- or perhaps the base
# class should have methods for the common ones.
# * can't completely override the include or library search
# path, ie. no "cc -I -Idir1 -Idir2" or "cc -L -Ldir1 -Ldir2".
# I'm not sure how widely supported this is even by Unix
# compilers, much less on other platforms. And I'm even less
# sure how useful it is; maybe for cross-compiling, but
# support for that is a ways off. (And anyways, cross
# compilers probably have a dedicated binary with the
# right paths compiled in. I hope.)
# * can't do really freaky things with the library list/library
# dirs, e.g. "-Ldir1 -lfoo -Ldir2 -lfoo" to link against
# different versions of libfoo.a in different locations. I
# think this is useless without the ability to null out the
# library search path anyways.
# Subclasses that rely on the standard filename generation methods
# implemented below should override these; see the comment near
# those methods ('object_filenames()' et. al.) for details:
src_extensions = None # list of strings
obj_extension = None # string
static_lib_extension = None
shared_lib_extension = None # string
static_lib_format = None # format string
shared_lib_format = None # prob. same as static_lib_format
exe_extension = None # string
def __init__ (self,
verbose=0,
dry_run=0,
force=0):
self.verbose = verbose
self.dry_run = dry_run
self.force = force
# 'output_dir': a common output directory for object, library,
# shared object, and shared library files
self.output_dir = None
# 'macros': a list of macro definitions (or undefinitions). A
# macro definition is a 2-tuple (name, value), where the value is
# either a string or None (no explicit value). A macro
# undefinition is a 1-tuple (name,).
self.macros = []
# 'include_dirs': a list of directories to search for include files
self.include_dirs = []
# 'libraries': a list of libraries to include in any link
# (library names, not filenames: eg. "foo" not "libfoo.a")
self.libraries = []
# 'library_dirs': a list of directories to search for libraries
self.library_dirs = []
# 'runtime_library_dirs': a list of directories to search for
# shared libraries/objects at runtime
self.runtime_library_dirs = []
# 'objects': a list of object files (or similar, such as explicitly
# named library files) to include on any link
self.objects = []
for key in self.executables.keys():
self.set_executable(key, self.executables[key])
# __init__ ()
def set_executables (self, **args):
"""Define the executables (and options for them) that will be run
to perform the various stages of compilation. The exact set of
executables that may be specified here depends on the compiler
class (via the 'executables' class attribute), but most will have:
compiler the C/C++ compiler
linker_so linker used to create shared objects and libraries
linker_exe linker used to create binary executables
archiver static library creator
On platforms with a command-line (Unix, DOS/Windows), each of these
is a string that will be split into executable name and (optional)
list of arguments. (Splitting the string is done similarly to how
Unix shells operate: words are delimited by spaces, but quotes and
backslashes can override this. See
'distutils.util.split_quoted()'.)
"""
# Note that some CCompiler implementation classes will define class
# attributes 'cpp', 'cc', etc. with hard-coded executable names;
# this is appropriate when a compiler class is for exactly one
# compiler/OS combination (eg. MSVCCompiler). Other compiler
# classes (UnixCCompiler, in particular) are driven by information
# discovered at run-time, since there are many different ways to do
# basically the same things with Unix C compilers.
for key in args.keys():
if not self.executables.has_key(key):
raise ValueError, \
"unknown executable '%s' for class %s" % \
(key, self.__class__.__name__)
self.set_executable(key, args[key])
# set_executables ()
def set_executable(self, key, value):
if type(value) is StringType:
setattr(self, key, split_quoted(value))
else:
setattr(self, key, value)
def _find_macro (self, name):
i = 0
for defn in self.macros:
if defn[0] == name:
return i
i = i + 1
return None
def _check_macro_definitions (self, definitions):
"""Ensures that every element of 'definitions' is a valid macro
definition, ie. either (name,value) 2-tuple or a (name,) tuple. Do
nothing if all definitions are OK, raise TypeError otherwise.
"""
for defn in definitions:
if not (type (defn) is TupleType and
(len (defn) == 1 or
(len (defn) == 2 and
(type (defn[1]) is StringType or defn[1] is None))) and
type (defn[0]) is StringType):
raise TypeError, \
("invalid macro definition '%s': " % defn) + \
"must be tuple (string,), (string, string), or " + \
"(string, None)"
# -- Bookkeeping methods -------------------------------------------
def define_macro (self, name, value=None):
"""Define a preprocessor macro for all compilations driven by this
compiler object. The optional parameter 'value' should be a
string; if it is not supplied, then the macro will be defined
without an explicit value and the exact outcome depends on the
compiler used (XXX true? does ANSI say anything about this?)
"""
# Delete from the list of macro definitions/undefinitions if
# already there (so that this one will take precedence).
i = self._find_macro (name)
if i is not None:
del self.macros[i]
defn = (name, value)
self.macros.append (defn)
def undefine_macro (self, name):
"""Undefine a preprocessor macro for all compilations driven by
this compiler object. If the same macro is defined by
'define_macro()' and undefined by 'undefine_macro()' the last call
takes precedence (including multiple redefinitions or
undefinitions). If the macro is redefined/undefined on a
per-compilation basis (ie. in the call to 'compile()'), then that
takes precedence.
"""
# Delete from the list of macro definitions/undefinitions if
# already there (so that this one will take precedence).
i = self._find_macro (name)
if i is not None:
del self.macros[i]
undefn = (name,)
self.macros.append (undefn)
def add_include_dir (self, dir):
"""Add 'dir' to the list of directories that will be searched for
header files. The compiler is instructed to search directories in
the order in which they are supplied by successive calls to
'add_include_dir()'.
"""
self.include_dirs.append (dir)
def set_include_dirs (self, dirs):
"""Set the list of directories that will be searched to 'dirs' (a
list of strings). Overrides any preceding calls to
'add_include_dir()'; subsequent calls to 'add_include_dir()' add
to the list passed to 'set_include_dirs()'. This does not affect
any list of standard include directories that the compiler may
search by default.
"""
self.include_dirs = copy (dirs)
def add_library (self, libname):
"""Add 'libname' to the list of libraries that will be included in
all links driven by this compiler object. Note that 'libname'
should *not* be the name of a file containing a library, but the
name of the library itself: the actual filename will be inferred by
the linker, the compiler, or the compiler class (depending on the
platform).
The linker will be instructed to link against libraries in the
order they were supplied to 'add_library()' and/or
'set_libraries()'. It is perfectly valid to duplicate library
names; the linker will be instructed to link against libraries as
many times as they are mentioned.
"""
self.libraries.append (libname)
def set_libraries (self, libnames):
"""Set the list of libraries to be included in all links driven by
this compiler object to 'libnames' (a list of strings). This does
not affect any standard system libraries that the linker may
include by default.
"""
self.libraries = copy (libnames)
def add_library_dir (self, dir):
"""Add 'dir' to the list of directories that will be searched for
libraries specified to 'add_library()' and 'set_libraries()'. The
linker will be instructed to search for libraries in the order they
are supplied to 'add_library_dir()' and/or 'set_library_dirs()'.
"""
self.library_dirs.append (dir)
def set_library_dirs (self, dirs):
"""Set the list of library search directories to 'dirs' (a list of
strings). This does not affect any standard library search path
that the linker may search by default.
"""
self.library_dirs = copy (dirs)
def add_runtime_library_dir (self, dir):
"""Add 'dir' to the list of directories that will be searched for
shared libraries at runtime.
"""
self.runtime_library_dirs.append (dir)
def set_runtime_library_dirs (self, dirs):
"""Set the list of directories to search for shared libraries at
runtime to 'dirs' (a list of strings). This does not affect any
standard search path that the runtime linker may search by
default.
"""
self.runtime_library_dirs = copy (dirs)
def add_link_object (self, object):
"""Add 'object' to the list of object files (or analogues, such as
explicitly named library files or the output of "resource
compilers") to be included in every link driven by this compiler
object.
"""
self.objects.append (object)
def set_link_objects (self, objects):
"""Set the list of object files (or analogues) to be included in
every link to 'objects'. This does not affect any standard object
files that the linker may include by default (such as system
libraries).
"""
self.objects = copy (objects)
# -- Private utility methods ---------------------------------------
# (here for the convenience of subclasses)
def _fix_compile_args (self, output_dir, macros, include_dirs):
"""Typecheck and fix-up some of the arguments to the 'compile()'
method, and return fixed-up values. Specifically: if 'output_dir'
is None, replaces it with 'self.output_dir'; ensures that 'macros'
is a list, and augments it with 'self.macros'; ensures that
'include_dirs' is a list, and augments it with 'self.include_dirs'.
Guarantees that the returned values are of the correct type,
i.e. for 'output_dir' either string or None, and for 'macros' and
'include_dirs' either list or None.
"""
if output_dir is None:
output_dir = self.output_dir
elif type (output_dir) is not StringType:
raise TypeError, "'output_dir' must be a string or None"
if macros is None:
macros = self.macros
elif type (macros) is ListType:
macros = macros + (self.macros or [])
else:
raise TypeError, \
"'macros' (if supplied) must be a list of tuples"
if include_dirs is None:
include_dirs = self.include_dirs
elif type (include_dirs) in (ListType, TupleType):
include_dirs = list (include_dirs) + (self.include_dirs or [])
else:
raise TypeError, \
"'include_dirs' (if supplied) must be a list of strings"
return (output_dir, macros, include_dirs)
# _fix_compile_args ()
def _prep_compile (self, sources, output_dir):
"""Determine the list of object files corresponding to 'sources',
and figure out which ones really need to be recompiled. Return a
list of all object files and a dictionary telling which source
files can be skipped.
"""
# Get the list of expected output (object) files
objects = self.object_filenames (sources,
strip_dir=1,
output_dir=output_dir)
if self.force:
skip_source = {} # rebuild everything
for source in sources:
skip_source[source] = 0
else:
# Figure out which source files we have to recompile according
# to a simplistic check -- we just compare the source and
# object file, no deep dependency checking involving header
# files.
skip_source = {} # rebuild everything
for source in sources: # no wait, rebuild nothing
skip_source[source] = 1
(n_sources, n_objects) = newer_pairwise (sources, objects)
for source in n_sources: # no really, only rebuild what's
skip_source[source] = 0 # out-of-date
return (objects, skip_source)
# _prep_compile ()
def _fix_object_args (self, objects, output_dir):
"""Typecheck and fix up some arguments supplied to various methods.
Specifically: ensure that 'objects' is a list; if output_dir is
None, replace with self.output_dir. Return fixed versions of
'objects' and 'output_dir'.
"""
if type (objects) not in (ListType, TupleType):
raise TypeError, \
"'objects' must be a list or tuple of strings"
objects = list (objects)
if output_dir is None:
output_dir = self.output_dir
elif type (output_dir) is not StringType:
raise TypeError, "'output_dir' must be a string or None"
return (objects, output_dir)
def _fix_lib_args (self, libraries, library_dirs, runtime_library_dirs):
"""Typecheck and fix up some of the arguments supplied to the
'link_*' methods. Specifically: ensure that all arguments are
lists, and augment them with their permanent versions
(eg. 'self.libraries' augments 'libraries'). Return a tuple with
fixed versions of all arguments.
"""
if libraries is None:
libraries = self.libraries
elif type (libraries) in (ListType, TupleType):
libraries = list (libraries) + (self.libraries or [])
else:
raise TypeError, \
"'libraries' (if supplied) must be a list of strings"
if library_dirs is None:
library_dirs = self.library_dirs
elif type (library_dirs) in (ListType, TupleType):
library_dirs = list (library_dirs) + (self.library_dirs or [])
else:
raise TypeError, \
"'library_dirs' (if supplied) must be a list of strings"
if runtime_library_dirs is None:
runtime_library_dirs = self.runtime_library_dirs
elif type (runtime_library_dirs) in (ListType, TupleType):
runtime_library_dirs = (list (runtime_library_dirs) +
(self.runtime_library_dirs or []))
else:
raise TypeError, \
"'runtime_library_dirs' (if supplied) " + \
"must be a list of strings"
return (libraries, library_dirs, runtime_library_dirs)
# _fix_lib_args ()
def _need_link (self, objects, output_file):
"""Return true if we need to relink the files listed in 'objects'
to recreate 'output_file'.
"""
if self.force:
return 1
else:
if self.dry_run:
newer = newer_group (objects, output_file, missing='newer')
else:
newer = newer_group (objects, output_file)
return newer
# _need_link ()
# -- Worker methods ------------------------------------------------
# (must be implemented by subclasses)
def preprocess (self,
source,
output_file=None,
macros=None,
include_dirs=None,
extra_preargs=None,
extra_postargs=None):
"""Preprocess a single C/C++ source file, named in 'source'.
Output will be written to file named 'output_file', or stdout if
'output_file' not supplied. 'macros' is a list of macro
definitions as for 'compile()', which will augment the macros set
with 'define_macro()' and 'undefine_macro()'. 'include_dirs' is a
list of directory names that will be added to the default list.
Raises PreprocessError on failure.
"""
pass
def compile (self,
sources,
output_dir=None,
macros=None,
include_dirs=None,
debug=0,
extra_preargs=None,
extra_postargs=None):
"""Compile one or more source files. 'sources' must be a list of
filenames, most likely C/C++ files, but in reality anything that
can be handled by a particular compiler and compiler class
(eg. MSVCCompiler can handle resource files in 'sources'). Return
a list of object filenames, one per source filename in 'sources'.
Depending on the implementation, not all source files will
necessarily be compiled, but all corresponding object filenames
will be returned.
If 'output_dir' is given, object files will be put under it, while
retaining their original path component. That is, "foo/bar.c"
normally compiles to "foo/bar.o" (for a Unix implementation); if
'output_dir' is "build", then it would compile to
"build/foo/bar.o".
'macros', if given, must be a list of macro definitions. A macro
definition is either a (name, value) 2-tuple or a (name,) 1-tuple.
The former defines a macro; if the value is None, the macro is
defined without an explicit value. The 1-tuple case undefines a
macro. Later definitions/redefinitions/ undefinitions take
precedence.
'include_dirs', if given, must be a list of strings, the
directories to add to the default include file search path for this
compilation only.
'debug' is a boolean; if true, the compiler will be instructed to
output debug symbols in (or alongside) the object file(s).
'extra_preargs' and 'extra_postargs' are implementation-dependent.
On platforms that have the notion of a command-line (e.g. Unix,
DOS/Windows), they are most likely lists of strings: extra
command-line arguments to prepend/append to the compiler command
line. On other platforms, consult the implementation class
documentation. In any event, they are intended as an escape hatch
for those occasions when the abstract compiler framework doesn't
cut the mustard.
Raises CompileError on failure.
"""
pass
def create_static_lib (self,
objects,
output_libname,
output_dir=None,
debug=0):
"""Link a bunch of stuff together to create a static library file.
The "bunch of stuff" consists of the list of object files supplied
as 'objects', the extra object files supplied to
'add_link_object()' and/or 'set_link_objects()', the libraries
supplied to 'add_library()' and/or 'set_libraries()', and the
libraries supplied as 'libraries' (if any).
'output_libname' should be a library name, not a filename; the
filename will be inferred from the library name. 'output_dir' is
the directory where the library file will be put.
'debug' is a boolean; if true, debugging information will be
included in the library (note that on most platforms, it is the
compile step where this matters: the 'debug' flag is included here
just for consistency).
Raises LibError on failure.
"""
pass
# values for target_desc parameter in link()
SHARED_OBJECT = "shared_object"
SHARED_LIBRARY = "shared_library"
EXECUTABLE = "executable"
def link (self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None):
"""Link a bunch of stuff together to create an executable or
shared library file.
The "bunch of stuff" consists of the list of object files supplied
as 'objects'. 'output_filename' should be a filename. If
'output_dir' is supplied, 'output_filename' is relative to it
(i.e. 'output_filename' can provide directory components if
needed).
'libraries' is a list of libraries to link against. These are
library names, not filenames, since they're translated into
filenames in a platform-specific way (eg. "foo" becomes "libfoo.a"
on Unix and "foo.lib" on DOS/Windows). However, they can include a
directory component, which means the linker will look in that
specific directory rather than searching all the normal locations.
'library_dirs', if supplied, should be a list of directories to
search for libraries that were specified as bare library names
(ie. no directory component). These are on top of the system
default and those supplied to 'add_library_dir()' and/or
'set_library_dirs()'. 'runtime_library_dirs' is a list of
directories that will be embedded into the shared library and used
to search for other shared libraries that *it* depends on at
run-time. (This may only be relevant on Unix.)
'export_symbols' is a list of symbols that the shared library will
export. (This appears to be relevant only on Windows.)
'debug' is as for 'compile()' and 'create_static_lib()', with the
slight distinction that it actually matters on most platforms (as
opposed to 'create_static_lib()', which includes a 'debug' flag
mostly for form's sake).
'extra_preargs' and 'extra_postargs' are as for 'compile()' (except
of course that they supply command-line arguments for the
particular linker being used).
Raises LinkError on failure.
"""
raise NotImplementedError
# Old 'link_*()' methods, rewritten to use the new 'link()' method.
def link_shared_lib (self,
objects,
output_libname,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None):
self.link(CCompiler.SHARED_LIBRARY, objects,
self.library_filename(output_libname, lib_type='shared'),
output_dir,
libraries, library_dirs, runtime_library_dirs,
export_symbols, debug,
extra_preargs, extra_postargs, build_temp)
def link_shared_object (self,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None):
self.link(CCompiler.SHARED_OBJECT, objects,
output_filename, output_dir,
libraries, library_dirs, runtime_library_dirs,
export_symbols, debug,
extra_preargs, extra_postargs, build_temp)
def link_executable (self,
objects,
output_progname,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
debug=0,
extra_preargs=None,
extra_postargs=None):
self.link(CCompiler.EXECUTABLE, objects,
self.executable_filename(output_progname), output_dir,
libraries, library_dirs, runtime_library_dirs, None,
debug, extra_preargs, extra_postargs, None)
# -- Miscellaneous methods -----------------------------------------
# These are all used by the 'gen_lib_options()' function; there is
# no appropriate default implementation so subclasses should
# implement all of these.
def library_dir_option (self, dir):
"""Return the compiler option to add 'dir' to the list of
directories searched for libraries.
"""
raise NotImplementedError
def runtime_library_dir_option (self, dir):
"""Return the compiler option to add 'dir' to the list of
directories searched for runtime libraries.
"""
raise NotImplementedError
def library_option (self, lib):
"""Return the compiler option to add 'dir' to the list of libraries
linked into the shared library or executable.
"""
raise NotImplementedError
def find_library_file (self, dirs, lib, debug=0):
"""Search the specified list of directories for a static or shared
library file 'lib' and return the full path to that file. If
'debug' true, look for a debugging version (if that makes sense on
the current platform). Return None if 'lib' wasn't found in any of
the specified directories.
"""
raise NotImplementedError
# -- Filename generation methods -----------------------------------
# The default implementation of the filename generating methods are
# prejudiced towards the Unix/DOS/Windows view of the world:
# * object files are named by replacing the source file extension
# (eg. .c/.cpp -> .o/.obj)
# * library files (shared or static) are named by plugging the
# library name and extension into a format string, eg.
# "lib%s.%s" % (lib_name, ".a") for Unix static libraries
# * executables are named by appending an extension (possibly
# empty) to the program name: eg. progname + ".exe" for
# Windows
#
# To reduce redundant code, these methods expect to find
# several attributes in the current object (presumably defined
# as class attributes):
# * src_extensions -
# list of C/C++ source file extensions, eg. ['.c', '.cpp']
# * obj_extension -
# object file extension, eg. '.o' or '.obj'
# * static_lib_extension -
# extension for static library files, eg. '.a' or '.lib'
# * shared_lib_extension -
# extension for shared library/object files, eg. '.so', '.dll'
# * static_lib_format -
# format string for generating static library filenames,
# eg. 'lib%s.%s' or '%s.%s'
# * shared_lib_format
# format string for generating shared library filenames
# (probably same as static_lib_format, since the extension
# is one of the intended parameters to the format string)
# * exe_extension -
# extension for executable files, eg. '' or '.exe'
def object_filenames (self,
source_filenames,
strip_dir=0,
output_dir=''):
if output_dir is None: output_dir = ''
obj_names = []
for src_name in source_filenames:
(base, ext) = os.path.splitext (src_name)
if ext not in self.src_extensions:
raise UnknownFileError, \
"unknown file type '%s' (from '%s')" % \
(ext, src_name)
if strip_dir:
base = os.path.basename (base)
obj_names.append (os.path.join (output_dir,
base + self.obj_extension))
return obj_names
# object_filenames ()
def shared_object_filename (self,
basename,
strip_dir=0,
output_dir=''):
if output_dir is None: output_dir = ''
if strip_dir:
basename = os.path.basename (basename)
return os.path.join (output_dir, basename + self.shared_lib_extension)
def executable_filename (self,
basename,
strip_dir=0,
output_dir=''):
if output_dir is None: output_dir = ''
if strip_dir:
basename = os.path.basename (basename)
return os.path.join(output_dir, basename + (self.exe_extension or ''))
def library_filename (self,
libname,
lib_type='static', # or 'shared'
strip_dir=0,
output_dir=''):
if output_dir is None: output_dir = ''
if lib_type not in ("static","shared"):
raise ValueError, "'lib_type' must be \"static\" or \"shared\""
fmt = getattr (self, lib_type + "_lib_format")
ext = getattr (self, lib_type + "_lib_extension")
(dir, base) = os.path.split (libname)
filename = fmt % (base, ext)
if strip_dir:
dir = ''
return os.path.join (output_dir, dir, filename)
# -- Utility methods -----------------------------------------------
def announce (self, msg, level=1):
if self.verbose >= level:
print msg
def debug_print (self, msg):
from distutils.core import DEBUG
if DEBUG:
print msg
def warn (self, msg):
sys.stderr.write ("warning: %s\n" % msg)
def execute (self, func, args, msg=None, level=1):
execute(func, args, msg, self.verbose >= level, self.dry_run)
def spawn (self, cmd):
spawn (cmd, verbose=self.verbose, dry_run=self.dry_run)
def move_file (self, src, dst):
return move_file (src, dst, verbose=self.verbose, dry_run=self.dry_run)
def mkpath (self, name, mode=0777):
mkpath (name, mode, self.verbose, self.dry_run)
# class CCompiler
# Map a sys.platform/os.name ('posix', 'nt') to the default compiler
# type for that platform. Keys are interpreted as re match
# patterns. Order is important; platform mappings are preferred over
# OS names.
_default_compilers = (
# Platform string mappings
# on a cygwin built python we can use gcc like an ordinary UNIXish
# compiler
('cygwin.*', 'unix'),
# OS name mappings
('posix', 'unix'),
('nt', 'msvc'),
('mac', 'mwerks'),
)
def get_default_compiler(osname=None, platform=None):
""" Determine the default compiler to use for the given platform.
osname should be one of the standard Python OS names (i.e. the
ones returned by os.name) and platform the common value
returned by sys.platform for the platform in question.
The default values are os.name and sys.platform in case the
parameters are not given.
"""
if osname is None:
osname = os.name
if platform is None:
platform = sys.platform
for pattern, compiler in _default_compilers:
if re.match(pattern, platform) is not None or \
re.match(pattern, osname) is not None:
return compiler
# Default to Unix compiler
return 'unix'
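# For example (illustrative only): get_default_compiler('posix', 'linux2')
# returns 'unix', and get_default_compiler('nt', 'win32') returns 'msvc'.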
# Map compiler types to (module_name, class_name) pairs -- ie. where to
# find the code that implements an interface to this compiler. (The module
# is assumed to be in the 'distutils' package.)
compiler_class = { 'unix': ('unixccompiler', 'UnixCCompiler',
"standard UNIX-style compiler"),
'msvc': ('msvccompiler', 'MSVCCompiler',
"Microsoft Visual C++"),
'cygwin': ('cygwinccompiler', 'CygwinCCompiler',
"Cygwin port of GNU C Compiler for Win32"),
'mingw32': ('cygwinccompiler', 'Mingw32CCompiler',
"Mingw32 port of GNU C Compiler for Win32"),
'bcpp': ('bcppcompiler', 'BCPPCompiler',
"Borland C++ Compiler"),
'mwerks': ('mwerkscompiler', 'MWerksCompiler',
"MetroWerks CodeWarrior"),
}
def show_compilers():
"""Print list of available compilers (used by the "--help-compiler"
options to "build", "build_ext", "build_clib").
"""
# XXX this "knows" that the compiler option it's describing is
# "--compiler", which just happens to be the case for the three
# commands that use it.
from distutils.fancy_getopt import FancyGetopt
compilers = []
for compiler in compiler_class.keys():
compilers.append(("compiler="+compiler, None,
compiler_class[compiler][2]))
compilers.sort()
pretty_printer = FancyGetopt(compilers)
pretty_printer.print_help("List of available compilers:")
def new_compiler (plat=None,
compiler=None,
verbose=0,
dry_run=0,
force=0):
"""Generate an instance of some CCompiler subclass for the supplied
platform/compiler combination. 'plat' defaults to 'os.name'
(eg. 'posix', 'nt'), and 'compiler' defaults to the default compiler
for that platform. Currently only 'posix' and 'nt' are supported, and
the default compilers are "traditional Unix interface" (UnixCCompiler
class) and Visual C++ (MSVCCompiler class). Note that it's perfectly
possible to ask for a Unix compiler object under Windows, and a
Microsoft compiler object under Unix -- if you supply a value for
'compiler', 'plat' is ignored.
"""
if plat is None:
plat = os.name
try:
if compiler is None:
compiler = get_default_compiler(plat)
(module_name, class_name, long_description) = compiler_class[compiler]
except KeyError:
msg = "don't know how to compile C/C++ code on platform '%s'" % plat
if compiler is not None:
msg = msg + " with '%s' compiler" % compiler
raise DistutilsPlatformError, msg
try:
module_name = "distutils." + module_name
__import__ (module_name)
module = sys.modules[module_name]
klass = vars(module)[class_name]
except ImportError:
raise DistutilsModuleError, \
"can't compile C/C++ code: unable to load module '%s'" % \
module_name
except KeyError:
raise DistutilsModuleError, \
("can't compile C/C++ code: unable to find class '%s' " +
"in module '%s'") % (class_name, module_name)
return klass (verbose, dry_run, force)
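# Usage sketch (illustrative only): the typical sequence a build command goes
# through with a freshly created compiler object; 'demo.c' is a placeholder.
#   cc = new_compiler(verbose=1)
#   cc.add_include_dir('include')
#   objects = cc.compile(['demo.c'], output_dir='build')
#   cc.link_executable(objects, 'demo', output_dir='build')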
def gen_preprocess_options (macros, include_dirs):
"""Generate C pre-processor options (-D, -U, -I) as used by at least
two types of compilers: the typical Unix compiler and Visual C++.
'macros' is the usual thing, a list of 1- or 2-tuples, where (name,)
means undefine (-U) macro 'name', and (name,value) means define (-D)
macro 'name' to 'value'. 'include_dirs' is just a list of directory
names to be added to the header file search path (-I). Returns a list
of command-line options suitable for either Unix compilers or Visual
C++.
"""
# XXX it would be nice (mainly aesthetic, and so we don't generate
# stupid-looking command lines) to go over 'macros' and eliminate
# redundant definitions/undefinitions (ie. ensure that only the
# latest mention of a particular macro winds up on the command
# line). I don't think it's essential, though, since most (all?)
# Unix C compilers only pay attention to the latest -D or -U
# mention of a macro on their command line. Similar situation for
# 'include_dirs'. I'm punting on both for now. Anyways, weeding out
# redundancies like this should probably be the province of
# CCompiler, since the data structures used are inherited from it
# and therefore common to all CCompiler classes.
pp_opts = []
for macro in macros:
if not (type (macro) is TupleType and
1 <= len (macro) <= 2):
raise TypeError, \
("bad macro definition '%s': " +
"each element of 'macros' list must be a 1- or 2-tuple") % \
macro
if len (macro) == 1: # undefine this macro
pp_opts.append ("-U%s" % macro[0])
elif len (macro) == 2:
if macro[1] is None: # define with no explicit value
pp_opts.append ("-D%s" % macro[0])
else:
# XXX *don't* need to be clever about quoting the
# macro value here, because we're going to avoid the
# shell at all costs when we spawn the command!
pp_opts.append ("-D%s=%s" % macro)
for dir in include_dirs:
pp_opts.append ("-I%s" % dir)
return pp_opts
# gen_preprocess_options ()
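# Worked example (added for illustration, not in the original file), following
# the tuple conventions documented in the docstring above:
#
#     gen_preprocess_options(
#         [('NDEBUG', None), ('PYVERSION', '"2.1"'), ('DEBUG',)],
#         ['Include', '/usr/local/include'])
#
# would return
#
#     ['-DNDEBUG', '-DPYVERSION="2.1"', '-UDEBUG',
#      '-IInclude', '-I/usr/local/include']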
def gen_lib_options (compiler, library_dirs, runtime_library_dirs, libraries):
"""Generate linker options for searching library directories and
linking with specific libraries. 'libraries' and 'library_dirs' are,
respectively, lists of library names (not filenames!) and search
directories. Returns a list of command-line options suitable for use
with some compiler (depending on the two format strings passed in).
"""
lib_opts = []
for dir in library_dirs:
lib_opts.append (compiler.library_dir_option (dir))
for dir in runtime_library_dirs:
lib_opts.append (compiler.runtime_library_dir_option (dir))
# XXX it's important that we *not* remove redundant library mentions!
# sometimes you really do have to say "-lfoo -lbar -lfoo" in order to
# resolve all symbols. I just hope we never have to say "-lfoo obj.o
# -lbar" to get things to work -- that's certainly a possibility, but a
# pretty nasty way to arrange your C code.
for lib in libraries:
(lib_dir, lib_name) = os.path.split (lib)
if lib_dir:
lib_file = compiler.find_library_file ([lib_dir], lib_name)
if lib_file:
lib_opts.append (lib_file)
else:
compiler.warn ("no library file corresponding to "
"'%s' found (skipping)" % lib)
else:
lib_opts.append (compiler.library_option (lib))
return lib_opts
# gen_lib_options ()
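# Illustrative sketch (added, not in the original file): the exact strings
# depend on the compiler object's *_option() methods, but with a typical
# Unix-style compiler, where library_dir_option(d) is '-L' + d and
# library_option(lib) is '-l' + lib, a call such as
#
#     gen_lib_options(cc, ['/usr/local/lib'], [], ['m', 'dl'])
#
# would be expected to return roughly
#
#     ['-L/usr/local/lib', '-lm', '-ldl']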
| {
"content_hash": "661c9fa735d7e83579de1e5f13770890",
"timestamp": "",
"source": "github",
"line_count": 1046,
"max_line_length": 79,
"avg_line_length": 41.14053537284895,
"alnum_prop": 0.592289638184649,
"repo_name": "MalloyPower/parsing-python",
"id": "703d9b9dfb45dc626ad305a5b9e5af9bfc4fcd0b",
"size": "43033",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-2.1/Lib/distutils/ccompiler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
} |
import base64
from urllib.parse import unquote, urlunparse
from urllib.request import getproxies, proxy_bypass, _parse_proxy
from scrapy.exceptions import NotConfigured
from scrapy.utils.httpobj import urlparse_cached
from scrapy.utils.python import to_bytes
class HttpProxyMiddleware:
def __init__(self, auth_encoding='latin-1'):
self.auth_encoding = auth_encoding
self.proxies = {}
for type_, url in getproxies().items():
try:
self.proxies[type_] = self._get_proxy(url, type_)
# some values such as '/var/run/docker.sock' can't be parsed
# by _parse_proxy and as such should be skipped
except ValueError:
continue
@classmethod
def from_crawler(cls, crawler):
if not crawler.settings.getbool('HTTPPROXY_ENABLED'):
raise NotConfigured
auth_encoding = crawler.settings.get('HTTPPROXY_AUTH_ENCODING')
return cls(auth_encoding)
def _basic_auth_header(self, username, password):
user_pass = to_bytes(
f'{unquote(username)}:{unquote(password)}',
encoding=self.auth_encoding)
return base64.b64encode(user_pass)
def _get_proxy(self, url, orig_type):
proxy_type, user, password, hostport = _parse_proxy(url)
proxy_url = urlunparse((proxy_type or orig_type, hostport, '', '', '', ''))
if user:
creds = self._basic_auth_header(user, password)
else:
creds = None
return creds, proxy_url
def process_request(self, request, spider):
creds, proxy_url = None, None
if 'proxy' in request.meta:
if request.meta['proxy'] is not None:
creds, proxy_url = self._get_proxy(request.meta['proxy'], '')
elif self.proxies:
parsed = urlparse_cached(request)
scheme = parsed.scheme
if (
(
# 'no_proxy' is only supported by http schemes
scheme not in ('http', 'https')
or not proxy_bypass(parsed.hostname)
)
and scheme in self.proxies
):
creds, proxy_url = self.proxies[scheme]
self._set_proxy_and_creds(request, proxy_url, creds)
def _set_proxy_and_creds(self, request, proxy_url, creds):
if proxy_url:
request.meta['proxy'] = proxy_url
elif request.meta.get('proxy') is not None:
request.meta['proxy'] = None
if creds:
request.headers[b'Proxy-Authorization'] = b'Basic ' + creds
request.meta['_auth_proxy'] = proxy_url
elif '_auth_proxy' in request.meta:
if proxy_url != request.meta['_auth_proxy']:
if b'Proxy-Authorization' in request.headers:
del request.headers[b'Proxy-Authorization']
del request.meta['_auth_proxy']
elif b'Proxy-Authorization' in request.headers:
if proxy_url:
request.meta['_auth_proxy'] = proxy_url
else:
del request.headers[b'Proxy-Authorization']
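# Usage sketch (added for illustration; the target URL and proxy address are
# made up): once HTTPPROXY_ENABLED is set, a spider can route an individual
# request through a proxy by putting it in request.meta['proxy'], which
# process_request() above picks up. Credentials embedded in the proxy URL are
# turned into a Proxy-Authorization header by _get_proxy()/_basic_auth_header().
#
#     yield scrapy.Request(
#         'https://example.com/page',
#         meta={'proxy': 'http://user:secret@proxy.example.com:8080'},
#     )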
| {
"content_hash": "3e25dd0bc6c99b818d683a12a87fb478",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 83,
"avg_line_length": 37.75,
"alnum_prop": 0.5771050141911069,
"repo_name": "pablohoffman/scrapy",
"id": "dd8a7e79778bac598802a934c27fef7a4659b858",
"size": "3171",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scrapy/downloadermiddlewares/httpproxy.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "3237"
},
{
"name": "Python",
"bytes": "2027142"
},
{
"name": "Roff",
"bytes": "2010"
},
{
"name": "Shell",
"bytes": "259"
}
],
"symlink_target": ""
} |
import unittest
''' O(1) in memory, since it only uses a handful of variables.
O(N²) in time, because it uses two nested while loops to traverse the list. '''
def insertion_sort(seq):
if len(seq) <= 1:
return seq
else:
i_desordenada = 1
limite_ordenada = 0
while i_desordenada < len(seq):
i_ordenada = limite_ordenada
if seq[limite_ordenada] < seq[i_desordenada]:
limite_ordenada += 1
i_desordenada += 1
else:
i_ordenada += 1
i_desordenada += 1
while i_ordenada > 0:
if seq[i_ordenada] < seq[i_ordenada - 1]:
seq[i_ordenada], seq[i_ordenada - 1] = seq[i_ordenada - 1], seq[i_ordenada]
i_ordenada -= 1
limite_ordenada += 1
return seq
class OrdenacaoTestes(unittest.TestCase):
def teste_lista_vazia(self):
self.assertListEqual([], insertion_sort([]))
def teste_lista_unitaria(self):
self.assertListEqual([1], insertion_sort([1]))
def teste_lista_binaria(self):
self.assertListEqual([1, 2], insertion_sort([2, 1]))
    def teste_lista_maior(self):  # renamed: a duplicate "teste_lista_binaria" was shadowing the test above
self.assertListEqual([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], insertion_sort([9, 7, 1, 8, 5, 3, 6, 4, 2, 0]))
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "153f4c9ba6bc5ac05a240c29a7f3c053",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 108,
"avg_line_length": 31.318181818181817,
"alnum_prop": 0.5435413642960812,
"repo_name": "Julianosantosb/dataStructure",
"id": "0425b001d4d7e7ecb1ac91d296039d35787911be",
"size": "1382",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "inserctionSort.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15050"
}
],
"symlink_target": ""
} |
""" S3 Framework Extensions for web2py
This package is loaded in models/000_1st_run.py as "s3base",
this namespace can be used to access all S3 classes, e.g.::
s3base.S3Resource()
@see: U{B{I{S3 Developer Guidelines}} <http://eden.sahanafoundation.org/wiki/DeveloperGuidelinesS3>}
@requires: U{B{I{gluon}} <http://web2py.com>}
@author: Fran Boon <francisboon[at]gmail.com>
@author: Dominic König <dominic[at]aidiq.com>
@copyright: 2009-2011 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
# Import all names from the S3 modules that shall be accessible
# under the s3base namespace:
# Basic Tools
from s3tools import *
from s3navigation import *
# Authentication, Authorization, Accounting
from s3aaa import *
# Utilities, Validators and Widgets
# These names are also imported into the global namespace in
# 00_db.py in order to access them without the s3base prefix:
from s3utils import *
from s3validators import *
from s3widgets import *
# Test framework (currently unused)
#from s3test import *
# RESTful API
from s3rest import *
from s3method import *
# Method Handlers
from s3crud import *
from s3search import *
from s3cube import *
from s3pdf import S3PDF
from s3import import *
# GIS Mapping
from s3gis import *
# Messaging
from s3msg import *
# VITA Person Data Toolkit
from s3vita import *
# Tracking System
from s3track import *
# Synchronization Toolkit
from s3sync import *
# Asynchronous Tasks
from s3task import *
# Charting
from s3chart import * | {
"content_hash": "9477ef0dfb1bc9706ebf11cef109420f",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 104,
"avg_line_length": 29.11111111111111,
"alnum_prop": 0.7454198473282443,
"repo_name": "flavour/cert",
"id": "19ed8591aad12832600d3e5e74a96f47fa81c844",
"size": "2646",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "modules/s3/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "13068308"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Python",
"bytes": "21061411"
},
{
"name": "Shell",
"bytes": "1645"
}
],
"symlink_target": ""
} |
"""
Django test cases for this project.
.. autosummary::
:toctree:
test_ddh
test_notify
test_webindex
test_workflow
"""
| {
"content_hash": "af37400a198586e7d92d27b7b65be0a4",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 35,
"avg_line_length": 10.357142857142858,
"alnum_prop": 0.6068965517241379,
"repo_name": "khchine5/book",
"id": "394e86f9179baa91a3546e4bf7611f4441a1e256",
"size": "145",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lino_book/projects/eric/tests/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "3668"
},
{
"name": "Python",
"bytes": "486198"
},
{
"name": "Shell",
"bytes": "702"
}
],
"symlink_target": ""
} |
from runner.koan import *
class AboutIteration(Koan):
def test_iterators_are_a_type(self):
it = iter(range(1,6))
fib = 0
for num in it:
fib += num
self.assertEqual(__ , fib)
def test_iterating_with_next(self):
stages = iter(['alpha','beta','gamma'])
try:
self.assertEqual(__, next(stages))
next(stages)
self.assertEqual(__, next(stages))
next(stages)
except StopIteration as ex:
err_msg = 'Ran out of iterations'
self.assertRegex(err_msg, __)
# ------------------------------------------------------------------
def add_ten(self, item):
return item + 10
def test_map_transforms_elements_of_a_list(self):
seq = [1, 2, 3]
mapped_seq = list()
mapping = map(self.add_ten, seq)
self.assertNotEqual(list, mapping.__class__)
self.assertEqual(__, mapping.__class__)
# In Python 3 built in iterator funcs return iterable view objects
# instead of lists
for item in mapping:
mapped_seq.append(item)
self.assertEqual(__, mapped_seq)
# Note, iterator methods actually return objects of iter type in
# python 3. In python 2 map() would give you a list.
def test_filter_selects_certain_items_from_a_list(self):
def is_even(item):
return (item % 2) == 0
seq = [1, 2, 3, 4, 5, 6]
even_numbers = list()
for item in filter(is_even, seq):
even_numbers.append(item)
self.assertEqual(__, even_numbers)
def test_just_return_first_item_found(self):
def is_big_name(item):
return len(item) > 4
names = ["Jim", "Bill", "Clarence", "Doug", "Eli"]
name = None
iterator = filter(is_big_name, names)
try:
name = next(iterator)
except StopIteration:
msg = 'Ran out of big names'
self.assertEqual(__, name)
# ------------------------------------------------------------------
def add(self,accum,item):
return accum + item
def multiply(self,accum,item):
return accum * item
def test_reduce_will_blow_your_mind(self):
import functools
# As of Python 3 reduce() has been demoted from a builtin function
# to the functools module.
result = functools.reduce(self.add, [2, 3, 4])
self.assertEqual(__, result.__class__)
# Reduce() syntax is same as Python 2
self.assertEqual(__, result)
result2 = functools.reduce(self.multiply, [2, 3, 4], 1)
self.assertEqual(__, result2)
# Extra Credit:
# Describe in your own words what reduce does.
# ------------------------------------------------------------------
def test_use_pass_for_iterations_with_no_body(self):
for num in range(1,5):
pass
self.assertEqual(__, num)
# ------------------------------------------------------------------
def test_all_iteration_methods_work_on_any_sequence_not_just_lists(self):
# Ranges are an iterable sequence
result = map(self.add_ten, range(1,4))
self.assertEqual(__, list(result))
try:
file = open("example_file.txt")
try:
def make_upcase(line):
return line.strip().upper()
upcase_lines = map(make_upcase, file.readlines())
self.assertEqual(__, list(upcase_lines))
finally:
# Arg, this is ugly.
# We will figure out how to fix this later.
file.close()
except IOError:
# should never happen
self.fail()
| {
"content_hash": "3ecd54f3b92b8ea879fe397b6d7f3afd",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 77,
"avg_line_length": 28.313432835820894,
"alnum_prop": 0.5060622034791776,
"repo_name": "bordeltabernacle/python_koans",
"id": "93d64504a67ca697f23a6ff11e355bf4b2226cf5",
"size": "3841",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python3/koans/about_iteration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1633"
},
{
"name": "Python",
"bytes": "329252"
},
{
"name": "Shell",
"bytes": "167"
}
],
"symlink_target": ""
} |
from lldbsuite.test import lldbinline
from lldbsuite.test import decorators
lldbinline.MakeInlineTest(__file__, globals(), lldbinline.expectedFailureAll(oslist=["windows"]))
| {
"content_hash": "53de644e699cf605438b03eadf3ea904",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 97,
"avg_line_length": 43.75,
"alnum_prop": 0.8114285714285714,
"repo_name": "llvm-mirror/lldb",
"id": "b1b7e1744f1f8f9049a933b3f630696247d2f8ae",
"size": "175",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "packages/Python/lldbsuite/test/lang/cpp/extern_c/TestExternCSymbols.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "131618"
},
{
"name": "C",
"bytes": "195293"
},
{
"name": "C++",
"bytes": "23346708"
},
{
"name": "CMake",
"bytes": "167302"
},
{
"name": "DTrace",
"bytes": "334"
},
{
"name": "LLVM",
"bytes": "6106"
},
{
"name": "Makefile",
"bytes": "50396"
},
{
"name": "Objective-C",
"bytes": "106956"
},
{
"name": "Objective-C++",
"bytes": "24806"
},
{
"name": "Perl",
"bytes": "72175"
},
{
"name": "Python",
"bytes": "3669886"
},
{
"name": "Shell",
"bytes": "6573"
},
{
"name": "Vim script",
"bytes": "8434"
}
],
"symlink_target": ""
} |
from dnutils import ProgressBar
from .common import *
from ..grounding.default import DefaultGroundingFactory
from ..constants import HARD
from ..errors import SatisfiabilityException
class LL(AbstractLearner):
"""
Exact Log-Likelihood learner.
"""
def __init__(self, mrf, **params):
AbstractLearner.__init__(self, mrf, **params)
self._stat = None
self._ls = None
self._eworld_idx = None
self._lastw = None
def _prepare(self):
self._compute_statistics()
def _l(self, w):
"""
computes the likelihoods of all possible worlds under weights w
"""
if self._lastw is None or list(w) != self._lastw:
self._lastw = list(w)
expsums = []
for fvalues in self._stat:
s = 0
hc_violation = False
for fidx, val in fvalues.items():
if self.mrf.mln.formulas[fidx].weight == HARD:
if val == 0:
hc_violation = True
break
else:
s += val * w[fidx]
if hc_violation:
expsums.append(0)
else:
expsums.append(exp(s))
z = sum(expsums)
if z == 0: raise SatisfiabilityException('MLN is unsatisfiable: probability masses of all possible worlds are zero.')
self._ls = [v / z for v in expsums]
return self._ls
def _f(self, w):
ls = self._l(w)
return numpy.log(ls[self._eworld_idx])
def _grad(self, w):
ls = self._l(w)
grad = numpy.zeros(len(self.mrf.formulas), numpy.float64)
for widx, values in enumerate(self._stat):
for fidx, count in values.items():
if widx == self._eworld_idx:
grad[fidx] += count
grad[fidx] -= count * ls[widx]
return grad
def _compute_statistics(self):
self._stat = []
grounder = DefaultGroundingFactory(self.mrf)
eworld = list(self.mrf.evidence)
if self.verbose:
bar = ProgressBar(steps=self.mrf.countworlds(), color='green')
for widx, world in self.mrf.iterallworlds():
if self.verbose:
bar.label(str(widx))
bar.inc()
values = {}
self._stat.append(values)
if self._eworld_idx is None and world == eworld:
self._eworld_idx = widx
for gf in grounder.itergroundings():
truth = gf(world)
if truth != 0: values[gf.idx] = values.get(gf.idx, 0) + truth
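# Note added for clarity (a sketch, not part of the original module): writing
# n_i(x) for the summed truth value of formula i in possible world x (the
# per-world dicts collected in _compute_statistics), _l() evaluates
#     P(x) = exp(sum_i w_i * n_i(x)) / Z,   Z = sum_x' exp(sum_i w_i * n_i(x')),
# _f() returns log P(evidence world), and _grad() returns its gradient
#     d/dw_i log P(evidence) = n_i(evidence) - sum_x P(x) * n_i(x),
# which is exactly the "grad[fidx] += count" (evidence world only) minus
# "count * ls[widx]" bookkeeping above. Hard formulas (weight == HARD) simply
# zero out the probability mass of worlds that violate them.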
| {
"content_hash": "17bf98eaec6043feb606cad2fb5fe75f",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 129,
"avg_line_length": 32.61176470588235,
"alnum_prop": 0.5003607503607503,
"repo_name": "danielnyga/pracmln",
"id": "24557b5a7467b4beaee919196bff786b2b72c82f",
"size": "3946",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python3/pracmln/mln/learning/ll.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C++",
"bytes": "16327"
},
{
"name": "CMake",
"bytes": "9841"
},
{
"name": "Java",
"bytes": "101"
},
{
"name": "Makefile",
"bytes": "42"
},
{
"name": "Python",
"bytes": "1659815"
},
{
"name": "Shell",
"bytes": "188"
},
{
"name": "TeX",
"bytes": "243"
}
],
"symlink_target": ""
} |
import os
import json
import logging
import sys
from django.db import transaction
from django.apps import apps
from scripts import utils as script_utils
from scripts.populate_preprint_providers import update_or_create
from osf.models import PreprintProvider, Subject
from website.app import init_app
from website import settings
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def add_subjects_to_paleorxiv():
paleoarix = PreprintProvider.objects.get(_id='paleorxiv')
bepress_subject = Subject.objects.get(text='Paleontology', provider___id='osf')
life_sciences = Subject.objects.get(text='Earth and Life Sciences', provider=paleoarix)
ichnology = Subject(text='Ichnology', provider=paleoarix, parent=life_sciences, bepress_subject=bepress_subject)
ichnology.save()
taphonomy = Subject(text='Taphonomy', provider=paleoarix, parent=life_sciences, bepress_subject=bepress_subject)
taphonomy.save()
paleoarix.save()
def main():
init_app(set_backends=True, routes=False)
dry_run = '--dry' in sys.argv
if not dry_run:
script_utils.add_file_logger(logger, __file__)
with transaction.atomic():
add_subjects_to_paleorxiv()
if dry_run:
raise RuntimeError('Dry run, transaction rolled back')
if __name__ == '__main__':
main()
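# Invocation sketch (added; the exact path/module form is an assumption, only
# the --dry flag comes from the argument handling above): passing --dry wraps
# the change in a transaction that is deliberately rolled back, anything else
# commits it.
#
#     python scripts/add_taxonomies_to_paleoarxiv.py --dry   # rehearse, roll back
#     python scripts/add_taxonomies_to_paleoarxiv.py         # apply for real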
| {
"content_hash": "3a5860986474c433edc95e19f53bb508",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 116,
"avg_line_length": 31.232558139534884,
"alnum_prop": 0.7252419955323902,
"repo_name": "caseyrollins/osf.io",
"id": "f746b55b25b5e7164320c849c9023e69d705d48e",
"size": "1343",
"binary": false,
"copies": "16",
"ref": "refs/heads/develop",
"path": "scripts/add_taxonomies_to_paleoarxiv.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "93007"
},
{
"name": "Dockerfile",
"bytes": "8455"
},
{
"name": "HTML",
"bytes": "296984"
},
{
"name": "JavaScript",
"bytes": "1813961"
},
{
"name": "Mako",
"bytes": "676476"
},
{
"name": "Python",
"bytes": "8712355"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
} |
from __future__ import with_statement
import pipes
import fnmatch
import IECore
import Gaffer
import GafferUI
class ParameterisedHolderNodeUI( GafferUI.NodeUI ) :
def __init__( self, node, readOnly=False, **kw ) :
column = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical, spacing = 4 )
GafferUI.NodeUI.__init__( self, node, column, **kw )
with column :
with GafferUI.ListContainer( orientation = GafferUI.ListContainer.Orientation.Horizontal ) :
GafferUI.Spacer( IECore.V2i( 10 ), parenting = { "expand" : True } )
toolButton = GafferUI.ToolParameterValueWidget( self.node().parameterHandler() )
toolButton.plugValueWidget().setReadOnly( readOnly )
_InfoButton( node )
with GafferUI.ScrolledContainer( horizontalMode=GafferUI.ScrolledContainer.ScrollMode.Never, borderWidth=4 ) :
self.__parameterValueWidget = GafferUI.CompoundParameterValueWidget( self.node().parameterHandler(), collapsible = False )
self.setReadOnly( readOnly )
def setReadOnly( self, readOnly ) :
if readOnly == self.getReadOnly() :
return
GafferUI.NodeUI.setReadOnly( self, readOnly )
self.__parameterValueWidget.plugValueWidget().setReadOnly( readOnly )
GafferUI.NodeUI.registerNodeUI( Gaffer.ParameterisedHolderNode.staticTypeId(), ParameterisedHolderNodeUI )
GafferUI.NodeUI.registerNodeUI( Gaffer.ParameterisedHolderComputeNode.staticTypeId(), ParameterisedHolderNodeUI )
GafferUI.NodeUI.registerNodeUI( Gaffer.ParameterisedHolderDependencyNode.staticTypeId(), ParameterisedHolderNodeUI )
##########################################################################
# Info button
##########################################################################
## \todo We might want to think about using this for all NodeUIs, since it
# relies only on Metadata which should be available for all node types.
class _InfoButton( GafferUI.Button ) :
def __init__( self, node ) :
GafferUI.Button.__init__( self, image="info.png", hasFrame=False )
self.__node = node
self.__window = None
self.__clickedConnection = self.clickedSignal().connect( Gaffer.WeakMethod( self.__clicked ) )
def getToolTip( self ) :
result = GafferUI.Button.getToolTip( self )
if result :
return result
result = IECore.StringUtil.wrap( self.__infoText(), 75 )
return result
def __infoText( self ) :
result = Gaffer.Metadata.nodeDescription( self.__node )
summary = Gaffer.Metadata.nodeValue( self.__node, "summary" )
if summary :
if result :
result += "\n\n"
result += summary
return result
def __clicked( self, button ) :
if self.__window is None :
with GafferUI.Window( "Info", borderWidth=8 ) as self.__window :
GafferUI.MultiLineTextWidget( editable = False )
self.ancestor( GafferUI.Window ).addChildWindow( self.__window )
self.__window.getChild().setText( self.__infoText() )
self.__window.reveal()
##########################################################################
# Nodules
##########################################################################
def __parameterNoduleCreator( plug ) :
if isinstance( plug, Gaffer.ObjectPlug ) :
return GafferUI.StandardNodule( plug )
else :
return None
GafferUI.Nodule.registerNodule( Gaffer.ParameterisedHolderNode.staticTypeId(), "parameters", GafferUI.CompoundNodule )
GafferUI.Nodule.registerNodule( Gaffer.ParameterisedHolderComputeNode.staticTypeId(), "parameters", GafferUI.CompoundNodule )
GafferUI.Nodule.registerNodule( Gaffer.ParameterisedHolderDependencyNode.staticTypeId(), "parameters", GafferUI.CompoundNodule )
GafferUI.Nodule.registerNodule( Gaffer.ParameterisedHolderNode.staticTypeId(), fnmatch.translate( "parameters.*" ), __parameterNoduleCreator )
GafferUI.Nodule.registerNodule( Gaffer.ParameterisedHolderComputeNode.staticTypeId(), fnmatch.translate( "parameters.*" ), __parameterNoduleCreator )
GafferUI.Nodule.registerNodule( Gaffer.ParameterisedHolderDependencyNode.staticTypeId(), fnmatch.translate( "parameters.*" ), __parameterNoduleCreator )
##########################################################################
# Metadata
##########################################################################
def __nodeDescription( node ) :
parameterised = node.getParameterised()[0]
if parameterised is None :
return ""
return parameterised.description
def __nodeSummary( node ) :
parameterised = node.getParameterised()[0]
if not isinstance( parameterised, IECore.Op ) :
return ""
node.parameterHandler().setParameterValue()
parameterValues = IECore.ParameterParser().serialise( parameterised.parameters() )
# pipes.quote() has a bug in some python versions where it doesn't quote empty strings.
parameterValues = " ".join( [ pipes.quote( x ) if x else "''" for x in parameterValues ] )
return "Command line equivalent : \n\ngaffer op %s -version %d -arguments %s" % (
parameterised.path,
parameterised.version,
parameterValues,
)
def __plugDescription( plug ) :
## \todo There should really be a method to map from plug to parameter.
# The logic exists in ParameterisedHolder.plugSet() but isn't public.
parameter = plug.node().parameterHandler().parameter()
for name in plug.relativeName( plug.node() ).split( "." )[1:] :
if not isinstance( parameter, IECore.CompoundParameter ) :
return None
else :
parameter = parameter[name]
return parameter.description
for nodeType in (
Gaffer.ParameterisedHolderNode,
Gaffer.ParameterisedHolderComputeNode,
Gaffer.ParameterisedHolderDependencyNode,
) :
Gaffer.Metadata.registerNodeDescription( nodeType, __nodeDescription )
Gaffer.Metadata.registerNodeValue( nodeType, "summary", __nodeSummary )
Gaffer.Metadata.registerPlugDescription( nodeType, "parameters.*", __plugDescription )
| {
"content_hash": "dff501e800652121cb5fefd65dcff445",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 152,
"avg_line_length": 36.2875,
"alnum_prop": 0.6879090595935239,
"repo_name": "davidsminor/gaffer",
"id": "5bd1f7b87b569d493392d497af92f9d2a86b5822",
"size": "7690",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/GafferUI/ParameterisedHolderNodeUI.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "9286"
},
{
"name": "C++",
"bytes": "3358250"
},
{
"name": "COBOL",
"bytes": "64449"
},
{
"name": "CSS",
"bytes": "28027"
},
{
"name": "Python",
"bytes": "3267354"
},
{
"name": "Shell",
"bytes": "7055"
},
{
"name": "Slash",
"bytes": "35200"
}
],
"symlink_target": ""
} |
"""Pre-configured remote application for enabling sign in/up with ORCID.
1. Edit your configuration and add:
.. code-block:: python
from invenio_oauthclient.contrib import orcid
OAUTHCLIENT_REMOTE_APPS = dict(
orcid=orcid.REMOTE_APP,
)
ORCID_APP_CREDENTIALS = dict(
consumer_key="changeme",
consumer_secret="changeme",
)
Note, if you want to use the ORCID Member API, use
``orcid.REMOTE_MEMBER_APP`` instead of ``orcid.REMOTE_APP``.
In case you want to use the sandbox:
To use the ORCID Public API sandbox, use ``orcid.REMOTE_SANDBOX_APP``
instead of ``orcid.REMOTE_APP``.
To use the ORCID Member API sandbox, use ``orcid.REMOTE_SANDBOX_MEMBER_APP``.
2. Register a new application with ORCID. When registering the
application ensure that the *Redirect URI* points to:
``CFG_SITE_URL/oauth/authorized/orcid/`` (note, ORCID does not
allow localhost to be used, thus testing on development machines is
somewhat complicated by this).
3. Grab the *Client ID* and *Client Secret* after registering the application
and add them to your instance configuration (``invenio.cfg``):
.. code-block:: python
ORCID_APP_CREDENTIALS = dict(
consumer_key="<CLIENT ID>",
consumer_secret="<CLIENT SECRET>",
)
4. Now go to ``CFG_SITE_URL/oauth/login/orcid/`` (e.g.
http://localhost:4000/oauth/login/orcid/)
5. Also, you should see ORCID listed under Linked accounts:
http://localhost:4000/account/settings/linkedaccounts/
By default the ORCID module will first check whether a link already exists
between an ORCID account and a user. If no link is found, the user is asked
to provide an email address to sign-up.
In templates you can add a sign in/up link:
.. code-block:: jinja
<a href="{{url_for('invenio_oauthclient.login', remote_app='orcid')}}">
Sign in with ORCID
</a>
For more details you can play with a :doc:`working example <examplesapp>`.
"""
import copy
from flask import current_app, redirect, url_for
from flask_login import current_user
from invenio_db import db
from invenio_oauthclient.models import RemoteAccount
from invenio_oauthclient.utils import oauth_link_external_id, \
oauth_unlink_external_id
REMOTE_APP = dict(
title='ORCID',
description='Connecting Research and Researchers.',
icon='',
authorized_handler='invenio_oauthclient.handlers'
':authorized_signup_handler',
disconnect_handler='invenio_oauthclient.contrib.orcid'
':disconnect_handler',
signup_handler=dict(
info='invenio_oauthclient.contrib.orcid:account_info',
setup='invenio_oauthclient.contrib.orcid:account_setup',
view='invenio_oauthclient.handlers:signup_handler',
),
params=dict(
request_token_params={'scope': '/authenticate',
'show_login': 'true'},
base_url='https://pub.orcid.org/v1.2/',
request_token_url=None,
access_token_url='https://pub.orcid.org/oauth/token',
access_token_method='POST',
authorize_url='https://orcid.org/oauth/authorize',
app_key='ORCID_APP_CREDENTIALS',
content_type='application/json',
)
)
"""ORCID Remote Application."""
REMOTE_MEMBER_APP = copy.deepcopy(REMOTE_APP)
"""ORCID Remote Application with member API."""
REMOTE_MEMBER_APP['params'].update(dict(
base_url='https://api.orcid.org/',
access_token_url='https://api.orcid.org/oauth/token',
))
"""ORCID sandbox member API."""
REMOTE_SANDBOX_MEMBER_APP = copy.deepcopy(REMOTE_APP)
"""ORCID Sandbox Remote Application with member API."""
REMOTE_SANDBOX_MEMBER_APP['params'].update(dict(
base_url='https://api.sandbox.orcid.org/',
access_token_url='https://api.sandbox.orcid.org/oauth/token',
authorize_url='https://sandbox.orcid.org/oauth/authorize#show_login',
))
"""ORCID sandbox member API."""
REMOTE_SANDBOX_APP = copy.deepcopy(REMOTE_APP)
"""ORCID Sandbox Remote Application with public API."""
REMOTE_SANDBOX_APP['params'].update(dict(
base_url='https://pub.sandbox.orcid.org/',
access_token_url='https://pub.sandbox.orcid.org/oauth/token',
authorize_url='https://sandbox.orcid.org/oauth/authorize#show_login',
))
"""ORCID sandbox public API."""
def account_info(remote, resp):
"""Retrieve remote account information used to find local user.
It returns a dictionary with the following structure:
.. code-block:: python
{
'user': {
'profile': {
'full_name': 'Full Name',
},
},
            'external_id': 'orcid-unique-identifier',
            'external_method': 'orcid',
}
:param remote: The remote application.
:param resp: The response.
:returns: A dictionary with the user information.
"""
orcid = resp.get('orcid')
return {
'external_id': orcid,
'external_method': 'orcid',
'user': {
'profile': {
'full_name': resp.get('name'),
},
},
}
def disconnect_handler(remote, *args, **kwargs):
"""Handle unlinking of remote account.
:param remote: The remote application.
"""
if not current_user.is_authenticated:
return current_app.login_manager.unauthorized()
account = RemoteAccount.get(user_id=current_user.get_id(),
client_id=remote.consumer_key)
orcid = account.extra_data.get('orcid')
if orcid:
oauth_unlink_external_id({'id': orcid, 'method': 'orcid'})
if account:
with db.session.begin_nested():
account.delete()
return redirect(url_for('invenio_oauthclient_settings.index'))
def account_setup(remote, token, resp):
"""Perform additional setup after user have been logged in.
:param remote: The remote application.
:param token: The token value.
:param resp: The response.
"""
with db.session.begin_nested():
# Retrieve ORCID from response.
orcid = resp.get('orcid')
full_name = resp.get('name')
# Set ORCID in extra_data.
token.remote_account.extra_data = {
'orcid': orcid,
'full_name': full_name,
}
user = token.remote_account.user
# Create user <-> external id link.
oauth_link_external_id(user, {'id': orcid, 'method': 'orcid'})
| {
"content_hash": "f5dcbd8a751fc4f77552a748a0998ecf",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 79,
"avg_line_length": 30.904306220095695,
"alnum_prop": 0.6426691438303143,
"repo_name": "tiborsimko/invenio-oauthclient",
"id": "3958cdeb7e2836ad53ccc0489598eca881f0cdfe",
"size": "6694",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "invenio_oauthclient/contrib/orcid.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "6539"
},
{
"name": "Python",
"bytes": "174905"
},
{
"name": "Shell",
"bytes": "436"
}
],
"symlink_target": ""
} |
from PIL import Image
from pylab import *
# read image to array
im = array(Image.open('img/landscape.jpg'))
# plot the image
imshow(im)
# some points
x = [100,100,400,400]
y = [200,400,200,400]
# plot the points with red star-markers
plot(x,y,'r*')
# line plot connecting the first two points
plot(x[:2],y[:2])
# add title and show the plot
title('Plotting: "img/landscape.jpg"')
show()
gin_xy=ginput(3)
print gin_xy | {
"content_hash": "03f2f5cb87c11d6b6702042300418621",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 43,
"avg_line_length": 21.94736842105263,
"alnum_prop": 0.7026378896882494,
"repo_name": "wasit7/cs634",
"id": "58f568023b35ac43ae0a809211582ed8b034ae62",
"size": "417",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "2016/lab2_00loadimage.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2450104"
},
{
"name": "Python",
"bytes": "50308"
}
],
"symlink_target": ""
} |
import unittest
from datetime import datetime as dt
from time import sleep
from hamcrest import assert_that, equal_to, contains_exactly
from freemap.map import Branch, Icons, Map
from helpers.matchers import between
class BranchTester(unittest.TestCase):
def setUp(self):
self.ts = 'CREATED="1541258689450" MODIFIED="1541353381000"'
self.branch_with_text = Branch.from_string(Map(), '<node TEXT="text"/>')
self.branch_with_localized_text = Branch.from_string(Map(), '<node LOCALIZED_TEXT="localized"/>')
self.branch_with_rich_content = Branch.from_string(Map(),
'<node><richcontent TYPE="NODE"><html><body><p>Ha!</p></body></html></richcontent></node>')
self.branch_with_note = Branch.from_string(Map(),
'<node><richcontent TYPE="NOTE"><html><body><p>Hi!</p></body></html></richcontent></node>')
self.branch_with_details = Branch.from_string(Map(), '<node><richcontent '
'TYPE="DETAILS"><html><body><p>Who!</p></body></html></richcontent></node>')
self.branch_with_icons = Branch.from_string(Map(), '<node><icon BUILTIN="button_ok"/><icon BUILTIN="full-1"/></node>')
def test_knows_when_created(self):
before = self.now()
branch = Branch(Map())
after = self.now()
created = branch.created
assert_that(created, between(before, after))
def test_uses_node_id_if_present(self):
node_id = "Freemind_Link_1331878192"
branch = Branch.from_string(Map(), '<node ID="{nid}" {ts}/>'.format(nid=node_id, ts=self.ts))
assert_that(branch.node_id, equal_to(node_id))
def test_knows_when_attribute_modified(self):
branch = Branch(Map())
t1 = self.now()
branch.set_link('foo')
t2 = self.now()
assert_that(branch.modified, between(t1, t2))
def test_knows_when_child_added(self):
mmap = Map()
branch = Branch(mmap)
t1 = branch.modified
sleep(0.01)
branch.add_child(Branch(mmap))
t2 = branch.modified
assert_that(t1 < t2)
def now(self):
return round(1000 * dt.now().timestamp())
def test_knows_timestamps(self):
text = '<node {ts} TEXT="foo"></node>' \
.format(ts=self.ts)
branch = Branch.from_string(Map(), text)
assert_that(branch.created, equal_to(1541258689450))
assert_that(branch.modified, equal_to(1541353381000))
def test_knows_if_text_is_rich(self):
assert_that(self.branch_with_rich_content.has_rich_content())
assert_that(not self.branch_with_text.has_rich_content())
assert_that(not self.branch_with_localized_text.has_rich_content())
def test_retrieves_text_from_rich_content_nodes(self):
assert_that(self.branch_with_rich_content.text, equal_to('Ha!'))
def test_sets_text(self):
self.branch_with_text.text = 'Foo'
assert_that(self.branch_with_text.text, equal_to('Foo'))
self.branch_with_localized_text.text = 'Bar'
assert_that(self.branch_with_localized_text.text, equal_to('Bar'))
self.branch_with_rich_content.text = 'Baz'
assert_that(self.branch_with_rich_content.text, equal_to('Baz'))
def test_retrieves_rich_content_from_nodes(self):
assert_that(self.branch_with_text.rich_content.markdown, equal_to('text'))
assert_that(self.branch_with_localized_text.rich_content.markdown, equal_to('localized'))
assert_that(self.branch_with_rich_content.rich_content.markdown, equal_to('Ha!'))
def test_sets_rich_content(self):
self.branch_with_rich_content.rich_content = 'Ho!'
assert_that(self.branch_with_rich_content.rich_content.markdown, equal_to('Ho!'))
self.branch_with_text.rich_content = 'Hi!'
assert_that(self.branch_with_text.rich_content.markdown, equal_to('Hi!'))
self.branch_with_localized_text.rich_content = 'Hum!'
assert_that(self.branch_with_localized_text.rich_content.markdown, equal_to('Hum!'))
def test_retrieves_note(self):
assert_that(self.branch_with_note.note.markdown, equal_to('Hi!'))
def test_can_set_note(self):
self.branch_with_text.note = 'Note one'
assert_that(self.branch_with_text.note.markdown, equal_to('Note one'))
self.branch_with_note.note = 'replacement note'
assert_that(self.branch_with_note.note.markdown, equal_to('replacement note'))
def test_retrieves_description(self):
assert_that(self.branch_with_details.details.markdown, equal_to('Who!'))
def test_sets_details(self):
self.branch_with_text.details = 'interesting details'
assert_that(self.branch_with_text.details.markdown, equal_to('interesting details'))
def test_retrieves_link(self):
branch = Branch.from_string(Map(), '<node LINK="foo"/>')
assert_that(branch.link, equal_to('foo'))
def test_sets_link(self):
branch = Branch.from_string(Map(), '<node LINK="foo"/>')
branch.link = 'boo!'
assert_that(branch.link, equal_to('boo!'))
def test_reads_icons(self):
assert_that(self.branch_with_icons.icons, contains_exactly(Icons.icon('button_ok'),Icons.icon('full-1')))
def test_adds_icons(self):
self.branch_with_icons.add_icons(Icons.icon('full-2'))
assert_that(self.branch_with_icons.icons, contains_exactly(
Icons.icon('button_ok'),
Icons.icon('full-1'),
Icons.icon('full-2')))
def test_removes_icons(self):
self.branch_with_icons.remove_icons(Icons.icon('button_ok'), Icons.icon('full-1'))
assert_that(len(self.branch_with_icons.icons), equal_to(0))
| {
"content_hash": "8f5444d8b122ee0299b86c9eab39547b",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 126,
"avg_line_length": 41.75912408759124,
"alnum_prop": 0.6392239119035134,
"repo_name": "romilly/freemap",
"id": "0292d8d966b99554a5c87877d1ec49d2dcbe70bf",
"size": "5721",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/test_branch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "19833"
},
{
"name": "Python",
"bytes": "33091"
}
],
"symlink_target": ""
} |
from pandas import Timestamp, Timedelta
from pandas.tseries.tools import normalize_date
class FutureChain(object):
""" Allows users to look up future contracts.
Parameters
----------
asset_finder : AssetFinder
An AssetFinder for future contract lookups, in particular the
AssetFinder of the TradingAlgorithm instance.
get_datetime : function
A function that returns the simulation datetime, in particular
the get_datetime method of the TradingAlgorithm instance.
root_symbol : str
The root symbol of a future chain.
as_of_date : pandas.Timestamp, optional
Date at which the chain determination is rooted. I.e. the
existing contract whose notice date is first after this date is
the primary contract, etc. If not provided, the current
simulation date is used as the as_of_date.
Attributes
----------
root_symbol : str
The root symbol of the future chain.
as_of_date
The current as-of date of this future chain.
Methods
-------
as_of(dt)
offset(time_delta)
Raises
------
RootSymbolNotFound
Raised when the FutureChain is initialized with a root symbol for which
a future chain could not be found.
"""
def __init__(self, asset_finder, get_datetime, root_symbol,
as_of_date=None):
self.root_symbol = root_symbol
# Reference to the algo's AssetFinder for contract lookups
self._asset_finder = asset_finder
# Reference to the algo's get_datetime to know the current dt
self._algorithm_get_datetime = get_datetime
# If an as_of_date is provided, self._as_of_date uses that
# value, otherwise None. This attribute backs the as_of_date property.
if as_of_date:
self._as_of_date = normalize_date(as_of_date)
else:
self._as_of_date = None
# Attribute to cache the most up-to-date chain, and the dt when it was
# last updated.
self._current_chain = []
self._last_updated = None
# Get the initial chain, since self._last_updated is None.
self._maybe_update_current_chain()
def __repr__(self):
# NOTE: The string returned cannot be used to instantiate this
# exact FutureChain, since we don't want to display the asset
# finder and get_datetime function to the user.
if self._as_of_date:
return "FutureChain(root_symbol='%s', as_of_date='%s')" % (
self.root_symbol, self.as_of_date)
else:
return "FutureChain(root_symbol='%s')" % self.root_symbol
def _get_datetime(self):
"""
Returns the normalized simulation datetime.
Returns
-------
pandas.Timestamp
The normalized datetime of FutureChain's TradingAlgorithm.
"""
return normalize_date(
Timestamp(self._algorithm_get_datetime(), tz='UTC')
)
@property
def as_of_date(self):
"""
The current as-of date of this future chain.
Returns
-------
pandas.Timestamp
The user-provided as_of_date if given, otherwise the
current datetime of the simulation.
"""
if self._as_of_date is not None:
return self._as_of_date
else:
return self._get_datetime()
def _maybe_update_current_chain(self):
""" Updates the current chain if it's out of date, then returns
it.
Returns
-------
list
The up-to-date current chain, a list of Future objects.
"""
if (self._last_updated is None)\
or (self._last_updated != self.as_of_date):
self._current_chain = self._asset_finder.lookup_future_chain(
self.root_symbol,
self.as_of_date
)
self._last_updated = self.as_of_date
return self._current_chain
def __getitem__(self, key):
return self._maybe_update_current_chain()[key]
def __len__(self):
return len(self._maybe_update_current_chain())
def __iter__(self):
return iter(self._maybe_update_current_chain())
def as_of(self, dt):
""" Get the future chain for this root symbol as of a specific date.
Parameters
----------
dt : datetime.datetime or pandas.Timestamp or str, optional
The as_of_date for the new chain.
Returns
-------
FutureChain
"""
return FutureChain(
asset_finder=self._asset_finder,
get_datetime=self._algorithm_get_datetime,
root_symbol=self.root_symbol,
as_of_date=Timestamp(dt, tz='UTC'),
)
def offset(self, time_delta):
""" Get the future chain for this root symbol with a given
offset from the current as_of_date.
Parameters
----------
time_delta : datetime.timedelta or pandas.Timedelta or str
The offset from the current as_of_date for the new chain.
Returns
-------
FutureChain
"""
return self.as_of(self.as_of_date + Timedelta(time_delta))
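# Usage sketch (added for illustration; 'CL' is an assumed root symbol and the
# asset_finder/get_datetime arguments would normally be supplied by the
# TradingAlgorithm that owns the chain):
#
#     chain = FutureChain(asset_finder, get_datetime, 'CL')
#     primary = chain[0]                      # first contract in the chain
#     later = chain.offset('30 days')         # chain rooted 30 days out
#     historical = chain.as_of('2015-01-05')  # chain as of a specific date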
# http://www.cmegroup.com/product-codes-listing/month-codes.html
CME_CODE_TO_MONTH = dict(zip('FGHJKMNQUVXZ', range(1, 13)))
MONTH_TO_CME_CODE = dict(zip(range(1, 13), 'FGHJKMNQUVXZ'))
def cme_code_to_month(code):
"""
Convert a CME month code to a month index.
The month codes are as follows:
'F' -> 1 (January)
'G' -> 2 (February)
'H' -> 3 (March)
'J' -> 4 (April)
'K' -> 5 (May)
'M' -> 6 (June)
'N' -> 7 (July)
'Q' -> 8 (August)
'U' -> 9 (September)
'V' -> 10 (October)
'X' -> 11 (November)
'Z' -> 12 (December)
Parameters
----------
code : str
The month code to look up.
Returns
-------
month : int
The month number (starting at 1 for January) corresponding to the
requested code.
See Also
--------
month_to_cme_code
Inverse of this function.
"""
return CME_CODE_TO_MONTH[code]
def month_to_cme_code(month):
"""
Convert a month to a CME code.
The month codes are as follows:
1 (January) -> 'F'
2 (February) -> 'G'
3 (March) -> 'H'
4 (April) -> 'J'
5 (May) -> 'K'
6 (June) -> 'M'
7 (July) -> 'N'
8 (August) -> 'Q'
9 (September) -> 'U'
10 (October) -> 'V'
11 (November) -> 'X'
12 (December) -> 'Z'
Parameters
----------
month : int
The month number (starting at 1 for January) corresponding to the
requested code.
Returns
-------
code : str
The month code to look up.
See Also
--------
cme_code_to_month
Inverse of this function.
"""
return MONTH_TO_CME_CODE[month]
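# Quick example (added; the values follow the month-code tables in the
# docstrings above):
#
#     >>> cme_code_to_month('U')
#     9
#     >>> month_to_cme_code(9)
#     'U'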
| {
"content_hash": "4e3237b8f56974350c5263d6c097fd00",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 79,
"avg_line_length": 28.18951612903226,
"alnum_prop": 0.5654412816478329,
"repo_name": "dmitriz/zipline",
"id": "12dcae525ead768a8d8566e940ee3219a26e6e67",
"size": "7574",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "zipline/assets/futures.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "564"
},
{
"name": "Emacs Lisp",
"bytes": "138"
},
{
"name": "Python",
"bytes": "1641639"
},
{
"name": "Shell",
"bytes": "4065"
}
],
"symlink_target": ""
} |
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import tcelery
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.intersphinx',
'sphinxcontrib.fulltoc',
'sphinxcontrib.httpdomain',
'sphinxcontrib.autohttp.tornado',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'tornado-celery'
copyright = u'2014, Mher Movsisyan'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '.'.join(map(str, tcelery.VERSION[0:2]))
# The full version, including alpha/beta/rc tags.
release = tcelery.__version__.rstrip('-dev')
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'celery'
html_theme_path = ['_theme']
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'index': ['sidebarintro.html', 'sourcelink.html', 'searchbox.html'],
'**': ['sidebarlogo.html', 'localtoc.html', 'relations.html',
'sourcelink.html', 'searchbox.html']
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'tornado-celerydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'tornado-celery.tex', u'tornado-celery Documentation',
u'Mher Movsisyan', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'tornado-celery', u'tornado-celery Documentation',
[u'Mher Movsisyan'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'tornado-celery', u'tornado-celery Documentation',
u'Mher Movsisyan', 'tornado-celery', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| {
"content_hash": "72adad28a75959b7863d3982c20ed88a",
"timestamp": "",
"source": "github",
"line_count": 257,
"max_line_length": 79,
"avg_line_length": 32.18677042801556,
"alnum_prop": 0.7027321083172147,
"repo_name": "sangwonl/tornado-celery",
"id": "71ce7dfbdd82dc023041acb682a6d5dabff6253c",
"size": "8699",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "37902"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('builds', '0030_add_automation_rule_matches'),
]
operations = [
migrations.AddField(
model_name='build',
name='version_name',
field=models.CharField(blank=True, max_length=255, null=True, verbose_name='Version name'),
),
migrations.AddField(
model_name='build',
name='version_slug',
field=models.CharField(blank=True, max_length=255, null=True, verbose_name='Version slug'),
),
migrations.AddField(
model_name='build',
name='version_type',
field=models.CharField(blank=True, choices=[('branch', 'Branch'), ('tag', 'Tag'), ('external', 'External'), ('unknown', 'Unknown')], max_length=32, null=True, verbose_name='Version type'),
),
]
| {
"content_hash": "859a1092dcf1f365cd48d78ebc96e308",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 200,
"avg_line_length": 35.148148148148145,
"alnum_prop": 0.589041095890411,
"repo_name": "rtfd/readthedocs.org",
"id": "cb9a86db48840c87c5db6f50ec3e60af8396b45e",
"size": "999",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "readthedocs/builds/migrations/0031_add_version_fields_to_build.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "4515"
},
{
"name": "CSS",
"bytes": "66552"
},
{
"name": "Dockerfile",
"bytes": "205"
},
{
"name": "HTML",
"bytes": "196998"
},
{
"name": "JavaScript",
"bytes": "431128"
},
{
"name": "Makefile",
"bytes": "4594"
},
{
"name": "Python",
"bytes": "1821332"
},
{
"name": "Shell",
"bytes": "682"
}
],
"symlink_target": ""
} |
import requests
def image_search(query):
url = 'https://ajax.googleapis.com/ajax/services/search/images?v=1.0&q=%s' % (query)
res = requests.get(url)
if res.status_code != 200:
return []
result = res.json()
    # The API returns a null responseData on errors; guard against that.
    data = result.get('responseData') or {}
    return data.get('results', [])
def first_image(query):
images = image_search(query)
if len(images) > 0:
return images[0].get('unescapedUrl', '')
else:
return ''
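# Hedged usage sketch (not part of the original module; the legacy Google AJAX
# image search endpoint used above has since been retired, so live calls are
# expected to return an empty result). 'cats' is an arbitrary example query:
#   from gooimage import first_image
#   url = first_image('cats')  # '' when nothing is found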
| {
"content_hash": "a10a5e49407a1c7a7da89ebf95a87756",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 88,
"avg_line_length": 21.61904761904762,
"alnum_prop": 0.6035242290748899,
"repo_name": "davidyen1124/Google-Image",
"id": "7d1a5beaacdab2ea7bd9233a94c07d7f3069d551",
"size": "481",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "gooimage/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "855"
}
],
"symlink_target": ""
} |
from itertools import combinations
from operator import mul
with open("input.txt") as f:
    d = f.readlines()
total = 0
for p in d:
sides = [int(n) for n in p.split("x")]
combos = list(combinations(sides, 2))
    areas = [mul(*a) for a in combos]
areas.sort()
total += areas[0]
total += sum([2*a for a in areas])
print(total)
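# Worked example (illustrative, not in the original): for a "2x3x4" line the
# side areas are [6, 8, 12], so the script adds 6 (smallest side as slack)
# plus 2*(6+8+12) = 52, giving 58 square feet for that present.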
| {
"content_hash": "ecd6e7bb6ab2ad5297ce781696eecd54",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 42,
"avg_line_length": 22.6,
"alnum_prop": 0.6224188790560472,
"repo_name": "pwicks86/adventofcode2015",
"id": "72cd1553d141e1833390719e0440a3dbb04861d0",
"size": "339",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "day2/p1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42595"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from bcc import BPF
from time import sleep
from threading import Event
import argparse
import json
import sys
import os
import signal
description = """
Monitor IO latency distribution of a block device
"""
epilog = """
When interval is infinite, biolatpcts will print out result once the
initialization is complete to indicate readiness. After initialized,
biolatpcts will output whenever it receives SIGUSR1/2 and before exiting on
SIGINT, SIGTERM or SIGHUP.
SIGUSR1 starts a new period after reporting. SIGUSR2 doesn't and can be used
to monitor progress without affecting accumulation of data points. They can
be used to obtain latency distribution between two arbitrary events and
monitor progress in between.
"""
parser = argparse.ArgumentParser(description = description, epilog = epilog,
formatter_class = argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('dev', metavar='DEV', type=str,
help='Target block device (/dev/DEVNAME, DEVNAME or MAJ:MIN)')
parser.add_argument('-i', '--interval', type=int, default=3,
help='Report interval (0: exit after startup, -1: infinite)')
parser.add_argument('-w', '--which', choices=['from-rq-alloc', 'after-rq-alloc', 'on-device'],
default='on-device', help='Which latency to measure')
parser.add_argument('-p', '--pcts', metavar='PCT,...', type=str,
default='1,5,10,16,25,50,75,84,90,95,99,100',
help='Percentiles to calculate')
parser.add_argument('-j', '--json', action='store_true',
help='Output in json')
parser.add_argument('--verbose', '-v', action='count', default = 0)
bpf_source = """
#include <linux/blk_types.h>
#include <linux/blkdev.h>
#include <linux/time64.h>
BPF_PERCPU_ARRAY(rwdf_100ms, u64, 400);
BPF_PERCPU_ARRAY(rwdf_1ms, u64, 400);
BPF_PERCPU_ARRAY(rwdf_10us, u64, 400);
void kprobe_blk_account_io_done(struct pt_regs *ctx, struct request *rq, u64 now)
{
unsigned int cmd_flags;
u64 dur;
size_t base, slot;
if (!rq->__START_TIME_FIELD__)
return;
if (!rq->rq_disk ||
rq->rq_disk->major != __MAJOR__ ||
rq->rq_disk->first_minor != __MINOR__)
return;
cmd_flags = rq->cmd_flags;
switch (cmd_flags & REQ_OP_MASK) {
case REQ_OP_READ:
base = 0;
break;
case REQ_OP_WRITE:
base = 100;
break;
case REQ_OP_DISCARD:
base = 200;
break;
case REQ_OP_FLUSH:
base = 300;
break;
default:
return;
}
dur = now - rq->__START_TIME_FIELD__;
slot = min_t(size_t, div_u64(dur, 100 * NSEC_PER_MSEC), 99);
rwdf_100ms.increment(base + slot);
if (slot)
return;
slot = min_t(size_t, div_u64(dur, NSEC_PER_MSEC), 99);
rwdf_1ms.increment(base + slot);
if (slot)
return;
slot = min_t(size_t, div_u64(dur, 10 * NSEC_PER_USEC), 99);
rwdf_10us.increment(base + slot);
}
"""
args = parser.parse_args()
args.pcts = args.pcts.split(',')
args.pcts.sort(key=lambda x: float(x))
try:
major = int(args.dev.split(':')[0])
minor = int(args.dev.split(':')[1])
except Exception:
if '/' in args.dev:
stat = os.stat(args.dev)
else:
stat = os.stat('/dev/' + args.dev)
major = os.major(stat.st_rdev)
minor = os.minor(stat.st_rdev)
if args.which == 'from-rq-alloc':
start_time_field = 'alloc_time_ns'
elif args.which == 'after-rq-alloc':
start_time_field = 'start_time_ns'
elif args.which == 'on-device':
start_time_field = 'io_start_time_ns'
else:
print("Invalid latency measurement {}".format(args.which))
exit()
bpf_source = bpf_source.replace('__START_TIME_FIELD__', start_time_field)
bpf_source = bpf_source.replace('__MAJOR__', str(major))
bpf_source = bpf_source.replace('__MINOR__', str(minor))
bpf = BPF(text=bpf_source)
if BPF.get_kprobe_functions(b'__blk_account_io_done'):
bpf.attach_kprobe(event="__blk_account_io_done", fn_name="kprobe_blk_account_io_done")
else:
bpf.attach_kprobe(event="blk_account_io_done", fn_name="kprobe_blk_account_io_done")
# times are in usecs
MSEC = 1000
SEC = 1000 * 1000
cur_rwdf_100ms = bpf["rwdf_100ms"]
cur_rwdf_1ms = bpf["rwdf_1ms"]
cur_rwdf_10us = bpf["rwdf_10us"]
last_rwdf_100ms = [0] * 400
last_rwdf_1ms = [0] * 400
last_rwdf_10us = [0] * 400
rwdf_100ms = [0] * 400
rwdf_1ms = [0] * 400
rwdf_10us = [0] * 400
io_type = ["read", "write", "discard", "flush"]
def find_pct(req, total, slots, idx, counted):
while idx > 0:
idx -= 1
if slots[idx] > 0:
counted += slots[idx]
if args.verbose > 1:
print('idx={} counted={} pct={:.1f} req={}'
.format(idx, counted, counted / total, req))
if (counted / total) * 100 >= 100 - req:
break
return (idx, counted)
def calc_lat_pct(req_pcts, total, lat_100ms, lat_1ms, lat_10us):
pcts = [0] * len(req_pcts)
if total == 0:
return pcts
data = [(100 * MSEC, lat_100ms), (MSEC, lat_1ms), (10, lat_10us)]
data_sel = 0
idx = 100
counted = 0
for pct_idx in reversed(range(len(req_pcts))):
req = float(req_pcts[pct_idx])
while True:
last_counted = counted
(gran, slots) = data[data_sel]
(idx, counted) = find_pct(req, total, slots, idx, counted)
if args.verbose > 1:
print('pct_idx={} req={} gran={} idx={} counted={} total={}'
.format(pct_idx, req, gran, idx, counted, total))
if idx > 0 or data_sel == len(data) - 1:
break
counted = last_counted
data_sel += 1
idx = 100
pcts[pct_idx] = gran * idx + gran / 2
return pcts
def format_usec(lat):
if lat > SEC:
return '{:.1f}s'.format(lat / SEC)
elif lat > 10 * MSEC:
return '{:.0f}ms'.format(lat / MSEC)
elif lat > MSEC:
return '{:.1f}ms'.format(lat / MSEC)
elif lat > 0:
return '{:.0f}us'.format(lat)
else:
return '-'
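# Illustrative expectations for format_usec (assumed examples, not from the
# original source):
#   format_usec(2500000) -> '2.5s'
#   format_usec(42000)   -> '42ms'
#   format_usec(1500)    -> '1.5ms'
#   format_usec(250)     -> '250us'
#   format_usec(0)       -> '-'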
# 0 interval can be used to test whether this script would run successfully.
if args.interval == 0:
sys.exit(0)
# Set up signal handling so that we print the result on USR1/2 and before
# exiting on a signal. Combined with infinite interval, this can be used to
# obtain overall latency distribution between two events. On USR2 the
# accumulated counters are cleared too, which can be used to define
# arbitrary intervals.
force_update_last_rwdf = False
keep_running = True
result_req = Event()
def sig_handler(sig, frame):
global keep_running, force_update_last_rwdf, result_req
if sig == signal.SIGUSR1:
force_update_last_rwdf = True
elif sig != signal.SIGUSR2:
keep_running = False
result_req.set()
for sig in (signal.SIGUSR1, signal.SIGUSR2, signal.SIGINT, signal.SIGTERM, signal.SIGHUP):
signal.signal(sig, sig_handler)
# If infinite interval, always trigger the first output so that the caller
# can tell when initialization is complete.
if args.interval < 0:
    result_req.set()
while keep_running:
result_req.wait(args.interval if args.interval > 0 else None)
result_req.clear()
update_last_rwdf = args.interval > 0 or force_update_last_rwdf
force_update_last_rwdf = False
    rwdf_total = [0] * 4
for i in range(400):
v = cur_rwdf_100ms.sum(i).value
rwdf_100ms[i] = max(v - last_rwdf_100ms[i], 0)
if update_last_rwdf:
last_rwdf_100ms[i] = v
v = cur_rwdf_1ms.sum(i).value
rwdf_1ms[i] = max(v - last_rwdf_1ms[i], 0)
if update_last_rwdf:
last_rwdf_1ms[i] = v
v = cur_rwdf_10us.sum(i).value
rwdf_10us[i] = max(v - last_rwdf_10us[i], 0)
if update_last_rwdf:
last_rwdf_10us[i] = v
rwdf_total[int(i / 100)] += rwdf_100ms[i]
rwdf_lat = []
for i in range(4):
left = i * 100
right = left + 100
rwdf_lat.append(
calc_lat_pct(args.pcts, rwdf_total[i],
rwdf_100ms[left:right],
rwdf_1ms[left:right],
rwdf_10us[left:right]))
if args.verbose:
print('{:7} 100ms {}'.format(io_type[i], rwdf_100ms[left:right]))
print('{:7} 1ms {}'.format(io_type[i], rwdf_1ms[left:right]))
print('{:7} 10us {}'.format(io_type[i], rwdf_10us[left:right]))
if args.json:
result = {}
for iot in range(4):
lats = {}
for pi in range(len(args.pcts)):
lats[args.pcts[pi]] = rwdf_lat[iot][pi] / SEC
result[io_type[iot]] = lats
print(json.dumps(result), flush=True)
else:
print('\n{:<7}'.format(os.path.basename(args.dev)), end='')
widths = []
        for pct in args.pcts:
            widths.append(max(len(pct), 5))
            print(' {:>{}}'.format(pct, widths[-1]), end='')
print()
for iot in range(4):
print('{:7}'.format(io_type[iot]), end='')
for pi in range(len(rwdf_lat[iot])):
print(' {:>{}}'.format(format_usec(rwdf_lat[iot][pi]), widths[pi]), end='')
print()
| {
"content_hash": "5133e0cca86466ef89cd292c750d92a1",
"timestamp": "",
"source": "github",
"line_count": 296,
"max_line_length": 94,
"avg_line_length": 32.104729729729726,
"alnum_prop": 0.5743449437019889,
"repo_name": "brendangregg/bcc",
"id": "a2f595924ac1477cedca1955231fa00f5f68ba88",
"size": "10122",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/biolatpcts.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "8274020"
},
{
"name": "C++",
"bytes": "890599"
},
{
"name": "CMake",
"bytes": "48236"
},
{
"name": "HTML",
"bytes": "2997"
},
{
"name": "Lua",
"bytes": "299473"
},
{
"name": "Makefile",
"bytes": "3214"
},
{
"name": "Python",
"bytes": "1377079"
},
{
"name": "Shell",
"bytes": "21733"
}
],
"symlink_target": ""
} |
import uuid
from sqlalchemy import types, sql, orm
from sqlalchemy.schema import Column, ForeignKey
from .base import Base, GUID
from .user import User
from midauth.utils.text import slugify
__all__ = ['Group']
class Group(Base):
__tablename__ = 'group'
pk = Column(GUID, primary_key=True, default=uuid.uuid4)
name = Column(types.Unicode(64), nullable=False)
slug = Column(types.Unicode(32), nullable=False)
created_at = Column(types.DateTime(timezone=True), nullable=False,
default=sql.functions.now())
users = orm.relationship(lambda: GroupAssociation, collection_class=set)
def __init__(self, name, slug=None, users=()):
slug = slug or slugify(name)
self.name = name
self.slug = slug
self.users = set(GroupAssociation(group=self, user=u) for u in users)
def __repr__(self):
return u'{class_name}({0.name!r}, slug={0.slug!r})' \
.format(self, class_name=type(self).__name__)
class GroupAssociation(Base):
__tablename__ = 'group_association'
user_pk = Column(GUID, ForeignKey(User.pk, onupdate='CASCADE',
ondelete='CASCADE'),
primary_key=True)
group_pk = Column(GUID, ForeignKey(Group.pk, onupdate='CASCADE',
ondelete='CASCADE'),
primary_key=True)
created_at = Column(types.DateTime(timezone=True), nullable=False,
default=sql.functions.now())
primary = Column(types.Boolean, nullable=False, default=False)
group = orm.relationship(Group)
user = orm.relationship(User, backref=orm.backref('group_assocs',
collection_class=set))
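# Hedged usage sketch (not part of the original module); assumes a configured
# SQLAlchemy session bound to these models and an existing `user` instance:
#   group = Group(u'Engineering', users=[user])  # slug defaults to slugify(name)
#   session.add(group)
#   session.commit()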
| {
"content_hash": "913013befbf9e7d4b4fba711d97425e5",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 77,
"avg_line_length": 35.9,
"alnum_prop": 0.5938718662952647,
"repo_name": "smartstudy/midauth",
"id": "0fbb8e42c4f96d08871a8157223f9182bca43ad0",
"size": "1819",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "midauth/models/group.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "157248"
},
{
"name": "JavaScript",
"bytes": "197"
},
{
"name": "Python",
"bytes": "70528"
},
{
"name": "Shell",
"bytes": "5098"
}
],
"symlink_target": ""
} |
from django.conf.urls.defaults import *
import views
urlpatterns = patterns('',
url('^openid/login/$', views.login, name="openid_login"),
url('^openid/callback/$', views.callback),
)
| {
"content_hash": "bbc7d30d2a019b3555710a510c6ffb88",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 80,
"avg_line_length": 31.75,
"alnum_prop": 0.515748031496063,
"repo_name": "uroslates/django-allauth",
"id": "5e34f2b81fc7b3a4d25ca78265cd181523e9c0eb",
"size": "254",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "allauth/socialaccount/providers/openid/urls.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import logging
from olympia.lib.settings_base import * # noqa
ENGAGE_ROBOTS = True
EMAIL_URL = env.email_url('EMAIL_URL')
EMAIL_HOST = EMAIL_URL['EMAIL_HOST']
EMAIL_PORT = EMAIL_URL['EMAIL_PORT']
EMAIL_BACKEND = EMAIL_URL['EMAIL_BACKEND']
EMAIL_HOST_USER = EMAIL_URL['EMAIL_HOST_USER']
EMAIL_HOST_PASSWORD = EMAIL_URL['EMAIL_HOST_PASSWORD']
SEND_REAL_EMAIL = True
ENV = env('ENV')
API_THROTTLING = True
CDN_HOST = 'https://addons.cdn.mozilla.net'
DOMAIN = env('DOMAIN', default='addons.mozilla.org')
SERVER_EMAIL = 'zprod@addons.mozilla.org'
SITE_URL = 'https://' + DOMAIN
SERVICES_URL = env('SERVICES_URL',
default='https://services.addons.mozilla.org')
STATIC_URL = '%s/static/' % CDN_HOST
MEDIA_URL = '%s/user-media/' % CDN_HOST
SESSION_COOKIE_DOMAIN = ".%s" % DOMAIN
# Domain that inbound emails should be sent to.
INBOUND_EMAIL_DOMAIN = env('INBOUND_EMAIL_DOMAIN',
default='addons.mozilla.org')
DATABASES = {
'default': get_db_config('DATABASES_DEFAULT_URL'),
'slave': get_db_config('DATABASES_SLAVE_URL', atomic_requests=False),
}
SERVICES_DATABASE = get_db_config('SERVICES_DATABASE_URL')
SLAVE_DATABASES = ['slave']
CACHES = {}
CACHES['default'] = env.cache('CACHES_DEFAULT')
CACHES['default']['TIMEOUT'] = 500
CACHES['default']['BACKEND'] = 'django.core.cache.backends.memcached.MemcachedCache' # noqa
CACHES['default']['KEY_PREFIX'] = CACHE_KEY_PREFIX
# Celery
CELERY_BROKER_CONNECTION_TIMEOUT = 0.5
LOGGING['loggers'].update({
'adi.updatecounts': {'level': logging.INFO},
'amqp': {'level': logging.WARNING},
'raven': {'level': logging.WARNING},
'requests': {'level': logging.WARNING},
'z.addons': {'level': logging.INFO},
'z.task': {'level': logging.DEBUG},
'z.pool': {'level': logging.ERROR},
})
ES_TIMEOUT = 60
ES_HOSTS = env('ES_HOSTS')
ES_URLS = ['http://%s' % h for h in ES_HOSTS]
ES_INDEXES = dict((k, '%s_%s' % (v, ENV)) for k, v in ES_INDEXES.items())
CEF_PRODUCT = STATSD_PREFIX
NEW_FEATURES = True
ADDONS_LINTER_BIN = 'node_modules/.bin/addons-linter'
XSENDFILE_HEADER = 'X-Accel-Redirect'
NEWRELIC_ENABLE = env.bool('NEWRELIC_ENABLE', default=False)
if NEWRELIC_ENABLE:
NEWRELIC_INI = '/etc/newrelic.d/%s.ini' % DOMAIN
FXA_CONFIG = {
'default': {
'client_id': env('FXA_CLIENT_ID'),
'client_secret': env('FXA_CLIENT_SECRET'),
'content_host': 'https://accounts.firefox.com',
'oauth_host': 'https://oauth.accounts.firefox.com/v1',
'profile_host': 'https://profile.accounts.firefox.com/v1',
'redirect_url':
'https://%s/api/v3/accounts/authenticate/' % DOMAIN,
'scope': 'profile',
},
'amo': {
'client_id': env('AMO_FXA_CLIENT_ID'),
'client_secret': env('AMO_FXA_CLIENT_SECRET'),
'content_host': 'https://accounts.firefox.com',
'oauth_host': 'https://oauth.accounts.firefox.com/v1',
'profile_host': 'https://profile.accounts.firefox.com/v1',
'redirect_url':
'https://addons.mozilla.org/api/v3/accounts/authenticate/',
'scope': 'profile',
'skip_register_redirect': True,
},
}
DEFAULT_FXA_CONFIG_NAME = 'default'
ALLOWED_FXA_CONFIGS = ['default', 'amo']
VALIDATOR_TIMEOUT = 360
ES_DEFAULT_NUM_SHARDS = 10
RECOMMENDATION_ENGINE_URL = env(
'RECOMMENDATION_ENGINE_URL',
default='https://taar.prod.mozaws.net/v1/api/recommendations/')
TAAR_LITE_RECOMMENDATION_ENGINE_URL = env(
'TAAR_LITE_RECOMMENDATION_ENGINE_URL',
default=('https://taarlite.prod.mozaws.net/taarlite/api/v1/'
'addon_recommendations/'))
FXA_SQS_AWS_QUEUE_URL = (
'https://sqs.us-west-2.amazonaws.com/361527076523/'
'amo-account-change-prod')
DRF_API_VERSIONS = ['v3', 'v4']
DRF_API_REGEX = r'^/?api/(?:v3|v4)/'
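# Illustrative note (not in the original settings): DRF_API_REGEX above is
# meant to match request paths such as '/api/v3/addons/' or 'api/v4/accounts/'
# while leaving '/api/v2/...' unmatched.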
| {
"content_hash": "2ffeb6911771ba79992e1809082cca21",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 92,
"avg_line_length": 30.08730158730159,
"alnum_prop": 0.644684779741493,
"repo_name": "aviarypl/mozilla-l10n-addons-server",
"id": "8940cda188fb28be991746d65733da586e24fde3",
"size": "3791",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/olympia/conf/prod/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "809734"
},
{
"name": "Dockerfile",
"bytes": "2898"
},
{
"name": "HTML",
"bytes": "515798"
},
{
"name": "JavaScript",
"bytes": "1070508"
},
{
"name": "Makefile",
"bytes": "827"
},
{
"name": "PLSQL",
"bytes": "316"
},
{
"name": "PLpgSQL",
"bytes": "10596"
},
{
"name": "Python",
"bytes": "5462821"
},
{
"name": "SQLPL",
"bytes": "645"
},
{
"name": "Shell",
"bytes": "8821"
},
{
"name": "Smarty",
"bytes": "1388"
}
],
"symlink_target": ""
} |
import grpc
from google.cloud.bigquery_storage_v1beta1.proto import (
storage_pb2 as google_dot_cloud_dot_bigquery_dot_storage__v1beta1_dot_proto_dot_storage__pb2,
)
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class BigQueryStorageStub(object):
"""BigQuery storage API.
The BigQuery storage API can be used to read data stored in BigQuery.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.CreateReadSession = channel.unary_unary(
"/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/CreateReadSession",
request_serializer=google_dot_cloud_dot_bigquery_dot_storage__v1beta1_dot_proto_dot_storage__pb2.CreateReadSessionRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigquery_dot_storage__v1beta1_dot_proto_dot_storage__pb2.ReadSession.FromString,
)
self.ReadRows = channel.unary_stream(
"/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/ReadRows",
request_serializer=google_dot_cloud_dot_bigquery_dot_storage__v1beta1_dot_proto_dot_storage__pb2.ReadRowsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigquery_dot_storage__v1beta1_dot_proto_dot_storage__pb2.ReadRowsResponse.FromString,
)
self.BatchCreateReadSessionStreams = channel.unary_unary(
"/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/BatchCreateReadSessionStreams",
request_serializer=google_dot_cloud_dot_bigquery_dot_storage__v1beta1_dot_proto_dot_storage__pb2.BatchCreateReadSessionStreamsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigquery_dot_storage__v1beta1_dot_proto_dot_storage__pb2.BatchCreateReadSessionStreamsResponse.FromString,
)
self.FinalizeStream = channel.unary_unary(
"/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/FinalizeStream",
request_serializer=google_dot_cloud_dot_bigquery_dot_storage__v1beta1_dot_proto_dot_storage__pb2.FinalizeStreamRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.SplitReadStream = channel.unary_unary(
"/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/SplitReadStream",
request_serializer=google_dot_cloud_dot_bigquery_dot_storage__v1beta1_dot_proto_dot_storage__pb2.SplitReadStreamRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigquery_dot_storage__v1beta1_dot_proto_dot_storage__pb2.SplitReadStreamResponse.FromString,
)
class BigQueryStorageServicer(object):
"""BigQuery storage API.
The BigQuery storage API can be used to read data stored in BigQuery.
"""
def CreateReadSession(self, request, context):
"""Creates a new read session. A read session divides the contents of a
BigQuery table into one or more streams, which can then be used to read
data from the table. The read session also specifies properties of the
data to be read, such as a list of columns or a push-down filter describing
the rows to be returned.
A particular row can be read by at most one stream. When the caller has
reached the end of each stream in the session, then all the data in the
table has been read.
Read sessions automatically expire 24 hours after they are created and do
not require manual clean-up by the caller.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ReadRows(self, request, context):
"""Reads rows from the table in the format prescribed by the read session.
Each response contains one or more table rows, up to a maximum of 10 MiB
per response; read requests which attempt to read individual rows larger
than this will fail.
Each request also returns a set of stream statistics reflecting the
estimated total number of rows in the read stream. This number is computed
based on the total table size and the number of active streams in the read
session, and may change as other streams continue to read data.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def BatchCreateReadSessionStreams(self, request, context):
"""Creates additional streams for a ReadSession. This API can be used to
dynamically adjust the parallelism of a batch processing task upwards by
adding additional workers.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def FinalizeStream(self, request, context):
"""Triggers the graceful termination of a single stream in a ReadSession. This
API can be used to dynamically adjust the parallelism of a batch processing
task downwards without losing data.
This API does not delete the stream -- it remains visible in the
ReadSession, and any data processed by the stream is not released to other
streams. However, no additional data will be assigned to the stream once
this call completes. Callers must continue reading data on the stream until
the end of the stream is reached so that data which has already been
assigned to the stream will be processed.
This method will return an error if there are no other live streams
in the Session, or if SplitReadStream() has been called on the given
Stream.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SplitReadStream(self, request, context):
"""Splits a given read stream into two Streams. These streams are referred to
as the primary and the residual of the split. The original stream can still
be read from in the same manner as before. Both of the returned streams can
also be read from, and the total rows return by both child streams will be
the same as the rows read from the original stream.
Moreover, the two child streams will be allocated back to back in the
original Stream. Concretely, it is guaranteed that for streams Original,
Primary, and Residual, that Original[0-j] = Primary[0-j] and
Original[j-n] = Residual[0-m] once the streams have been read to
completion.
This method is guaranteed to be idempotent.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def add_BigQueryStorageServicer_to_server(servicer, server):
rpc_method_handlers = {
"CreateReadSession": grpc.unary_unary_rpc_method_handler(
servicer.CreateReadSession,
request_deserializer=google_dot_cloud_dot_bigquery_dot_storage__v1beta1_dot_proto_dot_storage__pb2.CreateReadSessionRequest.FromString,
response_serializer=google_dot_cloud_dot_bigquery_dot_storage__v1beta1_dot_proto_dot_storage__pb2.ReadSession.SerializeToString,
),
"ReadRows": grpc.unary_stream_rpc_method_handler(
servicer.ReadRows,
request_deserializer=google_dot_cloud_dot_bigquery_dot_storage__v1beta1_dot_proto_dot_storage__pb2.ReadRowsRequest.FromString,
response_serializer=google_dot_cloud_dot_bigquery_dot_storage__v1beta1_dot_proto_dot_storage__pb2.ReadRowsResponse.SerializeToString,
),
"BatchCreateReadSessionStreams": grpc.unary_unary_rpc_method_handler(
servicer.BatchCreateReadSessionStreams,
request_deserializer=google_dot_cloud_dot_bigquery_dot_storage__v1beta1_dot_proto_dot_storage__pb2.BatchCreateReadSessionStreamsRequest.FromString,
response_serializer=google_dot_cloud_dot_bigquery_dot_storage__v1beta1_dot_proto_dot_storage__pb2.BatchCreateReadSessionStreamsResponse.SerializeToString,
),
"FinalizeStream": grpc.unary_unary_rpc_method_handler(
servicer.FinalizeStream,
request_deserializer=google_dot_cloud_dot_bigquery_dot_storage__v1beta1_dot_proto_dot_storage__pb2.FinalizeStreamRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
"SplitReadStream": grpc.unary_unary_rpc_method_handler(
servicer.SplitReadStream,
request_deserializer=google_dot_cloud_dot_bigquery_dot_storage__v1beta1_dot_proto_dot_storage__pb2.SplitReadStreamRequest.FromString,
response_serializer=google_dot_cloud_dot_bigquery_dot_storage__v1beta1_dot_proto_dot_storage__pb2.SplitReadStreamResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
"google.cloud.bigquery.storage.v1beta1.BigQueryStorage", rpc_method_handlers
)
server.add_generic_rpc_handlers((generic_handler,))
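# Hedged client-side sketch (not part of this generated module); shows how the
# stub above is typically wired to a channel. The endpoint string and TLS
# credentials are assumptions, not taken from this file:
#   channel = grpc.secure_channel('bigquerystorage.googleapis.com:443',
#                                 grpc.ssl_channel_credentials())
#   stub = BigQueryStorageStub(channel)
#   # session = stub.CreateReadSession(<CreateReadSessionRequest>)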
| {
"content_hash": "7498d3f71f449988ce40e8d13128bd6a",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 166,
"avg_line_length": 55.32934131736527,
"alnum_prop": 0.7316017316017316,
"repo_name": "tseaver/google-cloud-python",
"id": "85b890f0e0cc2319de59b82612c812659e0ce24b",
"size": "9310",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "bigquery_storage/google/cloud/bigquery_storage_v1beta1/proto/storage_pb2_grpc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1094"
},
{
"name": "Python",
"bytes": "30519057"
},
{
"name": "Shell",
"bytes": "9148"
}
],
"symlink_target": ""
} |