from __future__ import unicode_literals
"""
Patch courtesy of:
https://marmida.com/blog/index.php/2012/08/08/monkey-patching-assert_raises/
"""
# code for monkey-patching
import nose.tools

# let's fix nose.tools.assert_raises (which is really unittest.assertRaises)
# so that it always supports context management
# in order for these changes to be available to other modules, you'll need
# to guarantee this module is imported by your fixture before either nose or
# unittest are imported
try:
    nose.tools.assert_raises(Exception)
except TypeError:
    # this version of assert_raises doesn't support the 1-arg version
    class AssertRaisesContext(object):
        def __init__(self, expected):
            self.expected = expected

        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc_val, tb):
            self.exception = exc_val
            if issubclass(exc_type, self.expected):
                return True
            nose.tools.assert_equal(exc_type, self.expected)
            # if you get to this line, the last assertion must have passed
            # suppress the propagation of this exception
            return True

    def assert_raises_context(exc_type):
        return AssertRaisesContext(exc_type)

    nose.tools.assert_raises = assert_raises_context
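# --- Illustrative usage sketch (not part of the original patch) ---
# A minimal, hedged example of what the backported context-manager form
# allows in a test module that imports this fixture first; ``MyError`` and
# ``explode`` are hypothetical names used only for illustration.
def _example_assert_raises_usage():
    class MyError(Exception):
        pass

    def explode():
        raise MyError("boom")

    with nose.tools.assert_raises(MyError):
        explode()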
| {
"content_hash": "53d0bf1c7bcf81ecb260f5bab0a4da51",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 76,
"avg_line_length": 33.05,
"alnum_prop": 0.6762481089258698,
"repo_name": "silveregg/moto",
"id": "6ceacaa891bf3cd115325b8ceb1dff173c8061e7",
"size": "1322",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/backport_assert_raises.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "230"
},
{
"name": "Python",
"bytes": "2435907"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
input_name = '../examples/navier_stokes/navier_stokes2d.py'
output_name = 'test_navier_stokes2d.vtk'
from tests_basic import TestInput
class Test(TestInput):
    pass
| {
"content_hash": "f98877bc03628094cde27f51b31cfad1",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 59,
"avg_line_length": 29.571428571428573,
"alnum_prop": 0.7584541062801933,
"repo_name": "lokik/sfepy",
"id": "9d59b0d4120703a2e468740c68d07c94d55db7b0",
"size": "207",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_input_navier_stokes2d.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "448969"
},
{
"name": "C++",
"bytes": "37842"
},
{
"name": "GLSL",
"bytes": "6058"
},
{
"name": "Makefile",
"bytes": "184"
},
{
"name": "PowerShell",
"bytes": "3118"
},
{
"name": "Python",
"bytes": "2701733"
},
{
"name": "Shell",
"bytes": "71"
}
],
"symlink_target": ""
} |
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2014-2016 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
# Programmable fiberoptic instrument
from .diconGP700 import diconGP700
| {
"content_hash": "fcc7ef67781d4c0f01c41d343d9ef8b6",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 77,
"avg_line_length": 39.93333333333333,
"alnum_prop": 0.8046744574290484,
"repo_name": "Diti24/python-ivi",
"id": "f5112f804a138755dd00d2293a646501207f3aa4",
"size": "1198",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ivi/dicon/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1992462"
}
],
"symlink_target": ""
} |
from oslotest import base as test_base
from testtools import matchers
import webob
import webob.dec
from oslo_middleware import request_id
class RequestIdTest(test_base.BaseTestCase):
    def test_generate_request_id(self):
        @webob.dec.wsgify
        def application(req):
            return req.environ[request_id.ENV_REQUEST_ID]

        app = request_id.RequestId(application)
        req = webob.Request.blank('/test')
        res = req.get_response(app)
        res_req_id = res.headers.get(request_id.HTTP_RESP_HEADER_REQUEST_ID)
        self.assertThat(res_req_id, matchers.StartsWith(b'req-'))
        # request-id in request environ is returned as response body
        self.assertEqual(res_req_id, res.body)
| {
"content_hash": "f193cbdb2c9c25cc630886f66d107449",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 76,
"avg_line_length": 34.666666666666664,
"alnum_prop": 0.6909340659340659,
"repo_name": "JioCloud/oslo.middleware",
"id": "09bdd327596f3f4c726d4d1be4657bedb147e72b",
"size": "1364",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "oslo_middleware/tests/test_request_id.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "123331"
}
],
"symlink_target": ""
} |
"""
.. module:: libopenzwave
This file is part of **python-openzwave** project https://github.com/OpenZWave/python-openzwave.
:platform: Unix, Windows, MacOS X
:synopsis: openzwave C++
.. moduleauthor: bibi21000 aka Sebastien GALLET <bibi21000@gmail.com>
.. moduleauthor: Maarten Damen <m.damen@gmail.com>
License : GPL(v3)
**python-openzwave** is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
**python-openzwave** is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with python-openzwave. If not, see http://www.gnu.org/licenses.
"""
class PyOptions(object):
"""
Manage the OpenZWave options manager
"""
def __init__(self, config_path=None, user_path=".", cmd_line=""):
"""
Create an option object and check that parameters are valid.
:param config_path: The openzwave config directory. If None, try to configure automatically.
:type config_path: str
:param user_path: The user directory
:type user_path: str
:param cmd_line: The "command line" options of the openzwave library
:type cmd_line: str
"""
if config_path is None:
config_path = self.getConfigPath()
if config_path is None:
raise LibZWaveException("Can't autoconfigure path to config")
self._config_path = config_path
if user_path is None:
user_path = "."
self._user_path = user_path
if cmd_line is None:
cmd_line=""
self._cmd_line = cmd_line
self.create(self._config_path, self._user_path, self._cmd_line)
def create(self, a, b, c):
"""
.. _createoptions:
Create an option object used to start the manager
:param a: The path of the config directory
:type a: str
:param b: The path of the user directory
:type b: str
:param c: The "command line" options of the openzwave library
:type c: str
:see: destroyoptions_
"""
self.options = CreateOptions(
str_to_cppstr(a), str_to_cppstr(b), str_to_cppstr(c))
return True
def destroy(self):
"""
.. _destroyoptions:
Deletes the Options and cleans up any associated objects.
The application is responsible for destroying the Options object,
but this must not be done until after the Manager object has been
destroyed.
:return: The result of the operation.
:rtype: bool
:see: createoptions_
"""
return self.options.Destroy()
def lock(self):
"""
.. _lock:
Lock the options. Needed to start the manager
:return: The result of the operation.
:rtype: bool
:see: areLocked_
"""
return self.options.Lock()
def areLocked(self):
'''
.. _areLocked:
Test whether the options have been locked.
:return: true if the options have been locked.
:rtype: boolean
:see: lock_
'''
return self.options.AreLocked()
def addOptionBool(self, name, value):
"""
.. _addOptionBool:
Add a boolean option.
:param name: The name of the option.
:type name: str
:param value: The value of the option.
:type value: boolean
:return: The result of the operation.
:rtype: bool
:see: addOption_, addOptionInt_, addOptionString_
"""
return self.options.AddOptionBool(str_to_cppstr(name), value)
def addOptionInt(self, name, value):
"""
.. _addOptionInt:
Add an integer option.
:param name: The name of the option.
:type name: str
:param value: The value of the option.
:type value: integer
:return: The result of the operation.
:rtype: bool
:see: addOption_, addOptionBool_, addOptionString_
"""
return self.options.AddOptionInt(str_to_cppstr(name), value)
def addOptionString(self, name, value, append=False):
"""
.. _addOptionString:
Add a string option.
:param name: The name of the option. Option names are case insensitive and must be unique.
:type name: str
:param value: The value of the option.
:type value: str
:param append: Setting append to true will cause values read from the command line
or XML file to be concatenated into a comma delimited set. If _append is false,
newer values will overwrite older ones.
:type append: boolean
:return: The result of the operation.
:rtype: bool
:see: addOption_, addOptionBool_, addOptionInt_
"""
return self.options.AddOptionString(
str_to_cppstr(name), str_to_cppstr(value), append)
def addOption(self, name, value):
"""
.. _addOption:
Add an option.
:param name: The name of the option.
:type name: string
:param value: The value of the option.
:type value: boolean, integer, string
:return: The result of the operation.
:rtype: bool
:see: addOptionBool_, addOptionInt_, addOptionString_
"""
if name not in PyOptionList:
return False
if PyOptionList[name]['type'] == "String":
return self.addOptionString(name, value)
elif PyOptionList[name]['type'] == "Bool":
return self.addOptionBool(name, value)
elif PyOptionList[name]['type'] == "Int":
return self.addOptionInt(name, value)
return False
def getOption(self, name):
"""
.. _getOption:
Retrieve the value of an option.
:param name: The name of the option.
:type name: string
:return: The value
:rtype: boolean, integer, string or None
:see: getOptionAsBool_, getOptionAsInt_, getOptionAsString_
"""
if name not in PyOptionList:
return None
if PyOptionList[name]['type'] == "String":
return self.getOptionAsString(name)
elif PyOptionList[name]['type'] == "Bool":
return self.getOptionAsBool(name)
elif PyOptionList[name]['type'] == "Int":
return self.getOptionAsInt(name)
return False
# def getOptionAsBool(self, name):
# """
# .. _getOptionAsBool:
#
# Retrieve boolean value of an option.
#
# :param name: The name of the option.
# :type name: string
# :return: The value or None
# :rtype: boolean or None
#
# :see: getOption_, getOptionAsInt_, getOptionAsString_
#
# """
# cdef bool type_bool
# cret = self.options.GetOptionAsBool(str_to_cppstr(name), &type_bool)
# ret = type_bool if cret==True else None
# return ret
#
# def getOptionAsInt(self, name):
# """
# .. _getOptionAsInt:
#
# Retrieve integer value of an option.
#
# :param name: The name of the option.
# :type name: string
# :return: The value or None
# :rtype: Integer or None
#
# :see: getOption_, getOptionAsBool_, getOptionAsString_
#
# """
# cdef int32_t type_int
# cret = self.options.GetOptionAsInt(str_to_cppstr(name), &type_int)
# ret = type_int if cret==True else None
# return ret
#
# def getOptionAsString(self, name):
# """
# .. _getOptionAsString:
#
# Retrieve string value of an option.
#
# :param name: The name of the option.
# :type name: string
# :return: The value or None
# :rtype: String or None
#
# :see: getOption_, getOptionAsBool_, getOptionAsInt_
#
# """
# cdef string type_string
# cret = self.options.GetOptionAsString(str_to_cppstr(name), &type_string)
# ret = cstr_to_str(type_string.c_str()) if cret==True else None
# return ret
def getConfigPath(self):
'''
.. _getConfigPath:
Retrieve the config path. This directory holds the xml files.
:return: A string containing the library config path or None.
:rtype: str
'''
return configPath()
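# --- Illustrative sketch (not part of the original module) ---
# Rough outline of the usual option lifecycle using only the methods defined
# above; the option names ("Logging", "SaveLogLevel") follow common OpenZWave
# spelling but are assumptions for this example, and the returned object still
# needs a manager to be useful.
def _example_options_setup():
    opts = PyOptions(config_path=None, user_path=".", cmd_line="")
    opts.addOptionBool("Logging", True)
    opts.addOptionInt("SaveLogLevel", 6)
    opts.lock()  # options must be locked before starting the manager
    return opts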
PyStatDriver = {
'SOFCnt' : "Number of SOF bytes received",
'ACKWaiting' : "Number of unsolicited messages while waiting for an ACK",
'readAborts' : "Number of times read were aborted due to timeouts",
'badChecksum' : "Number of bad checksums",
'readCnt' : "Number of messages successfully read",
'writeCnt' : "Number of messages successfully sent",
'CANCnt' : "Number of CAN bytes received",
'NAKCnt' : "Number of NAK bytes received",
'ACKCnt' : "Number of ACK bytes received",
'OOFCnt' : "Number of bytes out of framing",
'dropped' : "Number of messages dropped & not delivered",
'retries' : "Number of messages retransmitted",
'callbacks' : "Number of unexpected callbacks",
'badroutes' : "Number of failed messages due to bad route response",
'noack' : "Number of no ACK returned errors",
'netbusy' : "Number of network busy/failure messages",
'nondelivery' : "Number of messages not delivered to network",
'routedbusy' : "Number of messages received with routed busy status",
'broadcastReadCnt' : "Number of broadcasts read",
'broadcastWriteCnt' : "Number of broadcasts sent",
}
PyLogLevels = {
'Invalid' : {'doc':'Invalid Log Status', 'value':0},
'None' : {'doc':'Disable all logging', 'value':1},
'Always' : {'doc':'These messages should always be shown', 'value':2},
'Fatal' : {'doc':'A likely fatal issue in the library', 'value':3},
'Error' : {'doc':'A serious issue with the library or the network', 'value':4},
'Warning' : {'doc':'A minor issue from which the library should be able to recover', 'value':5},
'Alert' : {'doc':'Something unexpected by the library about which the controlling application should be aware', 'value':6},
'Info' : {'doc':"Everything's working fine...these messages provide streamlined feedback on each message", 'value':7},
'Detail' : {'doc':'Detailed information on the progress of each message', 'value':8},
'Debug' : {'doc':'Very detailed information on progress that will create a huge log file quickly but this level (as others) can be queued and sent to the log only on an error or warning', 'value':9},
'StreamDetail' : {'doc':'Will include low-level byte transfers from controller to buffer to application and back', 'value':10},
'Internal' : {'doc':'Used only within the log class (uses existing timestamp, etc.)', 'value':11},
}
class EnumWithDoc(str):
"""Enum helper"""
def setDoc(self, doc):
self.doc = doc
return self
PyControllerState = [
EnumWithDoc('Normal').setDoc("No command in progress."),
EnumWithDoc('Starting').setDoc("The command is starting."),
EnumWithDoc('Cancel').setDoc("The command was cancelled."),
EnumWithDoc('Error').setDoc("Command invocation had error(s) and was aborted."),
EnumWithDoc('Waiting').setDoc("Controller is waiting for a user action."),
EnumWithDoc('Sleeping').setDoc("Controller command is on a sleep queue wait for device."),
EnumWithDoc('InProgress').setDoc("The controller is communicating with the other device to carry out the command."),
EnumWithDoc('Completed').setDoc("The command has completed successfully."),
EnumWithDoc('Failed').setDoc("The command has failed."),
EnumWithDoc('NodeOK').setDoc("Used only with ControllerCommand_HasNodeFailed to indicate that the controller thinks the node is OK."),
EnumWithDoc('NodeFailed').setDoc("Used only with ControllerCommand_HasNodeFailed to indicate that the controller thinks the node has failed."),
]
PyNotifications = [
EnumWithDoc('ValueAdded').setDoc("A new node value has been added to OpenZWave's set. These notifications occur after a node has been discovered, and details of its command classes have been received. Each command class may generate one or more values depending on the complexity of the item being represented."),
EnumWithDoc('ValueRemoved').setDoc("A node value has been removed from OpenZWave's set. This only occurs when a node is removed."),
EnumWithDoc('ValueChanged').setDoc("A node value has been updated from the Z-Wave network and it is different from the previous value."),
EnumWithDoc('ValueRefreshed').setDoc("A node value has been updated from the Z-Wave network."),
EnumWithDoc('Group').setDoc("The associations for the node have changed. The application should rebuild any group information it holds about the node."),
EnumWithDoc('NodeNew').setDoc("A new node has been found (not already stored in zwcfg*.xml file)."),
EnumWithDoc('NodeAdded').setDoc("A new node has been added to OpenZWave's set. This may be due to a device being added to the Z-Wave network, or because the application is initializing itself."),
EnumWithDoc('NodeRemoved').setDoc("A node has been removed from OpenZWave's set. This may be due to a device being removed from the Z-Wave network, or because the application is closing."),
EnumWithDoc('NodeProtocolInfo').setDoc("Basic node information has been received, such as whether the node is a listening device, a routing device and its baud rate and basic, generic and specific types. It is after this notification that you can call Manager::GetNodeType to obtain a label containing the device description."),
EnumWithDoc('NodeNaming').setDoc("One of the node names has changed (name, manufacturer, product)."),
EnumWithDoc('NodeEvent').setDoc("A node has triggered an event. This is commonly caused when a node sends a Basic_Set command to the controller. The event value is stored in the notification."),
EnumWithDoc('PollingDisabled').setDoc("Polling of a node has been successfully turned off by a call to Manager::DisablePoll."),
EnumWithDoc('PollingEnabled').setDoc("Polling of a node has been successfully turned on by a call to Manager::EnablePoll."),
EnumWithDoc('SceneEvent').setDoc("Scene Activation Set received."),
EnumWithDoc('CreateButton').setDoc("Handheld controller button event created."),
EnumWithDoc('DeleteButton').setDoc("Handheld controller button event deleted."),
EnumWithDoc('ButtonOn').setDoc("Handheld controller button on pressed event."),
EnumWithDoc('ButtonOff').setDoc("Handheld controller button off pressed event."),
EnumWithDoc('DriverReady').setDoc("A driver for a PC Z-Wave controller has been added and is ready to use. The notification will contain the controller's Home ID, which is needed to call most of the Manager methods."),
EnumWithDoc('DriverFailed').setDoc("Driver failed to load."),
EnumWithDoc('DriverReset').setDoc("All nodes and values for this driver have been removed. This is sent instead of potentially hundreds of individual node and value notifications."),
EnumWithDoc('EssentialNodeQueriesComplete').setDoc("The queries on a node that are essential to its operation have been completed. The node can now handle incoming messages."),
EnumWithDoc('NodeQueriesComplete').setDoc("All the initialisation queries on a node have been completed."),
EnumWithDoc('AwakeNodesQueried').setDoc("All awake nodes have been queried, so client application can expect complete data for these nodes."),
EnumWithDoc('AllNodesQueriedSomeDead').setDoc("All nodes have been queried but some dead nodes found."),
EnumWithDoc('AllNodesQueried').setDoc("All nodes have been queried, so client application can expect complete data."),
EnumWithDoc('Notification').setDoc("A manager notification report."),
EnumWithDoc('DriverRemoved').setDoc("The Driver is being removed."),
EnumWithDoc('ControllerCommand').setDoc("When Controller Commands are executed, Notifications of Success/Failure etc are communicated via this Notification."),
]
PyStatDriver = {
'SOFCnt' : "Number of SOF bytes received",
'ACKWaiting' : "Number of unsolicited messages while waiting for an ACK",
'readAborts' : "Number of times read were aborted due to timeouts",
'badChecksum' : "Number of bad checksums",
'readCnt' : "Number of messages successfully read",
'writeCnt' : "Number of messages successfully sent",
'CANCnt' : "Number of CAN bytes received",
'NAKCnt' : "Number of NAK bytes received",
'ACKCnt' : "Number of ACK bytes received",
'OOFCnt' : "Number of bytes out of framing",
'dropped' : "Number of messages dropped & not delivered",
'retries' : "Number of messages retransmitted",
'callbacks' : "Number of unexpected callbacks",
'badroutes' : "Number of failed messages due to bad route response",
'noack' : "Number of no ACK returned errors",
'netbusy' : "Number of network busy/failure messages",
'nondelivery' : "Number of messages not delivered to network",
'routedbusy' : "Number of messages received with routed busy status",
'broadcastReadCnt' : "Number of broadcasts read",
'broadcastWriteCnt' : "Number of broadcasts sent",
}
COMMAND_CLASS_DESC = {
0x00: 'COMMAND_CLASS_NO_OPERATION',
0x20: 'COMMAND_CLASS_BASIC',
0x21: 'COMMAND_CLASS_CONTROLLER_REPLICATION',
0x22: 'COMMAND_CLASS_APPLICATION_STATUS',
0x23: 'COMMAND_CLASS_ZIP_SERVICES',
0x24: 'COMMAND_CLASS_ZIP_SERVER',
0x25: 'COMMAND_CLASS_SWITCH_BINARY',
0x26: 'COMMAND_CLASS_SWITCH_MULTILEVEL',
0x27: 'COMMAND_CLASS_SWITCH_ALL',
0x28: 'COMMAND_CLASS_SWITCH_TOGGLE_BINARY',
0x29: 'COMMAND_CLASS_SWITCH_TOGGLE_MULTILEVEL',
0x2A: 'COMMAND_CLASS_CHIMNEY_FAN',
0x2B: 'COMMAND_CLASS_SCENE_ACTIVATION',
0x2C: 'COMMAND_CLASS_SCENE_ACTUATOR_CONF',
0x2D: 'COMMAND_CLASS_SCENE_CONTROLLER_CONF',
0x2E: 'COMMAND_CLASS_ZIP_CLIENT',
0x2F: 'COMMAND_CLASS_ZIP_ADV_SERVICES',
0x30: 'COMMAND_CLASS_SENSOR_BINARY',
0x31: 'COMMAND_CLASS_SENSOR_MULTILEVEL',
0x32: 'COMMAND_CLASS_METER',
0x33: 'COMMAND_CLASS_COLOR',
0x34: 'COMMAND_CLASS_ZIP_ADV_CLIENT',
0x35: 'COMMAND_CLASS_METER_PULSE',
0x3C: 'COMMAND_CLASS_METER_TBL_CONFIG',
0x3D: 'COMMAND_CLASS_METER_TBL_MONITOR',
0x3E: 'COMMAND_CLASS_METER_TBL_PUSH',
0x38: 'COMMAND_CLASS_THERMOSTAT_HEATING',
0x40: 'COMMAND_CLASS_THERMOSTAT_MODE',
0x42: 'COMMAND_CLASS_THERMOSTAT_OPERATING_STATE',
0x43: 'COMMAND_CLASS_THERMOSTAT_SETPOINT',
0x44: 'COMMAND_CLASS_THERMOSTAT_FAN_MODE',
0x45: 'COMMAND_CLASS_THERMOSTAT_FAN_STATE',
0x46: 'COMMAND_CLASS_CLIMATE_CONTROL_SCHEDULE',
0x47: 'COMMAND_CLASS_THERMOSTAT_SETBACK',
0x4c: 'COMMAND_CLASS_DOOR_LOCK_LOGGING',
0x4E: 'COMMAND_CLASS_SCHEDULE_ENTRY_LOCK',
0x50: 'COMMAND_CLASS_BASIC_WINDOW_COVERING',
0x51: 'COMMAND_CLASS_MTP_WINDOW_COVERING',
0x56: 'COMMAND_CLASS_CRC_16_ENCAP',
0x5A: 'COMMAND_CLASS_DEVICE_RESET_LOCALLY',
0x5E: 'COMMAND_CLASS_ZWAVE_PLUS_INFO',
0x60: 'COMMAND_CLASS_MULTI_CHANNEL_V2',
0x61: 'COMMAND_CLASS_DISPLAY',
0x62: 'COMMAND_CLASS_DOOR_LOCK',
0x63: 'COMMAND_CLASS_USER_CODE',
0x64: 'COMMAND_CLASS_GARAGE_DOOR',
0x70: 'COMMAND_CLASS_CONFIGURATION',
0x71: 'COMMAND_CLASS_ALARM',
0x72: 'COMMAND_CLASS_MANUFACTURER_SPECIFIC',
0x73: 'COMMAND_CLASS_POWERLEVEL',
0x75: 'COMMAND_CLASS_PROTECTION',
0x76: 'COMMAND_CLASS_LOCK',
0x77: 'COMMAND_CLASS_NODE_NAMING',
0x78: 'COMMAND_CLASS_ACTUATOR_MULTILEVEL',
0x79: 'COMMAND_CLASS_KICK',
0x7A: 'COMMAND_CLASS_FIRMWARE_UPDATE_MD',
0x7B: 'COMMAND_CLASS_GROUPING_NAME',
0x7C: 'COMMAND_CLASS_REMOTE_ASSOCIATION_ACTIVATE',
0x7D: 'COMMAND_CLASS_REMOTE_ASSOCIATION',
0x80: 'COMMAND_CLASS_BATTERY',
0x81: 'COMMAND_CLASS_CLOCK',
0x82: 'COMMAND_CLASS_HAIL',
0x83: 'COMMAND_CLASS_NETWORK_STAT',
0x84: 'COMMAND_CLASS_WAKE_UP',
0x85: 'COMMAND_CLASS_ASSOCIATION',
0x86: 'COMMAND_CLASS_VERSION',
0x87: 'COMMAND_CLASS_INDICATOR',
0x88: 'COMMAND_CLASS_PROPRIETARY',
0x89: 'COMMAND_CLASS_LANGUAGE',
0x8A: 'COMMAND_CLASS_TIME',
0x8B: 'COMMAND_CLASS_TIME_PARAMETERS',
0x8C: 'COMMAND_CLASS_GEOGRAPHIC_LOCATION',
0x8D: 'COMMAND_CLASS_COMPOSITE',
0x8E: 'COMMAND_CLASS_MULTI_INSTANCE_ASSOCIATION',
0x8F: 'COMMAND_CLASS_MULTI_CMD',
0x90: 'COMMAND_CLASS_ENERGY_PRODUCTION',
0x91: 'COMMAND_CLASS_MANUFACTURER_PROPRIETARY',
0x92: 'COMMAND_CLASS_SCREEN_MD',
0x93: 'COMMAND_CLASS_SCREEN_ATTRIBUTES',
0x94: 'COMMAND_CLASS_SIMPLE_AV_CONTROL',
0x95: 'COMMAND_CLASS_AV_CONTENT_DIRECTORY_MD',
0x96: 'COMMAND_CLASS_AV_RENDERER_STATUS',
0x97: 'COMMAND_CLASS_AV_CONTENT_SEARCH_MD',
0x98: 'COMMAND_CLASS_SECURITY',
0x99: 'COMMAND_CLASS_AV_TAGGING_MD',
0x9A: 'COMMAND_CLASS_IP_CONFIGURATION',
0x9B: 'COMMAND_CLASS_ASSOCIATION_COMMAND_CONFIGURATION',
0x9C: 'COMMAND_CLASS_SENSOR_ALARM',
0x9D: 'COMMAND_CLASS_SILENCE_ALARM',
0x9E: 'COMMAND_CLASS_SENSOR_CONFIGURATION',
0xEF: 'COMMAND_CLASS_MARK',
0xF0: 'COMMAND_CLASS_NON_INTEROPERABLE'
}
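# --- Illustrative sketch (not part of the original module) ---
# Minimal helper showing how a command-class byte reported by a node can be
# resolved to a readable name with the table above; 0x25 is just a sample id
# (it maps to 'COMMAND_CLASS_SWITCH_BINARY').
def _describe_command_class(class_id):
    return COMMAND_CLASS_DESC.get(class_id,
                                  'UNKNOWN_COMMAND_CLASS_0x%02X' % class_id)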
| {
"content_hash": "282b935448ea7dc4dfa6a351ab3f5f15",
"timestamp": "",
"source": "github",
"line_count": 514,
"max_line_length": 331,
"avg_line_length": 42.05642023346304,
"alnum_prop": 0.6635518342045612,
"repo_name": "Julian/libopenzwave-cffi",
"id": "0414778e36eb738ff3c83eb64b793b974e632049",
"size": "21642",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libopenzwave/_global.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3589"
},
{
"name": "C++",
"bytes": "9952"
},
{
"name": "Python",
"bytes": "33820"
}
],
"symlink_target": ""
} |
from string import ascii_lowercase
import scrapy
from scrapy.spiders import CrawlSpider
from w3lib.html import remove_tags, remove_tags_with_content
class InvestopediaSpider(CrawlSpider):
name = 'investopedia'
start_urls = ['http://www.investopedia.com/terms/%s/' % s for s in ascii_lowercase + '1']
def parse(self, response):
"""
Parse the response page
"""
url = response.url
# 'terms' has to be there in the URL to proceed further
if 'terms' not in url:
return
# if the url ends with '.asp', then that's a topic page
if url.endswith('.asp'):
return self._parse_topic_response(response)
# Otherwise, assume that this is a list page
return self._parse_topic_list(response)
def _parse_topic_response(self, response):
"""
Parses various topics
e.g. www.investopedia.com/terms/o/oddlottheory.asp
"""
# Get the title first
title = response.css('title::text').extract_first()
# Replace / with a space - creates issues with writing to file
title = title.replace('/', ' ')
# Get the first div with id Content
content = response.css('div#Content')[0]
content = content.css('div.content-box')
text = ''
for child in content.xpath('//p'):
# Get the text from this child <p></p> tag
paragraph = child.extract()
# Remove tags including <p> and <a>
paragraph = remove_tags(remove_tags_with_content(paragraph, ('script', ))).strip()
# Replace '&amp;' with '&'
paragraph = paragraph.replace('&amp;', '&')
# Replace 'U.S.' with 'US':
paragraph = paragraph.replace('U.S.', 'US')
# Some more replacements to improve the default tokenization
for c in '();.,[]"\'-:/%$+@?':
paragraph = paragraph.replace(c, ' {} '.format(c))
# Add to the file
text += paragraph.lower() + '\n'
# Save the title and the text both
filename = 'investopedia_data.txt'
f = open(filename, 'a')
f.write(text)
f.close()
def _parse_topic_list(self, response):
"""
Parse the page with the topics listed out
e.g. www.investopedia.com/terms/o/
"""
list_element = response.css('ol.list')
# Iterate through the list of topics
for l in list_element.css('li'):
# Extract the URL
url = l.css('a::attr(href)').extract_first()
next_page = response.urljoin(url)
yield scrapy.Request(next_page, callback=self.parse)
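# --- Illustrative sketch (not part of the original spider) ---
# One assumed way to run this spider outside a full Scrapy project, using
# scrapy.crawler.CrawlerProcess; "scrapy crawl investopedia" from within a
# project would work as well.
if __name__ == '__main__':
    from scrapy.crawler import CrawlerProcess

    process = CrawlerProcess()
    process.crawl(InvestopediaSpider)
    process.start()  # blocks until the crawl finishes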
| {
"content_hash": "7904c135fdd0358f5a0492de8b0b2b04",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 94,
"avg_line_length": 31.858823529411765,
"alnum_prop": 0.5646233382570163,
"repo_name": "hardikp/fnlp",
"id": "7c0f9870e79cff216a1da016624dd93d4b229446",
"size": "2708",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scrapy-spiders/investopedia.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15705"
}
],
"symlink_target": ""
} |
from __future__ import division
import numpy as np
import scipy as sp
from pylab import ion
from scipy import signal as sig
from scipy import optimize as opt
from scipy.interpolate import interp1d
#from scipy.io import loadmat
import matplotlib as mpl
from matplotlib.mlab import *
from matplotlib.pyplot import *
from matplotlib.widgets import MultiCursor
from matplotlib.ticker import EngFormatter
import re
import os
import sys
import csv
import argparse
import pprint
from itertools import cycle
import time
from peak_finding import find_peaks_cwt
from constants import pi
from smartFormat import smartFormat, simpleFormat
import timeAndFreq as tf
# TODO: Constrained optimization, especially keeping frequency of peak
# and amplitude sign (PEAK vs. NULL) within desired ranges so as to
# truly optimize fit and get a rational result out.
def structurallyDampedRes(params, angFreq):
'''
Model of a single mass-spring system with only structural (hysteretic)
damping (i.e., no viscous damping)
X-values are given by
angFreq, omega
Model parameters and the variables typically used for them are
ampl0, A0 = F0/m
resFreq, omega_0 = sqrt(k/m)
qFactor, Q = 1/eta
'''
ampl0 = params['ampl0']
resFreq = params['resFreq']
qFactor = params['qFactor']
#B = -2j*lossFactor*springConst*mass-2*springConst*mass+2*mass**2*angFreq**2
#ampl = 4*force*mass**2 * (-1j*lossFactor*springConst -
# springConst + mass*angFreq**2) / ( -B**2 )
ampl = ampl0/(angFreq**2 - resFreq**2*(1-1j/qFactor))
return ampl
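# --- Illustrative sketch (not part of the original module) ---
# Evaluating the structurally damped single-resonator model above on a
# frequency grid; the parameter values are arbitrary placeholders.
def _example_structural_response():
    params = {'ampl0': 1.0, 'resFreq': 2*pi*100.0, 'qFactor': 50.0}
    angFreq = 2*pi*np.linspace(50.0, 150.0, 1000)
    return structurallyDampedRes(params, angFreq)  # complex amplitude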
def viscAndStructDampedRes(params, angFreq):
'''
Model of a single mass-spring-damper system with both viscous and
structural (hysteretic) damping
X-values are given by
angFreq, omega
Model parameters and the variables typically used for them are
mass, m
springConst, k
lossFactor, eta
viscousDamping, gamma
force, F0
'''
mass = params['mass']
springConst = params['springConst']
lossFactor = params['lossFactor']
viscousDamping = params['viscousDamping']
force = params['force']
A = viscousDamping*np.sqrt(
viscousDamping**2 - 4j*lossFactor*springConst*mass - 4*springConst*mass)
B = viscousDamping**2 - 2j*lossFactor*springConst*mass \
- 2*springConst*mass + 2*mass**2*angFreq**2
ampl = 4*force*mass**2 * ( -1j*lossFactor*springConst - springConst \
+ mass*angFreq**2 - viscousDamping*angFreq*(pi*1j/2) ) \
/ ( (A+B)*(A-B) )
return ampl
def twoCoupledOscViscousDamping(params, omega):
'''
Model of two coupled mass-spring-damper systems, where there is no
loss in the coupling term.
X-values are given by
omega
Model parameters are
alpha0 -- nominal driving force
r_alpha -- ratio of driving forces
omega1 -- angular frequency of first resonance
omega2 -- angular frequency of second resonance
Q1 -- Q factor for first resonance
Q2 -- Q factor for second resonance
coupling -- strength of coupling between the two
r_mass -- ratio of masses
'''
alpha0 = params['alpha0'].value
r_alpha = params['r_alpha'].value
omega1 = params['omega1'].value
omega2 = params['omega2'].value
Q1 = params['Q1'].value
Q2 = params['Q2'].value
coupling = params['coupling'].value
r_mass = params['r_mass'].value
#dc_offset = params['dc_offset'].value
zeta1 = 1/(2*Q1)
zeta2 = 1/(2*Q2)
model = \
(-( \
( \
alpha0*( \
coupling*(-1+r_mass*r_alpha) \
+ r_alpha*(-2*1j*zeta2*omega+omega**2-omega2**2) \
) \
) \
/ ( \
(-2*1j* zeta1* omega + omega**2 - omega1**2) \
* (-2*1j* zeta2 *omega + omega**2 - omega2**2) \
+ coupling*( \
omega*( \
-2*1j*(r_mass*zeta1+zeta2) \
+ (1 + r_mass)*omega \
) \
- r_mass*omega1**2 \
- omega2**2 \
) \
) \
) \
+ ( \
1j*alpha0*( \
coupling*(-1+r_mass*r_alpha) \
+ 2*1j*zeta1*omega - omega**2 + omega1**2 \
) \
) \
/ ( \
(2*zeta1*omega + 1j*(omega - omega1)*(omega + omega1)) \
* (-2*1j* zeta2*omega + omega**2 - omega2**2) \
+ coupling*( \
omega*(2*(r_mass*zeta1 + zeta2) + 1j*(1+r_mass)*omega) \
- 1j*r_mass*omega1**2 - 1j*omega2**2 \
) \
))
return model
def complLorentzian(freq, x0, beta, gamma, phi0):
Y = 1j*phi0 + \
beta/(-freq**2 + 1j*gamma*freq + x0**2)
return Y
def complResErr(params, freq, cVal):
x0, beta, gamma, phi0 = params
Y = complLorentzian(freq, x0, beta, gamma, phi0)
err = Y - cVal
return np.abs(err)
def realLorentzian(freq, x0, beta, gamma, y0):
#Y = beta * (gamma/2)/((freq-x0)**2 + (gamma/2)**2)
#Y = (gamma/2)/((freq-x0)**2 + (gamma/2)**2)
#Y = beta/(1+((freq-x0)*gamma/2)**2) + y0
Y = (beta)/((freq-x0)**2 + (gamma/2)**2) + y0
return Y
def realGaussian(freq, x0, beta, gamma, y0):
#-- Gamma is FWHM
#Y = beta * np.exp((freq-x0)**2/(gamma**2/8/np.log(2)))
Y = np.exp((freq-x0)**2/(gamma**2/8/np.log(2))) + y0
return Y
def realResErr(params, freq, amplVal):
x0, beta, gamma, y0 = params
Y = realLorentzian(freq, x0, beta, gamma, y0)
#Y = realGaussian(freq, x0, beta, gamma, y00)
err = Y - amplVal
return abs(err)
def fitLorentzian(extremumInd, xCoords, yData, f0, gamma0, n=1,
peak=True, compl=True):
xCoords = xCoords.astype(np.float_)
yData = yData.astype(np.float_)
f0 = xCoords[extremumInd]
gamma0 = 0.0001*f0
#trialLorentzian = realLorentzian(xCoords, f0, 1, gamma0)
#beta0 = np.abs(yData[extremumInd]) / max(trialLorentzian)
beta0 = yData[extremumInd]
beta0 = max(yData)
phi0 = 0
y00 = 0
print "initial parameters", f0, beta0, gamma0
if compl:
params = [f0, beta0, gamma0, phi0]
optout = opt.leastsq(complResErr, params, args=(xCoords, yData),
full_output=True)
return optout
params = [f0, beta0, gamma0, y00]
#optout = opt.leastsq(realResErr, params, args=(xCoords, yData),
# full_output=True)
optout = opt.curve_fit(realLorentzian, xCoords, yData, p0=params)
return optout
def realLorentziansPD(x, paramsDicts):
if isinstance(paramsDicts, dict):
pd = paramsDicts
return realLorentzian(x, pd['x0'], pd['beta'], pd['gamma'], pd['y0'])
y = np.zeros_like(x)
for pd in paramsDicts:
y += realLorentzian(x, pd['x0'], pd['beta'], pd['gamma'], pd['y0'])
return y
def realLorentziansPL(x, *args, **kwargs):
nParams = 4
paramsList = list(args)[1:]
paramsDicts = []
for n in range(int(len(paramsList)/nParams)):
paramsDicts.append(
{'x0': paramsList[nParams*n],
'beta': paramsList[nParams*n+1],
'gamma': paramsList[nParams*n+2],
'y0': paramsList[nParams*n+3]}
)
return realLorentziansPD(x, paramsDicts)
def realLorentziansTemp(x, x0, beta, gamma, y0=0.0):
freq = x
#y0 = 0.0
#x0 = 6197.0
print 'x0', x0, 'beta', beta, 'gamma', gamma, 'y0', y0
Y = (beta*(gamma/2)**2)/((freq-x0)**2 + (gamma/2)**2) + y0
return Y
def fitLorentzians(xCoords, yData, initialGuessDicts, compl=False):
if compl:
nParams = 5
else:
nParams = 4
#-- Make sure data types are floats s.t. bug in scipy doesn't rear its
# ugly head
xCoords = xCoords.astype(np.float_)
yData = yData.astype(np.float_)
#if isinstance(initialGuessDicts, dict):
# initialGuessDicts = [initialGuessDicts]
##-- Unpack dictionary parameters into a list
#params = []
#for igd in initialGuessDicts:
# params.extend([igd['x0'], igd['beta'], igd['gamma'], igd['y0']])
params = (initialGuessDicts['x0'], initialGuessDicts['beta'],
initialGuessDicts['gamma'], initialGuessDicts['y0'])
print 'igparams', params
#if compl:
# params = [f0, beta0, gamma0, phi0]
# optout = opt.leastsq(complResErr, params, args=(xCoords, yData),
# full_output=True)
# return optout
optout = opt.curve_fit(realLorentziansTemp, xCoords, yData, p0=params)
#optout = opt.curve_fit(realLorentziansPL, xCoords, yData, p0=params)
print 'optout', optout
##-- Re-pack dictionary parameters into list of dictionaries
#n = 0
#paramsList = optout[0]
#for igd in initialGuessDicts:
# igd.update(
# {'x0': paramsList[n*nParams],
# 'beta': paramsList[n*nParams+1],
# 'gamma': paramsList[n*nParams+2],
# 'y0': paramsList[n*nParams+3]}
# )
optout = list(optout)
optout[0] = initialGuessDicts
return optout
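# --- Illustrative sketch (not part of the original module) ---
# Fitting a single synthetic Lorentzian with fitLorentzians(); the frequency
# grid and parameter values are arbitrary placeholders.
def _example_lorentzian_fit():
    freq = np.linspace(6100.0, 6300.0, 500)
    data = realLorentziansTemp(freq, 6197.0, 2.0, 3.0, 0.1)
    guess = {'x0': 6190.0, 'beta': 1.0, 'gamma': 5.0, 'y0': 0.0}
    return fitLorentzians(freq, data, guess)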
| {
"content_hash": "d2f540171a37f7ca4d84c0a8e3f34063",
"timestamp": "",
"source": "github",
"line_count": 304,
"max_line_length": 80,
"avg_line_length": 30.654605263157894,
"alnum_prop": 0.5759201631076296,
"repo_name": "jllanfranchi/pygeneric",
"id": "591b7ad005c1b91e82e954befe0d85cc7400c94d",
"size": "9437",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "peak_fitting.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "18292"
},
{
"name": "Python",
"bytes": "313385"
}
],
"symlink_target": ""
} |
import os
import sys
import json
import yaml
def main():
with open(sys.argv[1], 'rb') as f:
known_issues = yaml.load(f.read())
skipstrings = [
'passed in strict mode',
'passed in non-strict mode',
'failed in strict mode as expected',
'failed in non-strict mode as expected'
]
in_failed_tests = False
tofix_count = 0 # count of bugs that will be fixed (no uncertainty about proper behavior etc)
known_errors = []
diagnosed_errors = []
unknown_errors = []
other_errors = []
for line in sys.stdin:
if len(line) > 1 and line[-1] == '\n':
line = line[:-1]
# Skip success cases
skip = False
for sk in skipstrings:
if sk in line:
skip = True
if skip:
continue
# Augment error list with "known bugs"
print(line) # print error list as is, then refined version later
if 'failed tests' in line.lower():
in_failed_tests = True
continue
if in_failed_tests and line.strip() == '':
in_failed_tests = False
continue
if in_failed_tests:
# " intl402/ch12/12.2/12.2.3_c in non-strict mode"
tmp = line.strip().split(' ')
test = tmp[0]
matched = False
for kn in known_issues:
if kn.get('test', None) != test:
continue
if kn.has_key('diagnosed'):
tofix_count += 1
diagnosed_errors.append(line + ' // diagnosed: ' + kn['diagnosed'])
elif kn.has_key('knownissue'):
# Don't bump tofix_count, as testcase expected result is not certain
known_errors.append(line + ' // KNOWN: ' + kn['knownissue'])
else:
tofix_count += 1
unknown_errors.append(line + ' // ??? (rule matches)')
kn['used'] = True # mark rule used
matched = True
break
if matched:
continue
# no match, to fix
other_errors.append(line)
tofix_count += 1
print('')
print('=== CATEGORISED ERRORS ===')
print('')
# With ES2015+ semantic changes to ES5 there are too many known
# issues to print by default.
#for i in known_errors:
# print(i)
for i in diagnosed_errors:
print(i)
for i in unknown_errors:
print(i)
for i in other_errors:
print(i)
# Check for unused rules (e.g. bugs fixed)
print('')
for kn in known_issues:
if not kn.has_key('used'):
print('WARNING: unused rule: ' + json.dumps(kn))
# Used by testclient
if len(unknown_errors) > 0 or len(other_errors) > 0:
print('TEST262 FAILED')
elif len(known_errors) > 0 or len(diagnosed_errors) > 0:
# Known and diagnosed errors don't indicate test failure
# as far as Github status is concerned.
print('TEST262 SUCCESS')
else:
print('TEST262 SUCCESS')
# To fix count
print('')
print('KNOWN ISSUE COUNT: ' + str(len(known_errors)))
print('TO-FIX COUNT: ' + str(tofix_count))
print(' = test case failures which need fixing (Duktape bugs, uninvestigated)')
if __name__ == '__main__':
main()
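# --- Illustrative sketch (not part of the original script) ---
# The known-issues YAML consumed above is assumed to parse into a list of
# dicts shaped like the following; the first test name and both annotations
# are hypothetical.
EXAMPLE_KNOWN_ISSUES = [
    {'test': 'ch15/15.4/15.4.4/15.4.4.4_A1_T1',
     'diagnosed': 'off-by-one in concat, fix planned'},
    {'test': 'intl402/ch12/12.2/12.2.3_c',
     'knownissue': 'expected behavior unclear in the spec'},
]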
| {
"content_hash": "417a473ad02bfe38cf8b7cfbb33c58aa",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 101,
"avg_line_length": 28.31404958677686,
"alnum_prop": 0.5239346176298891,
"repo_name": "markand/duktape",
"id": "5cde7e3daafce2f8d22d738ecd6009d19b14d3d8",
"size": "3450",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "util/filter_test262_log.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2859797"
},
{
"name": "C++",
"bytes": "18532"
},
{
"name": "CSS",
"bytes": "32733"
},
{
"name": "CoffeeScript",
"bytes": "1029"
},
{
"name": "HTML",
"bytes": "4438824"
},
{
"name": "Java",
"bytes": "3043"
},
{
"name": "JavaScript",
"bytes": "8666999"
},
{
"name": "Lua",
"bytes": "19160"
},
{
"name": "Makefile",
"bytes": "47049"
},
{
"name": "Perl",
"bytes": "177"
},
{
"name": "Perl6",
"bytes": "22748"
},
{
"name": "Python",
"bytes": "291537"
},
{
"name": "Ruby",
"bytes": "18384"
},
{
"name": "Shell",
"bytes": "24267"
}
],
"symlink_target": ""
} |
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2013 Nathanael C. Fritz, Lance J.T. Stout
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
import logging
from sleekxmpp.plugins.base import BasePlugin
from sleekxmpp.plugins.xep_0152 import stanza, Reachability
log = logging.getLogger(__name__)
class XEP_0152(BasePlugin):
"""
XEP-0152: Reachability Addresses
"""
name = 'xep_0152'
description = 'XEP-0152: Reachability Addresses'
dependencies = set(['xep_0163'])
stanza = stanza
def plugin_end(self):
self.xmpp['xep_0030'].del_feature(feature=Reachability.namespace)
self.xmpp['xep_0163'].remove_interest(Reachability.namespace)
def session_bind(self, jid):
self.xmpp['xep_0163'].register_pep('reachability', Reachability)
def publish_reachability(self, addresses, options=None,
ifrom=None, block=True, callback=None, timeout=None):
"""
Publish alternative addresses where the user can be reached.
Arguments:
addresses -- A list of dictionaries containing the URI and
optional description for each address.
options -- Optional form of publish options.
ifrom -- Specify the sender's JID.
block -- Specify if the send call will block until a response
is received, or a timeout occurs. Defaults to True.
timeout -- The length of time (in seconds) to wait for a response
before exiting the send call if blocking is used.
Defaults to sleekxmpp.xmlstream.RESPONSE_TIMEOUT
callback -- Optional reference to a stream handler function. Will
be executed when a reply stanza is received.
"""
if not isinstance(addresses, (list, tuple)):
addresses = [addresses]
reach = Reachability()
for address in addresses:
if not hasattr(address, 'items'):
address = {'uri': address}
addr = stanza.Address()
for key, val in address.items():
addr[key] = val
reach.append(addr)
return self.xmpp['xep_0163'].publish(reach,
node=Reachability.namespace,
options=options,
ifrom=ifrom,
block=block,
callback=callback,
timeout=timeout)
def stop(self, ifrom=None, block=True, callback=None, timeout=None):
"""
Clear existing user activity information to stop notifications.
Arguments:
ifrom -- Specify the sender's JID.
block -- Specify if the send call will block until a response
is received, or a timeout occurs. Defaults to True.
timeout -- The length of time (in seconds) to wait for a response
before exiting the send call if blocking is used.
Defaults to sleekxmpp.xmlstream.RESPONSE_TIMEOUT
callback -- Optional reference to a stream handler function. Will
be executed when a reply stanza is received.
"""
reach = Reachability()
return self.xmpp['xep_0163'].publish(reach,
node=Reachability.namespace,
ifrom=ifrom,
block=block,
callback=callback,
timeout=timeout)
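# --- Illustrative sketch (not part of the original plugin) ---
# Rough example of publishing a reachability address from an already
# connected client; ``xmpp`` stands for a ClientXMPP instance with this
# plugin registered, and the URI is a placeholder.
def _example_publish(xmpp):
    xmpp['xep_0152'].publish_reachability(['tel:+1-555-000-0000'])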
| {
"content_hash": "51d166942fc48743091ff5afea817fd7",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 79,
"avg_line_length": 38.064516129032256,
"alnum_prop": 0.5847457627118644,
"repo_name": "isandlaTech/cohorte-3rdparty",
"id": "4cf81739f04f9447c49570957f6f7883c112f246",
"size": "3540",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "sleekxmpp/src/main/python/sleekxmpp/plugins/xep_0152/reachability.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "Perl",
"bytes": "8420"
},
{
"name": "Python",
"bytes": "5545307"
},
{
"name": "Shell",
"bytes": "2838"
}
],
"symlink_target": ""
} |
"""
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import os
import sys
from fontTools.subset import load_font
from fontTools.subset import Options
from fontTools.subset import save_font
from fontTools.subset import Subsetter
def main(args):
"""Subset a font (useful for making small test fonts).
Args:
args: list, arguments the user typed.
"""
parser = argparse.ArgumentParser()
parser.add_argument('fontfile', help='Input font file')
parser.add_argument('--subset_postfix', default='',
help='Postfix to the subset extension')
parser.add_argument('--text', default='',
help='Text to include in the subset')
parser.add_argument('--unicodes', default='',
help='Comma separated list of Unicode codepoints (hex) '
'to include in the subset; eg, "e7,0xe8,U+00e9"')
parser.add_argument('--glyphs', default='',
help='Comma separated list of glyph IDs (decimal) to '
'include in the subset; eg, "1,27"')
parser.add_argument('--hinting', default=False, action='store_true',
help='Enable hinting if specified, no hinting if not '
'present')
cmd_args = parser.parse_args(args)
options = Options()
# Definitely want the .notdef glyph and outlines.
options.notdef_glyph = True
options.notdef_outline = True
# Get the items to keep in the subset.
text = cmd_args.text
unicodes_str = cmd_args.unicodes.lower().replace('0x', '').replace('u+', '')
# TODO(bstell) replace this whole file by using the new subset.py code
unicodes_input = [c for c in unicodes_str.split(',') if c]
unicodes = []
for c in unicodes_input:
if '-' in c:
uni_range = c.split('-')
uni_range_expanded = range(int(uni_range[0], 16), int(uni_range[1], 16) + 1)
unicodes.extend(uni_range_expanded)
else:
unicodes.append(int(c, 16))
#unicodes = [int(c, 16) for c in unicodes_input_expanded]
glyphs = [int(c) for c in cmd_args.glyphs.split(',') if c]
fontfile = cmd_args.fontfile
options.hinting = cmd_args.hinting # False => no hinting
options.hinting = True # hint stripping for CFF is currently broken
dirname = os.path.dirname(fontfile)
basename = os.path.basename(fontfile)
filename, extension = os.path.splitext(basename)
subset_postfix = cmd_args.subset_postfix
output_file = dirname + '/' + filename + '_subset' + subset_postfix + extension
print "output_file =", output_file
font = load_font(fontfile, options, lazy=False)
subsetter = Subsetter(options)
subsetter.populate(text=text, unicodes=unicodes, glyphs=glyphs)
subsetter.subset(font)
save_font(font, output_file, options)
if __name__ == '__main__':
main(sys.argv[1:])
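# --- Illustrative sketch (not part of the original tool) ---
# Example of driving main() directly with assumed arguments; the font path
# and codepoint range are placeholders. It is equivalent to running:
#   make_subset.py MyFont.ttf --unicodes 41-5a,61-7a --text Hello
def _example_subset_call():
    main(['MyFont.ttf', '--unicodes', '41-5a,61-7a', '--text', 'Hello'])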
| {
"content_hash": "9be744f082867094695a55644029056c",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 82,
"avg_line_length": 38.125,
"alnum_prop": 0.6700447093889716,
"repo_name": "googlefonts/TachyFont",
"id": "603ccd8140305a850b01bafe8ed52b0eeb70d606",
"size": "3355",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "build_time/src/make_subset.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ASP.NET",
"bytes": "303868"
},
{
"name": "CSS",
"bytes": "313471"
},
{
"name": "HTML",
"bytes": "100691"
},
{
"name": "Java",
"bytes": "24368"
},
{
"name": "JavaScript",
"bytes": "1163591"
},
{
"name": "Python",
"bytes": "169129"
},
{
"name": "Shell",
"bytes": "11939"
}
],
"symlink_target": ""
} |
import os
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Poeks', 'poeksweb@gmail.com'),
)
MANAGERS = ADMINS
DATABASE_ENGINE = 'appengine' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = '' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'hvhxfm5u=^*v&fdsgfhfdoo#oq8x*eg8+1&gfsgdf9sxbye@=umutgn^t_sg_nx'
# Ensure that email is not sent via SMTP by default to match the standard App
# Engine SDK behaviour. If you want to sent email via SMTP then add the name of
# your mailserver here.
EMAIL_HOST = ''
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
# 'django.contrib.sessions.middleware.SessionMiddleware',
# 'django.contrib.auth.middleware.AuthenticationMiddleware',
# 'django.middleware.doc.XViewMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
# 'django.core.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
# 'django.core.context_processors.media', # 0.97 only.
# 'django.core.context_processors.request',
)
ROOT_URLCONF = 'urls'
ROOT_PATH = os.path.dirname(__file__)
TEMPLATE_DIRS = (
os.path.join(ROOT_PATH, 'templates')
)
INSTALLED_APPS = (
'appengine_django',
'faves',
# 'django.contrib.auth',
# 'django.contrib.contenttypes',
# 'django.contrib.sessions',
# 'django.contrib.sites',
)
TWITTER = {
'base': 'http://twitter.com/',
'faves': 'favorites',
'page': '?page=',
'auth_user': 'poeks',
'auth_pswd': 'digital'
}
BITLY = {
'api_key': 'R_83df87afe0f7b99fb9a7a5c50ef09494',
'login': 'poeks'
}
APP_DOMAIN = 'www.twitterbelle.com'
APP_URL = "http://%s" % APP_DOMAIN
| {
"content_hash": "85197fafe4fa4f1fdc2ba6002276275c",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 101,
"avg_line_length": 31.554545454545455,
"alnum_prop": 0.700374531835206,
"repo_name": "poeks/twitterbelle",
"id": "d9e24c893bef3cfaa680958fc9b26001364a58c8",
"size": "4104",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "5451"
},
{
"name": "Python",
"bytes": "350587"
}
],
"symlink_target": ""
} |
import os
import copy
import six
import jsonschema
from jsonschema import _validators
from jsonschema.validators import create
from st2common.util import jsonify
__all__ = [
'get_validator',
'get_parameter_schema',
'validate'
]
# https://github.com/json-schema/json-schema/blob/master/draft-04/schema
# The source material is licensed under the AFL or BSD license.
# Both draft 4 and custom schema has additionalProperties set to false by default.
# The custom schema differs from draft 4 with the extension of position, immutable,
# and draft 3 version of required.
PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)))
SCHEMAS = {
'draft4': jsonify.load_file('%s/draft4.json' % PATH),
'custom': jsonify.load_file('%s/custom.json' % PATH)
}
SCHEMA_ANY_TYPE = {
"anyOf": [
{"type": "array"},
{"type": "boolean"},
{"type": "integer"},
{"type": "number"},
{"type": "object"},
{"type": "string"}
]
}
def get_draft_schema(version='custom', additional_properties=False):
schema = copy.deepcopy(SCHEMAS[version])
if additional_properties and 'additionalProperties' in schema:
del schema['additionalProperties']
return schema
def extend_with_default(validator_class):
validate_properties = validator_class.VALIDATORS["properties"]
def set_defaults(validator, properties, instance, schema):
for error in validate_properties(
validator, properties, instance, schema,
):
yield error
for property, subschema in six.iteritems(properties):
if "default" in subschema:
instance.setdefault(property, subschema["default"])
return jsonschema.validators.extend(
validator_class, {"properties": set_defaults},
)
CustomValidator = create(
meta_schema=get_draft_schema(additional_properties=True),
validators={
u"$ref": _validators.ref,
u"additionalItems": _validators.additionalItems,
u"additionalProperties": _validators.additionalProperties,
u"allOf": _validators.allOf_draft4,
u"anyOf": _validators.anyOf_draft4,
u"dependencies": _validators.dependencies,
u"enum": _validators.enum,
u"format": _validators.format,
u"items": _validators.items,
u"maxItems": _validators.maxItems,
u"maxLength": _validators.maxLength,
u"maxProperties": _validators.maxProperties_draft4,
u"maximum": _validators.maximum,
u"minItems": _validators.minItems,
u"minLength": _validators.minLength,
u"minProperties": _validators.minProperties_draft4,
u"minimum": _validators.minimum,
u"multipleOf": _validators.multipleOf,
u"not": _validators.not_draft4,
u"oneOf": _validators.oneOf_draft4,
u"pattern": _validators.pattern,
u"patternProperties": _validators.patternProperties,
u"properties": _validators.properties_draft3,
u"type": _validators.type_draft4,
u"uniqueItems": _validators.uniqueItems,
},
version="action_param",
)
def validate(instance, schema, cls=None, use_default=True, *args, **kwargs):
"""
Custom validate function which supports default arguments combined with the "required"
property.
:param use_default: True to support the use of the optional default property.
:type use_default: ``bool``
"""
instance = copy.deepcopy(instance)
schema_type = schema.get('type', None)
instance_is_dict = isinstance(instance, dict)
if use_default and schema_type == 'object' and instance_is_dict:
properties = schema.get('properties', {})
for property_name, property_data in six.iteritems(properties):
default_value = property_data.get('default', None)
# Assign default value on the instance so the validation doesn't fail if requires is
# true but the value is not provided
if default_value is not None and instance.get(property_name, None) is None:
instance[property_name] = default_value
# pylint: disable=assignment-from-no-return
result = jsonschema.validate(instance=instance, schema=schema, cls=cls, *args, **kwargs)
return result
VALIDATORS = {
'draft4': jsonschema.Draft4Validator,
'custom': CustomValidator
}
def get_validator(version='custom', assign_property_default=False):
validator = VALIDATORS[version]
return extend_with_default(validator) if assign_property_default else validator
def get_parameter_schema(model):
# Dynamically construct JSON schema from the parameters metadata.
def normalize(x):
return {k: v if v else SCHEMA_ANY_TYPE for k, v in six.iteritems(x)}
schema = {}
from st2common.util.action_db import get_runnertype_by_name
runner_type = get_runnertype_by_name(model.runner_type['name'])
properties = normalize(runner_type.runner_parameters)
properties.update(normalize(model.parameters))
if properties:
schema['title'] = model.name
if model.description:
schema['description'] = model.description
schema['type'] = 'object'
schema['properties'] = properties
schema['additionalProperties'] = False
return schema
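# --- Illustrative sketch (not part of the original module) ---
# Minimal example of validating parameters against a schema that combines
# the draft-3 style "required" flag with a default value; the schema and
# instance are made up for illustration.
def _example_validate():
    schema = {
        'type': 'object',
        'properties': {
            'cmd': {'type': 'string', 'required': True, 'default': 'ls'},
            'timeout': {'type': 'integer', 'default': 60}
        },
        'additionalProperties': False
    }
    # 'cmd' is required but omitted; use_default=True lets its default
    # satisfy the requirement during validation (the caller's dict is left
    # untouched because validate() works on a deep copy).
    validate({'timeout': 10}, schema, cls=get_validator('custom'),
             use_default=True)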
| {
"content_hash": "2bc275fee4303b403d68762b139f730b",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 96,
"avg_line_length": 34.38961038961039,
"alnum_prop": 0.6695619335347432,
"repo_name": "Itxaka/st2",
"id": "157ac9b4173a0d970fcff824a929ff7d92c5350a",
"size": "5296",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "st2common/st2common/util/schema/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "Makefile",
"bytes": "35769"
},
{
"name": "PowerShell",
"bytes": "299"
},
{
"name": "Python",
"bytes": "2673739"
},
{
"name": "Shell",
"bytes": "16008"
},
{
"name": "Slash",
"bytes": "677"
}
],
"symlink_target": ""
} |
"""
Classes for handling sparse matrices.
To read about different sparse formats, see
http://www-users.cs.umn.edu/~saad/software/SPARSKIT/paper.ps
"""
from __future__ import print_function
# TODO
# Automatic methods for determining best sparse format?
import sys
import numpy
from numpy.lib.stride_tricks import as_strided
from six import integer_types
from six.moves import xrange
import scipy.sparse
import theano
from theano import gof, tensor, scalar, config
from theano.gradient import DisconnectedType
from theano.sparse.utils import hash_from_sparse
import theano.tests.unittest_tools as utt
from theano.gradient import grad_not_implemented, grad_undefined
from theano.sparse.type import SparseType, _is_sparse
sparse_formats = ['csc', 'csr']
"""
Types of sparse matrices to use for testing.
"""
_mtypes = [scipy.sparse.csc_matrix, scipy.sparse.csr_matrix]
# _mtypes = [sparse.csc_matrix, sparse.csr_matrix, sparse.dok_matrix,
# sparse.lil_matrix, sparse.coo_matrix]
# * new class ``dia_matrix`` : the sparse DIAgonal format
# * new class ``bsr_matrix`` : the Block CSR format
_mtype_to_str = {scipy.sparse.csc_matrix: "csc",
scipy.sparse.csr_matrix: "csr"}
def _is_sparse_variable(x):
"""
Returns
-------
boolean
True iff x is a L{SparseVariable} (and not a L{tensor.TensorType},
for instance).
"""
if not isinstance(x, gof.Variable):
raise NotImplementedError("this function should only be called on "
"*variables* (of type sparse.SparseType "
"or tensor.TensorType, for instance), not ",
x)
return isinstance(x.type, SparseType)
def _is_dense_variable(x):
"""
Returns
-------
boolean
True if x is a L{tensor.TensorType} (and not a L{SparseVariable},
for instance).
"""
if not isinstance(x, gof.Variable):
raise NotImplementedError("this function should only be called on "
"*variables* (of type sparse.SparseType or "
"tensor.TensorType, for instance), not ", x)
return isinstance(x.type, tensor.TensorType)
def _is_dense(x):
"""
Returns
-------
boolean
True unless x is a L{scipy.sparse.spmatrix} (and not a
L{numpy.ndarray}).
"""
if not isinstance(x, (scipy.sparse.spmatrix, numpy.ndarray)):
raise NotImplementedError("this function should only be called on "
"sparse.scipy.sparse.spmatrix or "
"numpy.ndarray, not,", x)
return isinstance(x, numpy.ndarray)
# Wrapper type
def as_sparse_variable(x, name=None):
"""
Wrapper around SparseVariable constructor to construct
a Variable with a sparse matrix with the same dtype and
format.
Parameters
----------
x
A sparse matrix.
Returns
-------
object
SparseVariable version of `x`.
"""
# TODO
# Verify that sp is sufficiently sparse, and raise a
# warning if it is not
if isinstance(x, gof.Apply):
if len(x.outputs) != 1:
raise ValueError("It is ambiguous which output of a "
"multi-output Op has to be fetched.", x)
else:
x = x.outputs[0]
if isinstance(x, gof.Variable):
if not isinstance(x.type, SparseType):
raise TypeError("Variable type field must be a SparseType.", x,
x.type)
return x
try:
return constant(x, name=name)
except TypeError:
raise TypeError("Cannot convert %s to SparseType" % x, type(x))
as_sparse = as_sparse_variable
def as_sparse_or_tensor_variable(x, name=None):
"""
Same as `as_sparse_variable` but if we can't make a
sparse variable, we try to make a tensor variable.
Parameters
----------
x
A sparse matrix.
Returns
-------
SparseVariable or TensorVariable version of `x`
"""
try:
return as_sparse_variable(x, name)
except (ValueError, TypeError):
return theano.tensor.as_tensor_variable(x, name)
def verify_grad_sparse(op, pt, structured=False, *args, **kwargs):
"""
    Wrapper for theano.tests.unittest_tools.py:verify_grad which
converts sparse variables back and forth.
Parameters
----------
op
Op to check.
pt
List of inputs to realize the tests.
structured
True to tests with a structured grad, False otherwise.
args
Other `verify_grad` parameters if any.
kwargs
Other `verify_grad` keywords if any.
Returns
-------
None
"""
def conv_none(x):
return x
def conv_csr(ind, indptr, shp):
def f(spdata):
return CSR(spdata, ind, indptr, shp)
return f
def conv_csc(ind, indptr, shp):
def f(spdata):
return CSC(spdata, ind, indptr, shp)
return f
iconv = []
dpt = []
for p in pt:
if _is_sparse(p):
if structured:
dpt.append(p.data)
else:
dpt.append(p.toarray())
if p.format == 'csr':
if structured:
iconv.append(conv_csr(p.indices[:p.size], p.indptr,
p.shape))
else:
iconv.append(csr_from_dense)
elif p.format == 'csc':
if structured:
iconv.append(conv_csc(p.indices[:p.size], p.indptr,
p.shape))
else:
iconv.append(csc_from_dense)
else:
raise NotImplementedError("No conv for %s" % (p.format,))
else:
dpt.append(p)
iconv.append(conv_none)
output = op(*[as_sparse_or_tensor_variable(p) for p in pt])
if isinstance(output, (list, tuple)):
raise NotImplementedError("verify_grad can't deal with "
"multiple outputs")
if _is_sparse_variable(output):
oconv = DenseFromSparse(structured=structured)
else:
oconv = conv_none
def conv_op(*inputs):
ipt = [conv(i) for i, conv in zip(inputs, iconv)]
out = op(*ipt)
return oconv(out)
return utt.verify_grad(conv_op, dpt, *args, **kwargs)
verify_grad_sparse.E_grad = utt.verify_grad.E_grad
def constant(x, name=None):
if not isinstance(x, scipy.sparse.spmatrix):
raise TypeError("sparse.constant must be called on a "
"scipy.sparse.spmatrix")
try:
return SparseConstant(SparseType(format=x.format,
dtype=x.dtype), x.copy(), name=name)
except TypeError:
raise TypeError("Could not convert %s to SparseType" % x, type(x))
def sp_ones_like(x):
"""
Construct a sparse matrix of ones with the same sparsity pattern.
Parameters
----------
x
Sparse matrix to take the sparsity pattern.
Returns
-------
A sparse matrix
The same as `x` with data changed for ones.
"""
# TODO: don't restrict to CSM formats
data, indices, indptr, shape = csm_properties(x)
return CSM(format=x.format)(tensor.ones_like(data), indices, indptr, shape)
def sp_zeros_like(x):
"""
Construct a sparse matrix of zeros.
Parameters
----------
x
Sparse matrix to take the shape.
Returns
-------
A sparse matrix
The same as `x` with zero entries for all element.
"""
# TODO: don't restrict to CSM formats
_, _, indptr, shape = csm_properties(x)
return CSM(format=x.format)(data=numpy.array([], dtype=x.type.dtype),
indices=numpy.array([], dtype='int32'),
indptr=tensor.zeros_like(indptr),
shape=shape)
class _sparse_py_operators:
T = property(lambda self: transpose(self),
doc="Return aliased transpose of self (read-only)")
def astype(self, dtype):
return cast(self, dtype)
def __neg__(self):
return neg(self)
def __add__(left, right):
return add(left, right)
def __radd__(right, left):
return add(left, right)
def __sub__(left, right):
return sub(left, right)
def __rsub__(right, left):
return sub(left, right)
def __mul__(left, right):
return mul(left, right)
def __rmul__(left, right):
return mul(left, right)
# comparison operators
def __lt__(self, other):
return lt(self, other)
def __le__(self, other):
return le(self, other)
def __gt__(self, other):
return gt(self, other)
def __ge__(self, other):
return ge(self, other)
# extra pseudo-operator symbols
def __dot__(left, right):
return structured_dot(left, right)
def __rdot__(right, left):
return structured_dot(left, right)
# N.B. THIS IS COMMENTED OUT ON PURPOSE!!!
# Discussion with Fred & James (at least, and maybe others before)
# we decided that casting from a sparse to dense should be explicit
# because it's usually something you just want to be pretty careful
# about, and not to do by accident.
# def _as_TensorVariable(self):
# return dense_from_sparse(self)
def toarray(self):
return dense_from_sparse(self)
shape = property(lambda self: tensor.shape(dense_from_sparse(self)))
# don't worry!
# the plan is that the ShapeFeature in tensor.opt will do shape propagation
# and remove the dense_from_sparse from the graph. This will *NOT*
# actually expand your sparse matrix just to get the shape.
ndim = property(lambda self: self.type.ndim)
dtype = property(lambda self: self.type.dtype)
# Note that the `size` attribute of sparse matrices behaves differently
# from dense matrices: it is the number of elements stored in the matrix
# rather than the total number of elements that may be stored. Note also
# that stored zeros *do* count in the size.
size = property(lambda self: csm_data(self).size)
def zeros_like(model):
return sp_zeros_like(model)
def __getitem__(self, args):
if not isinstance(args, tuple):
args = args,
if len(args) == 2:
scalar_arg_1 = (numpy.isscalar(args[0]) or
getattr(args[0], 'type', None) == tensor.iscalar)
scalar_arg_2 = (numpy.isscalar(args[1]) or
getattr(args[1], 'type', None) == tensor.iscalar)
if scalar_arg_1 and scalar_arg_2:
ret = get_item_scalar(self, args)
elif isinstance(args[0], list):
ret = get_item_2lists(self, args[0], args[1])
else:
ret = get_item_2d(self, args)
elif isinstance(args[0], list):
ret = get_item_list(self, args[0])
else:
ret = get_item_2d(self, args)
return ret
class SparseVariable(_sparse_py_operators, gof.Variable):
dtype = property(lambda self: self.type.dtype)
format = property(lambda self: self.type.format)
def __str__(self):
return '%s{%s,%s}' % (
self.__class__.__name__,
self.format,
self.dtype)
def __repr__(self):
return str(self)
class SparseConstantSignature(tuple):
def __eq__(self, other):
(a, b), (x, y) = self, other
return (a == x and
(b.dtype == y.dtype) and
(type(b) == type(y)) and
(b.shape == y.shape) and
(abs(b - y).sum() < 1e-6 * b.nnz))
def __hash__(self):
(a, b) = self
return hash(type(self)) ^ hash(a) ^ hash(type(b))
def theano_hash(self):
(_, d) = self
return hash_from_sparse(d)
class SparseConstant(gof.Constant, _sparse_py_operators):
dtype = property(lambda self: self.type.dtype)
format = property(lambda self: self.type.format)
def signature(self):
assert self.data is not None
return SparseConstantSignature((self.type, self.data))
def __str__(self):
return '%s{%s,%s,shape=%s,nnz=%s}' % (
self.__class__.__name__,
self.format,
self.dtype,
self.data.shape,
self.data.nnz)
def __repr__(self):
return str(self)
SparseType.Variable = SparseVariable
SparseType.Constant = SparseConstant
# for more dtypes, call SparseType(format, dtype)
def matrix(format, name=None, dtype=None):
if dtype is None:
dtype = config.floatX
type = SparseType(format=format, dtype=dtype)
return type(name)
def csc_matrix(name=None, dtype=None):
return matrix('csc', name, dtype)
def csr_matrix(name=None, dtype=None):
return matrix('csr', name, dtype)
def bsr_matrix(name=None, dtype=None):
return matrix('bsr', name, dtype)
# for more dtypes, call SparseType(format, dtype)
csc_dmatrix = SparseType(format='csc', dtype='float64')
csr_dmatrix = SparseType(format='csr', dtype='float64')
bsr_dmatrix = SparseType(format='bsr', dtype='float64')
csc_fmatrix = SparseType(format='csc', dtype='float32')
csr_fmatrix = SparseType(format='csr', dtype='float32')
bsr_fmatrix = SparseType(format='bsr', dtype='float32')
all_dtypes = SparseType.dtype_set
complex_dtypes = [t for t in all_dtypes if t[:7] == 'complex']
float_dtypes = [t for t in all_dtypes if t[:5] == 'float']
int_dtypes = [t for t in all_dtypes if t[:3] == 'int']
uint_dtypes = [t for t in all_dtypes if t[:4] == 'uint']
continuous_dtypes = complex_dtypes + float_dtypes
discrete_dtypes = int_dtypes + uint_dtypes
# CONSTRUCTION
class CSMProperties(gof.Op):
# See doc in instance of this Op or function after this class definition.
# NOTE
# We won't implement infer_shape for this op now. This will
# ask that we implement an GetNNZ op, and this op will keep
# the dependence on the input of this op. So this won't help
# to remove computations in the graph. To remove computation,
# we will need to make an infer_sparse_pattern feature to
    # remove computations. Doing this is trickier than the
# infer_shape feature. For example, how do we handle the case
# when some op create some 0 values? So there is dependence
# on the values themselves. We could write an infer_shape for
    # the last output that is the shape, but I doubt this will
# get used.
# we don't return a view of the shape, we create a new ndarray from the
# shape tuple.
__props__ = ()
view_map = {0: [0], 1: [0], 2: [0]}
"""
    Indexing to specify what part of the data parameter
    should be used to construct the sparse matrix.
"""
def __init__(self, kmap=None):
if kmap is not None:
raise Exception("Do not use kmap, it is removed")
def make_node(self, csm):
csm = as_sparse_variable(csm)
assert csm.format in ["csr", "csc"]
data = tensor.TensorType(dtype=csm.type.dtype,
broadcastable=(False,))()
return gof.Apply(self, [csm],
[data, tensor.ivector(),
tensor.ivector(), tensor.ivector()])
def perform(self, node, inputs, out):
(csm,) = inputs
out[0][0] = csm.data
if str(csm.data.dtype) == 'int32':
out[0][0] = theano._asarray(out[0][0], dtype='int32')
# backport
out[1][0] = theano._asarray(csm.indices, dtype='int32')
out[2][0] = theano._asarray(csm.indptr, dtype='int32')
out[3][0] = theano._asarray(csm.shape, dtype='int32')
def grad(self, inputs, g):
# g[1:] is all integers, so their Jacobian in this op
# is 0. We thus don't need to worry about what their values
# are.
# if g[0] is disconnected, then this op doesn't contribute
# any gradient anywhere. but we know that at least one of
# g[1:] is connected, or this grad method wouldn't have been
# called, so we should report zeros
(csm,) = inputs
if isinstance(g[0].type, DisconnectedType):
return [csm.zeros_like()]
data, indices, indptr, shape = csm_properties(csm)
return [CSM(csm.format)(g[0], indices, indptr, shape)]
# don't make this a function or it breaks some optimizations below
csm_properties = CSMProperties()
"""
Extract all of .data, .indices, .indptr and .shape field.
For specific field, `csm_data`, `csm_indices`, `csm_indptr`
and `csm_shape` are provided.
Parameters
----------
csm
Sparse matrix in CSR or CSC format.
Returns
-------
(data, indices, indptr, shape), the properties of `csm`.
Notes
-----
The grad implemented is regular, i.e. not structured.
`infer_shape` method is not available for this op.
"""
def csm_data(csm):
"""
Return the data field of the sparse variable.
"""
return csm_properties(csm)[0]
def csm_indices(csm):
"""
Return the indices field of the sparse variable.
"""
return csm_properties(csm)[1]
def csm_indptr(csm):
"""
Return the indptr field of the sparse variable.
"""
return csm_properties(csm)[2]
def csm_shape(csm):
"""
Return the shape field of the sparse variable.
"""
return csm_properties(csm)[3]
class CSM(gof.Op):
# See doc in instance of this Op or function after this class definition.
"""
    Indexing to specify what part of the data parameter
should be used to construct the sparse matrix.
"""
__props__ = ('format',)
"""
Pre-computed hash value, defined by __init__.
"""
def __init__(self, format, kmap=None):
if format not in ('csr', 'csc'):
raise ValueError("format must be one of: 'csr', 'csc'", format)
self.format = format
if kmap is not None:
raise Exception("Do not use kmap, it is removed")
# should view the other inputs too, but viewing multiple
# inputs is not currently supported by the destroyhandler
self.view_map = {0: [0]}
def make_node(self, data, indices, indptr, shape):
data = tensor.as_tensor_variable(data)
if not isinstance(indices, gof.Variable):
indices_ = numpy.asarray(indices)
indices_32 = theano._asarray(indices, dtype='int32')
assert (indices_ == indices_32).all()
indices = indices_32
if not isinstance(indptr, gof.Variable):
indptr_ = numpy.asarray(indptr)
indptr_32 = theano._asarray(indptr, dtype='int32')
assert (indptr_ == indptr_32).all()
indptr = indptr_32
if not isinstance(shape, gof.Variable):
shape_ = numpy.asarray(shape)
shape_32 = theano._asarray(shape, dtype='int32')
assert (shape_ == shape_32).all()
shape = shape_32
indices = tensor.as_tensor_variable(indices)
indptr = tensor.as_tensor_variable(indptr)
shape = tensor.as_tensor_variable(shape)
if data.type.ndim != 1:
raise TypeError('data argument must be a vector', data.type,
data.type.ndim)
if indices.type.ndim != 1 or indices.type.dtype not in discrete_dtypes:
raise TypeError('indices must be vector of integers', indices,
indices.type)
if indptr.type.ndim != 1 or indptr.type.dtype not in discrete_dtypes:
            raise TypeError('indptr must be vector of integers', indptr,
indptr.type)
if shape.type.ndim != 1 or shape.type.dtype not in discrete_dtypes:
            raise TypeError('shape must be a vector of integers', shape,
                            shape.type)
return gof.Apply(self,
[data, indices, indptr, shape],
[SparseType(dtype=data.type.dtype,
format=self.format)()])
def perform(self, node, inputs, outputs):
# for efficiency, if remap does nothing, then do not apply it
(data, indices, indptr, shape) = inputs
(out,) = outputs
if len(shape) != 2:
raise ValueError('Shape should be an array of length 2')
if data.shape != indices.shape:
errmsg = ('Data (shape ' + repr(data.shape) +
' must have the same number of elements ' +
'as indices (shape' + repr(indices.shape) +
')')
raise ValueError(errmsg)
if self.format == 'csc':
out[0] = scipy.sparse.csc_matrix((data, indices.copy(),
indptr.copy()),
numpy.asarray(shape), copy=False)
else:
assert self.format == 'csr'
out[0] = scipy.sparse.csr_matrix((data, indices.copy(),
indptr.copy()), shape.copy(),
copy=False)
def connection_pattern(self, node):
return [[True], [False], [False], [False]]
def grad(self, inputs, gout):
(x_data, x_indices, x_indptr, x_shape) = inputs
(g_out,) = gout
g_data, g_indices, g_indptr, g_shape = csm_properties(g_out)
# unpack the data vector and wrap it as a 1d TensorType
g_data = csm_grad()(x_data, x_indices, x_indptr, x_shape,
g_data, g_indices, g_indptr, g_shape)
return [g_data, DisconnectedType()(), DisconnectedType()(), DisconnectedType()()]
def infer_shape(self, node, shapes):
        # node.inputs[3] is of length 2 as we only support 2-d sparse matrices.
return [(node.inputs[3][0], node.inputs[3][1])]
CSC = CSM('csc')
"""
Construct a CSC matrix from the internal representation.
Parameters
----------
data
One dimensional tensor representing the data of the sparse matrix to
construct.
indices
One dimensional tensor of integers representing the indices of the sparse
matrix to construct.
indptr
    One dimensional tensor of integers representing the index pointer for
the sparse matrix to construct.
shape
One dimensional tensor of integers representing the shape of the sparse
matrix to construct.
Returns
-------
sparse matrix
A sparse matrix having the properties specified by the inputs.
Notes
-----
The grad method returns a dense vector, so it provides a regular grad.
"""
CSR = CSM('csr')
"""
Construct a CSR matrix from the internal representation.
Parameters
----------
data
One dimensional tensor representing the data of the sparse matrix to
construct.
indices
One dimensional tensor of integers representing the indices of the sparse
matrix to construct.
indptr
    One dimensional tensor of integers representing the index pointer for
the sparse matrix to construct.
shape
One dimensional tensor of integers representing the shape of the sparse
matrix to construct.
Returns
-------
sparse matrix
A sparse matrix having the properties specified by the inputs.
Notes
-----
The grad method returns a dense vector, so it provides a regular grad.
"""
class CSMGrad(gof.op.Op):
# Note
# This Op computes the gradient of the CSM Op. CSM creates a matrix from
    # data, indices, and indptr vectors; its gradient is the gradient of
# the data vector only. There are two complexities to calculate this
# gradient:
# 1. The gradient may be sparser than the input matrix defined by (data,
# indices, indptr). In this case, the data vector of the gradient will have
# less elements than the data vector of the input because sparse formats
# remove 0s. Since we are only returning the gradient of the data vector,
# the relevant 0s need to be added back.
# 2. The elements in the sparse dimension are not guaranteed to be sorted.
# Therefore, the input data vector may have a different order than the
# gradient data vector.
__props__ = ()
def __init__(self, kmap=None):
if kmap is not None:
raise Exception("Do not use kmap, it is removed")
# This class always allocate a new output.
    # I keep this here to help GD understand what this kmap thing is.
# if self.kmap is None:
# self.view_map = {0: [1]}
def make_node(self, x_data, x_indices, x_indptr, x_shape,
g_data, g_indices, g_indptr, g_shape):
gout_data = g_data.type()
return gof.Apply(self, [x_data, x_indices, x_indptr, x_shape,
g_data, g_indices, g_indptr, g_shape], [gout_data])
def perform(self, node, inputs, outputs):
(x_data, x_indices, x_indptr, x_shape,
g_data, g_indices, g_indptr, g_shape) = inputs
(g_out,) = outputs
if len(x_indptr) - 1 == x_shape[0]:
sp_dim = x_shape[1]
else:
sp_dim = x_shape[0]
g_row = numpy.zeros(sp_dim, dtype=g_data.dtype)
gout_data = numpy.zeros(x_data.shape, dtype=node.outputs[0].dtype)
for i in range(len(x_indptr) - 1):
for j_ptr in range(g_indptr[i], g_indptr[i + 1]):
g_row[g_indices[j_ptr]] += g_data[j_ptr]
for j_ptr in range(x_indptr[i], x_indptr[i + 1]):
gout_data[j_ptr] = g_row[x_indices[j_ptr]]
for j_ptr in range(g_indptr[i], g_indptr[i + 1]):
g_row[g_indices[j_ptr]] = 0
g_out[0] = gout_data
def infer_shape(self, node, shapes):
return [shapes[1]]
csm_grad = CSMGrad
class Cast(gof.op.Op):
# See doc in instance of this Op or function after this class definition.
__props__ = ("out_type",)
def __init__(self, out_type):
self.out_type = out_type
def make_node(self, x):
x = as_sparse_variable(x)
assert x.format in ["csr", "csc"]
return gof.Apply(
self, [x],
[SparseType(dtype=self.out_type, format=x.format)()])
def perform(self, node, inputs, outputs):
(x,) = inputs
(out,) = outputs
assert _is_sparse(x)
out[0] = x.astype(self.out_type)
def grad(self, inputs, outputs_gradients):
gz = outputs_gradients[0]
if gz.dtype in complex_dtypes:
raise NotImplementedError("grad not implemented for complex types")
if inputs[0].dtype in complex_dtypes:
raise NotImplementedError("grad not implemented for complex types")
if gz.dtype in discrete_dtypes:
if inputs[0].dtype in discrete_dtypes:
return [inputs[0].zeros_like(dtype=theano.config.floatX)]
else:
return [inputs[0].zeros_like()]
else:
if inputs[0].dtype in discrete_dtypes:
return [gz]
else:
return [Cast(inputs[0].dtype)(gz)]
def infer_shape(self, node, ins_shapes):
return ins_shapes
def __str__(self):
return "%s(%s)" % (self.__class__.__name__, self.out_type)
bcast = Cast('int8')
wcast = Cast('int16')
icast = Cast('int32')
lcast = Cast('int64')
fcast = Cast('float32')
dcast = Cast('float64')
ccast = Cast('complex64')
zcast = Cast('complex128')
def cast(variable, dtype):
"""
Cast sparse variable to the desired dtype.
Parameters
----------
variable
Sparse matrix.
dtype
The dtype wanted.
Returns
-------
Same as `x` but having `dtype` as dtype.
Notes
-----
The grad implemented is regular, i.e. not structured.
"""
return Cast(dtype)(variable)
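# Usage sketch (illustrative only; "_example_cast" is not part of the
# original module). It casts a float64 csc matrix to float32.
def _example_cast():
    x = SparseType('csc', dtype='float64')()
    f = theano.function([x], cast(x, 'float32'))
    return f(scipy.sparse.csc_matrix(numpy.eye(2)))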
#
# Conversion
#
class DenseFromSparse(gof.op.Op):
# See doc in instance of this Op or function after this class definition.
__props__ = () # We don't put sparse_grad in the props.
def __init__(self, structured=True):
self.sparse_grad = structured
def __str__(self):
return "%s{structured_grad=%s}" % (
self.__class__.__name__,
self.sparse_grad)
def make_node(self, x):
x = as_sparse_variable(x)
return gof.Apply(self,
[x],
[tensor.TensorType(dtype=x.type.dtype,
broadcastable=(False, False))()])
def perform(self, node, inputs, outputs):
(x,) = inputs
(out,) = outputs
if _is_dense(x):
print((
"WARNING: You just called DenseFromSparse on a dense matrix."
), file=sys.stderr)
out[0] = x
else:
out[0] = x.toarray()
assert _is_dense(out[0])
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
if self.sparse_grad:
left = sp_ones_like(x)
right = gz
# Do upcasting if necessary to avoid an unimplemented case
# of mul
if right.dtype == 'float64' and left.dtype == 'float32':
left = left.astype('float64')
if right.dtype == 'float32' and left.dtype == 'float64':
right = right.astype('float64')
return [left * right]
else:
return [SparseFromDense(x.type.format)(gz)]
def infer_shape(self, node, shapes):
return [shapes[0]]
dense_from_sparse = DenseFromSparse()
"""
Convert a sparse matrix to a dense one.
Parameters
----------
x
A sparse matrix.
Returns
-------
theano.tensor.matrix
A dense matrix, the same as `x`.
Notes
-----
The grad implementation can be controlled through the constructor via the
`structured` parameter. `True` will provide a structured grad while `False`
will provide a regular grad. By default, the grad is structured.
"""
class SparseFromDense(gof.op.Op):
__props__ = ()
def __init__(self, format):
self.format = format
def __str__(self):
return "%s{%s}" % (
self.__class__.__name__,
self.format)
def make_node(self, x):
x = tensor.as_tensor_variable(x)
if x.ndim > 2:
raise TypeError(
"Theano does not have sparse tensor types with more "
"than 2 dimensions, but %s.ndim = %i" % (x, x.ndim))
elif x.ndim == 1:
x = x.dimshuffle('x', 0)
elif x.ndim == 0:
x = x.dimshuffle('x', 'x')
else:
assert x.ndim == 2
return gof.Apply(self,
[x],
[SparseType(dtype=x.type.dtype,
format=self.format)()])
def perform(self, node, inputs, outputs):
(x,) = inputs
(out,) = outputs
out[0] = SparseType.format_cls[self.format](x)
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
gx = dense_from_sparse(gz)
gx = tensor.patternbroadcast(gx, x.broadcastable)
return gx,
def infer_shape(self, node, shapes):
return [shapes[0]]
csr_from_dense = SparseFromDense('csr')
"""
Convert a dense matrix to a sparse csr matrix.
Parameters
----------
x
A dense matrix.
Returns
-------
sparse matrix
The same as `x` in a sparse csr matrix format.
"""
csc_from_dense = SparseFromDense('csc')
"""
Convert a dense matrix to a sparse csc matrix.
Parameters
----------
x
A dense matrix.
Returns
-------
sparse matrix
The same as `x` in a sparse csc matrix format.
"""
# Indexing
class GetItemList(gof.op.Op):
__props__ = ()
def infer_shape(self, node, shapes):
return [(shapes[1][0], shapes[0][1])]
def make_node(self, x, index):
x = as_sparse_variable(x)
assert x.format in ["csr", "csc"]
ind = tensor.as_tensor_variable(index)
assert ind.ndim == 1
assert "int" in ind.dtype
return gof.Apply(self, [x, ind], [x.type()])
def perform(self, node, inp, outputs):
(out,) = outputs
x = inp[0]
indices = inp[1]
assert _is_sparse(x)
out[0] = x[indices]
def grad(self, inputs, g_outputs):
x, indices = inputs
gout, = g_outputs
return [GetItemListGrad(self)(x, indices, gout),
grad_undefined(self, 1, indices, "No gradient for this input")]
get_item_list = GetItemList()
"""
Select rows of a sparse matrix, returning them as a new sparse matrix.
Parameters
----------
x
Sparse matrix.
index
List of rows.
Returns
-------
sparse matrix
The corresponding rows in `x`.
"""
class GetItemListGrad(gof.op.Op):
__props__ = ()
def infer_shape(self, node, shapes):
return [(shapes[0])]
def make_node(self, x, index, gz):
x = as_sparse_variable(x)
gz = as_sparse_variable(gz)
assert x.format in ["csr", "csc"]
assert gz.format in ["csr", "csc"]
ind = tensor.as_tensor_variable(index)
assert ind.ndim == 1
assert "int" in ind.dtype
scipy_ver = [int(n) for n in scipy.__version__.split('.')[:2]]
if not scipy_ver >= [0, 13]:
raise NotImplementedError("Scipy version is to old")
return gof.Apply(self, [x, ind, gz], [x.type()])
def perform(self, node, inp, outputs):
(out,) = outputs
x = inp[0]
indices = inp[1]
gz = inp[2]
if x.format in ["csr"]:
y = scipy.sparse.csr_matrix((x.shape[0], x.shape[1]))
else:
y = scipy.sparse.csc_matrix((x.shape[0], x.shape[1]))
for a in range(0, len(indices)):
y[indices[a]] = gz[a]
out[0] = y
get_item_list_grad = GetItemListGrad()
class GetItem2Lists(gof.op.Op):
__props__ = ()
def make_node(self, x, ind1, ind2):
x = as_sparse_variable(x)
assert x.format in ["csr", "csc"]
ind1 = tensor.as_tensor_variable(ind1)
ind2 = tensor.as_tensor_variable(ind2)
assert "int" in ind1.dtype
assert "int" in ind2.dtype
return gof.Apply(self, [x, ind1, ind2],
[theano.tensor.vector()])
def perform(self, node, inp, outputs):
(out,) = outputs
x = inp[0]
ind1 = inp[1]
ind2 = inp[2]
out[0] = numpy.asarray(x[ind1, ind2]).flatten()
"""
Here scipy returns the corresponding elements in a matrix which isn't
what we are aiming for. Using asarray and flatten, out[0] becomes an
array.
"""
def grad(self, inputs, g_outputs):
x, ind1, ind2 = inputs
gout, = g_outputs
return [GetItem2ListsGrad(self)(x, ind1, ind2, gout),
grad_undefined(self, 1, ind1, "No gradient for this input"),
grad_undefined(self, 1, ind2, "No gradient for this input")]
get_item_2lists = GetItem2Lists()
"""
Select elements of sparse matrix, returning them in a vector.
Parameters
----------
x
Sparse matrix.
index
List of two lists, first list indicating the row of each element and second
list indicating its column.
Returns
-------
theano.tensor.vector
The corresponding elements in `x`.
"""
class GetItem2ListsGrad(gof.op.Op):
__props__ = ()
def infer_shape(self, node, shapes):
return [(shapes[0])]
def make_node(self, x, ind1, ind2, gz):
x = as_sparse_variable(x)
assert x.format in ["csr", "csc"]
ind1 = tensor.as_tensor_variable(ind1)
ind2 = tensor.as_tensor_variable(ind2)
assert ind1.ndim == 1
assert ind2.ndim == 1
assert "int" in ind1.dtype
assert "int" in ind2.dtype
return gof.Apply(self, [x, ind1, ind2, gz], [x.type()])
def perform(self, node, inp, outputs):
(out,) = outputs
x = inp[0]
ind1 = inp[1]
ind2 = inp[2]
gz = inp[3]
if x.format in ["csr"]:
y = scipy.sparse.csr_matrix((x.shape[0], x.shape[1]))
else:
y = scipy.sparse.csc_matrix((x.shape[0], x.shape[1]))
z = 0
for z in range(0, len(ind1)):
y[(ind1[z], ind2[z])] = gz[z]
out[0] = y
get_item_2lists_grad = GetItem2ListsGrad()
class GetItem2d(gof.op.Op):
# See doc in instance of this Op or function after this class definition.
__props__ = ()
# Fred:Too complicated for now. If you need it, look at
# the Subtensor.infer_shape.
# def infer_shape(self, node, i0_shapes):
# return i0_shapes
def make_node(self, x, index):
scipy_ver = [int(n) for n in scipy.__version__.split('.')[:2]]
x = as_sparse_variable(x)
assert x.format in ["csr", "csc"]
assert len(index) in [1, 2]
input_op = [x]
generic_None = theano.gof.Constant(theano.gof.generic, None)
for ind in index:
if isinstance(ind, slice):
                # in case the slice is written with theano variables
start = ind.start
stop = ind.stop
step = ind.step
# If start or stop or step are None, make them a Generic
# constant. Else, they should be converted to Tensor Variables
# of dimension 1 and int/uint dtype.
if scipy_ver < [0, 14] and ind.step is not None:
raise ValueError(
                        'Slice with step is not supported with the current'
                        ' version of Scipy.')
if ind.step is None or ind.step == 1:
step = generic_None
else:
if not isinstance(step, gof.Variable):
step = tensor.as_tensor_variable(step)
if not (step.ndim == 0 and step.dtype in
tensor.discrete_dtypes):
raise ValueError((
"Impossible to index into a sparse matrix with "
"slice where step=%s" % step),
step.ndim, step.dtype)
if start is None:
start = generic_None
else:
if not isinstance(start, gof.Variable):
start = tensor.as_tensor_variable(start)
if not (start.ndim == 0 and start.dtype in
tensor.discrete_dtypes):
raise ValueError((
"Impossible to index into a sparse matrix with "
"slice where start=%s" % start),
start.ndim, start.dtype)
if stop is None:
stop = generic_None
else:
if not isinstance(stop, gof.Variable):
stop = tensor.as_tensor_variable(stop)
if not (stop.ndim == 0 and stop.dtype in
tensor.discrete_dtypes):
raise ValueError((
"Impossible to index into a sparse matrix with "
"slice where stop=%s" % stop),
stop.ndim, stop.dtype)
elif ((isinstance(ind, gof.Variable) and
getattr(ind, 'ndim', -1) == 0) or
numpy.isscalar(ind)):
raise NotImplementedError(
                    'Theano has no sparse vectors. ' +
                    'Use X[a:b, c:d], X[a:b, c:c+1] or X[a:b] instead.')
else:
raise ValueError((
'Advanced indexing is not implemented for sparse '
'matrices. Argument not supported: %s' % ind))
input_op += [start, stop, step]
if len(index) == 1:
input_op += [generic_None, generic_None, generic_None]
return gof.Apply(self, input_op, [x.type()])
def perform(self, node, inputs, outputs):
(x, start1, stop1, step1, start2, stop2, step2) = inputs
(out,) = outputs
assert _is_sparse(x)
out[0] = x[start1:stop1:step1, start2:stop2:step2]
get_item_2d = GetItem2d()
"""
Implement a subtensor of sparse variable, returning a sparse matrix.
If you want to take only one element of a sparse matrix see
`GetItemScalar` that returns a tensor scalar.
Parameters
----------
x
Sparse matrix.
index
Tuple of slice object.
Returns
-------
sparse matrix
The corresponding slice in `x`.
Notes
-----
Subtensor selection always returns a matrix, so indexing with [a:b, c:d]
is forced. If one index is a scalar, for instance, x[a:b, c] or x[a, b:c],
an error will be raised. Use instead x[a:b, c:c+1] or x[a:a+1, b:c].
The above indexing methods are not supported because the return value
would be a sparse matrix rather than a sparse vector, which is a
deviation from numpy indexing rule. This decision is made largely
to preserve consistency between numpy and theano. This may be revised
when sparse vectors are supported.
The grad is not implemented for this op.
"""
class GetItemScalar(gof.op.Op):
# See doc in instance of this Op or function after this class definition.
__props__ = ()
def infer_shape(self, node, shapes):
return [()]
def make_node(self, x, index):
x = as_sparse_variable(x)
assert x.format in ["csr", "csc"]
assert len(index) == 2
input_op = [x]
for ind in index:
if isinstance(ind, slice):
raise Exception("GetItemScalar called with a slice as index!")
# in case of indexing using int instead of theano variable
elif isinstance(ind, integer_types):
ind = theano.tensor.constant(ind)
input_op += [ind]
# in case of indexing using theano variable
elif ind.ndim == 0:
input_op += [ind]
else:
                raise NotImplementedError()
return gof.Apply(self, input_op, [tensor.scalar(dtype=x.dtype)])
def perform(self, node, inputs, outputs):
(x, ind1, ind2) = inputs
(out,) = outputs
assert _is_sparse(x)
out[0] = theano._asarray(x[ind1, ind2], x.dtype)
get_item_scalar = GetItemScalar()
"""
Implement a subtensor of a sparse variable that takes two scalars as index and
returns a scalar.
If you want to take a slice of a sparse matrix see `GetItem2d` that returns a
sparse matrix.
Parameters
----------
x
Sparse matrix.
index
Tuple of scalars.
Returns
-------
theano.tensor.scalar
The corresponding item in `x`.
Notes
-----
The grad is not implemented for this op.
"""
# Linear Algebra
class Transpose(gof.op.Op):
# See doc in instance of this Op or function after this class definition.
view_map = {0: [0]}
format_map = {'csr': 'csc',
'csc': 'csr'}
__props__ = ()
def __str__(self):
return "Sparse" + self.__class__.__name__
def make_node(self, x):
x = as_sparse_variable(x)
assert x.format in ["csr", "csc"]
return gof.Apply(self,
[x],
[SparseType(dtype=x.type.dtype,
format=self.format_map[x.type.format])()])
def perform(self, node, inputs, outputs):
(x,) = inputs
(out,) = outputs
assert _is_sparse(x)
out[0] = x.transpose()
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
assert _is_sparse_variable(x) and _is_sparse_variable(gz)
return transpose(gz),
def infer_shape(self, node, shapes):
return [shapes[0][::-1]]
transpose = Transpose()
"""
Return the transpose of the sparse matrix.
Parameters
----------
x
Sparse matrix.
Returns
-------
sparse matrix
`x` transposed.
Notes
-----
The returned matrix will not be in the same format: a `csc` matrix will be
changed into a `csr` matrix and a `csr` matrix into a `csc` matrix.
The grad is regular, i.e. not structured.
"""
class Neg(gof.op.Op):
# See doc in instance of this Op or function after this class definition.
__props__ = ()
def __str__(self):
return "Sparse" + self.__class__.__name__
def make_node(self, x):
x = as_sparse_variable(x)
assert x.format in ["csr", "csc"]
return gof.Apply(self, [x], [x.type()])
def perform(self, node, inputs, outputs):
(x,) = inputs
(out,) = outputs
assert _is_sparse(x)
out[0] = -x
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
assert _is_sparse_variable(x) and _is_sparse_variable(gz)
return -gz,
def infer_shape(self, node, shapes):
return [shapes[0]]
neg = Neg()
"""
Return the negation of the sparse matrix.
Parameters
----------
x
Sparse matrix.
Returns
-------
sparse matrix
-`x`.
Notes
-----
The grad is regular, i.e. not structured.
"""
class ColScaleCSC(gof.op.Op):
    # Scale each column of a sparse matrix by the corresponding
    # element of a dense vector
    # :param x: A sparse matrix.
    # :param s: A dense vector with length equal to the number
    #           of columns of `x`.
    # :return: A sparse matrix in the same format as `x` in which
    #          each column has been multiplied by the corresponding
    #          element of `s`.
# :note: The grad implemented is structured.
__props__ = ()
def make_node(self, x, s):
if x.format != 'csc':
raise ValueError('x was not a csc matrix')
return gof.Apply(self, [x, s], [x.type()])
def perform(self, node, inputs, outputs):
(x, s) = inputs
(z,) = outputs
M, N = x.shape
assert x.format == 'csc'
assert s.shape == (N, )
y = x.copy()
for j in xrange(0, N):
y.data[y.indptr[j]: y.indptr[j + 1]] *= s[j]
z[0] = y
def grad(self, inputs, gout):
(x, s) = inputs
(gz,) = gout
return [col_scale(gz, s), sp_sum(x * gz, axis=0)]
def infer_shape(self, node, ins_shapes):
return [ins_shapes[0]]
class RowScaleCSC(gof.op.Op):
# Scale each row of a sparse matrix by the corresponding element of
# a dense vector
# :param x: A sparse matrix.
# :param s: A dense vector with length equal to the number
# of rows of `x`.
    # :return: A sparse matrix in the same format as `x` in which
    #          each row has been multiplied by the corresponding
    #          element of `s`.
# :note: The grad implemented is structured.
view_map = {0: [0]}
__props__ = ()
def make_node(self, x, s):
x = as_sparse_variable(x)
assert x.format in ["csr", "csc"]
return gof.Apply(self, [x, s], [x.type()])
def perform(self, node, inputs, outputs):
(x, s) = inputs
(z,) = outputs
M, N = x.shape
assert x.format == 'csc'
assert s.shape == (M,)
indices = x.indices
indptr = x.indptr
y_data = x.data.copy()
for j in xrange(0, N):
for i_idx in xrange(indptr[j], indptr[j + 1]):
y_data[i_idx] *= s[indices[i_idx]]
z[0] = scipy.sparse.csc_matrix((y_data, indices, indptr), (M, N))
def grad(self, inputs, gout):
(x, s) = inputs
(gz,) = gout
return [row_scale(gz, s), sp_sum(x * gz, axis=1)]
def infer_shape(self, node, ins_shapes):
return [ins_shapes[0]]
def col_scale(x, s):
"""
    Scale each column of a sparse matrix by the corresponding element of a
dense vector.
Parameters
----------
x
A sparse matrix.
s
A dense vector with length equal to the number of columns of `x`.
Returns
-------
    A sparse matrix in the same format as `x` in which each column has been
    multiplied by the corresponding element of `s`.
Notes
-----
The grad implemented is structured.
"""
if x.format == 'csc':
return ColScaleCSC()(x, s)
elif x.format == 'csr':
return RowScaleCSC()(x.T, s).T
else:
raise NotImplementedError()
def row_scale(x, s):
"""
Scale each row of a sparse matrix by the corresponding element of
a dense vector.
Parameters
----------
x
A sparse matrix.
s
A dense vector with length equal to the number of rows of `x`.
Returns
-------
A sparse matrix
        A sparse matrix in the same format as `x` in which each row has been
multiplied by the corresponding element of `s`.
Notes
-----
The grad implemented is structured.
"""
return col_scale(x.T, s).T
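# Usage sketch (illustrative only; "_example_scaling" is not part of the
# original module). It scales the columns and the rows of a csc matrix by a
# dense vector.
def _example_scaling():
    x = SparseType('csc', dtype='float64')()
    s = tensor.dvector('s')
    f = theano.function([x, s], [col_scale(x, s), row_scale(x, s)])
    m = scipy.sparse.csc_matrix(numpy.eye(3))
    return f(m, numpy.array([1., 2., 3.]))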
class SpSum(gof.op.Op):
# See doc in instance of this Op or function after this class definition.
__props__ = ("axis",)
# WARNING: judgement call...
    # We are not using the structured flag in the comparison or hashing
    # because it doesn't change the perform method. Therefore, we
    # *do* want Sums with different structured values to be merged
    # by the merge optimization, and this requires them to compare equal.
def __init__(self, axis=None, sparse_grad=True):
super(SpSum, self).__init__()
self.axis = axis
self.structured = sparse_grad
if self.axis not in (None, 0, 1):
raise ValueError('Illegal value for self.axis.')
def make_node(self, x):
x = as_sparse_variable(x)
assert x.format in ["csr", "csc"]
b = ()
if self.axis is not None:
b = (False,)
z = tensor.TensorType(broadcastable=b, dtype=x.dtype)()
return gof.Apply(self, [x], [z])
def perform(self, node, inputs, outputs):
(x,) = inputs
(z,) = outputs
if self.axis is None:
z[0] = numpy.asarray(x.sum())
else:
z[0] = numpy.asarray(x.sum(self.axis)).ravel()
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
if x.dtype not in continuous_dtypes:
return [x.zeros_like(dtype=theano.config.floatX)]
if self.structured:
if self.axis is None:
r = gz * theano.sparse.sp_ones_like(x)
elif self.axis == 0:
r = col_scale(theano.sparse.sp_ones_like(x), gz)
elif self.axis == 1:
r = row_scale(theano.sparse.sp_ones_like(x), gz)
else:
raise ValueError('Illegal value for self.axis.')
else:
o_format = x.format
x = dense_from_sparse(x)
if _is_sparse_variable(gz):
gz = dense_from_sparse(gz)
if self.axis is None:
r = tensor.second(x, gz)
else:
ones = tensor.ones_like(x)
if self.axis == 0:
r = tensor.addbroadcast(gz.dimshuffle('x', 0), 0) * ones
elif self.axis == 1:
r = tensor.addbroadcast(gz.dimshuffle(0, 'x'), 1) * ones
else:
raise ValueError('Illegal value for self.axis.')
r = SparseFromDense(o_format)(r)
return [r]
def infer_shape(self, node, shapes):
r = None
if self.axis is None:
r = [()]
elif self.axis == 0:
r = [(shapes[0][1],)]
else:
r = [(shapes[0][0],)]
return r
def __str__(self):
return self.__class__.__name__ + "{axis=%s}" % str(self.axis)
def sp_sum(x, axis=None, sparse_grad=False):
"""
Calculate the sum of a sparse matrix along the specified axis.
    It performs a reduction along the specified axis. When `axis` is `None`,
it is applied along all axes.
Parameters
----------
x
Sparse matrix.
axis
Axis along which the sum is applied. Integer or `None`.
sparse_grad : bool
`True` to have a structured grad.
Returns
-------
object
The sum of `x` in a dense format.
Notes
-----
The grad implementation is controlled with the `sparse_grad` parameter.
`True` will provide a structured grad and `False` will provide a regular
grad. For both choices, the grad returns a sparse matrix having the same
format as `x`.
This op does not return a sparse matrix, but a dense tensor matrix.
"""
return SpSum(axis, sparse_grad)(x)
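# Usage sketch (illustrative only; "_example_sp_sum" is not part of the
# original module). It sums over the whole matrix and along each axis.
def _example_sp_sum():
    x = SparseType('csr', dtype='float64')()
    f = theano.function([x], [sp_sum(x), sp_sum(x, axis=0), sp_sum(x, axis=1)])
    return f(scipy.sparse.csr_matrix(numpy.eye(3)))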
class Diag(gof.op.Op):
# See doc in instance of this Op or function after this class definition.
__props__ = ()
def make_node(self, x):
x = as_sparse_variable(x)
assert x.format in ["csr", "csc"]
return gof.Apply(self, [x], [tensor.tensor(broadcastable=(False,),
dtype=x.dtype)])
def perform(self, node, inputs, outputs):
(x,) = inputs
(z,) = outputs
N, M = x.shape
if N != M:
raise ValueError('Diag only apply on square matrix')
z[0] = x.diagonal()
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
return [square_diagonal(gz)]
def infer_shape(self, nodes, shapes):
return [(tensor.minimum(*shapes[0]), )]
diag = Diag()
"""
Extract the diagonal of a square sparse matrix as a dense vector.
Parameters
----------
x
A square sparse matrix in csc format.
Returns
-------
theano.tensor.vector
A dense vector representing the diagonal elements.
Notes
-----
The grad implemented is regular, i.e. not structured, since the output is a
dense vector.
"""
class SquareDiagonal(gof.op.Op):
# See doc in instance of this Op or function after this class definition.
__props__ = ()
def make_node(self, diag):
diag = tensor.as_tensor_variable(diag)
if diag.type.ndim != 1:
raise TypeError('data argument must be a vector', diag.type)
return gof.Apply(self, [diag],
[SparseType(dtype=diag.dtype, format='csc')()])
def perform(self, node, inputs, outputs):
(z,) = outputs
diag = inputs[0]
N = len(diag)
data = diag[:N]
indices = list(range(N))
indptr = list(range(N + 1))
tup = (data, indices, indptr)
z[0] = scipy.sparse.csc_matrix(tup, copy=True)
def grad(self, inputs, gout):
(gz,) = gout
return [diag(gz)]
def infer_shape(self, nodes, shapes):
return [(shapes[0][0], shapes[0][0])]
square_diagonal = SquareDiagonal()
"""
Return a square sparse (csc) matrix whose diagonal is given by the dense vector
argument.
Parameters
----------
x
Dense vector for the diagonal.
Returns
-------
sparse matrix
A sparse matrix having `x` as diagonal.
Notes
-----
The grad implemented is regular, i.e. not structured.
"""
class EnsureSortedIndices(gof.op.Op):
# See doc in instance of this Op or function after this class definition.
__props__ = ("inplace",)
def __init__(self, inplace):
self.inplace = inplace
if self.inplace:
self.view_map = {0: [0]}
def make_node(self, x):
x = as_sparse_variable(x)
assert x.format in ["csr", "csc"]
return gof.Apply(self, [x], [x.type()])
def perform(self, node, inputs, outputs):
(x,) = inputs
(z,) = outputs
if self.inplace:
z[0] = x.sort_indices()
else:
z[0] = x.sorted_indices()
def grad(self, inputs, output_grad):
return [output_grad[0]]
def infer_shape(self, node, i0_shapes):
return i0_shapes
def __str__(self):
if self.inplace:
return self.__class__.__name__ + "{inplace}"
else:
return self.__class__.__name__ + "{no_inplace}"
ensure_sorted_indices = EnsureSortedIndices(inplace=False)
"""
Re-sort indices of a sparse matrix.
CSR column indices are not necessarily sorted. Likewise
for CSC row indices. Use `ensure_sorted_indices` when sorted
indices are required (e.g. when passing data to other
libraries).
Parameters
----------
x
A sparse matrix.
Returns
-------
sparse matrix
The same as `x` with indices sorted.
Notes
-----
The grad implemented is regular, i.e. not structured.
"""
def clean(x):
"""
Remove explicit zeros from a sparse matrix, and re-sort indices.
CSR column indices are not necessarily sorted. Likewise
for CSC row indices. Use `clean` when sorted
indices are required (e.g. when passing data to other
libraries) and to ensure there are no zeros in the data.
Parameters
----------
x
A sparse matrix.
Returns
-------
A sparse matrix
The same as `x` with indices sorted and zeros
removed.
Notes
-----
The grad implemented is regular, i.e. not structured.
"""
return ensure_sorted_indices(remove0(x))
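# Usage sketch (illustrative only; "_example_clean" is not part of the
# original module). The input stores an explicit zero at (0, 0); clean()
# drops it and re-sorts the indices.
def _example_clean():
    x = SparseType('csr', dtype='float64')()
    f = theano.function([x], clean(x))
    data = numpy.array([0., 5.])
    indices = numpy.array([0, 1], dtype='int32')
    indptr = numpy.array([0, 2, 2], dtype='int32')
    m = scipy.sparse.csr_matrix((data, indices, indptr), shape=(2, 2))
    return f(m)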
class AddSS(gof.op.Op):
# add(sparse, sparse).
# see the doc of add() for more detail.
__props__ = ()
def make_node(self, x, y):
x, y = map(as_sparse_variable, [x, y])
assert x.format in ["csr", "csc"]
assert y.format in ["csr", "csc"]
out_dtype = scalar.upcast(x.type.dtype, y.type.dtype)
return gof.Apply(self,
[x, y],
[SparseType(dtype=out_dtype,
format=x.type.format)()])
def perform(self, node, inputs, outputs):
(x, y) = inputs
(out,) = outputs
assert _is_sparse(x) and _is_sparse(y)
assert x.shape == y.shape
out[0] = x + y
def grad(self, inputs, gout):
(x, y) = inputs
(gz,) = gout
assert _is_sparse_variable(x) and _is_sparse_variable(y)
assert _is_sparse_variable(gz)
return gz, gz
def infer_shape(self, node, shapes):
return [shapes[0]]
add_s_s = AddSS()
class AddSSData(gof.op.Op):
# See doc in instance of this Op or function after this class definition.
__props__ = ()
def make_node(self, x, y):
x, y = map(as_sparse_variable, [x, y])
assert x.format in ["csr", "csc"]
assert y.format in ["csr", "csc"]
if x.type.dtype != y.type.dtype:
raise NotImplementedError()
if x.type.format != y.type.format:
raise NotImplementedError()
return gof.Apply(self,
[x, y],
[SparseType(dtype=x.type.dtype,
format=x.type.format)()])
def perform(self, node, inputs, outputs):
(x, y) = inputs
(out,) = outputs
assert _is_sparse(x) and _is_sparse(y)
assert x.shape == y.shape
assert x.data.shape == y.data.shape
out[0] = x.copy()
out[0].data += y.data
def grad(self, inputs, gout):
(gz,) = gout
is_continuous = [(i.dtype in continuous_dtypes)
for i in inputs]
derivative = {True: gz, False: None}
return [derivative[b] for b in is_continuous]
def infer_shape(self, node, ins_shapes):
return [ins_shapes[0]]
add_s_s_data = AddSSData()
"""
Add two sparse matrices assuming they have the same sparsity pattern.
Parameters
----------
x
Sparse matrix.
y
Sparse matrix.
Returns
-------
A sparse matrix
The sum of the two sparse matrices element wise.
Notes
-----
`x` and `y` are assumed to have the same sparsity pattern.
The grad implemented is structured.
"""
class AddSD(gof.op.Op):
    # add(sparse, dense).
# see the doc of add() for more detail.
__props__ = ()
def make_node(self, x, y):
x, y = as_sparse_variable(x), tensor.as_tensor_variable(y)
assert x.format in ["csr", "csc"]
out_dtype = scalar.upcast(x.type.dtype, y.type.dtype)
# The magic number two here arises because L{scipy.sparse}
# objects must be matrices (have dimension 2)
assert y.type.ndim == 2
return gof.Apply(self,
[x, y],
[tensor.TensorType(dtype=out_dtype,
broadcastable=y.type.broadcastable
)()])
def perform(self, node, inputs, outputs):
(x, y) = inputs
(out,) = outputs
assert _is_dense(y)
        # The asarray is needed as in some cases, this returns a
# numpy.matrixlib.defmatrix.matrix object and not an ndarray.
out[0] = theano._asarray(x + y, dtype=node.outputs[0].type.dtype)
def grad(self, inputs, gout):
(x, y) = inputs
(gz,) = gout
assert _is_sparse_variable(x) and _is_dense_variable(y)
assert _is_dense_variable(gz)
return sp_ones_like(x) * gz, gz
def infer_shape(self, node, shapes):
return [shapes[1]]
add_s_d = AddSD()
class StructuredAddSV(gof.op.Op):
__props__ = ()
def make_node(self, x, y):
x = as_sparse_variable(x)
assert x.format in ["csr", "csc"]
y = tensor.as_tensor_variable(y)
assert y.type.ndim == 1
if x.type.dtype != y.type.dtype:
raise NotImplementedError()
return gof.Apply(self,
[x, y],
[SparseType(dtype=x.type.dtype,
format=x.type.format)()])
def perform(self, node, inputs, outputs):
(x, y) = inputs
(out,) = outputs
assert _is_sparse(x) and not _is_sparse(y)
assert x.shape[1] == y.shape[0]
out[0] = x.__class__(x + (x.toarray() != 0) * y)
def grad(self, inputs, gout):
(x, y) = inputs
(gz,) = gout
assert _is_sparse_variable(x) and not _is_sparse_variable(y)
assert _is_sparse_variable(gz)
return gz, sp_sum(gz, axis=0, sparse_grad=True)
def infer_shape(self, node, ins_shapes):
return [ins_shapes[0]]
structured_add_s_v = StructuredAddSV()
"""
Structured addition of a sparse matrix and a dense vector.
The elements of the vector are only added to the corresponding
non-zero elements of the sparse matrix. Therefore, this operation
outputs another sparse matrix.
Parameters
----------
x
Sparse matrix.
y
Tensor type vector.
Returns
-------
A sparse matrix
A sparse matrix containing the addition of the vector to
the data of the sparse matrix.
Notes
-----
The grad implemented is structured since the op is structured.
"""
def add(x, y):
"""
Add two matrices, at least one of which is sparse.
This method will provide the right op according
to the inputs.
Parameters
----------
x
A matrix variable.
y
A matrix variable.
Returns
-------
A sparse matrix
`x` + `y`
Notes
-----
At least one of `x` and `y` must be a sparse matrix.
    The grad will be structured only when one of the variables is a dense
    matrix.
"""
if hasattr(x, 'getnnz'):
x = as_sparse_variable(x)
if hasattr(y, 'getnnz'):
y = as_sparse_variable(y)
if not isinstance(x, theano.Variable):
x = theano.tensor.as_tensor_variable(x)
if not isinstance(y, theano.Variable):
y = theano.tensor.as_tensor_variable(y)
x_is_sparse_variable = _is_sparse_variable(x)
y_is_sparse_variable = _is_sparse_variable(y)
assert x_is_sparse_variable or y_is_sparse_variable
if x_is_sparse_variable and y_is_sparse_variable:
return add_s_s(x, y)
elif x_is_sparse_variable and not y_is_sparse_variable:
return add_s_d(x, y)
elif y_is_sparse_variable and not x_is_sparse_variable:
return add_s_d(y, x)
else:
raise NotImplementedError()
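# Usage sketch (illustrative only; "_example_add" is not part of the original
# module). Adding a sparse and a dense matrix returns a dense result.
def _example_add():
    x = SparseType('csr', dtype='float64')()
    y = tensor.dmatrix('y')
    f = theano.function([x, y], add(x, y))
    return f(scipy.sparse.csr_matrix(numpy.eye(2)), numpy.ones((2, 2)))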
def sub(x, y):
"""
Subtract two matrices, at least one of which is sparse.
This method will provide the right op according
to the inputs.
Parameters
----------
x
A matrix variable.
y
A matrix variable.
Returns
-------
A sparse matrix
`x` - `y`
Notes
-----
At least one of `x` and `y` must be a sparse matrix.
    The grad will be structured only when one of the variables is a dense
    matrix.
"""
return x + (-y)
class MulSS(gof.op.Op):
# mul(sparse, sparse)
# See the doc of mul() for more detail
__props__ = ()
def make_node(self, x, y):
x, y = as_sparse_variable(x), as_sparse_variable(y)
assert x.format in ["csr", "csc"]
assert y.format in ["csr", "csc"]
out_dtype = scalar.upcast(x.type.dtype, y.type.dtype)
return gof.Apply(self,
[x, y],
[SparseType(dtype=out_dtype,
format=x.type.format)()])
def perform(self, node, inputs, outputs):
(x, y) = inputs
(out,) = outputs
assert _is_sparse(x) and _is_sparse(y)
assert len(x.shape) == 2
assert y.shape == x.shape
        # This calls the element-wise multiply;
# x * y calls dot...
out[0] = x.multiply(y)
def grad(self, inputs, gout):
(x, y) = inputs
(gz,) = gout
return y * gz, x * gz
def infer_shape(self, node, shapes):
return [shapes[0]]
mul_s_s = MulSS()
class MulSD(gof.op.Op):
# mul(sparse, dense)
# See the doc of mul() for more detail
__props__ = ()
def make_node(self, x, y):
x, y = as_sparse_variable(x), tensor.as_tensor_variable(y)
assert x.format in ["csr", "csc"]
        # upcast the tensor. Is the cast of sparse already implemented?
dtype = scalar.upcast(x.type.dtype, y.type.dtype)
# The magic number two here arises because L{scipy.sparse}
# objects must be matrices (have dimension 2)
# Broadcasting of the sparse matrix is not supported.
# We support nd == 0 used by grad of SpSum()
assert y.type.ndim in [0, 2]
out = SparseType(dtype=dtype,
format=x.type.format)()
return gof.Apply(self, [x, y], [out])
def perform(self, node, inputs, outputs):
(x, y) = inputs
(out,) = outputs
assert _is_sparse(x) and _is_dense(y)
if len(y.shape) == 0:
out_dtype = node.outputs[0].dtype
if x.dtype == out_dtype:
z = x.copy()
else:
z = x.astype(out_dtype)
out[0] = z
out[0].data *= y
elif len(y.shape) == 1:
raise NotImplementedError() # RowScale / ColScale
elif len(y.shape) == 2:
# if we have enough memory to fit y, maybe we can fit x.asarray()
# too?
# TODO: change runtime from O(M*N) to O(nonzeros)
M, N = x.shape
assert x.shape == y.shape
out_dtype = node.outputs[0].dtype
if x.format == 'csc':
indices = x.indices
indptr = x.indptr
if x.dtype == out_dtype:
z = x.copy()
else:
z = x.astype(out_dtype)
z_data = z.data
for j in xrange(0, N):
for i_idx in xrange(indptr[j], indptr[j + 1]):
i = indices[i_idx]
z_data[i_idx] *= y[i, j]
out[0] = z
elif x.format == 'csr':
indices = x.indices
indptr = x.indptr
if x.dtype == out_dtype:
z = x.copy()
else:
z = x.astype(out_dtype)
z_data = z.data
for i in xrange(0, M):
for j_idx in xrange(indptr[i], indptr[i + 1]):
j = indices[j_idx]
z_data[j_idx] *= y[i, j]
out[0] = z
else:
print((
"WARNING: crappy implementation of MulSD"
), x.format, file=sys.stderr)
out[0] = type(x)(x.toarray() * y)
def grad(self, inputs, gout):
(x, y) = inputs
(gz,) = gout
assert _is_sparse_variable(x) and _is_dense_variable(y)
assert _is_sparse_variable(gz)
return y * gz, dense_from_sparse(x * gz)
def infer_shape(self, node, shapes):
return [shapes[0]]
mul_s_d = MulSD()
class MulSV(gof.op.Op):
__props__ = ()
def make_node(self, x, y):
x = as_sparse_variable(x)
assert x.format in ["csr", "csc"]
y = tensor.as_tensor_variable(y)
assert y.type.ndim == 1
if x.type.dtype != y.type.dtype:
raise NotImplementedError(
"MulSV not implemented for differing dtypes."
"Got %s and %s." % (str(x.type.dtype), str(y.type.dtype)))
return gof.Apply(self,
[x, y],
[SparseType(dtype=x.type.dtype,
format=x.type.format)()])
def perform(self, node, inputs, outputs):
(x, y) = inputs
(out,) = outputs
assert _is_sparse(x) and not _is_sparse(y)
assert x.shape[1] == y.shape[0]
out[0] = x.__class__(x.toarray() * y)
def grad(self, inputs, gout):
(x, y) = inputs
(gz,) = gout
assert _is_sparse_variable(x) and _is_dense_variable(y)
assert _is_sparse_variable(gz)
# mul_s_v is not implemented if the types vary
if gz.dtype == 'float64' and y.dtype == 'float32':
y = y.astype('float64')
if gz.dtype == 'float32' and y.dtype == 'float64':
gz = gz.astype('float64')
return mul_s_v(gz, y), sp_sum(x * gz, axis=0, sparse_grad=True)
def infer_shape(self, node, ins_shapes):
return [ins_shapes[0]]
mul_s_v = MulSV()
"""
Multiplication of sparse matrix by a broadcasted dense vector element wise.
Parameters
----------
x
Sparse matrix to multiply.
y
Tensor broadcastable vector.
Returns
-------
A sparse matrix
The product x * y element wise.
Notes
-----
The grad implemented is regular, i.e. not structured.
"""
def mul(x, y):
"""
Multiply elementwise two matrices, at least one of which is sparse.
This method will provide the right op according to the inputs.
Parameters
----------
x
A matrix variable.
y
A matrix variable.
Returns
-------
A sparse matrix
        `x` * `y`
Notes
-----
At least one of `x` and `y` must be a sparse matrix.
The grad is regular, i.e. not structured.
"""
x = as_sparse_or_tensor_variable(x)
y = as_sparse_or_tensor_variable(y)
x_is_sparse_variable = _is_sparse_variable(x)
y_is_sparse_variable = _is_sparse_variable(y)
assert x_is_sparse_variable or y_is_sparse_variable
if x_is_sparse_variable and y_is_sparse_variable:
# mul_s_s is not implemented if the types differ
if y.dtype == 'float64' and x.dtype == 'float32':
x = x.astype('float64')
return mul_s_s(x, y)
elif x_is_sparse_variable and not y_is_sparse_variable:
# mul is unimplemented if the dtypes differ
if y.dtype == 'float64' and x.dtype == 'float32':
x = x.astype('float64')
return mul_s_d(x, y)
elif y_is_sparse_variable and not x_is_sparse_variable:
return mul_s_d(y, x)
else:
raise NotImplementedError()
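# Usage sketch (illustrative only; "_example_mul" is not part of the original
# module). The elementwise product of a sparse and a dense matrix stays
# sparse.
def _example_mul():
    x = SparseType('csr', dtype='float64')()
    y = tensor.dmatrix('y')
    f = theano.function([x, y], mul(x, y))
    return f(scipy.sparse.csr_matrix(numpy.eye(2)), 2. * numpy.ones((2, 2)))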
class __ComparisonOpSS(gof.op.Op):
"""
Used as a superclass for all comparisons between two sparses matrices.
Parameters
----------
x
First compared sparse matrix.
y
Second compared sparse matrix
Returns
-------
object
Comparison(x,y)
"""
__props__ = ()
# Function to override
def comparison(self, x, y):
raise NotImplementedError()
def make_node(self, x, y):
x = as_sparse_variable(x)
y = as_sparse_variable(y)
if x.type.format != y.type.format:
raise NotImplementedError()
return gof.Apply(self,
[x, y],
[SparseType(dtype='uint8',
format=x.type.format)()])
def perform(self, node, inputs, outputs):
(x, y) = inputs
(out,) = outputs
assert _is_sparse(x) and _is_sparse(y)
assert x.shape == y.shape
out[0] = self.comparison(x, y).astype('uint8')
def infer_shape(self, node, ins_shapes):
return [ins_shapes[0]]
class __ComparisonOpSD(gof.op.Op):
"""
    Used as a superclass for all comparisons between a sparse and a dense matrix.
Parameters
----------
x
Sparse matrix.
y
Dense matrix.
Returns
-------
object
Comparison(x,y)
"""
__props__ = ()
# Function to override
def comparison(self, x, y):
raise NotImplementedError()
def make_node(self, x, y):
x, y = as_sparse_variable(x), tensor.as_tensor_variable(y)
assert y.type.ndim == 2
out = tensor.TensorType(dtype='uint8', broadcastable=(False, False))()
return gof.Apply(self,
[x, y],
[out])
def perform(self, node, inputs, outputs):
(x, y) = inputs
(out,) = outputs
assert _is_sparse(x)
assert x.shape == y.shape
assert _is_dense(y)
o = self.comparison(x, y).astype('uint8')
o = numpy.asarray(o)
out[0] = o
def infer_shape(self, node, ins_shapes):
return [ins_shapes[0]]
def __ComparisonSwitch(SS, SD, DS):
"""
Parameters
----------
SS
        Function to apply between two sparse matrices.
SD
Function to apply between a sparse and a dense matrix.
DS
Function to apply between a dense and a sparse matrix.
Returns
-------
function
Switch function taking two matrices as input.
Notes
-----
At least one of `x` and `y` must be a sparse matrix.
    DS swaps its inputs, as a dense matrix cannot be a left operand.
"""
def helper(x, y):
scipy_ver = [int(n) for n in scipy.__version__.split('.')[:2]]
assert scipy_ver >= [0, 13]
if hasattr(x, 'getnnz'):
x = as_sparse_variable(x)
if hasattr(y, 'getnnz'):
y = as_sparse_variable(y)
if not isinstance(x, theano.Variable):
x = theano.tensor.as_tensor_variable(x)
if not isinstance(y, theano.Variable):
y = theano.tensor.as_tensor_variable(y)
x_is_sparse_variable = _is_sparse_variable(x)
y_is_sparse_variable = _is_sparse_variable(y)
assert x_is_sparse_variable or y_is_sparse_variable
if x_is_sparse_variable and y_is_sparse_variable:
return SS(x, y)
elif x_is_sparse_variable and not y_is_sparse_variable:
return SD(x, y)
elif y_is_sparse_variable and not x_is_sparse_variable:
return DS(y, x)
else:
raise NotImplementedError()
return helper
class EqualSS(__ComparisonOpSS):
def comparison(self, x, y):
return x == y
equal_s_s = EqualSS()
class EqualSD(__ComparisonOpSD):
def comparison(self, x, y):
return x == y
equal_s_d = EqualSD()
class NotEqualSS(__ComparisonOpSS):
def comparison(self, x, y):
return x != y
not_equal_s_s = NotEqualSS()
class NotEqualSD(__ComparisonOpSD):
def comparison(self, x, y):
return x != y
not_equal_s_d = NotEqualSD()
class LessThanSS(__ComparisonOpSS):
def comparison(self, x, y):
return x < y
less_than_s_s = LessThanSS()
class LessThanSD(__ComparisonOpSD):
def comparison(self, x, y):
return x < y
less_than_s_d = LessThanSD()
class GreaterThanSS(__ComparisonOpSS):
def comparison(self, x, y):
return x > y
greater_than_s_s = GreaterThanSS()
class GreaterThanSD(__ComparisonOpSD):
def comparison(self, x, y):
return x > y
greater_than_s_d = GreaterThanSD()
class LessEqualSS(__ComparisonOpSS):
def comparison(self, x, y):
return x <= y
less_equal_s_s = LessEqualSS()
class LessEqualSD(__ComparisonOpSD):
def comparison(self, x, y):
return x <= y
less_equal_s_d = LessEqualSD()
class GreaterEqualSS(__ComparisonOpSS):
def comparison(self, x, y):
return x >= y
greater_equal_s_s = GreaterEqualSS()
class GreaterEqualSD(__ComparisonOpSD):
def comparison(self, x, y):
return x >= y
greater_equal_s_d = GreaterEqualSD()
eq = __ComparisonSwitch(equal_s_s, equal_s_d, equal_s_d)
"""
Parameters
----------
x
A matrix variable.
y
A matrix variable.
Returns
-------
matrix variable
`x` == `y`
Notes
-----
At least one of `x` and `y` must be a sparse matrix.
"""
neq = __ComparisonSwitch(not_equal_s_s, not_equal_s_d, not_equal_s_d)
"""
Parameters
----------
x
A matrix variable.
y
A matrix variable.
Returns
-------
matrix variable
`x` != `y`
Notes
-----
At least one of `x` and `y` must be a sparse matrix.
"""
lt = __ComparisonSwitch(less_than_s_s, less_than_s_d, greater_than_s_d)
"""
Parameters
----------
x
A matrix variable.
y
A matrix variable.
Returns
-------
matrix variable
`x` < `y`
Notes
-----
At least one of `x` and `y` must be a sparse matrix.
"""
gt = __ComparisonSwitch(greater_than_s_s, greater_than_s_d, less_than_s_d)
"""
Parameters
----------
x
A matrix variable.
y
A matrix variable.
Returns
-------
matrix variable
`x` > `y`
Notes
-----
At least one of `x` and `y` must be a sparse matrix.
"""
le = __ComparisonSwitch(less_equal_s_s, less_equal_s_d, greater_equal_s_d)
"""
Parameters
----------
x
A matrix variable.
y
A matrix variable.
Returns
-------
matrix variable
`x` <= `y`
Notes
-----
At least one of `x` and `y` must be a sparse matrix.
"""
ge = __ComparisonSwitch(greater_equal_s_s, greater_equal_s_d,
less_equal_s_d)
"""
Parameters
----------
x
A matrix variable.
y
A matrix variable.
Returns
-------
matrix variable
`x` >= `y`
Notes
-----
At least one of `x` and `y` must be a sparse matrix.
"""
class HStack(gof.op.Op):
# See doc in instance of this Op or function after this class definition.
__props__ = ("format", "dtype")
def __init__(self, format=None, dtype=None):
if format is None:
self.format = 'csc'
else:
self.format = format
if dtype is None:
raise ValueError('The output dtype must be specified.')
self.dtype = dtype
def make_node(self, *mat):
if not mat:
raise ValueError('Cannot join an empty list of sparses.')
var = [as_sparse_variable(x) for x in mat]
for x in var:
assert x.format in ["csr", "csc"]
return gof.Apply(self,
var,
[SparseType(dtype=self.dtype,
format=self.format)()])
def perform(self, node, block, outputs):
(out,) = outputs
for b in block:
assert _is_sparse(b)
out[0] = scipy.sparse.hstack(block, format=self.format,
dtype=self.dtype)
# Some version of scipy (at least 0.14.0.dev-c4314b0)
# Do not cast to the wanted dtype.
if out[0].dtype != self.dtype:
out[0] = out[0].astype(self.dtype)
def grad(self, inputs, gout):
(gz,) = gout
is_continuous = [(inputs[i].dtype in tensor.continuous_dtypes)
for i in range(len(inputs))]
if _is_sparse_variable(gz):
gz = dense_from_sparse(gz)
split = tensor.Split(len(inputs))(gz, 1,
tensor.stack(
[x.shape[1]
for x in inputs]))
if not isinstance(split, list):
split = [split]
derivative = [SparseFromDense(self.format)(s) for s in split]
def choose(continuous, derivative):
if continuous:
return derivative
else:
return None
return [choose(c, d) for c, d in zip(is_continuous, derivative)]
def infer_shape(self, node, ins_shapes):
def _get(l):
return l[1]
d = sum(map(_get, ins_shapes))
return [(ins_shapes[0][0], d)]
def __str__(self):
return "%s(%s,%s)" % (self.__class__.__name__, self.format, self.dtype)
def hstack(blocks, format=None, dtype=None):
"""
Stack sparse matrices horizontally (column wise).
    This wraps the hstack method from scipy.
Parameters
----------
blocks
List of sparse array of compatible shape.
format
String representing the output format. Default is csc.
dtype
Output dtype.
Returns
-------
array
The concatenation of the sparse array column wise.
Notes
-----
    The number of rows of the sparse matrices must agree.
The grad implemented is regular, i.e. not structured.
"""
blocks = [as_sparse_variable(i) for i in blocks]
if dtype is None:
dtype = theano.scalar.upcast(*[i.dtype for i in blocks])
return HStack(format=format, dtype=dtype)(*blocks)
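# Illustrative usage sketch added for clarity; it is not part of the original
# module.
def _example_hstack_usage():
    x = SparseType(format='csc', dtype='float64')()
    y = SparseType(format='csc', dtype='float64')()
    f = theano.function([x, y], hstack([x, y], format='csc'))
    a = scipy.sparse.csc_matrix(numpy.eye(2))
    return f(a, a).shape   # (2, 4): the columns are concatenated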
class VStack(HStack):
# See doc in instance of this Op or function after this class definition.
def perform(self, node, block, outputs):
(out,) = outputs
for b in block:
assert _is_sparse(b)
out[0] = scipy.sparse.vstack(block, format=self.format,
dtype=self.dtype)
# Some version of scipy (at least 0.14.0.dev-c4314b0)
# Do not cast to the wanted dtype.
if out[0].dtype != self.dtype:
out[0] = out[0].astype(self.dtype)
def grad(self, inputs, gout):
(gz,) = gout
is_continuous = [(inputs[i].dtype in tensor.continuous_dtypes)
for i in range(len(inputs))]
if _is_sparse_variable(gz):
gz = dense_from_sparse(gz)
split = tensor.Split(len(inputs))(gz, 0,
tensor.stack(
[x.shape[0]
for x in inputs]))
if not isinstance(split, list):
split = [split]
derivative = [SparseFromDense(self.format)(s) for s in split]
def choose(continuous, derivative):
if continuous:
return derivative
else:
return None
return [choose(c, d) for c, d in zip(is_continuous, derivative)]
def infer_shape(self, node, ins_shapes):
def _get(l):
return l[0]
d = sum(map(_get, ins_shapes))
return [(d, ins_shapes[0][1])]
def vstack(blocks, format=None, dtype=None):
"""
Stack sparse matrices vertically (row wise).
    This wraps the vstack method from scipy.
Parameters
----------
blocks
List of sparse array of compatible shape.
format
String representing the output format. Default is csc.
dtype
Output dtype.
Returns
-------
array
The concatenation of the sparse array row wise.
Notes
-----
    The number of columns of the sparse matrices must agree.
The grad implemented is regular, i.e. not structured.
"""
blocks = [as_sparse_variable(i) for i in blocks]
if dtype is None:
dtype = theano.scalar.upcast(*[i.dtype for i in blocks])
return VStack(format=format, dtype=dtype)(*blocks)
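# Illustrative usage sketch added for clarity; it is not part of the original
# module. ``vstack`` mirrors ``hstack`` but stacks rows.
def _example_vstack_usage():
    x = SparseType(format='csr', dtype='float64')()
    f = theano.function([x], vstack([x, x], format='csr'))
    a = scipy.sparse.csr_matrix(numpy.eye(2))
    return f(a).shape   # (4, 2): the rows are concatenated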
class Remove0(gof.Op):
# See doc in instance of this Op or a function after the class definition.
__props__ = ("inplace",)
def __init__(self, inplace=False):
self.inplace = inplace
if self.inplace:
self.destroy_map = {0: [0]}
def __str__(self):
l = []
if self.inplace:
l.append('inplace')
return self.__class__.__name__ + '{%s}' % ', '.join(l)
def make_node(self, x):
x = as_sparse_variable(x)
assert x.format in ["csr", "csc"]
return gof.Apply(self, [x], [x.type()])
def perform(self, node, inputs, outputs):
(x,) = inputs
(z,) = outputs
if self.inplace:
c = x
else:
c = x.copy()
c.eliminate_zeros()
z[0] = c
def grad(self, inputs, gout):
(x,) = inputs
(gz,) = gout
return [gz]
def infer_shape(self, node, i0_shapes):
return i0_shapes
remove0 = Remove0()
"""
Remove explicit zeros from a sparse matrix.
Parameters
----------
x
Sparse matrix.
Returns
-------
sparse matrix
Exactly `x` but with a data attribute exempt of zeros.
Notes
-----
The grad implemented is regular, i.e. not structured.
"""
# Structured monoid
def structured_monoid(tensor_op):
# Generic operation to perform many kinds of monoid element-wise
# operations on the non-zeros of a sparse matrix.
# The first parameter must always be a sparse matrix. The other parameters
# must be scalars which will be passed as argument to the tensor_op.
def decorator(f):
def wrapper(*args):
x = as_sparse_variable(args[0])
assert x.format in ["csr", "csc"]
xs = [scalar.as_scalar(arg) for arg in args[1:]]
data, ind, ptr, shape = csm_properties(x)
data = tensor_op(data, *xs)
return CSM(x.format)(data, ind, ptr, shape)
wrapper.__name__ = str(tensor_op.scalar_op)
return wrapper
return decorator
@structured_monoid(tensor.nnet.sigmoid)
def structured_sigmoid(x):
"""
Structured elemwise sigmoid.
"""
# see decorator for function body
@structured_monoid(tensor.exp)
def structured_exp(x):
"""
Structured elemwise exponential.
"""
# see decorator for function body
@structured_monoid(tensor.log)
def structured_log(x):
"""
Structured elemwise logarithm.
"""
# see decorator for function body
@structured_monoid(tensor.pow)
def structured_pow(x, y):
"""
Structured elemwise power of sparse matrix x by scalar y.
"""
# see decorator for function body
@structured_monoid(tensor.minimum)
def structured_minimum(x, y):
"""
Structured elemwise minimum of sparse matrix x by scalar y.
"""
# see decorator for function body
@structured_monoid(tensor.maximum)
def structured_maximum(x, y):
"""
Structured elemwise maximum of sparse matrix x by scalar y.
"""
# see decorator for function body
@structured_monoid(tensor.add)
def structured_add(x):
"""
Structured addition of sparse matrix x and scalar y.
"""
# see decorator for function body
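# Illustrative usage sketch added for clarity; it is not part of the original
# module. The decorated functions above only touch the stored non-zeros, so
# e.g. ``structured_sigmoid`` leaves absent entries at 0 instead of mapping
# them to sigmoid(0) = 0.5.
def _example_structured_monoid_usage():
    x = SparseType(format='csr', dtype='float64')()
    f = theano.function([x], structured_sigmoid(x))
    a = scipy.sparse.csr_matrix(numpy.eye(3))
    return f(a)   # only the diagonal (stored) entries are transformed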
# Sparse operation (map 0 to 0)
@structured_monoid(tensor.sin)
def sin(x):
"""
    Elemwise sine of `x`.
"""
# see decorator for function body
@structured_monoid(tensor.tan)
def tan(x):
"""
Elemwise tan of `x`.
"""
# see decorator for function body
@structured_monoid(tensor.arcsin)
def arcsin(x):
"""
    Elemwise arcsine of `x`.
"""
# see decorator for function body
@structured_monoid(tensor.arctan)
def arctan(x):
"""
Elemwise arctan of `x`.
"""
# see decorator for function body
@structured_monoid(tensor.sinh)
def sinh(x):
"""
Elemwise sinh of `x`.
"""
# see decorator for function body
@structured_monoid(tensor.arcsinh)
def arcsinh(x):
"""
Elemwise arcsinh of `x`.
"""
# see decorator for function body
@structured_monoid(tensor.tanh)
def tanh(x):
"""
Elemwise tanh of `x`.
"""
# see decorator for function body
@structured_monoid(tensor.arctanh)
def arctanh(x):
"""
Elemwise arctanh of `x`.
"""
# see decorator for function body
@structured_monoid(tensor.round_half_to_even)
def rint(x):
"""
Elemwise round half to even of `x`.
"""
# see decorator for function body
# Give it a simple name instead of the complex one that would automatically
# be derived from `tensor.round_half_to_even`.
rint.__name__ = 'rint'
@structured_monoid(tensor.sgn)
def sgn(x):
"""
    Elemwise sign of `x`.
"""
# see decorator for function body
@structured_monoid(tensor.ceil)
def ceil(x):
"""
Elemwise ceiling of `x`.
"""
# see decorator for function body
@structured_monoid(tensor.floor)
def floor(x):
"""
Elemwise floor of `x`.
"""
# see decorator for function body
@structured_monoid(tensor.log1p)
def log1p(x):
"""
Elemwise log(1 + `x`).
"""
# see decorator for function body
@structured_monoid(tensor.expm1)
def expm1(x):
"""
Elemwise e^`x` - 1.
"""
# see decorator for function body
@structured_monoid(tensor.deg2rad)
def deg2rad(x):
"""
Elemwise degree to radian.
"""
# see decorator for function body
@structured_monoid(tensor.rad2deg)
def rad2deg(x):
"""
Elemwise radian to degree.
"""
# see decorator for function body
@structured_monoid(tensor.trunc)
def trunc(x):
"""
    Elemwise truncation.
"""
# see decorator for function body
@structured_monoid(tensor.sqr)
def sqr(x):
"""
Elemwise `x` * `x`.
"""
# see decorator for function body
@structured_monoid(tensor.sqrt)
def sqrt(x):
"""
Elemwise square root of `x`.
"""
# see decorator for function body
@structured_monoid(tensor.conj)
def conj(x):
"""
Elemwise complex conjugate of `x`.
"""
# see decorator for function body
class TrueDot(gof.op.Op):
# TODO
# Simplify code by splitting into DotSS and DotSD.
__props__ = ()
# The grad_preserves_dense attribute doesn't change the
# execution behavior. To let the optimizer merge nodes with
# different values of this attribute we shouldn't compare it
# here.
def __init__(self, grad_preserves_dense=True):
self.grad_preserves_dense = grad_preserves_dense
def make_node(self, x, y):
# NOTE
# Because of trickiness of implementing,
# we assume that the left argument x is a
# SparseVariable (not dense)
if x.type.dtype != y.type.dtype:
raise NotImplementedError()
if not _is_sparse_variable(x):
raise TypeError(x)
# These are the conversions performed by scipy.sparse.dot
if x.type.format == "csc" or x.type.format == "coo":
myformat = "csc"
elif x.type.format == "csr":
myformat = "csr"
else:
raise NotImplementedError()
inputs = [x, y] # Need to convert? e.g. assparse
outputs = [SparseType(dtype=x.type.dtype, format=myformat)()]
return gof.Apply(self, inputs, outputs)
def perform(self, node, inp, out_):
# TODO
# -Verify that output is sufficiently sparse,
# and raise a warning if it is not.
# -Also determine that we are storing the
# output in the best storage format?
x, y = inp
out, = out_
rval = x.dot(y)
if not scipy.sparse.issparse(rval):
rval = getattr(scipy.sparse, x.format + '_matrix')(rval)
# x.dot call tocsr() that will "upcast" to ['int8', 'uint8', 'short',
# 'ushort', 'intc', 'uintc', 'longlong', 'ulonglong', 'single',
# 'double', 'longdouble', 'csingle', 'cdouble', 'clongdouble']
# But ulonglong is uint64 on x86-64, but with a different typenum!
if rval.dtype.num != numpy.dtype(str(rval.dtype)).num:
assert str(rval.dtype) == node.outputs[0].dtype
# Create a view with the expected typenum.
format = node.outputs[0].type.format
data = rval.data.view(dtype=node.outputs[0].dtype)
indices = rval.indices
indptr = rval.indptr
shape = rval.shape
# No need to copy indices and indptr as in CSM.perform(),
# as there is only one user of them.
if format == 'csc':
rval = scipy.sparse.csc_matrix((data, indices, indptr),
shape, copy=False)
else:
assert format == 'csr'
rval = scipy.sparse.csr_matrix((data, indices, indptr),
shape, copy=False)
out[0] = rval
def grad(self, inputs, gout):
(x, y) = inputs
(gz,) = gout
assert _is_sparse_variable(gz)
assert _is_sparse_variable(x)
rval = [true_dot(gz, y.T), true_dot(x.T, gz)]
if _is_dense_variable(y):
if self.grad_preserves_dense:
rval[1] = dense_from_sparse(rval[1])
return rval
def infer_shape(self, node, shapes):
return [(shapes[0][0], shapes[1][1])]
def true_dot(x, y, grad_preserves_dense=True):
"""
Operation for efficiently calculating the dot product when
one or all operands are sparse. Supported formats are CSC and CSR.
The output of the operation is sparse.
Parameters
----------
x
Sparse matrix.
y
Sparse matrix or 2d tensor variable.
grad_preserves_dense : bool
If True (default), makes the grad of dense inputs dense.
Otherwise the grad is always sparse.
Returns
-------
The dot product `x`.`y` in a sparse format.
    Notes
-----
The grad implemented is regular, i.e. not structured.
"""
# TODO
# Maybe the triple-transposition formulation
# (when x is dense) is slow. See if there is a
# direct way to do this.
if hasattr(x, 'getnnz'):
x = as_sparse_variable(x)
assert x.format in ["csr", "csc"]
if hasattr(y, 'getnnz'):
y = as_sparse_variable(y)
assert y.format in ["csr", "csc"]
x_is_sparse_variable = _is_sparse_variable(x)
y_is_sparse_variable = _is_sparse_variable(y)
if not x_is_sparse_variable and not y_is_sparse_variable:
raise TypeError()
if x_is_sparse_variable:
return TrueDot(grad_preserves_dense)(x, y)
else:
assert y_is_sparse_variable
return transpose(TrueDot(grad_preserves_dense)(y.T, x.T))
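# Illustrative usage sketch added for clarity; it is not part of the original
# module. ``true_dot`` keeps the result sparse, unlike ``dot`` defined later
# in this file, which returns a dense tensor.
def _example_true_dot_usage():
    x = SparseType(format='csr', dtype='float64')()
    y = SparseType(format='csr', dtype='float64')()
    f = theano.function([x, y], true_dot(x, y))
    a = scipy.sparse.csr_matrix(numpy.eye(4))
    return f(a, a)   # sparse 4x4 identity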
# Dot
class StructuredDot(gof.Op):
# See doc in instance of this Op or function after this class definition.
__props__ = ()
def make_node(self, a, b):
a = as_sparse_variable(a)
assert a.format in ["csr", "csc", "bsr"]
if not _is_sparse_variable(a):
raise TypeError('First argument must be of type SparseVariable '
'or SparseConstant')
dtype_out = scalar.upcast(a.type.dtype, b.type.dtype)
if b.type.ndim != 2:
raise NotImplementedError('non-matrix b')
if _is_sparse_variable(b):
return gof.Apply(self, [a, b],
[SparseType(a.type.format, dtype_out)()])
else:
return gof.Apply(self, [a, b],
[tensor.tensor(dtype_out,
(False, b.type.broadcastable[1]))])
def perform(self, node, inputs, outputs):
(a, b) = inputs
(out,) = outputs
if a.shape[1] != b.shape[0]:
raise ValueError('shape mismatch in StructuredDot.perform',
(a.shape, b.shape))
variable = a * b
if isinstance(node.outputs[0].type, SparseType):
assert _is_sparse(variable)
out[0] = variable
return
assert _is_dense(variable) # scipy 0.7 automatically converts to dense
# dot of an NxM sparse matrix, with a Mx1 dense matrix, returns vector
# not matrix
if variable.ndim == 1:
variable = numpy.expand_dims(variable, 1)
elif variable.ndim != 2:
raise Exception('Output of structured dot should be a matrix '
'(ndim=2)')
assert variable.ndim == 2
if variable.shape != (a.shape[0], b.shape[1]):
if b.shape[0] == 1:
raise Exception("a.shape=%s, b.shape=%s, "
"variable.shape=%s ??? This is probably "
"because scipy.csc_matrix dot has a bug "
"with singleton dimensions (i.e. "
"b.shape[0]=1), for scipy 0.6. Use scipy "
"0.7. NB you have scipy version %s" %
(a.shape, b.shape, variable.shape,
scipy.__version__))
else:
raise Exception("a.shape=%s, b.shape=%s, variable.shape=%s "
" ??? I have no idea why")
# The cast is needed as otherwise we hit the bug mentioned into
# theano._asarray function documentation.
out[0] = theano._asarray(variable, str(variable.dtype))
def grad(self, inputs, gout):
# a is sparse, b is dense, g_out is dense
# ga = g_out x b.T
# gb = a.T x g_out
(a, b) = inputs
(g_out,) = gout
return [structured_dot_grad(a, b, g_out), structured_dot(a.T, g_out)]
def infer_shape(self, node, shapes):
return [(shapes[0][0], shapes[1][1])]
_structured_dot = StructuredDot()
def structured_dot(x, y):
"""
Structured Dot is like dot, except that only the
gradient wrt non-zero elements of the sparse matrix
`a` are calculated and propagated.
The output is presumed to be a dense matrix, and is represented by a
TensorType instance.
Parameters
----------
a
A sparse matrix.
b
A sparse or dense matrix.
Returns
-------
A sparse matrix
The dot product of `a` and `b`.
Notes
-----
The grad implemented is structured.
"""
# @todo: Maybe the triple-transposition formulation (when x is dense)
# is slow. See if there is a direct way to do this.
# (JB 20090528: Transposing tensors and sparse matrices is constant-time,
# inplace, and fast.)
if hasattr(x, 'getnnz'):
x = as_sparse_variable(x)
assert x.format in ["csr", "csc"]
if hasattr(y, 'getnnz'):
y = as_sparse_variable(y)
assert y.format in ["csr", "csc"]
x_is_sparse_variable = _is_sparse_variable(x)
y_is_sparse_variable = _is_sparse_variable(y)
if not x_is_sparse_variable and not y_is_sparse_variable:
raise TypeError('structured_dot requires at least one sparse argument')
if x_is_sparse_variable:
return _structured_dot(x, y)
else:
assert y_is_sparse_variable
return _structured_dot(y.T, x.T).T
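# Illustrative usage sketch added for clarity; it is not part of the original
# module: a sparse-by-dense product whose gradient only flows through the
# non-zero entries of the sparse operand.
def _example_structured_dot_usage():
    a = SparseType(format='csc', dtype='float64')()
    b = tensor.matrix(dtype='float64')
    f = theano.function([a, b], structured_dot(a, b))
    m = scipy.sparse.csc_matrix(numpy.eye(3))
    return f(m, numpy.ones((3, 2)))   # dense (3, 2) result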
class StructuredDotGradCSC(gof.Op):
# Op that produces the grad of StructuredDot.
    # :param a_indices: Matrix indices
# :param a_indptr: Matrix indptr
# :param b: Right operand
# :param g_ab: Accumulated gradient.
# :return: The grad of `a`.`b` for `a` accumulated
# with g_ab.
# :note: The grad implemented is structured.
# :note: a_* are the corresponding properties of a sparse
# matrix in csc format.
__props__ = ()
def make_node(self, a_indices, a_indptr, b, g_ab):
return gof.Apply(self, [a_indices, a_indptr, b, g_ab],
[tensor.tensor(g_ab.dtype, (False,))])
def perform(self, node, inputs, outputs):
(a_indices, a_indptr, b, g_ab) = inputs
(out,) = outputs
g_a_data = numpy.zeros(a_indices.shape, dtype=g_ab.dtype)
for j in xrange(len(a_indptr) - 1):
ind0 = a_indptr[j]
ind1 = a_indptr[j + 1]
for i_idx in xrange(ind0, ind1):
i = a_indices[i_idx]
# Depending on the type of g_ab and b (sparse or dense),
# the following dot product can result in a scalar or
# a (1, 1) sparse matrix.
dot_val = numpy.dot(g_ab[i], b[j].T)
if isinstance(dot_val, scipy.sparse.spmatrix):
dot_val = dot_val[0, 0]
g_a_data[i_idx] = dot_val
out[0] = g_a_data
def c_code_cache_version(self):
return (1,)
def c_code(self, node, name, inputs, outputs, sub):
(_indices, _indptr, _d, _g) = inputs
(_zout,) = outputs
if node.inputs[2].type.dtype in ('complex64', 'complex128'):
raise NotImplementedError('Complex types are not supported for b')
if node.inputs[3].type.dtype in ('complex64', 'complex128'):
raise NotImplementedError('Complex types are not supported for '
'g_ab')
return """
if (PyArray_NDIM(%(_d)s) != 2) {PyErr_SetString(PyExc_NotImplementedError, "rank(d) != 2"); %(fail)s;}
if (PyArray_NDIM(%(_g)s) != 2) {PyErr_SetString(PyExc_NotImplementedError, "rank(g) != 2"); %(fail)s;}
if (PyArray_NDIM(%(_indices)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, "rank(indices) != 1"); %(fail)s;}
if (PyArray_NDIM(%(_indptr)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, "rank(indptr) != 1"); %(fail)s;}
if( PyArray_TYPE(%(_indices)s) != NPY_INT32) {
PyErr_SetString(PyExc_NotImplementedError, "C"); %(fail)s;}
if( PyArray_TYPE(%(_indptr)s) != NPY_INT32)
{PyErr_SetString(PyExc_NotImplementedError, "D"); %(fail)s;}
if( PyArray_DIMS(%(_d)s)[1] != PyArray_DIMS(%(_g)s)[1])
{PyErr_SetString(PyExc_NotImplementedError, "d and g have different numbers of columns"); %(fail)s;}
if (!%(_zout)s
|| (PyArray_DIMS(%(_zout)s)[0] != PyArray_DIMS(%(_indices)s)[0]))
{
Py_XDECREF(%(_zout)s);
%(_zout)s = (PyArrayObject*) PyArray_SimpleNew(1, PyArray_DIMS(%(_indices)s), PyArray_TYPE(%(_g)s));
}
{ //makes it compile even though labels jump over variable definitions.
npy_intp nnz = PyArray_DIMS(%(_indices)s)[0];
npy_intp N = PyArray_DIMS(%(_indptr)s)[0]-1; //TODO: error checking with this
npy_intp Sindices = PyArray_STRIDES(%(_indices)s)[0]/PyArray_DESCR(%(_indices)s)->elsize;
npy_intp Sindptr = PyArray_STRIDES(%(_indptr)s)[0]/PyArray_DESCR(%(_indptr)s)->elsize;
const npy_intp Sd1 = PyArray_STRIDES(%(_d)s)[1]/PyArray_DESCR(%(_d)s)->elsize;
const npy_intp Sg1 = PyArray_STRIDES(%(_g)s)[1]/PyArray_DESCR(%(_g)s)->elsize;
const npy_intp K = PyArray_DIMS(%(_d)s)[1];
const npy_int32 * __restrict__ indptr = (npy_int32 *)PyArray_DATA(%(_indptr)s);
const npy_int32 * __restrict__ indices = (npy_int32 *)PyArray_DATA(%(_indices)s);
// loop over columns
for (npy_int32 j = 0; j < N; ++j)
{
// extract j-th row of dense matrix
const dtype_%(_d)s* __restrict__ d_row = (dtype_%(_d)s*)(PyArray_BYTES(%(_d)s) + PyArray_STRIDES(%(_d)s)[0] * j);
if(j >= PyArray_DIMS(%(_d)s)[0]) {PyErr_SetString(PyExc_NotImplementedError, "G"); %(fail)s;}
// for each non-null value in the sparse column
for (npy_int32 i_idx = indptr[j * Sindptr]; i_idx < indptr[(j+1) * Sindptr]; ++i_idx)
{
// extract row index of non-null value
npy_int32 i = indices[i_idx * Sindices];
// extract corresponding row in gradient
const dtype_%(_g)s* __restrict__ g_row = (dtype_%(_g)s*)(PyArray_BYTES(%(_g)s) + PyArray_STRIDES(%(_g)s)[0] * i);
double ip = 0.0;
// make sure that row index is not bigger than actual number of rows
// Note: wouldn't the above operation fail if that were the case ?
// when would this ever be true anyway ?
if (i >= PyArray_DIMS(%(_g)s)[0])
{PyErr_SetString(PyExc_NotImplementedError, "H"); %(fail)s;}
// perform dot product of dense and sparse rows
for(int k = 0; k < K; ++k)
{
ip += d_row[k * Sd1] * g_row[k*Sg1];
}
// write resulting gradient to sparse output
((dtype_%(_zout)s* __restrict__)(PyArray_BYTES(%(_zout)s) + i_idx * PyArray_STRIDES(%(_zout)s)[0]))[0] = ip;
}
}
}
""" % dict(locals(), **sub)
def infer_shape(self, node, shapes):
return [shapes[0]]
sdg_csc = StructuredDotGradCSC()
class StructuredDotGradCSR(gof.Op):
# Op that produces the grad of StructuredDot.
    # :param a_indices: Matrix indices
# :param a_indptr: Matrix indptr
# :param b: Right operand
# :param g_ab: Accumulated gradient.
# :return: The grad of `a`.`b` for `a` accumulated
# with g_ab.
# :note: The grad implemented is structured.
# :note: a_* are the corresponding properties of a sparse
# matrix in csr format.
__props__ = ()
def make_node(self, a_indices, a_indptr, b, g_ab):
return gof.Apply(self, [a_indices, a_indptr, b, g_ab],
[tensor.tensor(b.dtype, (False,))])
def perform(self, node, inputs, outputs):
(a_indices, a_indptr, b, g_ab) = inputs
(out,) = outputs
g_a_data = numpy.zeros(a_indices.shape, dtype=g_ab.dtype)
for i in xrange(len(a_indptr) - 1): # loop over rows
ind0 = a_indptr[i]
ind1 = a_indptr[i + 1]
# loop over values in that row (columns)
for j_idx in xrange(ind0, ind1):
j = a_indices[j_idx]
# grad is dot product of i-th row of gradient with j-th row of b
# Depending on the type of g_ab and b (sparse or dense),
# the following dot product can result in a scalar or
# a (1, 1) sparse matrix.
dot_val = numpy.dot(g_ab[i], b[j].T)
if isinstance(dot_val, scipy.sparse.spmatrix):
dot_val = dot_val[0, 0]
g_a_data[j_idx] = dot_val
out[0] = g_a_data
def c_code_cache_version(self):
return (1,)
def c_code(self, node, name, inputs, outputs, sub):
(_indices, _indptr, _d, _g) = inputs
(_zout,) = outputs
if node.inputs[2].type.dtype in ('complex64', 'complex128'):
raise NotImplementedError('Complex types are not supported for b')
if node.inputs[3].type.dtype in ('complex64', 'complex128'):
raise NotImplementedError('Complex types are not supported for '
'g_ab')
return """
if (PyArray_NDIM(%(_d)s) != 2) {PyErr_SetString(PyExc_NotImplementedError, "rank(d) != 2"); %(fail)s;}
if (PyArray_NDIM(%(_g)s) != 2) {PyErr_SetString(PyExc_NotImplementedError, "rank(g) != 2"); %(fail)s;}
if (PyArray_NDIM(%(_indices)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, "rank(indices) != 1"); %(fail)s;}
if (PyArray_NDIM(%(_indptr)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, "rank(indptr) != 1"); %(fail)s;}
if( PyArray_TYPE(%(_indices)s) != NPY_INT32) {
PyErr_SetString(PyExc_NotImplementedError, "C"); %(fail)s;}
if( PyArray_TYPE(%(_indptr)s) != NPY_INT32)
{PyErr_SetString(PyExc_NotImplementedError, "D"); %(fail)s;}
if( PyArray_DIMS(%(_d)s)[1] != PyArray_DIMS(%(_g)s)[1])
{PyErr_SetString(PyExc_NotImplementedError, "d and g have different numbers of columns"); %(fail)s;}
if (!%(_zout)s
|| (PyArray_DIMS(%(_zout)s)[0] != PyArray_DIMS(%(_indices)s)[0]))
{
Py_XDECREF(%(_zout)s);
%(_zout)s = (PyArrayObject*) PyArray_SimpleNew(1, PyArray_DIMS(%(_indices)s), PyArray_TYPE(%(_g)s));
}
{ //makes it compile even though labels jump over variable definitions.
npy_intp nnz = PyArray_DIMS(%(_indices)s)[0];
// extract number of rows
npy_intp N = PyArray_DIMS(%(_indptr)s)[0]-1; //TODO: error checking with this
npy_intp Sindices = PyArray_STRIDES(%(_indices)s)[0]/PyArray_DESCR(%(_indices)s)->elsize;
npy_intp Sindptr = PyArray_STRIDES(%(_indptr)s)[0]/PyArray_DESCR(%(_indptr)s)->elsize;
const npy_intp Sd1 = PyArray_STRIDES(%(_d)s)[1]/PyArray_DESCR(%(_d)s)->elsize;
const npy_intp Sg1 = PyArray_STRIDES(%(_g)s)[1]/PyArray_DESCR(%(_g)s)->elsize;
const npy_intp K = PyArray_DIMS(%(_d)s)[1];
const npy_int32 * __restrict__ indptr = (npy_int32 *)PyArray_DATA(%(_indptr)s);
const npy_int32 * __restrict__ indices = (npy_int32 *)PyArray_DATA(%(_indices)s);
// loop over columns of sparse matrix
for (npy_int32 i = 0; i < N; ++i)
{
// for each non-null value in the sparse row
for (npy_int32 j_idx = indptr[i * Sindptr]; j_idx < indptr[(i+1) * Sindptr]; ++j_idx)
{
// extract column index of non-null value
npy_int32 j = indices[j_idx * Sindices];
// extract j-th row of dense matrix
const dtype_%(_d)s* __restrict__ d_row = (dtype_%(_d)s*)(PyArray_BYTES(%(_d)s) + PyArray_STRIDES(%(_d)s)[0] * j);
if(j >= PyArray_DIMS(%(_d)s)[0]) {PyErr_SetString(PyExc_NotImplementedError, "G"); %(fail)s;}
// extract corresponding row in gradient
const dtype_%(_g)s* __restrict__ g_row = (dtype_%(_g)s*)(PyArray_BYTES(%(_g)s) + PyArray_STRIDES(%(_g)s)[0] * i);
double ip = 0.0;
// make sure that row index is not bigger than actual number of rows
// Note: wouldn't the above operation fail if that were the case ?
// when would this ever be true anyway ?
if (i >= PyArray_DIMS(%(_g)s)[0])
{PyErr_SetString(PyExc_NotImplementedError, "H"); %(fail)s;}
// perform dot product of dense and sparse rows
for(int k = 0; k < K; ++k)
{
ip += d_row[k * Sd1] * g_row[k*Sg1];
}
// write resulting gradient to sparse output
((dtype_%(_zout)s* __restrict__)(PyArray_BYTES(%(_zout)s) + j_idx * PyArray_STRIDES(%(_zout)s)[0]))[0] = ip;
}
}
}
""" % dict(locals(), **sub)
def infer_shape(self, node, shapes):
return [shapes[0]]
sdg_csr = StructuredDotGradCSR()
def structured_dot_grad(sparse_A, dense_B, ga):
if sparse_A.type.format in ('csc', 'csr'):
if sparse_A.type.format == 'csc':
sdgcsx = sdg_csc
CSx = CSC
else:
sdgcsx = sdg_csr
CSx = CSR
g_A_data = sdgcsx(csm_indices(sparse_A),
csm_indptr(sparse_A), dense_B, ga)
return CSx(g_A_data, csm_indices(sparse_A),
csm_indptr(sparse_A), csm_shape(sparse_A))
else:
raise NotImplementedError()
class SamplingDot(gof.op.Op):
# See doc in instance of this Op or function after this class definition.
__props__ = ()
def make_node(self, x, y, p):
x = tensor.as_tensor_variable(x)
y = tensor.as_tensor_variable(y)
p = as_sparse_variable(p)
assert p.format in ["csr", "csc"]
if not _is_sparse_variable(p):
raise TypeError(p)
# TODO: use it.
dtype_out = scalar.upcast(x.type.dtype, y.type.dtype, p.type.dtype) # noqa
return gof.Apply(self, [x, y, p], [p.type()])
def perform(self, node, inputs, outputs):
(x, y, p) = inputs
(out,) = outputs
if _is_sparse(x):
raise TypeError(x)
if _is_sparse(y):
raise TypeError(y)
if not _is_sparse(p):
raise TypeError(p)
out[0] = p.__class__(p.multiply(numpy.dot(x, y.T)))
def grad(self, inputs, gout):
(x, y, p) = inputs
(gz,) = gout
rval = [
dot(p * gz, y),
dot((p * gz).T, x),
grad_not_implemented(self, 2, p)
]
return rval
def infer_shape(self, node, ins_shapes):
return [ins_shapes[2]]
sampling_dot = SamplingDot()
"""
Operation for calculating the dot product dot(`x`, `y`.T) = `z` when you
only want to calculate a subset of `z`.
It is equivalent to `p` o (`x` . `y`.T) where o is the element-wise
product, `x` and `y` operands of the dot product and `p` is a matrix that
contains 1 when the corresponding element of `z` should be calculated
and 0 when it shouldn't. Note that SamplingDot has a different interface
than `dot` because SamplingDot requires `x` to be a `m`x`k` matrix while
`y` is a `n`x`k` matrix instead of the usual `k`x`n` matrix.
Notes
-----
It will work if the pattern is not a binary value, but if the
pattern doesn't have a high sparsity proportion it will be slower
than a more optimized dot followed by a normal elemwise
multiplication.
The grad implemented is regular, i.e. not structured.
Parameters
----------
x
Tensor matrix.
y
Tensor matrix.
p
Sparse matrix in csr format.
Returns
-------
sparse matrix
    A sparse matrix containing the dot product of `x` by `y`.T only
where `p` is 1.
"""
class Dot(gof.op.Op):
# See doc in instance of this Op or function after this class definition.
__props__ = ()
def __str__(self):
return "Sparse" + self.__class__.__name__
def infer_shape(self, node, shapes):
xshp, yshp = shapes
x, y = node.inputs
if x.ndim == 2 and y.ndim == 2:
return [(xshp[0], yshp[1])]
if x.ndim == 1 and y.ndim == 2:
return [(yshp[1],)]
if x.ndim == 2 and y.ndim == 1:
return [(xshp[0],)]
if x.ndim == 1 and y.ndim == 1:
return [()]
raise NotImplementedError()
def make_node(self, x, y):
dtype_out = scalar.upcast(x.dtype, y.dtype)
# Sparse dot product should have at least one sparse variable
# as input. If the other one is not sparse, it has to be converted
# into a tensor.
if isinstance(x, scipy.sparse.spmatrix):
x = as_sparse_variable(x)
if isinstance(y, scipy.sparse.spmatrix):
y = as_sparse_variable(y)
x_is_sparse_var = _is_sparse_variable(x)
y_is_sparse_var = _is_sparse_variable(y)
if not x_is_sparse_var and not y_is_sparse_var:
raise TypeError(
"Sparse dot product should have at least one "
"sparse variable as inputs, but the inputs are "
"%s (%s) and %s (%s)." % (x, x.type, y, y.type))
if not x_is_sparse_var:
x = tensor.as_tensor_variable(x)
assert y.format in ["csr", "csc"]
if x.ndim not in (1, 2):
raise TypeError(
'theano.sparse.Dot: input 0 (0-indexed) must have ndim of '
'1 or 2, %d given.' % x.ndim)
if not y_is_sparse_var:
y = tensor.as_tensor_variable(y)
assert x.format in ["csr", "csc"]
if y.ndim not in (1, 2):
raise TypeError(
'theano.sparse.Dot: input 1 (1-indexed) must have ndim of '
'1 or 2, %d given.' % y.ndim)
if y.ndim == 1 or x.ndim == 1:
bz = (False,)
else:
bz = (False, False)
return gof.Apply(self, [x, y], [tensor.tensor(dtype=dtype_out,
broadcastable=bz)])
def perform(self, node, inputs, out):
x, y = inputs
out = out[0]
x_is_sparse = _is_sparse(x)
y_is_sparse = _is_sparse(y)
if not x_is_sparse and not y_is_sparse:
raise TypeError(x)
rval = x * y
if x_is_sparse and y_is_sparse:
rval = rval.toarray()
out[0] = theano._asarray(rval, dtype=node.outputs[0].dtype)
def grad(self, inputs, gout):
(x, y) = inputs
(gz,) = gout
assert _is_sparse_variable(x) or _is_sparse_variable(y)
rval = []
if _is_dense_variable(y):
rval.append(tensor.dot(gz, y.T))
else:
rval.append(dot(gz, y.T))
if _is_dense_variable(x):
rval.append(tensor.dot(x.T, gz))
else:
rval.append(dot(x.T, gz))
return rval
_dot = Dot()
def dot(x, y):
"""
Operation for efficiently calculating the dot product when
    one or all operands are sparse. Supported formats are CSC and CSR.
The output of the operation is dense.
Parameters
----------
x
Sparse or dense matrix variable.
y
Sparse or dense matrix variable.
Returns
-------
The dot product `x`.`y` in a dense format.
Notes
-----
The grad implemented is regular, i.e. not structured.
At least one of `x` or `y` must be a sparse matrix.
When the operation has the form dot(csr_matrix, dense)
the gradient of this operation can be performed inplace
by UsmmCscDense. This leads to significant speed-ups.
"""
if hasattr(x, 'getnnz'):
x = as_sparse_variable(x)
if hasattr(y, 'getnnz'):
y = as_sparse_variable(y)
x_is_sparse_variable = _is_sparse_variable(x)
y_is_sparse_variable = _is_sparse_variable(y)
if not x_is_sparse_variable and not y_is_sparse_variable:
raise TypeError()
return _dot(x, y)
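# Illustrative usage sketch added for clarity; it is not part of the original
# module: a mixed sparse/dense dot product returning a dense tensor.
def _example_dot_usage():
    x = SparseType(format='csr', dtype='float64')()
    y = tensor.matrix(dtype='float64')
    f = theano.function([x, y], dot(x, y))
    a = scipy.sparse.csr_matrix(numpy.eye(3))
    return f(a, numpy.ones((3, 2)))   # dense (3, 2) array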
class Usmm(gof.op.Op):
# See doc in instance of this Op or function after this class definition.
# We don't implement the infer_shape as it is
# inserted by optimization only.
__props__ = ()
def __str__(self):
return 'Usmm{no_inplace}'
def make_node(self, alpha, x, y, z):
if not _is_sparse_variable(x) and not _is_sparse_variable(y):
# If x and y are tensor, we don't want to use this class
# We should use Dot22 and Gemm in that case.
raise TypeError(x)
dtype_out = scalar.upcast(alpha.type.dtype, x.type.dtype,
y.type.dtype, z.type.dtype)
alpha = tensor.as_tensor_variable(alpha)
z = tensor.as_tensor_variable(z)
assert z.ndim == 2
assert alpha.type.broadcastable == (True,) * alpha.ndim
if not _is_sparse_variable(x):
x = tensor.as_tensor_variable(x)
assert y.format in ["csr", "csc"]
assert x.ndim == 2
if not _is_sparse_variable(y):
y = tensor.as_tensor_variable(y)
assert x.format in ["csr", "csc"]
assert y.ndim == 2
return gof.Apply(self, [alpha, x, y, z],
[tensor.tensor(dtype=dtype_out,
broadcastable=(False, False))])
def perform(self, node, inputs, outputs):
(alpha, x, y, z) = inputs
(out,) = outputs
x_is_sparse = _is_sparse(x)
y_is_sparse = _is_sparse(y)
if not x_is_sparse and not y_is_sparse:
raise TypeError(x)
rval = x * y
if isinstance(rval, scipy.sparse.spmatrix):
rval = rval.toarray()
if rval.dtype == alpha.dtype:
rval *= alpha # Faster because operation is inplace
else:
rval = rval * alpha
if rval.dtype == z.dtype:
rval += z # Faster because operation is inplace
else:
rval = rval + z
out[0] = rval
usmm = Usmm()
"""
Performs the expression `alpha` * `x` `y` + `z`.
Parameters
----------
x
Matrix variable.
y
Matrix variable.
z
Dense matrix.
alpha
A tensor scalar.
Returns
-------
The dense matrix resulting from `alpha` * `x` `y` + `z`.
Notes
-----
The grad is not implemented for this op.
At least one of `x` or `y` must be a sparse matrix.
"""
class ConstructSparseFromList(gof.Op):
# See doc in instance of this Op or function after this class definition.
__props__ = ()
def make_node(self, x, values, ilist):
"""
Parameters
----------
x
A dense matrix that specify the output shape.
values
A dense matrix with the values to use for output.
ilist
A dense vector with the same length as the number of rows of values.
            It specifies where in the output to put the corresponding rows.
        This creates a sparse matrix with the same shape as `x`. Its
        values are the rows of `values`, moved to the rows given by `ilist`. Pseudo-code::
output = csc_matrix.zeros_like(x, dtype=values.dtype)
for in_idx, out_idx in enumerate(ilist):
output[out_idx] = values[in_idx]
"""
x_ = theano.tensor.as_tensor_variable(x)
values_ = theano.tensor.as_tensor_variable(values)
ilist_ = theano.tensor.as_tensor_variable(ilist)
if ilist_.type.dtype[:3] not in ('int', 'uin'):
raise TypeError('index must be integers')
if ilist_.type.ndim != 1:
raise TypeError('index must be vector')
if x_.type.ndim != 2:
raise TypeError(
'cannot create a sparse matrix with %d dimensions' %
x_.type.ndim)
if values_.type.ndim != 2:
raise TypeError(
'cannot create a sparse matrix from values with %d ndim' %
values_.type.ndim)
# We only need the shape of `x` in the perform
# If we keep in the graph the x variable as input of the Apply node,
        # this can raise the memory usage. That is why the Apply node
        # takes `x_.shape` as input and not `x`.
return gof.Apply(self, [x_.shape, values_, ilist_],
[csc_matrix(dtype=x.dtype)])
def perform(self, node, inp, out_):
out_shape, values, ilist = inp
out, = out_
rows, cols = values.shape
assert rows == len(ilist)
indptr = numpy.arange(cols + 1) * rows
indices = as_strided(ilist,
strides=(0, ilist.strides[0]),
shape=(cols, ilist.shape[0])).flatten()
data = values.T.flatten()
out[0] = scipy.sparse.csc_matrix((data, indices, indptr),
shape=out_shape,
dtype=values.dtype)
def infer_shape(self, node, ishapes):
x = node.inputs[0]
return [[x[0], x[1]]]
def R_op(self, inputs, eval_points):
if None in eval_points[:2]:
return [None]
return self.make_node(eval_points[0], eval_points[1],
*inputs[2:]).outputs
def connection_pattern(self, node):
rval = [[True], [True], [False]]
return rval
def grad(self, inputs, grads):
g_output, = grads
x, y = inputs[:2]
idx_list = inputs[2:]
gx = g_output
gy = theano.tensor.advanced_subtensor1(g_output, *idx_list)
return [gx, gy] + [DisconnectedType()()] * len(idx_list)
construct_sparse_from_list = ConstructSparseFromList()
"""
Constructs a sparse matrix out of a list of 2-D matrix rows.
Notes
-----
The grad implemented is regular, i.e. not structured.
"""
| {
"content_hash": "8224195eefc42da84cbec3a82d51b884",
"timestamp": "",
"source": "github",
"line_count": 4370,
"max_line_length": 133,
"avg_line_length": 28.402517162471394,
"alnum_prop": 0.5572877641618125,
"repo_name": "marcsans/cnn-physics-perception",
"id": "dad078afe6e7447800efbab9f699cb0299739660",
"size": "124119",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "phy/lib/python2.7/site-packages/theano/sparse/basic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "489272"
},
{
"name": "C++",
"bytes": "3521811"
},
{
"name": "CSS",
"bytes": "7132"
},
{
"name": "Cuda",
"bytes": "232079"
},
{
"name": "FORTRAN",
"bytes": "9868"
},
{
"name": "HTML",
"bytes": "131419"
},
{
"name": "JavaScript",
"bytes": "23881"
},
{
"name": "Jupyter Notebook",
"bytes": "16254"
},
{
"name": "Makefile",
"bytes": "75861"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Python",
"bytes": "36682149"
},
{
"name": "Shell",
"bytes": "3878"
},
{
"name": "TeX",
"bytes": "14053"
}
],
"symlink_target": ""
} |
from rest_framework import viewsets
from .models import Note
from .serializers import NoteSerializer
class NotesViewSet(viewsets.ModelViewSet):
queryset = Note.objects.all()
serializer_class = NoteSerializer
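# Illustrative wiring sketch (an assumption, not part of this file): a viewset
# like the one above is normally exposed through a DRF router in the project's
# urls.py, for example:
#
#     from rest_framework import routers
#     from notes.views import NotesViewSet
#
#     router = routers.DefaultRouter()
#     router.register(r'notes', NotesViewSet)
#     urlpatterns = router.urls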
| {
"content_hash": "dc2d99c90780844a4cbd52f5f1bcff57",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 42,
"avg_line_length": 27.25,
"alnum_prop": 0.7981651376146789,
"repo_name": "xunil154/ExampleREST",
"id": "0de645b0ea458e896f43a9a51de2bcc5f5ce2f47",
"size": "218",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/notes/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4714"
}
],
"symlink_target": ""
} |
__author__ = 'pp'
__version__ = '1.0.0'
import os
import re
import copy
import time
class TextFileInfo(object):
def __init__(self, file_id, extension=None, delimiter=None, quotechar=None):
self.file_id = file_id
self.extension = 'csv' if extension is None else extension
self.delimiter = ',' if delimiter is None else delimiter
self.quotechar = '|' if quotechar is None else quotechar
class FileIterator(object):
def __init__(self, path=None, include='.*', exclude=None):
self.path = path
self.include_patterns = include if isinstance(include, list) else [include] if isinstance(include, str) else []
self.exclude_patterns = exclude if isinstance(exclude, list) else [exclude] if isinstance(exclude, str) else []
def get_next_file(
self,
path=None,
include=None,
exclude=None,
predicate=None,
so='t',
only_directories=False,
timestamp_filter=None
):
"""
        Iterator over the recursively discovered child files (filtered and sorted)
:param path: str - parent directory path
        :param include: str or list - pattern (str) or list of patterns to be matched against each file fullpath.
        The checked file will be INCLUDED iff ALL patterns match (AND)
        :param exclude: str or list - pattern (str) or list of patterns ([str,]) to be matched against each file fullpath
        The file will be EXCLUDED iff there is at least one match with any of the patterns (OR)
:param predicate: fn - function to be evaluated on each file fullpath.
The file will be included iff predicate returns True, ignored otherwise
        :param so: str - Criterion for sorting the final list of included files:
        t: by file modification timestamp, oldest first
        T: by file modification timestamp, newest first
        n: by name, ascending (a-z)
        N: by name, descending (z-a)
:param only_directories: bool
        :param timestamp_filter: timestamp - If given, all files returned must have a modification time greater than it
:return: iterator
"""
_path = self.path if path is None else path
if _path is None:
raise Exception('FileIterator path is mandatory')
_path = _path.replace('\\', '/')
if not os.path.exists(_path):
raise Exception('FileIterator path not exists')
_include_patterns = []
if include is not None:
if isinstance(include, list):
_include_patterns = include
elif isinstance(include, str):
_include_patterns = [include]
else:
_include_patterns = self.include_patterns
_exclude_patterns = []
if exclude is not None:
if isinstance(exclude, list):
_exclude_patterns = exclude
elif isinstance(exclude, str):
_exclude_patterns = [exclude]
else:
_exclude_patterns = self.exclude_patterns
filtered = []
filenames = []
for root, dirs, files in os.walk(_path):
if only_directories:
if root != _path:
filenames.append(root)
else:
filenames += [os.path.join(root, f).replace('\\', '/') for f in files]
for fullpath in filenames:
if timestamp_filter is not None:
if time.mktime(timestamp_filter.timetuple()) > os.path.getmtime(fullpath):
continue
fail = False
for include_pattern in _include_patterns:
if not re.compile(include_pattern).search(fullpath):
fail = True
break
if fail:
continue
for exclude_pattern in _exclude_patterns:
if re.compile(exclude_pattern).search(fullpath):
fail = True
break
if fail:
continue
if predicate is not None and hasattr(predicate, '__call__'):
if not predicate(fullpath):
continue
filtered.append(dict(fullpath=fullpath, mtime=int(os.path.getmtime(fullpath))))
if len(filtered):
if so in ('t', 'T'):
filtered = sorted(filtered, key=lambda k: k['mtime'], reverse=(so == 'T'))
elif so in ('n', 'N'):
filtered = sorted(filtered, key=lambda k: k['fullpath'], reverse=(so == 'N'))
for f in filtered:
yield f['fullpath']
def file_existence_validator_from_path(patterns_to_validate, path, include=None, exclude=None):
"""
Helper for validate files existence
:param patterns_to_validate:
:param path:
:param include:
:param exclude:
:return:
"""
validated_patterns = copy.deepcopy(patterns_to_validate)
iterator = FileIterator()
for f in iterator.get_next_file(path=path, include=include, exclude=exclude):
validated_patterns = _validate_files_against_paterns(f, patterns_to_validate, validated_patterns)
return dict(count=validated_patterns, result=reduce(lambda x, key: x and validated_patterns[key] == 0, validated_patterns.keys(), True))
def file_existence_validator_from_list(patterns_to_validate, files):
validated_patterns = copy.deepcopy(patterns_to_validate)
for f in files:
validated_patterns = _validate_files_against_paterns(f, patterns_to_validate, validated_patterns)
return dict(count=validated_patterns, result=reduce(lambda x, key: x and validated_patterns[key] == 0, validated_patterns.keys(), True))
def _validate_files_against_paterns(f, patterns_to_validate, validated_patterns):
for k in patterns_to_validate:
if re.match(k, os.path.basename(f)):
validated_patterns[k] -= 1
break
return validated_patterns
def remove_tail_lines(filepath, nlines):
f = open(filepath, 'r+')
f.seek(0, os.SEEK_END)
pos = f.tell() - 1
line_counter = 0
while pos > 0 and line_counter < nlines:
if f.read(1) == '\n':
line_counter += 1
if line_counter < nlines:
pos -= 1
f.seek(pos, os.SEEK_SET)
if pos > 0:
f.seek(pos, os.SEEK_SET)
f.truncate()
f.close()
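# Illustrative usage sketch added for clarity; it is not part of the original
# module: iterate over the Python files under a directory, newest first,
# skipping anything inside hidden directories.
def _example_file_iterator_usage(path='.'):
    iterator = FileIterator(path=path,
                            include=[r'\.py$'],
                            exclude=[r'/\.'])
    return list(iterator.get_next_file(so='T'))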
| {
"content_hash": "1af077e9b3fc7fcbfbd8c59d8f71a30f",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 140,
"avg_line_length": 38.17261904761905,
"alnum_prop": 0.5928582566661469,
"repo_name": "Cocuyo-Labs-Team/ccytsk",
"id": "a697105f5b52de3e1a06a8ff9bb0ad5f4d2607aa",
"size": "6428",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ccytsk/core/files.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "244478"
}
],
"symlink_target": ""
} |
'''This utility cleans up the html files as emitted by doxygen so
that they are suitable for publication on a Google documentation site.
'''
import glob
import optparse
import os
import re
import shutil
import sys
try:
from BeautifulSoup import BeautifulSoup, Tag
except (ImportError, NotImplementedError):
print ("This tool requires the BeautifulSoup package "
"(see http://www.crummy.com/software/BeautifulSoup/).\n"
"Make sure that the file BeautifulSoup.py is either in this directory "
"or is available in your PYTHON_PATH")
raise
def Trace(msg):
if Trace.verbose:
sys.stderr.write(str(msg) + '\n')
Trace.verbose = False
FILES_TO_REMOVE = [
'*.css',
'*.map',
'*.md5',
'annotated.html',
'bc_s.png',
'classes.html',
'closed.png',
'doxygen.png',
'files.html',
'functions*.html',
'globals_0x*.html',
'globals_enum.html',
'globals_eval.html',
'globals_func.html',
'globals.html',
'globals_type.html',
'globals_vars.html',
'graph_legend.html',
'graph_legend.png',
'hierarchy.html',
'index_8dox.html',
'index.html',
'modules.html',
'namespacemembers_func.html',
'namespacemembers.html',
'namespaces.html',
'nav_f.png',
'nav_h.png',
'open.png',
'tab_a.png',
'tab_b.png',
'tab_h.png',
'tab_s.png',
]
class HTMLFixer(object):
'''This class cleans up the html strings as produced by Doxygen
'''
def __init__(self, html):
self.soup = BeautifulSoup(html)
def FixTableHeadings(self):
'''Fixes the doxygen table headings.
This includes:
- Using bare <h2> title row instead of row embedded in <tr><td> in table
- Putting the "name" attribute into the "id" attribute of the <tr> tag.
- Splitting up tables into multiple separate tables if a table
heading appears in the middle of a table.
For example, this html:
<table>
<tr><td colspan="2"><h2><a name="pub-attribs"></a>
Data Fields List</h2></td></tr>
...
</table>
would be converted to this:
<h2>Data Fields List</h2>
<table>
...
</table>
'''
table_headers = []
for tag in self.soup.findAll('tr'):
if tag.td and tag.td.h2 and tag.td.h2.a and tag.td.h2.a['name']:
#tag['id'] = tag.td.h2.a['name']
tag.string = tag.td.h2.a.next
tag.name = 'h2'
table_headers.append(tag)
# reverse the list so that earlier tags don't delete later tags
table_headers.reverse()
# Split up tables that have multiple table header (th) rows
for tag in table_headers:
Trace("Header tag: %s is %s" % (tag.name, tag.string.strip()))
# Is this a heading in the middle of a table?
if tag.findPreviousSibling('tr') and tag.parent.name == 'table':
Trace("Splitting Table named %s" % tag.string.strip())
table = tag.parent
table_parent = table.parent
table_index = table_parent.contents.index(table)
new_table = Tag(self.soup, name='table', attrs=table.attrs)
table_parent.insert(table_index + 1, new_table)
tag_index = table.contents.index(tag)
for index, row in enumerate(table.contents[tag_index:]):
new_table.insert(index, row)
# Now move the <h2> tag to be in front of the <table> tag
assert tag.parent.name == 'table'
table = tag.parent
table_parent = table.parent
table_index = table_parent.contents.index(table)
table_parent.insert(table_index, tag)
def RemoveTopHeadings(self):
'''Removes <div> sections with a header, tabs, or navpath class attribute'''
header_tags = self.soup.findAll(
name='div',
attrs={'class' : re.compile('^(header|tabs[0-9]*|navpath)$')})
[tag.extract() for tag in header_tags]
def RemoveVersionNumbers(self, html):
'''Horrible hack to strip _#_# from struct names.'''
return re.sub(r'(_\d_\d)(?=[": <])', '', html)
def FixAll(self):
self.FixTableHeadings()
self.RemoveTopHeadings()
html = str(self.soup)
html = self.RemoveVersionNumbers(html)
return html
def main(argv):
"""Main entry for the doxy_cleanup utility
doxy_cleanup cleans up the html files generated by doxygen.
"""
parser = optparse.OptionParser(usage='Usage: %prog [options] directory')
parser.add_option('-v', '--verbose', help='verbose output.',
action='store_true')
options, files = parser.parse_args(argv)
if len(files) != 1:
parser.error('Expected one directory')
if options.verbose:
Trace.verbose = True
root_dir = files[0]
html_dir = os.path.join(root_dir, 'html')
# Doxygen puts all files in an 'html' directory.
# First, move all files from that directory to root_dir.
for filename in glob.glob(os.path.join(html_dir, '*')):
Trace('Moving %s -> %s' % (filename, root_dir))
shutil.move(filename, root_dir)
# Now remove the 'html' directory.
Trace('Removing %s' % html_dir)
os.rmdir(html_dir)
# Then remove unneeded files.
for wildcard in FILES_TO_REMOVE:
Trace('Removing "%s":' % wildcard)
path = os.path.join(root_dir, wildcard)
for filename in glob.glob(path):
Trace(' Removing "%s"' % filename)
os.remove(filename)
# Now, fix the HTML files we've kept.
Trace('Fixing HTML files...')
for root, _, files in os.walk(root_dir):
for filename in files:
if not os.path.splitext(filename)[1] == '.html':
Trace('Skipping %s' % filename)
continue
filename = os.path.join(root, filename)
Trace('Processing "%s"...' % filename)
try:
with open(filename) as f:
html = f.read()
fixer = HTMLFixer(html)
output = fixer.FixAll()
with open(filename, 'w') as f:
f.write(output)
except:
sys.stderr.write("Error while processing %s\n" % filename)
raise
return 0
if __name__ == '__main__':
try:
rtn = main(sys.argv[1:])
except KeyboardInterrupt:
sys.stderr.write('%s: interrupted\n' % os.path.basename(__file__))
rtn = 1
sys.exit(rtn)
| {
"content_hash": "c362812b84db837d6ff578123b19a9ec",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 80,
"avg_line_length": 28.483568075117372,
"alnum_prop": 0.626174386022746,
"repo_name": "krieger-od/nwjs_chromium.src",
"id": "d90f214f8e4f78a56be1a78a345091e354cbb0f3",
"size": "6256",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "native_client_sdk/src/doc/doxygen/doxy_cleanup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "23945"
},
{
"name": "Batchfile",
"bytes": "8451"
},
{
"name": "C",
"bytes": "4123085"
},
{
"name": "C++",
"bytes": "225911506"
},
{
"name": "CSS",
"bytes": "875874"
},
{
"name": "Dart",
"bytes": "74976"
},
{
"name": "Go",
"bytes": "18155"
},
{
"name": "HTML",
"bytes": "27190037"
},
{
"name": "Java",
"bytes": "7645280"
},
{
"name": "JavaScript",
"bytes": "18828195"
},
{
"name": "Makefile",
"bytes": "96270"
},
{
"name": "Objective-C",
"bytes": "1228317"
},
{
"name": "Objective-C++",
"bytes": "7573158"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "PLpgSQL",
"bytes": "248854"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "418340"
},
{
"name": "Python",
"bytes": "8032628"
},
{
"name": "Shell",
"bytes": "464218"
},
{
"name": "Standard ML",
"bytes": "4965"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "18335"
}
],
"symlink_target": ""
} |
import os
import os.path as osp
import sys
import click
import torch
import torchfcn
import yaml
from dataset import DatasetV1
this_dir = osp.dirname(osp.realpath(__file__))
def git_hash():
import shlex
import subprocess
cmd = 'git log -n 1 --pretty="%h"'
hash = subprocess.check_output(shlex.split(cmd)).strip()
return hash
def load_config(config_file):
import datetime
import pytz
config = yaml.load(open(config_file))
assert 'max_iteration' in config
assert 'optimizer' in config
assert 'lr' in config
assert 'weight_decay' in config
assert 'aug' in config
now = datetime.datetime.now(pytz.timezone('Asia/Tokyo'))
now = now.replace(tzinfo=None)
out = osp.splitext(osp.basename(config_file))[0]
setting = osp.basename(osp.dirname(osp.abspath(config_file)))
for key, value in sorted(config.items()):
if isinstance(value, basestring):
value = value.replace('/', 'SLASH')
value = value.replace(':', 'COLON')
out += '_{key}-{value}'.format(key=key.upper(), value=value)
out += '_VCS-%s' % git_hash()
out += '_TIME-%s' % now.strftime('%Y%m%d-%H%M%S')
config['out'] = osp.join(this_dir, 'logs', setting, out)
config['config_file'] = osp.realpath(config_file)
config['timestamp'] = datetime.datetime.now(
pytz.timezone('Asia/Tokyo')).isoformat()
if not osp.exists(config['out']):
os.makedirs(config['out'])
with open(osp.join(config['out'], 'params.yaml'), 'w') as f:
yaml.safe_dump(config, f, default_flow_style=False)
return config
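# Illustrative sketch (an assumption, not part of the original script): a
# minimal config file accepted by load_config() has to define at least the
# keys asserted above, for example:
#
#     max_iteration: 100000
#     optimizer: Adam          # any optimizer name available in torch.optim
#     lr: 1.0e-5
#     weight_decay: 0.0001
#     aug: standard            # placeholder; valid values depend on DatasetV1
#
# Optional keys such as batch_size are read with config.get() below.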
@click.command()
@click.argument('config_file', type=click.Path(exists=True))
@click.option('--resume', type=click.Path(exists=True))
def main(config_file, resume):
config = load_config(config_file)
yaml.safe_dump(config, sys.stderr, default_flow_style=False)
cuda = torch.cuda.is_available()
seed = 1
torch.manual_seed(seed)
if cuda:
torch.cuda.manual_seed(seed)
# 1. dataset
kwargs = {'num_workers': 4, 'pin_memory': True} if cuda else {}
train_loader = torch.utils.data.DataLoader(
DatasetV1(split='train', transform=True, aug=config['aug']),
batch_size=config.get('batch_size', 1), shuffle=True, **kwargs)
valid_loader = torch.utils.data.DataLoader(
DatasetV1(split='valid', transform=True, aug=config['aug']),
batch_size=1, shuffle=False, **kwargs)
# 2. model
n_class = len(DatasetV1.class_names)
model = torchfcn.models.FCN32s(n_class=n_class, nodeconv=True)
start_epoch = 0
if resume:
checkpoint = torch.load(resume)
model.load_state_dict(checkpoint['model_state_dict'])
start_epoch = checkpoint['epoch']
else:
vgg16 = torchfcn.models.VGG16(pretrained=True)
model.copy_params_from_vgg16(vgg16, copy_fc8=False, init_upscore=False)
if cuda:
model = model.cuda()
# 3. optimizer
optim = getattr(torch.optim, config['optimizer'])
optim = optim(model.parameters(), lr=config['lr'],
weight_decay=config['weight_decay'])
if resume:
optim.load_state_dict(checkpoint['optim_state_dict'])
trainer = torchfcn.Trainer(
cuda=cuda,
model=model,
optimizer=optim,
train_loader=train_loader,
val_loader=valid_loader,
out=config['out'],
max_iter=config['max_iteration'],
)
trainer.epoch = start_epoch
trainer.iteration = start_epoch * len(train_loader)
trainer.train()
if __name__ == '__main__':
main()
| {
"content_hash": "32e1bd03186a63a78393ffda7152e5e6",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 79,
"avg_line_length": 29.442622950819672,
"alnum_prop": 0.6322383073496659,
"repo_name": "pazeshun/jsk_apc",
"id": "168fefc28a879c21e4390531ea9208a18e7b30d8",
"size": "3615",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "jsk_arc2017_common/experiments/fcn_object_segmentation/train_fcn32s.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "101871"
},
{
"name": "CMake",
"bytes": "42995"
},
{
"name": "Common Lisp",
"bytes": "695864"
},
{
"name": "Dockerfile",
"bytes": "1503"
},
{
"name": "HTML",
"bytes": "6364"
},
{
"name": "Python",
"bytes": "406153"
},
{
"name": "Shell",
"bytes": "4475"
}
],
"symlink_target": ""
} |
"""Unit tests for parser ccio module."""
import os
import tempfile
import unittest
import cclib
class guess_filetypeTest(unittest.TestCase):
def setUp(self):
self.guess = cclib.io.ccio.guess_filetype
def test_fail(self):
"""Does the function fail as expected?"""
self.assertIsNone(self.guess([]))
self.assertIsNone(self.guess(None))
self.assertIsNone(self.guess(os.devnull))
self.assertIsNone(self.guess(['test', 'random', 'quantum chemistry']))
def test_programs(self):
"""Does the function catch programs as expected?"""
self.assertEqual(self.guess(["Amsterdam Density Functional"]), cclib.parser.ADF)
self.assertEqual(self.guess(['Dalton - An Electronic Structure Program']), cclib.parser.DALTON)
self.assertEqual(self.guess(['GAMESS']), cclib.parser.GAMESS)
self.assertEqual(self.guess(['G A M E S S - U K']), cclib.parser.GAMESSUK)
self.assertEqual(self.guess(['Gaussian, Inc.']), cclib.parser.Gaussian)
self.assertEqual(self.guess(['Jaguar']), cclib.parser.Jaguar)
self.assertEqual(self.guess(['PROGRAM SYSTEM MOLPRO']), cclib.parser.Molpro)
self.assertEqual(self.guess(['MOPAC2016']), cclib.parser.MOPAC)
self.assertEqual(self.guess(['Northwest Computational Chemistry Package']), cclib.parser.NWChem)
self.assertEqual(self.guess(['O R C A']), cclib.parser.ORCA)
self.assertEqual(self.guess(["PSI ...Ab Initio Electronic Structure"]), cclib.parser.Psi)
self.assertEqual(self.guess(['A Quantum Leap Into The Future Of Chemistry']), cclib.parser.QChem)
class ccreadTest(unittest.TestCase):
def setUp(self):
self.ccread = cclib.io.ccio.ccread
def test_fail(self):
"""Does the function fail as expected?"""
self.assertIsNone(self.ccread("", quiet=True))
self.assertIsNone(self.ccread([], quiet=True))
self.assertIsNone(self.ccread(None, quiet=True))
class ccopenTest(unittest.TestCase):
def setUp(self):
self.ccopen = cclib.io.ccio.ccopen
def test_ccopen_fail(self):
"""Does the function fail as expected?"""
self.assertIsNone(self.ccopen("", quiet=True))
self.assertIsNone(self.ccopen([], quiet=True))
self.assertIsNone(self.ccopen(None, quiet=True))
    def test_cjson_empty_tempfile(self):
        """Do we get a CJSON object when the keyword argument is used?"""
with tempfile.NamedTemporaryFile() as tf:
self.assertIsInstance(self.ccopen(tf.name, cjson=True), cclib.io.cjsonreader.CJSON)
# This should also work if cjsonreader supported streams.
#def test_cjson(self):
# """Do we get a CJSON object then keyword argument used?"""
# self.assertIsInstance(self.ccopen(StringIO.StringIO(""), cjson=True), cclib.io.cjsonreader.CJSON)
class fallbackTest(unittest.TestCase):
def setUp(self):
self.fallback = cclib.io.ccio.fallback
    def test_fallback_fail(self):
        """Does the function fail as expected?"""
self.assertIsNone(self.fallback(None))
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "5e281538976a82015a3ff99880c6de7e",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 106,
"avg_line_length": 37.93975903614458,
"alnum_prop": 0.667513496348047,
"repo_name": "Schamnad/cclib",
"id": "d7a03d4b77f0d6a7c43ce0cc28b705bfdb867b70",
"size": "3347",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/io/testccio.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Arc",
"bytes": "18395"
},
{
"name": "DIGITAL Command Language",
"bytes": "21581"
},
{
"name": "Python",
"bytes": "836753"
},
{
"name": "Shell",
"bytes": "867"
},
{
"name": "TeX",
"bytes": "29388"
}
],
"symlink_target": ""
} |
"""
Created on 6 Feb 2018
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
Ten Little Algorithms, Part 2: The Single-Pole Low-Pass Filter
https://www.embeddedrelated.com/showarticle/779.php
"""
# --------------------------------------------------------------------------------------------------------------------
class LowPassFilter(object):
"""
    Single-pole (first-order) low-pass filter implemented as exponential smoothing.
"""
@classmethod
def construct(cls, delta_t, cut_off_frequency):
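        # Smoothing factor alpha = delta_t / tau, where tau = 1 / f_c is the
        # filter time constant; a smaller alpha gives heavier smoothing.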
tau = 1 / cut_off_frequency
alpha = delta_t / tau
return LowPassFilter(alpha)
# ----------------------------------------------------------------------------------------------------------------
def __init__(self, alpha):
"""
Constructor
"""
self.__alpha = alpha
self.__y = None
def reset(self):
self.__y = None
# ----------------------------------------------------------------------------------------------------------------
def compute(self, x):
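        # Single-pole IIR update: y[n] = y[n-1] + alpha * (x[n] - y[n-1]);
        # the first sample simply initialises the filter state.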
if self.__y is None:
self.__y = x
self.__y += self.__alpha * (x - self.__y)
return self.__y
# ----------------------------------------------------------------------------------------------------------------
def __str__(self, *args, **kwargs):
return "LowPassFilter:{alpha:%s, y:%s}" % (self.__alpha, self.__y)
| {
"content_hash": "ea1dd972b9fde47c8c32e095d0aeb240",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 118,
"avg_line_length": 25.40740740740741,
"alnum_prop": 0.358600583090379,
"repo_name": "south-coast-science/scs_core",
"id": "2d64fd8fb268792019bf3f838732f776febe8339",
"size": "1372",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/scs_core/data/low_pass_filter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1461551"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2011, 2012, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
@author Stephen Dawson-Haggerty <stevedh@eecs.berkeley.edu>
"""
import os
import sys
from zope.interface import implements
from twisted.python import usage
from twisted.plugin import IPlugin
from twisted.application.service import IServiceMaker
from twisted.application import internet
from twisted.internet import reactor
from twisted.application.service import MultiService
from smap import core, loader, smapconf
from smap.server import getSite
try:
from smap.ssl import SslServerContextFactory
except ImportError:
pass
class Options(usage.Options):
optParameters = [["data-dir", "d", None, "directory for data"],
["port", "p", None, "service port number"],
["sslport", "s", None, "ssl port number"],
["key", "k", None, "ssl server key"],
["cert", "c", None, "ssl crl list"]]
def parseArgs(self, conf):
self['conf'] = conf
if not os.access(self['conf'], os.R_OK):
print >>sys.stderr, "ERROR: no such configuration file: " + self['conf']
sys.exit(1)
class SmapServiceMaker(object):
implements(IServiceMaker, IPlugin)
tapname = "smap"
description = "A sMAP server"
options = Options
def makeService(self, options):
if options['data-dir'] != None:
if not os.access(options['data-dir'], os.X_OK | os.W_OK):
raise core.SmapException("Cannot access " + options['data-dir'])
smapconf.SERVER['DataDir'] = options['data-dir']
inst = loader.load(options['conf'])
# override defaults with command-line args
smapconf.SERVER.update(dict([(k.lower(), v) for (k, v) in
options.iteritems() if v != None]))
if 'SuggestThreadPool' in smapconf.SERVER:
reactor.suggestThreadPoolSize(int(smapconf.SERVER['SuggestThreadPool']))
inst.start()
reactor.addSystemEventTrigger('before', 'shutdown', inst.stop)
site = getSite(inst, docroot=smapconf.SERVER['docroot'])
service = MultiService()
# add HTTP and HTTPS servers to the twisted multiservice
if 'port' in smapconf.SERVER:
service.addService(internet.TCPServer(int(smapconf.SERVER['port']), site))
if 'sslport' in smapconf.SERVER:
service.addService(internet.SSLServer(int(smapconf.SERVER['sslport']),
site,
SslServerContextFactory(smapconf.SERVER)))
return service
serviceMaker = SmapServiceMaker()
| {
"content_hash": "c172769fbe7d1d5f5a9385e99d4fb69d",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 92,
"avg_line_length": 39.42574257425743,
"alnum_prop": 0.6762933199397287,
"repo_name": "immesys/smap",
"id": "642821e58d1d9bff65c19866804bda33d936a15b",
"size": "3982",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "python/twisted/plugins/smap_plugin.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "301328"
},
{
"name": "Perl",
"bytes": "534"
},
{
"name": "Python",
"bytes": "1381824"
},
{
"name": "R",
"bytes": "8015"
},
{
"name": "Shell",
"bytes": "557"
},
{
"name": "XSLT",
"bytes": "5081"
}
],
"symlink_target": ""
} |
import fileinput
import string
import sys
import os
# Goldstone old
fortran_compiler = 'ifort'
fortran_link_flags = '-O1 -mtune=core2 -msse3 -align '
fortran_opt_flags = '-O3 -mtune=core2 -msse3 -align -DBIGBLOCK=8 -c '
src_dir = '/home/jeff/code/spaghetty_backup/python/archive/src/'
exe_dir = '/home/jeff/code/spaghetty_backup/python/archive/exe/'
lib_name = 'tce_sort_new.a'
modlabel = ''
def perm(l):
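    # Return all permutations of l, built recursively by inserting the head
    # element into every position of each permutation of the tail.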
sz = len(l)
if sz <= 1:
return [l]
return [p[:i]+[l[0]]+p[i:] for i in xrange(sz) for p in perm(l[1:])]
indices = ['4','3','2','1']
#all_permutations = perm(indices)
#all_permutations = [indices]
transpose_list = [indices]
#loop_list = perm(indices)
#transpose_list = perm(indices)
loop_list = perm(indices)
for transpose_order in transpose_list:
A = transpose_order[0]
B = transpose_order[1]
C = transpose_order[2]
D = transpose_order[3]
for loop_order in loop_list:
a = loop_order[0]
b = loop_order[1]
c = loop_order[2]
d = loop_order[3]
subroutine_name = 'transpose_'+A+B+C+D+'_loop_'+a+b+c+d+modlabel
source_name = subroutine_name+'.F'
print fortran_compiler+' '+fortran_opt_flags+' '+src_dir+source_name
os.system(fortran_compiler+' '+fortran_opt_flags+' '+src_dir+source_name)
os.system('ar -r '+lib_name+' '+subroutine_name+'.o')
os.system('rm '+subroutine_name+'.o')
#os.system('mv '+subroutine_name+'.F '+src_dir)
| {
"content_hash": "3ffe4138dd4cccdfc97c0ab41296d446",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 75,
"avg_line_length": 28.224489795918366,
"alnum_prop": 0.6616052060737527,
"repo_name": "jeffhammond/spaghetty",
"id": "bffc36b134cb5c5553413b98a6620e1ef2708081",
"size": "1383",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "branches/spaghetty3/python/archive/compile_source_basic.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "28448"
},
{
"name": "C++",
"bytes": "54394"
},
{
"name": "Cuda",
"bytes": "176198"
},
{
"name": "FORTRAN",
"bytes": "237825"
},
{
"name": "Perl",
"bytes": "420"
},
{
"name": "Python",
"bytes": "855226"
},
{
"name": "Shell",
"bytes": "6989"
},
{
"name": "TeX",
"bytes": "290786"
}
],
"symlink_target": ""
} |
"""
Fix a word-processor-generated styles.odt for odtwriter use: Drop page size
specifications from styles.xml in STYLE_FILE.odt.
"""
#
# Author: Michael Schutte <michi@uiae.at>
from lxml import etree
import sys
import zipfile
from tempfile import mkstemp
import shutil
import os
NAMESPACES = {
"style": "urn:oasis:names:tc:opendocument:xmlns:style:1.0",
"fo": "urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0"
}
def prepstyle(filename):
zin = zipfile.ZipFile(filename)
styles = zin.read("styles.xml")
root = etree.fromstring(styles)
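    # Remove every attribute in the fo: namespace (page geometry such as width,
    # height and margins) from the page-layout properties.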
for el in root.xpath("//style:page-layout-properties",
namespaces=NAMESPACES):
for attr in el.attrib:
if attr.startswith("{%s}" % NAMESPACES["fo"]):
del el.attrib[attr]
tempname = mkstemp()
zout = zipfile.ZipFile(os.fdopen(tempname[0], "w"), "w",
zipfile.ZIP_DEFLATED)
for item in zin.infolist():
if item.filename == "styles.xml":
zout.writestr(item, etree.tostring(root))
else:
zout.writestr(item, zin.read(item.filename))
zout.close()
zin.close()
shutil.move(tempname[1], filename)
def main():
args = sys.argv[1:]
if len(args) != 1:
print >> sys.stderr, __doc__
print >> sys.stderr, "Usage: %s STYLE_FILE.odt\n" % sys.argv[0]
sys.exit(1)
filename = args[0]
prepstyle(filename)
if __name__ == '__main__':
main()
# vim:tw=78:sw=4:sts=4:et:
| {
"content_hash": "d4eee9e702fc38682d30664a7ff67dc6",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 75,
"avg_line_length": 24.672131147540984,
"alnum_prop": 0.6132890365448505,
"repo_name": "cloudera/hue",
"id": "b0b7dccd80df68327768e8d61adbb7a9397571c4",
"size": "1701",
"binary": false,
"copies": "20",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/docutils-0.14/tools/rst2odt_prepstyles.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "2347"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "BASIC",
"bytes": "2884"
},
{
"name": "Batchfile",
"bytes": "143575"
},
{
"name": "C",
"bytes": "5129166"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "718011"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "680715"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "Closure Templates",
"bytes": "1072"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "Cython",
"bytes": "1016963"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "13576"
},
{
"name": "EJS",
"bytes": "752"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GAP",
"bytes": "29873"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "641"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "28328425"
},
{
"name": "Haml",
"bytes": "920"
},
{
"name": "Handlebars",
"bytes": "173"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "Java",
"bytes": "457398"
},
{
"name": "JavaScript",
"bytes": "39181239"
},
{
"name": "Jinja",
"bytes": "356"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Less",
"bytes": "396102"
},
{
"name": "Lex",
"bytes": "218764"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1751"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "1025937"
},
{
"name": "Mako",
"bytes": "3644004"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "29403"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "84273"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "6235"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Pug",
"bytes": "584"
},
{
"name": "Python",
"bytes": "92881549"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "484108"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "SCSS",
"bytes": "78508"
},
{
"name": "Sass",
"bytes": "770"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "249165"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "SourcePawn",
"bytes": "948"
},
{
"name": "Stylus",
"bytes": "682"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "341963"
},
{
"name": "Twig",
"bytes": "761"
},
{
"name": "TypeScript",
"bytes": "1241396"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "Vue",
"bytes": "350385"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "522199"
},
{
"name": "Yacc",
"bytes": "1070437"
},
{
"name": "jq",
"bytes": "4"
}
],
"symlink_target": ""
} |
"""Counter for the days until an HTTPS (TLS) certificate will expire."""
from datetime import timedelta
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import (
CONF_HOST,
CONF_PORT,
DEVICE_CLASS_TIMESTAMP,
EVENT_HOMEASSISTANT_START,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import async_call_later
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DEFAULT_PORT, DOMAIN
SCAN_INTERVAL = timedelta(hours=12)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up certificate expiry sensor."""
@callback
def schedule_import(_):
"""Schedule delayed import after HA is fully started."""
async_call_later(hass, 10, do_import)
@callback
def do_import(_):
"""Process YAML import."""
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=dict(config)
)
)
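    # Defer the YAML import until Home Assistant has fully started, then wait a
    # further 10 seconds before launching the import flow.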
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, schedule_import)
async def async_setup_entry(hass, entry, async_add_entities):
"""Add cert-expiry entry."""
coordinator = hass.data[DOMAIN][entry.entry_id]
sensors = [
SSLCertificateTimestamp(coordinator),
]
async_add_entities(sensors, True)
class CertExpiryEntity(CoordinatorEntity):
"""Defines a base Cert Expiry entity."""
_attr_icon = "mdi:certificate"
@property
def extra_state_attributes(self):
"""Return additional sensor state attributes."""
return {
"is_valid": self.coordinator.is_cert_valid,
"error": str(self.coordinator.cert_error),
}
class SSLCertificateTimestamp(CertExpiryEntity, SensorEntity):
"""Implementation of the Cert Expiry timestamp sensor."""
_attr_device_class = DEVICE_CLASS_TIMESTAMP
def __init__(self, coordinator) -> None:
"""Initialize a Cert Expiry timestamp sensor."""
super().__init__(coordinator)
self._attr_name = f"Cert Expiry Timestamp ({coordinator.name})"
self._attr_unique_id = f"{coordinator.host}:{coordinator.port}-timestamp"
@property
def native_value(self):
"""Return the state of the sensor."""
if self.coordinator.data:
return self.coordinator.data.isoformat()
return None
| {
"content_hash": "b81397a76c95016e3477b23974251569",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 86,
"avg_line_length": 30.043478260869566,
"alnum_prop": 0.6794500723589001,
"repo_name": "lukas-hetzenecker/home-assistant",
"id": "7b6445a2f35740d4c6e29f3a18df88222d9ef06d",
"size": "2764",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "homeassistant/components/cert_expiry/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "38023745"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import getpass
import inspect
import os
import sys
import textwrap
import prettytable
import six
from six import moves
from rally.openstack.common.apiclient import exceptions
from rally.openstack.common.gettextutils import _
from rally.openstack.common import strutils
from rally.openstack.common import uuidutils
def validate_args(fn, *args, **kwargs):
"""Check that the supplied args are sufficient for calling a function.
>>> validate_args(lambda a: None)
Traceback (most recent call last):
...
MissingArgs: Missing argument(s): a
>>> validate_args(lambda a, b, c, d: None, 0, c=1)
Traceback (most recent call last):
...
MissingArgs: Missing argument(s): b, d
:param fn: the function to check
:param arg: the positional arguments supplied
:param kwargs: the keyword arguments supplied
"""
argspec = inspect.getargspec(fn)
num_defaults = len(argspec.defaults or [])
required_args = argspec.args[:len(argspec.args) - num_defaults]
def isbound(method):
return getattr(method, 'im_self', None) is not None
if isbound(fn):
required_args.pop(0)
missing = [arg for arg in required_args if arg not in kwargs]
missing = missing[len(args):]
if missing:
raise exceptions.MissingArgs(missing)
def arg(*args, **kwargs):
"""Decorator for CLI args.
Example:
>>> @arg("name", help="Name of the new entity")
... def entity_create(args):
... pass
"""
def _decorator(func):
add_arg(func, *args, **kwargs)
return func
return _decorator
def env(*args, **kwargs):
"""Returns the first environment variable set.
If all are empty, defaults to '' or keyword arg `default`.
"""
for arg in args:
value = os.environ.get(arg)
if value:
return value
return kwargs.get('default', '')
def add_arg(func, *args, **kwargs):
"""Bind CLI arguments to a shell.py `do_foo` function."""
if not hasattr(func, 'arguments'):
func.arguments = []
# NOTE(sirp): avoid dups that can occur when the module is shared across
# tests.
if (args, kwargs) not in func.arguments:
# Because of the semantics of decorator composition if we just append
# to the options list positional options will appear to be backwards.
func.arguments.insert(0, (args, kwargs))
def unauthenticated(func):
"""Adds 'unauthenticated' attribute to decorated function.
Usage:
>>> @unauthenticated
... def mymethod(f):
... pass
"""
func.unauthenticated = True
return func
def isunauthenticated(func):
"""Checks if the function does not require authentication.
Mark such functions with the `@unauthenticated` decorator.
:returns: bool
"""
return getattr(func, 'unauthenticated', False)
def print_list(objs, fields, formatters=None, sortby_index=0,
               mixed_case_fields=None):
    """Print a list of objects as a table, one row per object.
:param objs: iterable of :class:`Resource`
:param fields: attributes that correspond to columns, in order
:param formatters: `dict` of callables for field formatting
:param sortby_index: index of the field for sorting table rows
:param mixed_case_fields: fields corresponding to object attributes that
have mixed case names (e.g., 'serverId')
"""
formatters = formatters or {}
mixed_case_fields = mixed_case_fields or []
if sortby_index is None:
kwargs = {}
else:
kwargs = {'sortby': fields[sortby_index]}
pt = prettytable.PrettyTable(fields, caching=False)
pt.align = 'l'
for o in objs:
row = []
for field in fields:
if field in formatters:
row.append(formatters[field](o))
else:
if field in mixed_case_fields:
field_name = field.replace(' ', '_')
else:
field_name = field.lower().replace(' ', '_')
data = getattr(o, field_name, '')
row.append(data)
pt.add_row(row)
print(strutils.safe_encode(pt.get_string(**kwargs)))
def print_dict(dct, dict_property="Property", wrap=0):
"""Print a `dict` as a table of two columns.
:param dct: `dict` to print
:param dict_property: name of the first column
:param wrap: wrapping for the second column
"""
pt = prettytable.PrettyTable([dict_property, 'Value'], caching=False)
pt.align = 'l'
for k, v in six.iteritems(dct):
# convert dict to str to check length
if isinstance(v, dict):
v = six.text_type(v)
if wrap > 0:
v = textwrap.fill(six.text_type(v), wrap)
# if value has a newline, add in multiple rows
# e.g. fault with stacktrace
if v and isinstance(v, six.string_types) and r'\n' in v:
lines = v.strip().split(r'\n')
col1 = k
for line in lines:
pt.add_row([col1, line])
col1 = ''
else:
pt.add_row([k, v])
print(strutils.safe_encode(pt.get_string()))
def get_password(max_password_prompts=3):
"""Read password from TTY."""
verify = strutils.bool_from_string(env("OS_VERIFY_PASSWORD"))
pw = None
if hasattr(sys.stdin, "isatty") and sys.stdin.isatty():
# Check for Ctrl-D
try:
for __ in moves.range(max_password_prompts):
pw1 = getpass.getpass("OS Password: ")
if verify:
pw2 = getpass.getpass("Please verify: ")
else:
pw2 = pw1
if pw1 == pw2 and pw1:
pw = pw1
break
except EOFError:
pass
return pw
def find_resource(manager, name_or_id, **find_args):
"""Look for resource in a given manager.
Used as a helper for the _find_* methods.
Example:
def _find_hypervisor(cs, hypervisor):
#Get a hypervisor by name or ID.
return cliutils.find_resource(cs.hypervisors, hypervisor)
"""
# first try to get entity as integer id
try:
return manager.get(int(name_or_id))
except (TypeError, ValueError, exceptions.NotFound):
pass
# now try to get entity as uuid
try:
tmp_id = strutils.safe_encode(name_or_id)
if uuidutils.is_uuid_like(tmp_id):
return manager.get(tmp_id)
except (TypeError, ValueError, exceptions.NotFound):
pass
# for str id which is not uuid
if getattr(manager, 'is_alphanum_id_allowed', False):
try:
return manager.get(name_or_id)
except exceptions.NotFound:
pass
try:
try:
return manager.find(human_id=name_or_id, **find_args)
except exceptions.NotFound:
pass
# finally try to find entity by name
try:
resource = getattr(manager, 'resource_class', None)
name_attr = resource.NAME_ATTR if resource else 'name'
kwargs = {name_attr: name_or_id}
kwargs.update(find_args)
return manager.find(**kwargs)
except exceptions.NotFound:
msg = _("No %(name)s with a name or "
"ID of '%(name_or_id)s' exists.") % \
{
"name": manager.resource_class.__name__.lower(),
"name_or_id": name_or_id
}
raise exceptions.CommandError(msg)
except exceptions.NoUniqueMatch:
msg = _("Multiple %(name)s matches found for "
"'%(name_or_id)s', use an ID to be more specific.") % \
{
"name": manager.resource_class.__name__.lower(),
"name_or_id": name_or_id
}
raise exceptions.CommandError(msg)
def service_type(stype):
"""Adds 'service_type' attribute to decorated function.
Usage:
@service_type('volume')
def mymethod(f):
...
"""
def inner(f):
f.service_type = stype
return f
return inner
def get_service_type(f):
"""Retrieves service type from function."""
return getattr(f, 'service_type', None)
def pretty_choice_list(l):
return ', '.join("'%s'" % i for i in l)
def exit(msg=''):
if msg:
        print(msg, file=sys.stderr)
sys.exit(1)
| {
"content_hash": "47c14870727aa9da9b9f5840568a2fdf",
"timestamp": "",
"source": "github",
"line_count": 291,
"max_line_length": 77,
"avg_line_length": 29.213058419243985,
"alnum_prop": 0.5864015998117869,
"repo_name": "ytsarev/rally",
"id": "a3ce8cc4b1167f7db762e69268a6b3ab5d1a5b63",
"size": "9219",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rally/openstack/common/cliutils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "984256"
},
{
"name": "Shell",
"bytes": "14201"
}
],
"symlink_target": ""
} |
"""
MySQL Connector/Python - MySQL driver written in Python
"""
# Python Db API v2
apilevel = '2.0'
threadsafety = 1
paramstyle = 'pyformat'
from .connection import MySQLConnection
from .errors import (
Error, Warning, InterfaceError, DatabaseError,
NotSupportedError, DataError, IntegrityError, ProgrammingError,
OperationalError, InternalError, custom_error_exception)
from .constants import FieldFlag, FieldType, CharacterSet,\
RefreshOption, ClientFlag
from .dbapi import *
def Connect(*args, **kwargs):
    """Shortcut for creating a MySQLConnection object."""
return MySQLConnection(*args, **kwargs)
connect = Connect
__all__ = [
'MySQLConnection', 'Connect', 'custom_error_exception',
# Some useful constants
'FieldType','FieldFlag','ClientFlag','CharacterSet','RefreshOption',
# Error handling
'Error','Warning',
'InterfaceError','DatabaseError',
'NotSupportedError','DataError','IntegrityError','ProgrammingError',
'OperationalError','InternalError',
# DBAPI PEP 249 required exports
'connect','apilevel','threadsafety','paramstyle',
'Date', 'Time', 'Timestamp', 'Binary',
'DateFromTicks', 'DateFromTicks', 'TimestampFromTicks',
'STRING', 'BINARY', 'NUMBER',
'DATETIME', 'ROWID',
]
| {
"content_hash": "7524301ebacc6d24bae5c2250ec4fb00",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 72,
"avg_line_length": 30.547619047619047,
"alnum_prop": 0.6991426344505066,
"repo_name": "rcosnita/fantastico",
"id": "77b11b4db93b3d4c8bb9573e7287cee5b6fa1136",
"size": "2413",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "virtual_env/libs/mysql-connector/python3/mysql/connector/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "6802"
},
{
"name": "Python",
"bytes": "2168052"
},
{
"name": "Shell",
"bytes": "13309"
}
],
"symlink_target": ""
} |
import sys
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
from .._vendor import _convert_request
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(**kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-01-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-01-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/providers/Microsoft.Cache/operations")
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class Operations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.redisenterprise.RedisEnterpriseManagementClient`'s
:attr:`operations` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(self, **kwargs: Any) -> Iterable["_models.Operation"]:
"""Lists all of the available REST API operations of the Microsoft.Cache provider.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Operation or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.redisenterprise.models.Operation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-01-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.OperationListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("OperationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list.metadata = {"url": "/providers/Microsoft.Cache/operations"}
| {
"content_hash": "4e3f8205b5a534e21bbf4175174cc824",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 113,
"avg_line_length": 41,
"alnum_prop": 0.6354216483341304,
"repo_name": "Azure/azure-sdk-for-python",
"id": "d5c99c1979cffc83c79f0cb7cb88747a122eac0e",
"size": "6773",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/redisenterprise/azure-mgmt-redisenterprise/azure/mgmt/redisenterprise/operations/_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""
line.client
~~~~~~~~~~~
LineClient for sending and receiving messages from the LINE server.
:copyright: (c) 2014 by Taehoon Kim.
:license: BSD, see LICENSE for more details.
"""
import rsa
import requests
try:
import simplejson as json
except ImportError:
import json
from thrift.transport import TTransport
from thrift.transport import TSocket
from thrift.transport import THttpClient
from thrift.protocol import TCompactProtocol
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
#from curve import CurveThrift
from curve import CurveThrift
from curve.ttypes import TalkException
from curve.ttypes import ToType, ContentType
class LineAPI(object):
    """This class is a wrapper around the LINE API
"""
LINE_DOMAIN = "http://gd2.line.naver.jp"
LINE_HTTP_URL = LINE_DOMAIN + "/api/v4/TalkService.do"
LINE_HTTP_IN_URL = LINE_DOMAIN + "/P4"
LINE_CERTIFICATE_URL = LINE_DOMAIN + "/Q"
LINE_SESSION_LINE_URL = LINE_DOMAIN + "/authct/v1/keys/line"
LINE_SESSION_NAVER_URL = LINE_DOMAIN + "/authct/v1/keys/naver"
CERT_FILE = ".line.crt"
ip = "127.0.0.1"
version = "5.1.2"
com_name = ""
revision = 0
certificate = ""
_session = requests.session()
_headers = {}
def ready(self):
"""
        After login, create the `client` and `client_in` instances used
        to communicate with the LINE server
"""
self.transport = THttpClient.THttpClient(self.LINE_HTTP_URL)
self.transport_in = THttpClient.THttpClient(self.LINE_HTTP_IN_URL)
self.transport.setCustomHeaders(self._headers)
self.transport_in.setCustomHeaders(self._headers)
self.protocol = TCompactProtocol.TCompactProtocol(self.transport)
self.protocol_in = TCompactProtocol.TCompactProtocol(self.transport_in)
self._client = CurveThrift.Client(self.protocol)
self._client_in = CurveThrift.Client(self.protocol_in)
self.transport.open()
self.transport_in.open()
def updateAuthToken(self):
"""
After login, update authToken to avoid expiration of
        authToken. This method skips the PinCode validation step.
"""
if self.certificate:
self.login()
self.tokenLogin()
return True
else:
self.raise_error("You need to login first. There is no valid certificate")
def tokenLogin(self):
self.transport = THttpClient.THttpClient(self.LINE_HTTP_URL)
self.transport.setCustomHeaders(self._headers)
self.protocol = TCompactProtocol.TCompactProtocol(self.transport)
self._client = CurveThrift.Client(self.protocol)
def login(self):
"""Login to LINE server."""
if self.provider == CurveThrift.Provider.LINE: # LINE
j = self._get_json(self.LINE_SESSION_LINE_URL)
else: # NAVER
j = self._get_json(self.LINE_SESSION_NAVER_URL)
session_key = j['session_key']
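        # Build a length-prefixed credential blob (session key, id, password) and
        # RSA-encrypt it with the public key returned by the session endpoint.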
message = (chr(len(session_key)) + session_key +
chr(len(self.id)) + self.id +
chr(len(self.password)) + self.password).encode('utf-8')
keyname, n, e = j['rsa_key'].split(",")
pub_key = rsa.PublicKey(int(n,16), int(e,16))
crypto = rsa.encrypt(message, pub_key).encode('hex')
self.transport = THttpClient.THttpClient(self.LINE_HTTP_URL)
self.transport.setCustomHeaders(self._headers)
self.protocol = TCompactProtocol.TCompactProtocol(self.transport)
self._client = CurveThrift.Client(self.protocol)
try:
with open(self.CERT_FILE,'r') as f:
self.certificate = f.read()
f.close()
except:
self.certificate = ""
msg = self._client.loginWithIdentityCredentialForCertificate(
self.id, self.password, keyname, crypto, True, self.ip,
self.com_name, self.provider, self.certificate)
if msg.type == 1:
self.certificate = msg.certificate
self.authToken = self._headers['X-Line-Access'] = msg.authToken
elif msg.type == 2:
msg = "require QR code"
self.raise_error(msg)
elif msg.type == 3:
self._headers['X-Line-Access'] = msg.verifier
self._pinCode = msg.pinCode
            print "Enter PinCode '%s' on your mobile phone within 2 minutes"\
% self._pinCode
j = self.get_json(self.LINE_CERTIFICATE_URL)
self.verifier = j['result']['verifier']
msg = self._client.loginWithVerifierForCertificate(self.verifier)
if msg.type == 1:
if msg.certificate is not None:
with open(self.CERT_FILE,'w') as f:
f.write(msg.certificate)
self.certificate = msg.certificate
if msg.authToken is not None:
self.authToken = self._headers['X-Line-Access'] = msg.authToken
return True
else:
return False
else:
msg = "Require device confirm"
self.raise_error(msg)
#raise Exception("Code is removed because of the request of LINE corporation")
else:
self.authToken = self._headers['X-Line-Access'] = msg.authToken
return True
    def get_json(self, url):
        """Get JSON from the given url using the saved session and headers"""
return json.loads(self._session.get(url, headers=self._headers).text)
def _getProfile(self):
"""Get profile information
:returns: Profile object
- picturePath
- displayName
- phone (base64 encoded?)
- allowSearchByUserid
- pictureStatus
- userid
- mid # used for unique id for account
- phoneticName
- regionCode
- allowSearchByEmail
- email
- statusMessage
"""
return self._client.getProfile()
def _getAllContactIds(self):
"""Get all contacts of your LINE account"""
return self._client.getAllContactIds()
def _getBlockedContactIds(self):
"""Get all blocked contacts of your LINE account"""
return self._client.getBlockedContactIds()
def _getContacts(self, ids):
"""Get contact information list from ids
:returns: List of Contact list
- status
- capableVideoCall
            - displayName
- settings
- pictureStatus
- capableVoiceCall
- capableBuddy
- mid
- displayNameOverridden
- relation
- thumbnailUrl
- createdTime
            - favoriteTime
- capableMyhome
- attributes
- type
- phoneticName
- statusMessage
"""
if type(ids) != list:
msg = "argument should be list of contact ids"
self.raise_error(msg)
return self._client.getContacts(ids)
def _findAndAddContactsByMid(self, mid, seq=0):
"""Find and add contacts by Mid"""
return self._client.findAndAddContactsByMid(seq, mid)
def _findContactByUserid(self, userid):
"""Find contacts by Userid"""
return self._client.findContactByUserid(userid)
def _findAndAddContactsByUserid(self, userid, seq=0):
"""Find and add contacts by Userid"""
return self._client.findAndAddContactsByUserid(seq, userid)
def _findContactsByPhone(self, phones):
"""Find contacts by phone"""
return self._client.findContactsByPhone(phones)
def _findAndAddContactsByPhone(self, phones, seq=0):
"""Find and add contacts by phone"""
return self._client.findAndAddContactsByPhone(seq, phones)
def _findContactsByEmail(self, emails):
"""Find contacts by email"""
return self._client.findContactsByEmail(emails)
def _findAndAddContactsByEmail(self, emails, seq=0):
"""Find and add contacts by email"""
return self._client.findAndAddContactsByEmail(seq, emails)
def _createRoom(self, ids, seq=0):
"""Create a chat room"""
return self._client.createRoom(seq, ids)
def _getRoom(self, id):
"""Get a chat room"""
return self._client.getRoom(id)
def _inviteIntoRoom(self, roomId, contactIds=[]):
"""Invite contacts into room"""
return self._client.inviteIntoRoom(0, roomId, contactIds)
def _leaveRoom(self, id):
"""Leave a chat room"""
return self._client.leaveRoom(0, id)
def _createGroup(self, name, ids, seq=0):
"""Create a group"""
return self._client.createGroup(seq, name, ids)
def _getGroups(self, ids):
"""Get a list of group with ids"""
if type(ids) != list:
msg = "argument should be list of group ids"
self.raise_error(msg)
return self._client.getGroups(ids)
    def _getGroupIdsJoined(self):
        """Get the ids of the groups you have joined"""
return self._client.getGroupIdsJoined()
    def _getGroupIdsInvited(self):
        """Get the ids of the groups you have been invited to"""
return self._client.getGroupIdsInvited()
def _acceptGroupInvitation(self, groupId, seq=0):
"""Accept a group invitation"""
return self._client.acceptGroupInvitation(seq, groupId)
def _cancelGroupInvitation(self, groupId, contactIds=[], seq=0):
"""Cancel a group invitation"""
return self._client.cancelGroupInvitation(seq, groupId, contactIds)
def _inviteIntoGroup(self, groupId, contactIds=[], seq=0):
"""Invite contacts into group"""
return self._client.inviteIntoGroup(seq, groupId, contactIds)
def _leaveGroup(self, id):
"""Leave a group"""
return self._client.leaveGroup(0, id)
def _getRecentMessages(self, id, count=1):
"""Get recent messages from `id`"""
return self._client.getRecentMessages(id, count)
def _sendMessage(self, message, seq=0):
"""Send a message to `id`. `id` could be contact id or group id
:param message: `message` instance
"""
return self._client.sendMessage(seq, message)
def _getLastOpRevision(self):
return self._client.getLastOpRevision()
def _fetchOperations(self, revision, count=50):
return self._client.fetchOperations(revision, count)
def _getMessageBoxCompactWrapUp(self, id):
try:
return self._client.getMessageBoxCompactWrapUp(id)
except:
return None
def _getMessageBoxCompactWrapUpList(self, start=1, count=50):
try:
return self._client.getMessageBoxCompactWrapUpList(start, count)
except Exception as e:
msg = e
self.raise_error(msg)
def raise_error(self, msg):
"""Error format"""
raise Exception("Error: %s" % msg)
    def _get_json(self, url):
        """Get JSON from the given url using the saved session and headers"""
return json.loads(self._session.get(url, headers=self._headers).text)
def post_content(self, url, data=None, files=None):
return self._session.post(url, headers=self._headers, data=data, files=files)
| {
"content_hash": "48a32f3a81b8102e6490de17e226cc21",
"timestamp": "",
"source": "github",
"line_count": 339,
"max_line_length": 90,
"avg_line_length": 34.46607669616519,
"alnum_prop": 0.5890961999315303,
"repo_name": "unixz/line-max",
"id": "d8092a8601adc2b8e9952f95fb2715beff9274d9",
"size": "11708",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "line/api.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "101"
},
{
"name": "Python",
"bytes": "42736"
}
],
"symlink_target": ""
} |
import multiprocessing
import warnings
import six
from chainer.backends import cuda
from chainer.dataset import convert
from chainer import reporter
from chainer.training.updaters import standard_updater
try:
from cupy.cuda import nccl
_available = True
except ImportError:
_available = False
import numpy
class _Worker(multiprocessing.Process):
def __init__(self, proc_id, pipe, master):
super(_Worker, self).__init__()
self.proc_id = proc_id
self.pipe = pipe
self.converter = master.converter
self.model = master._master
self.device = master._devices[proc_id]
self.iterator = master._mpu_iterators[proc_id]
self.n_devices = len(master._devices)
def setup(self):
_, comm_id = self.pipe.recv()
self.comm = nccl.NcclCommunicator(self.n_devices, comm_id,
self.proc_id)
self.model.to_gpu(self.device)
self.reporter = reporter.Reporter()
self.reporter.add_observer('main', self.model)
self.reporter.add_observers('main',
self.model.namedlinks(skipself=True))
def run(self):
dev = cuda.Device(self.device)
dev.use()
self.setup()
gp = None
while True:
job, data = self.pipe.recv()
if job == 'finalize':
dev.synchronize()
break
if job == 'update':
# For reducing memory
self.model.cleargrads()
batch = self.converter(self.iterator.next(), self.device)
observation = {}
with self.reporter.scope(observation):
loss = _calc_loss(self.model, batch)
self.model.cleargrads()
loss.backward()
del loss
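                # Flatten this replica's gradients into one buffer and sum-reduce
                # them onto the master device (rank 0) over NCCL.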
gg = gather_grads(self.model)
nccl_data_type = _get_nccl_data_type(gg.dtype)
null_stream = cuda.Stream.null
self.comm.reduce(gg.data.ptr, gg.data.ptr, gg.size,
nccl_data_type, nccl.NCCL_SUM, 0,
null_stream.ptr)
del gg
self.model.cleargrads()
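                # Receive the updated parameters broadcast by the master and copy
                # them back into this replica so all devices stay in sync.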
gp = gather_params(self.model)
nccl_data_type = _get_nccl_data_type(gp.dtype)
self.comm.bcast(gp.data.ptr, gp.size, nccl_data_type, 0,
null_stream.ptr)
scatter_params(self.model, gp)
gp = None
class MultiprocessParallelUpdater(standard_updater.StandardUpdater):
"""Implementation of a multiprocess parallel GPU Updater.
This is an implementation of :class:`Updater` that uses multiple GPUs
with multi-process data parallelism. It uses Nvidia NCCL for communication
between multiple GPUs.
It behaves similarly to
:class:`~chainer.training.updaters.StandardUpdater`.
The update routine is modified to support data-parallel
computation on multiple GPUs in one machine.
It is based on synchronous parallel SGD: it
parallelizes the gradient computation over a mini-batch, and updates the
parameters only in the main device.
It does not transfer the values collected by :class:`Reporter` in the sub
devices to the main device. So you can only see the reported values in
the main device.
Args:
        iterators: List of dataset iterators for the training dataset. The
            number of iterators must be the same as the number of GPUs you use.
optimizer: Optimizer to update parameters. The model should be attached
to the optimizer.
converter: Converter function to build input arrays. Each batch
extracted by the iterator is split equally between the devices and
then passed with corresponding ``device`` option to this function.
:func:`~chainer.dataset.concat_examples` is used by default.
devices: Dictionary or list of devices to which the training data is
sent. The master device will be the first one in the list or the
value attached to the key ``'main'``.
auto_new_epoch (bool): If ``True``,
:meth:`~chainer.Optimizer.new_epoch` of the main optimizer is
automatically called when the ``is_new_epoch`` attribute of the
main iterator is ``True``.
"""
def __init__(self, iterators, optimizer, converter=convert.concat_examples,
devices=None, auto_new_epoch=True):
if not MultiprocessParallelUpdater.available():
raise Exception(
'NCCL is not enabled. MultiprocessParallelUpdater '
'requires NCCL.\n'
'Please reinstall CuPy after you install NCCL.\n'
'(see https://docs-cupy.chainer.org/en/latest/install.html)')
try:
cuda.cupy.cuda.driver.ctxGetCurrent()
_cuda_initialized = True
except cuda.cupy.cuda.driver.CUDADriverError:
# The context is not initialized, it will be fine.
_cuda_initialized = False
if _cuda_initialized:
raise RuntimeError(
'The CUDA context has been already initialized. '
'MultiprocessParallelUpdater assumes the context is '
'uninitialized. Please do not call CUDA API before '
'MultiprocessParallelUpdater creates processes.')
assert len(iterators) == len(devices)
for iterator in iterators[1:]:
assert len(iterator.dataset) == len(iterators[0].dataset)
# Correct optimizer parameters for new minibatch size
optim = optimizer.__class__.__name__
if optim in ('Adam', 'AdaGrad', 'RMSprop'):
optimizer.eps *= len(devices)
warnings.warn('optimizer.eps is changed to {} '
'by MultiprocessParallelUpdater for new batch size.'.
format(optimizer.eps))
elif optim in ('RMSpropGraves', 'AdaDelta'):
optimizer.eps *= len(devices) ** 2 # not quite right for AdaDelta
warnings.warn('optimizer.eps is changed to {} '
'by MultiprocessParallelUpdater for new batch size.'.
format(optimizer.eps))
elif hasattr(optimizer, 'lr'):
optimizer.lr /= len(devices)
warnings.warn('optimizer.lr is changed to {} '
'by MultiprocessParallelUpdater for new batch size.'.
format(optimizer.lr))
super(MultiprocessParallelUpdater, self).__init__(
iterator=iterators[0],
optimizer=optimizer,
converter=converter,
auto_new_epoch=auto_new_epoch,
)
if isinstance(devices, dict):
devices = devices.copy()
main = devices.pop('main')
devices = list(six.itervalues(devices))
devices = [main] + devices
elif isinstance(devices, (list, tuple)):
devices = list(devices)
else:
raise ValueError(
'devices argument should be either dict, list or tuple,'
' but {} was given.'.format(type(devices)))
if devices is None or any(device is None for device in devices):
raise ValueError('must specify GPU devices')
self._master = optimizer.target
self._devices = devices
self._mpu_iterators = iterators
self._initialized = False
self._pipes = []
self._workers = []
self.comm = None
@staticmethod
def available():
return _available
def _send_message(self, message):
for pipe in self._pipes:
pipe.send(message)
def setup_workers(self):
if self._initialized:
return
self._initialized = True
self._master.cleargrads()
for i in six.moves.range(1, len(self._devices)):
pipe, worker_end = multiprocessing.Pipe()
worker = _Worker(i, worker_end, self)
worker.start()
self._workers.append(worker)
self._pipes.append(pipe)
with cuda.Device(self._devices[0]):
self._master.to_gpu(self._devices[0])
if len(self._devices) > 1:
comm_id = nccl.get_unique_id()
self._send_message(("set comm_id", comm_id))
self.comm = nccl.NcclCommunicator(len(self._devices),
comm_id, 0)
def update_core(self):
self.setup_workers()
self._send_message(('update', None))
with cuda.Device(self._devices[0]):
# For reducing memory
self._master.cleargrads()
optimizer = self.get_optimizer('main')
iterator = self.get_iterator('main')
batch = iterator.next()
batch = self.converter(batch, self._devices[0])
loss = _calc_loss(self._master, batch)
self._master.cleargrads()
loss.backward()
# NCCL: reduce grads
null_stream = cuda.Stream.null
if self.comm is not None:
gg = gather_grads(self._master)
nccl_data_type = _get_nccl_data_type(gg.dtype)
self.comm.reduce(gg.data.ptr, gg.data.ptr, gg.size,
nccl_data_type, nccl.NCCL_SUM,
0, null_stream.ptr)
scatter_grads(self._master, gg)
del gg
optimizer.update()
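            # Broadcast the freshly updated master parameters to the worker processes.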
if self.comm is not None:
gp = gather_params(self._master)
nccl_data_type = _get_nccl_data_type(gp.dtype)
self.comm.bcast(gp.data.ptr, gp.size, nccl_data_type,
0, null_stream.ptr)
if self.auto_new_epoch and iterator.is_new_epoch:
optimizer.new_epoch(auto=True)
def finalize(self):
self._send_message(('finalize', None))
for worker in self._workers:
worker.join()
def _calc_loss(model, in_arrays):
if isinstance(in_arrays, tuple):
return model(*in_arrays)
elif isinstance(in_arrays, dict):
return model(**in_arrays)
else:
return model(in_arrays)
def size_num_grads(link):
"""Count total size of all gradient arrays of a given link
Args:
link (chainer.link.Link): Target link object.
"""
size = 0
num = 0
for param in link.params():
if param.size == 0:
continue
size += param.size
num += 1
return size, num
def _memcpy_gather():
return cuda.elementwise(
'raw T ptrs, raw X dtypes, raw Y info',
'raw float32 dst',
'''
int id_min = id_pre;
int id_max = num_src;
while (id_max - id_min > 1) {
int id = (id_max + id_min) / 2;
if (i < info[id]) id_max = id;
else id_min = id;
}
int id = id_min;
int i_dst = i;
int i_src = i;
if (id > 0) i_src -= info[id];
dst[i_dst] = 0;
if (ptrs[id] != NULL) {
if (dtypes[id] == 0) { // fp32
float *src = reinterpret_cast<float *>(ptrs[id]);
dst[i_dst] = src[i_src];
}
else { // fp16
float16 *src = reinterpret_cast<float16 *>(ptrs[id]);
dst[i_dst] = static_cast<float>(src[i_src]);
}
}
id_pre = id;
''',
'_memcpy_gather',
loop_prep='''
int num_src = info[0];
int id_pre = 0;
''')
def _gather(link, target):
size, num = size_num_grads(link)
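    # Per-parameter metadata for the copy kernel: raw device pointers, a dtype
    # flag (0 = float32, 1 = float16) and cumulative element offsets; info[0]
    # finally holds the number of source arrays.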
ptrs = numpy.empty(num, dtype=numpy.uint64)
dtypes = numpy.empty(num, dtype=numpy.int8)
info = numpy.empty(num + 1, dtype=numpy.int32)
info[0] = 0
i = 0
for _, param in sorted(link.namedparams()):
if param.size == 0:
continue
ptrs[i] = 0 # NULL pointer
d = getattr(param, target)
if d is not None:
ptrs[i] = d.data.ptr
dtypes[i] = 0 # fp32
if param.dtype == numpy.float16:
dtypes[i] = 1 # fp16
info[i + 1] = info[i] + param.size
i += 1
info[0] = num
ptrs = cuda.to_gpu(ptrs)
dtypes = cuda.to_gpu(dtypes)
info = cuda.to_gpu(info)
return _memcpy_gather()(ptrs, dtypes, info, size=size)
def gather_grads(link):
"""Put together all gradient arrays and make a single array
Args:
link (chainer.link.Link): Target link object.
Return:
cupy.ndarray
"""
if link.xp is numpy:
raise RuntimeError('gather_grads works only on GPU.')
return _gather(link, "grad")
def gather_params(link):
"""Put together all gradient arrays and make a single array
Args:
link (chainer.link.Link): Target link object.
Return:
cupy.ndarray
"""
if link.xp is numpy:
raise RuntimeError('Link.gather_params works only on GPU.')
return _gather(link, "data")
def _memcpy_scatter():
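    # Inverse of _memcpy_gather: for each source index i, find the owning
    # parameter array via binary search over ``info`` and write the value
    # back, demoting fp32 to fp16 where the destination requires it.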
return cuda.elementwise(
'raw T ptrs, raw X dtypes, raw Y info, raw float32 array',
'',
'''
int id_min = id_pre;
int id_max = num_src;
while (id_max - id_min > 1) {
int id = (id_max + id_min) / 2;
if (i < info[id]) id_max = id;
else id_min = id;
}
int id = id_min;
int i_src = i;
int i_dst = i;
if (id > 0) i_dst -= info[id];
if (ptrs[id] != NULL) {
if (dtypes[id] == 0) { // fp32
float *dst = reinterpret_cast<float *>(ptrs[id]);
dst[i_dst] = array[i_src];
}
else { // fp16
float16 *dst = reinterpret_cast<float16 *>(ptrs[id]);
dst[i_dst] = static_cast<float16>(array[i_src]);
}
}
id_pre = id;
''',
'_memcpy_scatter',
loop_prep='''
int num_src = info[0];
int id_pre = 0;
''')
def _scatter(link, array, target):
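    # Rebuild the same metadata arrays as _gather and launch the scatter
    # kernel to copy slices of the flat array back into each parameter's
    # grad/data buffer, allocating missing target arrays on the fly.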
size, num = size_num_grads(link)
ptrs = numpy.zeros(num, dtype=numpy.uint64)
dtypes = numpy.zeros(num, dtype=numpy.int8)
info = numpy.zeros(num + 1, dtype=numpy.int32)
info[0] = 0
i = 0
for _, param in sorted(link.namedparams()):
if param.size == 0:
continue
ptrs[i] = 0 # NULL pointer
d = getattr(param, target)
if d is None:
d = cuda.cupy.zeros(param.shape, dtype=param.dtype)
setattr(param, target, d)
ptrs[i] = d.data.ptr
dtypes[i] = 0 # fp32
if param.dtype == numpy.float16:
dtypes[i] = 1 # fp16
info[i + 1] = info[i] + param.size
i += 1
if i != num:
        raise ValueError('the number of parameter arrays does not match the expected count')
info[0] = num
ptrs = cuda.to_gpu(ptrs)
dtypes = cuda.to_gpu(dtypes)
info = cuda.to_gpu(info)
return _memcpy_scatter()(ptrs, dtypes, info, array, size=size)
def scatter_grads(link, array):
"""Put back contents of the specified array to the related gradient arrays
Args:
link (chainer.link.Link): Target link object.
array (cupy.ndarray): gathered array created by gather_grads()
"""
return _scatter(link, array, "grad")
def scatter_params(link, array):
"""Put back contents of the specified array to the related gradient arrays
Args:
link (chainer.link.Link): Target link object.
array (cupy.ndarray): gathered array created by gather_params()
"""
return _scatter(link, array, "data")
def _get_nccl_data_type(dtype):
"""Get data type for NCCL"""
if dtype == numpy.float32:
nccl_data_type = nccl.NCCL_FLOAT
elif dtype == numpy.float16:
nccl_data_type = nccl.NCCL_HALF
elif dtype == numpy.float64:
nccl_data_type = nccl.NCCL_DOUBLE
else:
raise RuntimeError('Unexpected data type:{}'.format(dtype))
return nccl_data_type
| {
"content_hash": "41f876b19ba39a56232d4c1e043ecc4f",
"timestamp": "",
"source": "github",
"line_count": 490,
"max_line_length": 79,
"avg_line_length": 33.512244897959185,
"alnum_prop": 0.5433895621460325,
"repo_name": "ktnyt/chainer",
"id": "3da8656f263ed2c21d15cb38b099a201865cf822",
"size": "16421",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "chainer/training/updaters/multiprocess_parallel_updater.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3368"
},
{
"name": "C",
"bytes": "70"
},
{
"name": "C++",
"bytes": "1440363"
},
{
"name": "CMake",
"bytes": "42822"
},
{
"name": "Cuda",
"bytes": "53858"
},
{
"name": "Dockerfile",
"bytes": "1242"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "5128330"
},
{
"name": "Shell",
"bytes": "19475"
}
],
"symlink_target": ""
} |
from .app import app
from .tags import *
from .images import *
import config
cfg = config.load()
if cfg.standalone is not False:
# If standalone mode is enabled (default), load the fake Index routes
from .index import *
| {
"content_hash": "6b34172d5cebe4050a7cb8f8de5e9da9",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 73,
"avg_line_length": 23,
"alnum_prop": 0.717391304347826,
"repo_name": "airbnb/docker-registry",
"id": "f8d94ed9d48b6c7dd0b6e162f4834ed51e1bdd29",
"size": "255",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "registry/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import unittest
import env
import hiisi
import h5py
import numpy as np
import uuid
import os
class Test(unittest.TestCase):
def setUp(self):
self.unique_attr_path = None
self.unique_attr_value = None
self.reoccuring_attr_paths = []
self.reoccuring_attr_items = []
self.dataset_paths = []
self.group_paths = ['/']
self.data_filename = 'hiisi_test_data.h5'
self.create_hdf5_test_data()
self.h5file = hiisi.HiisiHDF(self.data_filename, 'r')
print('run setUp')
def create_hdf5_test_data(self):
"""Creates random hdf5 file for testing
"""
n_branches = 3
n_datasets = 3
unique_attr = uuid.uuid1().hex
reoccuring_attr = [uuid.uuid1().hex for x in range(n_branches)]
dataset_data = np.zeros((3,3))
h5f = h5py.File(self.data_filename, 'w')
for i in range(n_branches):
group_path = '/branch{}'.format(i)
self.group_paths.append(group_path)
branch = h5f.create_group(group_path)
branch.attrs['reoccuring_attr'] = reoccuring_attr[i]
self.reoccuring_attr_paths.append(branch.name)
self.reoccuring_attr_items.append((branch.name, reoccuring_attr[i]))
for j in range(n_datasets):
dataset_name='/branch{}/data{}/dataset'.format(i, j)
self.group_paths.append('/branch{}/data{}'.format(i, j))
dataset = h5f.create_dataset(dataset_name, data=np.int8(dataset_data), dtype='int8')
self.dataset_paths.append(dataset.name)
if i==1 and j==1:
dataset.attrs['unique_attr'] = unique_attr
self.unique_attr_path = dataset.name
self.unique_attr_value = unique_attr
h5f.close()
def tearDown(self):
os.remove(self.data_filename)
def test_is_unique_attribute_true(self):
self.assertTrue(self.h5file.is_unique_attr('unique_attr'))
def test_is_unique_attribute_false(self):
self.assertFalse(self.h5file.is_unique_attr('reoccuring_attr'))
self.assertFalse(self.h5file.is_unique_attr('not_existing_attr'))
def test_attr_exists_true(self):
self.assertTrue(self.h5file.attr_exists('unique_attr'))
def test_attr_exists_false(self):
self.assertFalse(self.h5file.attr_exists('not_existing_attr'))
def test_datasets(self):
assert list(self.h5file.datasets()) == self.dataset_paths
def test_datasets_no_datasets_found(self):
with hiisi.HiisiHDF('tmp.h5', 'w') as h5f:
assert list(h5f.datasets()) == []
os.remove('tmp.h5')
def test_groups(self):
assert list(self.h5file.groups()) == self.group_paths
def test_groups_no_groups_found(self):
with hiisi.HiisiHDF('tmp.h5', 'w') as h5f:
            assert list(h5f.groups()) == ['/']
os.remove('tmp.h5')
def test_attr_gen(self):
attr_gen = self.h5file.attr_gen('reoccuring_attr')
attr_items = []
for i in attr_gen:
attr_items.append((i.path, i.value))
assert attr_items == self.reoccuring_attr_items
def test_attr_gen_no_match(self):
attr_gen = self.h5file.attr_gen('not_existing_attr')
with self.assertRaises(StopIteration):
next(attr_gen)
def test_create_from_filedict_new_file(self):
filename = 'create_from_filedict_test.h5'
with hiisi.HiisiHDF(filename, 'w') as h5f:
file_dict = {}
file_dict['/'] = {'A':1, 'B':2}
file_dict['/dataset1/data1/data'] = {'DATASET':np.arange(9).reshape((3,3)), 'C':'c'}
file_dict['/dataset1/data1/what'] = {'D':123}
h5f.create_from_filedict(file_dict)
with hiisi.HiisiHDF(filename, 'r') as h5f:
assert h5f['/'].attrs['A'] == 1
assert h5f['/'].attrs['B'] == 2
assert h5f['/dataset1/data1/data'].attrs['C'] == 'c'
np.testing.assert_array_equal(h5f['/dataset1/data1/data'][:], np.arange(9).reshape((3,3)))
assert h5f['/dataset1/data1/what'].attrs['D'] == 123
os.remove(filename)
def test_create_from_filedict_append_new_group(self):
filename = './create_from_filedict_test.h5'
# Create the file
with hiisi.HiisiHDF(filename, 'w') as h5f:
file_dict = {}
file_dict['/'] = {'A':1, 'B':2}
file_dict['/dataset1/data1/data'] = {'DATASET':np.arange(9).reshape((3,3)), 'C':'c'}
file_dict['/dataset1/data1/what'] = {'D':123}
h5f.create_from_filedict(file_dict)
# Append the file created above
with hiisi.HiisiHDF(filename, 'a') as h5f:
h5f.create_from_filedict({'/added_group':{'attr1':1}})
# Check the results
with hiisi.HiisiHDF(filename, 'r') as h5f:
assert h5f['/'].attrs['A'] == 1
assert h5f['/'].attrs['B'] == 2
assert h5f['/dataset1/data1/data'].attrs['C'] == 'c'
np.testing.assert_array_equal(h5f['/dataset1/data1/data'][:], np.arange(9).reshape((3,3)))
assert h5f['/dataset1/data1/what'].attrs['D'] == 123
assert h5f['/added_group'].attrs['attr1'] == 1
os.remove(filename)
def test_create_from_filedict_modify_existing_content(self):
filename = 'create_from_filedict_test.h5'
# Create the file
with hiisi.HiisiHDF(filename, 'w') as h5f:
file_dict = {}
file_dict['/'] = {'A':1, 'B':2}
file_dict['/dataset1/data1/data'] = {'DATASET':np.arange(9).reshape((3,3)), 'C':'c'}
file_dict['/dataset1/data1/what'] = {'D':123}
h5f.create_from_filedict(file_dict)
# Append the file created above
with hiisi.HiisiHDF(filename, 'a') as h5f:
h5f.create_from_filedict({'/dataset1/data1/data':{'C':'new_value'}})
# Check the results
with hiisi.HiisiHDF(filename, 'r') as h5f:
assert h5f['/'].attrs['A'] == 1
assert h5f['/'].attrs['B'] == 2
assert h5f['/dataset1/data1/data'].attrs['C'] == 'new_value'
np.testing.assert_array_equal(h5f['/dataset1/data1/data'][:], np.arange(9).reshape((3,3)))
assert h5f['/dataset1/data1/what'].attrs['D'] == 123
os.remove(filename)
def test_search_no_match(self):
assert [] == list(self.h5file.search('madeupkey', 'xyz'))
def test_search_single_match(self):
assert [self.unique_attr_path] == list(self.h5file.search('unique_attr', self.unique_attr_value))
def test_search_multiple_matches(self):
filename = 'test_search_multiple_matches.h5'
with hiisi.HiisiHDF(filename, 'w') as h5f:
groups = ['/group1', '/group2', '/basegroup/group1']
for g in groups:
group = h5f.create_group(g)
group.attrs['attribute'] = 'attribute'
with hiisi.HiisiHDF(filename, 'r') as h5f:
assert sorted(groups) == sorted(list(h5f.search('attribute', 'attribute')))
os.remove(filename)
def test_search_numerical_attribute_within_tolerance(self):
filename = 'test_search_numerical_attribute.h5'
with hiisi.HiisiHDF(filename, 'w') as h5f:
group = h5f.create_group('/group1')
group.attrs['attribute'] = 7.3
group = h5f.create_group('/group2')
group.attrs['attribute'] = 0.1001
group = h5f.create_group('/basegroup/group1')
group.attrs['attribute'] = 0.5245
with hiisi.HiisiHDF(filename, 'r') as h5f:
assert ['/basegroup/group1'] == list(h5f.search('attribute', 0.5, 0.1))
os.remove(filename)
def test_search_numerical_attribute_outside_tolerance(self):
filename = 'test_search_numerical_attribute.h5'
with hiisi.HiisiHDF(filename, 'w') as h5f:
group = h5f.create_group('/group1')
group.attrs['attribute'] = 7.3
group = h5f.create_group('/group2')
group.attrs['attribute'] = 0.1001
group = h5f.create_group('/basegroup/group1')
group.attrs['attribute'] = 0.5245
with hiisi.HiisiHDF(filename,'r') as h5f:
assert [] == list(h5f.search('attribute', 7, 0.1))
os.remove(filename)
if __name__=='__main__':
unittest.main()
| {
"content_hash": "31037b50cc28925c68082b416d858ec1",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 105,
"avg_line_length": 41.677725118483416,
"alnum_prop": 0.5499204002729133,
"repo_name": "karjaljo/hiisi",
"id": "8e2f837e456438794e0b14e05b4e45e4221a0667",
"size": "8818",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_hiisi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40903"
}
],
"symlink_target": ""
} |
import unittest
import time
import Queue
import logging
import sys
import xmlrunner
sys.path.append('../space_machines')
sys.path.append('space_machines')
from door import DoorState
class DoorTests(unittest.TestCase):
def setUp(self):
logging.basicConfig(level=logging.WARN)
self.doorstate = DoorState()
self.doorstate.setup(None, name="TEST_DOOR")
self.doorstate.start()
def test_quick_open_close(self):
self.assertEqual(self.doorstate.current_state(), "CLOSED_LOCKED")
self.doorstate.send_message({"event": "VALID_KEY"})
time.sleep(0.1)
self.assertEqual(self.doorstate.current_state(), "CLOSED_UNLOCKING")
self.doorstate.send_message({"event":"DOOR_OPENED"})
time.sleep(0.1)
self.assertEqual(self.doorstate.current_state(), "OPEN_UNLOCKING")
self.doorstate.send_message({"event":"DOOR_CLOSED"})
time.sleep(0.1)
self.assertEqual(self.doorstate.current_state(), "CLOSED_LOCKED")
def test_open_wait_close(self):
self.assertEqual(self.doorstate.current_state(), "CLOSED_LOCKED")
self.doorstate.send_message({"event": "VALID_KEY"})
time.sleep(2)
self.assertEqual(self.doorstate.current_state(), "CLOSED_UNLOCKING")
self.doorstate.send_message({"event":"DOOR_OPENED"})
time.sleep(0.2)
self.assertEqual(self.doorstate.current_state(), "OPEN_UNLOCKING")
time.sleep(2)
self.assertEqual(self.doorstate.current_state(), "OPEN_LOCKED")
self.doorstate.send_message({"event":"DOOR_CLOSED"})
time.sleep(2)
self.assertEqual(self.doorstate.current_state(), "CLOSED_LOCKED")
def test_open_longwait_close(self):
self.assertEqual(self.doorstate.current_state(), "CLOSED_LOCKED")
self.doorstate.send_message({"event": "VALID_KEY"})
time.sleep(2)
self.assertEqual(self.doorstate.current_state(), "CLOSED_UNLOCKING")
self.doorstate.send_message({"event":"DOOR_OPENED"})
time.sleep(20)
self.assertEqual(self.doorstate.current_state(), "STUCK_OPEN")
self.doorstate.send_message({"event":"DOOR_CLOSED"})
time.sleep(2)
self.assertEqual(self.doorstate.current_state(), "CLOSED_LOCKED")
def test_force_open(self):
self.assertEqual(self.doorstate.current_state(), "CLOSED_LOCKED")
self.doorstate.send_message({"event":"DOOR_OPENED"})
time.sleep(0.2)
self.assertEqual(self.doorstate.current_state(), "FORCED_OPEN")
self.doorstate.send_message({"event":"DOOR_CLOSED"})
time.sleep(0.2)
self.assertEqual(self.doorstate.current_state(), "CLOSED_LOCKED")
if __name__ == '__main__':
unittest.main(
testRunner=xmlrunner.XMLTestRunner(output='test-reports'),
# these make sure that some options that are not applicable
# remain hidden from the help menu.
failfast=False, buffer=False, catchbreak=False) | {
"content_hash": "e42b49e68a881223ef9674a369cb266f",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 72,
"avg_line_length": 38.81944444444444,
"alnum_prop": 0.7030411449016101,
"repo_name": "bvesperman/Sector67RaspberryPiAccessControl",
"id": "74ede237f27089b1d6e6d8efc4b886b13c2bb0aa",
"size": "2795",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "space_machines/tests/testdoor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "76491"
},
{
"name": "Shell",
"bytes": "1564"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
import django.db.models.deletion
import markupfield.fields
class Migration(migrations.Migration):
dependencies = [
("sponsors", "0019_sponsor_twitter_handle"),
]
operations = [
migrations.CreateModel(
name="StatementOfWork",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"status",
models.CharField(
choices=[
("draft", "Draft"),
("outdated", "Outdated"),
("approved review", "Approved by reviewer"),
("awaiting signature", "Awaiting signature"),
("executed", "Executed"),
("nullified", "Nullified"),
],
db_index=True,
default="draft",
max_length=20,
),
),
(
"revision",
models.PositiveIntegerField(default=0, verbose_name="Revision nº"),
),
(
"document",
models.FileField(
blank=True,
upload_to="sponsors/statements_of_work/",
verbose_name="Unsigned PDF",
),
),
(
"signed_document",
models.FileField(
blank=True,
upload_to="sponsors/statmentes_of_work/signed/",
verbose_name="Signed PDF",
),
),
("sponsor_info", models.TextField(verbose_name="Sponsor information")),
("sponsor_contact", models.TextField(verbose_name="Sponsor contact")),
("benefits_list", markupfield.fields.MarkupField(rendered_field=True)),
(
"benefits_list_markup_type",
models.CharField(
choices=[
("", "--"),
("html", "HTML"),
("plain", "Plain"),
("markdown", "Markdown"),
("restructuredtext", "Restructured Text"),
],
default="markdown",
editable=False,
max_length=30,
),
),
("legal_clauses", markupfield.fields.MarkupField(rendered_field=True)),
("_benefits_list_rendered", models.TextField(editable=False)),
(
"legal_clauses_markup_type",
models.CharField(
choices=[
("", "--"),
("html", "HTML"),
("plain", "Plain"),
("markdown", "Markdown"),
("restructuredtext", "Restructured Text"),
],
default="markdown",
editable=False,
max_length=30,
),
),
("created_on", models.DateField(auto_now_add=True)),
("_legal_clauses_rendered", models.TextField(editable=False)),
("last_update", models.DateField(auto_now=True)),
("sent_on", models.DateField(null=True)),
(
"sponsorship",
models.OneToOneField(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="statement_of_work",
to="sponsors.Sponsorship",
),
),
],
options={
"verbose_name": "Statement of Work",
"verbose_name_plural": "Statements of Work",
},
),
]
| {
"content_hash": "a89c0f59087298ff69de2d646263be16",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 87,
"avg_line_length": 38.66086956521739,
"alnum_prop": 0.36707152496626183,
"repo_name": "python/pythondotorg",
"id": "e451ee8f253bc6ad2764048cc0127afcd2018e3f",
"size": "4497",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "sponsors/migrations/0019_statementofwork.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "7686"
},
{
"name": "Dockerfile",
"bytes": "229"
},
{
"name": "HTML",
"bytes": "498813"
},
{
"name": "JavaScript",
"bytes": "24050"
},
{
"name": "Makefile",
"bytes": "1615"
},
{
"name": "PostScript",
"bytes": "19072"
},
{
"name": "Procfile",
"bytes": "105"
},
{
"name": "Python",
"bytes": "1145343"
},
{
"name": "Ruby",
"bytes": "1464"
},
{
"name": "SCSS",
"bytes": "198033"
}
],
"symlink_target": ""
} |
import neural_network as nn
# Define some constants that we'll want to have for our project.
LOG_DIR = "logs_dir"
TRAINING_FILE_NAME = 'train.csv'
TESTING_FILE_NAME = 'test.csv'
OUTPUT_FILE_NAME = 'results.csv'
# Load up our data but make sure we train on everything.
data = nn.NNData(TRAINING_FILE_NAME, TESTING_FILE_NAME, split_type=None)
# Setup our neural network and train it.
sess, merged_summary, writer = nn.setup(LOG_DIR)
nn.load_model(sess, 'dr')
# Finally, save the results of our actual use case.
nn.save_outputs(sess, data, OUTPUT_FILE_NAME, batch=int(data.output.length / 10))
# Acknowledge that we've saved our results.
print()
print()
print("======================")
print("======================")
print("Output saved to: {}".format(OUTPUT_FILE_NAME))
print("======================")
print("======================")
| {
"content_hash": "514a559786e86dda1acfbd8b62f31e5d",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 81,
"avg_line_length": 32.23076923076923,
"alnum_prop": 0.6455847255369929,
"repo_name": "GEMISIS/machine-learning",
"id": "334b1e80741465a4254b1b7f1a37d8aa2f3d9c4e",
"size": "838",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Kaggle/Digit Recognizer/runner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "111"
},
{
"name": "HTML",
"bytes": "650"
},
{
"name": "JavaScript",
"bytes": "4336"
},
{
"name": "Python",
"bytes": "97891"
}
],
"symlink_target": ""
} |
from fnmatch import filter
from sys import stderr
import argparse
import filecmp
import os
import subprocess
import sys
LINKTEST="./link_test"
ESCAPE="\033[%sm"
BOLD=ESCAPE % "1"
RED=ESCAPE % "31"
NORMAL=ESCAPE % "0"
FAILED=RED+"failed"+NORMAL
def find(dir, file_filter=None):
files = [walkdir[0]+"/"+file for walkdir in os.walk(dir) for file in walkdir[2]]
if file_filter != None:
files = filter(files, file_filter)
return files
def error(message):
stderr.write("Error: %s\n" % (message,))
def warn(message):
stderr.write("Warning: %s\n" % (message,))
def extract_functions(file):
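    # Split an assembly file into (name, text) pairs using the
    # "# -- Begin <name>" / "# -- End <name>" annotations; the returned text
    # includes the begin/end marker lines themselves.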
functions = []
in_function = None
for line in open(file):
if line.startswith("# -- Begin "):
if in_function != None:
warn("Missing end of function %s" % (in_function,))
funcname = line[12:-1]
in_function = funcname
text = line
elif line.startswith("# -- End "):
function_name = line[10:-1]
if in_function != function_name:
warn("End %s does not match begin %s" % (function_name, in_function))
else:
text += line
functions.append( (in_function, text) )
in_function = None
elif in_function != None:
text += line
return functions
def replace_function(file, function, replacement, dest):
out = open(dest, "w")
skip = False
found = False
in_function = None
for line in open(file):
if line.startswith("# -- Begin "):
if in_function != None:
warn("Missing end of function %s" % (in_function,))
funcname = line[12:-1]
in_function = funcname
if in_function == function:
out.write(replacement)
skip = True
elif line.startswith("# -- End "):
function_name = line[10:-1]
if in_function != function_name:
warn("End %s does not match begin %s" % (function_name, in_function))
in_function = None
if skip:
skip = False
continue
if not skip:
out.write(line)
def announce_test(name):
stderr.write("%s%s%s: " % (BOLD, name, NORMAL))
stderr.flush()
def announce_result(result, info):
stderr.write(result)
if info != "":
stderr.write(": %s" % info)
stderr.write("\n")
stderr.flush()
def testrun(files):
linkline="%s %s" % (LINKTEST, " ".join(files),)
res = subprocess.call(linkline, shell=True)
if res != 0:
announce_result(FAILED, "'%s' exitcode != 0" % LINKTEST)
return False
else:
announce_result("ok", "")
return True
def check_files():
"""Check files mode"""
for i in range(0, len(NO_PREFIX)):
f = NO_PREFIX[i]
b=baddir+"/"+f
if b not in BAD_FILES:
warn("There is no corresponding file to '%s' in %s" \
% (gooddir+"/"+f, baddir))
continue
announce_test(f + " [%s/%s]" % (i+1, len(NO_PREFIX)))
# combine files (everything from good except f)
testfiles=[]
skip=False
for c in NO_PREFIX:
badfile = baddir+"/"+c
goodfile = gooddir+"/"+c
if c == f:
testfiles.append(badfile)
if filecmp.cmp(goodfile, badfile):
announce_result("skipped", "same content")
skip = True
break
else:
testfiles.append(goodfile)
if skip:
continue
testrun(testfiles)
def check_functions_in_file(base, goodfile, badfile):
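    # Function-level bisection: swap one function at a time from the bad file
    # into the good file (writing the combination to /tmp/combined.s) and
    # re-run the link test together with the remaining good files.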
functions = extract_functions(goodfile)
if len(functions) == 0:
warn("Couldn't find any function in %s, missing annotations?" % (goodfile,))
return
badfunctions = dict(extract_functions(badfile))
    if len(badfunctions) == 0:
warn("Couldn't find any function in %s, missing annotations?" % (badfile,))
return
COMBINED="/tmp/combined.s"
i = 0
for (func,func_text) in functions:
announce_test(func + " [%s/%s]" % (i+1, len(functions)))
i+=1
if func not in badfunctions:
warn("Function '%s' missing from bad file" % func)
continue
if badfunctions[func] == func_text:
announce_result("skipped", "same content")
continue
replace_function(goodfile, func, badfunctions[func], COMBINED)
testfiles=[]
for c in NO_PREFIX:
if c == base:
testfiles.append(COMBINED)
continue
testfiles.append(gooddir + "/" + c)
testrun(testfiles)
parser = argparse.ArgumentParser()
parser.add_argument('--a', dest='dir_a', default='before')
parser.add_argument('--b', dest='dir_b', default='after')
parser.add_argument('--insane', help='Skip sanity check', action='store_true')
parser.add_argument('file', metavar='file', nargs='?')
config = parser.parse_args()
gooddir=config.dir_a
baddir=config.dir_b
BAD_FILES=find(baddir, "*")
GOOD_FILES=find(gooddir, "*")
NO_PREFIX=sorted([x[len(gooddir)+1:] for x in GOOD_FILES])
# "Checking whether build environment is sane ..."
if not config.insane:
announce_test("sanity check")
if not os.access(LINKTEST, os.X_OK):
error("Expect '%s' to be present and executable" % (LINKTEST,))
exit(1)
res = testrun(GOOD_FILES)
if not res:
# "build environment is grinning and holding a spatula. Guess not."
linkline="%s %s" % (LINKTEST, " ".join(GOOD_FILES),)
stderr.write("\n%s\n\n" % linkline)
stderr.write("Returned with exitcode != 0\n")
sys.exit(1)
if config.file is not None:
    # Function exchange mode: bisect functions inside the given file
goodfile = gooddir+"/"+config.file
badfile = baddir+"/"+config.file
check_functions_in_file(config.file, goodfile, badfile)
else:
    # File exchange mode: bisect whole files
check_files()
| {
"content_hash": "bf0402717ace0fee520c7dee20211584",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 85,
"avg_line_length": 31.27979274611399,
"alnum_prop": 0.5623654132847441,
"repo_name": "ensemblr/llvm-project-boilerplate",
"id": "ad6a3e0ea8d22d90eaf489ef5dcf382323ffe5b4",
"size": "8132",
"binary": false,
"copies": "29",
"ref": "refs/heads/master",
"path": "include/llvm/utils/abtest/abtest.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "32"
},
{
"name": "AppleScript",
"bytes": "1429"
},
{
"name": "Assembly",
"bytes": "15649629"
},
{
"name": "Awk",
"bytes": "1747037"
},
{
"name": "Batchfile",
"bytes": "34481"
},
{
"name": "Brainfuck",
"bytes": "284"
},
{
"name": "C",
"bytes": "85584624"
},
{
"name": "C#",
"bytes": "20737"
},
{
"name": "C++",
"bytes": "168418524"
},
{
"name": "CMake",
"bytes": "1174816"
},
{
"name": "CSS",
"bytes": "49900"
},
{
"name": "Cuda",
"bytes": "414703"
},
{
"name": "Emacs Lisp",
"bytes": "110018"
},
{
"name": "Forth",
"bytes": "1490"
},
{
"name": "Fortran",
"bytes": "356707"
},
{
"name": "GAP",
"bytes": "6167"
},
{
"name": "Go",
"bytes": "132137"
},
{
"name": "HTML",
"bytes": "1751124"
},
{
"name": "JavaScript",
"bytes": "141512"
},
{
"name": "LLVM",
"bytes": "62219250"
},
{
"name": "Limbo",
"bytes": "7437"
},
{
"name": "Logos",
"bytes": "1572537943"
},
{
"name": "Lua",
"bytes": "86606"
},
{
"name": "M",
"bytes": "2008"
},
{
"name": "M4",
"bytes": "109560"
},
{
"name": "Makefile",
"bytes": "616437"
},
{
"name": "Mathematica",
"bytes": "7845"
},
{
"name": "Matlab",
"bytes": "53817"
},
{
"name": "Mercury",
"bytes": "1194"
},
{
"name": "Mirah",
"bytes": "1079943"
},
{
"name": "OCaml",
"bytes": "407143"
},
{
"name": "Objective-C",
"bytes": "5910944"
},
{
"name": "Objective-C++",
"bytes": "1720450"
},
{
"name": "OpenEdge ABL",
"bytes": "690534"
},
{
"name": "PHP",
"bytes": "15986"
},
{
"name": "POV-Ray SDL",
"bytes": "19471"
},
{
"name": "Perl",
"bytes": "591927"
},
{
"name": "PostScript",
"bytes": "845774"
},
{
"name": "Protocol Buffer",
"bytes": "20013"
},
{
"name": "Python",
"bytes": "1895427"
},
{
"name": "QMake",
"bytes": "15580"
},
{
"name": "RenderScript",
"bytes": "741"
},
{
"name": "Roff",
"bytes": "94555"
},
{
"name": "Rust",
"bytes": "200"
},
{
"name": "Scheme",
"bytes": "2654"
},
{
"name": "Shell",
"bytes": "1144090"
},
{
"name": "Smalltalk",
"bytes": "144607"
},
{
"name": "SourcePawn",
"bytes": "1544"
},
{
"name": "Standard ML",
"bytes": "2841"
},
{
"name": "Tcl",
"bytes": "8285"
},
{
"name": "TeX",
"bytes": "320484"
},
{
"name": "Vim script",
"bytes": "17239"
},
{
"name": "Yacc",
"bytes": "163484"
}
],
"symlink_target": ""
} |
from unittest import TestCase
from Game.GameMode import GameMode
class TestGameMode(TestCase):
def setUp(self):
pass
def test_gm_new(self):
gm = GameMode(mode="test", priority=1)
self.assertIsInstance(gm, GameMode)
def test_gm_new_bad_priority(self):
with self.assertRaises(KeyError):
GameMode(mode="test")
def test_gm_new_no_mode(self):
with self.assertRaises(KeyError):
GameMode()
def test_gm_new_extra_params(self):
with self.assertRaises(TypeError):
GameMode(mode="test", priority=3, super_digits="*")
def test_gm_new_bad_mode(self):
with self.assertRaises(TypeError):
GameMode(mode="test", priority=3, digits="S")
def test_gm_property_priority(self):
gm = GameMode(mode="test", priority=3)
gm.priority = 5
self.assertEqual(gm.priority, 5)
def test_gm_property_priority_bad(self):
gm = GameMode(mode="test", priority=3)
with self.assertRaises(TypeError):
gm.priority = "X"
def test_gm_property_digits(self):
gm = GameMode(mode="test", priority=3)
gm.digits = 5
self.assertEqual(gm.digits, 5)
def test_gm_property_digits_bad(self):
gm = GameMode(mode="test", priority=3)
with self.assertRaises(TypeError):
gm.digits = "X"
def test_gm_property_digit_type(self):
gm = GameMode(mode="test", priority=3)
gm.digit_type = 5
self.assertEqual(gm.digit_type, 5)
def test_gm_property_digit_type_bad(self):
gm = GameMode(mode="test", priority=3)
with self.assertRaises(TypeError):
gm.digit_type = "X"
def test_gm_property_guesses_allowed(self):
gm = GameMode(mode="test", priority=3, guesses_allowed=5)
self.assertEqual(gm.guesses_allowed, 5)
gm.guesses_allowed = 7
self.assertEqual(gm.guesses_allowed, 7)
def test_gm_property_guesses_allowed_bad(self):
gm = GameMode(mode="test", priority=3)
with self.assertRaises(TypeError):
gm.guesses_allowed = "X"
def test_gm_property_instructions(self):
gm = GameMode(mode="test", priority=3, instruction_text="Test")
self.assertEqual(gm.instruction_text, "Test")
gm.instruction_text = "This is some test text"
self.assertEqual(gm.instruction_text, "This is some test text")
def test_gm_property_helptext(self):
gm = GameMode(mode="test", priority=3, help_text="Help")
self.assertEqual(gm.help_text, "Help")
gm.help_text = "This is some help"
self.assertEqual(gm.help_text, "This is some help")
| {
"content_hash": "d7b87ce3f796f2d787b6f16a7fe6926f",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 71,
"avg_line_length": 33.65,
"alnum_prop": 0.6222139673105498,
"repo_name": "dsandersAzure/python_cowbull_server",
"id": "35e32dfea581f1875c7f5d40eba0289a19437986",
"size": "2692",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "unittests/TestGameMode.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2221"
},
{
"name": "Python",
"bytes": "149374"
},
{
"name": "Shell",
"bytes": "1198"
}
],
"symlink_target": ""
} |
import re
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
def static(prefix, **kwargs):
return patterns('',
url(r'^%s(?P<path>.*)$' % re.escape(prefix.lstrip('/')),
'django.views.static.serve', kwargs=kwargs),
)
urlpatterns = patterns('',
url(r'^$', 'groupie.app.views.home', name='home'),
url(r'^deadline_hack/(?P<voting_hash>[a-zA-Z0-9]{8})$', 'groupie.app.views.deadline_hack', name='deadline_hack'),
url(r'^(?P<voting_hash>[a-zA-Z0-9]{8})$', 'groupie.app.views.voting', name='voting'),
url(r'^admin/', include(admin.site.urls)),
)
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| {
"content_hash": "8ca94d304a7e9a57d35c8878f3550bbb",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 117,
"avg_line_length": 32.82608695652174,
"alnum_prop": 0.6556291390728477,
"repo_name": "grunskis/groupie",
"id": "4581f2e855badb650771d097477db8d6b076a402",
"size": "755",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "groupie/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "153996"
},
{
"name": "JavaScript",
"bytes": "139721"
},
{
"name": "Python",
"bytes": "16554"
}
],
"symlink_target": ""
} |
"""Tests for hparam."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.training.python.training import hparam
from tensorflow.python.platform import test
class HParamsTest(test.TestCase):
def testEmpty(self):
hparams = hparam.HParams()
self.assertDictEqual({}, hparams.values())
hparams.parse('')
self.assertDictEqual({}, hparams.values())
with self.assertRaisesRegexp(ValueError, 'Unknown hyperparameter'):
hparams.parse('xyz=123')
def testContains(self):
hparams = hparam.HParams(foo=1)
self.assertTrue('foo' in hparams)
self.assertFalse('bar' in hparams)
def testSomeValues(self):
hparams = hparam.HParams(aaa=1, b=2.0, c_c='relu6')
self.assertDictEqual({'aaa': 1, 'b': 2.0, 'c_c': 'relu6'}, hparams.values())
expected_str = '[(\'aaa\', 1), (\'b\', 2.0), (\'c_c\', \'relu6\')]'
self.assertEqual(expected_str, str(hparams.__str__()))
self.assertEqual(expected_str, str(hparams))
self.assertEqual(1, hparams.aaa)
self.assertEqual(2.0, hparams.b)
self.assertEqual('relu6', hparams.c_c)
hparams.parse('aaa=12')
self.assertDictEqual({
'aaa': 12,
'b': 2.0,
'c_c': 'relu6'
}, hparams.values())
self.assertEqual(12, hparams.aaa)
self.assertEqual(2.0, hparams.b)
self.assertEqual('relu6', hparams.c_c)
hparams.parse('c_c=relu4, b=-2.0e10')
self.assertDictEqual({
'aaa': 12,
'b': -2.0e10,
'c_c': 'relu4'
}, hparams.values())
self.assertEqual(12, hparams.aaa)
self.assertEqual(-2.0e10, hparams.b)
self.assertEqual('relu4', hparams.c_c)
hparams.parse('c_c=,b=0,')
self.assertDictEqual({'aaa': 12, 'b': 0, 'c_c': ''}, hparams.values())
self.assertEqual(12, hparams.aaa)
self.assertEqual(0.0, hparams.b)
self.assertEqual('', hparams.c_c)
hparams.parse('c_c=2.3",b=+2,')
self.assertEqual(2.0, hparams.b)
self.assertEqual('2.3"', hparams.c_c)
with self.assertRaisesRegexp(ValueError, 'Unknown hyperparameter'):
hparams.parse('x=123')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('aaa=poipoi')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('aaa=1.0')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('b=12x')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('b=relu')
with self.assertRaisesRegexp(ValueError, 'Must not pass a list'):
hparams.parse('aaa=[123]')
self.assertEqual(12, hparams.aaa)
self.assertEqual(2.0, hparams.b)
self.assertEqual('2.3"', hparams.c_c)
# Exports to proto.
hparam_def = hparams.to_proto()
# Imports from proto.
hparams2 = hparam.HParams(hparam_def=hparam_def)
# Verifies that all hparams are restored.
self.assertEqual(12, hparams2.aaa)
self.assertEqual(2.0, hparams2.b)
self.assertEqual('2.3"', hparams2.c_c)
def testSetFromMap(self):
hparams = hparam.HParams(a=1, b=2.0, c='tanh')
hparams.override_from_dict({'a': -2, 'c': 'identity'})
self.assertDictEqual({'a': -2, 'c': 'identity', 'b': 2.0}, hparams.values())
hparams = hparam.HParams(x=1, b=2.0, d=[0.5])
hparams.override_from_dict({'d': [0.1, 0.2, 0.3]})
self.assertDictEqual({'d': [0.1, 0.2, 0.3], 'x': 1, 'b': 2.0},
hparams.values())
def testBoolParsing(self):
for value in 'true', 'false', 'True', 'False', '1', '0':
for initial in False, True:
hparams = hparam.HParams(use_gpu=initial)
hparams.parse('use_gpu=' + value)
self.assertEqual(hparams.use_gpu, value in ['True', 'true', '1'])
# Exports to proto.
hparam_def = hparams.to_proto()
# Imports from proto.
hparams2 = hparam.HParams(hparam_def=hparam_def)
self.assertEqual(hparams.use_gpu, hparams2.use_gpu)
# Check that hparams2.use_gpu is a bool rather than an int.
# The assertEqual() call above won't catch this, since
# (0 == False) and (1 == True) in Python.
self.assertEqual(bool, type(hparams2.use_gpu))
def testBoolParsingFail(self):
hparams = hparam.HParams(use_gpu=True)
with self.assertRaisesRegexp(ValueError, r'Could not parse.*use_gpu'):
hparams.parse('use_gpu=yep')
def testLists(self):
hparams = hparam.HParams(aaa=[1], b=[2.0, 3.0], c_c=['relu6'])
self.assertDictEqual({
'aaa': [1],
'b': [2.0, 3.0],
'c_c': ['relu6']
}, hparams.values())
self.assertEqual([1], hparams.aaa)
self.assertEqual([2.0, 3.0], hparams.b)
self.assertEqual(['relu6'], hparams.c_c)
hparams.parse('aaa=[12]')
self.assertEqual([12], hparams.aaa)
hparams.parse('aaa=[12,34,56]')
self.assertEqual([12, 34, 56], hparams.aaa)
hparams.parse('c_c=[relu4,relu12],b=[1.0]')
self.assertEqual(['relu4', 'relu12'], hparams.c_c)
self.assertEqual([1.0], hparams.b)
hparams.parse('c_c=[],aaa=[-34]')
self.assertEqual([-34], hparams.aaa)
self.assertEqual([], hparams.c_c)
hparams.parse('c_c=[_12,3\'4"],aaa=[+3]')
self.assertEqual([3], hparams.aaa)
self.assertEqual(['_12', '3\'4"'], hparams.c_c)
with self.assertRaisesRegexp(ValueError, 'Unknown hyperparameter'):
hparams.parse('x=[123]')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('aaa=[poipoi]')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('aaa=[1.0]')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('b=[12x]')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('b=[relu]')
with self.assertRaisesRegexp(ValueError, 'Must pass a list'):
hparams.parse('aaa=123')
# Exports to proto.
hparam_def = hparams.to_proto()
# Imports from proto.
hparams2 = hparam.HParams(hparam_def=hparam_def)
# Verifies that all hparams are restored.
self.assertEqual([3], hparams2.aaa)
self.assertEqual([1.0], hparams2.b)
self.assertEqual(['_12', '3\'4"'], hparams2.c_c)
def testParseValuesWithIndexAssigment1(self):
"""Assignment to an index position."""
parse_dict = hparam.parse_values('arr[1]=10', {'arr': int})
self.assertEqual(len(parse_dict), 1)
self.assertTrue(isinstance(parse_dict['arr'], dict))
self.assertDictEqual(parse_dict['arr'], {1: 10})
def testParseValuesWithIndexAssigment2(self):
"""Assignment to multiple index positions."""
parse_dict = hparam.parse_values('arr[0]=10,arr[5]=20', {'arr': int})
self.assertEqual(len(parse_dict), 1)
self.assertTrue(isinstance(parse_dict['arr'], dict))
self.assertDictEqual(parse_dict['arr'], {0: 10, 5: 20})
def testParseValuesWithIndexAssigment3(self):
"""Assignment to index positions in multiple names."""
parse_dict = hparam.parse_values('arr[0]=10,arr[1]=20,L[5]=100,L[10]=200',
{'arr': int,
'L': int})
self.assertEqual(len(parse_dict), 2)
self.assertTrue(isinstance(parse_dict['arr'], dict))
self.assertDictEqual(parse_dict['arr'], {0: 10, 1: 20})
self.assertTrue(isinstance(parse_dict['L'], dict))
self.assertDictEqual(parse_dict['L'], {5: 100, 10: 200})
def testParseValuesWithIndexAssigment4(self):
"""Assignment of index positions and scalars."""
parse_dict = hparam.parse_values('x=10,arr[1]=20,y=30',
{'x': int,
'y': int,
'arr': int})
self.assertEqual(len(parse_dict), 3)
self.assertTrue(isinstance(parse_dict['arr'], dict))
self.assertDictEqual(parse_dict['arr'], {1: 20})
self.assertEqual(parse_dict['x'], 10)
self.assertEqual(parse_dict['y'], 30)
def testParseValuesWithIndexAssigment5(self):
"""Different variable types."""
parse_dict = hparam.parse_values('a[0]=5,b[1]=true,c[2]=abc,d[3]=3.14', {
'a': int,
'b': bool,
'c': str,
'd': float
})
self.assertEqual(set(parse_dict.keys()), {'a', 'b', 'c', 'd'})
self.assertTrue(isinstance(parse_dict['a'], dict))
self.assertDictEqual(parse_dict['a'], {0: 5})
self.assertTrue(isinstance(parse_dict['b'], dict))
self.assertDictEqual(parse_dict['b'], {1: True})
self.assertTrue(isinstance(parse_dict['c'], dict))
self.assertDictEqual(parse_dict['c'], {2: 'abc'})
self.assertTrue(isinstance(parse_dict['d'], dict))
self.assertDictEqual(parse_dict['d'], {3: 3.14})
def testParseValuesWithBadIndexAssigment1(self):
"""Reject assignment of list to variable type."""
with self.assertRaisesRegexp(ValueError,
r'Assignment of a list to a list index.'):
hparam.parse_values('arr[1]=[1,2,3]', {'arr': int})
def testParseValuesWithBadIndexAssigment2(self):
"""Reject if type missing."""
with self.assertRaisesRegexp(ValueError,
r'Unknown hyperparameter type for arr'):
hparam.parse_values('arr[1]=5', {})
def testParseValuesWithBadIndexAssigment3(self):
"""Reject type of the form name[index]."""
with self.assertRaisesRegexp(ValueError,
'Unknown hyperparameter type for arr'):
hparam.parse_values('arr[1]=1', {'arr[1]': int})
def testWithReusedVariables(self):
with self.assertRaisesRegexp(ValueError,
'Multiple assignments to variable \'x\''):
hparam.parse_values('x=1,x=1', {'x': int})
with self.assertRaisesRegexp(ValueError,
'Multiple assignments to variable \'arr\''):
hparam.parse_values('arr=[100,200],arr[0]=10', {'arr': int})
with self.assertRaisesRegexp(
ValueError, r'Multiple assignments to variable \'arr\[0\]\''):
hparam.parse_values('arr[0]=10,arr[0]=20', {'arr': int})
with self.assertRaisesRegexp(ValueError,
'Multiple assignments to variable \'arr\''):
hparam.parse_values('arr[0]=10,arr=[100]', {'arr': int})
def testJson(self):
hparams = hparam.HParams(aaa=1, b=2.0, c_c='relu6', d=True)
self.assertDictEqual({
'aaa': 1,
'b': 2.0,
'c_c': 'relu6',
'd': True
}, hparams.values())
self.assertEqual(1, hparams.aaa)
self.assertEqual(2.0, hparams.b)
self.assertEqual('relu6', hparams.c_c)
hparams.parse_json('{"aaa": 12, "b": 3.0, "c_c": "relu4", "d": false}')
self.assertDictEqual({
'aaa': 12,
'b': 3.0,
'c_c': 'relu4',
'd': False
}, hparams.values())
self.assertEqual(12, hparams.aaa)
self.assertEqual(3.0, hparams.b)
self.assertEqual('relu4', hparams.c_c)
json_str = hparams.to_json()
hparams2 = hparam.HParams(aaa=10, b=20.0, c_c='hello', d=False)
hparams2.parse_json(json_str)
self.assertEqual(12, hparams2.aaa)
self.assertEqual(3.0, hparams2.b)
self.assertEqual('relu4', hparams2.c_c)
self.assertEqual(False, hparams2.d)
hparams3 = hparam.HParams(aaa=123)
self.assertEqual('{"aaa": 123}', hparams3.to_json())
self.assertEqual('{\n "aaa": 123\n}', hparams3.to_json(indent=2))
self.assertEqual('{"aaa"=123}', hparams3.to_json(separators=(';', '=')))
hparams4 = hparam.HParams(aaa=123, b='hello', c_c=False)
self.assertEqual(
'{"aaa": 123, "b": "hello", "c_c": false}',
hparams4.to_json(sort_keys=True))
def testSetHParam(self):
hparams = hparam.HParams(aaa=1, b=2.0, c_c='relu6', d=True)
self.assertDictEqual({
'aaa': 1,
'b': 2.0,
'c_c': 'relu6',
'd': True
}, hparams.values())
self.assertEqual(1, hparams.aaa)
self.assertEqual(2.0, hparams.b)
self.assertEqual('relu6', hparams.c_c)
hparams.set_hparam('aaa', 12)
hparams.set_hparam('b', 3.0)
hparams.set_hparam('c_c', 'relu4')
hparams.set_hparam('d', False)
self.assertDictEqual({
'aaa': 12,
'b': 3.0,
'c_c': 'relu4',
'd': False
}, hparams.values())
self.assertEqual(12, hparams.aaa)
self.assertEqual(3.0, hparams.b)
self.assertEqual('relu4', hparams.c_c)
def testSetHParamListNonListMismatch(self):
hparams = hparam.HParams(a=1, b=[2.0, 3.0])
with self.assertRaisesRegexp(ValueError, r'Must not pass a list'):
hparams.set_hparam('a', [1.0])
with self.assertRaisesRegexp(ValueError, r'Must pass a list'):
hparams.set_hparam('b', 1.0)
def testSetHParamTypeMismatch(self):
hparams = hparam.HParams(
int_=1, str_='str', bool_=True, float_=1.1, list_int=[1, 2], none=None)
with self.assertRaises(ValueError):
hparams.set_hparam('str_', 2.2)
with self.assertRaises(ValueError):
hparams.set_hparam('int_', False)
with self.assertRaises(ValueError):
hparams.set_hparam('bool_', 1)
with self.assertRaises(ValueError):
hparams.set_hparam('int_', 2.2)
with self.assertRaises(ValueError):
hparams.set_hparam('list_int', [2, 3.3])
with self.assertRaises(ValueError):
hparams.set_hparam('int_', '2')
# Casting int to float is OK
hparams.set_hparam('float_', 1)
# Getting stuck with NoneType :(
hparams.set_hparam('none', '1')
self.assertEqual('1', hparams.none)
def testNonProtoFails(self):
with self.assertRaisesRegexp(AssertionError, ''):
hparam.HParams(hparam_def=1)
with self.assertRaisesRegexp(AssertionError, ''):
hparam.HParams(hparam_def=1.0)
with self.assertRaisesRegexp(AssertionError, ''):
hparam.HParams(hparam_def='hello')
with self.assertRaisesRegexp(AssertionError, ''):
hparam.HParams(hparam_def=[1, 2, 3])
def testGet(self):
hparams = hparam.HParams(aaa=1, b=2.0, c_c='relu6', d=True, e=[5.0, 6.0])
# Existing parameters with default=None.
self.assertEqual(1, hparams.get('aaa'))
self.assertEqual(2.0, hparams.get('b'))
self.assertEqual('relu6', hparams.get('c_c'))
self.assertEqual(True, hparams.get('d'))
self.assertEqual([5.0, 6.0], hparams.get('e', None))
# Existing parameters with compatible defaults.
self.assertEqual(1, hparams.get('aaa', 2))
self.assertEqual(2.0, hparams.get('b', 3.0))
self.assertEqual(2.0, hparams.get('b', 3))
self.assertEqual('relu6', hparams.get('c_c', 'default'))
self.assertEqual(True, hparams.get('d', True))
self.assertEqual([5.0, 6.0], hparams.get('e', [1.0, 2.0, 3.0]))
self.assertEqual([5.0, 6.0], hparams.get('e', [1, 2, 3]))
# Existing parameters with incompatible defaults.
with self.assertRaises(ValueError):
hparams.get('aaa', 2.0)
with self.assertRaises(ValueError):
hparams.get('b', False)
with self.assertRaises(ValueError):
hparams.get('c_c', [1, 2, 3])
with self.assertRaises(ValueError):
hparams.get('d', 'relu')
with self.assertRaises(ValueError):
hparams.get('e', 123.0)
with self.assertRaises(ValueError):
hparams.get('e', ['a', 'b', 'c'])
# Nonexistent parameters.
self.assertEqual(None, hparams.get('unknown'))
self.assertEqual(123, hparams.get('unknown', 123))
self.assertEqual([1, 2, 3], hparams.get('unknown', [1, 2, 3]))
if __name__ == '__main__':
test.main()
| {
"content_hash": "1f47179d7e80aa5950f1a8455b1c64eb",
"timestamp": "",
"source": "github",
"line_count": 408,
"max_line_length": 80,
"avg_line_length": 37.872549019607845,
"alnum_prop": 0.6179135387004918,
"repo_name": "ravindrapanda/tensorflow",
"id": "16397622edd382bc6dcb12870de5fa22130a2c2b",
"size": "16141",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/training/python/training/hparam_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7908"
},
{
"name": "C",
"bytes": "186817"
},
{
"name": "C++",
"bytes": "25164156"
},
{
"name": "CMake",
"bytes": "166422"
},
{
"name": "Go",
"bytes": "857510"
},
{
"name": "HTML",
"bytes": "568425"
},
{
"name": "Java",
"bytes": "317802"
},
{
"name": "JavaScript",
"bytes": "1399"
},
{
"name": "Jupyter Notebook",
"bytes": "1833659"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37393"
},
{
"name": "Objective-C",
"bytes": "7037"
},
{
"name": "Objective-C++",
"bytes": "64142"
},
{
"name": "Protocol Buffer",
"bytes": "227436"
},
{
"name": "Python",
"bytes": "22238905"
},
{
"name": "Shell",
"bytes": "338684"
},
{
"name": "TypeScript",
"bytes": "797972"
}
],
"symlink_target": ""
} |
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc"))
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def p2p_port(n):
return 11000 + n + os.getpid()%999
def rpc_port(n):
return 12000 + n + os.getpid()%999
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def sync_blocks(rpc_connections):
"""
Wait until everybody has the same block count
"""
while True:
counts = [ x.getblockcount() for x in rpc_connections ]
if counts == [ counts[0] ]*len(counts):
break
time.sleep(1)
def sync_mempools(rpc_connections):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while True:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
break
time.sleep(1)
bitcoind_processes = {}
def initialize_datadir(dir, n):
datadir = os.path.join(dir, "node"+str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "bitcoin.conf"), 'w') as f:
f.write("regtest=1\n");
f.write("rpcuser=rt\n");
f.write("rpcpassword=rt\n");
f.write("port="+str(p2p_port(n))+"\n");
f.write("rpcport="+str(rpc_port(n))+"\n");
return datadir
def initialize_chain(test_dir):
"""
Create (or copy from cache) a 200-block-long chain and
4 wallets.
bitcoind and bitcoin-cli must be in search path.
"""
if not os.path.isdir(os.path.join("cache", "node0")):
devnull = open("/dev/null", "w+")
# Create cache directories, run bitcoinds:
for i in range(4):
datadir=initialize_datadir("cache", i)
args = [ "bitcoind", "-keypool=1", "-datadir="+datadir, "-discover=0" ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
bitcoind_processes[i] = subprocess.Popen(args)
subprocess.check_call([ "bitcoin-cli", "-datadir="+datadir,
"-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
rpcs = []
for i in range(4):
try:
url = "http://rt:rt@127.0.0.1:%d"%(rpc_port(i),)
rpcs.append(AuthServiceProxy(url))
except:
sys.stderr.write("Error connecting to "+url+"\n")
sys.exit(1)
# Create a 200-block-long chain; each of the 4 nodes
# gets 25 mature blocks and 25 immature.
for i in range(4):
rpcs[i].setgenerate(True, 25)
sync_blocks(rpcs)
for i in range(4):
rpcs[i].setgenerate(True, 25)
sync_blocks(rpcs)
# Shut them down, and clean up cache directories:
stop_nodes(rpcs)
wait_bitcoinds()
for i in range(4):
os.remove(log_filename("cache", i, "debug.log"))
os.remove(log_filename("cache", i, "db.log"))
os.remove(log_filename("cache", i, "peers.dat"))
os.remove(log_filename("cache", i, "fee_estimates.dat"))
for i in range(4):
from_dir = os.path.join("cache", "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in bitcoin.conf
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dir, extra_args=None, rpchost=None):
"""
Start a bitcoind and return RPC connection to it
"""
datadir = os.path.join(dir, "node"+str(i))
args = [ "bitcoind", "-datadir="+datadir, "-keypool=1", "-discover=0" ]
if extra_args is not None: args.extend(extra_args)
bitcoind_processes[i] = subprocess.Popen(args)
devnull = open("/dev/null", "w+")
subprocess.check_call([ "bitcoin-cli", "-datadir="+datadir] +
_rpchost_to_args(rpchost) +
["-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
proxy = AuthServiceProxy(url)
proxy.url = url # store URL on proxy for info
return proxy
def start_nodes(num_nodes, dir, extra_args=None, rpchost=None):
"""
Start multiple bitcoinds, return RPC connections to them
"""
if extra_args is None: extra_args = [ None for i in range(num_nodes) ]
return [ start_node(i, dir, extra_args[i], rpchost) for i in range(num_nodes) ]
def log_filename(dir, n_node, logname):
return os.path.join(dir, "node"+str(n_node), "regtest", logname)
def stop_node(node, i):
node.stop()
bitcoind_processes[i].wait()
del bitcoind_processes[i]
def stop_nodes(nodes):
for i in range(len(nodes)):
nodes[i].stop()
del nodes[:] # Emptying array closes connections as a side effect
def wait_bitcoinds():
# Wait for all bitcoinds to cleanly exit
for bitcoind in bitcoind_processes.values():
bitcoind.wait()
bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
utxo = from_node.listunspent(1)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount+fee*2, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out+fee
change = amount_in - amount
if change > amount*2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
"""
Create&broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
Ensures transaction is zero-priority by first creating a send-to-self,
then using it's output
"""
# Create a send-to-self with confirmed inputs:
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount+fee*2)
outputs = make_change(from_node, total_in, amount+fee, fee)
outputs[self_address] = float(amount+fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount+fee)
# Now immediately spend the output to create a 1-input, 1-output
# zero-priority transaction:
inputs = [ { "txid" : self_txid, "vout" : vout } ]
outputs = { to_node.getnewaddress() : float(amount) }
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random zero-priority transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount+fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
if thing1 != thing2:
raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
| {
"content_hash": "e4e13cc1d86f94cd2f7b1fa49bef2224",
"timestamp": "",
"source": "github",
"line_count": 302,
"max_line_length": 104,
"avg_line_length": 35.78476821192053,
"alnum_prop": 0.6245026371796059,
"repo_name": "TBoehm/greedynode",
"id": "6d0b21c927b1368f1c17c998742666e3556724a2",
"size": "11087",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "qa/rpc-tests/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "113529"
},
{
"name": "C++",
"bytes": "3373391"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Makefile",
"bytes": "60693"
},
{
"name": "Objective-C",
"bytes": "3036"
},
{
"name": "Objective-C++",
"bytes": "6310"
},
{
"name": "Python",
"bytes": "167964"
},
{
"name": "Shell",
"bytes": "51936"
}
],
"symlink_target": ""
} |
from django.urls import reverse
from menu import Menu, MenuItem
def profile_title(request):
"""
Return a personalized title for our profile menu item
"""
# we don't need to check if the user is authenticated because our menu
# item will have a check that does that for us
name = request.user.get_full_name() or request.user
return "%s's Profile" % name
Menu.add_item("side", MenuItem("Dashboard", reverse("home"), icon="fas fa-tachometer-alt"))
Menu.add_item(
"side",
MenuItem(
"People",
None,
icon="fas fa-user",
children=[
MenuItem("Members", reverse("members-list"), icon="fas fa-user"),
MenuItem("Playing adults", reverse("adults-list"), icon="fas fa-user"),
MenuItem("Parents", reverse("parents-list"), icon="fas fa-user"),
MenuItem("Juniors", reverse("juniors-list"), icon="fas fa-user"),
MenuItem("Enquiries", reverse("enquiry-list"), icon="fas fa-user"),
MenuItem("Applications", reverse("applied-list"), icon="fas fa-user"),
MenuItem("All people", reverse("all-people-list"), icon="fas fa-user"),
MenuItem("Search", reverse("people-search"), icon="fa fa-search"),
MenuItem("Create adult", reverse("person-create"), icon="fas fa-user"),
MenuItem("Create junior", reverse("person-junior-create"), icon="fas fa-user"),
],
),
)
Menu.add_item(
"side",
MenuItem(
"Groups",
None,
icon="fas fa-users",
children=[MenuItem("List groups", reverse("group-list"), icon="fas fa-list-ul")],
),
)
Menu.add_item(
"side",
MenuItem(
"Charts",
None,
icon="fas fa-chart-bar",
children=[MenuItem("Membership", reverse("charts-members"), icon="fa2 fa-circle")],
),
)
Menu.add_item(
"side",
MenuItem(
"Finance",
None,
icon="fas fa-pound-sign",
children=[
MenuItem("Analysis", reverse("billing-analysis"), icon="far fa-circle"),
MenuItem("Invoices", reverse("invoice-list"), icon="far fa-circle"),
MenuItem("Invoice queries", reverse("invoice-queries"), icon="far fa-circle"),
MenuItem("Invoice items", reverse("item-table"), icon="far fa-circle"),
MenuItem("Payments", reverse("payment-list"), icon="far fa-circle"),
MenuItem("Scheduled payments", reverse("payment-task-list"), icon="far fa-circle"),
MenuItem("Refunds", reverse("refund-list"), icon="far fa-circle"),
# MenuItem(
# "Update from GoCardless",
# reverse("cardless_payment_process"),
# icon="far fa-circle",
# ),
MenuItem("Payouts", reverse("cardless_payout_list"), icon="far fa-circle"),
],
),
)
Menu.add_item(
"side",
MenuItem(
"Billing",
None,
icon="fas fa-pound-sign",
children=[
MenuItem("Period-end", reverse("billing-period"), icon="far fa-circle"),
MenuItem("Year-end", reverse("billing-year-end"), icon="far fa-circle"),
],
),
)
Menu.add_item(
"side",
MenuItem(
"Mail",
None,
icon="far fa-envelope",
children=[
MenuItem("Sent mails", reverse("mailtask-list"), icon="far fa-circle"),
MenuItem("Invoice templates", reverse("text-invoice-list"), icon="far fa-circle"),
MenuItem("Text blocks", reverse("text-list"), icon="far fa-circle"),
MenuItem("Mail types", reverse("mailtype-list"), icon="far fa-circle"),
],
),
)
Menu.add_item(
"side",
MenuItem(
"Settings",
None,
icon="fas fa-cog",
children=[
MenuItem("Fees", reverse("fees-list"), icon="far fa-circle"),
MenuItem("Membership year", reverse("billing-set-year"), icon="far fa-circle"),
MenuItem("Membership categories", reverse("membership-list"), icon="far fa-circle"),
],
),
)
Menu.add_item(
"side",
MenuItem(
"POS",
None,
icon="fas fa-shopping-cart",
children=[
MenuItem("POS admin", reverse("pos_admin"), icon="fas fa-shopping-cart"),
MenuItem("Transactions", reverse("pos_transactions_main"), icon="fas fa-shipping-cart"),
],
),
)
| {
"content_hash": "63a438f54a4e3b6d7f90e999f78f90f5",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 100,
"avg_line_length": 32.167883211678834,
"alnum_prop": 0.5625141819832086,
"repo_name": "ianastewart/cwltc-admin",
"id": "0a1f1fc83c59aac47081119efcbf85bd9d077fdd",
"size": "4407",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "members/menus.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "945975"
},
{
"name": "Dockerfile",
"bytes": "882"
},
{
"name": "HTML",
"bytes": "526368"
},
{
"name": "JavaScript",
"bytes": "843481"
},
{
"name": "Python",
"bytes": "8389886"
},
{
"name": "Shell",
"bytes": "1023"
}
],
"symlink_target": ""
} |
"""Sparse attention for the transformer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensor2tensor.layers import common_attention
from tensor2tensor.layers import common_layers
import tensorflow.compat.v1 as tf
from state_of_sparsity.sparse_transformer.layers import common_sparse
from tensorflow.contrib.model_pruning.python import pruning # pylint: disable=g-direct-tensorflow-import
from tensorflow.python.ops import inplace_ops # pylint: disable=g-direct-tensorflow-import
def compute_attention_component(antecedent,
total_depth,
filter_width=1,
padding="VALID",
name="c",
vars_3d_num_heads=0,
sparsity_technique=None,
threshold=3.0,
training=True,
clip_alpha=None,
initial_sparsity=None,
split_heads=False,
num_heads=None):
"""Computes attention compoenent (query, key or value).
Args:
antecedent: a Tensor with shape [batch, length, channels]
total_depth: an integer
filter_width: An integer specifying how wide you want the attention
component to be.
padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding.
name: a string specifying scope name.
vars_3d_num_heads: an optional integer (if we want to use 3d variables)
sparsity_technique: technique used for sparsifying weights.
threshold: log alpha threshold used for evaluation with variational dropout.
training: whether model is being trained or not.
clip_alpha: alpha clipping threshold for variational dropout.
initial_sparsity: initial sparsity level for lottery ticket &
scratch experiments.
split_heads: Whether to prune each head separately.
num_heads: The number of heads in the attention module.
Returns:
c : [batch, length, depth] tensor
"""
# We don't support 3d attention variables or filter_width > 1 with sparsity
# techniques
assert not sparsity_technique or (not vars_3d_num_heads and filter_width == 1)
if vars_3d_num_heads > 0:
assert filter_width == 1
input_depth = antecedent.get_shape().as_list()[-1]
depth_per_head = total_depth // vars_3d_num_heads
initializer_stddev = input_depth ** -0.5
if "q" in name:
initializer_stddev *= depth_per_head ** -0.5
var = tf.get_variable(
name, [input_depth,
vars_3d_num_heads,
total_depth // vars_3d_num_heads],
initializer=tf.random_normal_initializer(stddev=initializer_stddev))
var = tf.cast(var, antecedent.dtype)
var = tf.reshape(var, [input_depth, total_depth])
return tf.tensordot(antecedent, var, axes=1)
if filter_width == 1:
if sparsity_technique:
if split_heads:
# Prune each heads weights separately so that they are free
# to have different weight magnitude distributions.
if num_heads is None:
raise ValueError("`num_heads` must be set for split head pruning.")
if total_depth % num_heads != 0:
raise ValueError("`total_depth` must be divisible by `num_heads`.")
input_depth = antecedent.get_shape().as_list()[-1]
depth_per_head = int(total_depth / num_heads)
masked_head_weights = []
for head_id in range(num_heads):
head_name = name + "_shard_{}".format(head_id)
with tf.variable_scope(head_name) as vs:
head_weights = tf.get_variable(
"kernel", [input_depth, depth_per_head])
masked_head_weights.append(pruning.apply_mask(head_weights, vs))
component_weights = tf.concat(masked_head_weights, axis=1)
# compute the full component result
return tf.tensordot(antecedent, component_weights, axes=1)
else:
return common_sparse.dense(
antecedent,
total_depth,
use_bias=False,
sparsity_technique=sparsity_technique,
threshold=threshold,
training=training,
clip_alpha=clip_alpha,
name=name,
initial_sparsity=initial_sparsity)
else:
return common_layers.dense(
antecedent, total_depth, use_bias=False, name=name)
else:
return common_layers.conv1d(
antecedent, total_depth, filter_width, padding=padding, name=name)
def compute_qkv(query_antecedent,
memory_antecedent,
total_key_depth,
total_value_depth,
q_filter_width=1,
kv_filter_width=1,
q_padding="VALID",
kv_padding="VALID",
vars_3d_num_heads=0,
sparsity_technique=None,
threshold=3.0,
training=True,
clip_alpha=None,
initial_sparsity=None,
split_heads=False,
num_heads=None):
"""Computes query, key and value.
Args:
query_antecedent: a Tensor with shape [batch, length_q, channels]
memory_antecedent: a Tensor with shape [batch, length_m, channels]
total_key_depth: an integer
total_value_depth: an integer
q_filter_width: An integer specifying how wide you want the query to be.
kv_filter_width: An integer specifying how wide you want the keys and values
to be.
q_padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding.
kv_padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding.
    vars_3d_num_heads: an optional integer (if we want to use 3d variables)
sparsity_technique: technique used for sparsifying weights.
threshold: log alpha threshold used for evaluation with variational dropout.
training: whether model is being trained or not.
clip_alpha: alpha clipping threshold for variational dropout.
initial_sparsity: initial sparsity level for lottery ticket &
scratch experiments.
split_heads: Whether to prune each head separately.
num_heads: The number of heads in the attention module.
Returns:
q, k, v : [batch, length, depth] tensors
"""
if memory_antecedent is None:
memory_antecedent = query_antecedent
q = compute_attention_component(
query_antecedent,
total_key_depth,
q_filter_width,
q_padding,
"q",
vars_3d_num_heads=vars_3d_num_heads,
sparsity_technique=sparsity_technique,
threshold=threshold,
training=training,
clip_alpha=clip_alpha,
initial_sparsity=initial_sparsity,
split_heads=split_heads,
num_heads=num_heads)
k = compute_attention_component(
memory_antecedent,
total_key_depth,
kv_filter_width,
kv_padding,
"k",
vars_3d_num_heads=vars_3d_num_heads,
sparsity_technique=sparsity_technique,
threshold=threshold,
training=training,
clip_alpha=clip_alpha,
initial_sparsity=initial_sparsity,
split_heads=split_heads,
num_heads=num_heads)
v = compute_attention_component(
memory_antecedent,
total_value_depth,
kv_filter_width,
kv_padding,
"v",
vars_3d_num_heads=vars_3d_num_heads,
sparsity_technique=sparsity_technique,
threshold=threshold,
training=training,
clip_alpha=clip_alpha,
initial_sparsity=initial_sparsity,
split_heads=split_heads,
num_heads=num_heads)
return q, k, v
def multihead_attention(query_antecedent,
memory_antecedent,
bias,
total_key_depth,
total_value_depth,
output_depth,
num_heads,
dropout_rate,
attention_type="dot_product",
image_shapes=None,
q_filter_width=1,
kv_filter_width=1,
q_padding="VALID",
kv_padding="VALID",
cache=None,
name="multihead_attention",
save_weights_to=None,
make_image_summary=True,
dropout_broadcast_dims=None,
vars_3d=False,
sparsity_technique=None,
threshold=3.0,
training=True,
clip_alpha=None,
initial_sparsity=None,
split_heads=False,
**kwargs):
"""Multihead scaled-dot-product attention with input/output transformations.
Args:
query_antecedent: a Tensor with shape [batch, length_q, channels]
memory_antecedent: a Tensor with shape [batch, length_m, channels] or None
bias: bias Tensor (see attention_bias())
total_key_depth: an integer
total_value_depth: an integer
output_depth: an integer
num_heads: an integer dividing total_key_depth and total_value_depth
dropout_rate: a floating point number
attention_type: a string, either "dot_product", "dot_product_relative",
"local_mask_right", "local_unmasked", "masked_dilated_1d",
"unmasked_dilated_1d", graph, or any attention function
with the signature (query, key, value, **kwargs)
image_shapes: optional tuple of integer scalars.
see comments for attention_image_summary()
q_filter_width: An integer specifying how wide you want the query to be.
kv_filter_width: An integer specifying how wide you want the keys and values
to be.
q_padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding.
kv_padding: One of "VALID", "SAME" or "LEFT". Default is "VALID":
no padding.
cache: dict containing Tensors which are the results of previous
      attentions, used for fast decoding. Expects the dict to contain two
keys ('k' and 'v'), for the initial call the values for these keys
should be empty Tensors of the appropriate shape.
'k' [batch_size, 0, key_channels]
'v' [batch_size, 0, value_channels]
name: an optional string.
save_weights_to: an optional dictionary to capture attention weights
      for visualization; the weights tensor will be appended there under
a string key created from the variable scope (including name).
make_image_summary: Whether to make an attention image summary.
dropout_broadcast_dims: an optional list of integers less than 4
specifying in which dimensions to broadcast the dropout decisions.
saves memory.
vars_3d: use 3-dimensional variables for input/output transformations
sparsity_technique: technique used for sparsifying weights.
threshold: log alpha threshold used for evaluation with variational dropout.
training: whether model is being trained or not.
clip_alpha: alpha clipping threshold for variational dropout.
initial_sparsity: initial sparsity level for lottery ticket &
scratch experiments.
split_heads: Whether to prune each head separately.
**kwargs (dict): Parameters for the attention function
Caching:
WARNING: For decoder self-attention, i.e. when memory_antecedent == None,
the caching assumes that the bias contains future masking.
The caching works by saving all the previous key and value values so that
you are able to send just the last query location to this attention
function. I.e. if the cache dict is provided it assumes the query is of the
shape [batch_size, 1, hidden_dim] rather than the full memory.
Returns:
The result of the attention transformation. The output shape is
[batch_size, length_q, hidden_dim]
unless the cache dict is provided in which case only the last memory
position is calculated and the output shape is [batch_size, 1, hidden_dim]
Optionally returns an additional loss parameters (ex: load balance loss for
the experts) returned by the attention_type function.
Raises:
ValueError: if the key depth or value depth are not divisible by the
number of attention heads.
"""
if total_key_depth % num_heads != 0:
raise ValueError("Key depth (%d) must be divisible by the number of "
"attention heads (%d)." % (total_key_depth, num_heads))
if total_value_depth % num_heads != 0:
raise ValueError("Value depth (%d) must be divisible by the number of "
"attention heads (%d)." % (total_value_depth, num_heads))
if vars_3d:
raise ValueError("3d attention variables not supported.")
if attention_type != "dot_product":
raise ValueError(
"Sparse multihead attention only supports dot_product attention.")
vars_3d_num_heads = 0
with tf.variable_scope(
name,
default_name="multihead_attention",
values=[query_antecedent, memory_antecedent]):
if cache is None or memory_antecedent is None:
q, k, v = compute_qkv(query_antecedent, memory_antecedent,
total_key_depth, total_value_depth, q_filter_width,
kv_filter_width, q_padding, kv_padding,
vars_3d_num_heads=vars_3d_num_heads,
sparsity_technique=sparsity_technique,
threshold=threshold,
training=training,
clip_alpha=clip_alpha,
initial_sparsity=initial_sparsity,
split_heads=split_heads,
num_heads=num_heads)
if cache is not None:
if bias is None:
raise ValueError("Bias required for caching. See function docstring "
"for details.")
if memory_antecedent is not None:
# Encoder-Decoder Attention Cache
q = compute_attention_component(query_antecedent, total_key_depth,
q_filter_width, q_padding, "q",
vars_3d_num_heads=vars_3d_num_heads,
sparsity_technique=sparsity_technique,
threshold=threshold,
training=training,
clip_alpha=clip_alpha,
initial_sparsity=initial_sparsity,
split_heads=split_heads,
num_heads=num_heads)
k = cache["k_encdec"]
v = cache["v_encdec"]
else:
k = common_attention.split_heads(k, num_heads)
v = common_attention.split_heads(v, num_heads)
decode_loop_step = kwargs.get("decode_loop_step")
if decode_loop_step is None:
k = cache["k"] = tf.concat([cache["k"], k], axis=2)
v = cache["v"] = tf.concat([cache["v"], v], axis=2)
else:
# Inplace update is required for inference on TPU.
# Inplace_ops only supports inplace_update on the first dimension.
# The performance of current implementation is better than updating
# the tensor by adding the result of matmul(one_hot,
# update_in_current_step)
tmp_k = tf.transpose(cache["k"], perm=[2, 0, 1, 3])
tmp_k = inplace_ops.alias_inplace_update(
tmp_k, decode_loop_step, tf.squeeze(k, axis=2))
k = cache["k"] = tf.transpose(tmp_k, perm=[1, 2, 0, 3])
tmp_v = tf.transpose(cache["v"], perm=[2, 0, 1, 3])
tmp_v = inplace_ops.alias_inplace_update(
tmp_v, decode_loop_step, tf.squeeze(v, axis=2))
v = cache["v"] = tf.transpose(tmp_v, perm=[1, 2, 0, 3])
q = common_attention.split_heads(q, num_heads)
if cache is None:
k = common_attention.split_heads(k, num_heads)
v = common_attention.split_heads(v, num_heads)
key_depth_per_head = total_key_depth // num_heads
if not vars_3d:
q *= key_depth_per_head**-0.5
# compute the attention
x = common_attention.dot_product_attention(
q, k, v, bias, dropout_rate, image_shapes,
save_weights_to=save_weights_to,
make_image_summary=make_image_summary,
dropout_broadcast_dims=dropout_broadcast_dims)
x = common_attention.combine_heads(x)
# Set last dim specifically.
x.set_shape(x.shape.as_list()[:-1] + [total_value_depth])
if sparsity_technique:
x = common_sparse.dense(
x,
output_depth,
use_bias=False,
sparsity_technique=sparsity_technique,
threshold=threshold,
training=training,
clip_alpha=clip_alpha,
name="output_transform",
initial_sparsity=initial_sparsity)
else:
x = common_layers.dense(
x,
output_depth,
use_bias=False,
name="output_transform")
return x
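# --- Illustrative usage sketch (not part of the original module) ---
# A minimal encoder self-attention call; `x` is a [batch, length, hidden] float
# Tensor and `bias` comes from a helper such as
# common_attention.attention_bias_ignore_padding. The hyperparameter values are
# placeholders and the sparsity_technique string must be one supported by
# common_sparse.dense.
#
# y = multihead_attention(
#     query_antecedent=x,
#     memory_antecedent=None,          # None => self-attention
#     bias=bias,
#     total_key_depth=512,
#     total_value_depth=512,
#     output_depth=512,
#     num_heads=8,
#     dropout_rate=0.1,
#     sparsity_technique="variational_dropout",
#     split_heads=True)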
| {
"content_hash": "8fe9e436d123debff55bc4ccf04bb5fc",
"timestamp": "",
"source": "github",
"line_count": 402,
"max_line_length": 105,
"avg_line_length": 43.06218905472637,
"alnum_prop": 0.6011784414534111,
"repo_name": "google-research/google-research",
"id": "78566d922cf6321d93e9eae391c8d3d0a0223025",
"size": "17919",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "state_of_sparsity/sparse_transformer/layers/sparse_attention.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
} |
__author__ = 'zelgadis'
import bpy
import sys
UPDATE = "update"
WIDTH = "width"
HEIGHT = "height"
STEREO_CAMERA = "camera"
AUDIOFILE = "audiofile"
FORMAT = "format"
CYCLES_SAMPLES = "cycles_samples"
PRERENDER_COUNT = "prerender_count"
GPU_DEVICE = "gpu_device"
params = {UPDATE: False,
WIDTH: 480,
HEIGHT: 270,
STEREO_CAMERA: "",
AUDIOFILE:"/tmp/renderchan-test.wav",
FORMAT: "png",
CYCLES_SAMPLES: None,
PRERENDER_COUNT: 0,
GPU_DEVICE: ""}
def find_camera(cameras, base, prefixes, suffixes, side_index = 0):
for camera in cameras:
for prefix in prefixes:
if camera.name.lower().startswith(prefix[side_index]) and camera.name[len(prefix[side_index]):] == base:
return camera
for suffix in suffixes:
if camera.name.lower().endswith(suffix[side_index]) and camera.name[:-len(suffix[side_index])] == base:
return camera
return None
def main():
update = params[UPDATE]
sce = bpy.context.scene
sce.frame_current=sce.frame_current+1
sce.frame_current=sce.frame_current-1
have_builtin_stereo = "use_multiview" in dir(sce.render)
if params[STEREO_CAMERA] != "":
# Search for old camera simulation first
found=False
for ob in sce.objects:
if ob.name == params[STEREO_CAMERA] and ob.type == 'CAMERA':
sce.camera = ob
found=True
break
if not found:
separators = ['_', '.', '-', ' ']
side = params[STEREO_CAMERA]
alt_side = "left" if side == "right" else "right"
sides = [side, side[0]]
alt_sides = [alt_side, alt_side[0]]
prefixes = [(sides[side_index] + sep, alt_sides[side_index] + sep) for sep in separators for side_index in range(0, len(sides))]
prefixes.append((side, alt_side))
suffixes = [(sep + sides[side_index], sep + alt_sides[side_index]) for sep in separators for side_index in range(0, len(sides))]
suffixes.append((side, alt_side))
cameras = [obj for obj in sce.objects if obj.type == "CAMERA"]
if len(cameras) < 1:
print("Error: Cannot render, no camera in file " + bpy.data.filepath, file=sys.stderr)
exit(1)
else:
selected_camera = find_camera(cameras, sce.camera.name, prefixes, suffixes)
base = None
if not selected_camera:
for prefix in prefixes:
if sce.camera.name.lower().startswith(prefix[0]):
base = sce.camera.name[len(prefix[0]):]
break
if sce.camera.name.lower().startswith(prefix[1]):
base = sce.camera.name[len(prefix[1]):]
break
if base:
selected_camera = find_camera(cameras, base, prefixes, suffixes)
if not selected_camera:
for suffix in suffixes:
if sce.camera.name.lower().endswith(suffix[0]):
base = sce.camera.name[:-len(suffix[0])]
break
if sce.camera.name.lower().endswith(suffix[1]):
base = sce.camera.name[:-len(suffix[1])]
break
if base:
selected_camera = find_camera(cameras, base, prefixes, suffixes)
if selected_camera:
sce.camera = selected_camera
found = True
if found:
# We use old method, disable multiview
if have_builtin_stereo:
sce.render.use_multiview = False
else:
if have_builtin_stereo:
# Use native blender
sce.render.use_multiview = True
side = params[STEREO_CAMERA]
alt_side = "left" if side == "right" else "right"
sce.render.views[side].use = True
sce.render.views[alt_side].use = False
sce.render.views[side].file_suffix = ""
sce.render.views[alt_side].file_suffix = "_"+alt_side
                sce.render.views_format = 'INDIVIDUAL'
else:
print(prefixes)
print(suffixes)
print([c.name for c in cameras])
print(sce.camera.name)
print("Error: Could not find " + params[STEREO_CAMERA] + " camera for the stereo render of " + bpy.data.filepath, file=sys.stderr)
exit(1)
else:
if have_builtin_stereo:
sce.render.use_multiview = False
rend = sce.render
rend.resolution_percentage = 100
#rend.alpha_mode = "PREMUL"
#rend.color_mode = "RGBA"
# Cycles special tweaks
if sce.render.engine == 'CYCLES':
        # Allow overriding samples from .conf file
if params[CYCLES_SAMPLES]!=None:
sce.cycles.samples = params[CYCLES_SAMPLES]
# Allow to set GPU device from RenderChan module settings
# For information how to identify your GPU device from
# a cluster console, see http://www.dalaifelinto.com/?p=746
if params[GPU_DEVICE]==None:
# That means we have to explicitly force CPU rendering
sce.cycles.device = 'CPU'
elif params[GPU_DEVICE]!="":
print("Cycles: GPU configuration found")
error=False
if 'CUDA' in bpy.context.user_preferences.system.compute_device_type:
bpy.context.user_preferences.system.compute_device_type = 'CUDA'
else:
error = True
print("ERROR: Cannot activate CUDA.")
if not error and params[GPU_DEVICE] in bpy.context.user_preferences.system.bl_rna.properties['compute_device'].enum_items.keys():
bpy.context.user_preferences.system.compute_device = params[GPU_DEVICE]
else:
error = True
# FIXME: This test probably should go somewhere else (in modules's CheckRequirements?)
print("ERROR: Cannot set GPU device (%s) - not found." % params[GPU_DEVICE])
print()
print("Available devices:")
for device in bpy.context.user_preferences.system.bl_rna.properties['compute_device'].enum_items.keys():
print(" * %s\n" % device)
print()
if not error:
sce.cycles.device = 'GPU'
else:
sce.cycles.device = 'CPU'
# Optimize tiles for speed depending on rendering device
# See tip #3 at http://www.blenderguru.com/4-easy-ways-to-speed-up-cycles/
if sce.cycles.device == 'GPU':
print("Cycles: GPU device used")
sce.render.tile_x = 256
sce.render.tile_y = 256
sce.cycles.debug_use_spatial_splits = False
else:
print("Cycles: CPU device used")
bpy.context.user_preferences.system.compute_device_type = 'NONE'
sce.render.tile_x = 64
sce.render.tile_y = 64
sce.cycles.debug_use_spatial_splits = True
print()
#sce.cycles.use_cache = True # Cache BVH
sce.cycles.debug_bvh_type = 'STATIC_BVH'
#sce.render.use_persistent_data = True # Persistent Images
# Suff for updating file
size_x = rend.resolution_x
size_y = rend.resolution_y
fps = rend.fps
    # This is a dirty hack to make sure objects are initialized properly.
    # Sometimes (especially when using linked groups with armatures and complex expressions)
    # the objects are not properly initialized and appear correctly only on the second or third render.
    # With the trick below you can instruct Blender to perform a number of pre-renders to
    # ensure that all objects are properly initialized.
# Just put a relevant option for .conf file, like this:
# blender_prerender_count=1
for _ in range(params[PRERENDER_COUNT]):
rend.resolution_x = 32
rend.resolution_y = 32
bpy.ops.render.render()
rend.resolution_x = params[WIDTH]
rend.resolution_y = params[HEIGHT]
#rend.fps = $FPS
# OPENEXR stuff
#rend.exr_zbuf = False
#rend.use_exr_half = True
#rend.exr_preview = False
rend.use_placeholder = False
rend.use_overwrite = True
# Force format here
if params[FORMAT] == "png":
rend.image_settings.file_format = "PNG"
elif params[FORMAT] == "avi":
if (2, 79, 0) < bpy.app.version:
rend.image_settings.file_format = "H264"
rend.ffmpeg.format = "H264"
rend.ffmpeg.use_lossless_output=True
#rend.ffmpeg.audio_codec="AAC"
else:
rend.image_settings.file_format = "FFMPEG"
rend.ffmpeg.format = "AVI"
rend.ffmpeg.codec='H264'
rend.ffmpeg.constant_rate_factor='LOSSLESS'
rend.ffmpeg.use_lossless_output=True
rend.ffmpeg.audio_codec='PCM'
bpy.ops.sound.bake_animation()
# Update .blend file if permitted and we have width or height changed
if update and ( size_x != rend.resolution_x or size_y != rend.resolution_y or fps != rend.fps ):
bpy.ops.wm.save_mainfile("EXEC_DEFAULT", filepath=bpy.data.filepath)
# Dump audio track if any
#audio_found = False
#for path in bpy.utils.blend_paths(0):
# if path.endswith(".wav"):
# bpy.ops.sound.mixdown(filepath=params[AUDIOFILE], check_existing=False, container="WAV", codec='PCM', accuracy=32, format='S32')
# break
if __name__ == '__main__':
main()
| {
"content_hash": "6d4d8c56bdb23b9275db848cf659a330",
"timestamp": "",
"source": "github",
"line_count": 267,
"max_line_length": 146,
"avg_line_length": 37.325842696629216,
"alnum_prop": 0.5569937788480834,
"repo_name": "scribblemaniac/RenderChan",
"id": "6a6bf775ac643f949adb3b2518e1d09d019be449",
"size": "9966",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "renderchan/contrib/blender/render.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "29"
},
{
"name": "Python",
"bytes": "228315"
},
{
"name": "Shell",
"bytes": "1879"
}
],
"symlink_target": ""
} |
"""flask."""
from flask import Flask
from flask import abort
from flask import request
from flask import render_template
import driver
# from t_infinity.engineer_create import EngineerCreate
# from t_infinity.engineer_service_report import ServiceReport
# from t_infinity.login import Login
# from t_infinity.logistics_create import LogisticsCreate
# from t_infinity import utils
# from t_infinity.engineer_shipout import EngineerShipout
app = Flask(__name__)
@app.route("/")
def index():
"""return index page.
single page app so it's all here
TODO - index page
"""
return render_template('index.html')
@app.route("/api/1/t-infinity/", methods = ['GET'])
def list_all_endpoints():
"""return list of al endpoints"""
return abort(501)
app.debug = True
app.run() | {
"content_hash": "67715933a1a50d89fd4450c5aa2c51d8",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 62,
"avg_line_length": 23.38235294117647,
"alnum_prop": 0.7220125786163522,
"repo_name": "k33k00/tesseract_infinity",
"id": "f955bf9d17ab05704179a76ed7943c42e2a90bde",
"size": "795",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "t_infinity.superold/gui/run.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "715454"
},
{
"name": "HTML",
"bytes": "27294"
},
{
"name": "JavaScript",
"bytes": "822463"
},
{
"name": "Python",
"bytes": "198945"
}
],
"symlink_target": ""
} |
from .sub_resource import SubResource
class VirtualNetworkGatewayIPConfiguration(SubResource):
"""IP configuration for virtual network gateway.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param id: Resource ID.
:type id: str
:param private_ip_allocation_method: The private IP allocation method.
Possible values are: 'Static' and 'Dynamic'. Possible values include:
'Static', 'Dynamic'
:type private_ip_allocation_method: str or
~azure.mgmt.network.v2016_09_01.models.IPAllocationMethod
:param subnet: Required. The reference of the subnet resource.
:type subnet: ~azure.mgmt.network.v2016_09_01.models.SubResource
:param public_ip_address: Required. The reference of the public IP
resource.
:type public_ip_address:
~azure.mgmt.network.v2016_09_01.models.SubResource
:ivar provisioning_state: The provisioning state of the public IP
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:vartype provisioning_state: str
:param name: The name of the resource that is unique within a resource
group. This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
"""
_validation = {
'subnet': {'required': True},
'public_ip_address': {'required': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'private_ip_allocation_method': {'key': 'properties.privateIPAllocationMethod', 'type': 'str'},
'subnet': {'key': 'properties.subnet', 'type': 'SubResource'},
'public_ip_address': {'key': 'properties.publicIPAddress', 'type': 'SubResource'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, **kwargs):
super(VirtualNetworkGatewayIPConfiguration, self).__init__(**kwargs)
self.private_ip_allocation_method = kwargs.get('private_ip_allocation_method', None)
self.subnet = kwargs.get('subnet', None)
self.public_ip_address = kwargs.get('public_ip_address', None)
self.provisioning_state = None
self.name = kwargs.get('name', None)
self.etag = kwargs.get('etag', None)
| {
"content_hash": "38004c6f72b99d100d378224063ccffb",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 103,
"avg_line_length": 43.33898305084746,
"alnum_prop": 0.6589753617520532,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "17a483af135717ee03c503082506e7fd33c3890d",
"size": "3031",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2016_09_01/models/virtual_network_gateway_ip_configuration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('beer', '0002_beer_beer_type'),
]
operations = [
migrations.RemoveField(
model_name='burger',
name='maker',
),
migrations.DeleteModel(
name='Burger',
),
]
| {
"content_hash": "16e7cfb1e64bb3382a9bdbda522855d6",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 40,
"avg_line_length": 19.1,
"alnum_prop": 0.5497382198952879,
"repo_name": "campovski/beernburger",
"id": "26f6d853e035b76c4013e592ecd7d3368f50311b",
"size": "453",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "beer/migrations/0003_auto_20170427_1140.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "55862"
},
{
"name": "HTML",
"bytes": "29577"
},
{
"name": "JavaScript",
"bytes": "97398"
},
{
"name": "Python",
"bytes": "46872"
}
],
"symlink_target": ""
} |
from azure.identity import DefaultAzureCredential
from azure.mgmt.apimanagement import ApiManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-apimanagement
# USAGE
python api_management_service_get_sso_token.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = ApiManagementClient(
credential=DefaultAzureCredential(),
subscription_id="subid",
)
response = client.api_management_service.get_sso_token(
resource_group_name="rg1",
service_name="apimService1",
)
print(response)
# x-ms-original-file: specification/apimanagement/resource-manager/Microsoft.ApiManagement/stable/2021-08-01/examples/ApiManagementServiceGetSsoToken.json
if __name__ == "__main__":
main()
| {
"content_hash": "f6802268a662ff19c01de509a9cb8321",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 154,
"avg_line_length": 33.666666666666664,
"alnum_prop": 0.7371737173717372,
"repo_name": "Azure/azure-sdk-for-python",
"id": "8219fe7c80f88fe6bd6b206ec93f928e5e348c72",
"size": "1579",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/apimanagement/azure-mgmt-apimanagement/generated_samples/api_management_service_get_sso_token.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(self, plotly_name="color", parent_name="parcats.line", **kwargs):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "calc"),
colorscale_path=kwargs.pop("colorscale_path", "parcats.line.colorscale"),
**kwargs,
)
| {
"content_hash": "a4eaeb6e1f2fe2ff86f9342ce3ca2a9f",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 85,
"avg_line_length": 40.92307692307692,
"alnum_prop": 0.618421052631579,
"repo_name": "plotly/plotly.py",
"id": "c682c57cc651bea1fcecfdec644d478bfb445fdd",
"size": "532",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/parcats/line/_color.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
from django.shortcuts import render
# def init(request, api_url, api_settings, viewer_id, group_id, is_app_user):
# return render(request, 'vk/index.html', context)
# api_url=https://api.vk.com/api.php
# &api_settings=0
# &viewer_id=123456
# &group_id=654321
# &is_app_user=0 \\
from django.views.decorators.clickjacking import xframe_options_exempt
from vkapp.bot.dao.newsDAO import get_news_proposed_today
from vkapp.bot.models import Blogger, News, AdminReview, Publication
from datetime import datetime, timedelta, time
@xframe_options_exempt
def init(request, **request_params):
param = list(request.GET.items()) #.get('viewer_id')
    # bound today's date range for the filter
    today = datetime.now()
    today_start = datetime.combine(today.date(), time.min)
    today_end = datetime.combine(today.date(), time.max)
    news_cnt = News.objects.filter(date_time__lte=today_end, date_time__gte=today_start).count()
return render(request, 'vk/index.html', {'news_count': news_cnt})
# context = {'data': param}
# return render(request, 'vk/index.html', context)
@xframe_options_exempt
def blogers(request):
return render(request, 'vk/news.html', {'news': news})
@xframe_options_exempt
def news(request):
return render(request, 'vk/news1.html', {'news': news})
@xframe_options_exempt
def news1(request):
return render(request, 'vk/news2.html', {'news': news})
@xframe_options_exempt
def news2(request):
return render(request, 'vk/news.html', {'news': news})
@xframe_options_exempt
def like(request, news_id):
    news = News.objects.get(id=news_id)
review = AdminReview.objects.get(news=news)
review.rating = 1
review.save()
return render(request, 'vk/init.html')
@xframe_options_exempt
def dislike(request, news_id):
    news = News.objects.get(id=news_id)
review = AdminReview.objects.get(news=news)
review.rating = -1
review.save()
return render(request, 'vk/init.html') | {
"content_hash": "1abee14f727bc78216ef6361f5625dd2",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 96,
"avg_line_length": 32.81481481481482,
"alnum_prop": 0.7065462753950339,
"repo_name": "ParuninPavel/lenta4_hack",
"id": "6b092633ebd77184c0cfa8bce6dee69c1818eef4",
"size": "1772",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vkapp/vk/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4758"
},
{
"name": "HTML",
"bytes": "29228"
},
{
"name": "JavaScript",
"bytes": "817"
},
{
"name": "Jupyter Notebook",
"bytes": "2080923"
},
{
"name": "Python",
"bytes": "81323"
},
{
"name": "Shell",
"bytes": "8906"
}
],
"symlink_target": ""
} |
'''
This script computes a 2-7 year bandpass-filtered coral series and plots it above the original one
Yuxin Zhou
'''
from scipy.signal import butter, lfilter, filtfilt
def butter_bandpass(lowcut, highcut, fs, order=4):
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
b, a = butter(order, [low, high], btype='bandpass')
return b, a
def butter_bandpass_filter(data, lowcut, highcut, fs, order=4):
b, a = butter_bandpass(lowcut, highcut, fs, order=order)
y = filtfilt(b, a, data)
return y
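# Worked example (sketch, not part of the original script): for monthly data the
# sampling rate is 12 samples/year, so a 2-7 year band corresponds to cutoff
# frequencies of 1/7 and 1/2 cycles/year; `coral_series` is any 1-D numpy array
# of monthly values.
#
# filtered = butter_bandpass_filter(coral_series, lowcut=1.0/7, highcut=1.0/2,
#                                   fs=12, order=4)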
# name = 'CCSM4_historical_r1i1p1_185001-200512_Indo_Pacific_coral.nc'
# coralVar = DJF_mean.averager(name, 240, 300)
# coral = coralVar.getValue()
# fs = 1680
# lowcut = 20
# highcut = 70
# f_order = 3
# y = butter_bandpass_filter(coral, lowcut, highcut, fs, f_order)
# plt.clf()
# f1, = plt.plot(coral, alpha=0.3)
# f2, = plt.plot(y)
# plt.legend([f1,f2],['Original','filtered'])
# plt.title('monthly data, order = %s' % f_order)
# plt.savefig('CCSM4_coral.pdf')
| {
"content_hash": "b833a59c49cd5cb1e7a40b9528648601",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 97,
"avg_line_length": 29.264705882352942,
"alnum_prop": 0.6633165829145728,
"repo_name": "CommonClimate/EmileGeay_NatGeo2015",
"id": "5ce18b37db2859adf26a7213f8148f7a262f1f5d",
"size": "995",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/python/bandpass.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "689"
},
{
"name": "HTML",
"bytes": "36251"
},
{
"name": "Matlab",
"bytes": "472620"
},
{
"name": "Python",
"bytes": "20856"
}
],
"symlink_target": ""
} |
"""A module for manipulating Images, which are specially wrapped Pygame
surfaces.
"""
import pygame
import spyral
import copy
import math
def _new_spyral_surface(size):
"""
Internal method for creating a new Spyral-compliant Pygame surface.
"""
return pygame.Surface((int(size[0]),
int(size[1])),
pygame.SRCALPHA, 32).convert_alpha()
def from_sequence(images, orientation="right", padding=0):
"""
A function that returns a new Image from a list of images by
placing them next to each other.
:param images: A list of images to lay out.
:type images: List of :class:`Image <spyral.Image>`
:param str orientation: Either 'left', 'right', 'above', 'below', or
'square' (square images will be placed in a grid
shape, like a chess board).
:param padding: The padding between each image. Can be specified as a
scalar number (for constant padding between all images)
or a list (for different paddings between each image).
:type padding: int or a list of ints.
:returns: A new :class:`Image <spyral.Image>`
"""
    if orientation == 'square':
        length = int(math.ceil(math.sqrt(len(images))))
        sequence = []
        x = y = max_height = 0
        for index, image in enumerate(images):
            if index % length == 0:
                x = 0
                y += max_height
                max_height = 0
            sequence.append((image, (x, y)))
            x += image.width
            max_height = max(max_height, image.height)
else:
if orientation in ('left', 'right'):
selector = spyral.Vec2D(1, 0)
else:
selector = spyral.Vec2D(0, 1)
if orientation in ('left', 'above'):
            images = list(reversed(images))
if type(padding) in (float, int, long):
padding = [padding] * len(images)
else:
padding = list(padding)
padding.append(0)
base = spyral.Vec2D(0, 0)
sequence = []
for image, padding in zip(images, padding):
sequence.append((image, base))
base = base + selector * (image.size + (padding, padding))
return from_conglomerate(sequence)
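# Usage sketch (not part of the original module): lay three images out
# left-to-right with 4 pixels of padding between them; img_a, img_b and img_c
# are any spyral.Image instances.
#
# strip = from_sequence([img_a, img_b, img_c], orientation="right", padding=4)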
def from_conglomerate(sequence):
"""
A function that generates a new image from a sequence of
    (image, position) pairs. These images will be placed onto a single image
    large enough to hold all of them. More explicit and less convenient than
    :func:`from_sequence <spyral.image.from_sequence>`.
:param sequence: A list of (image, position) pairs, where the positions
are :class:`Vec2D <spyral.Vec2D>` s.
:type sequence: List of image, position pairs.
:returns: A new :class:`Image <spyral.Image>`
"""
width, height = 0, 0
for image, (x, y) in sequence:
width = max(width, x+image.width)
height = max(height, y+image.height)
new = Image(size=(width, height))
for image, (x, y) in sequence:
new.draw_image(image, (x, y))
return new
def render_nine_slice(image, size):
"""
Creates a new image by dividing the given image into a 3x3 grid, and stretching
the sides and center while leaving the corners the same size. This is ideal
for buttons and other rectangular shapes.
:param image: The image to stretch.
:type image: :class:`Image <spyral.Image>`
:param size: The new (width, height) of this image.
:type size: :class:`Vec2D <spyral.Vec2D>`
:returns: A new :class:`Image <spyral.Image>` similar to the old one.
"""
bs = spyral.Vec2D(size)
bw = size[0]
bh = size[1]
ps = image.size / 3
pw = int(ps[0])
ph = int(ps[1])
surf = image._surf
# Hack: If we don't make it one px large things get cut
image = spyral.Image(size=bs + (1, 1))
s = image._surf
# should probably fix the math instead, but it works for now
topleft = surf.subsurface(pygame.Rect((0, 0), ps))
left = surf.subsurface(pygame.Rect((0, ph), ps))
    bottomleft = surf.subsurface(pygame.Rect((0, 2*ph), ps))
top = surf.subsurface(pygame.Rect((pw, 0), ps))
mid = surf.subsurface(pygame.Rect((pw, ph), ps))
bottom = surf.subsurface(pygame.Rect((pw, 2*ph), ps))
topright = surf.subsurface(pygame.Rect((2*pw, 0), ps))
    right = surf.subsurface(pygame.Rect((2*pw, ph), ps))
    bottomright = surf.subsurface(pygame.Rect((2*pw, 2*ph), ps))
# corners
s.blit(topleft, (0, 0))
s.blit(topright, (bw - pw, 0))
s.blit(bottomleft, (0, bh - ph))
s.blit(bottomright, bs - ps)
# left and right border
for y in range(ph, bh - ph - ph, ph):
s.blit(left, (0, y))
s.blit(right, (bw - pw, y))
s.blit(left, (0, bh - ph - ph))
s.blit(right, (bw - pw, bh - ph - ph))
# top and bottom border
for x in range(pw, bw - pw - pw, pw):
s.blit(top, (x, 0))
s.blit(bottom, (x, bh - ph))
s.blit(top, (bw - pw - pw, 0))
s.blit(bottom, (bw - pw - pw, bh - ph))
# center
for x in range(pw, bw - pw - pw, pw):
for y in range(ph, bh - ph - ph, ph):
s.blit(mid, (x, y))
for x in range(pw, bw - pw - pw, pw):
s.blit(mid, (x, bh - ph - ph))
for y in range(ph, bh - ph - ph, ph):
s.blit(mid, (bw - pw - pw, y))
s.blit(mid, (bw - pw - pw, bh - ph - ph))
return image
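# Usage sketch (not part of the original module): stretch a small button skin
# to 200x60 while keeping its corner patches unscaled; "button.png" is a
# hypothetical 30x30 source image.
#
# skin = Image(filename="button.png")
# button_image = render_nine_slice(skin, (200, 60))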
class Image(object):
"""
The image is the basic drawable item in spyral. They can be created
either by loading from common file formats, or by creating a new
image and using some of the draw methods. Images are not drawn on
their own, they are placed as the *image* attribute on Sprites to
be drawn.
Almost all of the methods of an Image instance return the Image itself,
enabling commands to be chained in a
`fluent interface <http://en.wikipedia.org/wiki/Fluent_interface>`_.
:param size: If size is passed, creates a new blank image of that size to
draw on. If you do not specify a size, you *must* pass in a
filename.
:type size: :class:`Vec2D <spyral.Vec2D>`
:param str filename: If filename is set, the file with that name is loaded.
The appendix has a list of the
:ref:`valid image formats<ref.image_formats>`. If you do
not specify a filename, you *must* pass in a size.
"""
def __init__(self, filename=None, size=None):
if size is not None and filename is not None:
raise ValueError("Must specify exactly one of size and filename. See http://platipy.org/en/latest/spyral_docs.html#spyral.image.Image")
if size is None and filename is None:
raise ValueError("Must specify exactly one of size and filename. See http://platipy.org/en/latest/spyral_docs.html#spyral.image.Image")
if size is not None:
self._surf = _new_spyral_surface(size)
self._name = None
else:
self._surf = pygame.image.load(filename).convert_alpha()
self._name = filename
self._version = 1
def _get_width(self):
return self._surf.get_width()
#: The width of this image in pixels (int). Read-only.
width = property(_get_width)
def _get_height(self):
return self._surf.get_height()
#: The height of this image in pixels (int). Read-only.
height = property(_get_height)
def _get_size(self):
return spyral.Vec2D(self._surf.get_size())
#: The (width, height) of the image (:class:`Vec2D <spyral.Vec2D`).
#: Read-only.
size = property(_get_size)
def fill(self, color):
"""
Fills the entire image with the specified color.
:param color: a three-tuple of RGB values ranging from 0-255. Example:
(255, 128, 0) is orange.
:type color: a three-tuple of ints.
:returns: This image.
"""
self._surf.fill(color)
self._version += 1
spyral.util.scale_surface.clear(self._surf)
return self
def draw_rect(self, color, position, size=None,
border_width=0, anchor='topleft'):
"""
Draws a rectangle on this image.
:param color: a three-tuple of RGB values ranging from 0-255. Example:
(255, 128, 0) is orange.
:type color: a three-tuple of ints.
:param position: The starting position of the rect (top-left corner). If
position is a Rect, then size should be `None`.
:type position: :class:`Vec2D <spyral.Vec2D>` or
:class:`Rect <spyral.Rect>`
:param size: The size of the rectangle; should not be given if position
is a rect.
:type size: :class:`Vec2D <spyral.Vec2D>`
:param int border_width: The width of the border to draw. If it is 0,
the rectangle is filled with the color
specified.
:param str anchor: The anchor parameter is an
:ref:`anchor position <ref.anchors>`.
:returns: This image.
"""
if size is None:
rect = spyral.Rect(position)
else:
rect = spyral.Rect(position, size)
offset = self._calculate_offset(anchor, rect.size)
pygame.draw.rect(self._surf, color,
(rect.pos + offset, rect.size), border_width)
self._version += 1
spyral.util.scale_surface.clear(self._surf)
return self
def draw_lines(self, color, points, width=1, closed=False):
"""
Draws a series of connected lines on a image, with the
vertices specified by points. This does not draw any sort of
end caps on lines.
:param color: a three-tuple of RGB values ranging from 0-255. Example:
(255, 128, 0) is orange.
:type color: a three-tuple of ints.
:param points: A list of points that will be connected, one to another.
:type points: A list of :class:`Vec2D <spyral.Vec2D>` s.
:param int width: The width of the lines.
:param bool closed: If closed is True, the first and last point will be
connected. If closed is True and width is 0, the
shape will be filled.
:returns: This image.
"""
if width == 1:
pygame.draw.aalines(self._surf, color, closed, points)
else:
pygame.draw.lines(self._surf, color, closed, points, width)
self._version += 1
spyral.util.scale_surface.clear(self._surf)
return self
def draw_circle(self, color, position, radius, width=0, anchor='topleft'):
"""
Draws a circle on this image.
:param color: a three-tuple of RGB values ranging from 0-255. Example:
(255, 128, 0) is orange.
:type color: a three-tuple of ints.
:param position: The center of this circle
:type position: :class:`Vec2D <spyral.Vec2D>`
:param int radius: The radius of this circle
:param int width: The width of the circle. If it is 0, the circle is
filled with the color specified.
:param str anchor: The anchor parameter is an
:ref:`anchor position <ref.anchors>`.
:returns: This image.
"""
offset = self._calculate_offset(anchor)
pygame.draw.circle(self._surf, color, (position + offset).floor(),
radius, width)
self._version += 1
spyral.util.scale_surface.clear(self._surf)
return self
def draw_ellipse(self, color, position, size=None,
border_width=0, anchor='topleft'):
"""
Draws an ellipse on this image.
:param color: a three-tuple of RGB values ranging from 0-255. Example:
(255, 128, 0) is orange.
:type color: a three-tuple of ints.
:param position: The starting position of the ellipse (top-left corner).
If position is a Rect, then size should be `None`.
:type position: :class:`Vec2D <spyral.Vec2D>` or
:class:`Rect <spyral.Rect>`
:param size: The size of the ellipse; should not be given if position is
a rect.
:type size: :class:`Vec2D <spyral.Vec2D>`
:param int border_width: The width of the ellipse. If it is 0, the
ellipse is filled with the color specified.
:param str anchor: The anchor parameter is an
:ref:`anchor position <ref.anchors>`.
:returns: This image.
"""
if size is None:
rect = spyral.Rect(position)
else:
rect = spyral.Rect(position, size)
offset = self._calculate_offset(anchor, rect.size)
pygame.draw.ellipse(self._surf, color,
(rect.pos + offset, rect.size), border_width)
self._version += 1
spyral.util.scale_surface.clear(self._surf)
return self
def draw_point(self, color, position, anchor='topleft'):
"""
Draws a point on this image.
:param color: a three-tuple of RGB values ranging from 0-255. Example:
(255, 128, 0) is orange.
:type color: a three-tuple of ints.
:param position: The position of this point.
:type position: :class:`Vec2D <spyral.Vec2D>`
:param str anchor: The anchor parameter is an
:ref:`anchor position <ref.anchors>`.
:returns: This image.
"""
offset = self._calculate_offset(anchor)
self._surf.set_at(position + offset, color)
self._version += 1
spyral.util.scale_surface.clear(self._surf)
return self
def draw_arc(self, color, start_angle, end_angle,
position, size=None, border_width=0, anchor='topleft'):
"""
Draws an elliptical arc on this image.
:param color: a three-tuple of RGB values ranging from 0-255. Example:
(255, 128, 0) is orange.
:type color: a three-tuple of ints.
:param float start_angle: The starting angle, in radians, of the arc.
:param float end_angle: The ending angle, in radians, of the arc.
:param position: The starting position of the ellipse (top-left corner).
If position is a Rect, then size should be `None`.
:type position: :class:`Vec2D <spyral.Vec2D>` or
:class:`Rect <spyral.Rect>`
:param size: The size of the ellipse; should not be given if position is
a rect.
:type size: :class:`Vec2D <spyral.Vec2D>`
:param int border_width: The width of the ellipse. If it is 0, the
ellipse is filled with the color specified.
:param str anchor: The anchor parameter is an
:ref:`anchor position <ref.anchors>`.
:returns: This image.
"""
if size is None:
rect = spyral.Rect(position)
else:
rect = spyral.Rect(position, size)
offset = self._calculate_offset(anchor, rect.size)
pygame.draw.arc(self._surf, color, (rect.pos + offset, rect.size),
start_angle, end_angle, border_width)
self._version += 1
spyral.util.scale_surface.clear(self._surf)
return self
def draw_image(self, image, position=(0, 0), anchor='topleft'):
"""
Draws another image over this one.
:param image: The image to overlay on top of this one.
:type image: :class:`Image <spyral.Image>`
:param position: The position of this image.
:type position: :class:`Vec2D <spyral.Vec2D>`
:param str anchor: The anchor parameter is an
:ref:`anchor position <ref.anchors>`.
:returns: This image.
"""
offset = self._calculate_offset(anchor, image._surf.get_size())
self._surf.blit(image._surf, position + offset)
self._version += 1
spyral.util.scale_surface.clear(self._surf)
return self
def rotate(self, angle):
"""
Rotates the image by angle degrees clockwise. This may change the image
dimensions if the angle is not a multiple of 90.
        Successive rotations degrade image quality. Save a copy of the
original if you plan to do many rotations.
:param float angle: The number of degrees to rotate.
:returns: This image.
"""
self._surf = pygame.transform.rotate(self._surf, angle).convert_alpha()
self._version += 1
return self
def scale(self, size):
"""
Scales the image to the destination size.
:param size: The new size of the image.
:type size: :class:`Vec2D <spyral.Vec2D>`
:returns: This image.
"""
self._surf = pygame.transform.smoothscale(self._surf,
size).convert_alpha()
self._version += 1
return self
def flip(self, flip_x=True, flip_y=True):
"""
Flips the image horizontally, vertically, or both.
:param bool flip_x: whether to flip horizontally.
:param bool flip_y: whether to flip vertically.
:returns: This image.
"""
self._version += 1
self._surf = pygame.transform.flip(self._surf,
flip_x, flip_y).convert_alpha()
return self
def copy(self):
"""
Returns a copy of this image that can be changed while preserving the
original.
:returns: A new image.
"""
new = copy.copy(self)
new._surf = self._surf.copy()
return new
def crop(self, position, size=None):
"""
Removes the edges of an image, keeping the internal rectangle specified
by position and size.
:param position: The upperleft corner of the internal rectangle that
will be preserved.
:type position: a :class:`Vec2D <spyral.Vec2D>` or a
:class:`Rect <spyral.Rect>`.
:param size: The size of the internal rectangle to preserve. If a Rect
was passed in for position, this should be None.
:type size: :class:`Vec2D <spyral.Vec2D>` or None.
:returns: This image.
"""
if size is None:
rect = spyral.Rect(position)
else:
rect = spyral.Rect(position, size)
new = _new_spyral_surface(size)
new.blit(self._surf, (0, 0), (rect.pos, rect.size))
self._surf = new
self._version += 1
return self
def _calculate_offset(self, anchor_type, size=(0, 0)):
"""
Internal method for calculating the offset associated with an
anchor type.
:param anchor_type: A string indicating the position of the anchor,
taken from :ref:`anchor position <ref.anchors>`. A
numerical offset can also be specified.
:type anchor_type: str or a :class:`Vec2D <spyral.Vec2D>`.
:param size: The size of the region to offset in.
:type size: :class:`Vec2D <spyral.Vec2D>`.
"""
w, h = self._surf.get_size()
w2, h2 = size
if anchor_type == 'topleft':
return spyral.Vec2D(0, 0)
elif anchor_type == 'topright':
return spyral.Vec2D(w - w2, 0)
elif anchor_type == 'midtop':
return spyral.Vec2D((w - w2) / 2., 0)
elif anchor_type == 'bottomleft':
return spyral.Vec2D(0, h - h2)
elif anchor_type == 'bottomright':
return spyral.Vec2D(w - w2, h - h2)
elif anchor_type == 'midbottom':
return spyral.Vec2D((w - w2) / 2., h - h2)
elif anchor_type == 'midleft':
return spyral.Vec2D(0, (h - h2) / 2.)
elif anchor_type == 'midright':
return spyral.Vec2D(w - w2, (h - h2) / 2.)
elif anchor_type == 'center':
return spyral.Vec2D((w - w2) / 2., (h - h2) / 2.)
else:
return spyral.Vec2D(anchor_type) - spyral.Vec2D(w2, h2)
def get_subimage_by_pos(self, x, y, width, height):
"""
Extract image from larger sheet. Used for spritesheets.
"""
subimage = _new_spyral_surface((width, height))
subimage.blit(self._surf, (0, 0), (x, y, width, height))
new_image = Image(size=(width, height))
new_image._surf = subimage
return new_image
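# Fluent-interface sketch (not part of the original module): because the draw
# methods return the image itself, calls can be chained.
#
# badge = (Image(size=(64, 64))
#          .fill((32, 32, 32))
#          .draw_circle((255, 128, 0), (32, 32), 24)
#          .rotate(45))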
| {
"content_hash": "d2038f402c52bbb201dc4a41323dbdaf",
"timestamp": "",
"source": "github",
"line_count": 530,
"max_line_length": 147,
"avg_line_length": 39.509433962264154,
"alnum_prop": 0.5683381088825215,
"repo_name": "justinmeister/spaceinvaders-spyral",
"id": "8e577769602bcedf155e68525e67dbc612d9bac7",
"size": "20940",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libraries/spyral/image.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "95"
},
{
"name": "Perl",
"bytes": "3224"
},
{
"name": "Python",
"bytes": "368084"
}
],
"symlink_target": ""
} |
import xml.etree.ElementTree as ET
from programy.parser.template.nodes.base import TemplateNode
from programy.parser.template.nodes.indexed import TemplateIndexedNode
from test.parser.template.base import TemplateTestsBaseClass
class TemplateIndexedNodeTests(TemplateTestsBaseClass):
def test_node(self):
root = TemplateNode()
self.assertIsNotNone(root)
self.assertIsNotNone(root.children)
self.assertEqual(len(root.children), 0)
node = TemplateIndexedNode()
self.assertIsNotNone(node)
root.append(node)
self.assertEqual(len(root.children), 1)
| {
"content_hash": "b15c3571f562d01caa3f893e9aa07c3d",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 70,
"avg_line_length": 29.476190476190474,
"alnum_prop": 0.7382875605815832,
"repo_name": "JustArchi/program-y",
"id": "80a960005602c42c36adb8b9a99d95521a28bb14",
"size": "619",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/test/parser/template/nodes/test_indexed.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "937"
},
{
"name": "HTML",
"bytes": "1580"
},
{
"name": "Python",
"bytes": "1027605"
},
{
"name": "Shell",
"bytes": "2835"
}
],
"symlink_target": ""
} |
"""
Helper routines for density processing, from the Gpcr project originally
"""
import numpy as np
def read_simple(filename,lownam,uppnam) :
mat = np.load(filename)
return mat[lownam],mat[uppnam]
def read_and_scale(filename,lownam,uppnam,scaling=None) :
lowmat,uppmat = read_simple(filename,lownam,uppnam)
    if scaling is not None and scaling["flag"] is not None :
lowsel = lowmat!=0.000
uppsel = uppmat!=0.000
if scaling["flag"] > 0 :
uppmat[uppsel] = 1.987*0.3*np.log(uppmat[uppsel]/scaling["factor"][1])
lowmat[lowsel] = 1.987*0.3*np.log(lowmat[lowsel]/scaling["factor"][0])
else :
uppmat[uppsel] = uppmat[uppsel]/scaling["factor"][1]
lowmat[lowsel] = lowmat[lowsel]/scaling["factor"][0]
return lowmat,uppmat
def read_and_filter(filename,lownam,uppnam,lowfilter,uppfilter,scaling=None) :
lowmat,uppmat=read_and_scale(filename,lownam,uppnam,scaling)
if lowfilter != None and uppfilter != None :
lowmat[lowfilter] = 0.0
uppmat[uppfilter] = 0.0
return lowmat,uppmat
def mol_dist(filename,nam) :
MAT_SIZE = 70.0*70.0
mat = np.load(filename)
return mat[nam][:,0].mean()/float(MAT_SIZE),mat[nam][:,1].mean()/float(MAT_SIZE)
def set_filter(filter_opt,scaling,index) :
if filter_opt["density"] == None :
return None,None
lowmat,uppmat = read_and_scale(filter_opt["density"][index],filter_opt["lower"],filter_opt["upper"],scaling)
return lowmat < filter_opt["threshold"],uppmat < filter_opt["threshold"]
| {
"content_hash": "e5d111423700dda8d2519b770f94b80e",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 110,
"avg_line_length": 28.21153846153846,
"alnum_prop": 0.6837082481254261,
"repo_name": "SGenheden/Scripts",
"id": "db7c63c11db9d72fd421177db3169ccc5f357049",
"size": "1520",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sgenlib/mat_routines.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "7562"
},
{
"name": "Python",
"bytes": "950238"
},
{
"name": "Shell",
"bytes": "18990"
}
],
"symlink_target": ""
} |
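# A minimal, self-contained sketch (not part of the repository above) of the
# scaling branch in read_and_scale: with scaling["flag"] > 0, nonzero density
# values are converted to 1.987*0.3*log(rho/rho_bulk) -- presumably RT in
# kcal/mol at ~300 K -- while zero cells stay zero. The bulk value below is
# made up for illustration.
import numpy as np

lowmat = np.array([0.0, 0.02, 0.08])
bulk_low = 0.04
sel = lowmat != 0.0
lowmat[sel] = 1.987 * 0.3 * np.log(lowmat[sel] / bulk_low)
print(lowmat)  # approximately [ 0.      -0.41318  0.41318]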
import os
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import spline
# from http://www.lfd.uci.edu/~gohlke/
try:
import tifffile
except ImportError:
print '''
The Tifffile module by Christoph Gohlke is required
for gel image analysis. Download from
http://www.lfd.uci.edu/~gohlke/, save in src/
(same folder as this script) and try again
(but save the rendered contents of the html
file tifffile.py.html as tifffile.py).
Warnings about additional modules can be
ignored after Tifffile installation.
'''
import sys
sys.exit()
def getLanes(imagefile,lanes,top,bottom):
    '''Load pixel data for one gel lane: intensities summed across the lane width for each pixel row down the specified region'''
tif = tifffile.TiffFile(imagefile)
image = tif.asarray()
lanes1d = []
for l in lanes:
lanes1d += [np.sum(image[top:bottom,l[0]:l[1]],axis=1)]
return(lanes1d)
def getPerLaneLadder(ladder1x,ladder1peaks,ladder2x,ladder2peaks,samplelanes,sampleoffsets):
'''Linearly interpolate between corresponding bands in ladders on either side of samples for "per lane ladders"'''
ldiff = ladder2x - ladder1x
# take min peaks counting from small to big
nladderpeaks = min(len(ladder1peaks),len(ladder2peaks))
ladder1peaks = ladder1peaks[-nladderpeaks:]
ladder2peaks = ladder2peaks[-nladderpeaks:]
ladderbylanes = []
for n1,l in enumerate(samplelanes):
thesepeaks = []
soffset = sampleoffsets[n1] - ladder1x
for p in range(nladderpeaks):
peakdiff = ladder1peaks[p]-ladder2peaks[p]
thesepeaks += [ladder1peaks[p]-peakdiff*(soffset/float(ldiff))]
ladderbylanes += [np.array(thesepeaks)]
return(ladderbylanes)
def Quantify(ladderbylanes,laddersizes,allpeaks,samplenames,doplot,outfolder=None,prefix=None,desired_range=False,gelimagefile=None):
'''Interpolate ladder sizes versus migration distance with second order splines and quantify samples'''
sizes = {}
for n,ladder_peak_dists in enumerate(ladderbylanes):
sizes[samplenames[n]] = []
# interpolate and smooth between ladder peaks (which may not be linear or log-linear etc)
interpolated_lsizes = np.linspace(laddersizes.min(),laddersizes.max(),1000)
smoothed_lpeaks_dists = spline(laddersizes[::-1],ladder_peak_dists[::-1],interpolated_lsizes[::-1],order=2)[::-1]
if doplot:
fig = plt.figure(figsize=(12, 9))
axes = fig.add_subplot(111)
axes.plot(interpolated_lsizes,smoothed_lpeaks_dists,'-r',lw=0.7)
axes.plot(laddersizes,ladder_peak_dists,'og')
axes.set_xlabel('Fragment size (bp)')
axes.set_ylabel('Band migration (pixels)')
if desired_range:
drange = 'Desired range: %s bp, s'%desired_range
else:
drange = 'S'
#axes.set_title('Band fragment sizes by interpolation of ladder migration distances\n%sample lane %s, %s' % (drange,n+1,samplenames[n]))
plt.title('Band fragment sizes by interpolation of ladder migration distances\n%sample lane %s, %s, %s' % (drange,n+1,samplenames[n],gelimagefile))
for b,band in enumerate(ladder_peak_dists):
axes.annotate('%s bp' % laddersizes[b], xy=(laddersizes[b],band), xytext=(laddersizes[b]+5000,band), arrowprops=None)
# find where sample peaks intersect ladder spline and get corresponding DNA fragment size
xmin,xmax = axes.get_xlim()
ymin,ymax = axes.get_ylim()
for peak in allpeaks[n+1]:
s = [i for i,a in enumerate(smoothed_lpeaks_dists[1:]) if a < peak and smoothed_lpeaks_dists[i-1] >= peak][0]
sizes[samplenames[n]] += [int(round(interpolated_lsizes[s]))]
if doplot:
axes.axvline(x=interpolated_lsizes[s],ymax=(peak-ymin)/(ymax-ymin),color = 'purple',lw=1)
axes.axhline(y=peak,xmax=(interpolated_lsizes[s]-xmin)/(xmax-xmin),color = 'purple',lw=1)
if doplot:
axes.annotate('%s' % '\n'.join(['Quantification:']+[str(s)+' bp' for s in sorted(sizes[samplenames[n]])]), xy=((xmax-xmin)*0.6+xmin,(ymax-ymin)*0.8+ymin), xytext=((xmax-xmin)*0.6+xmin,(ymax-ymin)*0.8+ymin), arrowprops=None)
plt.savefig(outfolder+os.sep+prefix+'quantification_sample_lane_%s_%s%ssvg' % (n+1,samplenames[n],os.extsep), format='svg')
return(sizes)
def AnalyseGel(gelimagefile,samplenames,lane_x_ranges,top,bottom,allpeaks,laddersizes,outfolder,prefix,doplot,desired_range):
# get lane data from image file
lanes1d = getLanes(gelimagefile,lane_x_ranges,top,bottom)
if not os.path.exists(outfolder): os.mkdir(outfolder)
if doplot:
fig = plt.figure(figsize=(12, 9))
# plot ladder peak intensities
axes = fig.add_subplot(111)
axes.plot(lanes1d[0])
axes.set_xlabel('Distance along lane (pixels)')
axes.set_ylabel('Pixel row intensity')
plt.title('Lane intensities summed across pixel rows versus distance along ladder lane 1\n%s' % gelimagefile)
[axes.axvline(x=a,color = 'red') for a in allpeaks[0]]
plt.savefig(outfolder+os.sep+prefix+os.extsep.join(['ladder1','svg']), format='svg')
axes.clear()
axes.plot(lanes1d[-1])
axes.set_xlabel('Distance along lane (pixels)')
axes.set_ylabel('Pixel row intensity')
plt.title('Lane intensities summed across pixel rows versus distance along ladder lane 2\n%s' % gelimagefile)
[axes.axvline(x=a,color = 'red') for a in allpeaks[-1]]
plt.savefig(outfolder+os.sep+prefix+os.extsep.join(['ladder2','svg']), format='svg')
# plot samples
for n,lane in enumerate(lanes1d[1:-1]):
plt.cla()
axes.plot(lane)
axes.set_xlabel('Distance along lane (pixels)')
axes.set_ylabel('Pixel row intensity')
plt.title('Lane intensities summed across pixel rows versus distance along\nsample lane %s, %s, %s' % (n+1,samplenames[n],gelimagefile))
[axes.axvline(x=a,color = 'red') for a in allpeaks[n+1]]
plt.savefig(outfolder+os.sep+prefix+'intensities_sample_lane_%s_%s.svg' % ((n+1),samplenames[n]), format='svg')
# linear regress between ladder bands to create per lane ladders
ladder1x = sum(lane_x_ranges[0])/2
ladder2x = sum(lane_x_ranges[-1])/2
ladder1peaks = allpeaks[0]
ladder2peaks = allpeaks[-1]
samplepeaks = [np.array(a) for a in allpeaks[1:-1]]
sampleoffsets = [sum(n)/2 for n in lane_x_ranges[1:-1]]
ladderbylanes = getPerLaneLadder(ladder1x,ladder1peaks,ladder2x,ladder2peaks,samplepeaks,sampleoffsets)
# interpolate with a smoothed spline
sizes = Quantify(ladderbylanes,laddersizes,allpeaks,samplenames,doplot,outfolder,prefix,desired_range,gelimagefile)
# print sizes
for sample,thesesizes in sizes.items():
print 'Sample %s: %s' % (sample,', '.join([str(s) for s in sorted(thesesizes)]))
return(sizes)
# low range ladder
# https://www.neb.com/products/n0350-low-range-pfg-marker
lowrangeladder = [23100,9420,6550,4360,2320,2030]
# includes lambda ladder
# lambda ladder:
# http://www.neb.com/nebecomm/products/productn0340.asp
lambdaladder = [1018500,970000,921500,873000,824500,776000,727500, 679000, 630500, 582000, 533500, 485000, 436500, 388000, 339500, 291000, 242500, 194000, 145500, 97000, 48500]
# H. wingei chromosomes ladder:
# http://www.bio-rad.com/prd/en/US/adirect/biorad?cmd=catProductDetail&vertical=LSR&country=US&lang=en&productID=170-3667
Hwingeiladder = [3130000,2700000,2350000,1810000,1660000,1370000,1050000]
doplot = True
samples = ['E_1_37','A_3_34','C_4_22','D_4_27','B_4_28','K12']
outfolder = os.sep.join([os.pardir,'image','analysis'])
sizes = {}
def main():
## Small fragment ICeuI PFGE gel
desired_range = '< 145,500'
gelimagefile = os.sep.join([os.pardir,'image','gels',os.extsep.join(['ICeuI_small','tif'])])
laddersizes = np.array(lambdaladder[-4:]+lowrangeladder[:1])
## samples in left lanes
samplenames = samples[:3]
# x pixel ranges of lanes
lane_x_ranges = ((176,274),(334,434),(494,584),(642,726),(798,904))
# upper and lower pixels
(top,bottom) = (10,1128)
# band peak intensity distance down gel image -top
allpeaks = [np.array([ 25, 237, 549, 856, 1030]),
np.array([286, 415, 930]),
np.array([280, 428, 912]),
np.array([399, 512, 909]),
np.array([ 33, 243, 554, 862, 1032])]
prefix = 'ICeuI_small_123_'
print('\nAnalysing %s, plotting to %s%s%s*.svg\n' % (gelimagefile,outfolder,os.sep,prefix))
sizes[prefix] = AnalyseGel(gelimagefile,samplenames,lane_x_ranges,top,bottom,allpeaks,laddersizes,outfolder,prefix,doplot,desired_range)
## samples in right lanes
samplenames = samples[3:]
# x pixel ranges of lanes
lane_x_ranges = ((798,904),(962,1082),(1114,1224),(1264,1368),(1422,1530))
# band peak intensity distance down gel image -top
allpeaks = [np.array([ 33, 243, 554, 862, 1032]),
np.array([ 76, 408, 914]),
np.array([304, 575, 913]),
np.array([331, 588, 927]),
np.array([ 36, 255, 568, 879, 1053])]
prefix = 'ICeuI_small_456_'
print('\nAnalysing %s, plotting to %s%s%s*.svg\n' % (gelimagefile,outfolder,os.sep,prefix))
sizes[prefix] = AnalyseGel(gelimagefile,samplenames,lane_x_ranges,top,bottom,allpeaks,laddersizes,outfolder,prefix,doplot,desired_range)
## Mid fragment ICeuI PFGE gel
desired_range = '> 145,500 & < 1,000,000'
gelimagefile = os.sep.join([os.pardir,'image','gels',os.extsep.join(['ICeuI_medium','tif'])])
laddersizes = np.array(lambdaladder[-18:-9])
samplenames = samples
# x pixel ranges of lanes
lane_x_ranges = ((23,63),(96,148),(173,230),(255,305),(333,389),(415,466),(493,546),(584,622))
# upper and lower pixels
(top,bottom) = (0,260)
# band peak intensity distance down gel image -top
allpeaks = [np.array([ 8, 31, 55, 79, 106, 135, 167, 202, 240]),
np.array([ 38, 90, 193]),
np.array([105, 107, 210]),
np.array([ 79, 108, 207]),
np.array([ 32, 92, 202]),
np.array([ 88, 131, 202]),
np.array([ 99, 121, 212]),
np.array([ 17, 39, 62, 87, 113, 143, 175, 209, 247])]
prefix = 'ICeuI_medium_123456_'
print('\nAnalysing %s, plotting to %s%s%s*.svg\n' % (gelimagefile,outfolder,os.sep,prefix))
sizes[prefix] = AnalyseGel(gelimagefile,samplenames,lane_x_ranges,top,bottom,allpeaks,laddersizes,outfolder,prefix,doplot,desired_range)
## Large fragment ICeuI PFGE gel
desired_range = '> 2,000,000'
gelimagefile = os.sep.join([os.pardir,'image','gels',os.extsep.join(['ICeuI_large','tif'])])
laddersizes = np.array(Hwingeiladder[:3])
# upper and lower pixels
(top,bottom) = (130,482)
## sample in left lane
samplenames = samples[:1]
lane_x_ranges = ((162,204),(316,386),(472,514))
# band peak intensity distance down gel image -top
allpeaks = [np.array([ 84, 165, 263]), np.array([119]), np.array([ 87, 158, 254])]
prefix = 'ICeuI_large_1_'
print('\nAnalysing %s, plotting to %s%s%s*.svg\n' % (gelimagefile,outfolder,os.sep,prefix))
sizes[prefix] = AnalyseGel(gelimagefile,samplenames,lane_x_ranges,top,bottom,allpeaks,laddersizes,outfolder,prefix,doplot,desired_range)
## samples in middle lane
samplenames = samples[1:3]
lane_x_ranges = ((472,514),(598,660),(728,802),(878,922))
# band peak intensity distance down gel image -top
allpeaks = [np.array([ 87, 158, 248]), np.array([163]), np.array([116]), np.array([ 90, 163, 251])]
prefix = 'ICeuI_large_23_'
print('\nAnalysing %s, plotting to %s%s%s*.svg\n' % (gelimagefile,outfolder,os.sep,prefix))
sizes[prefix] = AnalyseGel(gelimagefile,samplenames,lane_x_ranges,top,bottom,allpeaks,laddersizes,outfolder,prefix,doplot,desired_range)
## samples in right lanes
samplenames = samples[3:]
lane_x_ranges = ((878,922),(1020,1076),(1160,1220),(1292,1362),(1436,1508))
# band peak intensity distance down gel image -top
allpeaks = [np.array([ 90, 163, 251]), np.array([107]), np.array([130]), np.array([145]), np.array([83, 166, 256])]
prefix = 'ICeuI_large_456_'
print('\nAnalysing %s, plotting to %s%s%s*.svg\n' % (gelimagefile,outfolder,os.sep,prefix))
sizes[prefix] = AnalyseGel(gelimagefile,samplenames,lane_x_ranges,top,bottom,allpeaks,laddersizes,outfolder,prefix,doplot,desired_range)
allsizes = {}
for sz in sizes.values():
for sm in samples:
if sm in sz:
if sm in allsizes:
allsizes[sm] += sz[sm]
else:
allsizes[sm] = sz[sm]
print('\n')
for sm,szs in sorted(allsizes.items()):
print('%s: %s' % (sm,sum(szs)))
print('\nSee ../image/analysis/ for analysis plots\n')
if __name__ == '__main__':
main()
| {
"content_hash": "c0886aa247211f563c7cbe51c0975475",
"timestamp": "",
"source": "github",
"line_count": 264,
"max_line_length": 229,
"avg_line_length": 46.90151515151515,
"alnum_prop": 0.6842997900177678,
"repo_name": "wltrimbl/kmerspectrumanalyzer",
"id": "e918eea495c1abf7ce8f9563f9eaf6072261cd70",
"size": "12407",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pfge_analysis/PFGE_analysis.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "2838"
},
{
"name": "Perl",
"bytes": "4666"
},
{
"name": "Python",
"bytes": "126361"
},
{
"name": "Roff",
"bytes": "14642049"
},
{
"name": "Shell",
"bytes": "17486"
}
],
"symlink_target": ""
} |
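# Self-contained arithmetic check (toy numbers, no gel image needed) mirroring
# what getPerLaneLadder does for a single sample lane: with ladder lanes
# centred at x=100 and x=500 and the sample lane at x=300, every band lands
# halfway between the two flanking ladders.
import numpy as np

ladder1 = np.array([50.0, 150.0, 300.0])   # band positions in the left ladder
ladder2 = np.array([60.0, 160.0, 310.0])   # band positions in the right ladder
ldiff, soffset = 500 - 100, 300 - 100
per_lane = ladder1 - (ladder1 - ladder2) * (soffset / float(ldiff))
print(per_lane)  # [  55.  155.  305.]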
"""Module for testing the del address command."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class TestDelAddress(TestBrokerCommand):
def testbasic(self):
self.dsdb_expect_delete(self.net.unknown[0].usable[13])
command = ["del_address", "--ip=%s" % self.net.unknown[0].usable[13]]
self.noouttest(command)
self.dsdb_verify()
def testverifybasic(self):
command = ["show_address", "--fqdn=arecord13.aqd-unittest.ms.com"]
self.notfoundtest(command)
def testdefaultenv(self):
self.dsdb_expect_delete(self.net.unknown[0].usable[14])
default = self.config.get("site", "default_dns_environment")
command = ["del_address", "--fqdn", "arecord14.aqd-unittest.ms.com",
"--dns_environment", default]
self.noouttest(command)
self.dsdb_verify()
def testverifydefaultenv(self):
command = ["show_address", "--fqdn=arecord14.aqd-unittest.ms.com"]
self.notfoundtest(command)
def testutenvenv(self):
command = ["del_address", "--ip", self.net.unknown[1].usable[14],
"--fqdn", "arecord14.aqd-unittest.ms.com",
"--dns_environment", "ut-env"]
self.noouttest(command)
def testverifyutenvenv(self):
command = ["show_address", "--fqdn", "arecord14.aqd-unittest.ms.com",
"--dns_environment", "ut-env"]
self.notfoundtest(command)
def testbadip(self):
ip = self.net.unknown[0].usable[14]
command = ["del_address", "--ip", ip,
"--fqdn=arecord15.aqd-unittest.ms.com"]
out = self.notfoundtest(command)
self.matchoutput(out, "DNS Record arecord15.aqd-unittest.ms.com, ip "
"%s not found." % ip, command)
def testcleanup(self):
self.dsdb_expect_delete(self.net.unknown[0].usable[15])
command = ["del_address", "--ip=%s" % self.net.unknown[0].usable[15],
"--fqdn=arecord15.aqd-unittest.ms.com"]
self.noouttest(command)
self.dsdb_verify()
def testfailbadenv(self):
default = self.config.get("site", "default_dns_environment")
command = ["del_address", "--ip=%s" % self.net.unknown[0].usable[15],
"--fqdn=arecord15.aqd-unittest.ms.com",
"--dns_environment=environment-does-not-exist"]
out = self.notfoundtest(command)
self.matchoutput(out,
"DNS Environment environment-does-not-exist not found.",
command)
def testfailprimary(self):
ip = self.net.unknown[0].usable[2]
command = ["del", "address", "--ip", ip, "--fqdn",
"unittest00.one-nyp.ms.com"]
out = self.badrequesttest(command)
self.matchoutput(out,
"DNS Record unittest00.one-nyp.ms.com [%s] is the "
"primary name of machine unittest00.one-nyp.ms.com, "
"therefore it cannot be deleted." % ip,
command)
def testfailipinuse(self):
ip = self.net.unknown[0].usable[3]
command = ["del", "address", "--ip", ip, "--fqdn",
"unittest00-e1.one-nyp.ms.com"]
out = self.badrequesttest(command)
self.matchoutput(out,
"IP address %s is still in use by public interface "
"eth1 of machine unittest00.one-nyp.ms.com" % ip,
command)
def testdelunittest20_e1(self):
ip = self.net.unknown[12].usable[0]
self.dsdb_expect_delete(ip)
command = ["del", "address", "--ip", ip,
"--fqdn", "unittest20-e1.aqd-unittest.ms.com"]
self.noouttest(command)
self.dsdb_verify()
def testdelzebra3(self):
ip = self.net.unknown[13].usable[0]
self.dsdb_expect_delete(ip)
command = ["del", "address", "--ip", ip,
"--fqdn", "zebra3.aqd-unittest.ms.com"]
self.noouttest(command)
self.dsdb_verify()
def test_delip_with_network_env(self):
ip = "192.168.3.1"
fqdn = "cardenvtest600.aqd-unittest.ms.com"
command = ["del", "address", "--ip", ip,
"--network_environment", "cardenv"]
self.noouttest(command)
# External IP addresses should not be added to DSDB
self.dsdb_verify(empty=True)
command = ["show_address", "--fqdn", fqdn,
"--network_environment", "cardenv"]
out = self.notfoundtest(command)
def test_delreservedreverse(self):
self.dsdb_expect_delete(self.net.unknown[0].usable[32])
command = ["del", "address",
"--fqdn", "arecord17.aqd-unittest.ms.com"]
self.noouttest(command)
self.dsdb_verify()
def test_verifydelreserve(self):
command = ["show", "address",
"--fqdn", "arecord17.aqd-unittest.ms.com"]
self.notfoundtest(command)
command = ["search", "dns", "--record_type", "reserved_name"]
out = self.commandtest(command)
self.matchclean(out, "reverse.restrict.aqd-unittest.ms.com", command)
self.matchclean(out, "reverse2.restrict.aqd-unittest.ms.com", command)
def test_610_addipfromip_with_network_env(self):
fqdn = "cardenvtest610.aqd-unittest.ms.com"
command = ["del", "address", "--fqdn", fqdn,
"--network_environment", "cardenv"]
self.noouttest(command)
# External IP addresses should not be added to DSDB
self.dsdb_verify(empty=True)
command = ["show_address", "--fqdn=%s" % fqdn]
out = self.notfoundtest(command)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestDelAddress)
unittest.TextTestRunner(verbosity=2).run(suite)
| {
"content_hash": "3b4147c441212e0930ac9e12eb09b3bb",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 81,
"avg_line_length": 39.12418300653595,
"alnum_prop": 0.5711660541262947,
"repo_name": "stdweird/aquilon",
"id": "d3590c6f07a8e05c615dc92b002fc28532f2ab10",
"size": "6714",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/broker/test_del_address.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "3791"
},
{
"name": "Makefile",
"bytes": "5024"
},
{
"name": "Mako",
"bytes": "3996"
},
{
"name": "PLSQL",
"bytes": "69088"
},
{
"name": "Perl",
"bytes": "5030"
},
{
"name": "Python",
"bytes": "4257490"
},
{
"name": "SQLPL",
"bytes": "869"
},
{
"name": "Shell",
"bytes": "22083"
}
],
"symlink_target": ""
} |
import importlib
import time
import datetime
from datetime import timedelta
from unittest import mock
from django.conf import settings
from django.core.signals import request_finished, request_started
from django.test.testcases import TransactionTestCase
from post_request_task.task import _discard_tasks, _stop_queuing_tasks
from olympia.amo.tests import TestCase
from olympia.amo.celery import app, task
from olympia.amo.utils import utc_millesecs_from_epoch
fake_task_func = mock.Mock()
def test_celery_routes_in_queues():
queues_in_queues = set([q.name for q in settings.CELERY_TASK_QUEUES])
# check the default queue is defined in CELERY_QUEUES
assert settings.CELERY_TASK_DEFAULT_QUEUE in queues_in_queues
queues_in_routes = set([c['queue'] for c in settings.CELERY_TASK_ROUTES.values()])
assert queues_in_queues == queues_in_routes
def test_celery_routes_only_contain_valid_tasks():
# Import CELERY_IMPORTS like celery would to find additional tasks that
# are not automatically imported at startup otherwise.
for module_name in settings.CELERY_IMPORTS:
importlib.import_module(module_name)
# Force a re-discovery of the tasks - when running the tests the
# autodiscovery might happen too soon.
app.autodiscover_tasks(force=True)
# Make sure all tasks in CELERY_TASK_ROUTES are known.
known_tasks = app.tasks.keys()
for task_name in settings.CELERY_TASK_ROUTES.keys():
assert task_name in known_tasks
# Make sure all known tasks have an explicit route set.
for task_name in known_tasks:
assert task_name in settings.CELERY_TASK_ROUTES.keys()
@task(ignore_result=False)
def fake_task_with_result():
fake_task_func()
return 'foobar'
@task
def fake_task():
fake_task_func()
return 'foobar'
@task(track_started=True, ignore_result=False)
def sleeping_task(time_to_sleep):
time.sleep(time_to_sleep)
class TestCeleryWorker(TestCase):
@mock.patch('olympia.amo.celery.cache')
def test_start_task_timer(self, celery_cache):
result = fake_task_with_result.delay()
result.get()
assert celery_cache.set.called
assert celery_cache.set.call_args[0][0] == f'task_start_time.{result.id}'
@mock.patch('olympia.amo.celery.cache')
@mock.patch('olympia.amo.celery.statsd')
def test_track_run_time(self, celery_statsd, celery_cache):
minute_ago = datetime.datetime.now() - timedelta(minutes=1)
task_start = utc_millesecs_from_epoch(minute_ago)
celery_cache.get.return_value = task_start
result = fake_task_with_result.delay()
result.get()
approx_run_time = utc_millesecs_from_epoch() - task_start
assert (
celery_statsd.timing.call_args[0][0]
== 'tasks.olympia.amo.tests.test_celery.fake_task_with_result'
)
actual_run_time = celery_statsd.timing.call_args[0][1]
fuzz = 2000 # 2 seconds
assert actual_run_time >= (approx_run_time - fuzz) and actual_run_time <= (
approx_run_time + fuzz
)
assert celery_cache.get.call_args[0][0] == f'task_start_time.{result.id}'
assert celery_cache.delete.call_args[0][0] == f'task_start_time.{result.id}'
@mock.patch('olympia.amo.celery.cache')
@mock.patch('olympia.amo.celery.statsd')
    def test_handle_cache_miss_for_stats(self, celery_statsd, celery_cache):
celery_cache.get.return_value = None # cache miss
fake_task.delay()
assert not celery_statsd.timing.called
class TestTaskQueued(TransactionTestCase):
"""Test that tasks are queued and only triggered when a request finishes.
Tests our integration with django-post-request-task.
"""
def setUp(self):
super().setUp()
fake_task_func.reset_mock()
_discard_tasks()
def tearDown(self):
super().tearDown()
fake_task_func.reset_mock()
_discard_tasks()
_stop_queuing_tasks()
def test_not_queued_outside_request_response_cycle(self):
fake_task.delay()
assert fake_task_func.call_count == 1
def test_queued_inside_request_response_cycle(self):
request_started.send(sender=self)
fake_task.delay()
assert fake_task_func.call_count == 0
request_finished.send_robust(sender=self)
assert fake_task_func.call_count == 1
def test_no_dedupe_outside_request_response_cycle(self):
fake_task.delay()
fake_task.delay()
assert fake_task_func.call_count == 2
def test_dedupe_inside_request_response_cycle(self):
request_started.send(sender=self)
fake_task.delay()
fake_task.delay()
assert fake_task_func.call_count == 0
request_finished.send_robust(sender=self)
assert fake_task_func.call_count == 1
| {
"content_hash": "b452439a1d7220baa2be6b77711bf46e",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 86,
"avg_line_length": 32.38666666666666,
"alnum_prop": 0.6755866611774393,
"repo_name": "bqbn/addons-server",
"id": "76e4c8f0622615eff507a6d20cd83d804f073614",
"size": "4858",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/olympia/amo/tests/test_celery.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "810080"
},
{
"name": "Dockerfile",
"bytes": "2868"
},
{
"name": "HTML",
"bytes": "585550"
},
{
"name": "JavaScript",
"bytes": "1071952"
},
{
"name": "Makefile",
"bytes": "827"
},
{
"name": "PLSQL",
"bytes": "1074"
},
{
"name": "PLpgSQL",
"bytes": "2381"
},
{
"name": "Python",
"bytes": "5323934"
},
{
"name": "SQLPL",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "11171"
},
{
"name": "Smarty",
"bytes": "1503"
}
],
"symlink_target": ""
} |
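# Self-contained illustration (not from addons-server) of the argument order
# used in the tests above: stacked mock.patch decorators hand their mocks to
# the test starting with the innermost (lowest) decorator, which is why
# test_track_run_time lists celery_statsd before celery_cache.
from unittest import mock


@mock.patch('os.getcwd')   # outer patch -> second argument
@mock.patch('os.getpid')   # inner patch -> first argument
def demo(mock_getpid, mock_getcwd):
    assert mock_getpid is not mock_getcwd
    return mock_getpid, mock_getcwd


demo()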
from __future__ import unicode_literals
import string
import itertools
import click
import xlrd
import os
from . import asciitable, __version__
from .compat import open_file, csv, out_proc
from .cursor import XCursor, CSVCursor
from six import PY3
from subprocess import Popen, PIPE
@click.command()
@click.option('--heading',
'-h',
type=int,
help='Row number containing the headings.')
@click.option("--file-type", "-f", type=click.Choice(["csv", "excel"]),
help="Force parsing of the file to the chosen format.")
@click.option("--delimiter", "-d", type=str, default=",",
help="Delimiter (only applicable to CSV files) [default: ',']")
@click.option("--quotechar", "-q", type=str, default='"',
help="Quote character (only applicable to CSV files) [default: '\"']")
@click.option("--encoding", "-e", type=str, default="utf-8",
help="Encoding [default: UTF-8]")
@click.version_option(version=__version__)
@click.argument('filename')
def cli(filename, heading, file_type, delimiter, quotechar, encoding):
"""Display Excel or CSV files directly on your terminal.
The file type is guessed from file extensions, but can be overridden with the --file-type option.
"""
if file_type is None:
if filename.endswith(".csv"):
file_type = "csv"
else:
file_type = "excel"
if file_type == "csv":
csv_rows = []
if not PY3:
delimiter = str(unicode(delimiter))
quotechar = str(unicode(quotechar))
with open_file(filename, encoding) as f:
reader = csv.reader(f, delimiter=delimiter, quotechar=quotechar)
for row in reader:
csv_rows.append(row)
cursor = CSVCursor(csv_rows, heading)
else:
# As per https://secure.simplistix.co.uk/svn/xlrd/trunk/xlrd/doc/xlrd.html?p=4966
# encodings in Excel are usually UTF-8. So, we only override the encoding
# if an encoding is specified by the user.
if encoding.lower() != "utf-8":
workbook = xlrd.open_workbook(filename, encoding_override=encoding)
else:
workbook = xlrd.open_workbook(filename)
sheet = workbook.sheet_by_index(0)
cursor = XCursor(sheet, heading)
with out_proc() as out:
asciitable.draw(cursor, out=out)
| {
"content_hash": "e52108fb896ca3bfe7837f08764ca5fa",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 101,
"avg_line_length": 33.84507042253521,
"alnum_prop": 0.6179775280898876,
"repo_name": "krockode/x_x",
"id": "319d09b27ea3b6be346245c9efe965ae7e602745",
"size": "2403",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "x_x/x_x.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "10203"
}
],
"symlink_target": ""
} |
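# Usage sketch for the command above (file names are made up; the console
# entry point is assumed to be the usual "x_x" script):
#
#   x_x report.xlsx
#   x_x --heading 1 --delimiter ';' data.csv
#
# or programmatically, via click's test runner:
#   from click.testing import CliRunner
#   CliRunner().invoke(cli, ["data.csv"])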
from django.core.management import call_command
from django.core.management.base import NoArgsCommand
from pangolinland.users.factories import generate_users
class Command(NoArgsCommand):
help = 'Fill the database with test fixtures'
def handle(self, *args, **options):
self.stdout.write('Starting fill db\r\n')
# fixture_list = []
# call_command('loaddata', *fixture_list)
generate_users()
self.stdout.write('Completed fill db\r\n')
| {
"content_hash": "56937557c69119dbad2a2a14ff78a271",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 55,
"avg_line_length": 27.166666666666668,
"alnum_prop": 0.6912065439672802,
"repo_name": "skylifewww/pangolinland",
"id": "0165a0489846edcd00f8af92114238bc60645a94",
"size": "489",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pangolinland/core/management/commands/filldb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "179721"
},
{
"name": "HTML",
"bytes": "195624"
},
{
"name": "JavaScript",
"bytes": "262432"
},
{
"name": "Makefile",
"bytes": "1485"
},
{
"name": "Nginx",
"bytes": "646"
},
{
"name": "Python",
"bytes": "116827"
}
],
"symlink_target": ""
} |
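# Usage sketch (assumes a configured Django project that includes this app):
#
#   python manage.py filldb           # from the shell
#
# or from test code:
#   from django.core.management import call_command
#   call_command("filldb")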
import sys
import os
import signal
import re
import cPickle
import logging
import datetime
from urlparse import urlparse
from traceback import print_exc
from time import time, gmtime, strftime, localtime
from random import shuffle
from types import StringType, IntType, LongType, ListType, DictType
from binascii import b2a_hex
from cStringIO import StringIO
from BTL.translation import _
from BTL.obsoletepythonsupport import *
from BitTorrent import platform
from BTL import BTFailure
from BTL.platform import decode_from_filesystem, efs2
from BTL.defer import DeferredEvent, ThreadedDeferred
from BTL.yielddefer import wrap_task
from BitTorrent.configfile import parse_configuration_and_args
#from BitTorrent.parseargs import parseargs, printHelp
from BitTorrent.RawServer_twisted import RawServer
from BitTorrent.HTTPHandler import HTTPHandler
from BTL.parsedir import parsedir
from BitTorrent.NatCheck import NatCheck
from BTL.bencode import bencode, bdecode, Bencached
from urllib import quote, unquote
from BTL.exceptions import str_exc
from BitTorrent import version
from BitTorrent.prefs import Preferences
from BitTorrent.defaultargs import get_defaults
from BitTorrent.UI import Size
import socket
import threading
import traceback
NOISY = False
# code duplication because ow.
MAX_INCOMPLETE = 100
if os.name == 'nt':
from BitTorrent.platform import win_version_num
# starting in XP SP2 the incomplete outgoing connection limit was set to 10
if win_version_num >= (2, 5, 1, 2, 0):
MAX_INCOMPLETE = 10
def statefiletemplate(x):
if type(x) != DictType:
raise ValueError
for cname, cinfo in x.iteritems():
if cname == 'peers':
for y in cinfo.itervalues(): # The 'peers' key is a dictionary of SHA hashes (torrent ids)
if type(y) != DictType: # ... for the active torrents, and each is a dictionary
raise ValueError
for peerid, info in y.iteritems(): # ... of client ids interested in that torrent
if (len(peerid) != 20):
raise ValueError
if type(info) != DictType: # ... each of which is also a dictionary
raise ValueError # ... which has an IP, a Port, and a Bytes Left count for that client for that torrent
if type(info.get('ip', '')) != StringType:
raise ValueError
port = info.get('port')
if type(port) not in (IntType, LongType) or port < 0:
raise ValueError
left = info.get('left')
if type(left) not in (IntType, LongType) or left < 0:
raise ValueError
elif cname == 'completed':
if (type(cinfo) != DictType): # The 'completed' key is a dictionary of SHA hashes (torrent ids)
raise ValueError # ... for keeping track of the total completions per torrent
for y in cinfo.itervalues(): # ... each torrent has an integer value
if type(y) not in (IntType,LongType):
raise ValueError # ... for the number of reported completions for that torrent
elif cname == 'allowed':
if (type(cinfo) != DictType): # a list of info_hashes and included data
raise ValueError
if x.has_key('allowed_dir_files'):
adlist = [z[1] for z in x['allowed_dir_files'].itervalues()]
for y in cinfo.iterkeys(): # and each should have a corresponding key here
if not y in adlist:
raise ValueError
elif cname == 'allowed_dir_files':
if (type(cinfo) != DictType): # a list of files, their attributes and info hashes
raise ValueError
dirkeys = {}
for y in cinfo.itervalues(): # each entry should have a corresponding info_hash
if not y[1]:
continue
if not x['allowed'].has_key(y[1]):
raise ValueError
if dirkeys.has_key(y[1]): # and each should have a unique info_hash
raise ValueError
dirkeys[y[1]] = 1
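# For reference (illustrative values, not a real tracker state): a dict that
# passes statefiletemplate() looks roughly like
#   {'peers':     {infohash: {peerid: {'ip': '10.0.0.1', 'port': 6881,
#                                      'left': 0}}},
#    'completed': {infohash: 3},
#    'allowed':   {...},               # only when an allowed_dir is configured
#    'allowed_dir_files': {...}}
# where peerid is a 20-byte client id and infohash is a 20-byte SHA1 digest.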
alas = _("your file may exist elsewhere in the universe\nbut alas, not here\n")
def isotime():
#return strftime('%Y-%m-%d %H:%M UTC', gmtime(secs))
return datetime.datetime.utcnow().isoformat()
http_via_filter = re.compile(' for ([0-9.]+)\Z')
def _get_forwarded_ip(headers):
if headers.has_key('x_forwarded_for'):
header = headers['x_forwarded_for']
try:
x,y = header.split(',')
except:
return header
if not is_local_ip(x):
return x
return y
if headers.has_key('client_ip'):
return headers['client_ip']
if headers.has_key('via'):
x = http_via_filter.search(headers['via'])
try:
return x.group(1)
except:
pass
if headers.has_key('from'):
return headers['from']
return None
def get_forwarded_ip(headers):
x = _get_forwarded_ip(headers)
if x is None or not is_valid_ipv4(x) or is_local_ip(x):
return None
return x
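# Example (illustrative): with headers {'x_forwarded_for': '1.2.3.4, 10.0.0.1'}
# the helpers above return '1.2.3.4' -- the first address, since it is a valid
# non-local IPv4 -- whereas a purely private chain like '10.0.0.2, 10.0.0.1'
# makes get_forwarded_ip() return None.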
def compact_peer_info(ip, port):
try:
s = ( ''.join([chr(int(i)) for i in ip.split('.')])
+ chr((port & 0xFF00) >> 8) + chr(port & 0xFF) )
if len(s) != 6:
s = ''
except:
s = '' # not a valid IP, must be a domain name
return s
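# Example (illustrative): compact_peer_info('10.0.0.1', 6881) returns the
# 6-byte compact form '\x0a\x00\x00\x01\x1a\xe1' -- four IP octets followed by
# the port in network byte order -- while a hostname falls into the except
# branch and comes back as ''.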
def is_valid_ipv4(ip):
a = ip.split('.')
if len(a) != 4:
return False
try:
for x in a:
chr(int(x))
return True
except:
return False
def is_local_ip(ip):
try:
v = [int(x) for x in ip.split('.')]
if v[0] == 10 or v[0] == 127 or v[:2] in ([192, 168], [169, 254]):
return 1
if v[0] == 172 and v[1] >= 16 and v[1] <= 31:
return 1
except ValueError:
return 0
default_headers = {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}
class Tracker(object):
def __init__(self, config, rawserver):
self.config = config
self.response_size = config['response_size']
self.max_give = config['max_give']
self.dfile = efs2(config['dfile'])
self.natcheck = config['nat_check']
favicon = config['favicon']
self.favicon = None
if favicon:
try:
h = open(favicon,'r')
self.favicon = h.read()
h.close()
except:
errorfunc(logging.WARNING,
_("specified favicon file -- %s -- does not exist.") %
favicon)
self.rawserver = rawserver
self.cached = {} # format: infohash: [[time1, l1, s1], [time2, l2, s2], [time3, l3, s3]]
self.cached_t = {} # format: infohash: [time, cache]
self.times = {}
self.state = {}
self.seedcount = {}
self.save_pending = False
self.parse_pending = False
self.only_local_override_ip = config['only_local_override_ip']
if self.only_local_override_ip == 2:
self.only_local_override_ip = not config['nat_check']
if os.path.exists(self.dfile):
try:
h = open(self.dfile, 'rb')
ds = h.read()
h.close()
try:
tempstate = cPickle.loads(ds)
except:
tempstate = bdecode(ds) # backwards-compatibility.
if not tempstate.has_key('peers'):
tempstate = {'peers': tempstate}
statefiletemplate(tempstate)
self.state = tempstate
except:
errorfunc(logging.WARNING,
_("statefile %s corrupt; resetting") % self.dfile)
self.downloads = self.state.setdefault('peers', {})
self.completed = self.state.setdefault('completed', {})
self.becache = {} # format: infohash: [[l1, s1], [l2, s2], [l3, s3]]
for infohash, ds in self.downloads.iteritems():
self.seedcount[infohash] = 0
for x, y in ds.iteritems():
if not y.get('nat', -1):
                    ip = y.get('given ip')  # key is stored with a space in add_data()
if not (ip and self.allow_local_override(y['ip'], ip)):
ip = y['ip']
self.natcheckOK(infohash, x, ip, y['port'], y['left'])
if not y['left']:
self.seedcount[infohash] += 1
for infohash in self.downloads:
self.times[infohash] = {}
for peerid in self.downloads[infohash]:
self.times[infohash][peerid] = 0
self.reannounce_interval = config['reannounce_interval']
self.save_dfile_interval = config['save_dfile_interval']
self.show_names = config['show_names']
rawserver.add_task(self.save_dfile_interval, self.save_dfile)
self.prevtime = time()
self.timeout_downloaders_interval = config['timeout_downloaders_interval']
rawserver.add_task(self.timeout_downloaders_interval, self.expire_downloaders)
self.logfile = None
self.log = None
if (config['logfile'] != '') and (config['logfile'] != '-'):
try:
self.logfile = config['logfile']
self.log = open(self.logfile, 'a')
sys.stdout = self.log
print _("# Log Started: "), isotime()
except:
print _("**warning** could not redirect stdout to log file: "), sys.exc_info()[0]
if config['hupmonitor']:
def huphandler(signum, frame, self = self):
try:
self.log.close ()
self.log = open(self.logfile, 'a')
sys.stdout = self.log
print _("# Log reopened: "), isotime()
except:
print _("***warning*** could not reopen logfile")
signal.signal(signal.SIGHUP, huphandler)
self.allow_get = config['allow_get']
if config['allowed_dir'] != '':
self.allowed_dir = config['allowed_dir']
self.parse_dir_interval = config['parse_dir_interval']
self.allowed = self.state.setdefault('allowed', {})
self.allowed_dir_files = self.state.setdefault('allowed_dir_files', {})
self.allowed_dir_blocked = {}
self.parse_allowed()
else:
try:
del self.state['allowed']
except:
pass
try:
del self.state['allowed_dir_files']
except:
pass
self.allowed = None
self.uq_broken = unquote('+') != ' '
self.keep_dead = config['keep_dead']
def allow_local_override(self, ip, given_ip):
return is_valid_ipv4(given_ip) and (
not self.only_local_override_ip or is_local_ip(ip) )
def get_infopage(self):
try:
if not self.config['show_infopage']:
return (404, 'Not Found', default_headers, alas)
red = self.config['infopage_redirect']
if red != '':
return (302, 'Found', {'Content-Type': 'text/html', 'Location': red},
'<A HREF="'+red+'">Click Here</A>')
s = StringIO()
s.write('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n' \
'<html><head><title>BitTorrent download info</title>\n')
if self.favicon is not None:
s.write('<link rel="shortcut icon" href="/favicon.ico">\n')
s.write('</head>\n<body>\n' \
'<h3>BitTorrent download info</h3>\n'\
'<ul>\n'
'<li><strong>tracker version:</strong> %s</li>\n' \
'<li><strong>server time:</strong> %s</li>\n' \
'</ul>\n' % (version, isotime()))
if self.allowed is not None:
if self.show_names:
names = [ (value[1].name, infohash)
for infohash, value in self.allowed.iteritems()]
else:
names = [(None, infohash) for infohash in self.allowed]
else:
names = [ (None, infohash) for infohash in self.downloads]
if not names:
s.write('<p>not tracking any files yet...</p>\n')
else:
names.sort()
tn = 0
tc = 0
td = 0
tt = 0 # Total transferred
ts = 0 # Total size
nf = 0 # Number of files displayed
if self.allowed is not None and self.show_names:
s.write('<table summary="files" border="1">\n' \
'<tr><th>info hash</th><th>torrent name</th><th align="right">size</th><th align="right">complete</th><th align="right">downloading</th><th align="right">downloaded</th><th align="right">transferred</th></tr>\n')
else:
s.write('<table summary="files">\n' \
'<tr><th>info hash</th><th align="right">complete</th><th align="right">downloading</th><th align="right">downloaded</th></tr>\n')
for name, infohash in names:
l = self.downloads[infohash]
n = self.completed.get(infohash, 0)
tn = tn + n
c = self.seedcount[infohash]
tc = tc + c
d = len(l) - c
td = td + d
nf = nf + 1
if self.allowed is not None and self.show_names:
if self.allowed.has_key(infohash):
sz = self.allowed[infohash][1].total_bytes # size
ts = ts + sz
szt = sz * n # Transferred for this torrent
tt = tt + szt
if self.allow_get == 1:
linkname = '<a href="/file?info_hash=' + quote(infohash) + '">' + name + '</a>'
else:
linkname = name
s.write('<tr><td><code>%s</code></td><td>%s</td><td align="right">%s</td><td align="right">%i</td><td align="right">%i</td><td align="right">%i</td><td align="right">%s</td></tr>\n' \
% (b2a_hex(infohash), linkname, size_format(sz), c, d, n, size_format(szt)))
else:
s.write('<tr><td><code>%s</code></td><td align="right"><code>%i</code></td><td align="right"><code>%i</code></td><td align="right"><code>%i</code></td></tr>\n' \
% (b2a_hex(infohash), c, d, n))
ttn = 0
for i in self.completed.itervalues():
ttn = ttn + i
if self.allowed is not None and self.show_names:
s.write('<tr><td align="right" colspan="2">%i files</td><td align="right">%s</td><td align="right">%i</td><td align="right">%i</td><td align="right">%i/%i</td><td align="right">%s</td></tr>\n'
% (nf, size_format(ts), tc, td, tn, ttn, size_format(tt)))
else:
s.write('<tr><td align="right">%i files</td><td align="right">%i</td><td align="right">%i</td><td align="right">%i/%i</td></tr>\n'
% (nf, tc, td, tn, ttn))
s.write('</table>\n' \
'<ul>\n' \
'<li><em>info hash:</em> SHA1 hash of the "info" section of the metainfo (*.torrent)</li>\n' \
'<li><em>complete:</em> number of connected clients with the complete file</li>\n' \
'<li><em>downloading:</em> number of connected clients still downloading</li>\n' \
'<li><em>downloaded:</em> reported complete downloads (total: current/all)</li>\n' \
'<li><em>transferred:</em> torrent size * total downloaded (does not include partial transfers)</li>\n' \
'</ul>\n')
s.write('</body>\n' \
'</html>\n')
return (200, 'OK',
{'Content-Type': 'text/html; charset=iso-8859-1'},
s.getvalue())
except:
print_exc()
return (500, 'Internal Server Error',
{'Content-Type': 'text/html; charset=iso-8859-1'},
'Server Error')
def scrapedata(self, infohash, return_name = True):
l = self.downloads[infohash]
n = self.completed.get(infohash, 0)
c = self.seedcount[infohash]
d = len(l) - c
f = {'complete': c, 'incomplete': d, 'downloaded': n}
if return_name and self.show_names and self.allowed is not None:
f['name'] = self.allowed[infohash]['name']
return (f)
def get_scrape(self, paramslist):
fs = {}
if paramslist.has_key('info_hash'):
if self.config['scrape_allowed'] not in ['specific', 'full']:
return (400, 'Not Authorized', default_headers,
bencode({'failure_reason':
"specific scrape function is not available with this tracker."}))
for infohash in paramslist['info_hash']:
if self.allowed is not None and infohash not in self.allowed:
continue
if infohash in self.downloads:
fs[infohash] = self.scrapedata(infohash)
else:
if self.config['scrape_allowed'] != 'full':
return (400, 'Not Authorized', default_headers,
bencode({'failure reason':
"full scrape function is not available with this tracker."}))
#bencode({'failure reason':
#_("full scrape function is not available with this tracker.")}))
if self.allowed is not None:
hashes = self.allowed
else:
hashes = self.downloads
for infohash in hashes:
fs[infohash] = self.scrapedata(infohash)
return (200, 'OK', {'Content-Type': 'text/plain'}, bencode({'files': fs}))
def get_file(self, infohash):
if not self.allow_get:
return (400, 'Not Authorized',
default_headers,
_("get function is not available with this tracker."))
if not self.allowed.has_key(infohash):
return (404, 'Not Found', default_headers, alas)
fname = self.allowed[infohash]['file']
fpath = self.allowed[infohash]['path']
return (200, 'OK', {'Content-Type': 'application/x-bittorrent',
'Content-Disposition': 'attachment; filename=' + fname},
open(fpath, 'rb').read())
def check_allowed(self, infohash, paramslist):
if self.allowed is not None:
if not self.allowed.has_key(infohash):
return (200, 'Not Authorized', default_headers,
bencode({'failure reason':
"Requested download is not authorized for use with this tracker."}))
#_("Requested download is not authorized for use with this tracker.")}))
if self.config['allowed_controls']:
if self.allowed[infohash].has_key('failure reason'):
return (200, 'Not Authorized', default_headers,
bencode({'failure reason': self.allowed[infohash]['failure reason']}))
return None
def add_data(self, infohash, event, ip, paramslist):
peers = self.downloads.setdefault(infohash, {})
ts = self.times.setdefault(infohash, {})
self.completed.setdefault(infohash, 0)
self.seedcount.setdefault(infohash, 0)
def params(key, default = None, l = paramslist):
if l.has_key(key):
return l[key][0]
return default
myid = params('peer_id','')
if len(myid) != 20:
raise ValueError, 'id not of length 20'
if event not in ['started', 'completed', 'stopped', 'snooped', None]:
raise ValueError, 'invalid event'
port = int(params('port',''))
if port < 0 or port > 65535:
raise ValueError, 'invalid port'
left = int(params('left',''))
if left < 0:
raise ValueError, 'invalid amount left'
peer = peers.get(myid)
mykey = params('key')
auth = not peer or peer.get('key', -1) == mykey or peer.get('ip') == ip
gip = params('ip')
local_override = gip and self.allow_local_override(ip, gip)
if local_override:
ip1 = gip
else:
ip1 = ip
if not auth and local_override and self.only_local_override_ip:
auth = True
if params('numwant') is not None:
rsize = min(int(params('numwant')), self.max_give)
else:
rsize = self.response_size
if event == 'stopped':
if peer and auth:
self.delete_peer(infohash,myid)
elif not peer:
ts[myid] = time()
peer = {'ip': ip, 'port': port, 'left': left}
if mykey:
peer['key'] = mykey
if gip:
peer['given ip'] = gip
if port:
if not self.natcheck or (local_override and self.only_local_override_ip):
peer['nat'] = 0
self.natcheckOK(infohash,myid,ip1,port,left)
else:
NatCheck(self.connectback_result,infohash,myid,ip1,port,self.rawserver)
else:
peer['nat'] = 2**30
if event == 'completed':
self.completed[infohash] += 1
if not left:
self.seedcount[infohash] += 1
peers[myid] = peer
else:
if not auth:
return rsize # return w/o changing stats
ts[myid] = time()
if not left and peer['left']:
self.completed[infohash] += 1
self.seedcount[infohash] += 1
if not peer.get('nat', -1):
for bc in self.becache[infohash]:
bc[1][myid] = bc[0][myid]
del bc[0][myid]
if peer['left']:
peer['left'] = left
recheck = False
if ip != peer['ip']:
peer['ip'] = ip
recheck = True
if gip != peer.get('given ip'):
if gip:
peer['given ip'] = gip
elif peer.has_key('given ip'):
del peer['given ip']
if local_override:
if self.only_local_override_ip:
self.natcheckOK(infohash,myid,ip1,port,left)
else:
recheck = True
if port and self.natcheck:
if recheck:
if peer.has_key('nat'):
if not peer['nat']:
l = self.becache[infohash]
y = not peer['left']
for x in l:
del x[y][myid]
del peer['nat'] # restart NAT testing
else:
natted = peer.get('nat', -1)
if natted and natted < self.natcheck:
recheck = True
if recheck:
NatCheck(self.connectback_result,infohash,myid,ip1,port,self.rawserver)
return rsize
def peerlist(self, infohash, stopped, is_seed, return_type, rsize):
data = {} # return data
seeds = self.seedcount[infohash]
data['complete'] = seeds
data['incomplete'] = len(self.downloads[infohash]) - seeds
if ( self.allowed is not None and self.config['allowed_controls'] and
self.allowed[infohash].has_key('warning message') ):
data['warning message'] = self.allowed[infohash]['warning message']
data['interval'] = self.reannounce_interval
if stopped or not rsize: # save some bandwidth
data['peers'] = []
return data
bc = self.becache.setdefault(infohash,[[{}, {}], [{}, {}], [{}, {}]])
len_l = len(bc[0][0])
len_s = len(bc[0][1])
if not (len_l+len_s): # caches are empty!
data['peers'] = []
return data
l_get_size = int(float(rsize)*(len_l)/(len_l+len_s))
cache = self.cached.setdefault(infohash,[None,None,None])[return_type]
if cache:
if cache[0] + self.config['min_time_between_cache_refreshes'] < time():
cache = None
else:
if ( (is_seed and len(cache[1]) < rsize)
or len(cache[1]) < l_get_size or not cache[1] ):
cache = None
if not cache:
vv = [[],[],[]]
cache = [ time(),
bc[return_type][0].values()+vv[return_type],
bc[return_type][1].values() ]
shuffle(cache[1])
shuffle(cache[2])
self.cached[infohash][return_type] = cache
for rr in xrange(len(self.cached[infohash])):
if rr != return_type:
try:
self.cached[infohash][rr][1].extend(vv[rr])
except:
pass
if len(cache[1]) < l_get_size:
peerdata = cache[1]
if not is_seed:
peerdata.extend(cache[2])
cache[1] = []
cache[2] = []
else:
if not is_seed:
peerdata = cache[2][l_get_size-rsize:]
del cache[2][l_get_size-rsize:]
rsize -= len(peerdata)
else:
peerdata = []
if rsize:
peerdata.extend(cache[1][-rsize:])
del cache[1][-rsize:]
if return_type == 2:
peerdata = ''.join(peerdata)
data['peers'] = peerdata
return data
def get(self, connection, path, headers):
ip = connection.get_ip()
nip = get_forwarded_ip(headers)
if nip and not self.only_local_override_ip:
ip = nip
paramslist = {}
def params(key, default = None, l = paramslist):
if l.has_key(key):
return l[key][0]
return default
try:
(scheme, netloc, path, pars, query, fragment) = urlparse(path)
if self.uq_broken == 1:
path = path.replace('+',' ')
query = query.replace('+',' ')
path = unquote(path)[1:]
for s in query.split('&'):
if s != '':
i = s.index('=')
kw = unquote(s[:i])
paramslist.setdefault(kw, [])
paramslist[kw] += [unquote(s[i+1:])]
if path == '' or path == 'index.html':
return self.get_infopage()
if path == 'scrape':
return self.get_scrape(paramslist)
if (path == 'file'):
return self.get_file(params('info_hash'))
if path == 'favicon.ico' and self.favicon is not None:
return (200, 'OK', {'Content-Type' : 'image/x-icon'}, self.favicon)
if path != 'announce':
return (404, 'Not Found', default_headers, alas)
# main tracker function
infohash = params('info_hash')
if not infohash:
raise ValueError, 'no info hash'
notallowed = self.check_allowed(infohash, paramslist)
if notallowed:
if NOISY:
self._print_event( "get: NOT ALLOWED: info_hash=%s, %s" %
                                       (infohash.encode('hex'), str(notallowed)) )
return notallowed
event = params('event')
rsize = self.add_data(infohash, event, ip, paramslist)
except ValueError, e:
print e
if NOISY:
self._print_exc( "get: ",e )
return (400, 'Bad Request',
{'Content-Type': 'text/plain'},
'you sent me garbage - ' + str_exc(e))
if params('compact'):
return_type = 2
elif params('no_peer_id'):
return_type = 1
else:
return_type = 0
data = self.peerlist(infohash, event=='stopped', not params('left'),
return_type, rsize)
if paramslist.has_key('scrape'):
data['scrape'] = self.scrapedata(infohash, False)
return (200, 'OK', default_headers, bencode(data))
def natcheckOK(self, infohash, peerid, ip, port, not_seed):
bc = self.becache.setdefault(infohash,[[{}, {}], [{}, {}], [{}, {}]])
bc[0][not not_seed][peerid] = Bencached(bencode({'ip': ip, 'port': port,
'peer id': peerid}))
bc[1][not not_seed][peerid] = Bencached(bencode({'ip': ip, 'port': port}))
bc[2][not not_seed][peerid] = compact_peer_info(ip, port)
def natchecklog(self, peerid, ip, port, result):
print isotime(), '"!natcheck-%s:%i" %s %i 0 - -' % (
ip, port, quote(peerid), result)
def connectback_result(self, result, downloadid, peerid, ip, port):
record = self.downloads.get(downloadid, {}).get(peerid)
if ( record is None
or (record['ip'] != ip and record.get('given ip') != ip)
or record['port'] != port ):
if self.config['log_nat_checks']:
self.natchecklog(peerid, ip, port, 404)
return
if self.config['log_nat_checks']:
if result:
x = 200
else:
x = 503
self.natchecklog(peerid, ip, port, x)
if not record.has_key('nat'):
record['nat'] = int(not result)
if result:
self.natcheckOK(downloadid,peerid,ip,port,record['left'])
elif result and record['nat']:
record['nat'] = 0
self.natcheckOK(downloadid,peerid,ip,port,record['left'])
elif not result:
record['nat'] += 1
def save_dfile(self):
if self.save_pending:
return
self.save_pending = True
# if this is taking all the time, threading it won't help anyway because
# of the GIL
#state = bencode(self.state)
state = cPickle.dumps(self.state) # pickle handles Unicode.
df = ThreadedDeferred(wrap_task(self.rawserver.external_add_task),
self._save_dfile, state)
def cb(r):
self.save_pending = False
if NOISY:
self._print_event( "save_dfile: Completed" )
def eb(etup):
self.save_pending = False
self._print_exc( "save_dfile: ", etup )
df.addCallbacks(cb, eb)
def _save_dfile(self, state):
exc_info = None
try:
h = open(self.dfile, 'wb')
h.write(state)
h.close()
except:
exc_info = sys.exc_info()
self.rawserver.external_add_task(self.save_dfile_interval, self.save_dfile)
if exc_info:
raise exc_info[0], exc_info[1], exc_info[2]
def parse_allowed(self):
if self.parse_pending:
return
self.parse_pending = True
df = ThreadedDeferred(wrap_task(self.rawserver.external_add_task),
self._parse_allowed, daemon=True)
def eb(etup):
self.parse_pending = False
self._print_exc("parse_dir: ", etup)
df.addCallbacks(self._parse_allowed_finished, eb)
def _parse_allowed(self):
def errfunc(message, exc_info=None):
# logging broken .torrent files would be useful but could confuse
# programs parsing log files
m = "parse_dir: %s" % message
if exc_info:
self._print_exc(m, exc_info)
else:
self._print_event(m)
pass
r = parsedir(self.allowed_dir, self.allowed, self.allowed_dir_files,
self.allowed_dir_blocked, errfunc, include_metainfo = False)
# register the call to parse a dir.
self.rawserver.external_add_task(self.parse_dir_interval,
self.parse_allowed)
return r
def _parse_allowed_finished(self, r):
self.parse_pending = False
( self.allowed, self.allowed_dir_files, self.allowed_dir_blocked,
added, removed ) = r
if NOISY:
self._print_event("_parse_allowed_finished: removals: %s" %
str(removed))
for infohash in added:
self.downloads.setdefault(infohash, {})
self.completed.setdefault(infohash, 0)
self.seedcount.setdefault(infohash, 0)
self.state['allowed'] = self.allowed
self.state['allowed_dir_files'] = self.allowed_dir_files
def delete_peer(self, infohash, peerid):
dls = self.downloads[infohash]
peer = dls[peerid]
if not peer['left']:
self.seedcount[infohash] -= 1
if not peer.get('nat', -1):
l = self.becache[infohash]
y = not peer['left']
for x in l:
del x[y][peerid]
del self.times[infohash][peerid]
del dls[peerid]
def expire_downloaders(self):
for infohash, peertimes in self.times.iteritems():
items = peertimes.items()
for myid, t in items:
if t < self.prevtime:
self.delete_peer(infohash, myid)
self.prevtime = time()
if self.keep_dead != 1:
items = self.downloads.items()
for key, peers in items:
if len(peers) == 0 and (self.allowed is None or
key not in self.allowed):
del self.times[key]
del self.downloads[key]
del self.seedcount[key]
self.rawserver.add_task(self.timeout_downloaders_interval,
self.expire_downloaders)
def _print_event(self, message):
print datetime.datetime.utcnow().isoformat(), message
def _print_exc(self, note, etup):
print datetime.datetime.utcnow().isoformat(), note, ':'
traceback.print_exception(*etup)
def track(args):
assert type(args) == list and \
len([x for x in args if type(x)==str])==len(args)
config = {}
defaults = get_defaults('bittorrent-tracker') # hard-coded defaults.
try:
config, files = parse_configuration_and_args(defaults,
'bittorrent-tracker', args, 0, 0 )
except ValueError, e:
print _("error: ") + str_exc(e)
print _("run with -? for parameter explanations")
return
except BTFailure, e:
print _("error: ") + str_exc(e)
print _("run with -? for parameter explanations")
return
if config['dfile']=="":
config['dfile'] = decode_from_filesystem(
os.path.join(platform.get_temp_dir(), efs2(u"dfile") +
str(os.getpid())))
config = Preferences().initWithDict(config)
ef = lambda e: errorfunc(logging.WARNING, e)
platform.write_pid_file(config['pid'], ef)
t = None
try:
r = RawServer(config)
t = Tracker(config, r)
try:
#DEBUG
print "track: create_serversocket, port=", config['port']
#END
s = r.create_serversocket(config['port'], config['bind'])
handler = HTTPHandler(t.get, config['min_time_between_log_flushes'])
r.start_listening(s, handler)
except socket.error, e:
print ("Unable to open port %d. Use a different port?" %
config['port'])
return
r.listen_forever()
finally:
if t: t.save_dfile()
print _("# Shutting down: ") + isotime()
def size_format(s):
return str(Size(s))
def errorfunc( level, text ):
print "%s: %s" % (logging.getLevelName(level), text)
| {
"content_hash": "14b24e8cec39b12c136428725f200927",
"timestamp": "",
"source": "github",
"line_count": 923,
"max_line_length": 236,
"avg_line_length": 40.04875406283857,
"alnum_prop": 0.5083727850669553,
"repo_name": "sauloal/linuxscripts",
"id": "0dbb7eb0ea8406d55a53f10ed95343b33676d228",
"size": "37548",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "apache/var/www/html/saulo/torrent/html/bin/clients/mainline/BitTorrent/track.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "616304"
},
{
"name": "C++",
"bytes": "24286"
},
{
"name": "JavaScript",
"bytes": "238160"
},
{
"name": "PHP",
"bytes": "9491076"
},
{
"name": "Perl",
"bytes": "877930"
},
{
"name": "Python",
"bytes": "3651261"
},
{
"name": "Racket",
"bytes": "4568"
},
{
"name": "Shell",
"bytes": "157362"
},
{
"name": "XSLT",
"bytes": "28086"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, print_function
from sentry import analytics
class IntegrationAddedEvent(analytics.Event):
type = "integration.added"
attributes = (
analytics.Attribute("provider"),
analytics.Attribute("id"),
analytics.Attribute("organization_id"),
analytics.Attribute("user_id", required=False),
analytics.Attribute("default_user_id"),
)
class IntegrationIssueCreatedEvent(analytics.Event):
type = "integration.issue.created"
attributes = (
analytics.Attribute("provider"),
analytics.Attribute("id"),
analytics.Attribute("organization_id"),
analytics.Attribute("user_id", required=False),
analytics.Attribute("default_user_id"),
)
class IntegrationIssueLinkedEvent(analytics.Event):
type = "integration.issue.linked"
attributes = (
analytics.Attribute("provider"),
analytics.Attribute("id"),
analytics.Attribute("organization_id"),
analytics.Attribute("user_id", required=False),
analytics.Attribute("default_user_id"),
)
class IntegrationIssueStatusSyncedEvent(analytics.Event):
type = "integration.issue.status.synced"
attributes = (
analytics.Attribute("provider"),
analytics.Attribute("id"),
analytics.Attribute("organization_id"),
)
class IntegrationIssueAssigneeSyncedEvent(analytics.Event):
type = "integration.issue.assignee.synced"
attributes = (
analytics.Attribute("provider"),
analytics.Attribute("id"),
analytics.Attribute("organization_id"),
)
class IntegrationIssueCommentsSyncedEvent(analytics.Event):
type = "integration.issue.comments.synced"
attributes = (
analytics.Attribute("provider"),
analytics.Attribute("id"),
analytics.Attribute("organization_id"),
)
class IntegrationRepoAddedEvent(analytics.Event):
type = "integration.repo.added"
attributes = (
analytics.Attribute("provider"),
analytics.Attribute("id"),
analytics.Attribute("organization_id"),
)
class IntegrationResolveCommitEvent(analytics.Event):
type = "integration.resolve.commit"
attributes = (
analytics.Attribute("provider"),
analytics.Attribute("id"),
analytics.Attribute("organization_id"),
)
class IntegrationResolvePREvent(analytics.Event):
type = "integration.resolve.pr"
attributes = (
analytics.Attribute("provider"),
analytics.Attribute("id"),
analytics.Attribute("organization_id"),
)
analytics.register(IntegrationAddedEvent)
analytics.register(IntegrationIssueCreatedEvent)
analytics.register(IntegrationIssueLinkedEvent)
analytics.register(IntegrationIssueStatusSyncedEvent)
analytics.register(IntegrationIssueAssigneeSyncedEvent)
analytics.register(IntegrationIssueCommentsSyncedEvent)
analytics.register(IntegrationRepoAddedEvent)
analytics.register(IntegrationResolveCommitEvent)
analytics.register(IntegrationResolvePREvent)
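# Usage sketch (not part of this module; the call site and values below are
# assumptions for illustration). Once registered, an event is typically
# emitted through the analytics entry point with its attributes passed as
# keyword arguments, e.g.:
#
#   from sentry import analytics
#   analytics.record(
#       "integration.added",
#       provider="slack",
#       id=integration.id,
#       organization_id=organization.id,
#       user_id=request.user.id,
#       default_user_id=organization.default_owner_id,
#   )
#
# Only the event types and attribute names above are defined by this module.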
| {
"content_hash": "285c7075952969a6c19d984f6d755d09",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 59,
"avg_line_length": 27.78181818181818,
"alnum_prop": 0.7045157068062827,
"repo_name": "mvaled/sentry",
"id": "04d3024d710869c9d795a445306a287f08889b18",
"size": "3056",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/sentry/integrations/analytics.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "226439"
},
{
"name": "Dockerfile",
"bytes": "6431"
},
{
"name": "HTML",
"bytes": "173429"
},
{
"name": "JavaScript",
"bytes": "9314175"
},
{
"name": "Lua",
"bytes": "65885"
},
{
"name": "Makefile",
"bytes": "9225"
},
{
"name": "Python",
"bytes": "50385401"
},
{
"name": "Ruby",
"bytes": "168"
},
{
"name": "Shell",
"bytes": "5685"
},
{
"name": "TypeScript",
"bytes": "773664"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2013 Clarinova. This file is licensed under the terms of the
Revised BSD License, included in this distribution as LICENSE.txt
"""
from . import DatabaseInterface
import semidbm as dbm # @UnresolvedImport
import os
import json
class Dbm(DatabaseInterface):
def __init__(self, bundle, base_path, suffix=None):
self.bundle = bundle
self.suffix = suffix
self._path = base_path
if suffix:
self._path += '-'+suffix
self._path += '.dbm'
self._file = None
@property
def reader(self):
self.close()
self._file = dbm.open(self._path, 'r')
return self
@property
def writer(self):
"""Return a new writer. Will always create a new file"""
self.close()
self._file = dbm.open(self._path, 'n')
return self
@property
    def appender(self):
        """Return an appender; the file is preserved if it already exists"""
self.close()
self._file = dbm.open(self._path, 'c')
return self
def delete(self):
if os.path.exists(self._path):
os.remove(self._path)
def close(self):
if self._file:
self._file.close()
self._file = None
def __getitem__(self, key):
return json.loads(self._file[key])
def __setitem__(self, key, val):
#print key,'<-',val
self._file[str(key)] = json.dumps(val)
def keys(self):
return self._file.keys()
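# Minimal usage sketch (not part of the original module; "bundle" is whatever
# object the constructor expects and the path below is illustrative only):
#
#   d = Dbm(bundle, '/tmp/example')   # backing file becomes /tmp/example.dbm
#   w = d.writer                      # 'n' mode: always creates a fresh file
#   w['key'] = {'a': 1}               # values are stored JSON-encoded
#   w.close()
#
#   r = d.reader
#   assert r['key'] == {'a': 1}
#   r.close()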
| {
"content_hash": "fc065236d6d0c68c27602c990f1ff824",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 75,
"avg_line_length": 21.626666666666665,
"alnum_prop": 0.5283600493218249,
"repo_name": "kball/ambry",
"id": "3593cb4c6ca36d115a95df8f219fd3c66156dcc1",
"size": "1622",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ambry/database/dbm.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1229770"
},
{
"name": "Ruby",
"bytes": "2885"
},
{
"name": "Shell",
"bytes": "16552"
}
],
"symlink_target": ""
} |
"""
byceps.blueprints.admin.site.views
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2020 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from flask import abort, request
from ....services.party import service as party_service
from ....services.site import (
service as site_service,
settings_service as site_settings_service,
)
from ....util.framework.blueprint import create_blueprint
from ....util.framework.flash import flash_error, flash_success
from ....util.framework.templating import templated
from ....util.views import redirect_to
from ...authorization.decorators import permission_required
from ...authorization.registry import permission_registry
from .authorization import SitePermission
from .forms import CreateForm, UpdateForm
blueprint = create_blueprint('site_admin', __name__)
permission_registry.register_enum(SitePermission)
@blueprint.route('/')
@permission_required(SitePermission.view)
@templated
def index():
"""List all sites."""
sites = site_service.get_all_sites()
parties = party_service.get_all_parties()
party_titles_by_id = {p.id: p.title for p in parties}
sites.sort(key=lambda site: (site.title, site.party_id))
return {
'sites': sites,
'party_titles_by_id': party_titles_by_id,
}
@blueprint.route('/sites/<site_id>')
@permission_required(SitePermission.view)
@templated
def view(site_id):
"""Show a site's settings."""
site = site_service.find_site(site_id)
if site is None:
abort(404)
settings = site_settings_service.get_settings(site.id)
return {
'site': site,
'settings': settings,
}
@blueprint.route('/sites/create')
@permission_required(SitePermission.create)
@templated
def create_form(erroneous_form=None):
"""Show form to create a site."""
party_id = request.args.get('party_id')
form = erroneous_form if erroneous_form else CreateForm(party_id=party_id)
form.set_email_config_choices()
form.set_party_choices()
return {
'form': form,
}
@blueprint.route('/sites', methods=['POST'])
@permission_required(SitePermission.create)
def create():
"""Create a site."""
form = CreateForm(request.form)
form.set_email_config_choices()
form.set_party_choices()
if not form.validate():
return create_form(form)
site_id = form.id.data.strip().lower()
title = form.title.data.strip()
server_name = form.server_name.data.strip()
email_config_id = form.email_config_id.data
party_id = form.party_id.data
enabled = form.enabled.data
user_account_creation_enabled = form.user_account_creation_enabled.data
login_enabled = form.login_enabled.data
if party_id:
party = party_service.find_party(party_id)
if not party:
            flash_error(f'The party ID "{party_id}" is unknown.')
return create_form(form)
else:
party_id = None
site = site_service.create_site(
site_id,
title,
server_name,
email_config_id,
enabled,
user_account_creation_enabled,
login_enabled,
party_id=party_id,
)
    flash_success(f'The site "{site.title}" has been created.')
return redirect_to('.view', site_id=site.id)
@blueprint.route('/sites/<site_id>/update')
@permission_required(SitePermission.update)
@templated
def update_form(site_id, erroneous_form=None):
"""Show form to update the site."""
site = _get_site_or_404(site_id)
form = erroneous_form if erroneous_form else UpdateForm(obj=site)
form.set_email_config_choices()
form.set_party_choices()
return {
'site': site,
'form': form,
}
@blueprint.route('/sites/<site_id>', methods=['POST'])
@permission_required(SitePermission.update)
def update(site_id):
"""Update the site."""
site = _get_site_or_404(site_id)
form = UpdateForm(request.form)
form.set_email_config_choices()
form.set_party_choices()
if not form.validate():
return update_form(site.id, form)
title = form.title.data.strip()
server_name = form.server_name.data.strip()
email_config_id = form.email_config_id.data
party_id = form.party_id.data
enabled = form.enabled.data
user_account_creation_enabled = form.user_account_creation_enabled.data
login_enabled = form.login_enabled.data
archived = form.archived.data
if party_id:
party = party_service.find_party(party_id)
if not party:
            flash_error(f'The party ID "{party_id}" is unknown.')
return update_form(site.id, form)
else:
party_id = None
try:
site = site_service.update_site(
site.id,
title,
server_name,
email_config_id,
party_id,
enabled,
user_account_creation_enabled,
login_enabled,
archived,
)
except site_service.UnknownSiteId:
abort(404, f'Unknown site ID "{site_id}".')
    flash_success(f'The site "{site.title}" has been updated.')
return redirect_to('.view', site_id=site.id)
def _get_site_or_404(site_id):
site = site_service.find_site(site_id)
if site is None:
abort(404)
return site
| {
"content_hash": "7eac6de788f6008f82bb01cc207bd19f",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 78,
"avg_line_length": 26.402985074626866,
"alnum_prop": 0.6463161861692105,
"repo_name": "m-ober/byceps",
"id": "2bdc96e3c0a2ed670a27a9ff2a0289d525223878",
"size": "5307",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "byceps/blueprints/admin/site/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "38499"
},
{
"name": "Dockerfile",
"bytes": "1302"
},
{
"name": "HTML",
"bytes": "369989"
},
{
"name": "JavaScript",
"bytes": "9483"
},
{
"name": "Python",
"bytes": "1152996"
}
],
"symlink_target": ""
} |
'''
@author: Frank
'''
import os
try:
if os.environ['TERM'].startswith('xterm'):
os.environ['TERM'] = 'vt100'
except:
os.environ['TERM'] = 'vt100'
import readline
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import shlex
import hashlib
import optparse
import termcolor
import pydoc
import time
import urllib3
import zstacklib.utils.log as log
# Comment out the next line to print detailed zstack cli http commands to the screen.
log.configure_log('/var/log/zstack/zstack-cli', log_to_console=False)
import apibinding.inventory as inventory
import apibinding.api as api
import zstacklib.utils.jsonobject as jsonobject
import zstacklib.utils.filedb as filedb
import zstackcli.parse_config as parse_config
import zstackcli.deploy_config as deploy_config
import zstackcli.read_config as read_config
cld = termcolor.colored
cprint = termcolor.cprint
text_doc = pydoc.TextDoc()
CLI_LIB_FOLDER = os.path.expanduser('~/.zstack/cli')
CLI_HISTORY = '%s/command_history' % CLI_LIB_FOLDER
CLI_RESULT_HISTORY_FOLDER = '%s/result_history' % CLI_LIB_FOLDER
CLI_RESULT_HISTORY_KEY = '%s/result_key' % CLI_RESULT_HISTORY_FOLDER
CLI_RESSULT_FILE = '%s/result' % CLI_RESULT_HISTORY_FOLDER
SESSION_FILE = '%s/session' % CLI_LIB_FOLDER
CLI_MAX_CMD_HISTORY = 1000
CLI_MAX_RESULT_HISTORY = 1000
prompt = '>>>'
query_param_keys = \
['conditions', 'count', 'limit', 'start', 'timeout', \
'replyWithCount', 'sortBy', 'sortDirection', 'fields']
def clean_password_in_cli_history():
cmd_historys = open(CLI_HISTORY, 'r').readlines()
new_cmd_historys = []
for cmd in cmd_historys:
if 'password=' in cmd:
cmd_params = cmd.split()
cmd_list = []
for param in cmd_params:
if not 'password=' in param:
cmd_list.append(param)
else:
cmd_list.append(param.split('=')[0] + '=')
new_cmd_historys.append(' '.join(cmd_list))
else:
new_cmd_historys.append(cmd)
open(CLI_HISTORY, 'w').write('\n'.join(new_cmd_historys))
class CliError(Exception):
'''Cli Error'''
class Cli(object):
'''
classdocs
'''
msg_creator = {}
LOGIN_MESSAGE_NAME = 'APILogInByAccountMsg'
LOGOUT_MESSAGE_NAME = 'APILogOutMsg'
LOGIN_BY_USER_NAME = 'APILogInByUserMsg'
CREATE_ACCOUNT_NAME = 'APICreateAccountMsg'
CREATE_USER_NAME = 'APICreateUserMsg'
ACCOUNT_RESET_PASSWORD_NAME = 'APIUpdateAccountMsg'
USER_RESET_PASSWORD_NAME = 'APIUpdateUserMsg'
@staticmethod
def register_message_creator(apiname, func):
Cli.msg_creator[apiname] = func
def usage(self):
print '''
ZStack command line tool
Type "help" for more information
Type Tab key for auto-completion
Type "quit" or "exit" or Ctrl-d to exit
'''
def print_error(self, err):
print '\033[91m' + err + '\033[0m'
def complete(self, pattern, index):
'''
        pattern is the current input; index is the position within the list of
        matches. readline keeps calling complete() until it returns None.
'''
def prepare_primitive_fields_words(apiname, separator='=', prefix=''):
if not prefix:
api_map_name = inventory.queryMessageInventoryMap[apiname].__name__
else:
api_map_name = apiname
query_pri_fields = eval('inventory.%s().PRIMITIVE_FIELDS' % api_map_name)
query_pri_fields = ['%s' % field for field in query_pri_fields]
temp_fields = list(query_pri_fields)
query_pri_fields = []
for field in temp_fields:
if prefix:
query_pri_fields.append('%s%s%s' % (prefix, field, separator))
else:
query_pri_fields.append('%s%s' % (field, separator))
self.words.extend(query_pri_fields)
def prepare_expanded_fields_words(apiname, separator='.', prefix=''):
if not prefix:
api_map_name = inventory.queryMessageInventoryMap[apiname].__name__
else:
api_map_name = apiname
query_ext_fields = eval('inventory.%s().EXPANDED_FIELDS' % api_map_name)
query_ext_fields = ['%s' % field for field in query_ext_fields]
temp_fields = list(query_ext_fields)
query_ext_fields = []
for field in temp_fields:
if prefix:
query_ext_fields.append('%s%s%s' % (prefix, field, separator))
else:
query_ext_fields.append('%s%s' % (field, separator))
self.words.extend(query_ext_fields)
if 'conditions=' in self.words:
self.words.remove('conditions=')
def prepare_query_words(apiname, prefix=''):
prepare_primitive_fields_words(apiname, '=', prefix)
prepare_expanded_fields_words(apiname, '.', prefix)
def prepare_fields_words(apiname, current_fields=[]):
prepare_primitive_fields_words(apiname, ',')
for field in current_fields:
new_field = '%s,' % field
if new_field in self.words:
self.words.remove(new_field)
def prepare_words():
currtext = readline.get_line_buffer()
apiname = currtext.split()[0]
if apiname in self.words_db:
self.is_cmd = False
self.words = ['%s=' % field for field in self.api_class_params['API%sMsg' % apiname]]
if apiname.startswith('Query'):
real_api_name = 'API%sMsg' % apiname
prepare_query_words(real_api_name)
if not ('UserTag' in apiname or 'SystemTag' in apiname):
self.words.append('__systemTag__=')
self.words.append('__userTag__=')
else:
self.is_cmd = True
self.words = self.words_db
if not self.words:
return None
prepare_words()
if not self.curr_pattern or pattern.lower() != self.curr_pattern.lower():
#self.matching_words = [w for w in self.words if w.lower().startswith(pattern.lower())]
if self.is_cmd:
self.matching_words = ['%s ' % w for w in self.words if pattern.lower() in w.lower()]
else:
#need to auto complete expanded fields.
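                # The dotted pattern (for example 'parent.child.field') is
                # resolved segment by segment below: starting from the query
                # API's inventory object, each segment is checked against
                # EXPANDED_FIELDS and followed through QUERY_OBJECT_MAP to the
                # next inventory object; if every segment resolves, that
                # object's fields are offered as completions, keeping the
                # already-typed prefix.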
if '.' in pattern:
currtext = readline.get_line_buffer()
fields_objects = pattern.split('.')
head_field = fields_objects[0]
fields_num = len(fields_objects)
apiname = currtext.split()[0]
new_api_name = 'API%sMsg' % apiname
if inventory.queryMessageInventoryMap.has_key(new_api_name):
api_obj_name = inventory.queryMessageInventoryMap[new_api_name].__name__
query_ext_fields = eval('inventory.%s().EXPANDED_FIELDS' % api_obj_name)
if head_field in query_ext_fields:
current_obj_name = eval('inventory.%s().QUERY_OBJECT_MAP["%s"]' % (api_obj_name, head_field))
for i in range(0, fields_num):
if i == fields_num - 2:
break
next_field = fields_objects[i + 1]
query_ext_fields = eval('inventory.%s().EXPANDED_FIELDS' % current_obj_name)
if next_field in query_ext_fields:
current_obj_name = eval('inventory.%s().QUERY_OBJECT_MAP["%s"]' % (current_obj_name, next_field))
else:
current_obj_name = None
else:
current_obj_name = None
else:
current_obj_name = None
if current_obj_name:
self.words = []
pattern_prefix = '.'.join(fields_objects[:-1])
prepare_query_words(current_obj_name, '%s.' % pattern_prefix)
currtext = readline.get_line_buffer()
last_field = currtext.split()[-1]
if not currtext.endswith(' ') and last_field.startswith('fields='):
apiname = currtext.split()[0]
new_api_name = 'API%sMsg' % apiname
api_map_name = inventory.queryMessageInventoryMap[new_api_name].__name__
self.words = []
fields = last_field.split('=')[1]
prepare_fields_words(new_api_name, fields.split(','))
self.matching_words = [w for w in self.words if pattern.lower() in w.lower()]
self.curr_pattern = pattern
try:
return self.matching_words[index]
except IndexError:
return None
def do_command(self, line):
def check_session(apiname):
if not self.session_uuid and apiname not in [self.LOGIN_MESSAGE_NAME, self.LOGIN_BY_USER_NAME]:
self.print_error('''Please login before running any API message
example: %sLogInByAccount accountName=admin password=your_super_secure_admin_password''' % prompt)
return False
return True
def is_api_param_a_list(apiname, param):
optional_list = eval('isinstance(inventory.%s().%s, \
inventory.OptionalList)' % (apiname, param))
not_none_list = eval('isinstance(inventory.%s().%s, \
inventory.NotNoneList)' % (apiname, param))
if optional_list or not_none_list:
return True
def build_params():
def eval_string(key, value_string):
try:
return eval(value_string)
except Exception as e:
err_msg = """
Parse command parameters error:
eval '%s' error for: '%s'
the right format is like: "[{'KEY':'VALUE'}, {'KEY':['VALUE1', 'VALUE2']}]"
""" % (value_string, key)
self.print_error(err_msg)
raise e
pairs = shlex.split(line)
if pairs[0] in self.cli_cmd:
cmd = pairs[0]
if len(pairs) > 1:
return cmd, pairs[1:]
else:
return cmd, None
apiname = 'API%sMsg' % pairs[0]
if apiname not in inventory.api_names:
raise CliError('"%s" is not an API message' % apiname)
#'=' will be used for more meanings than 'equal' in Query API
if apiname.startswith('APIQuery'):
return apiname, pairs[1:]
all_params = {}
for param_str in pairs[1:]:
params = param_str.split('=', 1)
if len(params) != 2:
raise CliError('Invalid parameter[%s], the parameter must be split by "="' % param_str)
if apiname == 'APIAddSecurityGroupRuleMsg' and params[0] == 'rules':
all_params[params[0]] = eval(params[1])
elif apiname in ['APIGetHostMonitoringDataMsg', 'APIGetVmMonitoringDataMsg', 'APIMonitoringPassThroughMsg'] and params[0] == 'query':
all_params[params[0]] = eval(params[1])
elif apiname == 'APIAttachNetworkServiceToL3NetworkMsg' and params[0] == 'networkServices':
all_params[params[0]] = eval_string(params[0], params[1])
elif apiname == 'APIDetachNetworkServiceFromL3NetworkMsg' and params[0] == 'networkServices':
all_params[params[0]] = eval_string(params[0], params[1])
elif apiname == 'APICreatePolicyMsg' and params[0] == 'statements':
all_params[params[0]] = eval_string(params[0], params[1])
elif is_api_param_a_list(apiname, params[0]):
all_params[params[0]] = params[1].split(',')
else:
all_params[params[0]] = params[1]
return (apiname, all_params)
def generate_query_params(apiname, params):
'''
            Query params may include condition expressions, which support these operators:
=, !=, >, <, >=, <=, ?=, !?=, ~=, !~=
?= means 'in'
!?= means 'not in'
~= means 'like'
!~= means 'not like'
=null means 'is null'
!=null means 'is not null'
'''
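            # Illustrative mappings (sketch, not exhaustive) of condition
            # strings to the dicts produced below:
            #   name=vm1         -> {'name': 'name', 'op': '=',  'value': 'vm1'}
            #   name!=vm1        -> {'name': 'name', 'op': '!=', 'value': 'vm1'}
            #   memorySize>=1024 -> {'name': 'memorySize', 'op': '>=', 'value': '1024'}
            #   uuid?=a,b        -> {'name': 'uuid', 'op': 'in', 'value': 'a,b'}
            #   name~=vm         -> {'name': 'name', 'op': 'like', 'value': '%vm%'}
            #   hostUuid=null    -> {'name': 'hostUuid', 'op': 'is null', 'value': ''}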
null = 'null'
eq = '='
gt = '>'
lt = '<'
nt = '!'
lk = '~'
qs = '?'
ps = '+'
ms = '-'
perc = '%'
underscore = '_'
conditions = []
new_params = {}
for param in params:
if eq in param:
key,value = param.split(eq, 1)
if not key in query_param_keys:
if key.endswith(nt):
if value != null:
conditions.append({'name':key[:-1], \
'op':'!=', 'value': value})
else:
conditions.append({'name':key[:-1], \
'op':'is not null', 'value': ''})
elif key.endswith(gt):
conditions.append({'name':key[:-1], \
'op':'>=', 'value': value})
elif key.endswith(lt):
conditions.append({'name':key[:-1], \
'op':'<=', 'value': value})
elif key.endswith('%s%s' % (nt, qs)):
conditions.append({'name':key[:-2], \
'op':'not in', 'value': value})
elif key.endswith(qs):
conditions.append({'name':key[:-1], \
'op':'in', 'value': value})
elif key.endswith('%s%s' % (nt, lk)):
                    # add the pattern % automatically if the user did not provide one
if not perc in value and not underscore in value:
value = '%s%s%s' % (perc, value, perc)
conditions.append({'name':key[:-2], \
'op':'not like', 'value': value})
elif key.endswith(lk):
                    # add the pattern % automatically if the user did not provide one
if not perc in value and not underscore in value:
value = '%s%s%s' % (perc, value, perc)
conditions.append({'name':key[:-1], \
'op':'like', 'value': value})
else:
if value != null:
conditions.append({'name':key, \
'op':eq, 'value': value})
else:
conditions.append({'name':key, \
'op':'is null', 'value': ''})
elif key == 'conditions':
conditions.extend(value)
elif key == 'fields':
#remove the last ','
if value.endswith(','):
value = value[:-1]
new_params[key] = value.split(',')
else:
if is_api_param_a_list(apiname, key):
new_params[key] = value.split(',')
else:
new_params[key] = value
elif gt in param:
key,value = param.split(gt, 1)
conditions.append({'name':key, \
'op':gt, 'value': value})
elif lt in param:
key,value = param.split(lt, 1)
conditions.append({'name':key, \
'op':lt, 'value': value})
new_params['conditions'] = conditions
return new_params
def create_msg(apiname, params):
creator = self.msg_creator.get(apiname)
if creator:
return creator(apiname, params)
if apiname.startswith('APIQuery'):
params = generate_query_params(apiname, params)
msg = eval('inventory.%s()' % apiname)
for key in params.keys():
value = params[key]
setattr(msg, key, value)
return msg
def set_session_to_api(msg):
session = inventory.Session()
session.uuid = self.session_uuid
msg.session = session
(apiname, all_params) = build_params()
if apiname in self.cli_cmd:
#self.write_more(apiname, None)
self.cli_cmd_func[apiname](all_params)
return
if not check_session(apiname):
raise CliError("No session uuid defined")
msg = create_msg(apiname, all_params)
set_session_to_api(msg)
try:
if apiname in [self.LOGIN_MESSAGE_NAME, self.LOGIN_BY_USER_NAME, self.CREATE_ACCOUNT_NAME, self.CREATE_USER_NAME]:
if not msg.password:
raise CliError('"password" must be specified')
msg.password = hashlib.sha512(msg.password).hexdigest()
if apiname == self.USER_RESET_PASSWORD_NAME:
msg.password = hashlib.sha512(msg.password).hexdigest()
if apiname == self.LOGOUT_MESSAGE_NAME:
if not msg.sessionUuid:
setattr(msg, 'sessionUuid', self.session_uuid)
start_time = time.time()
(name, event) = self.api.async_call_wait_for_complete(msg, fail_soon=True)
end_time = time.time()
if apiname in [self.LOGIN_MESSAGE_NAME, self.LOGIN_BY_USER_NAME]:
self.session_uuid = event.inventory.uuid
open(SESSION_FILE, 'w').write(self.session_uuid)
result = jsonobject.dumps(event, True)
print '%s\n' % result
#print 'Time costing: %fs' % (end_time - start_time)
self.write_more(line, result)
except urllib3.exceptions.MaxRetryError as urlerr:
self.print_error('Is %s reachable? Please make sure the management node is running.' % self.api.api_url)
self.print_error(str(urlerr))
            raise CliError("Server: %s is not reachable" % self.hostname)
except Exception as e:
self.print_error(str(e))
self.write_more(line, str(e), False)
raise e
def main(self, cmd = None):
if not cmd:
self.usage()
exit_code = 0
while True:
try:
if cmd:
self.do_command(cmd)
else:
line = raw_input(prompt)
if line:
self.do_command(line)
except CliError as clierr:
self.print_error(str(clierr))
exit_code = 1
except (EOFError):
print ''
import atexit
if not os.path.exists(os.path.dirname(CLI_HISTORY)):
os.system('mkdir -p %s' % os.path.dirname(CLI_HISTORY))
atexit.register(clean_password_in_cli_history)
atexit.register(readline.write_history_file, CLI_HISTORY)
sys.exit(1)
except (KeyboardInterrupt):
print ''
except Exception as e:
exit_code = 3
self.print_error(str(e))
if cmd:
sys.exit(exit_code)
def build_api_parameters(self):
def rule_out_unneeded_params(keys):
excludes = ['session']
for k in excludes:
if k in keys:
keys.remove(k)
return keys
for apiname in inventory.api_names:
obj = eval("inventory.%s()" % apiname)
params = []
params.extend(obj.__dict__.keys())
self.api_class_params[apiname] = rule_out_unneeded_params(params)
def _parse_api_name(self, api_names):
'''
Remove API pattern 'API' and appendix 'MSG'
'''
short_api_name = []
for api in api_names:
if api.endswith('Msg'):
short_api_name.append(api[3:-3])
short_api_name.sort()
return short_api_name
def completer_print(self, substitution, matches, longest_match_length) :
def print_match(columes, new_matches, max_match_length):
cur_col = 1
for match in new_matches:
if cur_col == columes:
end_sign = '\n'
cur_col = 1
else:
end_sign = ' ' * (max_match_length - len(match))
cur_col += 1
try:
index = match.lower().index(self.curr_pattern.lower())
except Exception as e:
print "can't find pattern: %s in match: %s" % (self.curr_pattern, match)
print e
raise e
cprint(match[0:index], end='')
cprint(match[index:(len(self.curr_pattern) + index)], attrs=['bold', 'reverse'], end='')
cprint(match[(len(self.curr_pattern) + index):], end=end_sign)
def print_bold():
max_match_length = 0
matches_dot = []
matches_eq_cond = []
matches_eq_param = []
matches_ot = []
currtext = readline.get_line_buffer()
apiname = currtext.split()[0]
if apiname.startswith('Query'):
query_cmd = True
else:
query_cmd = False
for match in matches:
if len(match) > max_match_length:
max_match_length = len(match)
if match.endswith('.'):
matches_dot.append(match)
elif match.endswith('='):
for key in query_param_keys:
if query_cmd and match.startswith(key):
matches_eq_param.append(match)
break
else:
matches_eq_cond.append(match)
else:
matches_ot.append(match)
max_match_length += 2
try:
term_width = int(os.popen('stty size', 'r').read().split()[1])
except:
term_width = 80
columes = term_width/max_match_length
if columes == 0:
columes = 1
if matches_dot:
if query_cmd:
cprint('[Query Conditions:]', attrs=['bold'], end='\n')
print_match(columes, matches_dot, max_match_length)
print '\n'
if matches_eq_cond:
#cprint('[Primitive Query Conditions:]', attrs=['bold'], end='\n')
print_match(columes, matches_eq_cond, max_match_length)
print '\n'
if matches_eq_param:
if query_cmd:
cprint('[Parameters:]', attrs=['bold'], end='\n')
print_match(columes, matches_eq_param, max_match_length)
print '\n'
if matches_ot:
print_match(columes, matches_ot, max_match_length)
print '\n'
print ''
print_bold()
print ''
cprint('%s%s' % (prompt, readline.get_line_buffer()), end='')
#readline.redisplay()
def write_more(self, cmd, result, success=True):
if self.hd.get(self.start_key):
start_value = int(self.hd.get(self.start_key))
else:
start_value = 0
if self.hd.get(self.last_key):
last_value = int(self.hd.get(self.last_key))
else:
last_value = 0
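        # The result history is a ring of at most CLI_MAX_RESULT_HISTORY
        # slots: start_value is advanced to the slot that will hold this
        # result, and last_value tracks the wrap point so that, once the
        # limit is reached, new results overwrite the oldest slots.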
if last_value <= start_value:
if start_value < CLI_MAX_RESULT_HISTORY:
start_value += 1
else:
start_value = 1
last_value = 2
else:
if last_value < CLI_MAX_RESULT_HISTORY:
start_value += 1
last_value += 1
else:
start_value += 1
last_value = 1
self.hd.set(self.start_key, start_value)
self.hd.set(self.last_key, last_value)
        # filedb might leave more than one item with the same key.
while self.hd.get(str(start_value)):
self.hd.rem(str(start_value))
result_file = '%s%d' % (CLI_RESSULT_FILE, start_value)
open(result_file, 'w').write(result)
if not self.no_secure and 'password=' in cmd:
cmds = cmd.split()
cmds2 = []
for cmd2 in cmds:
if not 'password=' in cmd2:
cmds2.append(cmd2)
else:
cmds2.append(cmd2.split('=')[0] + '=' + '******')
cmd = ' '.join(cmds2)
self.hd.set(str(start_value), [cmd, success])
def read_more(self, num=None, need_print=True, full_info=True):
'''
        need_print indicates whether to print the command result to the screen.
        full_info indicates whether to include the command and its parameters
        in the returned result.
'''
start_value = self.hd.get(self.start_key)
last_value = self.hd.get(self.last_key)
        more_usage_list = [text_doc.bold('Usage:'), text_doc.bold('\t%smore NUM\t #show the result of command No. NUM' % prompt), text_doc.bold('\t%smore\t\t #show all available NUMs and commands. Failed commands are marked with "!" before them.' % prompt)]
more_usage = '\n'.join(more_usage_list)
if not start_value:
print 'No command history to display.'
return
if num:
if num.isdigit():
if int(num) > CLI_MAX_CMD_HISTORY:
                    print 'Cannot find a result for number: %s' % num
print 'Max number is: %s ' % str(CLI_MAX_RESULT_HISTORY)
cprint(more_usage, attrs=['bold'], end='\n')
return
key = start_value - int(num) + 1
if key <= 0:
key += CLI_MAX_RESULT_HISTORY
#print key
result_list = self.hd.get(str(key))
result_file = '%s%d' % (CLI_RESSULT_FILE, key)
result = open(result_file, 'r').read()
if result_list:
output = 'Command: \n\t%s\nResult:\n%s' % \
(result_list[0], result)
if need_print:
pydoc.pager(output)
if full_info:
return [result_list[0], output]
else:
return [result_list[0], result]
else:
more_list = []
explamation = text_doc.bold('!')
if start_value < last_value:
for i in range(CLI_MAX_RESULT_HISTORY):
if start_value - i > 0:
key = start_value - i
else:
key = start_value - i + CLI_MAX_RESULT_HISTORY
cmd_result = self.hd.get(str(key))
cmd_result_list = cmd_result[0].split()
cmd = text_doc.bold(cmd_result_list[0])
if len(cmd_result_list) > 1:
cmd = cmd + ' ' + ' '.join(cmd_result_list[1:])
if len(cmd_result) <= 2 or cmd_result[2]:
more_list.append('[%s]\t %s' % (str(i + 1), cmd))
else:
more_list.append('[%s] %s\t %s' % (str(i + 1), \
explamation, cmd))
else:
for i in range(start_value):
cmd_result = self.hd.get(str(start_value - i))
cmd_result_list = cmd_result[0].split()
cmd = text_doc.bold(cmd_result_list[0])
if len(cmd_result_list) > 1:
cmd = cmd + ' ' + ' '.join(cmd_result_list[1:])
if len(cmd_result) <= 2 or cmd_result[2]:
more_list.append('[%s]\t %s' % (str(i + 1), \
cmd))
else:
more_list.append('[%s] %s\t %s' % (str(i + 1), \
explamation, cmd))
more_result = '\n'.join(more_list)
header = text_doc.bold('[NUM]\tCOMMAND')
more_result = '%s\n%s\n%s' % (header, '-' * 48, more_result)
more_result = '%s\n%s' % (more_result, more_usage)
pydoc.pager(more_result)
return
        print 'Cannot find a result for number: %s' % num
cprint(more_usage, attrs=['bold'], end='\n')
def save_json_to_file(self, all_params):
def write_to_file(output, file_name, num):
file_name = os.path.abspath(file_name)
open(file_name, 'w').write(output)
print "Saved command: %s result to file: %s" % (str(num), file_name)
if not all_params:
self.show_help()
return
nums = all_params[0].split(',')
if len(all_params) > 1:
file_folder = all_params[1]
if len(nums) > 1 and not os.path.isdir(file_folder):
print "%s must be a folder, to save more than 1 command" % file_folder
return
else:
file_folder = None
if len(all_params) > 2:
json_only = all_params[2]
else:
json_only = False
for num in nums:
return_result = self.read_more(num, False, not json_only)
if not return_result:
print "cannot find related command result to save"
return
cmd, output = return_result
if not file_folder:
new_file_folder = '%s-%s.json' % (cmd.split()[0], num)
else:
new_file_folder = file_folder
dirname = os.path.dirname(new_file_folder)
if not dirname:
file_name = new_file_folder
write_to_file(output, file_name, num)
else:
if os.path.isdir(new_file_folder):
file_name = '%s/%s-%s.json' % (new_file_folder, cmd.split()[0], num)
elif os.path.isdir(dirname):
write_to_file(output, file_name, num)
else:
print "Can't find folder: %s" % dirname
def show_more(self, all_params):
if not all_params:
num = None
else:
num = all_params[0]
self.read_more(num)
def show_help(self, all_params):
help_string = text_doc.bold('Usage:')
help_string += '''
-------------------------------------------------------------------------------
help show help
 more [No.]          show one or more command history entries. If a command NUM is provided, only
                     the history of that command is shown.
>>> more
>>> more 1
save [No.] [TARGET_FILE_NAME|TARGET_FOLDER]
                     save one or more command history entries to a file or a directory.
>>> save 1
save history command 1 result to ./COMMAND-NAME-1.json
>>> save 1,2,3,4
save command history 1,2,3,4 to ./COMMAND-1.json, ./COMMAND-2.json,
./COMMAND-3.json, and ./COMMAND-4.json
>>> save 1 /tmp
save command history 1 to /tmp/COMMAND-1.json
>>> save 1 /tmp/1.json
save command history 1 to /tmp/1.json
ZSTACK_API [API_PARAMS]
                     execute an API command like LogInByAccount, QueryHost.
>>> LogInByAccount accountName=admin password=password
>>> QueryHost
If API PARAMS is a list type, use ',' to split contents.
>>> AddVmNicToSecurityGroup \\
securityGroupUuid=561f792761124a9a8fa9198684eaf5f2 \\
vmNicUuids=f994b93fe9354fd89061ea549642c6a4,\\
aee96364351e470abe1cfd919ce630b8,\\
e0c8016595a548628523d97b70e984e8
                     the parameter 'rules' of AddSecurityGroupRule is a list of maps;
                     you need to pass it as a JSON-formatted string in this case.
>>> AddSecurityGroupRule \\
securityGroupUuid=561f792761124a9a8fa9198684eaf5f2 \\
rules='[{"type":"Ingress","protocol":"TCP",\\
"startPort":100,"endPort":1000},\\
{"type":"Ingress","protocol":"UDP",\\
"startPort":100,"endPort":1000}]'
Query* [conditions] [Query_API_PARAMS]
query resources with query APIs; find details at http://zdoc.readthedocs.org/en/latest/userManual/query.html.
conditions are arranged in format of:
CONDITION_NAME(no space)OPERATOR(no space)VALUE
[CONDITION_NAME] is a field name of a resource, for example, uuid, name.
                     [OPERATOR] is one of: '=', '!=', '>', '<', '>=', '<=',
'?=', '!?=', '~=', '!~='
most operators are straightforward except follows:
                     '?=': check whether a value is within a set of values; values are split by ','; this
operator is equal to 'in' operator in SQL.
>>> QueryVmInstance name?=VM1,VM2
'!?=': check whether a value is NOT within a set of values; values are split by ',';
this operator is equal to 'not in' operator in SQL.
>>> QueryVmInstance vmNics.ip!?=192.168.0.1,192.168.0.2
'~=': simple pattern matching; use % to match any number of characters, even zero characters; use _
to match exactly one character; this operator is equal to 'like' operator in SQL.
>>> QueryHost name~=IntelCore%
>>> QueryHost name~=IntelCore_7
'!~=': negation of simple pattern matching; use % to match any number of characters, even zero
                  characters; use _ to match exactly one character; this operator is equal to 'not like' in SQL.
>>> QueryHost name!~=IntelCore%
>>> QueryHost name!~=IntelCore_7
'=null': NULL value test
>>> QueryVolume vmInstanceUuid=null
'!=null': NOT NULL value test
>>> QueryVolume vmInstanceUuid!=null
     [VALUE] is a string containing the value of a query condition; ',' is used to split the value into a string list.
     string comparisons are case-insensitive.
'''
help_string += text_doc.bold('ZStack API')
help_string += '''
-------------------------------------------------------------------------------
'''
for api in self.raw_words_db:
help_string += ' %s\n\n' % api
pydoc.pager(help_string)
def __init__(self, options):
'''
Constructor
'''
readline.parse_and_bind("tab: complete")
readline.set_completer(self.complete)
readline.set_completion_display_matches_hook(self.completer_print)
try:
readline.read_history_file(CLI_HISTORY)
except IOError:
pass
readline.set_history_length(CLI_MAX_CMD_HISTORY)
if not os.path.isdir(CLI_RESULT_HISTORY_FOLDER):
os.system('rm -rf %s' % os.path.dirname(CLI_RESULT_HISTORY_FOLDER))
os.system('mkdir -p %s' % os.path.dirname(CLI_RESULT_HISTORY_FOLDER))
try:
self.hd = filedb.FileDB(CLI_RESULT_HISTORY_KEY, is_abs_path=True)
except:
os.system('rm -rf %s' % CLI_RESULT_HISTORY_KEY)
self.hd = filedb.FileDB(CLI_RESULT_HISTORY_KEY, is_abs_path=True)
            print "\nError reading history file: %s. It has been recreated.\n" % CLI_RESULT_HISTORY_KEY
self.start_key = 'start_key'
self.last_key = 'last_key'
self.cli_cmd_func = {'help': self.show_help, \
'history': self.show_help, \
'more': self.show_more, \
'quit': sys.exit, \
'exit': sys.exit, \
'save': self.save_json_to_file}
self.cli_cmd = self.cli_cmd_func.keys()
self.raw_words_db = self._parse_api_name(inventory.api_names)
self.words_db = list(self.raw_words_db)
self.words_db.extend(self.cli_cmd)
self.words = list(self.words_db)
self.is_cmd = False
self.curr_pattern = None
self.matching_words = None
self.api_class_params = {}
self.build_api_parameters()
self.api = None
self.session_uuid = None
if os.path.exists(SESSION_FILE):
self.session_uuid = open(SESSION_FILE, 'r').readline()
self.hostname = options.host
self.port = options.port
self.no_secure = options.no_secure
self.api = api.Api(host=self.hostname, port=self.port)
def main():
parser = optparse.OptionParser()
parser.add_option(
"-H",
"--host",
dest="host",
default='localhost',
action='store',
help="[Optional] IP address or DNS name of a ZStack management node. Default value: localhost")
parser.add_option(
"-p",
"--port",
dest="port",
default='8080',
action='store',
help="[Optional] Port that the ZStack management node is listening on. Default value: 8080")
parser.add_option(
"-d",
"--deploy",
dest="deploy_config_file",
default=None,
action='store',
help="[Optional] deploy a cloud from a XML file.")
    parser.add_option(
        "-t",
        "--template",
        dest="deploy_config_template_file",
        default=None,
        action='store',
        help="[Optional] variable template file for the XML file specified in option '-d'")
parser.add_option(
"-D",
"--dump",
dest="zstack_config_dump_file",
default=None,
action='store',
help="[Optional] dump a cloud to a XML file")
parser.add_option(
"-P",
"--password",
dest="admin_password",
default='password',
action='store',
        help="[Optional] admin account password for dumping and recovering the cloud environment. It can only be used with the -D or -d option. Default is 'password'.")
parser.add_option(
"-s",
"--no-secure",
dest="no_secure",
default=False,
action='store_true',
        help="[Optional] if set, password information will be saved in the command history.")
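    # Example invocations (illustrative; assumes the console script is
    # installed as 'zstack-cli'):
    #   zstack-cli                                  # interactive shell against localhost:8080
    #   zstack-cli -H 192.168.0.10 QueryZone        # run a single API call and exit
    #   zstack-cli -D /tmp/cloud.xml -P mypassword  # dump the cloud to an XML file
    #   zstack-cli -d /tmp/cloud.xml                # deploy a cloud from an XML file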
(options, args) = parser.parse_args()
cmd = ' '.join(args)
os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = options.host
os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_PORT'] = options.port
if options.zstack_config_dump_file:
admin_passwd = hashlib.sha512(options.admin_password).hexdigest()
read_config.dump_zstack(options.zstack_config_dump_file, \
admin_passwd)
elif options.deploy_config_file:
#deploy ZStack pre-configed environment.
xml_config = parse_config.DeployConfig(options.deploy_config_file, \
options.deploy_config_template_file)
deploy_xml_obj = xml_config.get_deploy_config()
admin_passwd = hashlib.sha512(options.admin_password).hexdigest()
try:
deploy_xml_obj.deployerConfig
except:
deploy_config.deploy_initial_database(deploy_xml_obj, admin_passwd)
else:
deploy_config.deploy_initial_database(deploy_xml_obj.deployerConfig\
, admin_passwd)
print('Successfully deployed a cloud from: %s' % options.deploy_config_file)
else:
cli = Cli(options)
cli.main(cmd)
if __name__ == '__main__':
main()
| {
"content_hash": "7982e8ec81443623bceb00a12d3cdf6e",
"timestamp": "",
"source": "github",
"line_count": 1073,
"max_line_length": 258,
"avg_line_length": 38.5358807082945,
"alnum_prop": 0.49488500326489154,
"repo_name": "ghxandsky/zstack-utility",
"id": "97018685dea0a69d30db36d192af3c082b3e8d4f",
"size": "41349",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zstackcli/zstackcli/cli.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "AGS Script",
"bytes": "1147"
},
{
"name": "HTML",
"bytes": "4277"
},
{
"name": "Puppet",
"bytes": "10604"
},
{
"name": "Python",
"bytes": "1507292"
},
{
"name": "Shell",
"bytes": "188218"
}
],
"symlink_target": ""
} |
"""Basic checks for HomeKit alarm_control_panel."""
from aiohomekit.model.characteristics import CharacteristicsTypes
from aiohomekit.model.services import ServicesTypes
from homeassistant.helpers import entity_registry as er
from .common import get_next_aid, setup_test_component
def create_security_system_service(accessory):
    """Define security-system characteristics as per page 219 of the HAP spec."""
service = accessory.add_service(ServicesTypes.SECURITY_SYSTEM)
cur_state = service.add_char(CharacteristicsTypes.SECURITY_SYSTEM_STATE_CURRENT)
cur_state.value = 0
targ_state = service.add_char(CharacteristicsTypes.SECURITY_SYSTEM_STATE_TARGET)
targ_state.value = 0
# According to the spec, a battery-level characteristic is normally
    # part of a separate service. However, as the code was written (which
    # predates this test), the battery level has to be part of the security
    # system service, as it is here.
targ_state = service.add_char(CharacteristicsTypes.BATTERY_LEVEL)
targ_state.value = 50
async def test_switch_change_alarm_state(hass, utcnow):
    """Test that we can arm and disarm a HomeKit alarm."""
helper = await setup_test_component(hass, create_security_system_service)
await hass.services.async_call(
"alarm_control_panel",
"alarm_arm_home",
{"entity_id": "alarm_control_panel.testdevice"},
blocking=True,
)
helper.async_assert_service_values(
ServicesTypes.SECURITY_SYSTEM,
{
CharacteristicsTypes.SECURITY_SYSTEM_STATE_TARGET: 0,
},
)
await hass.services.async_call(
"alarm_control_panel",
"alarm_arm_away",
{"entity_id": "alarm_control_panel.testdevice"},
blocking=True,
)
helper.async_assert_service_values(
ServicesTypes.SECURITY_SYSTEM,
{
CharacteristicsTypes.SECURITY_SYSTEM_STATE_TARGET: 1,
},
)
await hass.services.async_call(
"alarm_control_panel",
"alarm_arm_night",
{"entity_id": "alarm_control_panel.testdevice"},
blocking=True,
)
helper.async_assert_service_values(
ServicesTypes.SECURITY_SYSTEM,
{
CharacteristicsTypes.SECURITY_SYSTEM_STATE_TARGET: 2,
},
)
await hass.services.async_call(
"alarm_control_panel",
"alarm_disarm",
{"entity_id": "alarm_control_panel.testdevice"},
blocking=True,
)
helper.async_assert_service_values(
ServicesTypes.SECURITY_SYSTEM,
{
CharacteristicsTypes.SECURITY_SYSTEM_STATE_TARGET: 3,
},
)
async def test_switch_read_alarm_state(hass, utcnow):
"""Test that we can read the state of a HomeKit alarm accessory."""
helper = await setup_test_component(hass, create_security_system_service)
await helper.async_update(
ServicesTypes.SECURITY_SYSTEM,
{CharacteristicsTypes.SECURITY_SYSTEM_STATE_CURRENT: 0},
)
state = await helper.poll_and_get_state()
assert state.state == "armed_home"
assert state.attributes["battery_level"] == 50
await helper.async_update(
ServicesTypes.SECURITY_SYSTEM,
{CharacteristicsTypes.SECURITY_SYSTEM_STATE_CURRENT: 1},
)
state = await helper.poll_and_get_state()
assert state.state == "armed_away"
await helper.async_update(
ServicesTypes.SECURITY_SYSTEM,
{CharacteristicsTypes.SECURITY_SYSTEM_STATE_CURRENT: 2},
)
state = await helper.poll_and_get_state()
assert state.state == "armed_night"
await helper.async_update(
ServicesTypes.SECURITY_SYSTEM,
{CharacteristicsTypes.SECURITY_SYSTEM_STATE_CURRENT: 3},
)
state = await helper.poll_and_get_state()
assert state.state == "disarmed"
await helper.async_update(
ServicesTypes.SECURITY_SYSTEM,
{CharacteristicsTypes.SECURITY_SYSTEM_STATE_CURRENT: 4},
)
state = await helper.poll_and_get_state()
assert state.state == "triggered"
async def test_migrate_unique_id(hass, utcnow):
    """Test that we can migrate an alarm_control_panel unique id."""
entity_registry = er.async_get(hass)
aid = get_next_aid()
alarm_control_panel_entry = entity_registry.async_get_or_create(
"alarm_control_panel",
"homekit_controller",
f"homekit-00:00:00:00:00:00-{aid}-8",
)
await setup_test_component(hass, create_security_system_service)
assert (
entity_registry.async_get(alarm_control_panel_entry.entity_id).unique_id
== f"00:00:00:00:00:00_{aid}_8"
)
| {
"content_hash": "8ea16e804752a71c3ce36f2f85c097be",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 84,
"avg_line_length": 33,
"alnum_prop": 0.6675324675324675,
"repo_name": "nkgilley/home-assistant",
"id": "2c2ff92ccb64cab967bd2d13268a20d0103698f0",
"size": "4620",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/components/homekit_controller/test_alarm_control_panel.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "51597279"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
"""
Starts a service that scans at intervals for new devices.
Emits EVENT_PLATFORM_DISCOVERED whenever a new service has been discovered.
Knows which components handle certain types and makes sure they are
loaded before EVENT_PLATFORM_DISCOVERED is fired.
"""
import logging
import threading
from homeassistant import bootstrap
from homeassistant.const import (
ATTR_DISCOVERED, ATTR_SERVICE, EVENT_HOMEASSISTANT_START,
EVENT_PLATFORM_DISCOVERED)
DOMAIN = "discovery"
REQUIREMENTS = ['netdisco==0.5.5']
SCAN_INTERVAL = 300 # seconds
SERVICE_WEMO = 'belkin_wemo'
SERVICE_HUE = 'philips_hue'
SERVICE_CAST = 'google_cast'
SERVICE_NETGEAR = 'netgear_router'
SERVICE_SONOS = 'sonos'
SERVICE_PLEX = 'plex_mediaserver'
SERVICE_SQUEEZEBOX = 'logitech_mediaserver'
SERVICE_HANDLERS = {
SERVICE_WEMO: "wemo",
SERVICE_CAST: "media_player",
SERVICE_HUE: "light",
SERVICE_NETGEAR: 'device_tracker',
SERVICE_SONOS: 'media_player',
SERVICE_PLEX: 'media_player',
SERVICE_SQUEEZEBOX: 'media_player',
}
def listen(hass, service, callback):
    """Set up a listener for discovery of a specific service.
Service can be a string or a list/tuple.
"""
if isinstance(service, str):
service = (service,)
else:
service = tuple(service)
def discovery_event_listener(event):
"""Listen for discovery events."""
if event.data[ATTR_SERVICE] in service:
callback(event.data[ATTR_SERVICE], event.data.get(ATTR_DISCOVERED))
hass.bus.listen(EVENT_PLATFORM_DISCOVERED, discovery_event_listener)
def discover(hass, service, discovered=None, component=None, hass_config=None):
"""Fire discovery event. Can ensure a component is loaded."""
if component is not None:
bootstrap.setup_component(hass, component, hass_config)
data = {
ATTR_SERVICE: service
}
if discovered is not None:
data[ATTR_DISCOVERED] = discovered
hass.bus.fire(EVENT_PLATFORM_DISCOVERED, data)
def setup(hass, config):
"""Start a discovery service."""
logger = logging.getLogger(__name__)
from netdisco.service import DiscoveryService
# Disable zeroconf logging, it spams
logging.getLogger('zeroconf').setLevel(logging.CRITICAL)
lock = threading.Lock()
def new_service_listener(service, info):
"""Called when a new service is found."""
with lock:
logger.info("Found new service: %s %s", service, info)
component = SERVICE_HANDLERS.get(service)
# We do not know how to handle this service.
if not component:
return
# This component cannot be setup.
if not bootstrap.setup_component(hass, component, config):
return
hass.bus.fire(EVENT_PLATFORM_DISCOVERED, {
ATTR_SERVICE: service,
ATTR_DISCOVERED: info
})
# pylint: disable=unused-argument
def start_discovery(event):
"""Start discovering."""
netdisco = DiscoveryService(SCAN_INTERVAL)
netdisco.add_listener(new_service_listener)
netdisco.start()
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, start_discovery)
return True
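# Usage sketch (illustrative only, not part of this module):
#
#   from homeassistant.components import discovery
#
#   def setup(hass, config):
#       def device_discovered(service, info):
#           """Handle a newly discovered device; info is the discovery payload."""
#           ...
#
#       discovery.listen(hass, discovery.SERVICE_SONOS, device_discovered)
#       return True
#
# A platform that finds a device on its own can announce it with
# discovery.discover(hass, SERVICE_NAME, discovered_info, component, config).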
| {
"content_hash": "3598e28ef475e207b128a353b2b574ec",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 79,
"avg_line_length": 28.42105263157895,
"alnum_prop": 0.6679012345679012,
"repo_name": "aoakeson/home-assistant",
"id": "2d58c54ac0d6b7e97452f9ebc7ea0651fa04b1df",
"size": "3240",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/discovery.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1510170"
},
{
"name": "Python",
"bytes": "1994353"
},
{
"name": "Shell",
"bytes": "3570"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from inspect import isclass
from celery.datastructures import AttributeDict
from django.utils.translation import ugettext_lazy as _
__all__ = ('LOG', 'LOG_BY_ID', 'LOG_KEEP',)
class _LOG(object):
action_class = None
class CREATE_ADDON(_LOG):
id = 1
action_class = 'add'
format = _(u'{addon} was created.')
keep = True
class EDIT_PROPERTIES(_LOG):
""" Expects: addon """
id = 2
action_class = 'edit'
format = _(u'{addon} properties edited.')
class EDIT_DESCRIPTIONS(_LOG):
id = 3
action_class = 'edit'
format = _(u'{addon} description edited.')
class EDIT_CATEGORIES(_LOG):
id = 4
action_class = 'edit'
format = _(u'Categories edited for {addon}.')
class ADD_USER_WITH_ROLE(_LOG):
id = 5
action_class = 'add'
format = _(u'{0.name} ({1}) added to {addon}.')
keep = True
class REMOVE_USER_WITH_ROLE(_LOG):
id = 6
action_class = 'delete'
# L10n: {0} is the user being removed, {1} is their role.
format = _(u'{0.name} ({1}) removed from {addon}.')
keep = True
class EDIT_CONTRIBUTIONS(_LOG):
id = 7
action_class = 'edit'
format = _(u'Contributions for {addon}.')
class USER_DISABLE(_LOG):
id = 8
format = _(u'{addon} disabled.')
keep = True
class USER_ENABLE(_LOG):
id = 9
format = _(u'{addon} enabled.')
keep = True
class CHANGE_STATUS(_LOG):
id = 12
# L10n: {0} is the status
format = _(u'{addon} status changed to {0}.')
keep = True
class ADD_VERSION(_LOG):
id = 16
action_class = 'add'
format = _(u'{version} added to {addon}.')
keep = True
class EDIT_VERSION(_LOG):
id = 17
action_class = 'edit'
format = _(u'{version} edited for {addon}.')
class DELETE_VERSION(_LOG):
id = 18
action_class = 'delete'
# Note, {0} is a string not a version since the version is deleted.
# L10n: {0} is the version number
format = _(u'Version {0} deleted from {addon}.')
keep = True
class ADD_FILE_TO_VERSION(_LOG):
id = 19
action_class = 'add'
format = _(u'File {0.name} added to {version} of {addon}.')
class DELETE_FILE_FROM_VERSION(_LOG):
"""
Expecting: addon, filename, version
Because the file is being deleted, filename and version
should be strings and not the object.
"""
id = 20
action_class = 'delete'
format = _(u'File {0} deleted from {version} of {addon}.')
class APPROVE_VERSION(_LOG):
id = 21
action_class = 'approve'
format = _(u'{addon} {version} approved.')
short = _(u'Approved')
keep = True
review_email_user = True
review_queue = True
editor_review_action = True
class PRELIMINARY_VERSION(_LOG):
id = 42
action_class = 'approve'
format = _(u'{addon} {version} given preliminary review.')
short = _(u'Preliminarily approved')
keep = True
review_email_user = True
review_queue = True
editor_review_action = True
class REJECT_VERSION(_LOG):
# takes add-on, version, reviewtype
id = 43
action_class = 'reject'
format = _(u'{addon} {version} rejected.')
short = _(u'Rejected')
keep = True
review_email_user = True
review_queue = True
editor_review_action = True
class RETAIN_VERSION(_LOG):
# takes add-on, version, reviewtype
id = 22
format = _(u'{addon} {version} retained.')
short = _(u'Retained')
keep = True
review_email_user = True
review_queue = True
editor_review_action = True
class ESCALATE_VERSION(_LOG):
# takes add-on, version, reviewtype
id = 23
format = _(u'{addon} {version} escalated.')
short = _(u'Super review requested')
keep = True
review_email_user = True
review_queue = True
hide_developer = True
class REQUEST_VERSION(_LOG):
# takes add-on, version, reviewtype
id = 24
format = _(u'{addon} {version} review requested.')
short = _(u'Review requested')
keep = True
review_email_user = True
review_queue = True
class REQUEST_INFORMATION(_LOG):
id = 44
format = _(u'{addon} {version} more information requested.')
short = _(u'More information requested')
keep = True
review_email_user = True
review_queue = True
editor_review_action = True
class REQUEST_SUPER_REVIEW(_LOG):
id = 45
format = _(u'{addon} {version} super review requested.')
short = _(u'Super review requested')
keep = True
review_queue = True
sanitize = _(u'The addon has been flagged for Admin Review. It\'s still '
u'in our review queue, but it will need to be checked by one '
u'of our admin reviewers. The review might take longer than '
u'usual.')
editor_review_action = True
class COMMENT_VERSION(_LOG):
id = 49
format = _(u'Comment on {addon} {version}.')
short = _(u'Comment')
keep = True
review_queue = True
hide_developer = True
editor_review_action = True
class ADD_TAG(_LOG):
id = 25
action_class = 'tag'
format = _(u'{tag} added to {addon}.')
class REMOVE_TAG(_LOG):
id = 26
action_class = 'tag'
format = _(u'{tag} removed from {addon}.')
class ADD_TO_COLLECTION(_LOG):
id = 27
action_class = 'collection'
format = _(u'{addon} added to {collection}.')
class REMOVE_FROM_COLLECTION(_LOG):
id = 28
action_class = 'collection'
format = _(u'{addon} removed from {collection}.')
class ADD_REVIEW(_LOG):
id = 29
action_class = 'review'
format = _(u'{review} for {addon} written.')
# TODO(davedash): Add these when we do the admin site
class ADD_RECOMMENDED_CATEGORY(_LOG):
id = 31
action_class = 'edit'
# L10n: {0} is a category name.
format = _(u'{addon} featured in {0}.')
class REMOVE_RECOMMENDED_CATEGORY(_LOG):
id = 32
action_class = 'edit'
# L10n: {0} is a category name.
format = _(u'{addon} no longer featured in {0}.')
class ADD_RECOMMENDED(_LOG):
id = 33
format = _(u'{addon} is now featured.')
keep = True
class REMOVE_RECOMMENDED(_LOG):
id = 34
format = _(u'{addon} is no longer featured.')
keep = True
class ADD_APPVERSION(_LOG):
id = 35
action_class = 'add'
# L10n: {0} is the application, {1} is the version of the app
format = _(u'{0} {1} added.')
class CHANGE_USER_WITH_ROLE(_LOG):
""" Expects: author.user, role, addon """
id = 36
# L10n: {0} is a user, {1} is their role
format = _(u'{0.name} role changed to {1} for {addon}.')
keep = True
class CHANGE_LICENSE(_LOG):
""" Expects: license, addon """
id = 37
action_class = 'edit'
format = _(u'{addon} is now licensed under {0.name}.')
class CHANGE_POLICY(_LOG):
id = 38
action_class = 'edit'
format = _(u'{addon} policy changed.')
class CHANGE_ICON(_LOG):
id = 39
action_class = 'edit'
format = _(u'{addon} icon changed.')
class APPROVE_REVIEW(_LOG):
id = 40
action_class = 'approve'
format = _(u'{review} for {addon} approved.')
editor_format = _(u'{user} approved {review} for {addon}.')
keep = True
editor_event = True
class DELETE_REVIEW(_LOG):
"""Requires review.id and add-on objects."""
id = 41
action_class = 'review'
format = _(u'Review {review} for {addon} deleted.')
editor_format = _(u'{user} deleted {review} for {addon}.')
keep = True
editor_event = True
class MAX_APPVERSION_UPDATED(_LOG):
id = 46
format = _(u'Application max version for {version} updated.')
class BULK_VALIDATION_EMAILED(_LOG):
id = 47
format = _(u'Authors emailed about compatibility of {version}.')
class BULK_VALIDATION_USER_EMAILED(_LOG):
id = 130
format = _(u'Email sent to Author about add-on compatibility.')
class CHANGE_PASSWORD(_LOG):
id = 48
format = _(u'Password changed.')
class APPROVE_VERSION_WAITING(_LOG):
id = 53
action_class = 'approve'
format = _(u'{addon} {version} approved but waiting to be made public.')
short = _(u'Approved but waiting')
keep = True
review_email_user = True
review_queue = True
class USER_EDITED(_LOG):
id = 60
format = _(u'Account updated.')
class CUSTOM_TEXT(_LOG):
id = 98
format = '{0}'
class CUSTOM_HTML(_LOG):
id = 99
format = '{0}'
class OBJECT_ADDED(_LOG):
id = 100
format = _(u'Created: {0}.')
admin_event = True
class OBJECT_EDITED(_LOG):
id = 101
format = _(u'Edited field: {2} set to: {0}.')
admin_event = True
class OBJECT_DELETED(_LOG):
id = 102
format = _(u'Deleted: {1}.')
admin_event = True
class ADMIN_USER_EDITED(_LOG):
id = 103
format = _(u'User {user} edited, reason: {1}')
admin_event = True
class ADMIN_USER_ANONYMIZED(_LOG):
id = 104
format = _(u'User {user} anonymized.')
admin_event = True
class ADMIN_USER_RESTRICTED(_LOG):
id = 105
format = _(u'User {user} restricted.')
admin_event = True
class ADMIN_VIEWED_LOG(_LOG):
id = 106
format = _(u'Admin {0} viewed activity log for {user}.')
admin_event = True
class EDIT_REVIEW(_LOG):
id = 107
action_class = 'review'
format = _(u'{review} for {addon} updated.')
class THEME_REVIEW(_LOG):
id = 108
action_class = 'review'
format = _(u'{addon} reviewed.')
class GROUP_USER_ADDED(_LOG):
id = 120
action_class = 'access'
format = _(u'User {0.name} added to {group}.')
keep = True
admin_event = True
class GROUP_USER_REMOVED(_LOG):
id = 121
action_class = 'access'
format = _(u'User {0.name} removed from {group}.')
keep = True
admin_event = True
class ADDON_UNLISTED(_LOG):
id = 128
format = _(u'{addon} unlisted.')
keep = True
class BETA_SIGNED_VALIDATION_PASSED(_LOG):
id = 131
format = _(u'{file} was signed.')
keep = True
class BETA_SIGNED_VALIDATION_FAILED(_LOG):
id = 132
format = _(u'{file} was signed.')
keep = True
class DELETE_ADDON(_LOG):
id = 133
action_class = 'delete'
    # L10n: {0} is the add-on id, {1} is the add-on GUID.
format = _(u'Addon id {0} with GUID {1} has been deleted')
keep = True
class EXPERIMENT_SIGNED(_LOG):
id = 134
format = _(u'{file} was signed.')
keep = True
class UNLISTED_SIGNED_VALIDATION_PASSED(_LOG):
id = 135
format = _(u'{file} was signed.')
keep = True
class UNLISTED_SIGNED_VALIDATION_FAILED(_LOG):
id = 136
format = _(u'{file} was signed.')
keep = True
class UNLISTED_SIDELOAD_SIGNED_VALIDATION_PASSED(_LOG):
id = 137
format = _(u'{file} was signed.')
keep = True
class UNLISTED_SIDELOAD_SIGNED_VALIDATION_FAILED(_LOG):
id = 138
format = _(u'{file} was signed.')
keep = True
class PRELIMINARY_ADDON_MIGRATED(_LOG):
id = 139
format = _(u'{addon} migrated from preliminary.')
keep = True
review_queue = True
class DEVELOPER_REPLY_VERSION(_LOG):
id = 140
format = _(u'Reply by developer on {addon} {version}.')
short = _(u'Developer Reply')
keep = True
review_queue = True
class REVIEWER_REPLY_VERSION(_LOG):
id = 141
format = _(u'Reply by reviewer on {addon} {version}.')
short = _(u'Reviewer Reply')
keep = True
review_queue = True
class APPROVAL_NOTES_CHANGED(_LOG):
id = 142
format = _(u'Approval notes changed for {addon} {version}.')
short = _(u'Approval notes changed')
keep = True
review_queue = True
class SOURCE_CODE_UPLOADED(_LOG):
id = 143
format = _(u'Source code uploaded for {addon} {version}.')
short = _(u'Source code uploaded')
keep = True
review_queue = True
LOGS = [x for x in vars().values()
if isclass(x) and issubclass(x, _LOG) and x != _LOG]
# Make sure there's no duplicate IDs.
assert len(LOGS) == len(set(log.id for log in LOGS))
LOG_BY_ID = dict((l.id, l) for l in LOGS)
LOG = AttributeDict((l.__name__, l) for l in LOGS)
LOG_ADMINS = [l.id for l in LOGS if hasattr(l, 'admin_event')]
LOG_KEEP = [l.id for l in LOGS if hasattr(l, 'keep')]
LOG_EDITORS = [l.id for l in LOGS if hasattr(l, 'editor_event')]
LOG_REVIEW_QUEUE = [l.id for l in LOGS if hasattr(l, 'review_queue')]
LOG_EDITOR_REVIEW_ACTION = [
l.id for l in LOGS if hasattr(l, 'editor_review_action')]
# Is the user emailed the message?
LOG_REVIEW_EMAIL_USER = [l.id for l in LOGS if hasattr(l, 'review_email_user')]
# Logs *not* to show to the developer.
LOG_HIDE_DEVELOPER = [l.id for l in LOGS
if (getattr(l, 'hide_developer', False) or
l.id in LOG_ADMINS)]
# Review Queue logs to show to developer (i.e. hiding admin/private)
LOG_REVIEW_QUEUE_DEVELOPER = list(set(LOG_REVIEW_QUEUE) -
set(LOG_HIDE_DEVELOPER))
def log(action, *args, **kw):
"""
e.g. amo.log(amo.LOG.CREATE_ADDON, []),
amo.log(amo.LOG.ADD_FILE_TO_VERSION, file, version)
"""
from olympia.access.models import Group
from olympia.addons.models import Addon
from olympia.amo import get_user, logger_log
from olympia.devhub.models import (
ActivityLog, AddonLog, CommentLog, GroupLog, UserLog, VersionLog)
from olympia.users.models import UserProfile
from olympia.versions.models import Version
user = kw.get('user', get_user())
if not user:
logger_log.warning('Activity log called with no user: %s' % action.id)
return
al = ActivityLog(user=user, action=action.id)
al.arguments = args
if 'details' in kw:
al.details = kw['details']
al.save()
if 'details' in kw and 'comments' in al.details:
CommentLog(comments=al.details['comments'], activity_log=al).save()
# TODO(davedash): post-remora this may not be necessary.
if 'created' in kw:
al.created = kw['created']
# Double save necessary since django resets the created date on save.
al.save()
for arg in args:
if isinstance(arg, tuple):
if arg[0] == Addon:
AddonLog(addon_id=arg[1], activity_log=al).save()
elif arg[0] == Version:
VersionLog(version_id=arg[1], activity_log=al).save()
elif arg[0] == UserProfile:
UserLog(user_id=arg[1], activity_log=al).save()
elif arg[0] == Group:
GroupLog(group_id=arg[1], activity_log=al).save()
elif isinstance(arg, Addon):
AddonLog(addon=arg, activity_log=al).save()
elif isinstance(arg, Version):
VersionLog(version=arg, activity_log=al).save()
elif isinstance(arg, UserProfile):
# Index by any user who is mentioned as an argument.
UserLog(activity_log=al, user=arg).save()
elif isinstance(arg, Group):
GroupLog(group=arg, activity_log=al).save()
# Index by every user
UserLog(activity_log=al, user=user).save()
return al
| {
"content_hash": "d53b070259dc6b105654214ee394e54d",
"timestamp": "",
"source": "github",
"line_count": 621,
"max_line_length": 79,
"avg_line_length": 24.10950080515298,
"alnum_prop": 0.6045952444563185,
"repo_name": "mstriemer/addons-server",
"id": "bc58282c20c50e909417cb97249a3ac5e7beab77",
"size": "15080",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/olympia/amo/log.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "834050"
},
{
"name": "HTML",
"bytes": "729865"
},
{
"name": "JavaScript",
"bytes": "1324474"
},
{
"name": "Makefile",
"bytes": "7937"
},
{
"name": "PLSQL",
"bytes": "74"
},
{
"name": "PLpgSQL",
"bytes": "2381"
},
{
"name": "Python",
"bytes": "4348593"
},
{
"name": "SQLPL",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "9643"
},
{
"name": "Smarty",
"bytes": "1824"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.views.generic import TemplateView
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', # noqa
TemplateView.as_view(template_name='pages/home.html'),
name="home"),
url(r'^about/$',
TemplateView.as_view(template_name='pages/about.html'),
name="about"),
# Admin pages and documentation generator
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^admin/', include(admin.site.urls)),
# User management
url(r'^users/', include("users.urls", namespace="users")),
url(r'^accounts/', include('allauth.urls')),
# Uncomment the next line to enable avatars
url(r'^avatar/', include('avatar.urls')),
# Your stuff: custom urls go here
url(r'^registration/', include('registration.urls')),
url(r'^capitalism/', include('capitalism.urls'))
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| {
"content_hash": "281a2b02b23a546bbf4cc8acb020b82b",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 66,
"avg_line_length": 33.542857142857144,
"alnum_prop": 0.686541737649063,
"repo_name": "eldruz/tournament_registration",
"id": "20db8f0711a894a70dcc15d0fb79a93c2b892618",
"size": "1198",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tournament_registration/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1210"
},
{
"name": "JavaScript",
"bytes": "44"
},
{
"name": "Python",
"bytes": "100432"
},
{
"name": "Shell",
"bytes": "5129"
}
],
"symlink_target": ""
} |
from app import app_factory
if __name__ == '__main__':
app_factory().run()
| {
"content_hash": "cce077a037d04efa2fa2dcba682dad8d",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 27,
"avg_line_length": 16.2,
"alnum_prop": 0.5679012345679012,
"repo_name": "comick/barduino",
"id": "ef62521583f4c76c76494078da022a2ace6a1acb",
"size": "128",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "run_app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "61962"
},
{
"name": "Python",
"bytes": "48678"
},
{
"name": "Shell",
"bytes": "118"
}
],
"symlink_target": ""
} |
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
#
class City(models.Model):
city = models.CharField(max_length=255, unique=True)
created_at = models.DateTimeField(auto_now_add=True, editable=False)
updated_at = models.DateTimeField(auto_now=True, editable=False)
def __unicode__(self):
return self.city
__str__ = __unicode__
class AgeRange(models.Model):
age = models.CharField(max_length=10, unique=True)
created_at = models.DateTimeField(auto_now_add=True, editable=False)
updated_at = models.DateTimeField(auto_now=True, editable=False)
def __unicode__(self):
return self.age
__str__ = __unicode__
@staticmethod
def get_or_add(min=0, max=1000):
if min == 0:
return AgeRange.objects.get_or_create(age=(str(max) + "+"))
elif max == 1000:
return AgeRange.objects.get_or_create(age=(">" + str(min)))
elif min != 0 and max != 1000:
return AgeRange.objects.get_or_create(age=(str(min) + "-" + str(max)))
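    # Illustrative labels produced by get_or_add above (the argument values are
    # made up for illustration):
    #   get_or_add(min=18, max=24) -> (AgeRange with age "18-24", created_flag)
    #   get_or_add(min=21)         -> max defaults to 1000, so age ">21"
    #   get_or_add(max=17)         -> min defaults to 0, so age "17+"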
class Interest(models.Model):
interest = models.CharField(max_length=255, unique=True)
created_at = models.DateTimeField(auto_now_add=True, editable=False)
updated_at = models.DateTimeField(auto_now=True, editable=False)
def __unicode__(self):
return self.interest
__str__ = __unicode__
@staticmethod
def bulk_get_or_create(interests):
interests = [i.lower() for i in interests]
all_interests = Interest.objects.filter(interest__in=interests).values_list("interest", flat=True)
new_interests = [Interest(interest=i) for i in interests if i not in all_interests]
Interest.objects.bulk_create(new_interests)
return Interest.objects.filter(interest__in=interests)
class Userdata(models.Model):
user = models.OneToOneField(User)
name = models.CharField(max_length=255)
quora_name = models.CharField(max_length=255)
ansyahoo = models.CharField(max_length=255)
CHOICES = (
(True, "Male"),
(False, "Female")
)
gender = models.BooleanField(choices=CHOICES)
city = models.ForeignKey(City)
age = models.ForeignKey(AgeRange)
interests = models.ManyToManyField(Interest, through="UserInterest")
created_at = models.DateTimeField(auto_now_add=True, editable=False)
updated_at = models.DateTimeField(auto_now=True, editable=False)
def widget(self):
a = [i.interest for i in self.interests.all()[:2]]
random_likes = ", ".join(a[:2])
return '<br><div class="pure-g-r" style="background: #ededed; min-height: 112px"><div class="pure-u-1" style="margin: 4px"><div class="pure-u-1-3"><img src="https://graph.facebook.com/' + self.user.username +'/picture?type=normal" alt=""></div><div class="pure-u-1-2"><br><a href="user/' + str(self.id) + '">' + self.name + '</a><br><small>' + self.city.city + '<br></small>likes ' + random_likes + '</div></div></div>'
def small_widget(self):
a = [i.interest for i in self.interests.all()[:2]]
random_likes = ", ".join(a[:2])
return '<div class="pure-u-1-2" style="min-height: 112px"><div class="pure-u-1" style="margin: 4px"><div class="pure-u-1-3"><img src="https://graph.facebook.com/' + self.user.username +'/picture?type=normal" alt=""></div><div class="pure-u-1-2"><br><a href="user/' + str(self.id) + '">' + self.name + '</a><br><small>' + self.city.city + '<br></small>likes ' + random_likes + '</div></div></div>'
@staticmethod
def create_user(fb_data, quora_name, yahoo, interests):
try:
user = User.objects.create_user(fb_data["username"], fb_data["username"] + "@facebook.com", "pinterested")
except:
user = User.objects.get(username=fb_data["username"])
age_dict = {
"min": fb_data["age_range"]["min"] if fb_data["age_range"].has_key("min") else 0,
"max": fb_data["age_range"]["max"] if fb_data["age_range"].has_key("max") else 1000,
}
user_data_dict = {
"user": user,
"name": fb_data["name"],
"quora_name": quora_name,
"gender": True if fb_data["gender"] is "male" else "Female",
"city": City.objects.get_or_create(city=fb_data["location"]["name"])[0],
"ansyahoo": yahoo,
"age": AgeRange.get_or_add(**age_dict)[0]
}
userdata = Userdata.objects.create(**user_data_dict)
saved_interests = Interest.bulk_get_or_create(interests)
activities = [UserInterest(userdata=userdata, interest=s) for s in saved_interests]
UserInterest.objects.bulk_create(activities)
return user
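    # A minimal sketch of the fb_data shape that create_user expects, inferred from
    # the lookups above (all values here are made up for illustration):
    #   fb_data = {
    #       "username": "jdoe", "name": "Jane Doe", "gender": "female",
    #       "age_range": {"min": 21}, "location": {"name": "Pune"},
    #   }
    #   Userdata.create_user(fb_data, "Jane-Doe", "jdoe_yahoo", ["python", "django"])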
class UserInterest(models.Model):
interest = models.ForeignKey(Interest)
userdata = models.ForeignKey(Userdata)
weight = models.IntegerField(default=1)
created_at = models.DateTimeField(auto_now_add=True, editable=False)
updated_at = models.DateTimeField(auto_now=True, editable=False) | {
"content_hash": "3b530c1540ff1017cf45a792aa06daef",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 427,
"avg_line_length": 39.883720930232556,
"alnum_prop": 0.6171039844509232,
"repo_name": "amol-mandhane/konnactivity",
"id": "dee15ab45c5a6aa9a464bfa3a8424f94cf80b391",
"size": "5145",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "login/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62054"
}
],
"symlink_target": ""
} |
'''Slickbird backend tests'''
import os
import json
import tempfile
from slickbird.collectionjson import collectionjson
from slickbird import orm
from tornado.testing import gen_test
from . import base
pjoin = os.path.join
APP_ROOT = os.path.abspath(pjoin(os.path.dirname(__file__), '..'))
class TestSlickbirdBackend(base.TestSlickbirdBase):
@gen_test
def test_collectionjson(self):
yield self.collectionadd(
'dummy',
pjoin(APP_ROOT, 'tests/dummytest.dat'))
tmp = tempfile.NamedTemporaryFile(suffix='.json')
session = orm.make_session('sqlite:///' + self.db.name)()
cdb = session.query(orm.Collection)\
.first()
collectionjson(cdb, tmp)
tmp.flush()
tmp.seek(0)
json.loads(tmp.read().decode('utf-8'))
tmp.close()
| {
"content_hash": "ab55e082305f2479a85b4616901f4005",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 66,
"avg_line_length": 25.333333333333332,
"alnum_prop": 0.6423444976076556,
"repo_name": "lpenz/slickbird",
"id": "eb891724a1d6676135a4f8311b1a1c1a7d0a6e68",
"size": "836",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/backend_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "8388"
},
{
"name": "JavaScript",
"bytes": "6339"
},
{
"name": "Python",
"bytes": "47205"
}
],
"symlink_target": ""
} |
"""
WSGI config for pubmed_central_request project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
from os.path import abspath, dirname
from sys import path
SITE_ROOT = dirname(dirname(abspath(__file__)))
path.append(SITE_ROOT)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pubmed_central_request.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| {
"content_hash": "c6ab4e8dd2765b07cf078c6a44e045e8",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 82,
"avg_line_length": 26.238095238095237,
"alnum_prop": 0.7713248638838476,
"repo_name": "allnightdiner/pubmed_central_request",
"id": "de06b9abddca0f8ab09be985bc9058b90e4223c4",
"size": "551",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pubmed_central_request/wsgi.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "39"
},
{
"name": "HTML",
"bytes": "3908"
},
{
"name": "JavaScript",
"bytes": "45"
},
{
"name": "Python",
"bytes": "17275"
}
],
"symlink_target": ""
} |
import pandas
import pyomo
import pyomo.opt
import pyomo.environ as pe
import logging
class MaxFlowInterdiction:
"""A class to compute max-flow interdictions."""
def __init__(self, nodefile, arcfile, attacks=0):
"""
        All the files are CSVs with columns described below. attacks is the interdiction budget, i.e. the number of arcs that may be attacked.
- nodefile:
Node
Every node must appear as a line in the nodefile. There are two special nodes called 'Start' and 'End' that define the source and sink of the max-flow problem.
- arcfile:
StartNode,EndNode,Capacity,Attackable
Every arc must appear in the arcfile. The data also describes the arc's capacity and whether we can attack this arc.
"""
# Read in the node_data
self.node_data = pandas.read_csv(nodefile)
self.node_data.set_index(['Node'], inplace=True)
self.node_data.sort_index(inplace=True)
# Read in the arc_data
self.arc_data = pandas.read_csv(arcfile)
self.arc_data['xbar'] = 0
self.arc_data.set_index(['StartNode','EndNode'], inplace=True)
self.arc_data.sort_index(inplace=True)
self.attacks = attacks
self.node_set = self.node_data.index.unique()
self.arc_set = self.arc_data.index.unique()
self.createPrimal()
self.createInterdictionDual()
def createPrimal(self):
"""Create the primal pyomo model.
This is used to compute flows after interdiction. The interdiction is stored in arc_data.xbar."""
model = pe.ConcreteModel()
# Tell pyomo to read in dual-variable information from the solver
model.dual = pe.Suffix(direction=pe.Suffix.IMPORT)
# Add the sets
model.node_set = pe.Set( initialize=self.node_set )
model.edge_set = pe.Set( initialize=self.arc_set, dimen=2)
# Create the variables
model.y = pe.Var(model.edge_set, domain=pe.NonNegativeReals)
model.v = pe.Var(domain=pe.NonNegativeReals)
# Create the objective
def obj_rule(model):
return model.v - 1.1*sum( data['xbar']*model.y[e] for e,data in self.arc_data.iterrows())
model.OBJ = pe.Objective(rule=obj_rule, sense=pe.maximize)
# Create the constraints, one for each node
def flow_bal_rule(model, n):
tmp = self.arc_data.reset_index()
successors = tmp.ix[ tmp.StartNode == n, 'EndNode'].values
predecessors = tmp.ix[ tmp.EndNode == n, 'StartNode'].values
lhs = sum(model.y[(i,n)] for i in predecessors) - sum(model.y[(n,i)] for i in successors)
start_node = int(n == 'Start')
end_node = int(n == 'End')
rhs = 0 - model.v*(start_node) + model.v*(end_node)
constr = (lhs == rhs)
if isinstance(constr, bool):
return pe.Constraint.Skip
return constr
model.FlowBalance = pe.Constraint(model.node_set, rule=flow_bal_rule)
# Capacity constraints, one for each edge
def capacity_rule(model, i, j):
capacity = self.arc_data['Capacity'].get((i,j),-1)
if capacity < 0:
return pe.Constraint.Skip
return model.y[(i,j)] <= capacity
model.Capacity = pe.Constraint(model.edge_set, rule=capacity_rule)
# Store the model
self.primal = model
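    # For reference, the primal model built above is a standard max-flow LP with an
    # interdiction penalty (an informal reading of the code, not from the original docs):
    #   maximize   v - 1.1 * sum_{(i,j)} xbar_ij * y_ij
    #   subject to flow balance at every node (v leaves 'Start' and enters 'End')
    #              y_ij <= Capacity_ij on every arc with a finite capacity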
def createInterdictionDual(self):
# Create the model
model = pe.ConcreteModel()
# Add the sets
model.node_set = pe.Set( initialize=self.node_set )
model.edge_set = pe.Set( initialize=self.arc_set, dimen=2)
# Create the variables
model.rho = pe.Var(model.node_set, domain=pe.Reals)
model.pi = pe.Var(model.edge_set, domain=pe.NonNegativeReals)
model.x = pe.Var(model.edge_set, domain=pe.Binary)
# Create the objective
def obj_rule(model):
return sum(data['Capacity']*model.pi[e] for e,data in self.arc_data.iterrows() if data['Capacity']>=0)
model.OBJ = pe.Objective(rule=obj_rule, sense=pe.minimize)
# Create the constraints for y_ij
def edge_constraint_rule(model, i, j):
attackable = int(self.arc_data['Attackable'].get((i,j),0))
hasCap = int(self.arc_data['Capacity'].get((i,j),-1)>=0)
return model.rho[j] - model.rho[i] + model.pi[(i,j)]*hasCap >= 0 - 1.1*model.x[(i,j)]*attackable
model.DualEdgeConstraint = pe.Constraint(model.edge_set, rule=edge_constraint_rule)
# Set the x's for non-blockable arcs
def v_constraint_rule(model):
return model.rho['Start'] - model.rho['End'] == 1
model.VConstraint = pe.Constraint(rule=v_constraint_rule)
# Create the interdiction budget constraint
def block_limit_rule(model):
model.attacks = self.attacks
return pe.summation(model.x) <= model.attacks
model.BlockLimit = pe.Constraint(rule=block_limit_rule)
# Create, save the model
self.Idual = model
def solve(self, tee=False):
solver = pyomo.opt.SolverFactory('gurobi')
# Solve the dual first
self.Idual.BlockLimit.construct()
self.Idual.BlockLimit._constructed = False
del self.Idual.BlockLimit._data[None]
self.Idual.BlockLimit.reconstruct()
self.Idual.preprocess()
results = solver.solve(self.Idual, tee=tee, keepfiles=False, options_string="mip_tolerances_integrality=1e-9 mip_tolerances_mipgap=0")
# Check that we actually computed an optimal solution, load results
if (results.solver.status != pyomo.opt.SolverStatus.ok):
logging.warning('Check solver not ok?')
if (results.solver.termination_condition != pyomo.opt.TerminationCondition.optimal):
logging.warning('Check solver optimality?')
self.Idual.solutions.load_from(results)
# Now put interdictions into xbar and solve primal
for e in self.arc_data.index:
self.arc_data.ix[e,'xbar'] = self.Idual.x[e].value
self.primal.OBJ.construct()
self.primal.OBJ._constructed = False
self.primal.OBJ._init_sense = pe.maximize
del self.primal.OBJ._data[None]
self.primal.OBJ.reconstruct()
self.primal.preprocess()
results = solver.solve(self.primal, tee=tee, keepfiles=False, options_string="mip_tolerances_integrality=1e-9 mip_tolerances_mipgap=0")
# Check that we actually computed an optimal solution, load results
if (results.solver.status != pyomo.opt.SolverStatus.ok):
logging.warning('Check solver not ok?')
if (results.solver.termination_condition != pyomo.opt.TerminationCondition.optimal):
logging.warning('Check solver optimality?')
self.primal.solutions.load_from(results)
def printSolution(self):
print()
print('Using %d attacks:'%self.attacks)
print()
edges = sorted(self.arc_set)
for e in edges:
if self.Idual.x[e].value > 0:
print('Interdict arc %s -> %s'%(str(e[0]), str(e[1])))
print()
for e0,e1 in self.arc_set:
flow = self.primal.y[(e0,e1)].value
if flow > 0:
print('Flow on arc %s -> %s: %.2f'%(str(e0), str(e1), flow))
print()
print('----------')
print('Total flow = %.2f (primal) %.2f (dual)'%(self.primal.OBJ(), self.Idual.OBJ()))
########################
# Now lets do something
########################
if __name__ == '__main__':
m = MaxFlowInterdiction('sample_nodes_data.csv', 'sample_arcs_data.csv')
m.solve()
m.printSolution()
m.attacks = 1
m.solve()
m.printSolution()
m.attacks = 2
m.solve()
m.printSolution()
| {
"content_hash": "fb4b27b516b393bf3999c721eb4fdd8d",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 168,
"avg_line_length": 37.804761904761904,
"alnum_prop": 0.6005794180627283,
"repo_name": "Pyomo/PyomoGallery",
"id": "96743f38458d4abf00ba54204fea43d4a1d4a05d",
"size": "7939",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "network_interdiction/max_flow/max_flow_interdict.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "143903"
},
{
"name": "Python",
"bytes": "66340"
}
],
"symlink_target": ""
} |
"""This module contains the User Model."""
from google.appengine.api import users
from google.appengine.ext import db
from django.utils.translation import ugettext
import soc.models.linkable
class User(soc.models.linkable.Linkable):
"""A user and associated login credentials, the fundamental identity entity.
User is a separate Model class from Person because the same login
ID may be used to, for example, serve as Contributor in one Program
and a Reviewer in another.
Also, this allows a Person to, in the future, re-associate that
Person entity with a different Google Account if necessary.
A User entity participates in the following relationships implemented
as a db.ReferenceProperty elsewhere in another db.Model:
persons) a 1:many relationship of Person entities identified by the
User. This relation is implemented as the 'persons' back-reference
Query of the Person model 'user' reference.
documents) a 1:many relationship of Document entities identified by the
      User. This relation is implemented as the 'documents' back-reference
Query of the Document model 'user' reference.
responses) a 1:many relationship of Response entities submitted by the
User. This relation is implemented as the 'responses' back-reference
Query of the Response model 'respondent' reference.
"""
@property
def url_id(self):
"""URL ID property.
It provides a unique string identifier of the user that is to be used
as a part of various URLs. The returned string is URL safe and can be
validated by linkable.LINK_ID_REGEX regular expression.
Returns:
a string containing URL ID property
"""
return self.key().name()
#: A Google Account, which also provides a "private" email address.
#: This email address is only used in an automated fashion by
#: Melange web applications and is not made visible to other users
#: of any Melange application.
account = db.UserProperty(required=True,
verbose_name=ugettext('User account'))
account.help_text = ugettext(
'A valid Google Account.')
#: Google Account unique user id
user_id = db.StringProperty(required=False)
#: A list (possibly empty) of former Google Accounts associated with
#: this User.
former_accounts = db.ListProperty(users.User)
#: Required field storing publicly-displayed name. Can be a real name
#: (though this is not recommended), or a nick name or some other public
#: alias. Public names can be any valid UTF-8 text.
name = db.StringProperty(
default='', required=False, verbose_name=ugettext('Public name'))
name.help_text = ugettext(
'Human-readable name (UTF-8) that will be displayed publicly on the'
' site.')
#: field storing whether User is a Developer with site-wide access.
is_developer = db.BooleanProperty(default=False,
verbose_name=ugettext('Is Developer'))
is_developer.help_text = ugettext(
'Field used to indicate user with site-wide Developer access.')
#: List of Sponsors that the user is a host for
host_for = db.ListProperty(item_type=db.Key, default=[])
host_for.help_text = ugettext('List of program owners which '
'the user is a program administrator for.')
#: field storing the user preference as whether to disable TinyMCE
disable_tinymce = db.BooleanProperty(default=False,
verbose_name=ugettext('Disable TinyMCE'))
disable_tinymce.help_text = ugettext(
'Disable the TinyMCE editor.')
#: field storing the user preference as to how many rows to show
nr_list_rows = db.IntegerProperty(
required=False, verbose_name=ugettext('Number of list rows'))
nr_list_rows.help_text = ugettext(
'Controls how many rows will be shown per list by default. '
'Defaults to 5 if not set.')
  #: field storing whether the User has agreed to the site-wide Terms of Service.
#: (Not a required field because the Terms of Service might not be present
#: when the first User profile is created when bootstrapping the site.)
agreed_to_tos = db.BooleanProperty(required=False, default=False,
verbose_name=ugettext('I Agree to the Terms of Service'))
agreed_to_tos.help_text = ugettext(
'Indicates whether the user agreed to the site-wide Terms of Service.')
#: field storing when the User has agreed to the site-wide Terms of Service.
#: (Not a required field because the Terms of Service might not be present
#: when the first User profile is created when bootstrapping the site.)
agreed_to_tos_on = db.DateTimeProperty(required=False, default=None,
verbose_name=ugettext('Has agreed to the Terms of Service on'))
agreed_to_tos_on.help_text = ugettext(
'Indicates when the user agreed to the site-wide Terms of Service.')
#: field storing the status of this User.
#: valid: Is just that, it's a valid User.
#: invalid: This means that this User has been excluded
#: from using the website.
status = db.StringProperty(required=True, default='valid',
choices=['valid', 'invalid'],)
status.help_text = ugettext(
'Indicates the status of the User. Invalid means that this account '
'has been excluded from using the website.')
| {
"content_hash": "33e189010d5f7031036c4337d020ba44",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 80,
"avg_line_length": 42.04838709677419,
"alnum_prop": 0.724012274645186,
"repo_name": "rhyolight/nupic.son",
"id": "b550d05e59ca2cac6d39575b084709747ab3138c",
"size": "5797",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/soc/models/user.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "681301"
},
{
"name": "JavaScript",
"bytes": "392600"
},
{
"name": "PHP",
"bytes": "217376"
},
{
"name": "Python",
"bytes": "5162564"
}
],
"symlink_target": ""
} |
from collections import namedtuple
import sqlparse
from django.db import DatabaseError
from django.db.backends.base.introspection import BaseDatabaseIntrospection
from django.db.backends.base.introspection import FieldInfo as BaseFieldInfo
from django.db.backends.base.introspection import TableInfo
from django.db.models import Index
from django.utils.regex_helper import _lazy_re_compile
FieldInfo = namedtuple(
"FieldInfo", BaseFieldInfo._fields + ("pk", "has_json_constraint")
)
field_size_re = _lazy_re_compile(r"^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$")
def get_field_size(name):
"""Extract the size number from a "varchar(11)" type name"""
m = field_size_re.search(name)
return int(m[1]) if m else None
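# Illustrative behaviour (example inputs assumed, not taken from Django's tests):
#   get_field_size("varchar(30)") -> 30
#   get_field_size("text")        -> None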
# This light wrapper "fakes" a dictionary interface, because some SQLite data
# types include variables in them -- e.g. "varchar(30)" -- and can't be matched
# as a simple dictionary lookup.
class FlexibleFieldLookupDict:
# Maps SQL types to Django Field types. Some of the SQL types have multiple
# entries here because SQLite allows for anything and doesn't normalize the
# field type; it uses whatever was given.
base_data_types_reverse = {
"bool": "BooleanField",
"boolean": "BooleanField",
"smallint": "SmallIntegerField",
"smallint unsigned": "PositiveSmallIntegerField",
"smallinteger": "SmallIntegerField",
"int": "IntegerField",
"integer": "IntegerField",
"bigint": "BigIntegerField",
"integer unsigned": "PositiveIntegerField",
"bigint unsigned": "PositiveBigIntegerField",
"decimal": "DecimalField",
"real": "FloatField",
"text": "TextField",
"char": "CharField",
"varchar": "CharField",
"blob": "BinaryField",
"date": "DateField",
"datetime": "DateTimeField",
"time": "TimeField",
}
def __getitem__(self, key):
key = key.lower().split("(", 1)[0].strip()
return self.base_data_types_reverse[key]
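# Example lookups against the wrapper above (illustrative only):
#   FlexibleFieldLookupDict()["varchar(30)"]      -> "CharField"
#   FlexibleFieldLookupDict()["INTEGER UNSIGNED"] -> "PositiveIntegerField"
#   An unknown type raises KeyError, just like a plain dict lookup.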
class DatabaseIntrospection(BaseDatabaseIntrospection):
data_types_reverse = FlexibleFieldLookupDict()
def get_field_type(self, data_type, description):
field_type = super().get_field_type(data_type, description)
if description.pk and field_type in {
"BigIntegerField",
"IntegerField",
"SmallIntegerField",
}:
# No support for BigAutoField or SmallAutoField as SQLite treats
# all integer primary keys as signed 64-bit integers.
return "AutoField"
if description.has_json_constraint:
return "JSONField"
return field_type
def get_table_list(self, cursor):
"""Return a list of table and view names in the current database."""
# Skip the sqlite_sequence system table used for autoincrement key
# generation.
cursor.execute(
"""
SELECT name, type FROM sqlite_master
WHERE type in ('table', 'view') AND NOT name='sqlite_sequence'
ORDER BY name"""
)
return [TableInfo(row[0], row[1][0]) for row in cursor.fetchall()]
def get_table_description(self, cursor, table_name):
"""
Return a description of the table with the DB-API cursor.description
interface.
"""
cursor.execute(
"PRAGMA table_info(%s)" % self.connection.ops.quote_name(table_name)
)
table_info = cursor.fetchall()
if not table_info:
raise DatabaseError(f"Table {table_name} does not exist (empty pragma).")
collations = self._get_column_collations(cursor, table_name)
json_columns = set()
if self.connection.features.can_introspect_json_field:
for line in table_info:
column = line[1]
json_constraint_sql = '%%json_valid("%s")%%' % column
has_json_constraint = cursor.execute(
"""
SELECT sql
FROM sqlite_master
WHERE
type = 'table' AND
name = %s AND
sql LIKE %s
""",
[table_name, json_constraint_sql],
).fetchone()
if has_json_constraint:
json_columns.add(column)
return [
FieldInfo(
name,
data_type,
None,
get_field_size(data_type),
None,
None,
not notnull,
default,
collations.get(name),
pk == 1,
name in json_columns,
)
for cid, name, data_type, notnull, default, pk in table_info
]
def get_sequences(self, cursor, table_name, table_fields=()):
pk_col = self.get_primary_key_column(cursor, table_name)
return [{"table": table_name, "column": pk_col}]
def get_relations(self, cursor, table_name):
"""
Return a dictionary of {column_name: (ref_column_name, ref_table_name)}
representing all foreign keys in the given table.
"""
cursor.execute(
"PRAGMA foreign_key_list(%s)" % self.connection.ops.quote_name(table_name)
)
return {
column_name: (ref_column_name, ref_table_name)
for (
_,
_,
ref_table_name,
column_name,
ref_column_name,
*_,
) in cursor.fetchall()
}
def get_primary_key_column(self, cursor, table_name):
"""Return the column name of the primary key for the given table."""
cursor.execute(
"PRAGMA table_info(%s)" % self.connection.ops.quote_name(table_name)
)
for _, name, *_, pk in cursor.fetchall():
if pk:
return name
return None
def _parse_column_or_constraint_definition(self, tokens, columns):
token = None
is_constraint_definition = None
field_name = None
constraint_name = None
unique = False
unique_columns = []
check = False
check_columns = []
braces_deep = 0
for token in tokens:
if token.match(sqlparse.tokens.Punctuation, "("):
braces_deep += 1
elif token.match(sqlparse.tokens.Punctuation, ")"):
braces_deep -= 1
if braces_deep < 0:
# End of columns and constraints for table definition.
break
elif braces_deep == 0 and token.match(sqlparse.tokens.Punctuation, ","):
# End of current column or constraint definition.
break
# Detect column or constraint definition by first token.
if is_constraint_definition is None:
is_constraint_definition = token.match(
sqlparse.tokens.Keyword, "CONSTRAINT"
)
if is_constraint_definition:
continue
if is_constraint_definition:
# Detect constraint name by second token.
if constraint_name is None:
if token.ttype in (sqlparse.tokens.Name, sqlparse.tokens.Keyword):
constraint_name = token.value
elif token.ttype == sqlparse.tokens.Literal.String.Symbol:
constraint_name = token.value[1:-1]
# Start constraint columns parsing after UNIQUE keyword.
if token.match(sqlparse.tokens.Keyword, "UNIQUE"):
unique = True
unique_braces_deep = braces_deep
elif unique:
if unique_braces_deep == braces_deep:
if unique_columns:
# Stop constraint parsing.
unique = False
continue
if token.ttype in (sqlparse.tokens.Name, sqlparse.tokens.Keyword):
unique_columns.append(token.value)
elif token.ttype == sqlparse.tokens.Literal.String.Symbol:
unique_columns.append(token.value[1:-1])
else:
# Detect field name by first token.
if field_name is None:
if token.ttype in (sqlparse.tokens.Name, sqlparse.tokens.Keyword):
field_name = token.value
elif token.ttype == sqlparse.tokens.Literal.String.Symbol:
field_name = token.value[1:-1]
if token.match(sqlparse.tokens.Keyword, "UNIQUE"):
unique_columns = [field_name]
# Start constraint columns parsing after CHECK keyword.
if token.match(sqlparse.tokens.Keyword, "CHECK"):
check = True
check_braces_deep = braces_deep
elif check:
if check_braces_deep == braces_deep:
if check_columns:
# Stop constraint parsing.
check = False
continue
if token.ttype in (sqlparse.tokens.Name, sqlparse.tokens.Keyword):
if token.value in columns:
check_columns.append(token.value)
elif token.ttype == sqlparse.tokens.Literal.String.Symbol:
if token.value[1:-1] in columns:
check_columns.append(token.value[1:-1])
unique_constraint = (
{
"unique": True,
"columns": unique_columns,
"primary_key": False,
"foreign_key": None,
"check": False,
"index": False,
}
if unique_columns
else None
)
check_constraint = (
{
"check": True,
"columns": check_columns,
"primary_key": False,
"unique": False,
"foreign_key": None,
"index": False,
}
if check_columns
else None
)
return constraint_name, unique_constraint, check_constraint, token
def _parse_table_constraints(self, sql, columns):
        # Check constraint parsing is based on the SQLite syntax diagram.
# https://www.sqlite.org/syntaxdiagrams.html#table-constraint
statement = sqlparse.parse(sql)[0]
constraints = {}
unnamed_constrains_index = 0
tokens = (token for token in statement.flatten() if not token.is_whitespace)
# Go to columns and constraint definition
for token in tokens:
if token.match(sqlparse.tokens.Punctuation, "("):
break
# Parse columns and constraint definition
while True:
(
constraint_name,
unique,
check,
end_token,
) = self._parse_column_or_constraint_definition(tokens, columns)
if unique:
if constraint_name:
constraints[constraint_name] = unique
else:
unnamed_constrains_index += 1
constraints[
"__unnamed_constraint_%s__" % unnamed_constrains_index
] = unique
if check:
if constraint_name:
constraints[constraint_name] = check
else:
unnamed_constrains_index += 1
constraints[
"__unnamed_constraint_%s__" % unnamed_constrains_index
] = check
if end_token.match(sqlparse.tokens.Punctuation, ")"):
break
return constraints
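    # A rough sketch of what the parser above yields (the DDL string is an assumed
    # example, not taken from Django's test suite):
    #   _parse_table_constraints(
    #       'CREATE TABLE "item" ("id" integer, "name" varchar(10) UNIQUE)',
    #       {"id", "name"},
    #   )
    #   -> {'__unnamed_constraint_1__': {'unique': True, 'columns': ['name'],
    #       'primary_key': False, 'foreign_key': None, 'check': False, 'index': False}}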
def get_constraints(self, cursor, table_name):
"""
Retrieve any constraints or keys (unique, pk, fk, check, index) across
one or more columns.
"""
constraints = {}
# Find inline check constraints.
try:
table_schema = cursor.execute(
"SELECT sql FROM sqlite_master WHERE type='table' and name=%s"
% (self.connection.ops.quote_name(table_name),)
).fetchone()[0]
except TypeError:
# table_name is a view.
pass
else:
columns = {
info.name for info in self.get_table_description(cursor, table_name)
}
constraints.update(self._parse_table_constraints(table_schema, columns))
# Get the index info
cursor.execute(
"PRAGMA index_list(%s)" % self.connection.ops.quote_name(table_name)
)
for row in cursor.fetchall():
# SQLite 3.8.9+ has 5 columns, however older versions only give 3
# columns. Discard last 2 columns if there.
number, index, unique = row[:3]
cursor.execute(
"SELECT sql FROM sqlite_master "
"WHERE type='index' AND name=%s" % self.connection.ops.quote_name(index)
)
# There's at most one row.
(sql,) = cursor.fetchone() or (None,)
# Inline constraints are already detected in
# _parse_table_constraints(). The reasons to avoid fetching inline
# constraints from `PRAGMA index_list` are:
# - Inline constraints can have a different name and information
# than what `PRAGMA index_list` gives.
# - Not all inline constraints may appear in `PRAGMA index_list`.
if not sql:
# An inline constraint
continue
# Get the index info for that index
cursor.execute(
"PRAGMA index_info(%s)" % self.connection.ops.quote_name(index)
)
for index_rank, column_rank, column in cursor.fetchall():
if index not in constraints:
constraints[index] = {
"columns": [],
"primary_key": False,
"unique": bool(unique),
"foreign_key": None,
"check": False,
"index": True,
}
constraints[index]["columns"].append(column)
# Add type and column orders for indexes
if constraints[index]["index"]:
# SQLite doesn't support any index type other than b-tree
constraints[index]["type"] = Index.suffix
orders = self._get_index_columns_orders(sql)
if orders is not None:
constraints[index]["orders"] = orders
# Get the PK
pk_column = self.get_primary_key_column(cursor, table_name)
if pk_column:
# SQLite doesn't actually give a name to the PK constraint,
# so we invent one. This is fine, as the SQLite backend never
# deletes PK constraints by name, as you can't delete constraints
# in SQLite; we remake the table with a new PK instead.
constraints["__primary__"] = {
"columns": [pk_column],
"primary_key": True,
"unique": False, # It's not actually a unique constraint.
"foreign_key": None,
"check": False,
"index": False,
}
relations = enumerate(self.get_relations(cursor, table_name).items())
constraints.update(
{
f"fk_{index}": {
"columns": [column_name],
"primary_key": False,
"unique": False,
"foreign_key": (ref_table_name, ref_column_name),
"check": False,
"index": False,
}
for index, (column_name, (ref_column_name, ref_table_name)) in relations
}
)
return constraints
def _get_index_columns_orders(self, sql):
tokens = sqlparse.parse(sql)[0]
for token in tokens:
if isinstance(token, sqlparse.sql.Parenthesis):
columns = str(token).strip("()").split(", ")
return ["DESC" if info.endswith("DESC") else "ASC" for info in columns]
return None
def _get_column_collations(self, cursor, table_name):
row = cursor.execute(
"""
SELECT sql
FROM sqlite_master
WHERE type = 'table' AND name = %s
""",
[table_name],
).fetchone()
if not row:
return {}
sql = row[0]
columns = str(sqlparse.parse(sql)[0][-1]).strip("()").split(", ")
collations = {}
for column in columns:
tokens = column[1:].split()
column_name = tokens[0].strip('"')
for index, token in enumerate(tokens):
if token == "COLLATE":
collation = tokens[index + 1]
break
else:
collation = None
collations[column_name] = collation
return collations
| {
"content_hash": "292e90117ec41857961d4a4181d5af44",
"timestamp": "",
"source": "github",
"line_count": 438,
"max_line_length": 88,
"avg_line_length": 39.906392694063925,
"alnum_prop": 0.5225127295611877,
"repo_name": "solarissmoke/django",
"id": "a74153757b122abe3d9ea2ebed1cf5d261c54c29",
"size": "17479",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "django/db/backends/sqlite3/introspection.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "87587"
},
{
"name": "HTML",
"bytes": "236871"
},
{
"name": "JavaScript",
"bytes": "146495"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Python",
"bytes": "15962069"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "392"
}
],
"symlink_target": ""
} |
import time, codecs, sys, traceback, cgi, re, os
sys.setrecursionlimit(50000)
sys.path.append("/Libs/jsoup-1.8.2.jar")
sys.path.append("/Libs/lucene-analyzers-common-4.10.4.jar")
sys.path.append("/Libs/lucene-core-4.10.4.jar")
sys.path.append("/Libs/lucene-queries-4.10.4.jar")
sys.path.append("/Libs/lucene-queryparser-4.10.4.jar")
sys.path.append("/Libs/jython-standalone-2.7.0.jar")
sys.path.append("/Libs/mysql-connector-java-5.1.22-bin.jar")
sys.path.append("/Libs/py4j-0.9.jar")
sys.path.append("/Libs/org.apache.commons.lang_2.6.0.v201205030909.jar")
sys.path.append("/Libs/org.eclipse.cdt.core_5.6.0.201402142303.jar")
sys.path.append("/Libs/org.eclipse.core.contenttype_3.4.200.v20120523-2004.jar")
sys.path.append("/Libs/org.eclipse.core.jobs_3.5.200.v20120521-2346.jar")
sys.path.append("/Libs/org.eclipse.core.resources.win32.x86_3.5.100.v20110423-0524.jar")
sys.path.append("/Libs/org.eclipse.core.resources_3.8.0.v20120522-2034.jar")
sys.path.append("/Libs/org.eclipse.core.runtime_3.8.0.v20120521-2346.jar")
sys.path.append("/Libs/org.eclipse.equinox.common_3.6.100.v20120522-1841.jar")
sys.path.append("/Libs/org.eclipse.equinox.common_3.6.200.v20130402-1505.jar")
sys.path.append("/Libs/org.eclipse.equinox.preferences_3.5.0.v20120522-1841.jar")
sys.path.append("/Libs/org.eclipse.jdt.core_3.8.1.v20120531-0637.jar")
sys.path.append("/Libs/org.eclipse.jdt.ui_3.8.2.v20130107-165834.jar")
sys.path.append("/Libs/org.eclipse.jface.text_3.8.0.v20120531-0600.jar")
sys.path.append("/Libs/org.eclipse.ltk.core.refactoring_3.6.100.v20130605-1748.jar")
sys.path.append("/Libs/org.eclipse.osgi_3.8.0.v20120529-1548.jar")
sys.path.append("/Libs/org.eclipse.text_3.5.0.jar")
sys.path.append("/Libs/bson-3.0.2.jar")
sys.path.append("/Libs/mongodb-driver-3.0.2.jar")
sys.path.append("/Libs/mongodb-driver-core-3.0.2.jar")
import GitSearch_Result_speed_1
from operator import attrgetter
from GitSearch.MyUtils import write_file
from GitSearch.DBManager import DBManager
from GitSearch.FrontEnd.Generator_Code_Query import Generator
from GitSearch.Searcher.Snippet_Searcher_test import SnippetSearcher
from GitSearch.Searcher.Question_Searcher_test import GettingQuestionDocs, SimilarQsSearcher
from GitSearch.Searcher.BigCloneBench_Searcher_test import find_answer_ids, GettingAnswerDocs, BenchSearcher, recommend
from org.apache.lucene.search import IndexSearcher, SearcherManager, SearcherFactory, ReferenceManager
from org.apache.lucene.store import SimpleFSDirectory, FSDirectory
from java.io import File
DBManager.init()
DBManager.autoconnection()
INDICES_PATH = '/Indices/'
hitlog_path = '/Users/Falcon/Desktop/Tracing/hit_logs_for_each_1.txt'
scorelog_path = '/Users/Falcon/Desktop/Tracing/Score_logs_1/'
base_path = '/Users/Falcon/Desktop/IJA/dataset/Experiment/'
def save_experiment_result(input, string):
write_file(base_path + "New_Recommended_Set_1/" + str(input) + "_result.txt", string)
def read_content_in_file(file_path):
try:
with codecs.open(file_path, mode='r', encoding='utf-8') as file:
text = file.read()
file.close()
except:
return None
return text
def write_file(file_path, content):
file = codecs.open(file_path, mode='a', encoding='utf-8')
file.write(content + '\n')
file.close()
def static_vars(**kwargs):
def decorate(func):
for k in kwargs:
setattr(func, k, kwargs[k])
return func
return decorate
@static_vars(counter=0)
def run(name=None):
total_time = time.time()
if not os.path.exists(scorelog_path):
os.makedirs(scorelog_path)
targets = read_content_in_file(base_path + "targets_for_recommendation_1.txt")
for query_number in targets.splitlines():
hit_logs_for_each = ''
score_logs_for_each = ''
run.counter += 1
print '[%s] : [%d] / [%d] : ' % (str(query_number), run.counter, len(targets.split('\n'))), round(
(float(run.counter) / float(len(targets.split('\n'))) * 100), 2), '%', 'In progress..'
target_file = base_path + "targets/" + query_number + ".txt"
code_query = read_content_in_file(target_file)
code_query = cgi.escape(re.sub(r'[^\x00-\x80]+', '', code_query))
try:
if code_query:
search_time = time.time()
final_items, hit_logs_for_each, score_logs_for_each = query_index(code_query, hit_logs_for_each,
score_logs_for_each)
git_search_result = GitSearch_Result_speed_1.GitSearchResult(final_items)
for i in git_search_result.items:
# print u'************************************************', i.file_name
save_experiment_result(query_number, i.file_name.split('.')[0])
if not git_search_result.items:
save_experiment_result(query_number, '0')
write_file(hitlog_path, hit_logs_for_each)
score_final_path = scorelog_path + query_number + '.txt'
write_file(score_final_path, score_logs_for_each + '\n')
print("Item Searching Time : %s seconds" % (time.time() - search_time))
print ('Total Time Taken : %s seconds' % (time.time() - total_time))
print "***********************************************************************************"
except:
hit_logs_for_each += '0' # java lexer recursion limit exceeded
write_file(hitlog_path, hit_logs_for_each)
score_final_path = scorelog_path + query_number + '.txt'
write_file(score_final_path, score_logs_for_each + '\n')
save_experiment_result(query_number, '0')
print(traceback.format_exc())
print ('Total Time Taken : %s seconds' % (time.time() - total_time))
print "***********************************************************************************"
def query_index(query, hit_logs_for_each, score_logs_for_each):
### 1_Query Alternation
user_code_query = Generator(query)
directory = SimpleFSDirectory(File(INDICES_PATH+'stackoverflow'))
searchermgr = SearcherManager(directory, SearcherFactory())
searchermgr.maybeRefresh()
searcher = searchermgr.acquire()
### 2_Finding 3 Answer Snippets using the User Query (refined)
answers = SnippetSearcher(searcher, user_code_query)
answer_ids = answers.more_like_this(20, query=user_code_query)
searchermgr.release(searcher)
searchermgr.close()
searcher = None
directory.close()
directory = None
# Log : Answer count
if answer_ids:
hit_logs_for_each += str(len(answer_ids)) + '\t'
else:
hit_logs_for_each += ('0' + '\t')
### 3_Finding the Associated Questions
question_ids = answers.find_question_ids(answer_ids)
# Log : Answer - Question count
if question_ids:
hit_logs_for_each += str(len(question_ids)) + '\t'
else:
hit_logs_for_each += ('0' + '\t')
directory = SimpleFSDirectory(File(INDICES_PATH + 'questionIndex'))
searchermgr = SearcherManager(directory, SearcherFactory())
searchermgr.maybeRefresh()
searcher = searchermgr.acquire()
getDoc = GettingQuestionDocs(searcher)
    item_docs = getDoc.search(question_ids, 20)[0:7]  # Keep only the top 7 questions, in ranked order.
searchermgr.release(searcher)
searchermgr.close()
searcher = None
directory.close()
directory = None
# Log : Question ItemDoc count
if item_docs:
hit_logs_for_each += str(len(item_docs)) + '\t'
else:
hit_logs_for_each += ('0' + '\t')
directory = SimpleFSDirectory(File(INDICES_PATH + 'questionIndex'))
searchermgr = SearcherManager(directory, SearcherFactory())
searchermgr.maybeRefresh()
searcher = searchermgr.acquire()
### 4_Finding 3 Similar Questions per a Question (3 X 3)
similar_questions = []
question = SimilarQsSearcher(searcher)
# Log : Similar Question count for each of Question ItemDoc
i = 1
if item_docs:
for item_doc in item_docs:
            similar_question = question.more_like_this2(item_doc, 7)  # Find up to 7 similar questions for each question.
if similar_question:
hit_logs_for_each += str(len(similar_question)) + '\t'
else:
hit_logs_for_each += ('0' + '\t')
similar_questions += similar_question
i += 1
else:
hit_logs_for_each += (
            '0' + '\t' + '0' + '\t' + '0' + '\t' + '0' + '\t' + '0' + '\t' + '0' + '\t' + '0' + '\t')  # 7 empty hit entries
searchermgr.release(searcher)
searchermgr.close()
searcher = None
directory.close()
directory = None
# Log : Similar Question result count
if similar_questions:
hit_logs_for_each += str(len(similar_questions)) + '\t'
else:
hit_logs_for_each += ('0' + '\t')
### 5_Finding Associated Answers for each Question (9 - 9)
answer_ids = find_answer_ids(similar_questions)
# Log : Question - Answer count
if answer_ids:
hit_logs_for_each += str(len(answer_ids)) + '\t'
else:
hit_logs_for_each += ('0' + '\t')
directory = SimpleFSDirectory(File(INDICES_PATH + 'stackoverflow'))
searchermgr = SearcherManager(directory, SearcherFactory())
searchermgr.maybeRefresh()
searcher = searchermgr.acquire()
### 6_Getting Answer Docs for the Final Query
getDoc = GettingAnswerDocs(searcher)
answer_docs = getDoc.search(answer_ids)
searchermgr.release(searcher)
searchermgr.close()
searcher = None
directory.close()
directory = None
# Log : Answer Docs count
if answer_docs:
hit_logs_for_each += str(len(answer_docs)) + '\t'
else:
hit_logs_for_each += ('0' + '\t')
directory = SimpleFSDirectory(File(INDICES_PATH + 'bigclonebench_1'))
searchermgr = SearcherManager(directory, SearcherFactory())
searchermgr.maybeRefresh()
searcher = searchermgr.acquire()
bench_results = []
benchsearcher = BenchSearcher(searcher) # BigCloneBench
# Exceptional
### 7_Appending for the user query results
bench_result, score_logs_for_each = benchsearcher.more_like_this2(100, answer_docs[0], score_logs_for_each,
user_code_query, 1)
if bench_result:
hit_logs_for_each += str(len(bench_result)) + '\t'
else:
hit_logs_for_each += ('0' + '\t')
bench_results += bench_result
### 8_Querying for the Final Results
# Log : Bench_result for each query
for answer_doc in answer_docs:
bench_result, score_logs_for_each = benchsearcher.more_like_this2(100, answer_doc, score_logs_for_each,
user_code_query,
0) # , user_query=user_code_query)
if bench_result:
hit_logs_for_each += str(len(bench_result)) + '\t'
else:
hit_logs_for_each += ('0' + '\t')
bench_results += bench_result
searchermgr.release(searcher)
searchermgr.close()
searcher = None
directory.close()
directory = None
    if len(answer_docs) < 49:
for a in range(49 - len(answer_docs)):
hit_logs_for_each += ('0' + '\t')
if bench_results:
hit_logs_for_each += str(len(bench_results)) + '\t'
else:
hit_logs_for_each += ('0' + '\t')
sorted_bench_results = sorted(bench_results, key=attrgetter('score'), reverse=True)
print 'Search Count : ', len(sorted_bench_results)
recommended = recommend(sorted_bench_results)
print 'Final Count : ', len(recommended)
if bench_results:
hit_logs_for_each += str(len(recommended)) + '\t'
else:
hit_logs_for_each += ('0' + '\t')
return recommended, hit_logs_for_each, score_logs_for_each
if __name__ == "__main__":
run() | {
"content_hash": "6383bdf3dffff29ca40a70879b940a87",
"timestamp": "",
"source": "github",
"line_count": 306,
"max_line_length": 119,
"avg_line_length": 39.38235294117647,
"alnum_prop": 0.6174591320222388,
"repo_name": "facoy/facoy",
"id": "9f8bef52155452d253fb656a93a3dc40c1893a74",
"size": "12171",
"binary": false,
"copies": "1",
"ref": "refs/heads/release-1.0",
"path": "FrontEnd/Experiment_SpeedUp/FaCoY_ver1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16161"
},
{
"name": "Java",
"bytes": "1038237"
},
{
"name": "JavaScript",
"bytes": "6570"
}
],
"symlink_target": ""
} |
import boto
import re
from boto.utils import find_class
import uuid
from boto.sdb.db.key import Key
from boto.sdb.db.model import Model
from boto.sdb.db.blob import Blob
from boto.sdb.db.property import ListProperty, MapProperty
from datetime import datetime, date, time
from boto.exception import SDBPersistenceError
ISO8601 = '%Y-%m-%dT%H:%M:%SZ'
class TimeDecodeError(Exception):
pass
class SDBConverter(object):
"""
Responsible for converting base Python types to format compatible with underlying
database. For SimpleDB, that means everything needs to be converted to a string
when stored in SimpleDB and from a string when retrieved.
To convert a value, pass it to the encode or decode method. The encode method
will take a Python native value and convert to DB format. The decode method will
    take a DB format value and convert it to Python native format. To find the appropriate
    method to call, the generic encode/decode methods look up the value's type in the
    type_map built in __init__, which maps each supported type to its encode_*/decode_* pair.
"""
def __init__(self, manager):
self.manager = manager
self.type_map = { bool : (self.encode_bool, self.decode_bool),
int : (self.encode_int, self.decode_int),
long : (self.encode_long, self.decode_long),
float : (self.encode_float, self.decode_float),
Model : (self.encode_reference, self.decode_reference),
Key : (self.encode_reference, self.decode_reference),
datetime : (self.encode_datetime, self.decode_datetime),
date : (self.encode_date, self.decode_date),
time : (self.encode_time, self.decode_time),
Blob: (self.encode_blob, self.decode_blob),
str: (self.encode_string, self.decode_string),
}
def encode(self, item_type, value):
try:
if Model in item_type.mro():
item_type = Model
except:
pass
if item_type in self.type_map:
encode = self.type_map[item_type][0]
return encode(value)
return value
def decode(self, item_type, value):
if item_type in self.type_map:
decode = self.type_map[item_type][1]
return decode(value)
return value
def encode_list(self, prop, value):
if value in (None, []):
return []
if not isinstance(value, list):
# This is a little trick to avoid encoding when it's just a single value,
# since that most likely means it's from a query
item_type = getattr(prop, "item_type")
return self.encode(item_type, value)
# Just enumerate(value) won't work here because
# we need to add in some zero padding
# We support lists up to 1,000 attributes, since
# SDB technically only supports 1024 attributes anyway
values = {}
for k,v in enumerate(value):
values["%03d" % k] = v
return self.encode_map(prop, values)
def encode_map(self, prop, value):
import urllib
if value == None:
return None
if not isinstance(value, dict):
raise ValueError, 'Expected a dict value, got %s' % type(value)
new_value = []
for key in value:
item_type = getattr(prop, "item_type")
if Model in item_type.mro():
item_type = Model
encoded_value = self.encode(item_type, value[key])
if encoded_value != None:
new_value.append('%s:%s' % (urllib.quote(key), encoded_value))
return new_value
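    # Sketch of the stored representation (example values are assumptions):
    #   encode_map(prop, {'a': 1}) with prop.item_type == int
    #       -> ['a:2147483649']  (key URL-quoted, value int-encoded)
    #   encode_list(prop, [7, 8]) goes through the same path with zero-padded keys,
    #       producing entries '000:2147483655' and '001:2147483656'.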
def encode_prop(self, prop, value):
if isinstance(prop, ListProperty):
return self.encode_list(prop, value)
elif isinstance(prop, MapProperty):
return self.encode_map(prop, value)
else:
return self.encode(prop.data_type, value)
def decode_list(self, prop, value):
if not isinstance(value, list):
value = [value]
if hasattr(prop, 'item_type'):
item_type = getattr(prop, "item_type")
dec_val = {}
for val in value:
if val != None:
k,v = self.decode_map_element(item_type, val)
try:
k = int(k)
except:
k = v
dec_val[k] = v
value = dec_val.values()
return value
def decode_map(self, prop, value):
if not isinstance(value, list):
value = [value]
ret_value = {}
item_type = getattr(prop, "item_type")
for val in value:
k,v = self.decode_map_element(item_type, val)
ret_value[k] = v
return ret_value
def decode_map_element(self, item_type, value):
"""Decode a single element for a map"""
import urllib
key = value
if ":" in value:
key, value = value.split(':',1)
key = urllib.unquote(key)
if Model in item_type.mro():
value = item_type(id=value)
else:
value = self.decode(item_type, value)
return (key, value)
def decode_prop(self, prop, value):
if isinstance(prop, ListProperty):
return self.decode_list(prop, value)
elif isinstance(prop, MapProperty):
return self.decode_map(prop, value)
else:
return self.decode(prop.data_type, value)
def encode_int(self, value):
value = int(value)
value += 2147483648
return '%010d' % value
def decode_int(self, value):
try:
value = int(value)
except:
boto.log.error("Error, %s is not an integer" % value)
value = 0
value = int(value)
value -= 2147483648
return int(value)
def encode_long(self, value):
value = long(value)
value += 9223372036854775808
return '%020d' % value
def decode_long(self, value):
value = long(value)
value -= 9223372036854775808
return value
def encode_bool(self, value):
if value == True or str(value).lower() in ("true", "yes"):
return 'true'
else:
return 'false'
def decode_bool(self, value):
if value.lower() == 'true':
return True
else:
return False
def encode_float(self, value):
"""
See http://tools.ietf.org/html/draft-wood-ldapext-float-00.
"""
s = '%e' % value
l = s.split('e')
mantissa = l[0].ljust(18, '0')
exponent = l[1]
if value == 0.0:
case = '3'
exponent = '000'
elif mantissa[0] != '-' and exponent[0] == '+':
case = '5'
exponent = exponent[1:].rjust(3, '0')
elif mantissa[0] != '-' and exponent[0] == '-':
case = '4'
exponent = 999 + int(exponent)
exponent = '%03d' % exponent
elif mantissa[0] == '-' and exponent[0] == '-':
case = '2'
mantissa = '%f' % (10 + float(mantissa))
mantissa = mantissa.ljust(18, '0')
exponent = exponent[1:].rjust(3, '0')
else:
case = '1'
mantissa = '%f' % (10 + float(mantissa))
mantissa = mantissa.ljust(18, '0')
exponent = 999 - int(exponent)
exponent = '%03d' % exponent
return '%s %s %s' % (case, exponent, mantissa)
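    # Illustrative worked example (not in the original source), following the
    # draft-wood-ldapext-float encoding implemented above:
    # encode_float(12.5) -> '5 001 1.2500000000000000' (case 5: positive
    # mantissa, positive exponent) and encode_float(0.5) ->
    # '4 998 5.0000000000000000' (case 4: exponent offset against 999);
    # decode_float below inverts each case.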
def decode_float(self, value):
case = value[0]
exponent = value[2:5]
mantissa = value[6:]
if case == '3':
return 0.0
elif case == '5':
pass
elif case == '4':
exponent = '%03d' % (int(exponent) - 999)
elif case == '2':
mantissa = '%f' % (float(mantissa) - 10)
exponent = '-' + exponent
else:
mantissa = '%f' % (float(mantissa) - 10)
exponent = '%03d' % abs((int(exponent) - 999))
return float(mantissa + 'e' + exponent)
def encode_datetime(self, value):
if isinstance(value, str) or isinstance(value, unicode):
return value
return value.strftime(ISO8601)
def decode_datetime(self, value):
try:
return datetime.strptime(value, ISO8601)
except:
return None
def encode_date(self, value):
if isinstance(value, str) or isinstance(value, unicode):
return value
return value.isoformat()
def decode_date(self, value):
try:
value = value.split("-")
return date(int(value[0]), int(value[1]), int(value[2]))
except:
return None
encode_time = encode_date
def decode_time(self, value):
""" converts strings in the form of HH:MM:SS.mmmmmm
(created by datetime.time.isoformat()) to
datetime.time objects.
            Timezone-aware strings ("HH:MM:SS.mmmmmm+HH:MM") won't
be handled right now and will raise TimeDecodeError.
"""
if '-' in value or '+' in value:
# TODO: Handle tzinfo
raise TimeDecodeError("Can't handle timezone aware objects: %r" % value)
tmp = value.split('.')
arg = map(int, tmp[0].split(':'))
if len(tmp) == 2:
arg.append(int(tmp[1]))
return time(*arg)
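    # Illustrative note (not in the original source): decode_time parses
    # isoformat()-style strings, e.g. '13:45:30.123456' ->
    # time(13, 45, 30, 123456) and '09:30:00' -> time(9, 30, 0);
    # offset-aware strings raise TimeDecodeError above.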
def encode_reference(self, value):
if value in (None, 'None', '', ' '):
return None
if isinstance(value, str) or isinstance(value, unicode):
return value
else:
return value.id
def decode_reference(self, value):
if not value or value == "None":
return None
return value
def encode_blob(self, value):
if not value:
return None
if isinstance(value, str):
return value
if not value.id:
bucket = self.manager.get_blob_bucket()
key = bucket.new_key(str(uuid.uuid4()))
value.id = "s3://%s/%s" % (key.bucket.name, key.name)
else:
match = re.match("^s3:\/\/([^\/]*)\/(.*)$", value.id)
if match:
s3 = self.manager.get_s3_connection()
bucket = s3.get_bucket(match.group(1), validate=False)
key = bucket.get_key(match.group(2))
else:
raise SDBPersistenceError("Invalid Blob ID: %s" % value.id)
if value.value != None:
key.set_contents_from_string(value.value)
return value.id
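    # Illustrative note (not in the original source): a freshly stored Blob is
    # given an id of the form 's3://<bucket>/<uuid4>', and decode_blob below
    # parses that same pattern back into a bucket/key pair.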
def decode_blob(self, value):
if not value:
return None
match = re.match("^s3:\/\/([^\/]*)\/(.*)$", value)
if match:
s3 = self.manager.get_s3_connection()
bucket = s3.get_bucket(match.group(1), validate=False)
key = bucket.get_key(match.group(2))
else:
return None
if key:
return Blob(file=key, id="s3://%s/%s" % (key.bucket.name, key.name))
else:
return None
def encode_string(self, value):
"""Convert ASCII, Latin-1 or UTF-8 to pure Unicode"""
if not isinstance(value, str): return value
try:
return unicode(value, 'utf-8')
except: # really, this should throw an exception.
# in the interest of not breaking current
# systems, however:
arr = []
for ch in value:
arr.append(unichr(ord(ch)))
return u"".join(arr)
def decode_string(self, value):
"""Decoding a string is really nothing, just
return the value as-is"""
return value
class SDBManager(object):
def __init__(self, cls, db_name, db_user, db_passwd,
db_host, db_port, db_table, ddl_dir, enable_ssl, consistent=None):
self.cls = cls
self.db_name = db_name
self.db_user = db_user
self.db_passwd = db_passwd
self.db_host = db_host
self.db_port = db_port
self.db_table = db_table
self.ddl_dir = ddl_dir
self.enable_ssl = enable_ssl
self.s3 = None
self.bucket = None
self.converter = SDBConverter(self)
self._sdb = None
self._domain = None
if consistent == None and hasattr(cls, "__consistent__"):
consistent = cls.__consistent__
self.consistent = consistent
@property
def sdb(self):
if self._sdb is None:
self._connect()
return self._sdb
@property
def domain(self):
if self._domain is None:
self._connect()
return self._domain
def _connect(self):
args = dict(aws_access_key_id=self.db_user,
aws_secret_access_key=self.db_passwd,
is_secure=self.enable_ssl)
try:
region = [x for x in boto.sdb.regions() if x.endpoint == self.db_host][0]
args['region'] = region
except IndexError:
pass
self._sdb = boto.connect_sdb(**args)
# This assumes that the domain has already been created
# It's much more efficient to do it this way rather than
# having this make a roundtrip each time to validate.
# The downside is that if the domain doesn't exist, it breaks
self._domain = self._sdb.lookup(self.db_name, validate=False)
if not self._domain:
self._domain = self._sdb.create_domain(self.db_name)
def _object_lister(self, cls, query_lister):
for item in query_lister:
obj = self.get_object(cls, item.name, item)
if obj:
yield obj
def encode_value(self, prop, value):
if value == None:
return None
if not prop:
return str(value)
return self.converter.encode_prop(prop, value)
def decode_value(self, prop, value):
return self.converter.decode_prop(prop, value)
def get_s3_connection(self):
if not self.s3:
self.s3 = boto.connect_s3(self.db_user, self.db_passwd)
return self.s3
def get_blob_bucket(self, bucket_name=None):
s3 = self.get_s3_connection()
bucket_name = "%s-%s" % (s3.aws_access_key_id, self.domain.name)
bucket_name = bucket_name.lower()
try:
self.bucket = s3.get_bucket(bucket_name)
except:
self.bucket = s3.create_bucket(bucket_name)
return self.bucket
def load_object(self, obj):
if not obj._loaded:
a = self.domain.get_attributes(obj.id,consistent_read=self.consistent)
if a.has_key('__type__'):
for prop in obj.properties(hidden=False):
if a.has_key(prop.name):
value = self.decode_value(prop, a[prop.name])
value = prop.make_value_from_datastore(value)
try:
setattr(obj, prop.name, value)
except Exception, e:
boto.log.exception(e)
obj._loaded = True
def get_object(self, cls, id, a=None):
obj = None
if not a:
a = self.domain.get_attributes(id,consistent_read=self.consistent)
if a.has_key('__type__'):
if not cls or a['__type__'] != cls.__name__:
cls = find_class(a['__module__'], a['__type__'])
if cls:
params = {}
for prop in cls.properties(hidden=False):
if a.has_key(prop.name):
value = self.decode_value(prop, a[prop.name])
value = prop.make_value_from_datastore(value)
params[prop.name] = value
obj = cls(id, **params)
obj._loaded = True
else:
s = '(%s) class %s.%s not found' % (id, a['__module__'], a['__type__'])
boto.log.info('sdbmanager: %s' % s)
return obj
def get_object_from_id(self, id):
return self.get_object(None, id)
def query(self, query):
query_str = "select * from `%s` %s" % (self.domain.name, self._build_filter_part(query.model_class, query.filters, query.sort_by, query.select))
if query.limit:
query_str += " limit %s" % query.limit
rs = self.domain.select(query_str, max_items=query.limit, next_token = query.next_token)
query.rs = rs
return self._object_lister(query.model_class, rs)
def count(self, cls, filters, quick=True, sort_by=None, select=None):
"""
Get the number of results that would
be returned in this query
"""
query = "select count(*) from `%s` %s" % (self.domain.name, self._build_filter_part(cls, filters, sort_by, select))
count = 0
for row in self.domain.select(query):
count += int(row['Count'])
if quick:
return count
return count
def _build_filter(self, property, name, op, val):
if name == "__id__":
name = 'itemName()'
if name != "itemName()":
name = '`%s`' % name
if val == None:
if op in ('is','='):
return "%(name)s is null" % {"name": name}
elif op in ('is not', '!='):
return "%s is not null" % name
else:
val = ""
if property.__class__ == ListProperty:
if op in ("is", "="):
op = "like"
elif op in ("!=", "not"):
op = "not like"
if not(op in ["like", "not like"] and val.startswith("%")):
val = "%%:%s" % val
return "%s %s '%s'" % (name, op, val.replace("'", "''"))
def _build_filter_part(self, cls, filters, order_by=None, select=None):
"""
        Build the filter (WHERE) part of a SimpleDB select query, including the type restriction and optional ORDER BY
"""
import types
query_parts = []
if select:
query_parts.append("(%s)" % select)
order_by_filtered = False
if order_by:
if order_by[0] == "-":
order_by_method = "DESC";
order_by = order_by[1:]
else:
order_by_method = "ASC";
if isinstance(filters, str) or isinstance(filters, unicode):
query = "WHERE %s AND `__type__` = '%s'" % (filters, cls.__name__)
if order_by != None:
query += " ORDER BY `%s` %s" % (order_by, order_by_method)
return query
for filter in filters:
filter_parts = []
filter_props = filter[0]
if type(filter_props) != list:
filter_props = [filter_props]
for filter_prop in filter_props:
(name, op) = filter_prop.strip().split(" ", 1)
value = filter[1]
property = cls.find_property(name)
if name == order_by:
order_by_filtered = True
if types.TypeType(value) == types.ListType:
filter_parts_sub = []
for val in value:
val = self.encode_value(property, val)
if isinstance(val, list):
for v in val:
filter_parts_sub.append(self._build_filter(property, name, op, v))
else:
filter_parts_sub.append(self._build_filter(property, name, op, val))
filter_parts.append("(%s)" % (" OR ".join(filter_parts_sub)))
else:
val = self.encode_value(property, value)
if isinstance(val, list):
for v in val:
filter_parts.append(self._build_filter(property, name, op, v))
else:
filter_parts.append(self._build_filter(property, name, op, val))
query_parts.append("(%s)" % (" or ".join(filter_parts)))
type_query = "(`__type__` = '%s'" % cls.__name__
for subclass in self._get_all_decendents(cls).keys():
type_query += " or `__type__` = '%s'" % subclass
type_query +=")"
query_parts.append(type_query)
order_by_query = ""
if order_by:
if not order_by_filtered:
query_parts.append("`%s` LIKE '%%'" % order_by)
order_by_query = " ORDER BY `%s` %s" % (order_by, order_by_method)
if len(query_parts) > 0:
return "WHERE %s %s" % (" AND ".join(query_parts), order_by_query)
else:
return ""
def _get_all_decendents(self, cls):
"""Get all decendents for a given class"""
decendents = {}
for sc in cls.__sub_classes__:
decendents[sc.__name__] = sc
decendents.update(self._get_all_decendents(sc))
return decendents
def query_gql(self, query_string, *args, **kwds):
raise NotImplementedError, "GQL queries not supported in SimpleDB"
def save_object(self, obj, expected_value=None):
if not obj.id:
obj.id = str(uuid.uuid4())
attrs = {'__type__' : obj.__class__.__name__,
'__module__' : obj.__class__.__module__,
'__lineage__' : obj.get_lineage()}
del_attrs = []
for property in obj.properties(hidden=False):
value = property.get_value_for_datastore(obj)
if value is not None:
value = self.encode_value(property, value)
if value == []:
value = None
if value == None:
del_attrs.append(property.name)
continue
attrs[property.name] = value
if property.unique:
try:
args = {property.name: value}
obj2 = obj.find(**args).next()
if obj2.id != obj.id:
raise SDBPersistenceError("Error: %s must be unique!" % property.name)
except(StopIteration):
pass
# Convert the Expected value to SDB format
if expected_value:
prop = obj.find_property(expected_value[0])
v = expected_value[1]
if v is not None and not type(v) == bool:
v = self.encode_value(prop, v)
expected_value[1] = v
self.domain.put_attributes(obj.id, attrs, replace=True, expected_value=expected_value)
if len(del_attrs) > 0:
self.domain.delete_attributes(obj.id, del_attrs)
return obj
def delete_object(self, obj):
self.domain.delete_attributes(obj.id)
def set_property(self, prop, obj, name, value):
setattr(obj, name, value)
value = prop.get_value_for_datastore(obj)
value = self.encode_value(prop, value)
if prop.unique:
try:
args = {prop.name: value}
obj2 = obj.find(**args).next()
if obj2.id != obj.id:
raise SDBPersistenceError("Error: %s must be unique!" % prop.name)
except(StopIteration):
pass
self.domain.put_attributes(obj.id, {name : value}, replace=True)
def get_property(self, prop, obj, name):
a = self.domain.get_attributes(obj.id,consistent_read=self.consistent)
# try to get the attribute value from SDB
if name in a:
value = self.decode_value(prop, a[name])
value = prop.make_value_from_datastore(value)
setattr(obj, prop.name, value)
return value
raise AttributeError, '%s not found' % name
def set_key_value(self, obj, name, value):
self.domain.put_attributes(obj.id, {name : value}, replace=True)
def delete_key_value(self, obj, name):
self.domain.delete_attributes(obj.id, name)
def get_key_value(self, obj, name):
a = self.domain.get_attributes(obj.id, name,consistent_read=self.consistent)
if a.has_key(name):
return a[name]
else:
return None
def get_raw_item(self, obj):
return self.domain.get_item(obj.id)
| {
"content_hash": "bb2324abeb7d929ae647bfd809e112c3",
"timestamp": "",
"source": "github",
"line_count": 676,
"max_line_length": 152,
"avg_line_length": 36.531065088757394,
"alnum_prop": 0.5195383680907066,
"repo_name": "drawquest/drawquest-web",
"id": "4e51d2d7e35df3691bd5810a7f40932bb3608f37",
"size": "25860",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "common/boto/sdb/db/manager/sdbmanager.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "57"
},
{
"name": "C",
"bytes": "547"
},
{
"name": "CSS",
"bytes": "634659"
},
{
"name": "CoffeeScript",
"bytes": "8968"
},
{
"name": "HTML",
"bytes": "898627"
},
{
"name": "JavaScript",
"bytes": "1507053"
},
{
"name": "Makefile",
"bytes": "258"
},
{
"name": "PHP",
"bytes": "1983"
},
{
"name": "Python",
"bytes": "7220727"
},
{
"name": "Ruby",
"bytes": "876"
},
{
"name": "Shell",
"bytes": "3700"
}
],
"symlink_target": ""
} |
class ImageComparisonFailure(AssertionError):
"""
Raise this exception to mark a test as a comparison between two images.
"""
| {
"content_hash": "325cb4147f18a55a435591c1d73007aa",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 75,
"avg_line_length": 34.5,
"alnum_prop": 0.717391304347826,
"repo_name": "louisLouL/pair_trading",
"id": "c39a39207747c75d768ea2dd498dfed364eb88c5",
"size": "138",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "capstone_env/lib/python3.6/site-packages/matplotlib/testing/exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "148513"
},
{
"name": "C++",
"bytes": "172384"
},
{
"name": "CSS",
"bytes": "5382"
},
{
"name": "Fortran",
"bytes": "8281"
},
{
"name": "HTML",
"bytes": "568460"
},
{
"name": "JavaScript",
"bytes": "25360"
},
{
"name": "Jupyter Notebook",
"bytes": "16254"
},
{
"name": "Python",
"bytes": "30357437"
},
{
"name": "Shell",
"bytes": "3260"
},
{
"name": "Smarty",
"bytes": "2045"
}
],
"symlink_target": ""
} |
"""Support for HomematicIP Cloud devices."""
import logging
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_NAME, EVENT_HOMEASSISTANT_STOP
from homeassistant.helpers import device_registry as dr
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from .const import (
CONF_ACCESSPOINT,
CONF_AUTHTOKEN,
DOMAIN,
HMIPC_AUTHTOKEN,
HMIPC_HAPID,
HMIPC_NAME,
)
from .generic_entity import HomematicipGenericEntity # noqa: F401
from .hap import HomematicipAuth, HomematicipHAP # noqa: F401
from .services import async_setup_services, async_unload_services
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema(
{
vol.Optional(DOMAIN, default=[]): vol.All(
cv.ensure_list,
[
vol.Schema(
{
vol.Optional(CONF_NAME, default=""): vol.Any(cv.string),
vol.Required(CONF_ACCESSPOINT): cv.string,
vol.Required(CONF_AUTHTOKEN): cv.string,
}
)
],
)
},
extra=vol.ALLOW_EXTRA,
)
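# Illustrative example (not part of the upstream integration): a
# configuration.yaml entry matching CONFIG_SCHEMA above, assuming DOMAIN
# resolves to "homematicip_cloud", might look like:
#
#   homematicip_cloud:
#     - name: Home
#       accesspoint: 3014XXXXXXXXXXXXXXXXXXXX
#       authtoken: YOUR_AUTH_TOKEN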
async def async_setup(hass: HomeAssistantType, config: ConfigType) -> bool:
"""Set up the HomematicIP Cloud component."""
hass.data[DOMAIN] = {}
accesspoints = config.get(DOMAIN, [])
for conf in accesspoints:
if conf[CONF_ACCESSPOINT] not in {
entry.data[HMIPC_HAPID]
for entry in hass.config_entries.async_entries(DOMAIN)
}:
hass.async_add_job(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={
HMIPC_HAPID: conf[CONF_ACCESSPOINT],
HMIPC_AUTHTOKEN: conf[CONF_AUTHTOKEN],
HMIPC_NAME: conf[CONF_NAME],
},
)
)
return True
async def async_setup_entry(hass: HomeAssistantType, entry: ConfigEntry) -> bool:
"""Set up an access point from a config entry."""
    # 0.104 introduced config entry unique IDs; this makes upgrading possible
if entry.unique_id is None:
new_data = dict(entry.data)
hass.config_entries.async_update_entry(
entry, unique_id=new_data[HMIPC_HAPID], data=new_data
)
hap = HomematicipHAP(hass, entry)
hass.data[DOMAIN][entry.unique_id] = hap
if not await hap.async_setup():
return False
await async_setup_services(hass)
    # Register on HA stop event to gracefully shut down the HomematicIP Cloud connection
hap.reset_connection_listener = hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STOP, hap.shutdown
)
# Register hap as device in registry.
device_registry = await dr.async_get_registry(hass)
home = hap.home
# Add the HAP name from configuration if set.
hapname = home.label if not home.name else f"{home.name} {home.label}"
device_registry.async_get_or_create(
config_entry_id=home.id,
identifiers={(DOMAIN, home.id)},
manufacturer="eQ-3",
name=hapname,
model=home.modelType,
sw_version=home.currentAPVersion,
)
return True
async def async_unload_entry(hass: HomeAssistantType, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
hap = hass.data[DOMAIN].pop(entry.unique_id)
hap.reset_connection_listener()
await async_unload_services(hass)
return await hap.async_reset()
| {
"content_hash": "751d8b3c98991a72b691521c1d375cb1",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 83,
"avg_line_length": 31.142857142857142,
"alnum_prop": 0.6233135456017269,
"repo_name": "tchellomello/home-assistant",
"id": "47da33e86dab8cb424b5c61bcf643e73817df6af",
"size": "3706",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/homematicip_cloud/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1488"
},
{
"name": "Python",
"bytes": "26713364"
},
{
"name": "Shell",
"bytes": "4528"
}
],
"symlink_target": ""
} |
'''
This is a simple proxy-minion to manage Roku devices
:maintainer: Christer Edwards (christer.edwards@gmail.com)
:maturity: 20150920
:depends: none
:platform: all
'''
from __future__ import absolute_import
# Import python libs
import logging
import salt.utils.http
__proxyenabled__ = ['roku']
LOG = logging.getLogger(__file__)
DETAILS = {}
def __virtual__():
'''
Only return if all the modules are available
'''
if salt.utils.http.query:
return True
else:
return False
def init(opts):
'''
Every proxy module needs an 'init', though you can
just put a 'pass' here if it doesn't need to do anything.
'''
LOG.debug('roku proxy init() called...')
DETAILS['url'] = opts['proxy']['url']
if not DETAILS['url'].endswith('/'):
DETAILS['url'] += '/'
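# Illustrative note (not part of the original module): init() expects the
# proxy pillar to provide the device's External Control Protocol URL, e.g.
# opts['proxy']['url'] == 'http://192.168.1.50:8060' (8060 is the port Roku
# devices typically listen on); a trailing slash is appended if missing.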
def ping():
device = salt.utils.http.query(DETAILS['url'], decode_type='xml', decode=True)
try:
ret = device['dict'][1]['serialNumber']
except KeyError:
ret = 'No data returned from API'
return ret
def package_list():
'''
List installed "packages", ie; channels
'''
channels = []
uri = 'query/apps'
device = salt.utils.http.query(DETAILS['url']+uri, decode_type='xml', decode=True)
try:
ret = device['dict']
for app in ret:
for key, val in app.items():
channels.append(val)
except KeyError:
channels = 'No data returned from API'
return channels
def service_stop():
'''
"stop" the service, ie; pause
'''
uri = 'keypress/Play'
return salt.utils.http.query(DETAILS['url']+uri, method='POST')
def service_start():
'''
"start" the service, ie; play
'''
uri = 'keypress/Play'
return salt.utils.http.query(DETAILS['url']+uri, method='POST')
def service_replay():
'''
Instant replay
'''
uri = 'keypress/InstantReplay'
return salt.utils.http.query(DETAILS['url']+uri, method='POST')
def shutdown(opts):
'''
For this proxy shutdown is a no-op
'''
LOG.debug('roku proxy shutdown() called...')
pass
def navigate_search():
'''
Navigate search
'''
uri = 'keypress/Search'
return salt.utils.http.query(DETAILS['url']+uri, method='POST')
def navigate_home():
'''
Navigate home
'''
uri = 'keypress/Home'
return salt.utils.http.query(DETAILS['url']+uri, method='POST')
def navigate_back():
'''
Navigate back
'''
uri = 'keypress/Back'
return salt.utils.http.query(DETAILS['url']+uri, method='POST')
def navigate_select():
'''
Navigate select
'''
uri = 'keypress/Select'
return salt.utils.http.query(DETAILS['url']+uri, method='POST')
def navigate_left():
'''
Navigate left
'''
uri = 'keypress/Left'
return salt.utils.http.query(DETAILS['url']+uri, method='POST')
def navigate_right():
'''
Navigate right
'''
uri = 'keypress/Right'
return salt.utils.http.query(DETAILS['url']+uri, method='POST')
def navigate_up():
'''
Navigate up
'''
uri = 'keypress/Up'
return salt.utils.http.query(DETAILS['url']+uri, method='POST')
def navigate_down():
'''
Navigate down
'''
uri = 'keypress/Down'
return salt.utils.http.query(DETAILS['url']+uri, method='POST')
def navigate_input(search):
'''
Navigate search
'''
uri = 'keypress/Lit_'+search
return salt.utils.http.query(DETAILS['url']+uri, method='POST')
| {
"content_hash": "4e615605c21edfbc60d47258b00ff0d7",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 86,
"avg_line_length": 20.074285714285715,
"alnum_prop": 0.5977796754910333,
"repo_name": "cedwards/saltstack-proxy-module-roku",
"id": "21fc4e2fa8f55ee7d05a139ff1824f26d00eabf0",
"size": "3537",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "proxy/roku.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7708"
}
],
"symlink_target": ""
} |
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from neutronclient.v2_0.client import Client as neutronclient # noqa
class FwaasApiTests(test.APITestCase):
@test.create_stubs({neutronclient: ('create_firewall_rule',)})
def test_rule_create(self):
rule1 = self.fw_rules.first()
rule1_dict = self.api_fw_rules.first()
form_data = {'name': rule1.name,
'description': rule1.description,
'protocol': rule1.protocol,
'action': rule1.action,
'source_ip_address': rule1.source_ip_address,
'source_port': rule1.source_port,
'destination_ip_address': rule1.destination_ip_address,
'destination_port': rule1.destination_port,
'shared': rule1.shared,
'enabled': rule1.enabled
}
form_dict = {'firewall_rule': form_data}
ret_dict = {'firewall_rule': rule1_dict}
neutronclient.create_firewall_rule(form_dict).AndReturn(ret_dict)
self.mox.ReplayAll()
ret_val = api.fwaas.rule_create(self.request, **form_data)
self.assertIsInstance(ret_val, api.fwaas.Rule)
self.assertEqual(rule1.name, ret_val.name)
self.assertTrue(ret_val.id)
@test.create_stubs({neutronclient: ('list_firewall_rules',
'list_firewall_policies')})
def test_rule_list(self):
exp_rules = self.fw_rules.list()
api_rules = {'firewall_rules': self.api_fw_rules.list()}
api_policies = {'firewall_policies': self.api_fw_policies.list()}
neutronclient.list_firewall_rules().AndReturn(api_rules)
neutronclient.list_firewall_policies().AndReturn(api_policies)
self.mox.ReplayAll()
ret_val = api.fwaas.rule_list(self.request)
for (v, d) in zip(ret_val, exp_rules):
self.assertIsInstance(v, api.fwaas.Rule)
self.assertEqual(d.name, v.name)
self.assertTrue(v.id)
if d.policy:
self.assertEqual(d.firewall_policy_id, v.policy.id, )
self.assertEqual(d.policy.name, v.policy.name)
else:
self.assertIsNone(v.policy)
@test.create_stubs({neutronclient: ('show_firewall_rule',
'show_firewall_policy')})
def test_rule_get(self):
exp_rule = self.fw_rules.first()
ret_dict = {'firewall_rule': self.api_fw_rules.first()}
policy_dict = {'firewall_policy': self.api_fw_policies.first()}
neutronclient.show_firewall_rule(exp_rule.id).AndReturn(ret_dict)
neutronclient.show_firewall_policy(
exp_rule.firewall_policy_id).AndReturn(policy_dict)
self.mox.ReplayAll()
ret_val = api.fwaas.rule_get(self.request, exp_rule.id)
self.assertIsInstance(ret_val, api.fwaas.Rule)
self.assertEqual(exp_rule.name, ret_val.name)
self.assertTrue(ret_val.id)
self.assertEqual(exp_rule.firewall_policy_id, ret_val.policy.id)
self.assertEqual(exp_rule.policy.name, ret_val.policy.name)
@test.create_stubs({neutronclient: ('update_firewall_rule',)})
def test_rule_update(self):
rule = self.fw_rules.first()
rule_dict = self.api_fw_rules.first()
rule.name = 'new name'
rule.description = 'new desc'
rule.protocol = 'icmp'
rule.action = 'deny'
rule.shared = True
rule.enabled = False
rule_dict['name'] = 'new name'
rule_dict['description'] = 'new desc'
rule_dict['protocol'] = 'icmp'
rule_dict['action'] = 'deny'
rule_dict['shared'] = True
rule_dict['enabled'] = False
form_data = {'name': rule.name,
'description': rule.description,
'protocol': rule.protocol,
'action': rule.action,
'shared': rule.shared,
'enabled': rule.enabled
}
form_dict = {'firewall_rule': form_data}
ret_dict = {'firewall_rule': rule_dict}
neutronclient.update_firewall_rule(
rule.id, form_dict).AndReturn(ret_dict)
self.mox.ReplayAll()
ret_val = api.fwaas.rule_update(self.request,
rule.id, **form_data)
self.assertIsInstance(ret_val, api.fwaas.Rule)
self.assertEqual(rule.name, ret_val.name)
self.assertTrue(ret_val.id)
@test.create_stubs({neutronclient: ('create_firewall_policy', )})
def test_policy_create(self):
policy1 = self.fw_policies.first()
policy1_dict = self.api_fw_policies.first()
form_data = {'name': policy1.name,
'description': policy1.description,
'firewall_rules': policy1.firewall_rules,
'shared': policy1.shared,
'audited': policy1.audited
}
form_dict = {'firewall_policy': form_data}
ret_dict = {'firewall_policy': policy1_dict}
neutronclient.create_firewall_policy(form_dict).AndReturn(ret_dict)
self.mox.ReplayAll()
ret_val = api.fwaas.policy_create(self.request, **form_data)
self.assertIsInstance(ret_val, api.fwaas.Policy)
self.assertEqual(policy1.name, ret_val.name)
self.assertTrue(ret_val.id)
@test.create_stubs({neutronclient: ('list_firewall_policies',
'list_firewall_rules')})
def test_policy_list(self):
exp_policies = self.fw_policies.list()
policies_dict = {'firewall_policies': self.api_fw_policies.list()}
rules_dict = {'firewall_rules': self.api_fw_rules.list()}
neutronclient.list_firewall_policies().AndReturn(policies_dict)
neutronclient.list_firewall_rules().AndReturn(rules_dict)
self.mox.ReplayAll()
ret_val = api.fwaas.policy_list(self.request)
for (v, d) in zip(ret_val, exp_policies):
self.assertIsInstance(v, api.fwaas.Policy)
self.assertEqual(d.name, v.name)
self.assertTrue(v.id)
self.assertEqual(len(d.firewall_rules), len(v.rules))
self.assertEqual(len(d.firewall_rules), len(v.firewall_rules))
for (r, exp_r) in zip(v.rules, d.rules):
self.assertEqual(exp_r.id, r.id)
@test.create_stubs({neutronclient: ('show_firewall_policy',
'list_firewall_rules')})
def test_policy_get(self):
exp_policy = self.fw_policies.first()
policy_dict = self.api_fw_policies.first()
# The first two rules are associated with the first policy.
api_rules = self.api_fw_rules.list()[:2]
ret_dict = {'firewall_policy': policy_dict}
neutronclient.show_firewall_policy(exp_policy.id).AndReturn(ret_dict)
filters = {'firewall_policy_id': exp_policy.id}
ret_dict = {'firewall_rules': api_rules}
neutronclient.list_firewall_rules(**filters).AndReturn(ret_dict)
self.mox.ReplayAll()
ret_val = api.fwaas.policy_get(self.request, exp_policy.id)
self.assertIsInstance(ret_val, api.fwaas.Policy)
self.assertEqual(exp_policy.name, ret_val.name)
self.assertTrue(ret_val.id)
self.assertEqual(len(exp_policy.rules), len(ret_val.rules))
for (exp, ret) in zip(exp_policy.rules, ret_val.rules):
self.assertEqual(exp.id, ret.id)
@test.create_stubs({neutronclient: ('show_firewall_policy',)})
def test_policy_get_no_rule(self):
# 2nd policy is not associated with any rules.
exp_policy = self.fw_policies.list()[1]
policy_dict = self.api_fw_policies.list()[1]
ret_dict = {'firewall_policy': policy_dict}
neutronclient.show_firewall_policy(exp_policy.id).AndReturn(ret_dict)
self.mox.ReplayAll()
ret_val = api.fwaas.policy_get(self.request, exp_policy.id)
self.assertIsInstance(ret_val, api.fwaas.Policy)
self.assertEqual(exp_policy.name, ret_val.name)
self.assertTrue(ret_val.id)
self.assertFalse(len(ret_val.rules))
@test.create_stubs({neutronclient: ('update_firewall_policy',)})
def test_policy_update(self):
policy = self.fw_policies.first()
policy_dict = self.api_fw_policies.first()
policy.name = 'new name'
policy.description = 'new desc'
policy.shared = True
policy.audited = False
policy_dict['name'] = 'new name'
policy_dict['description'] = 'new desc'
policy_dict['shared'] = True
policy_dict['audited'] = False
form_data = {'name': policy.name,
'description': policy.description,
'shared': policy.shared,
'audited': policy.audited
}
form_dict = {'firewall_policy': form_data}
ret_dict = {'firewall_policy': policy_dict}
neutronclient.update_firewall_policy(
policy.id, form_dict).AndReturn(ret_dict)
self.mox.ReplayAll()
ret_val = api.fwaas.policy_update(self.request,
policy.id, **form_data)
self.assertIsInstance(ret_val, api.fwaas.Policy)
self.assertEqual(policy.name, ret_val.name)
self.assertTrue(ret_val.id)
@test.create_stubs({neutronclient: ('firewall_policy_insert_rule',)})
def test_policy_insert_rule(self):
policy = self.fw_policies.first()
policy_dict = self.api_fw_policies.first()
new_rule_id = 'h0881d38-c3eb-4fee-9763-12de3338041d'
policy.firewall_rules.append(new_rule_id)
policy_dict['firewall_rules'].append(new_rule_id)
body = {'firewall_rule_id': new_rule_id,
'insert_before': policy.firewall_rules[1],
'insert_after': policy.firewall_rules[0]}
neutronclient.firewall_policy_insert_rule(
policy.id, body).AndReturn(policy_dict)
self.mox.ReplayAll()
ret_val = api.fwaas.policy_insert_rule(self.request,
policy.id, **body)
self.assertIn(new_rule_id, ret_val.firewall_rules)
@test.create_stubs({neutronclient: ('firewall_policy_remove_rule',)})
def test_policy_remove_rule(self):
policy = self.fw_policies.first()
policy_dict = self.api_fw_policies.first()
remove_rule_id = policy.firewall_rules[0]
policy_dict['firewall_rules'].remove(remove_rule_id)
body = {'firewall_rule_id': remove_rule_id}
neutronclient.firewall_policy_remove_rule(
policy.id, body).AndReturn(policy_dict)
self.mox.ReplayAll()
ret_val = api.fwaas.policy_remove_rule(self.request,
policy.id, **body)
self.assertNotIn(remove_rule_id, ret_val.firewall_rules)
@test.create_stubs({neutronclient: ('create_firewall', )})
def test_firewall_create(self):
firewall = self.firewalls.first()
firewall_dict = self.api_firewalls.first()
form_data = {'name': firewall.name,
'description': firewall.description,
'firewall_policy_id': firewall.firewall_policy_id,
'shared': firewall.shared,
'admin_state_up': firewall.admin_state_up
}
form_dict = {'firewall': form_data}
ret_dict = {'firewall': firewall_dict}
neutronclient.create_firewall(form_dict).AndReturn(ret_dict)
self.mox.ReplayAll()
ret_val = api.fwaas.firewall_create(self.request, **form_data)
self.assertIsInstance(ret_val, api.fwaas.Firewall)
self.assertEqual(firewall.name, ret_val.name)
self.assertTrue(ret_val.id)
@test.create_stubs({neutronclient: ('list_firewalls',
'list_firewall_policies')})
def test_firewall_list(self):
exp_firewalls = self.firewalls.list()
firewalls_dict = {'firewalls': self.api_firewalls.list()}
policies_dict = {'firewall_policies': self.api_fw_policies.list()}
neutronclient.list_firewalls().AndReturn(firewalls_dict)
neutronclient.list_firewall_policies().AndReturn(policies_dict)
self.mox.ReplayAll()
ret_val = api.fwaas.firewall_list(self.request)
for (v, d) in zip(ret_val, exp_firewalls):
self.assertIsInstance(v, api.fwaas.Firewall)
self.assertEqual(d.name, v.name)
self.assertTrue(v.id)
self.assertEqual(d.firewall_policy_id, v.policy.id)
self.assertEqual(d.policy.name, v.policy.name)
@test.create_stubs({neutronclient: ('show_firewall',
'show_firewall_policy')})
def test_firewall_get(self):
exp_firewall = self.firewalls.first()
ret_dict = {'firewall': self.api_firewalls.first()}
policy_dict = {'firewall_policy': self.api_fw_policies.first()}
neutronclient.show_firewall(exp_firewall.id).AndReturn(ret_dict)
neutronclient.show_firewall_policy(
exp_firewall.firewall_policy_id).AndReturn(policy_dict)
self.mox.ReplayAll()
ret_val = api.fwaas.firewall_get(self.request, exp_firewall.id)
self.assertIsInstance(ret_val, api.fwaas.Firewall)
self.assertEqual(exp_firewall.name, ret_val.name)
self.assertTrue(ret_val.id)
self.assertEqual(exp_firewall.firewall_policy_id, ret_val.policy.id)
self.assertEqual(exp_firewall.policy.name, ret_val.policy.name)
@test.create_stubs({neutronclient: ('update_firewall',)})
def test_firewall_update(self):
firewall = self.firewalls.first()
firewall_dict = self.api_firewalls.first()
firewall.name = 'new name'
firewall.description = 'new desc'
firewall.admin_state_up = False
firewall_dict['name'] = 'new name'
firewall_dict['description'] = 'new desc'
firewall_dict['admin_state_up'] = False
form_data = {'name': firewall.name,
'description': firewall.description,
'admin_state_up': firewall.admin_state_up
}
form_dict = {'firewall': form_data}
ret_dict = {'firewall': firewall_dict}
neutronclient.update_firewall(
firewall.id, form_dict).AndReturn(ret_dict)
self.mox.ReplayAll()
ret_val = api.fwaas.firewall_update(self.request,
firewall.id, **form_data)
self.assertIsInstance(ret_val, api.fwaas.Firewall)
self.assertEqual(firewall.name, ret_val.name)
self.assertTrue(ret_val.id)
| {
"content_hash": "79dbe19c03ce71ffab4d211f9ef43c8e",
"timestamp": "",
"source": "github",
"line_count": 357,
"max_line_length": 77,
"avg_line_length": 41.851540616246496,
"alnum_prop": 0.5935345693059367,
"repo_name": "mrunge/horizon",
"id": "1c978b2f1ec14367e0849649e6267abfd48efd69",
"size": "15563",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "openstack_dashboard/test/api_tests/fwaas_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "64112"
},
{
"name": "JavaScript",
"bytes": "238175"
},
{
"name": "Python",
"bytes": "3989776"
},
{
"name": "Shell",
"bytes": "16967"
}
],
"symlink_target": ""
} |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# isort:skip_file
"""Unit tests for Superset"""
import csv
import datetime
import doctest
import html
import io
import json
import logging
from typing import Dict, List
from urllib.parse import quote
import superset.utils.database
from tests.integration_tests.fixtures.birth_names_dashboard import (
load_birth_names_dashboard_with_slices,
load_birth_names_data,
)
import pytest
import pytz
import random
import re
import unittest
from unittest import mock
import pandas as pd
import sqlalchemy as sqla
from sqlalchemy.exc import SQLAlchemyError
from superset.models.cache import CacheKey
from superset.utils.database import get_example_database
from tests.integration_tests.conftest import with_feature_flags
from tests.integration_tests.fixtures.energy_dashboard import (
load_energy_table_with_slice,
load_energy_table_data,
)
from tests.integration_tests.test_app import app
import superset.views.utils
from superset import (
dataframe,
db,
security_manager,
sql_lab,
)
from superset.common.db_query_status import QueryStatus
from superset.connectors.sqla.models import SqlaTable
from superset.db_engine_specs.base import BaseEngineSpec
from superset.db_engine_specs.mssql import MssqlEngineSpec
from superset.exceptions import SupersetException
from superset.extensions import async_query_manager
from superset.models import core as models
from superset.models.annotations import Annotation, AnnotationLayer
from superset.models.dashboard import Dashboard
from superset.models.datasource_access_request import DatasourceAccessRequest
from superset.models.slice import Slice
from superset.models.sql_lab import Query
from superset.result_set import SupersetResultSet
from superset.utils import core as utils
from superset.views import core as views
from superset.views.database.views import DatabaseView
from .base_tests import SupersetTestCase
from tests.integration_tests.fixtures.world_bank_dashboard import (
load_world_bank_dashboard_with_slices,
load_world_bank_data,
)
logger = logging.getLogger(__name__)
@pytest.fixture(scope="module")
def cleanup():
db.session.query(Query).delete()
db.session.query(DatasourceAccessRequest).delete()
db.session.query(models.Log).delete()
db.session.commit()
yield
class TestCore(SupersetTestCase):
def setUp(self):
self.table_ids = {
tbl.table_name: tbl.id for tbl in (db.session.query(SqlaTable).all())
}
self.original_unsafe_db_setting = app.config["PREVENT_UNSAFE_DB_CONNECTIONS"]
def tearDown(self):
db.session.query(Query).delete()
app.config["PREVENT_UNSAFE_DB_CONNECTIONS"] = self.original_unsafe_db_setting
def test_login(self):
resp = self.get_resp("/login/", data=dict(username="admin", password="general"))
self.assertNotIn("User confirmation needed", resp)
resp = self.get_resp("/logout/", follow_redirects=True)
self.assertIn("User confirmation needed", resp)
resp = self.get_resp(
"/login/", data=dict(username="admin", password="wrongPassword")
)
self.assertIn("User confirmation needed", resp)
def test_dashboard_endpoint(self):
self.login()
resp = self.client.get("/superset/dashboard/-1/")
assert resp.status_code == 404
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_slice_endpoint(self):
self.login(username="admin")
resp = self.client.get("/superset/slice/-1/")
assert resp.status_code == 404
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_viz_cache_key(self):
self.login(username="admin")
slc = self.get_slice("Girls", db.session)
viz = slc.viz
qobj = viz.query_obj()
cache_key = viz.cache_key(qobj)
qobj["groupby"] = []
cache_key_with_groupby = viz.cache_key(qobj)
self.assertNotEqual(cache_key, cache_key_with_groupby)
self.assertNotEqual(
viz.cache_key(qobj), viz.cache_key(qobj, time_compare="12 weeks")
)
self.assertNotEqual(
viz.cache_key(qobj, time_compare="28 days"),
viz.cache_key(qobj, time_compare="12 weeks"),
)
qobj["inner_from_dttm"] = datetime.datetime(1901, 1, 1)
self.assertEqual(cache_key_with_groupby, viz.cache_key(qobj))
def test_get_superset_tables_not_allowed(self):
example_db = superset.utils.database.get_example_database()
schema_name = self.default_schema_backend_map[example_db.backend]
self.login(username="gamma")
uri = f"superset/tables/{example_db.id}/{schema_name}/undefined/"
rv = self.client.get(uri)
self.assertEqual(rv.status_code, 404)
@pytest.mark.usefixtures("load_energy_table_with_slice")
def test_get_superset_tables_allowed(self):
session = db.session
table_name = "energy_usage"
role_name = "dummy_role"
self.logout()
self.login(username="gamma")
gamma_user = security_manager.find_user(username="gamma")
security_manager.add_role(role_name)
dummy_role = security_manager.find_role(role_name)
gamma_user.roles.append(dummy_role)
tbl_id = self.table_ids.get(table_name)
table = db.session.query(SqlaTable).filter(SqlaTable.id == tbl_id).first()
table_perm = table.perm
security_manager.add_permission_role(
dummy_role,
security_manager.find_permission_view_menu("datasource_access", table_perm),
)
session.commit()
example_db = utils.get_example_database()
schema_name = self.default_schema_backend_map[example_db.backend]
uri = f"superset/tables/{example_db.id}/{schema_name}/{table_name}/"
rv = self.client.get(uri)
self.assertEqual(rv.status_code, 200)
# cleanup
gamma_user = security_manager.find_user(username="gamma")
gamma_user.roles.remove(security_manager.find_role(role_name))
session.commit()
@pytest.mark.usefixtures("load_energy_table_with_slice")
def test_get_superset_tables_not_allowed_with_out_permissions(self):
session = db.session
table_name = "energy_usage"
role_name = "dummy_role_no_table_access"
self.logout()
self.login(username="gamma")
gamma_user = security_manager.find_user(username="gamma")
security_manager.add_role(role_name)
dummy_role = security_manager.find_role(role_name)
gamma_user.roles.append(dummy_role)
session.commit()
example_db = utils.get_example_database()
schema_name = self.default_schema_backend_map[example_db.backend]
uri = f"superset/tables/{example_db.id}/{schema_name}/{table_name}/"
rv = self.client.get(uri)
self.assertEqual(rv.status_code, 404)
# cleanup
gamma_user = security_manager.find_user(username="gamma")
gamma_user.roles.remove(security_manager.find_role(role_name))
session.commit()
def test_get_superset_tables_substr(self):
example_db = superset.utils.database.get_example_database()
if example_db.backend in {"presto", "hive", "sqlite"}:
# TODO: change table to the real table that is in examples.
return
self.login(username="admin")
schema_name = self.default_schema_backend_map[example_db.backend]
uri = f"superset/tables/{example_db.id}/{schema_name}/ab_role/"
rv = self.client.get(uri)
response = json.loads(rv.data.decode("utf-8"))
self.assertEqual(rv.status_code, 200)
expected_response = {
"options": [
{
"label": "ab_role",
"schema": schema_name,
"title": "ab_role",
"type": "table",
"value": "ab_role",
"extra": None,
}
],
"tableLength": 1,
}
self.assertEqual(response, expected_response)
def test_get_superset_tables_not_found(self):
self.login(username="admin")
uri = f"superset/tables/invalid/public/undefined/"
rv = self.client.get(uri)
self.assertEqual(rv.status_code, 404)
def test_annotation_json_endpoint(self):
# Set up an annotation layer and annotation
layer = AnnotationLayer(name="foo", descr="bar")
db.session.add(layer)
db.session.commit()
annotation = Annotation(
layer_id=layer.id,
short_descr="my_annotation",
start_dttm=datetime.datetime(2020, 5, 20, 18, 21, 51),
end_dttm=datetime.datetime(2020, 5, 20, 18, 31, 51),
)
db.session.add(annotation)
db.session.commit()
self.login()
resp_annotations = json.loads(
self.get_resp("annotationlayermodelview/api/read")
)
# the UI needs id and name to function
self.assertIn("id", resp_annotations["result"][0])
self.assertIn("name", resp_annotations["result"][0])
response = self.get_resp(
f"/superset/annotation_json/{layer.id}?form_data="
+ quote(json.dumps({"time_range": "100 years ago : now"}))
)
assert "my_annotation" in response
# Rollback changes
db.session.delete(annotation)
db.session.delete(layer)
db.session.commit()
def test_admin_only_permissions(self):
def assert_admin_permission_in(role_name, assert_func):
role = security_manager.find_role(role_name)
permissions = [p.permission.name for p in role.permissions]
assert_func("can_approve", permissions)
assert_admin_permission_in("Admin", self.assertIn)
assert_admin_permission_in("Alpha", self.assertNotIn)
assert_admin_permission_in("Gamma", self.assertNotIn)
def test_admin_only_menu_views(self):
def assert_admin_view_menus_in(role_name, assert_func):
role = security_manager.find_role(role_name)
view_menus = [p.view_menu.name for p in role.permissions]
assert_func("ResetPasswordView", view_menus)
assert_func("RoleModelView", view_menus)
assert_func("Security", view_menus)
assert_func("SQL Lab", view_menus)
assert_admin_view_menus_in("Admin", self.assertIn)
assert_admin_view_menus_in("Alpha", self.assertNotIn)
assert_admin_view_menus_in("Gamma", self.assertNotIn)
@pytest.mark.usefixtures("load_energy_table_with_slice")
def test_save_slice(self):
self.login(username="admin")
slice_name = f"Energy Sankey"
slice_id = self.get_slice(slice_name, db.session).id
copy_name_prefix = "Test Sankey"
copy_name = f"{copy_name_prefix}[save]{random.random()}"
tbl_id = self.table_ids.get("energy_usage")
new_slice_name = f"{copy_name_prefix}[overwrite]{random.random()}"
url = (
"/superset/explore/table/{}/?slice_name={}&"
"action={}&datasource_name=energy_usage"
)
form_data = {
"adhoc_filters": [],
"viz_type": "sankey",
"groupby": ["target"],
"metric": "sum__value",
"row_limit": 5000,
"slice_id": slice_id,
}
# Changing name and save as a new slice
resp = self.client.post(
url.format(tbl_id, copy_name, "saveas"),
data={"form_data": json.dumps(form_data)},
)
db.session.expunge_all()
new_slice_id = resp.json["form_data"]["slice_id"]
slc = db.session.query(Slice).filter_by(id=new_slice_id).one()
self.assertEqual(slc.slice_name, copy_name)
form_data.pop("slice_id") # We don't save the slice id when saving as
self.assertEqual(slc.viz.form_data, form_data)
form_data = {
"adhoc_filters": [],
"viz_type": "sankey",
"groupby": ["source"],
"metric": "sum__value",
"row_limit": 5000,
"slice_id": new_slice_id,
"time_range": "now",
}
# Setting the name back to its original name by overwriting new slice
self.client.post(
url.format(tbl_id, new_slice_name, "overwrite"),
data={"form_data": json.dumps(form_data)},
)
db.session.expunge_all()
slc = db.session.query(Slice).filter_by(id=new_slice_id).one()
self.assertEqual(slc.slice_name, new_slice_name)
self.assertEqual(slc.viz.form_data, form_data)
# Cleanup
slices = (
db.session.query(Slice)
.filter(Slice.slice_name.like(copy_name_prefix + "%"))
.all()
)
for slc in slices:
db.session.delete(slc)
db.session.commit()
@pytest.mark.usefixtures("load_energy_table_with_slice")
def test_filter_endpoint(self):
self.login(username="admin")
slice_name = "Energy Sankey"
slice_id = self.get_slice(slice_name, db.session).id
db.session.commit()
tbl_id = self.table_ids.get("energy_usage")
table = db.session.query(SqlaTable).filter(SqlaTable.id == tbl_id)
table.filter_select_enabled = True
url = (
"/superset/filter/table/{}/target/?viz_type=sankey&groupby=source"
"&metric=sum__value&flt_col_0=source&flt_op_0=in&flt_eq_0=&"
"slice_id={}&datasource_name=energy_usage&"
"datasource_id=1&datasource_type=table"
)
# Changing name
resp = self.get_resp(url.format(tbl_id, slice_id))
assert len(resp) > 0
assert "energy_target0" in resp
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_slice_data(self):
# slice data should have some required attributes
self.login(username="admin")
slc = self.get_slice(
slice_name="Girls", session=db.session, expunge_from_session=False
)
slc_data_attributes = slc.data.keys()
assert "changed_on" in slc_data_attributes
assert "modified" in slc_data_attributes
assert "owners" in slc_data_attributes
@pytest.mark.usefixtures("load_energy_table_with_slice")
def test_slices(self):
# Testing by hitting the two supported end points for all slices
self.login(username="admin")
Slc = Slice
urls = []
for slc in db.session.query(Slc).all():
urls += [
(slc.slice_name, "explore", slc.slice_url),
]
for name, method, url in urls:
logger.info(f"[{name}]/[{method}]: {url}")
print(f"[{name}]/[{method}]: {url}")
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
def test_add_slice(self):
self.login(username="admin")
# assert that /chart/add responds with 200
url = "/chart/add"
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_get_user_slices_for_owners(self):
self.login(username="alpha")
user = security_manager.find_user("alpha")
slice_name = "Girls"
# ensure user is not owner of any slices
url = f"/superset/user_slices/{user.id}/"
resp = self.client.get(url)
data = json.loads(resp.data)
self.assertEqual(data, [])
# make user owner of slice and verify that endpoint returns said slice
slc = self.get_slice(
slice_name=slice_name, session=db.session, expunge_from_session=False
)
slc.owners = [user]
db.session.merge(slc)
db.session.commit()
url = f"/superset/user_slices/{user.id}/"
resp = self.client.get(url)
data = json.loads(resp.data)
self.assertEqual(len(data), 1)
self.assertEqual(data[0]["title"], slice_name)
# remove ownership and ensure user no longer gets slice
slc = self.get_slice(
slice_name=slice_name, session=db.session, expunge_from_session=False
)
slc.owners = []
db.session.merge(slc)
db.session.commit()
url = f"/superset/user_slices/{user.id}/"
resp = self.client.get(url)
data = json.loads(resp.data)
self.assertEqual(data, [])
def test_get_user_slices(self):
self.login(username="admin")
userid = security_manager.find_user("admin").id
url = f"/sliceasync/api/read?_flt_0_created_by={userid}"
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
@pytest.mark.usefixtures("load_energy_table_with_slice")
def test_slices_V2(self):
# Add explore-v2-beta role to admin user
        # Test all slice urls as a user with the explore-v2-beta role
security_manager.add_role("explore-v2-beta")
security_manager.add_user(
"explore_beta",
"explore_beta",
" user",
"explore_beta@airbnb.com",
security_manager.find_role("explore-v2-beta"),
password="general",
)
self.login(username="explore_beta", password="general")
Slc = Slice
urls = []
for slc in db.session.query(Slc).all():
urls += [(slc.slice_name, "slice_url", slc.slice_url)]
for name, method, url in urls:
print(f"[{name}]/[{method}]: {url}")
self.client.get(url)
def test_doctests(self):
modules = [utils, models, sql_lab]
for mod in modules:
failed, tests = doctest.testmod(mod)
if failed:
raise Exception("Failed a doctest")
def test_misc(self):
assert self.get_resp("/health") == "OK"
assert self.get_resp("/healthcheck") == "OK"
assert self.get_resp("/ping") == "OK"
def test_testconn(self, username="admin"):
# need to temporarily allow sqlite dbs, teardown will undo this
app.config["PREVENT_UNSAFE_DB_CONNECTIONS"] = False
self.login(username=username)
database = superset.utils.database.get_example_database()
# validate that the endpoint works with the password-masked sqlalchemy uri
data = json.dumps(
{
"uri": database.safe_sqlalchemy_uri(),
"name": "examples",
"impersonate_user": False,
}
)
response = self.client.post(
"/superset/testconn", data=data, content_type="application/json"
)
assert response.status_code == 200
assert response.headers["Content-Type"] == "application/json"
# validate that the endpoint works with the decrypted sqlalchemy uri
data = json.dumps(
{
"uri": database.sqlalchemy_uri_decrypted,
"name": "examples",
"impersonate_user": False,
}
)
response = self.client.post(
"/superset/testconn", data=data, content_type="application/json"
)
assert response.status_code == 200
assert response.headers["Content-Type"] == "application/json"
def test_testconn_failed_conn(self, username="admin"):
self.login(username=username)
data = json.dumps(
{"uri": "broken://url", "name": "examples", "impersonate_user": False}
)
response = self.client.post(
"/superset/testconn", data=data, content_type="application/json"
)
assert response.status_code == 400
assert response.headers["Content-Type"] == "application/json"
response_body = json.loads(response.data.decode("utf-8"))
expected_body = {"error": "Could not load database driver: broken"}
assert response_body == expected_body, "%s != %s" % (
response_body,
expected_body,
)
data = json.dumps(
{
"uri": "mssql+pymssql://url",
"name": "examples",
"impersonate_user": False,
}
)
response = self.client.post(
"/superset/testconn", data=data, content_type="application/json"
)
assert response.status_code == 400
assert response.headers["Content-Type"] == "application/json"
response_body = json.loads(response.data.decode("utf-8"))
expected_body = {"error": "Could not load database driver: mssql+pymssql"}
assert response_body == expected_body, "%s != %s" % (
response_body,
expected_body,
)
def test_testconn_unsafe_uri(self, username="admin"):
self.login(username=username)
app.config["PREVENT_UNSAFE_DB_CONNECTIONS"] = True
response = self.client.post(
"/superset/testconn",
data=json.dumps(
{
"uri": "sqlite:///home/superset/unsafe.db",
"name": "unsafe",
"impersonate_user": False,
}
),
content_type="application/json",
)
self.assertEqual(400, response.status_code)
response_body = json.loads(response.data.decode("utf-8"))
expected_body = {
"error": "SQLiteDialect_pysqlite cannot be used as a data source for security reasons."
}
self.assertEqual(expected_body, response_body)
def test_custom_password_store(self):
database = superset.utils.database.get_example_database()
conn_pre = sqla.engine.url.make_url(database.sqlalchemy_uri_decrypted)
def custom_password_store(uri):
return "password_store_test"
models.custom_password_store = custom_password_store
conn = sqla.engine.url.make_url(database.sqlalchemy_uri_decrypted)
if conn_pre.password:
assert conn.password == "password_store_test"
assert conn.password != conn_pre.password
# Disable for password store for later tests
models.custom_password_store = None
def test_databaseview_edit(self, username="admin"):
# validate that sending a password-masked uri does not over-write the decrypted
# uri
self.login(username=username)
database = superset.utils.database.get_example_database()
sqlalchemy_uri_decrypted = database.sqlalchemy_uri_decrypted
url = "databaseview/edit/{}".format(database.id)
data = {k: database.__getattribute__(k) for k in DatabaseView.add_columns}
data["sqlalchemy_uri"] = database.safe_sqlalchemy_uri()
self.client.post(url, data=data)
database = superset.utils.database.get_example_database()
self.assertEqual(sqlalchemy_uri_decrypted, database.sqlalchemy_uri_decrypted)
# Need to clean up after ourselves
database.impersonate_user = False
database.allow_dml = False
database.allow_run_async = False
db.session.commit()
@pytest.mark.usefixtures(
"load_energy_table_with_slice", "load_birth_names_dashboard_with_slices"
)
def test_warm_up_cache(self):
self.login()
slc = self.get_slice("Girls", db.session)
data = self.get_json_resp("/superset/warm_up_cache?slice_id={}".format(slc.id))
self.assertEqual(
data, [{"slice_id": slc.id, "viz_error": None, "viz_status": "success"}]
)
data = self.get_json_resp(
"/superset/warm_up_cache?table_name=energy_usage&db_name=main"
)
assert len(data) > 0
dashboard = self.get_dash_by_slug("births")
assert self.get_json_resp(
f"/superset/warm_up_cache?dashboard_id={dashboard.id}&slice_id={slc.id}"
) == [{"slice_id": slc.id, "viz_error": None, "viz_status": "success"}]
assert self.get_json_resp(
f"/superset/warm_up_cache?dashboard_id={dashboard.id}&slice_id={slc.id}&extra_filters="
+ quote(json.dumps([{"col": "name", "op": "in", "val": ["Jennifer"]}]))
) == [{"slice_id": slc.id, "viz_error": None, "viz_status": "success"}]
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_cache_logging(self):
self.login("admin")
store_cache_keys = app.config["STORE_CACHE_KEYS_IN_METADATA_DB"]
app.config["STORE_CACHE_KEYS_IN_METADATA_DB"] = True
girls_slice = self.get_slice("Girls", db.session)
self.get_json_resp("/superset/warm_up_cache?slice_id={}".format(girls_slice.id))
ck = db.session.query(CacheKey).order_by(CacheKey.id.desc()).first()
assert ck.datasource_uid == f"{girls_slice.table.id}__table"
app.config["STORE_CACHE_KEYS_IN_METADATA_DB"] = store_cache_keys
def test_redirect_invalid(self):
model_url = models.Url(url="hhttp://invalid.com")
db.session.add(model_url)
db.session.commit()
self.login(username="admin")
response = self.client.get(f"/r/{model_url.id}")
assert response.headers["Location"] == "http://localhost/"
db.session.delete(model_url)
db.session.commit()
@with_feature_flags(KV_STORE=False)
def test_kv_disabled(self):
self.login(username="admin")
resp = self.client.get("/kv/10001/")
self.assertEqual(404, resp.status_code)
value = json.dumps({"data": "this is a test"})
resp = self.client.post("/kv/store/", data=dict(data=value))
self.assertEqual(resp.status_code, 404)
@with_feature_flags(KV_STORE=True)
def test_kv_enabled(self):
self.login(username="admin")
resp = self.client.get("/kv/10001/")
self.assertEqual(404, resp.status_code)
value = json.dumps({"data": "this is a test"})
resp = self.client.post("/kv/store/", data=dict(data=value))
self.assertEqual(resp.status_code, 200)
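        # A successful POST persists the payload as a KeyValue row; fetch it back
        # by id below and check that the stored JSON round-trips unchanged.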
kv = db.session.query(models.KeyValue).first()
kv_value = kv.value
self.assertEqual(json.loads(value), json.loads(kv_value))
resp = self.client.get("/kv/{}/".format(kv.id))
self.assertEqual(resp.status_code, 200)
self.assertEqual(json.loads(value), json.loads(resp.data.decode("utf-8")))
def test_gamma(self):
self.login(username="gamma")
assert "Charts" in self.get_resp("/chart/list/")
assert "Dashboards" in self.get_resp("/dashboard/list/")
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_csv_endpoint(self):
self.login()
client_id = "{}".format(random.getrandbits(64))[:10]
get_name_sql = """
SELECT name
FROM birth_names
LIMIT 1
"""
resp = self.run_sql(get_name_sql, client_id, raise_on_error=True)
name = resp["data"][0]["name"]
sql = f"""
SELECT name
FROM birth_names
WHERE name = '{name}'
LIMIT 1
"""
client_id = "{}".format(random.getrandbits(64))[:10]
self.run_sql(sql, client_id, raise_on_error=True)
resp = self.get_resp("/superset/csv/{}".format(client_id))
data = csv.reader(io.StringIO(resp))
expected_data = csv.reader(io.StringIO(f"name\n{name}\n"))
client_id = "{}".format(random.getrandbits(64))[:10]
self.run_sql(sql, client_id, raise_on_error=True)
resp = self.get_resp("/superset/csv/{}".format(client_id))
data = csv.reader(io.StringIO(resp))
expected_data = csv.reader(io.StringIO(f"name\n{name}\n"))
self.assertEqual(list(expected_data), list(data))
self.logout()
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_extra_table_metadata(self):
self.login()
example_db = superset.utils.database.get_example_database()
schema = "default" if example_db.backend in {"presto", "hive"} else "superset"
self.get_json_resp(
f"/superset/extra_table_metadata/{example_db.id}/birth_names/{schema}/"
)
def test_templated_sql_json(self):
if superset.utils.database.get_example_database().backend == "presto":
# TODO: make it work for presto
return
self.login()
sql = "SELECT '{{ 1+1 }}' as test"
data = self.run_sql(sql, "fdaklj3ws")
self.assertEqual(data["data"][0]["test"], "2")
@mock.patch(
"tests.integration_tests.superset_test_custom_template_processors.datetime"
)
@mock.patch("superset.views.core.get_sql_results")
def test_custom_templated_sql_json(self, sql_lab_mock, mock_dt) -> None:
"""Test sqllab receives macros expanded query."""
mock_dt.utcnow = mock.Mock(return_value=datetime.datetime(1970, 1, 1))
self.login()
sql = "SELECT '$DATE()' as test"
resp = {
"status": QueryStatus.SUCCESS,
"query": {"rows": 1},
"data": [{"test": "'1970-01-01'"}],
}
sql_lab_mock.return_value = resp
dbobj = self.create_fake_db_for_macros()
json_payload = dict(database_id=dbobj.id, sql=sql)
self.get_json_resp(
"/superset/sql_json/", raise_on_error=False, json_=json_payload
)
assert sql_lab_mock.called
self.assertEqual(sql_lab_mock.call_args[0][1], "SELECT '1970-01-01' as test")
self.delete_fake_db_for_macros()
def test_fetch_datasource_metadata(self):
self.login(username="admin")
url = "/superset/fetch_datasource_metadata?" "datasourceKey=1__table"
resp = self.get_json_resp(url)
keys = [
"name",
"type",
"order_by_choices",
"granularity_sqla",
"time_grain_sqla",
"id",
]
for k in keys:
self.assertIn(k, resp.keys())
@staticmethod
def _get_user_activity_endpoints(user: str):
userid = security_manager.find_user(user).id
return (
f"/superset/recent_activity/{userid}/",
f"/superset/created_slices/{userid}/",
f"/superset/created_dashboards/{userid}/",
f"/superset/fave_slices/{userid}/",
f"/superset/fave_dashboards/{userid}/",
f"/superset/user_slices/{userid}/",
f"/superset/fave_dashboards_by_username/{user}/",
)
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_user_profile(self, username="admin"):
self.login(username=username)
slc = self.get_slice("Girls", db.session)
# Setting some faves
url = f"/superset/favstar/Slice/{slc.id}/select/"
resp = self.get_json_resp(url)
self.assertEqual(resp["count"], 1)
dash = db.session.query(Dashboard).filter_by(slug="births").first()
url = f"/superset/favstar/Dashboard/{dash.id}/select/"
resp = self.get_json_resp(url)
self.assertEqual(resp["count"], 1)
resp = self.get_resp(f"/superset/profile/{username}/")
self.assertIn('"app"', resp)
for endpoint in self._get_user_activity_endpoints(username):
data = self.get_json_resp(endpoint)
self.assertNotIn("message", data)
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_user_activity_access(self, username="gamma"):
self.login(username=username)
# accessing own and other users' activity is allowed by default
for user in ("admin", "gamma"):
for endpoint in self._get_user_activity_endpoints(user):
resp = self.client.get(endpoint)
assert resp.status_code == 200
# disabling flag will block access to other users' activity data
access_flag = app.config["ENABLE_BROAD_ACTIVITY_ACCESS"]
app.config["ENABLE_BROAD_ACTIVITY_ACCESS"] = False
for user in ("admin", "gamma"):
for endpoint in self._get_user_activity_endpoints(user):
resp = self.client.get(endpoint)
expected_status_code = 200 if user == username else 403
assert resp.status_code == expected_status_code
# restore flag
app.config["ENABLE_BROAD_ACTIVITY_ACCESS"] = access_flag
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_slice_id_is_always_logged_correctly_on_web_request(self):
# explore case
self.login("admin")
slc = db.session.query(Slice).filter_by(slice_name="Girls").one()
qry = db.session.query(models.Log).filter_by(slice_id=slc.id)
self.get_resp(slc.slice_url)
self.assertEqual(1, qry.count())
def create_sample_csvfile(self, filename: str, content: List[str]) -> None:
with open(filename, "w+") as test_file:
            for line in content:
                test_file.write(f"{line}\n")
def create_sample_excelfile(self, filename: str, content: Dict[str, str]) -> None:
pd.DataFrame(content).to_excel(filename)
def enable_csv_upload(self, database: models.Database) -> None:
"""Enables csv upload in the given database."""
database.allow_file_upload = True
db.session.commit()
add_datasource_page = self.get_resp("/databaseview/list/")
self.assertIn("Upload a CSV", add_datasource_page)
form_get = self.get_resp("/csvtodatabaseview/form")
self.assertIn("CSV to Database configuration", form_get)
def test_dataframe_timezone(self):
tz = pytz.FixedOffset(60)
data = [
(datetime.datetime(2017, 11, 18, 21, 53, 0, 219225, tzinfo=tz),),
(datetime.datetime(2017, 11, 18, 22, 6, 30, tzinfo=tz),),
]
results = SupersetResultSet(list(data), [["data"]], BaseEngineSpec)
df = results.to_pandas_df()
data = dataframe.df_to_records(df)
json_str = json.dumps(data, default=utils.pessimistic_json_iso_dttm_ser)
self.assertDictEqual(
data[0], {"data": pd.Timestamp("2017-11-18 21:53:00.219225+0100", tz=tz)}
)
self.assertDictEqual(
data[1], {"data": pd.Timestamp("2017-11-18 22:06:30+0100", tz=tz)}
)
self.assertEqual(
json_str,
'[{"data": "2017-11-18T21:53:00.219225+01:00"}, {"data": "2017-11-18T22:06:30+01:00"}]',
)
def test_mssql_engine_spec_pymssql(self):
# Test for case when tuple is returned (pymssql)
data = [
(1, 1, datetime.datetime(2017, 10, 19, 23, 39, 16, 660000)),
(2, 2, datetime.datetime(2018, 10, 19, 23, 39, 16, 660000)),
]
results = SupersetResultSet(
list(data), [["col1"], ["col2"], ["col3"]], MssqlEngineSpec
)
df = results.to_pandas_df()
data = dataframe.df_to_records(df)
self.assertEqual(len(data), 2)
self.assertEqual(
data[0],
{"col1": 1, "col2": 1, "col3": pd.Timestamp("2017-10-19 23:39:16.660000")},
)
def test_comments_in_sqlatable_query(self):
clean_query = "SELECT '/* val 1 */' as c1, '-- val 2' as c2 FROM tbl"
commented_query = "/* comment 1 */" + clean_query + "-- comment 2"
table = SqlaTable(
table_name="test_comments_in_sqlatable_query_table",
sql=commented_query,
database=get_example_database(),
)
rendered_query = str(table.get_from_clause()[0])
self.assertEqual(clean_query, rendered_query)
def test_slice_payload_no_datasource(self):
self.login(username="admin")
data = self.get_json_resp("/superset/explore_json/", raise_on_error=False)
self.assertEqual(
data["errors"][0]["message"],
"The dataset associated with this chart no longer exists",
)
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_explore_json(self):
tbl_id = self.table_ids.get("birth_names")
form_data = {
"datasource": f"{tbl_id}__table",
"viz_type": "dist_bar",
"granularity_sqla": "ds",
"time_range": "No filter",
"metrics": ["count"],
"adhoc_filters": [],
"groupby": ["gender"],
"row_limit": 100,
}
self.login(username="admin")
rv = self.client.post(
"/superset/explore_json/",
data={"form_data": json.dumps(form_data)},
)
data = json.loads(rv.data.decode("utf-8"))
self.assertEqual(rv.status_code, 200)
self.assertEqual(data["rowcount"], 2)
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_explore_json_dist_bar_order(self):
tbl_id = self.table_ids.get("birth_names")
form_data = {
"datasource": f"{tbl_id}__table",
"viz_type": "dist_bar",
"url_params": {},
"granularity_sqla": "ds",
"time_range": 'DATEADD(DATETIME("2021-01-22T00:00:00"), -100, year) : 2021-01-22T00:00:00',
"metrics": [
{
"expressionType": "SIMPLE",
"column": {
"id": 334,
"column_name": "name",
"verbose_name": "null",
"description": "null",
"expression": "",
"filterable": True,
"groupby": True,
"is_dttm": False,
"type": "VARCHAR(255)",
"python_date_format": "null",
},
"aggregate": "COUNT",
"sqlExpression": "null",
"isNew": False,
"hasCustomLabel": False,
"label": "COUNT(name)",
"optionName": "metric_xdzsijn42f9_khi4h3v3vci",
},
{
"expressionType": "SIMPLE",
"column": {
"id": 332,
"column_name": "ds",
"verbose_name": "null",
"description": "null",
"expression": "",
"filterable": True,
"groupby": True,
"is_dttm": True,
"type": "TIMESTAMP WITHOUT TIME ZONE",
"python_date_format": "null",
},
"aggregate": "COUNT",
"sqlExpression": "null",
"isNew": False,
"hasCustomLabel": False,
"label": "COUNT(ds)",
"optionName": "metric_80g1qb9b6o7_ci5vquydcbe",
},
],
"order_desc": True,
"adhoc_filters": [],
"groupby": ["name"],
"columns": [],
"row_limit": 10,
"color_scheme": "supersetColors",
"label_colors": {},
"show_legend": True,
"y_axis_format": "SMART_NUMBER",
"bottom_margin": "auto",
"x_ticks_layout": "auto",
}
self.login(username="admin")
rv = self.client.post(
"/superset/explore_json/",
data={"form_data": json.dumps(form_data)},
)
data = json.loads(rv.data.decode("utf-8"))
resp = self.run_sql(
"""
SELECT count(name) AS count_name, count(ds) AS count_ds
FROM birth_names
WHERE ds >= '1921-01-22 00:00:00.000000' AND ds < '2021-01-22 00:00:00.000000'
GROUP BY name
ORDER BY count_name DESC
LIMIT 10;
""",
client_id="client_id_1",
username="admin",
)
count_ds = []
count_name = []
for series in data["data"]:
if series["key"] == "COUNT(ds)":
count_ds = series["values"]
if series["key"] == "COUNT(name)":
count_name = series["values"]
for expected, actual_ds, actual_name in zip(resp["data"], count_ds, count_name):
assert expected["count_name"] == actual_name["y"]
assert expected["count_ds"] == actual_ds["y"]
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
@mock.patch.dict(
"superset.extensions.feature_flag_manager._feature_flags",
GLOBAL_ASYNC_QUERIES=True,
)
def test_explore_json_async(self):
tbl_id = self.table_ids.get("birth_names")
form_data = {
"datasource": f"{tbl_id}__table",
"viz_type": "dist_bar",
"granularity_sqla": "ds",
"time_range": "No filter",
"metrics": ["count"],
"adhoc_filters": [],
"groupby": ["gender"],
"row_limit": 100,
}
async_query_manager.init_app(app)
self.login(username="admin")
rv = self.client.post(
"/superset/explore_json/",
data={"form_data": json.dumps(form_data)},
)
data = json.loads(rv.data.decode("utf-8"))
keys = list(data.keys())
self.assertEqual(rv.status_code, 202)
self.assertCountEqual(
keys, ["channel_id", "job_id", "user_id", "status", "errors", "result_url"]
)
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
@mock.patch.dict(
"superset.extensions.feature_flag_manager._feature_flags",
GLOBAL_ASYNC_QUERIES=True,
)
def test_explore_json_async_results_format(self):
tbl_id = self.table_ids.get("birth_names")
form_data = {
"datasource": f"{tbl_id}__table",
"viz_type": "dist_bar",
"granularity_sqla": "ds",
"time_range": "No filter",
"metrics": ["count"],
"adhoc_filters": [],
"groupby": ["gender"],
"row_limit": 100,
}
async_query_manager.init_app(app)
self.login(username="admin")
rv = self.client.post(
"/superset/explore_json/?results=true",
data={"form_data": json.dumps(form_data)},
)
self.assertEqual(rv.status_code, 200)
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
@mock.patch(
"superset.utils.cache_manager.CacheManager.cache",
new_callable=mock.PropertyMock,
)
@mock.patch("superset.viz.BaseViz.force_cached", new_callable=mock.PropertyMock)
def test_explore_json_data(self, mock_force_cached, mock_cache):
tbl_id = self.table_ids.get("birth_names")
form_data = dict(
{
"form_data": {
"datasource": f"{tbl_id}__table",
"viz_type": "dist_bar",
"granularity_sqla": "ds",
"time_range": "No filter",
"metrics": ["count"],
"adhoc_filters": [],
"groupby": ["gender"],
"row_limit": 100,
}
}
)
class MockCache:
def get(self, key):
return form_data
def set(self):
return None
mock_cache.return_value = MockCache()
mock_force_cached.return_value = False
self.login(username="admin")
rv = self.client.get("/superset/explore_json/data/valid-cache-key")
data = json.loads(rv.data.decode("utf-8"))
self.assertEqual(rv.status_code, 200)
self.assertEqual(data["rowcount"], 2)
@mock.patch(
"superset.utils.cache_manager.CacheManager.cache",
new_callable=mock.PropertyMock,
)
def test_explore_json_data_no_login(self, mock_cache):
tbl_id = self.table_ids.get("birth_names")
form_data = dict(
{
"form_data": {
"datasource": f"{tbl_id}__table",
"viz_type": "dist_bar",
"granularity_sqla": "ds",
"time_range": "No filter",
"metrics": ["count"],
"adhoc_filters": [],
"groupby": ["gender"],
"row_limit": 100,
}
}
)
class MockCache:
def get(self, key):
return form_data
def set(self):
return None
mock_cache.return_value = MockCache()
rv = self.client.get("/superset/explore_json/data/valid-cache-key")
self.assertEqual(rv.status_code, 401)
def test_explore_json_data_invalid_cache_key(self):
self.login(username="admin")
cache_key = "invalid-cache-key"
rv = self.client.get(f"/superset/explore_json/data/{cache_key}")
data = json.loads(rv.data.decode("utf-8"))
self.assertEqual(rv.status_code, 404)
self.assertEqual(data["error"], "Cached data not found")
@mock.patch(
"superset.security.SupersetSecurityManager.get_schemas_accessible_by_user"
)
@mock.patch("superset.security.SupersetSecurityManager.can_access_database")
@mock.patch("superset.security.SupersetSecurityManager.can_access_all_datasources")
def test_schemas_access_for_csv_upload_endpoint(
self,
mock_can_access_all_datasources,
mock_can_access_database,
mock_schemas_accessible,
):
self.login(username="admin")
dbobj = self.create_fake_db()
mock_can_access_all_datasources.return_value = False
mock_can_access_database.return_value = False
mock_schemas_accessible.return_value = ["this_schema_is_allowed_too"]
data = self.get_json_resp(
url="/superset/schemas_access_for_file_upload?db_id={db_id}".format(
db_id=dbobj.id
)
)
assert data == ["this_schema_is_allowed_too"]
self.delete_fake_db()
@mock.patch("superset.views.core.results_backend_use_msgpack", False)
def test_display_limit(self):
from superset.views import core
core.results_backend = mock.Mock()
self.login()
data = [{"col_0": i} for i in range(100)]
payload = {
"status": QueryStatus.SUCCESS,
"query": {"rows": 100},
"data": data,
}
# limit results to 1
expected_key = {"status": "success", "query": {"rows": 100}, "data": data}
limited_data = data[:1]
expected_limited = {
"status": "success",
"query": {"rows": 100},
"data": limited_data,
"displayLimitReached": True,
}
query_mock = mock.Mock()
query_mock.sql = "SELECT *"
query_mock.database = 1
query_mock.schema = "superset"
# do not apply msgpack serialization
use_msgpack = app.config["RESULTS_BACKEND_USE_MSGPACK"]
app.config["RESULTS_BACKEND_USE_MSGPACK"] = False
serialized_payload = sql_lab._serialize_payload(payload, False)
compressed = utils.zlib_compress(serialized_payload)
core.results_backend.get.return_value = compressed
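        # The mocked results backend hands back the full compressed payload; the
        # view is expected to trim it to `rows` when a limit is requested.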
with mock.patch("superset.views.core.db") as mock_superset_db:
mock_superset_db.session.query().filter_by().one_or_none.return_value = (
query_mock
)
# get all results
result_key = json.loads(self.get_resp("/superset/results/key/"))
result_limited = json.loads(self.get_resp("/superset/results/key/?rows=1"))
self.assertEqual(result_key, expected_key)
self.assertEqual(result_limited, expected_limited)
app.config["RESULTS_BACKEND_USE_MSGPACK"] = use_msgpack
def test_results_default_deserialization(self):
use_new_deserialization = False
data = [("a", 4, 4.0, "2019-08-18T16:39:16.660000")]
cursor_descr = (
("a", "string"),
("b", "int"),
("c", "float"),
("d", "datetime"),
)
db_engine_spec = BaseEngineSpec()
results = SupersetResultSet(data, cursor_descr, db_engine_spec)
query = {
"database_id": 1,
"sql": "SELECT * FROM birth_names LIMIT 100",
"status": QueryStatus.PENDING,
}
(
serialized_data,
selected_columns,
all_columns,
expanded_columns,
) = sql_lab._serialize_and_expand_data(
results, db_engine_spec, use_new_deserialization
)
payload = {
"query_id": 1,
"status": QueryStatus.SUCCESS,
"state": QueryStatus.SUCCESS,
"data": serialized_data,
"columns": all_columns,
"selected_columns": selected_columns,
"expanded_columns": expanded_columns,
"query": query,
}
serialized_payload = sql_lab._serialize_payload(
payload, use_new_deserialization
)
self.assertIsInstance(serialized_payload, str)
query_mock = mock.Mock()
deserialized_payload = superset.views.utils._deserialize_results_payload(
serialized_payload, query_mock, use_new_deserialization
)
self.assertDictEqual(deserialized_payload, payload)
query_mock.assert_not_called()
def test_results_msgpack_deserialization(self):
use_new_deserialization = True
data = [("a", 4, 4.0, "2019-08-18T16:39:16.660000")]
cursor_descr = (
("a", "string"),
("b", "int"),
("c", "float"),
("d", "datetime"),
)
db_engine_spec = BaseEngineSpec()
results = SupersetResultSet(data, cursor_descr, db_engine_spec)
query = {
"database_id": 1,
"sql": "SELECT * FROM birth_names LIMIT 100",
"status": QueryStatus.PENDING,
}
(
serialized_data,
selected_columns,
all_columns,
expanded_columns,
) = sql_lab._serialize_and_expand_data(
results, db_engine_spec, use_new_deserialization
)
payload = {
"query_id": 1,
"status": QueryStatus.SUCCESS,
"state": QueryStatus.SUCCESS,
"data": serialized_data,
"columns": all_columns,
"selected_columns": selected_columns,
"expanded_columns": expanded_columns,
"query": query,
}
serialized_payload = sql_lab._serialize_payload(
payload, use_new_deserialization
)
self.assertIsInstance(serialized_payload, bytes)
with mock.patch.object(
db_engine_spec, "expand_data", wraps=db_engine_spec.expand_data
) as expand_data:
query_mock = mock.Mock()
query_mock.database.db_engine_spec.expand_data = expand_data
deserialized_payload = superset.views.utils._deserialize_results_payload(
serialized_payload, query_mock, use_new_deserialization
)
df = results.to_pandas_df()
payload["data"] = dataframe.df_to_records(df)
self.assertDictEqual(deserialized_payload, payload)
expand_data.assert_called_once()
@mock.patch.dict(
"superset.extensions.feature_flag_manager._feature_flags",
{"FOO": lambda x: 1},
clear=True,
)
@pytest.mark.usefixtures("load_world_bank_dashboard_with_slices")
def test_feature_flag_serialization(self):
"""
Functions in feature flags don't break bootstrap data serialization.
"""
self.login()
encoded = json.dumps(
{"FOO": lambda x: 1, "super": "set"},
default=utils.pessimistic_json_iso_dttm_ser,
)
html_string = (
html.escape(encoded, quote=False)
.replace("'", "'")
.replace('"', """)
)
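        # Each page requested below embeds bootstrap data in its HTML, so the
        # escaped feature-flag JSON above must appear verbatim in the response.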
dash_id = db.session.query(Dashboard.id).first()[0]
tbl_id = self.table_ids.get("wb_health_population")
urls = [
"/superset/sqllab",
"/superset/welcome",
f"/superset/dashboard/{dash_id}/",
"/superset/profile/admin/",
f"/explore/?dataset_type=table&dataset_id={tbl_id}",
]
for url in urls:
data = self.get_resp(url)
self.assertTrue(html_string in data)
@mock.patch.dict(
"superset.extensions.feature_flag_manager._feature_flags",
{"SQLLAB_BACKEND_PERSISTENCE": True},
clear=True,
)
def test_sqllab_backend_persistence_payload(self):
username = "admin"
self.login(username)
user_id = security_manager.find_user(username).id
# create a tab
data = {
"queryEditor": json.dumps(
{
"title": "Untitled Query 1",
"dbId": 1,
"schema": None,
"autorun": False,
"sql": "SELECT ...",
"queryLimit": 1000,
}
)
}
resp = self.get_json_resp("/tabstateview/", data=data)
tab_state_id = resp["id"]
# run a query in the created tab
self.run_sql(
"SELECT name FROM birth_names",
"client_id_1",
username=username,
raise_on_error=True,
sql_editor_id=str(tab_state_id),
)
# run an orphan query (no tab)
self.run_sql(
"SELECT name FROM birth_names",
"client_id_2",
username=username,
raise_on_error=True,
)
# we should have only 1 query returned, since the second one is not
# associated with any tabs
payload = views.Superset._get_sqllab_tabs(user_id=user_id)
self.assertEqual(len(payload["queries"]), 1)
def test_virtual_table_explore_visibility(self):
        # test that default visibility is set to True
database = superset.utils.database.get_example_database()
self.assertEqual(database.allows_virtual_table_explore, True)
# test that visibility is disabled when extra is set to False
extra = database.get_extra()
extra["allows_virtual_table_explore"] = False
database.extra = json.dumps(extra)
self.assertEqual(database.allows_virtual_table_explore, False)
# test that visibility is enabled when extra is set to True
extra = database.get_extra()
extra["allows_virtual_table_explore"] = True
database.extra = json.dumps(extra)
self.assertEqual(database.allows_virtual_table_explore, True)
# test that visibility is not broken with bad values
extra = database.get_extra()
extra["allows_virtual_table_explore"] = "trash value"
database.extra = json.dumps(extra)
self.assertEqual(database.allows_virtual_table_explore, True)
def test_data_preview_visibility(self):
# test that default visibility is allowed
database = utils.get_example_database()
self.assertEqual(database.disable_data_preview, False)
# test that visibility is disabled when extra is set to true
extra = database.get_extra()
extra["disable_data_preview"] = True
database.extra = json.dumps(extra)
self.assertEqual(database.disable_data_preview, True)
# test that visibility is enabled when extra is set to false
extra = database.get_extra()
extra["disable_data_preview"] = False
database.extra = json.dumps(extra)
self.assertEqual(database.disable_data_preview, False)
# test that visibility is not broken with bad values
extra = database.get_extra()
extra["disable_data_preview"] = "trash value"
database.extra = json.dumps(extra)
self.assertEqual(database.disable_data_preview, False)
def test_explore_database_id(self):
database = superset.utils.database.get_example_database()
explore_database = superset.utils.database.get_example_database()
# test that explore_database_id is the regular database
# id if none is set in the extra
self.assertEqual(database.explore_database_id, database.id)
# test that explore_database_id is correct if the extra is set
extra = database.get_extra()
extra["explore_database_id"] = explore_database.id
database.extra = json.dumps(extra)
self.assertEqual(database.explore_database_id, explore_database.id)
def test_get_column_names_from_metric(self):
simple_metric = {
"expressionType": utils.AdhocMetricExpressionType.SIMPLE.value,
"column": {"column_name": "my_col"},
"aggregate": "SUM",
"label": "My Simple Label",
}
assert utils.get_column_name_from_metric(simple_metric) == "my_col"
sql_metric = {
"expressionType": utils.AdhocMetricExpressionType.SQL.value,
"sqlExpression": "SUM(my_label)",
"label": "My SQL Label",
}
assert utils.get_column_name_from_metric(sql_metric) is None
assert utils.get_column_names_from_metrics([simple_metric, sql_metric]) == [
"my_col"
]
@pytest.mark.usefixtures("load_world_bank_dashboard_with_slices")
@mock.patch("superset.models.core.DB_CONNECTION_MUTATOR")
def test_explore_injected_exceptions(self, mock_db_connection_mutator):
"""
Handle injected exceptions from the db mutator
"""
# Assert we can handle a custom exception at the mutator level
exception = SupersetException("Error message")
mock_db_connection_mutator.side_effect = exception
slice = db.session.query(Slice).first()
url = f"/explore/?form_data=%7B%22slice_id%22%3A%20{slice.id}%7D"
self.login()
data = self.get_resp(url)
self.assertIn("Error message", data)
# Assert we can handle a driver exception at the mutator level
exception = SQLAlchemyError("Error message")
mock_db_connection_mutator.side_effect = exception
slice = db.session.query(Slice).first()
url = f"/explore/?form_data=%7B%22slice_id%22%3A%20{slice.id}%7D"
self.login()
data = self.get_resp(url)
self.assertIn("Error message", data)
@pytest.mark.usefixtures("load_world_bank_dashboard_with_slices")
@mock.patch("superset.models.core.DB_CONNECTION_MUTATOR")
def test_dashboard_injected_exceptions(self, mock_db_connection_mutator):
"""
Handle injected exceptions from the db mutator
"""
        # Assert we can handle a custom exception at the mutator level
exception = SupersetException("Error message")
mock_db_connection_mutator.side_effect = exception
dash = db.session.query(Dashboard).first()
url = f"/superset/dashboard/{dash.id}/"
self.login()
data = self.get_resp(url)
self.assertIn("Error message", data)
# Assert we can handle a driver exception at the mutator level
exception = SQLAlchemyError("Error message")
mock_db_connection_mutator.side_effect = exception
dash = db.session.query(Dashboard).first()
url = f"/superset/dashboard/{dash.id}/"
self.login()
data = self.get_resp(url)
self.assertIn("Error message", data)
@mock.patch("superset.sql_lab.cancel_query")
@mock.patch("superset.views.core.db.session")
def test_stop_query_not_implemented(
self, mock_superset_db_session, mock_sql_lab_cancel_query
):
"""
Handles stop query when the DB engine spec does not
have a cancel query method.
"""
form_data = {"client_id": "foo"}
query_mock = mock.Mock()
query_mock.client_id = "foo"
query_mock.status = QueryStatus.RUNNING
self.login(username="admin")
mock_superset_db_session.query().filter_by().one().return_value = query_mock
mock_sql_lab_cancel_query.return_value = False
rv = self.client.post(
"/superset/stop_query/",
data={"form_data": json.dumps(form_data)},
)
assert rv.status_code == 422
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "e4fa35f727113fca3435c861f61f9026",
"timestamp": "",
"source": "github",
"line_count": 1628,
"max_line_length": 103,
"avg_line_length": 37.894348894348894,
"alnum_prop": 0.5806263372884652,
"repo_name": "airbnb/caravel",
"id": "6bc08cae88a3bf4987f763ab2bf34a5ca68ef2ce",
"size": "61692",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/integration_tests/core_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "57416"
},
{
"name": "HTML",
"bytes": "112618"
},
{
"name": "JavaScript",
"bytes": "406496"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "588212"
},
{
"name": "Shell",
"bytes": "980"
}
],
"symlink_target": ""
} |
"""Implement operations for branch-based reviews."""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# abdt_branch
#
# Public Classes:
# Branch
# .is_abandoned
# .is_null
# .is_new
# .is_status_bad_pre_review
# .is_status_bad_land
# .is_status_bad_abandoned
# .is_status_bad
# .has_new_commits
# .base_branch_name
# .review_branch_hash
# .review_branch_name
# .review_id_or_none
# .get_author_names_emails
# .get_any_author_emails
# .get_repo_name
# .get_browse_url
# .describe
# .describe_new_commits
# .make_message_digest
# .make_raw_diff
# .verify_review_branch_base
# .get_commit_message_from_tip
# .abandon
# .remove
# .clear_mark
# .mark_bad_land
# .mark_bad_abandoned
# .mark_bad_in_review
# .mark_new_bad_in_review
# .mark_bad_pre_review
# .mark_ok_in_review
# .mark_ok_new_review
# .land
#
# Public Functions:
# calc_is_ok
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
# TODO: write test driver
from __future__ import absolute_import
import phlgit_checkout
import phlgit_log
import phlgit_push
import phlgit_revparse
import phlgitu_ref
import phlsys_textconvert
import abdt_differ
import abdt_errident
import abdt_exception
import abdt_lander
import abdt_naming
import abdt_tryloop
# TODO: allow this to be passed in
_MAX_DIFF_SIZE = int(1.5 * 1024 * 1024)
def calc_is_ok(branch):
"""Return True if the supplied 'branch' is ok, False if bad, else None.
Note that a branch can be 'null' in which case we return None.
:branch: the Branch to examine
    :returns: True if ok, False if bad, None if null, new or abandoned
"""
assert branch is not None
if branch.is_null() or branch.is_new() or branch.is_abandoned():
return None
return not branch.is_status_bad()
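# Usage sketch: given a Branch instance 'branch' (see class below),
#   ok = calc_is_ok(branch)  # True: ok, False: bad, None: null/new/abandoned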
class Branch(object):
def __init__(
self,
repo,
review_branch,
review_hash,
tracking_branch,
tracking_hash,
lander,
repo_name,
browse_url=None):
"""Create a new relationship tracker for the supplied branch names.
:repo: a callable supporting git commands, e.g. repo("status")
:review_branch: the abdt_gittypes.GitReviewBranch
:review_hash: the commit hash of the branch or None
:tracking_branch: the abdt_gittypes.GitWorkingBranch
:tracking_hash: the commit hash of the branch or None
:lander: a lander conformant to abdt_lander
:repo_name: a short string to identify the repo to humans
:browse_url: a URL to browse the branch or repo (may be None)
"""
self._repo = repo
self._review_branch = review_branch
self._review_hash = review_hash
self._tracking_branch = tracking_branch
self._tracking_hash = tracking_hash
self._lander = lander
assert self._review_branch_valid_or_none()
assert self._tracking_branch_valid_or_none()
self._repo_name = repo_name
self._browse_url = browse_url
assert self._repo_name is not None
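    # Construction sketch (a hypothetical caller; the review/tracking branch
    # arguments are abdt_naming.ReviewBranch / TrackerBranch instances or None):
    #   branch = Branch(repo, review_branch, review_hash,
    #                   tracking_branch, tracking_hash, lander, "my-repo")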
def _review_branch_valid_or_none(self):
if not self._has_review_branch():
return True
else:
return isinstance(
self._review_branch,
abdt_naming.ReviewBranch)
def _tracking_branch_valid_or_none(self):
if not self._has_tracking_branch():
return True
else:
return isinstance(
self._tracking_branch,
abdt_naming.TrackerBranch)
def _has_review_branch(self):
return self._review_branch is not None
def _has_tracking_branch(self):
return self._tracking_branch is not None
def is_abandoned(self):
"""Return True if the author's branch no longer exists."""
return not self._has_review_branch() and self._has_tracking_branch()
def is_null(self):
"""Return True if we don't have any data."""
no_review_branch = not self._has_review_branch()
no_tracking_branch = not self._has_tracking_branch()
return no_review_branch and no_tracking_branch
def is_new(self):
"""Return True if we haven't marked the author's branch."""
return self._has_review_branch() and not self._has_tracking_branch()
def is_status_bad_pre_review(self):
"""Return True if the author's branch is marked 'bad pre-review'."""
if self._has_tracking_branch():
return abdt_naming.isStatusBadPreReview(self._tracking_branch)
else:
return False
def is_status_bad_land(self):
"""Return True if the author's branch is marked 'bad land'."""
if self._has_tracking_branch():
return abdt_naming.isStatusBadLand(self._tracking_branch)
else:
return False
def is_status_bad_abandoned(self):
"""Return True if the author's branch is marked 'bad abandoned'."""
if self._has_tracking_branch():
branch = self._tracking_branch
return branch.status == abdt_naming.WB_STATUS_BAD_ABANDONED
else:
return False
def is_status_bad(self):
"""Return True if the author's branch is marked any bad status."""
if self._has_tracking_branch():
return abdt_naming.isStatusBad(self._tracking_branch)
else:
return False
def has_new_commits(self):
"""Return True if the author's branch is different since marked."""
if self.is_new():
return True
else:
return self._review_hash != self._tracking_hash
def base_branch_name(self):
"""Return the string name of the branch the review will land on."""
if self._review_branch:
return self._review_branch.base
return self._tracking_branch.base
def review_branch_hash(self):
"""Return the string hash of the review branch or None."""
return self._review_hash
def review_branch_name(self):
"""Return the string name of the branch the review is based on."""
if self._review_branch:
return self._review_branch.branch
return self._tracking_branch.review_name
def review_id_or_none(self):
"""Return the int id of the review or 'None' if there isn't one."""
if not self._tracking_branch:
return None
review_id = None
try:
review_id = int(self._tracking_branch.id)
except ValueError:
pass
return review_id
def get_author_names_emails(self):
"""Return a list of (name, email) tuples from the branch."""
hashes = self._get_commit_hashes()
# names and emails are only mentioned once, in the order that they
        # appear. reverse the order so that the most recent commit is
# considered first.
hashes.reverse()
names_emails = phlgit_log.get_author_names_emails_from_hashes(
self._repo, hashes)
names_emails.reverse()
return names_emails
def get_any_author_emails(self):
"""Return a list of emails from the branch.
If the branch has an invalid base or has no history against the base
then resort to using the whole history.
Useful if 'get_author_names_emails' fails.
"""
if phlgit_revparse.get_sha1_or_none(
self._repo, self._review_branch.remote_base) is None:
hashes = phlgit_log.get_last_n_commit_hashes_from_ref(
self._repo, 1, self._review_branch.remote_branch)
else:
hashes = self._get_commit_hashes()
if not hashes:
hashes = phlgit_log.get_last_n_commit_hashes_from_ref(
self._repo, 1, self._review_branch.remote_branch)
committers = phlgit_log.get_author_names_emails_from_hashes(
self._repo, hashes)
emails = [committer[1] for committer in committers]
return emails
def get_repo_name(self):
"""Return the human name for the repo the branch came from."""
return self._repo_name
def get_browse_url(self):
"""Return the url to browse this branch, may be None."""
return self._browse_url
def _get_commit_hashes(self):
hashes = self._repo.get_range_hashes(
self._review_branch.remote_base,
self._review_branch.remote_branch)
return hashes
def describe(self):
"""Return a string description of this branch for a human to read."""
branch_description = "(null branch)"
if not self.is_null():
branch_description = self.review_branch_name()
if self.is_abandoned():
branch_description += " (abandoned)"
return "{}, {}".format(self.get_repo_name(), branch_description)
def describe_new_commits(self):
"""Return a string description of the new commits on the branch."""
hashes = None
previous = None
latest = self._review_branch.remote_branch
if self.is_new():
previous = self._review_branch.remote_base
else:
previous = self._tracking_branch.remote_branch
hashes = self._repo.get_range_hashes(previous, latest)
hashes.reverse()
revisions = self._repo.make_revisions_from_hashes(hashes)
message = ""
for r in revisions:
message += r.abbrev_hash + " " + r.subject + "\n"
return phlsys_textconvert.ensure_ascii(message)
def make_message_digest(self):
"""Return a string digest of the commit messages on the branch.
        The digest is composed of the title from the earliest commit
unique to the branch and all of the message bodies from the
unique commits on the branch.
"""
hashes = self._get_commit_hashes()
revisions = self._repo.make_revisions_from_hashes(hashes)
message = revisions[0].subject + "\n\n"
for r in revisions:
message += r.message
return phlsys_textconvert.ensure_ascii(message)
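    # e.g. for a branch with commits "Add widget" then "Fix typo", the digest
    # starts with "Add widget" and is followed by the message bodies of both.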
def make_raw_diff(self):
"""Return an abdt_differ.DiffResult of the changes on the branch.
If the diff would exceed the pre-specified max diff size then take
measures to reduce the diff.
"""
# checkout the 'to' branch, otherwise we won't take into account any
# changes to .gitattributes files
phlgit_checkout.branch(self._repo, self._review_branch.remote_branch)
try:
return abdt_differ.make_raw_diff(
self._repo,
self._review_branch.remote_base,
self._review_branch.remote_branch,
_MAX_DIFF_SIZE)
except abdt_differ.NoDiffError:
raise abdt_exception.NoDiffException(
self.base_branch_name(),
self.review_branch_name(),
self.review_branch_hash())
def _is_based_on(self, name, base):
# TODO: actually do this
return True
def verify_review_branch_base(self):
"""Raise exception if review branch has invalid base."""
if self._review_branch.base not in self._repo.get_remote_branches():
raise abdt_exception.MissingBaseException(
self._review_branch.branch,
self._review_branch.description,
self._review_branch.base)
if not self._is_based_on(
self._review_branch.branch, self._review_branch.base):
raise abdt_exception.AbdUserException(
"'" + self._review_branch.branch +
"' is not based on '" + self._review_branch.base + "'")
def get_commit_message_from_tip(self):
"""Return string commit message from latest commit on branch."""
hashes = self._get_commit_hashes()
revision = phlgit_log.make_revision_from_hash(self._repo, hashes[-1])
message = revision.subject + "\n"
message += "\n"
message += revision.message + "\n"
return phlsys_textconvert.ensure_ascii(message)
def _push_delete_review_branch(self):
def action():
self._repo.push_delete(self._review_branch.branch)
self._tryloop(action, abdt_errident.PUSH_DELETE_REVIEW)
def _push_delete_tracking_branch(self):
def action():
self._repo.push_delete(self._tracking_branch.branch)
self._tryloop(action, abdt_errident.PUSH_DELETE_TRACKING)
def abandon(self):
"""Remove information associated with the abandoned review branch."""
# TODO: raise if the branch is not actually abandoned by the user
self._push_delete_tracking_branch()
self._tracking_branch = None
self._tracking_hash = None
def remove(self):
"""Remove review branch and tracking branch."""
self._repo.archive_to_abandoned(
self._review_hash,
self.review_branch_name(),
self._tracking_branch.base)
# push the abandoned archive, don't escalate if it fails to push
try:
            # XXX: oddly pylint complains if we call push_abandoned() directly:
# "Using method (_tryloop) as an attribute (not invoked)"
def push_abandoned():
self._repo.push_abandoned()
self._tryloop(
push_abandoned,
abdt_errident.PUSH_ABANDONED_ARCHIVE)
except Exception:
            # XXX: don't worry if we can't push the abandoned archive, this is
            # most likely a permissioning issue but not a showstopper.
# we should probably nag on the review instead.
pass
self._push_delete_review_branch()
self._push_delete_tracking_branch()
self._review_branch = None
self._review_hash = None
self._tracking_branch = None
self._tracking_hash = None
def clear_mark(self):
"""Clear status and last commit associated with the review branch."""
self._push_delete_tracking_branch()
self._tracking_branch = None
self._tracking_hash = None
def mark_bad_land(self):
"""Mark the current version of the review branch as 'bad land'."""
assert self.review_id_or_none() is not None
self._tryloop(
lambda: self._push_status(abdt_naming.WB_STATUS_BAD_LAND),
abdt_errident.MARK_BAD_LAND)
def mark_bad_abandoned(self):
"""Mark the current version of the review branch as 'bad abandoned'."""
assert self.review_id_or_none() is not None
self._tryloop(
lambda: self._push_status(abdt_naming.WB_STATUS_BAD_ABANDONED),
abdt_errident.MARK_BAD_ABANDONED)
def mark_bad_in_review(self):
"""Mark the current version of the review branch as 'bad in review'."""
assert self.review_id_or_none() is not None
self._tryloop(
lambda: self._push_status(abdt_naming.WB_STATUS_BAD_INREVIEW),
abdt_errident.MARK_BAD_IN_REVIEW)
def mark_new_bad_in_review(self, revision_id):
"""Mark the current version of the review branch as 'bad in review'."""
assert self.review_id_or_none() is None
def action():
if not self.is_new():
                # 'push_bad_new_in_review' won't clean up our existing tracker
self._push_delete_tracking_branch()
self._push_new(
abdt_naming.WB_STATUS_BAD_INREVIEW,
revision_id)
self._tryloop(action, abdt_errident.MARK_NEW_BAD_IN_REVIEW)
def mark_bad_pre_review(self):
"""Mark this version of the review branch as 'bad pre review'."""
assert self.review_id_or_none() is None
assert self.is_status_bad_pre_review() or self.is_new()
# early out if this operation is redundant, pushing is expensive
if self.is_status_bad_pre_review() and not self.has_new_commits():
return
def action():
self._push_new(
abdt_naming.WB_STATUS_BAD_PREREVIEW,
None)
self._tryloop(
action, abdt_errident.MARK_BAD_PRE_REVIEW)
def mark_ok_in_review(self):
"""Mark this version of the review branch as 'ok in review'."""
assert self.review_id_or_none() is not None
self._tryloop(
lambda: self._push_status(abdt_naming.WB_STATUS_OK),
abdt_errident.MARK_OK_IN_REVIEW)
def mark_ok_new_review(self, revision_id):
"""Mark this version of the review branch as 'ok in review'."""
assert self.review_id_or_none() is None
def action():
if not self.is_new():
                # 'push_bad_new_in_review' won't clean up our existing tracker
self._push_delete_tracking_branch()
self._push_new(
abdt_naming.WB_STATUS_OK,
revision_id)
self._tryloop(action, abdt_errident.MARK_OK_NEW_REVIEW)
def land(self, author_name, author_email, message):
"""Integrate the branch into the base and remove the review branch."""
self._repo.checkout_forced_new_branch(
self._tracking_branch.base,
self._tracking_branch.remote_base)
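        # Work on a fresh local copy of the base branch so the lander integrates
        # against up-to-date remote history.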
try:
result = self._lander(
self._repo,
self._tracking_branch.remote_branch,
author_name,
author_email,
message)
except abdt_lander.LanderException as e:
self._repo("reset", "--hard") # fix the working copy
raise abdt_exception.LandingException(
str(e),
self.review_branch_name(),
self._tracking_branch.base)
landing_hash = phlgit_revparse.get_sha1(
self._repo, self._tracking_branch.base)
# don't tryloop here as it's more expected that we can't push the base
# due to permissioning or some other error
try:
self._repo.push(self._tracking_branch.base)
except Exception as e:
raise abdt_exception.LandingPushBaseException(
str(e),
self.review_branch_name(),
self._tracking_branch.base)
self._tryloop(
lambda: self._repo.push_delete(
self._tracking_branch.branch,
self.review_branch_name()),
abdt_errident.PUSH_DELETE_LANDED)
self._repo.archive_to_landed(
self._tracking_hash,
self.review_branch_name(),
self._tracking_branch.base,
landing_hash,
message)
# push the landing archive, don't escalate if it fails to push
try:
# XXX: oddly pylint complains if we call push_landed() directly:
# "Using method (_tryloop) as an attribute (not invoked)"
def push_landed():
self._repo.push_landed()
self._tryloop(
push_landed,
abdt_errident.PUSH_LANDING_ARCHIVE)
except Exception:
# XXX: don't worry if we can't push the landed, this is most
# likely a permissioning issue but not a showstopper.
# we should probably nag on the review instead.
pass
self._review_branch = None
self._review_hash = None
self._tracking_branch = None
self._tracking_hash = None
return result
def _push_status(self, status):
old_branch = self._tracking_branch.branch
self._tracking_branch.update_status(status)
new_branch = self._tracking_branch.branch
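        # If the status change doesn't rename the tracking branch, force-push the
        # review branch over it; otherwise move the tracker to its new name.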
if old_branch == new_branch:
phlgit_push.push_asymmetrical_force(
self._repo,
self._review_branch.remote_branch,
phlgitu_ref.make_local(new_branch),
self._tracking_branch.remote)
else:
phlgit_push.move_asymmetrical(
self._repo,
self._review_branch.remote_branch,
phlgitu_ref.make_local(old_branch),
phlgitu_ref.make_local(new_branch),
self._repo.get_remote())
self._tracking_hash = self._review_hash
def _push_new(self, status, revision_id):
tracking_branch = self._review_branch.make_tracker(
status, revision_id)
phlgit_push.push_asymmetrical_force(
self._repo,
self._review_branch.remote_branch,
phlgitu_ref.make_local(tracking_branch.branch),
tracking_branch.remote)
self._tracking_branch = tracking_branch
self._tracking_hash = self._review_hash
def _tryloop(self, f, identifier):
return abdt_tryloop.tryloop(f, identifier, self.describe())
# -----------------------------------------------------------------------------
# Copyright (C) 2013-2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
| {
"content_hash": "7e19f938becb8eca646b05639a647d32",
"timestamp": "",
"source": "github",
"line_count": 623,
"max_line_length": 79,
"avg_line_length": 35.258426966292134,
"alnum_prop": 0.5866794136392607,
"repo_name": "valhallasw/phabricator-tools",
"id": "14468f155655f4eca7646f095836170115a5608a",
"size": "21966",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py/abd/abdt_branch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "342"
},
{
"name": "Puppet",
"bytes": "4246"
},
{
"name": "Python",
"bytes": "964066"
},
{
"name": "Ruby",
"bytes": "2000"
},
{
"name": "Shell",
"bytes": "128202"
}
],
"symlink_target": ""
} |
import time
from lib import oled as display
display.disp_content.tonemode = "Vol"
display.disp_content.tonevalue = 50
display.disp_content.time = "17:55"
display.disp_content.name = "DRadio Wissen"
display.disp_content.artist = "Bla"
display.disp_content.title = "Blubber"
display.disp_content.app_mode = "RAD"
display.disp_content.source_string = 0
display.disp_content.wifi = 60
display.disp_content.volume = 50
display.disp_content.mpd_stat = "stop"
display.update_display(1234567)
time.sleep(2)
display.disp_content.tonemode = "Vol"
display.disp_content.tonevalue = 30
display.disp_content.time = "0:17"
display.disp_content.name = "Deutschlandfunk"
display.disp_content.artist = "Bla"
display.disp_content.title = "Blubber"
display.disp_content.app_mode = "SPOT"
display.disp_content.source_string = 0
display.disp_content.wifi = 10
display.disp_content.volume = 30
display.disp_content.mpd_stat = "play"
display.update_display(1234567)
count = 100
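# Sweep the volume/tone/wifi values from 100 down to 0 and back up,
# updating the display every 200 ms.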
while count >= 0:
display.disp_content.tonevalue = count
display.disp_content.volume = count
display.disp_content.wifi = count
display.update_display(1234567)
count = count - 1
time.sleep(0.2)
while count <= 100:
display.disp_content.tonevalue = count
display.disp_content.volume = count
display.disp_content.wifi = count
display.update_display(1234567)
count = count + 1
time.sleep(0.2)
| {
"content_hash": "95e8eb07959729546d5ee7d219850b64",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 45,
"avg_line_length": 27.96,
"alnum_prop": 0.740343347639485,
"repo_name": "thk4711/orangepi-radio",
"id": "bb92fc6eb2777046f691971e655f83543ae0cd96",
"size": "1421",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/oled_test.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "91018"
}
],
"symlink_target": ""
} |
"""
Pyodide and other single-threaded Python builds will be missing the
_multiprocessing module. Test that joblib still works in this environment.
"""
import os
import subprocess
import sys
def test_missing_multiprocessing(tmp_path):
"""
Test that import joblib works even if _multiprocessing is missing.
pytest has already imported everything from joblib. The most reasonable way
    to test importing joblib with a modified environment is to invoke a separate
Python process. This also ensures that we don't break other tests by
importing a bad `_multiprocessing` module.
"""
(tmp_path / "_multiprocessing.py").write_text(
'raise ImportError("No _multiprocessing module!")'
)
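    # The stub above shadows the stdlib C extension once tmp_path is prepended to
    # PYTHONPATH, so importing _multiprocessing fails, simulating a
    # single-threaded build such as Pyodide.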
env = dict(os.environ)
# For subprocess, use current sys.path with our custom version of
# multiprocessing inserted.
env["PYTHONPATH"] = ":".join([str(tmp_path)] + sys.path)
subprocess.check_call(
[sys.executable, "-c",
"import joblib, math; "
"joblib.Parallel(n_jobs=1)("
"joblib.delayed(math.sqrt)(i**2) for i in range(10))"
], env=env)
| {
"content_hash": "f062d5bc403934ad292af8ee76e41ae1",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 79,
"avg_line_length": 35.09375,
"alnum_prop": 0.6829919857524488,
"repo_name": "joblib/joblib",
"id": "251925ced5208b4aaf09d9aab305eb44c7102818",
"size": "1123",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "joblib/test/test_missing_multiprocessing.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Cython",
"bytes": "390"
},
{
"name": "Makefile",
"bytes": "363"
},
{
"name": "Python",
"bytes": "622066"
},
{
"name": "Shell",
"bytes": "9122"
}
],
"symlink_target": ""
} |
import bz2
import datetime as dt
from datetime import datetime
import gzip
import io
import lzma
import os
import struct
import warnings
import zipfile
import numpy as np
import pytest
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
import pandas._testing as tm
from pandas.core.frame import DataFrame, Series
from pandas.io.parsers import read_csv
from pandas.io.stata import (
CategoricalConversionWarning,
InvalidColumnName,
PossiblePrecisionLoss,
StataMissingValue,
StataReader,
StataWriterUTF8,
read_stata,
)
@pytest.fixture()
def mixed_frame():
return DataFrame(
{
"a": [1, 2, 3, 4],
"b": [1.0, 3.0, 27.0, 81.0],
"c": ["Atlanta", "Birmingham", "Cincinnati", "Detroit"],
}
)
@pytest.fixture
def dirpath(datapath):
return datapath("io", "data", "stata")
@pytest.fixture
def parsed_114(dirpath):
dta14_114 = os.path.join(dirpath, "stata5_114.dta")
parsed_114 = read_stata(dta14_114, convert_dates=True)
parsed_114.index.name = "index"
return parsed_114
class TestStata:
@pytest.fixture(autouse=True)
def setup_method(self, datapath):
self.dirpath = datapath("io", "data", "stata")
self.dta1_114 = os.path.join(self.dirpath, "stata1_114.dta")
self.dta1_117 = os.path.join(self.dirpath, "stata1_117.dta")
self.dta2_113 = os.path.join(self.dirpath, "stata2_113.dta")
self.dta2_114 = os.path.join(self.dirpath, "stata2_114.dta")
self.dta2_115 = os.path.join(self.dirpath, "stata2_115.dta")
self.dta2_117 = os.path.join(self.dirpath, "stata2_117.dta")
self.dta3_113 = os.path.join(self.dirpath, "stata3_113.dta")
self.dta3_114 = os.path.join(self.dirpath, "stata3_114.dta")
self.dta3_115 = os.path.join(self.dirpath, "stata3_115.dta")
self.dta3_117 = os.path.join(self.dirpath, "stata3_117.dta")
self.csv3 = os.path.join(self.dirpath, "stata3.csv")
self.dta4_113 = os.path.join(self.dirpath, "stata4_113.dta")
self.dta4_114 = os.path.join(self.dirpath, "stata4_114.dta")
self.dta4_115 = os.path.join(self.dirpath, "stata4_115.dta")
self.dta4_117 = os.path.join(self.dirpath, "stata4_117.dta")
self.dta_encoding = os.path.join(self.dirpath, "stata1_encoding.dta")
self.dta_encoding_118 = os.path.join(self.dirpath, "stata1_encoding_118.dta")
self.csv14 = os.path.join(self.dirpath, "stata5.csv")
self.dta14_113 = os.path.join(self.dirpath, "stata5_113.dta")
self.dta14_114 = os.path.join(self.dirpath, "stata5_114.dta")
self.dta14_115 = os.path.join(self.dirpath, "stata5_115.dta")
self.dta14_117 = os.path.join(self.dirpath, "stata5_117.dta")
self.csv15 = os.path.join(self.dirpath, "stata6.csv")
self.dta15_113 = os.path.join(self.dirpath, "stata6_113.dta")
self.dta15_114 = os.path.join(self.dirpath, "stata6_114.dta")
self.dta15_115 = os.path.join(self.dirpath, "stata6_115.dta")
self.dta15_117 = os.path.join(self.dirpath, "stata6_117.dta")
self.dta16_115 = os.path.join(self.dirpath, "stata7_115.dta")
self.dta16_117 = os.path.join(self.dirpath, "stata7_117.dta")
self.dta17_113 = os.path.join(self.dirpath, "stata8_113.dta")
self.dta17_115 = os.path.join(self.dirpath, "stata8_115.dta")
self.dta17_117 = os.path.join(self.dirpath, "stata8_117.dta")
self.dta18_115 = os.path.join(self.dirpath, "stata9_115.dta")
self.dta18_117 = os.path.join(self.dirpath, "stata9_117.dta")
self.dta19_115 = os.path.join(self.dirpath, "stata10_115.dta")
self.dta19_117 = os.path.join(self.dirpath, "stata10_117.dta")
self.dta20_115 = os.path.join(self.dirpath, "stata11_115.dta")
self.dta20_117 = os.path.join(self.dirpath, "stata11_117.dta")
self.dta21_117 = os.path.join(self.dirpath, "stata12_117.dta")
self.dta22_118 = os.path.join(self.dirpath, "stata14_118.dta")
self.dta23 = os.path.join(self.dirpath, "stata15.dta")
self.dta24_111 = os.path.join(self.dirpath, "stata7_111.dta")
self.dta25_118 = os.path.join(self.dirpath, "stata16_118.dta")
self.dta26_119 = os.path.join(self.dirpath, "stata1_119.dta.gz")
self.stata_dates = os.path.join(self.dirpath, "stata13_dates.dta")
def read_dta(self, file):
# Legacy default reader configuration
return read_stata(file, convert_dates=True)
def read_csv(self, file):
return read_csv(file, parse_dates=True)
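    # 114/117/118/119 below are Stata .dta format versions; None exercises
    # to_stata's default.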
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
def test_read_empty_dta(self, version):
empty_ds = DataFrame(columns=["unit"])
# GH 7369, make sure can read a 0-obs dta file
with tm.ensure_clean() as path:
empty_ds.to_stata(path, write_index=False, version=version)
empty_ds2 = read_stata(path)
tm.assert_frame_equal(empty_ds, empty_ds2)
@pytest.mark.parametrize("file", ["dta1_114", "dta1_117"])
def test_read_dta1(self, file):
file = getattr(self, file)
parsed = self.read_dta(file)
# Pandas uses np.nan as missing value.
# Thus, all columns will be of type float, regardless of their name.
expected = DataFrame(
[(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=["float_miss", "double_miss", "byte_miss", "int_miss", "long_miss"],
)
# this is an oddity as really the nan should be float64, but
# the casting doesn't fail so need to match stata here
expected["float_miss"] = expected["float_miss"].astype(np.float32)
tm.assert_frame_equal(parsed, expected)
def test_read_dta2(self):
expected = DataFrame.from_records(
[
(
datetime(2006, 11, 19, 23, 13, 20),
1479596223000,
datetime(2010, 1, 20),
datetime(2010, 1, 8),
datetime(2010, 1, 1),
datetime(1974, 7, 1),
datetime(2010, 1, 1),
datetime(2010, 1, 1),
),
(
datetime(1959, 12, 31, 20, 3, 20),
-1479590,
datetime(1953, 10, 2),
datetime(1948, 6, 10),
datetime(1955, 1, 1),
datetime(1955, 7, 1),
datetime(1955, 1, 1),
datetime(2, 1, 1),
),
(pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT),
],
columns=[
"datetime_c",
"datetime_big_c",
"date",
"weekly_date",
"monthly_date",
"quarterly_date",
"half_yearly_date",
"yearly_date",
],
)
expected["yearly_date"] = expected["yearly_date"].astype("O")
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
parsed_114 = self.read_dta(self.dta2_114)
parsed_115 = self.read_dta(self.dta2_115)
parsed_117 = self.read_dta(self.dta2_117)
# 113 is buggy due to limits of date format support in Stata
# parsed_113 = self.read_dta(self.dta2_113)
# Remove resource warnings
w = [x for x in w if x.category is UserWarning]
# should get warning for each call to read_dta
assert len(w) == 3
# buggy test because of the NaT comparison on certain platforms
# Format 113 test fails since it does not support tc and tC formats
# tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected, check_datetimelike_compat=True)
tm.assert_frame_equal(parsed_115, expected, check_datetimelike_compat=True)
tm.assert_frame_equal(parsed_117, expected, check_datetimelike_compat=True)
@pytest.mark.parametrize("file", ["dta3_113", "dta3_114", "dta3_115", "dta3_117"])
def test_read_dta3(self, file):
file = getattr(self, file)
parsed = self.read_dta(file)
# match stata here
expected = self.read_csv(self.csv3)
expected = expected.astype(np.float32)
expected["year"] = expected["year"].astype(np.int16)
expected["quarter"] = expected["quarter"].astype(np.int8)
tm.assert_frame_equal(parsed, expected)
@pytest.mark.parametrize("file", ["dta4_113", "dta4_114", "dta4_115", "dta4_117"])
def test_read_dta4(self, file):
file = getattr(self, file)
parsed = self.read_dta(file)
expected = DataFrame.from_records(
[
["one", "ten", "one", "one", "one"],
["two", "nine", "two", "two", "two"],
["three", "eight", "three", "three", "three"],
["four", "seven", 4, "four", "four"],
["five", "six", 5, np.nan, "five"],
["six", "five", 6, np.nan, "six"],
["seven", "four", 7, np.nan, "seven"],
["eight", "three", 8, np.nan, "eight"],
["nine", "two", 9, np.nan, "nine"],
["ten", "one", "ten", np.nan, "ten"],
],
columns=[
"fully_labeled",
"fully_labeled2",
"incompletely_labeled",
"labeled_with_missings",
"float_labelled",
],
)
# these are all categoricals
for col in expected:
orig = expected[col].copy()
categories = np.asarray(expected["fully_labeled"][orig.notna()])
if col == "incompletely_labeled":
categories = orig
cat = orig.astype("category")._values
cat = cat.set_categories(categories, ordered=True)
cat.categories.rename(None, inplace=True)
expected[col] = cat
# stata doesn't save .category metadata
tm.assert_frame_equal(parsed, expected)
# File containing strls
def test_read_dta12(self):
parsed_117 = self.read_dta(self.dta21_117)
expected = DataFrame.from_records(
[
[1, "abc", "abcdefghi"],
[3, "cba", "qwertywertyqwerty"],
[93, "", "strl"],
],
columns=["x", "y", "z"],
)
tm.assert_frame_equal(parsed_117, expected, check_dtype=False)
def test_read_dta18(self):
parsed_118 = self.read_dta(self.dta22_118)
parsed_118["Bytes"] = parsed_118["Bytes"].astype("O")
expected = DataFrame.from_records(
[
["Cat", "Bogota", "Bogotá", 1, 1.0, "option b Ünicode", 1.0],
["Dog", "Boston", "Uzunköprü", np.nan, np.nan, np.nan, np.nan],
["Plane", "Rome", "Tromsø", 0, 0.0, "option a", 0.0],
["Potato", "Tokyo", "Elâzığ", -4, 4.0, 4, 4],
["", "", "", 0, 0.3332999, "option a", 1 / 3.0],
],
columns=[
"Things",
"Cities",
"Unicode_Cities_Strl",
"Ints",
"Floats",
"Bytes",
"Longs",
],
)
expected["Floats"] = expected["Floats"].astype(np.float32)
for col in parsed_118.columns:
tm.assert_almost_equal(parsed_118[col], expected[col])
with StataReader(self.dta22_118) as rdr:
vl = rdr.variable_labels()
vl_expected = {
"Unicode_Cities_Strl": "Here are some strls with Ünicode chars",
"Longs": "long data",
"Things": "Here are some things",
"Bytes": "byte data",
"Ints": "int data",
"Cities": "Here are some cities",
"Floats": "float data",
}
tm.assert_dict_equal(vl, vl_expected)
assert rdr.data_label == "This is a Ünicode data label"
def test_read_write_dta5(self):
original = DataFrame(
[(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=["float_miss", "double_miss", "byte_miss", "int_miss", "long_miss"],
)
original.index.name = "index"
with tm.ensure_clean() as path:
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index("index"), original)
def test_write_dta6(self):
original = self.read_csv(self.csv3)
original.index.name = "index"
original.index = original.index.astype(np.int32)
original["year"] = original["year"].astype(np.int32)
original["quarter"] = original["quarter"].astype(np.int32)
with tm.ensure_clean() as path:
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(
written_and_read_again.set_index("index"),
original,
check_index_type=False,
)
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
def test_read_write_dta10(self, version):
original = DataFrame(
data=[["string", "object", 1, 1.1, np.datetime64("2003-12-25")]],
columns=["string", "object", "integer", "floating", "datetime"],
)
original["object"] = Series(original["object"], dtype=object)
original.index.name = "index"
original.index = original.index.astype(np.int32)
original["integer"] = original["integer"].astype(np.int32)
with tm.ensure_clean() as path:
original.to_stata(path, {"datetime": "tc"}, version=version)
written_and_read_again = self.read_dta(path)
# original.index is np.int32, read index is np.int64
tm.assert_frame_equal(
written_and_read_again.set_index("index"),
original,
check_index_type=False,
)
def test_stata_doc_examples(self):
with tm.ensure_clean() as path:
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df.to_stata(path)
def test_write_preserves_original(self):
# 9795
np.random.seed(423)
df = DataFrame(np.random.randn(5, 4), columns=list("abcd"))
df.loc[2, "a":"c"] = np.nan
df_copy = df.copy()
with tm.ensure_clean() as path:
df.to_stata(path, write_index=False)
tm.assert_frame_equal(df, df_copy)
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
def test_encoding(self, version):
# GH 4626, proper encoding handling
raw = read_stata(self.dta_encoding)
encoded = read_stata(self.dta_encoding)
result = encoded.kreis1849[0]
expected = raw.kreis1849[0]
assert result == expected
assert isinstance(result, str)
with tm.ensure_clean() as path:
encoded.to_stata(path, write_index=False, version=version)
reread_encoded = read_stata(path)
tm.assert_frame_equal(encoded, reread_encoded)
def test_read_write_dta11(self):
original = DataFrame(
[(1, 2, 3, 4)],
columns=[
"good",
"b\u00E4d",
"8number",
"astringwithmorethan32characters______",
],
)
formatted = DataFrame(
[(1, 2, 3, 4)],
columns=["good", "b_d", "_8number", "astringwithmorethan32characters_"],
)
formatted.index.name = "index"
formatted = formatted.astype(np.int32)
with tm.ensure_clean() as path:
with tm.assert_produces_warning(pd.io.stata.InvalidColumnName):
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index("index"), formatted)
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
def test_read_write_dta12(self, version):
original = DataFrame(
[(1, 2, 3, 4, 5, 6)],
columns=[
"astringwithmorethan32characters_1",
"astringwithmorethan32characters_2",
"+",
"-",
"short",
"delete",
],
)
formatted = DataFrame(
[(1, 2, 3, 4, 5, 6)],
columns=[
"astringwithmorethan32characters_",
"_0astringwithmorethan32character",
"_",
"_1_",
"_short",
"_delete",
],
)
formatted.index.name = "index"
formatted = formatted.astype(np.int32)
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", InvalidColumnName)
original.to_stata(path, None, version=version)
                # should get a warning for the renamed columns
assert len(w) == 1
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index("index"), formatted)
def test_read_write_dta13(self):
s1 = Series(2 ** 9, dtype=np.int16)
s2 = Series(2 ** 17, dtype=np.int32)
s3 = Series(2 ** 33, dtype=np.int64)
original = DataFrame({"int16": s1, "int32": s2, "int64": s3})
original.index.name = "index"
formatted = original
formatted["int64"] = formatted["int64"].astype(np.float64)
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index("index"), formatted)
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
@pytest.mark.parametrize(
"file", ["dta14_113", "dta14_114", "dta14_115", "dta14_117"]
)
def test_read_write_reread_dta14(self, file, parsed_114, version):
file = getattr(self, file)
parsed = self.read_dta(file)
parsed.index.name = "index"
expected = self.read_csv(self.csv14)
cols = ["byte_", "int_", "long_", "float_", "double_"]
for col in cols:
expected[col] = expected[col]._convert(datetime=True, numeric=True)
expected["float_"] = expected["float_"].astype(np.float32)
expected["date_td"] = pd.to_datetime(expected["date_td"], errors="coerce")
tm.assert_frame_equal(parsed_114, parsed)
with tm.ensure_clean() as path:
parsed_114.to_stata(path, {"date_td": "td"}, version=version)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index("index"), parsed_114)
@pytest.mark.parametrize(
"file", ["dta15_113", "dta15_114", "dta15_115", "dta15_117"]
)
def test_read_write_reread_dta15(self, file):
expected = self.read_csv(self.csv15)
expected["byte_"] = expected["byte_"].astype(np.int8)
expected["int_"] = expected["int_"].astype(np.int16)
expected["long_"] = expected["long_"].astype(np.int32)
expected["float_"] = expected["float_"].astype(np.float32)
expected["double_"] = expected["double_"].astype(np.float64)
expected["date_td"] = expected["date_td"].apply(
datetime.strptime, args=("%Y-%m-%d",)
)
file = getattr(self, file)
parsed = self.read_dta(file)
tm.assert_frame_equal(expected, parsed)
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
def test_timestamp_and_label(self, version):
original = DataFrame([(1,)], columns=["variable"])
time_stamp = datetime(2000, 2, 29, 14, 21)
data_label = "This is a data file."
with tm.ensure_clean() as path:
original.to_stata(
path, time_stamp=time_stamp, data_label=data_label, version=version
)
with StataReader(path) as reader:
assert reader.time_stamp == "29 Feb 2000 14:21"
assert reader.data_label == data_label
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
def test_invalid_timestamp(self, version):
original = DataFrame([(1,)], columns=["variable"])
time_stamp = "01 Jan 2000, 00:00:00"
with tm.ensure_clean() as path:
msg = "time_stamp should be datetime type"
with pytest.raises(ValueError, match=msg):
original.to_stata(path, time_stamp=time_stamp, version=version)
def test_numeric_column_names(self):
original = DataFrame(np.reshape(np.arange(25.0), (5, 5)))
original.index.name = "index"
with tm.ensure_clean() as path:
            # should get a warning for the invalid (numeric) column names
with tm.assert_produces_warning(InvalidColumnName):
original.to_stata(path)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index("index")
columns = list(written_and_read_again.columns)
convert_col_name = lambda x: int(x[1])
written_and_read_again.columns = map(convert_col_name, columns)
tm.assert_frame_equal(original, written_and_read_again)
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
def test_nan_to_missing_value(self, version):
s1 = Series(np.arange(4.0), dtype=np.float32)
s2 = Series(np.arange(4.0), dtype=np.float64)
s1[::2] = np.nan
s2[1::2] = np.nan
original = DataFrame({"s1": s1, "s2": s2})
original.index.name = "index"
with tm.ensure_clean() as path:
original.to_stata(path, version=version)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index("index")
tm.assert_frame_equal(written_and_read_again, original)
def test_no_index(self):
columns = ["x", "y"]
original = DataFrame(np.reshape(np.arange(10.0), (5, 2)), columns=columns)
original.index.name = "index_not_written"
with tm.ensure_clean() as path:
original.to_stata(path, write_index=False)
written_and_read_again = self.read_dta(path)
with pytest.raises(KeyError, match=original.index.name):
written_and_read_again["index_not_written"]
def test_string_no_dates(self):
s1 = Series(["a", "A longer string"])
s2 = Series([1.0, 2.0], dtype=np.float64)
original = DataFrame({"s1": s1, "s2": s2})
original.index.name = "index"
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index("index"), original)
def test_large_value_conversion(self):
s0 = Series([1, 99], dtype=np.int8)
s1 = Series([1, 127], dtype=np.int8)
s2 = Series([1, 2 ** 15 - 1], dtype=np.int16)
s3 = Series([1, 2 ** 63 - 1], dtype=np.int64)
original = DataFrame({"s0": s0, "s1": s1, "s2": s2, "s3": s3})
original.index.name = "index"
with tm.ensure_clean() as path:
with tm.assert_produces_warning(PossiblePrecisionLoss):
original.to_stata(path)
written_and_read_again = self.read_dta(path)
modified = original.copy()
modified["s1"] = Series(modified["s1"], dtype=np.int16)
modified["s2"] = Series(modified["s2"], dtype=np.int32)
modified["s3"] = Series(modified["s3"], dtype=np.float64)
tm.assert_frame_equal(written_and_read_again.set_index("index"), modified)
def test_dates_invalid_column(self):
original = DataFrame([datetime(2006, 11, 19, 23, 13, 20)])
original.index.name = "index"
with tm.ensure_clean() as path:
with tm.assert_produces_warning(InvalidColumnName):
original.to_stata(path, {0: "tc"})
written_and_read_again = self.read_dta(path)
modified = original.copy()
modified.columns = ["_0"]
tm.assert_frame_equal(written_and_read_again.set_index("index"), modified)
def test_105(self):
# Data obtained from:
# http://go.worldbank.org/ZXY29PVJ21
dpath = os.path.join(self.dirpath, "S4_EDUC1.dta")
df = pd.read_stata(dpath)
df0 = [[1, 1, 3, -2], [2, 1, 2, -2], [4, 1, 1, -2]]
df0 = DataFrame(df0)
df0.columns = ["clustnum", "pri_schl", "psch_num", "psch_dis"]
df0["clustnum"] = df0["clustnum"].astype(np.int16)
df0["pri_schl"] = df0["pri_schl"].astype(np.int8)
df0["psch_num"] = df0["psch_num"].astype(np.int8)
df0["psch_dis"] = df0["psch_dis"].astype(np.float32)
tm.assert_frame_equal(df.head(3), df0)
def test_value_labels_old_format(self):
# GH 19417
#
# Test that value_labels() returns an empty dict if the file format
# predates supporting value labels.
dpath = os.path.join(self.dirpath, "S4_EDUC1.dta")
reader = StataReader(dpath)
assert reader.value_labels() == {}
reader.close()
def test_date_export_formats(self):
columns = ["tc", "td", "tw", "tm", "tq", "th", "ty"]
conversions = {c: c for c in columns}
data = [datetime(2006, 11, 20, 23, 13, 20)] * len(columns)
original = DataFrame([data], columns=columns)
original.index.name = "index"
expected_values = [
datetime(2006, 11, 20, 23, 13, 20), # Time
datetime(2006, 11, 20), # Day
datetime(2006, 11, 19), # Week
datetime(2006, 11, 1), # Month
datetime(2006, 10, 1), # Quarter year
datetime(2006, 7, 1), # Half year
            datetime(2006, 1, 1),  # Year
        ]
expected = DataFrame([expected_values], columns=columns)
expected.index.name = "index"
with tm.ensure_clean() as path:
original.to_stata(path, conversions)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index("index"), expected)
def test_write_missing_strings(self):
original = DataFrame([["1"], [None]], columns=["foo"])
expected = DataFrame([["1"], [""]], columns=["foo"])
expected.index.name = "index"
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index("index"), expected)
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
@pytest.mark.parametrize("byteorder", [">", "<"])
def test_bool_uint(self, byteorder, version):
s0 = Series([0, 1, True], dtype=np.bool_)
s1 = Series([0, 1, 100], dtype=np.uint8)
s2 = Series([0, 1, 255], dtype=np.uint8)
s3 = Series([0, 1, 2 ** 15 - 100], dtype=np.uint16)
s4 = Series([0, 1, 2 ** 16 - 1], dtype=np.uint16)
s5 = Series([0, 1, 2 ** 31 - 100], dtype=np.uint32)
s6 = Series([0, 1, 2 ** 32 - 1], dtype=np.uint32)
original = DataFrame(
{"s0": s0, "s1": s1, "s2": s2, "s3": s3, "s4": s4, "s5": s5, "s6": s6}
)
original.index.name = "index"
expected = original.copy()
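        # bool/unsigned columns are upcast to the smallest signed type that can
        # hold their maximum value; values too large for int32 fall back to
        # float64, since the dta format has no 64-bit integer type.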
expected_types = (
np.int8,
np.int8,
np.int16,
np.int16,
np.int32,
np.int32,
np.float64,
)
for c, t in zip(expected.columns, expected_types):
expected[c] = expected[c].astype(t)
with tm.ensure_clean() as path:
original.to_stata(path, byteorder=byteorder, version=version)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index("index")
tm.assert_frame_equal(written_and_read_again, expected)
def test_variable_labels(self):
with StataReader(self.dta16_115) as rdr:
sr_115 = rdr.variable_labels()
with StataReader(self.dta16_117) as rdr:
sr_117 = rdr.variable_labels()
keys = ("var1", "var2", "var3")
labels = ("label1", "label2", "label3")
for k, v in sr_115.items():
assert k in sr_117
assert v == sr_117[k]
assert k in keys
assert v in labels
def test_minimal_size_col(self):
str_lens = (1, 100, 244)
s = {}
for str_len in str_lens:
s["s" + str(str_len)] = Series(
["a" * str_len, "b" * str_len, "c" * str_len]
)
original = DataFrame(s)
with tm.ensure_clean() as path:
original.to_stata(path, write_index=False)
with StataReader(path) as sr:
typlist = sr.typlist
variables = sr.varlist
formats = sr.fmtlist
for variable, fmt, typ in zip(variables, formats, typlist):
assert int(variable[1:]) == int(fmt[1:-1])
assert int(variable[1:]) == typ
def test_excessively_long_string(self):
str_lens = (1, 244, 500)
s = {}
for str_len in str_lens:
s["s" + str(str_len)] = Series(
["a" * str_len, "b" * str_len, "c" * str_len]
)
original = DataFrame(s)
msg = (
r"Fixed width strings in Stata \.dta files are limited to 244 "
r"\(or fewer\)\ncharacters\. Column 's500' does not satisfy "
r"this restriction\. Use the\n'version=117' parameter to write "
r"the newer \(Stata 13 and later\) format\."
)
with pytest.raises(ValueError, match=msg):
with tm.ensure_clean() as path:
original.to_stata(path)
def test_missing_value_generator(self):
types = ("b", "h", "l")
df = DataFrame([[0.0]], columns=["float_"])
with tm.ensure_clean() as path:
df.to_stata(path)
with StataReader(path) as rdr:
valid_range = rdr.VALID_RANGE
expected_values = ["." + chr(97 + i) for i in range(26)]
expected_values.insert(0, ".")
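        # Stata defines 27 missing sentinels per integer type: "." plus ".a"
        # through ".z", stored immediately above the type's valid data range.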
for t in types:
offset = valid_range[t][1]
for i in range(0, 27):
val = StataMissingValue(offset + 1 + i)
assert val.string == expected_values[i]
# Test extremes for floats
val = StataMissingValue(struct.unpack("<f", b"\x00\x00\x00\x7f")[0])
assert val.string == "."
val = StataMissingValue(struct.unpack("<f", b"\x00\xd0\x00\x7f")[0])
assert val.string == ".z"
        # Test extremes for doubles
val = StataMissingValue(
struct.unpack("<d", b"\x00\x00\x00\x00\x00\x00\xe0\x7f")[0]
)
assert val.string == "."
val = StataMissingValue(
struct.unpack("<d", b"\x00\x00\x00\x00\x00\x1a\xe0\x7f")[0]
)
assert val.string == ".z"
@pytest.mark.parametrize("file", ["dta17_113", "dta17_115", "dta17_117"])
def test_missing_value_conversion(self, file):
columns = ["int8_", "int16_", "int32_", "float32_", "float64_"]
smv = StataMissingValue(101)
keys = sorted(smv.MISSING_VALUES.keys())
data = []
for i in range(27):
row = [StataMissingValue(keys[i + (j * 27)]) for j in range(5)]
data.append(row)
expected = DataFrame(data, columns=columns)
parsed = read_stata(getattr(self, file), convert_missing=True)
tm.assert_frame_equal(parsed, expected)
def test_big_dates(self):
yr = [1960, 2000, 9999, 100, 2262, 1677]
mo = [1, 1, 12, 1, 4, 9]
dd = [1, 1, 31, 1, 22, 23]
hr = [0, 0, 23, 0, 0, 0]
mm = [0, 0, 59, 0, 0, 0]
ss = [0, 0, 59, 0, 0, 0]
expected = []
for i in range(len(yr)):
row = []
for j in range(7):
if j == 0:
row.append(datetime(yr[i], mo[i], dd[i], hr[i], mm[i], ss[i]))
elif j == 6:
row.append(datetime(yr[i], 1, 1))
else:
row.append(datetime(yr[i], mo[i], dd[i]))
expected.append(row)
expected.append([pd.NaT] * 7)
columns = [
"date_tc",
"date_td",
"date_tw",
"date_tm",
"date_tq",
"date_th",
"date_ty",
]
        # Fixes for weekly, quarterly, half-yearly and yearly dates
expected[2][2] = datetime(9999, 12, 24)
expected[2][3] = datetime(9999, 12, 1)
expected[2][4] = datetime(9999, 10, 1)
expected[2][5] = datetime(9999, 7, 1)
expected[4][2] = datetime(2262, 4, 16)
expected[4][3] = expected[4][4] = datetime(2262, 4, 1)
expected[4][5] = expected[4][6] = datetime(2262, 1, 1)
expected[5][2] = expected[5][3] = expected[5][4] = datetime(1677, 10, 1)
expected[5][5] = expected[5][6] = datetime(1678, 1, 1)
expected = DataFrame(expected, columns=columns, dtype=object)
parsed_115 = read_stata(self.dta18_115)
parsed_117 = read_stata(self.dta18_117)
tm.assert_frame_equal(expected, parsed_115, check_datetimelike_compat=True)
tm.assert_frame_equal(expected, parsed_117, check_datetimelike_compat=True)
date_conversion = {c: c[-2:] for c in columns}
with tm.ensure_clean() as path:
expected.index.name = "index"
expected.to_stata(path, date_conversion)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(
written_and_read_again.set_index("index"),
expected,
check_datetimelike_compat=True,
)
def test_dtype_conversion(self):
expected = self.read_csv(self.csv15)
expected["byte_"] = expected["byte_"].astype(np.int8)
expected["int_"] = expected["int_"].astype(np.int16)
expected["long_"] = expected["long_"].astype(np.int32)
expected["float_"] = expected["float_"].astype(np.float32)
expected["double_"] = expected["double_"].astype(np.float64)
expected["date_td"] = expected["date_td"].apply(
datetime.strptime, args=("%Y-%m-%d",)
)
no_conversion = read_stata(self.dta15_117, convert_dates=True)
tm.assert_frame_equal(expected, no_conversion)
conversion = read_stata(
self.dta15_117, convert_dates=True, preserve_dtypes=False
)
# read_csv types are the same
expected = self.read_csv(self.csv15)
expected["date_td"] = expected["date_td"].apply(
datetime.strptime, args=("%Y-%m-%d",)
)
tm.assert_frame_equal(expected, conversion)
def test_drop_column(self):
expected = self.read_csv(self.csv15)
expected["byte_"] = expected["byte_"].astype(np.int8)
expected["int_"] = expected["int_"].astype(np.int16)
expected["long_"] = expected["long_"].astype(np.int32)
expected["float_"] = expected["float_"].astype(np.float32)
expected["double_"] = expected["double_"].astype(np.float64)
expected["date_td"] = expected["date_td"].apply(
datetime.strptime, args=("%Y-%m-%d",)
)
columns = ["byte_", "int_", "long_"]
expected = expected[columns]
dropped = read_stata(self.dta15_117, convert_dates=True, columns=columns)
tm.assert_frame_equal(expected, dropped)
# See PR 10757
columns = ["int_", "long_", "byte_"]
expected = expected[columns]
reordered = read_stata(self.dta15_117, convert_dates=True, columns=columns)
tm.assert_frame_equal(expected, reordered)
msg = "columns contains duplicate entries"
with pytest.raises(ValueError, match=msg):
columns = ["byte_", "byte_"]
read_stata(self.dta15_117, convert_dates=True, columns=columns)
msg = "The following columns were not found in the Stata data set: not_found"
with pytest.raises(ValueError, match=msg):
columns = ["byte_", "int_", "long_", "not_found"]
read_stata(self.dta15_117, convert_dates=True, columns=columns)
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
@pytest.mark.filterwarnings(
"ignore:\\nStata value:pandas.io.stata.ValueLabelTypeMismatch"
)
def test_categorical_writing(self, version):
original = DataFrame.from_records(
[
["one", "ten", "one", "one", "one", 1],
["two", "nine", "two", "two", "two", 2],
["three", "eight", "three", "three", "three", 3],
["four", "seven", 4, "four", "four", 4],
["five", "six", 5, np.nan, "five", 5],
["six", "five", 6, np.nan, "six", 6],
["seven", "four", 7, np.nan, "seven", 7],
["eight", "three", 8, np.nan, "eight", 8],
["nine", "two", 9, np.nan, "nine", 9],
["ten", "one", "ten", np.nan, "ten", 10],
],
columns=[
"fully_labeled",
"fully_labeled2",
"incompletely_labeled",
"labeled_with_missings",
"float_labelled",
"unlabeled",
],
)
expected = original.copy()
# these are all categoricals
original = pd.concat(
[original[col].astype("category") for col in original], axis=1
)
expected.index.name = "index"
expected["incompletely_labeled"] = expected["incompletely_labeled"].apply(str)
expected["unlabeled"] = expected["unlabeled"].apply(str)
for col in expected:
orig = expected[col].copy()
cat = orig.astype("category")._values
cat = cat.as_ordered()
if col == "unlabeled":
cat = cat.set_categories(orig, ordered=True)
cat.categories.rename(None, inplace=True)
expected[col] = cat
with tm.ensure_clean() as path:
original.to_stata(path, version=version)
written_and_read_again = self.read_dta(path)
res = written_and_read_again.set_index("index")
tm.assert_frame_equal(res, expected)
def test_categorical_warnings_and_errors(self):
# Warning for non-string labels
# Error for labels too long
original = DataFrame.from_records(
[["a" * 10000], ["b" * 10000], ["c" * 10000], ["d" * 10000]],
columns=["Too_long"],
)
original = pd.concat(
[original[col].astype("category") for col in original], axis=1
)
with tm.ensure_clean() as path:
msg = (
"Stata value labels for a single variable must have "
r"a combined length less than 32,000 characters\."
)
with pytest.raises(ValueError, match=msg):
original.to_stata(path)
original = DataFrame.from_records(
[["a"], ["b"], ["c"], ["d"], [1]], columns=["Too_long"]
)
original = pd.concat(
[original[col].astype("category") for col in original], axis=1
)
with tm.assert_produces_warning(pd.io.stata.ValueLabelTypeMismatch):
original.to_stata(path)
# should get a warning for mixed content
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
def test_categorical_with_stata_missing_values(self, version):
values = [["a" + str(i)] for i in range(120)]
values.append([np.nan])
original = DataFrame.from_records(values, columns=["many_labels"])
original = pd.concat(
[original[col].astype("category") for col in original], axis=1
)
original.index.name = "index"
with tm.ensure_clean() as path:
original.to_stata(path, version=version)
written_and_read_again = self.read_dta(path)
res = written_and_read_again.set_index("index")
expected = original.copy()
for col in expected:
cat = expected[col]._values
new_cats = cat.remove_unused_categories().categories
cat = cat.set_categories(new_cats, ordered=True)
expected[col] = cat
tm.assert_frame_equal(res, expected)
@pytest.mark.parametrize("file", ["dta19_115", "dta19_117"])
def test_categorical_order(self, file):
# Directly construct using expected codes
# Format is is_cat, col_name, labels (in order), underlying data
expected = [
(True, "ordered", ["a", "b", "c", "d", "e"], np.arange(5)),
(True, "reverse", ["a", "b", "c", "d", "e"], np.arange(5)[::-1]),
(True, "noorder", ["a", "b", "c", "d", "e"], np.array([2, 1, 4, 0, 3])),
(True, "floating", ["a", "b", "c", "d", "e"], np.arange(0, 5)),
(True, "float_missing", ["a", "d", "e"], np.array([0, 1, 2, -1, -1])),
(False, "nolabel", [1.0, 2.0, 3.0, 4.0, 5.0], np.arange(5)),
(True, "int32_mixed", ["d", 2, "e", "b", "a"], np.arange(5)),
]
cols = []
for is_cat, col, labels, codes in expected:
if is_cat:
cols.append(
(col, pd.Categorical.from_codes(codes, labels, ordered=True))
)
else:
cols.append((col, Series(labels, dtype=np.float32)))
expected = DataFrame.from_dict(dict(cols))
        # Read with and without categoricals, ensure order is identical
file = getattr(self, file)
parsed = read_stata(file)
tm.assert_frame_equal(expected, parsed)
# Check identity of codes
for col in expected:
if is_categorical_dtype(expected[col].dtype):
tm.assert_series_equal(expected[col].cat.codes, parsed[col].cat.codes)
tm.assert_index_equal(
expected[col].cat.categories, parsed[col].cat.categories
)
@pytest.mark.parametrize("file", ["dta20_115", "dta20_117"])
def test_categorical_sorting(self, file):
parsed = read_stata(getattr(self, file))
# Sort based on codes, not strings
parsed = parsed.sort_values("srh", na_position="first")
# Don't sort index
parsed.index = np.arange(parsed.shape[0])
codes = [-1, -1, 0, 1, 1, 1, 2, 2, 3, 4]
categories = ["Poor", "Fair", "Good", "Very good", "Excellent"]
cat = pd.Categorical.from_codes(
codes=codes, categories=categories, ordered=True
)
expected = Series(cat, name="srh")
tm.assert_series_equal(expected, parsed["srh"])
@pytest.mark.parametrize("file", ["dta19_115", "dta19_117"])
def test_categorical_ordering(self, file):
file = getattr(self, file)
parsed = read_stata(file)
parsed_unordered = read_stata(file, order_categoricals=False)
for col in parsed:
if not is_categorical_dtype(parsed[col].dtype):
continue
assert parsed[col].cat.ordered
assert not parsed_unordered[col].cat.ordered
@pytest.mark.parametrize(
"file",
[
"dta1_117",
"dta2_117",
"dta3_117",
"dta4_117",
"dta14_117",
"dta15_117",
"dta16_117",
"dta17_117",
"dta18_117",
"dta19_117",
"dta20_117",
],
)
@pytest.mark.parametrize("chunksize", [1, 2])
@pytest.mark.parametrize("convert_categoricals", [False, True])
@pytest.mark.parametrize("convert_dates", [False, True])
def test_read_chunks_117(
self, file, chunksize, convert_categoricals, convert_dates
):
fname = getattr(self, file)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
parsed = read_stata(
fname,
convert_categoricals=convert_categoricals,
convert_dates=convert_dates,
)
itr = read_stata(
fname,
iterator=True,
convert_categoricals=convert_categoricals,
convert_dates=convert_dates,
)
pos = 0
for j in range(5):
with warnings.catch_warnings(record=True) as w: # noqa
warnings.simplefilter("always")
try:
chunk = itr.read(chunksize)
except StopIteration:
break
from_frame = parsed.iloc[pos : pos + chunksize, :].copy()
from_frame = self._convert_categorical(from_frame)
tm.assert_frame_equal(
from_frame, chunk, check_dtype=False, check_datetimelike_compat=True
)
pos += chunksize
itr.close()
@staticmethod
def _convert_categorical(from_frame: DataFrame) -> DataFrame:
"""
Emulate the categorical casting behavior we expect from roundtripping.
"""
for col in from_frame:
ser = from_frame[col]
if is_categorical_dtype(ser.dtype):
cat = ser._values.remove_unused_categories()
if cat.categories.dtype == object:
categories = pd.Index(cat.categories._values)
cat = cat.set_categories(categories)
from_frame[col] = cat
return from_frame
def test_iterator(self):
fname = self.dta3_117
parsed = read_stata(fname)
with read_stata(fname, iterator=True) as itr:
chunk = itr.read(5)
tm.assert_frame_equal(parsed.iloc[0:5, :], chunk)
with read_stata(fname, chunksize=5) as itr:
chunk = list(itr)
tm.assert_frame_equal(parsed.iloc[0:5, :], chunk[0])
with read_stata(fname, iterator=True) as itr:
chunk = itr.get_chunk(5)
tm.assert_frame_equal(parsed.iloc[0:5, :], chunk)
with read_stata(fname, chunksize=5) as itr:
chunk = itr.get_chunk()
tm.assert_frame_equal(parsed.iloc[0:5, :], chunk)
# GH12153
with read_stata(fname, chunksize=4) as itr:
from_chunks = pd.concat(itr)
tm.assert_frame_equal(parsed, from_chunks)
@pytest.mark.parametrize(
"file",
[
"dta2_115",
"dta3_115",
"dta4_115",
"dta14_115",
"dta15_115",
"dta16_115",
"dta17_115",
"dta18_115",
"dta19_115",
"dta20_115",
],
)
@pytest.mark.parametrize("chunksize", [1, 2])
@pytest.mark.parametrize("convert_categoricals", [False, True])
@pytest.mark.parametrize("convert_dates", [False, True])
def test_read_chunks_115(
self, file, chunksize, convert_categoricals, convert_dates
):
fname = getattr(self, file)
# Read the whole file
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
parsed = read_stata(
fname,
convert_categoricals=convert_categoricals,
convert_dates=convert_dates,
)
# Compare to what we get when reading by chunk
itr = read_stata(
fname,
iterator=True,
convert_dates=convert_dates,
convert_categoricals=convert_categoricals,
)
pos = 0
for j in range(5):
with warnings.catch_warnings(record=True) as w: # noqa
warnings.simplefilter("always")
try:
chunk = itr.read(chunksize)
except StopIteration:
break
from_frame = parsed.iloc[pos : pos + chunksize, :].copy()
from_frame = self._convert_categorical(from_frame)
tm.assert_frame_equal(
from_frame, chunk, check_dtype=False, check_datetimelike_compat=True
)
pos += chunksize
itr.close()
def test_read_chunks_columns(self):
fname = self.dta3_117
columns = ["quarter", "cpi", "m1"]
chunksize = 2
parsed = read_stata(fname, columns=columns)
with read_stata(fname, iterator=True) as itr:
pos = 0
for j in range(5):
chunk = itr.read(chunksize, columns=columns)
if chunk is None:
break
from_frame = parsed.iloc[pos : pos + chunksize, :]
tm.assert_frame_equal(from_frame, chunk, check_dtype=False)
pos += chunksize
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
def test_write_variable_labels(self, version, mixed_frame):
# GH 13631, add support for writing variable labels
mixed_frame.index.name = "index"
variable_labels = {"a": "City Rank", "b": "City Exponent", "c": "City"}
with tm.ensure_clean() as path:
mixed_frame.to_stata(path, variable_labels=variable_labels, version=version)
with StataReader(path) as sr:
read_labels = sr.variable_labels()
expected_labels = {
"index": "",
"a": "City Rank",
"b": "City Exponent",
"c": "City",
}
assert read_labels == expected_labels
variable_labels["index"] = "The Index"
with tm.ensure_clean() as path:
mixed_frame.to_stata(path, variable_labels=variable_labels, version=version)
with StataReader(path) as sr:
read_labels = sr.variable_labels()
assert read_labels == variable_labels
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
def test_invalid_variable_labels(self, version, mixed_frame):
mixed_frame.index.name = "index"
variable_labels = {"a": "very long" * 10, "b": "City Exponent", "c": "City"}
with tm.ensure_clean() as path:
msg = "Variable labels must be 80 characters or fewer"
with pytest.raises(ValueError, match=msg):
mixed_frame.to_stata(
path, variable_labels=variable_labels, version=version
)
@pytest.mark.parametrize("version", [114, 117])
def test_invalid_variable_label_encoding(self, version, mixed_frame):
mixed_frame.index.name = "index"
variable_labels = {"a": "very long" * 10, "b": "City Exponent", "c": "City"}
variable_labels["a"] = "invalid character Œ"
with tm.ensure_clean() as path:
with pytest.raises(
ValueError, match="Variable labels must contain only characters"
):
mixed_frame.to_stata(
path, variable_labels=variable_labels, version=version
)
def test_write_variable_label_errors(self, mixed_frame):
values = ["\u03A1", "\u0391", "\u039D", "\u0394", "\u0391", "\u03A3"]
variable_labels_utf8 = {
"a": "City Rank",
"b": "City Exponent",
"c": "".join(values),
}
msg = (
"Variable labels must contain only characters that can be "
"encoded in Latin-1"
)
with pytest.raises(ValueError, match=msg):
with tm.ensure_clean() as path:
mixed_frame.to_stata(path, variable_labels=variable_labels_utf8)
variable_labels_long = {
"a": "City Rank",
"b": "City Exponent",
"c": "A very, very, very long variable label "
"that is too long for Stata which means "
"that it has more than 80 characters",
}
msg = "Variable labels must be 80 characters or fewer"
with pytest.raises(ValueError, match=msg):
with tm.ensure_clean() as path:
mixed_frame.to_stata(path, variable_labels=variable_labels_long)
def test_default_date_conversion(self):
# GH 12259
dates = [
dt.datetime(1999, 12, 31, 12, 12, 12, 12000),
dt.datetime(2012, 12, 21, 12, 21, 12, 21000),
dt.datetime(1776, 7, 4, 7, 4, 7, 4000),
]
original = DataFrame(
{
"nums": [1.0, 2.0, 3.0],
"strs": ["apple", "banana", "cherry"],
"dates": dates,
}
)
with tm.ensure_clean() as path:
original.to_stata(path, write_index=False)
reread = read_stata(path, convert_dates=True)
tm.assert_frame_equal(original, reread)
original.to_stata(path, write_index=False, convert_dates={"dates": "tc"})
direct = read_stata(path, convert_dates=True)
tm.assert_frame_equal(reread, direct)
dates_idx = original.columns.tolist().index("dates")
original.to_stata(path, write_index=False, convert_dates={dates_idx: "tc"})
direct = read_stata(path, convert_dates=True)
tm.assert_frame_equal(reread, direct)
def test_unsupported_type(self):
original = DataFrame({"a": [1 + 2j, 2 + 4j]})
msg = "Data type complex128 not supported"
with pytest.raises(NotImplementedError, match=msg):
with tm.ensure_clean() as path:
original.to_stata(path)
def test_unsupported_datetype(self):
dates = [
dt.datetime(1999, 12, 31, 12, 12, 12, 12000),
dt.datetime(2012, 12, 21, 12, 21, 12, 21000),
dt.datetime(1776, 7, 4, 7, 4, 7, 4000),
]
original = DataFrame(
{
"nums": [1.0, 2.0, 3.0],
"strs": ["apple", "banana", "cherry"],
"dates": dates,
}
)
msg = "Format %tC not implemented"
with pytest.raises(NotImplementedError, match=msg):
with tm.ensure_clean() as path:
original.to_stata(path, convert_dates={"dates": "tC"})
dates = pd.date_range("1-1-1990", periods=3, tz="Asia/Hong_Kong")
original = DataFrame(
{
"nums": [1.0, 2.0, 3.0],
"strs": ["apple", "banana", "cherry"],
"dates": dates,
}
)
with pytest.raises(NotImplementedError, match="Data type datetime64"):
with tm.ensure_clean() as path:
original.to_stata(path)
def test_repeated_column_labels(self):
# GH 13923, 25772
msg = """
Value labels for column ethnicsn are not unique. These cannot be converted to
pandas categoricals.
Either read the file with `convert_categoricals` set to False or use the
low level interface in `StataReader` to separately read the values and the
value_labels.
The repeated labels are:\n-+\nwolof
"""
with pytest.raises(ValueError, match=msg):
read_stata(self.dta23, convert_categoricals=True)
def test_stata_111(self):
# 111 is an old version but still used by current versions of
# SAS when exporting to Stata format. We do not know of any
# on-line documentation for this version.
df = read_stata(self.dta24_111)
original = DataFrame(
{
"y": [1, 1, 1, 1, 1, 0, 0, np.NaN, 0, 0],
"x": [1, 2, 1, 3, np.NaN, 4, 3, 5, 1, 6],
"w": [2, np.NaN, 5, 2, 4, 4, 3, 1, 2, 3],
"z": ["a", "b", "c", "d", "e", "", "g", "h", "i", "j"],
}
)
original = original[["y", "x", "w", "z"]]
tm.assert_frame_equal(original, df)
def test_out_of_range_double(self):
# GH 14618
df = DataFrame(
{
"ColumnOk": [0.0, np.finfo(np.double).eps, 4.49423283715579e307],
"ColumnTooBig": [0.0, np.finfo(np.double).eps, np.finfo(np.double).max],
}
)
msg = (
r"Column ColumnTooBig has a maximum value \(.+\) outside the range "
r"supported by Stata \(.+\)"
)
with pytest.raises(ValueError, match=msg):
with tm.ensure_clean() as path:
df.to_stata(path)
df.loc[2, "ColumnTooBig"] = np.inf
msg = (
"Column ColumnTooBig has a maximum value of infinity which is outside "
"the range supported by Stata"
)
with pytest.raises(ValueError, match=msg):
with tm.ensure_clean() as path:
df.to_stata(path)
def test_out_of_range_float(self):
original = DataFrame(
{
"ColumnOk": [
0.0,
np.finfo(np.float32).eps,
np.finfo(np.float32).max / 10.0,
],
"ColumnTooBig": [
0.0,
np.finfo(np.float32).eps,
np.finfo(np.float32).max,
],
}
)
original.index.name = "index"
for col in original:
original[col] = original[col].astype(np.float32)
with tm.ensure_clean() as path:
original.to_stata(path)
reread = read_stata(path)
original["ColumnTooBig"] = original["ColumnTooBig"].astype(np.float64)
tm.assert_frame_equal(original, reread.set_index("index"))
original.loc[2, "ColumnTooBig"] = np.inf
msg = (
"Column ColumnTooBig has a maximum value of infinity which "
"is outside the range supported by Stata"
)
with pytest.raises(ValueError, match=msg):
with tm.ensure_clean() as path:
original.to_stata(path)
def test_path_pathlib(self):
df = tm.makeDataFrame()
df.index.name = "index"
reader = lambda x: read_stata(x).set_index("index")
result = tm.round_trip_pathlib(df.to_stata, reader)
tm.assert_frame_equal(df, result)
def test_pickle_path_localpath(self):
df = tm.makeDataFrame()
df.index.name = "index"
reader = lambda x: read_stata(x).set_index("index")
result = tm.round_trip_localpath(df.to_stata, reader)
tm.assert_frame_equal(df, result)
@pytest.mark.parametrize("write_index", [True, False])
def test_value_labels_iterator(self, write_index):
# GH 16923
d = {"A": ["B", "E", "C", "A", "E"]}
df = DataFrame(data=d)
df["A"] = df["A"].astype("category")
with tm.ensure_clean() as path:
df.to_stata(path, write_index=write_index)
with pd.read_stata(path, iterator=True) as dta_iter:
value_labels = dta_iter.value_labels()
assert value_labels == {"A": {0: "A", 1: "B", 2: "C", 3: "E"}}
def test_set_index(self):
# GH 17328
df = tm.makeDataFrame()
df.index.name = "index"
with tm.ensure_clean() as path:
df.to_stata(path)
reread = pd.read_stata(path, index_col="index")
tm.assert_frame_equal(df, reread)
@pytest.mark.parametrize(
"column", ["ms", "day", "week", "month", "qtr", "half", "yr"]
)
def test_date_parsing_ignores_format_details(self, column):
# GH 17797
#
# Test that display formats are ignored when determining if a numeric
# column is a date value.
#
# All date types are stored as numbers and format associated with the
# column denotes both the type of the date and the display format.
#
# STATA supports 9 date types which each have distinct units. We test 7
# of the 9 types, ignoring %tC and %tb. %tC is a variant of %tc that
# accounts for leap seconds and %tb relies on STATAs business calendar.
df = read_stata(self.stata_dates)
unformatted = df.loc[0, column]
formatted = df.loc[0, column + "_fmt"]
assert unformatted == formatted
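    # A minimal illustrative sketch, not one of the original tests: the date
    # columns above all decode by plain arithmetic against Stata's epoch, e.g.
    # %td stores integer days since 1960-01-01 and %tc stores milliseconds
    # since 1960-01-01 00:00:00. The helper name is made up for illustration.
    @staticmethod
    def _example_stata_epoch_arithmetic(td_days, tc_millis):
        epoch = datetime(1960, 1, 1)
        # %td: integer days since the epoch
        as_date = epoch + dt.timedelta(days=int(td_days))
        # %tc: integer milliseconds since the epoch (leap seconds ignored)
        as_datetime = epoch + dt.timedelta(milliseconds=int(tc_millis))
        return as_date, as_datetime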
def test_writer_117(self):
original = DataFrame(
data=[
[
"string",
"object",
1,
1,
1,
1.1,
1.1,
np.datetime64("2003-12-25"),
"a",
"a" * 2045,
"a" * 5000,
"a",
],
[
"string-1",
"object-1",
1,
1,
1,
1.1,
1.1,
np.datetime64("2003-12-26"),
"b",
"b" * 2045,
"",
"",
],
],
columns=[
"string",
"object",
"int8",
"int16",
"int32",
"float32",
"float64",
"datetime",
"s1",
"s2045",
"srtl",
"forced_strl",
],
)
original["object"] = Series(original["object"], dtype=object)
original["int8"] = Series(original["int8"], dtype=np.int8)
original["int16"] = Series(original["int16"], dtype=np.int16)
original["int32"] = original["int32"].astype(np.int32)
original["float32"] = Series(original["float32"], dtype=np.float32)
original.index.name = "index"
original.index = original.index.astype(np.int32)
copy = original.copy()
with tm.ensure_clean() as path:
original.to_stata(
path,
convert_dates={"datetime": "tc"},
convert_strl=["forced_strl"],
version=117,
)
written_and_read_again = self.read_dta(path)
# original.index is np.int32, read index is np.int64
tm.assert_frame_equal(
written_and_read_again.set_index("index"),
original,
check_index_type=False,
)
tm.assert_frame_equal(original, copy)
def test_convert_strl_name_swap(self):
original = DataFrame(
[["a" * 3000, "A", "apple"], ["b" * 1000, "B", "banana"]],
columns=["long1" * 10, "long", 1],
)
original.index.name = "index"
with tm.assert_produces_warning(pd.io.stata.InvalidColumnName):
with tm.ensure_clean() as path:
original.to_stata(path, convert_strl=["long", 1], version=117)
reread = self.read_dta(path)
reread = reread.set_index("index")
reread.columns = original.columns
tm.assert_frame_equal(reread, original, check_index_type=False)
def test_invalid_date_conversion(self):
# GH 12259
dates = [
dt.datetime(1999, 12, 31, 12, 12, 12, 12000),
dt.datetime(2012, 12, 21, 12, 21, 12, 21000),
dt.datetime(1776, 7, 4, 7, 4, 7, 4000),
]
original = DataFrame(
{
"nums": [1.0, 2.0, 3.0],
"strs": ["apple", "banana", "cherry"],
"dates": dates,
}
)
with tm.ensure_clean() as path:
msg = "convert_dates key must be a column or an integer"
with pytest.raises(ValueError, match=msg):
original.to_stata(path, convert_dates={"wrong_name": "tc"})
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
def test_nonfile_writing(self, version):
# GH 21041
bio = io.BytesIO()
df = tm.makeDataFrame()
df.index.name = "index"
with tm.ensure_clean() as path:
df.to_stata(bio, version=version)
bio.seek(0)
with open(path, "wb") as dta:
dta.write(bio.read())
reread = pd.read_stata(path, index_col="index")
tm.assert_frame_equal(df, reread)
def test_gzip_writing(self):
# writing version 117 requires seek and cannot be used with gzip
df = tm.makeDataFrame()
df.index.name = "index"
with tm.ensure_clean() as path:
with gzip.GzipFile(path, "wb") as gz:
df.to_stata(gz, version=114)
with gzip.GzipFile(path, "rb") as gz:
reread = pd.read_stata(gz, index_col="index")
tm.assert_frame_equal(df, reread)
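    # A minimal sketch, not one of the original tests: formats 117+ need a
    # seekable handle, so a gzipped file can still be produced by writing to an
    # in-memory buffer first and compressing the resulting bytes. The helper
    # name and arguments are made up for illustration.
    @staticmethod
    def _example_gzip_seekable_format(df, path, version=117):
        bio = io.BytesIO()
        df.to_stata(bio, version=version, write_index=False)
        with gzip.GzipFile(path, "wb") as gz:
            gz.write(bio.getvalue())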
def test_unicode_dta_118(self):
unicode_df = self.read_dta(self.dta25_118)
columns = ["utf8", "latin1", "ascii", "utf8_strl", "ascii_strl"]
values = [
["ραηδας", "PÄNDÄS", "p", "ραηδας", "p"],
["ƤĀńĐąŜ", "Ö", "a", "ƤĀńĐąŜ", "a"],
["ᴘᴀᴎᴅᴀS", "Ü", "n", "ᴘᴀᴎᴅᴀS", "n"],
[" ", " ", "d", " ", "d"],
[" ", "", "a", " ", "a"],
["", "", "s", "", "s"],
["", "", " ", "", " "],
]
expected = DataFrame(values, columns=columns)
tm.assert_frame_equal(unicode_df, expected)
def test_mixed_string_strl(self):
# GH 23633
output = [{"mixed": "string" * 500, "number": 0}, {"mixed": None, "number": 1}]
output = DataFrame(output)
output.number = output.number.astype("int32")
with tm.ensure_clean() as path:
output.to_stata(path, write_index=False, version=117)
reread = read_stata(path)
expected = output.fillna("")
tm.assert_frame_equal(reread, expected)
# Check strl supports all None (null)
output.loc[:, "mixed"] = None
output.to_stata(
path, write_index=False, convert_strl=["mixed"], version=117
)
reread = read_stata(path)
expected = output.fillna("")
tm.assert_frame_equal(reread, expected)
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
def test_all_none_exception(self, version):
output = [{"none": "none", "number": 0}, {"none": None, "number": 1}]
output = DataFrame(output)
output.loc[:, "none"] = None
with tm.ensure_clean() as path:
with pytest.raises(ValueError, match="Column `none` cannot be exported"):
output.to_stata(path, version=version)
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
def test_invalid_file_not_written(self, version):
content = "Here is one __�__ Another one __·__ Another one __½__"
df = DataFrame([content], columns=["invalid"])
with tm.ensure_clean() as path:
msg1 = (
r"'latin-1' codec can't encode character '\\ufffd' "
r"in position 14: ordinal not in range\(256\)"
)
msg2 = (
"'ascii' codec can't decode byte 0xef in position 14: "
r"ordinal not in range\(128\)"
)
with pytest.raises(UnicodeEncodeError, match=f"{msg1}|{msg2}"):
with tm.assert_produces_warning(ResourceWarning):
df.to_stata(path)
def test_strl_latin1(self):
# GH 23573, correct GSO data to reflect correct size
output = DataFrame(
[["pandas"] * 2, ["þâÑÐŧ"] * 2], columns=["var_str", "var_strl"]
)
with tm.ensure_clean() as path:
output.to_stata(path, version=117, convert_strl=["var_strl"])
with open(path, "rb") as reread:
content = reread.read()
expected = "þâÑÐŧ"
assert expected.encode("latin-1") in content
assert expected.encode("utf-8") in content
gsos = content.split(b"strls")[1][1:-2]
for gso in gsos.split(b"GSO")[1:]:
val = gso.split(b"\x00")[-2]
size = gso[gso.find(b"\x82") + 1]
assert len(val) == size - 1
def test_encoding_latin1_118(self):
# GH 25960
msg = """
One or more strings in the dta file could not be decoded using utf-8, and
so the fallback encoding of latin-1 is being used. This can happen when a file
has been incorrectly encoded by Stata or some other software. You should verify
the string values returned are correct."""
with tm.assert_produces_warning(UnicodeWarning) as w:
encoded = read_stata(self.dta_encoding_118)
assert len(w) == 151
assert w[0].message.args[0] == msg
expected = DataFrame([["Düsseldorf"]] * 151, columns=["kreis1849"])
tm.assert_frame_equal(encoded, expected)
@pytest.mark.slow
def test_stata_119(self):
        # Gzipped since it contains 32,999 variables and is 20MiB uncompressed
with gzip.open(self.dta26_119, "rb") as gz:
df = read_stata(gz)
assert df.shape == (1, 32999)
assert df.iloc[0, 6] == "A" * 3000
assert df.iloc[0, 7] == 3.14
assert df.iloc[0, -1] == 1
assert df.iloc[0, 0] == pd.Timestamp(datetime(2012, 12, 21, 21, 12, 21))
@pytest.mark.parametrize("version", [118, 119, None])
def test_utf8_writer(self, version):
cat = pd.Categorical(["a", "β", "ĉ"], ordered=True)
data = DataFrame(
[
[1.0, 1, "ᴬ", "ᴀ relatively long ŝtring"],
[2.0, 2, "ᴮ", ""],
[3.0, 3, "ᴰ", None],
],
columns=["a", "β", "ĉ", "strls"],
)
data["ᴐᴬᵀ"] = cat
variable_labels = {
"a": "apple",
"β": "ᵈᵉᵊ",
"ĉ": "ᴎტჄႲႳႴႶႺ",
"strls": "Long Strings",
"ᴐᴬᵀ": "",
}
data_label = "ᴅaᵀa-label"
data["β"] = data["β"].astype(np.int32)
with tm.ensure_clean() as path:
writer = StataWriterUTF8(
path,
data,
data_label=data_label,
convert_strl=["strls"],
variable_labels=variable_labels,
write_index=False,
version=version,
)
writer.write_file()
reread_encoded = read_stata(path)
# Missing is intentionally converted to empty strl
data["strls"] = data["strls"].fillna("")
tm.assert_frame_equal(data, reread_encoded)
reader = StataReader(path)
assert reader.data_label == data_label
assert reader.variable_labels() == variable_labels
data.to_stata(path, version=version, write_index=False)
reread_to_stata = read_stata(path)
tm.assert_frame_equal(data, reread_to_stata)
def test_writer_118_exceptions(self):
df = DataFrame(np.zeros((1, 33000), dtype=np.int8))
with tm.ensure_clean() as path:
with pytest.raises(ValueError, match="version must be either 118 or 119."):
StataWriterUTF8(path, df, version=117)
with tm.ensure_clean() as path:
with pytest.raises(ValueError, match="You must use version 119"):
StataWriterUTF8(path, df, version=118)
@pytest.mark.parametrize("version", [105, 108, 111, 113, 114])
def test_backward_compat(version, datapath):
data_base = datapath("io", "data", "stata")
ref = os.path.join(data_base, "stata-compat-118.dta")
old = os.path.join(data_base, f"stata-compat-{version}.dta")
expected = pd.read_stata(ref)
old_dta = pd.read_stata(old)
tm.assert_frame_equal(old_dta, expected, check_dtype=False)
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
@pytest.mark.parametrize("use_dict", [True, False])
@pytest.mark.parametrize("infer", [True, False])
def test_compression(compression, version, use_dict, infer):
file_name = "dta_inferred_compression.dta"
if compression:
file_ext = "gz" if compression == "gzip" and not use_dict else compression
file_name += f".{file_ext}"
compression_arg = compression
if infer:
compression_arg = "infer"
if use_dict:
compression_arg = {"method": compression}
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df.index.name = "index"
with tm.ensure_clean(file_name) as path:
df.to_stata(path, version=version, compression=compression_arg)
if compression == "gzip":
with gzip.open(path, "rb") as comp:
fp = io.BytesIO(comp.read())
elif compression == "zip":
with zipfile.ZipFile(path, "r") as comp:
fp = io.BytesIO(comp.read(comp.filelist[0]))
elif compression == "bz2":
with bz2.open(path, "rb") as comp:
fp = io.BytesIO(comp.read())
elif compression == "xz":
with lzma.open(path, "rb") as comp:
fp = io.BytesIO(comp.read())
elif compression is None:
fp = path
reread = read_stata(fp, index_col="index")
tm.assert_frame_equal(reread, df)
@pytest.mark.parametrize("method", ["zip", "infer"])
@pytest.mark.parametrize("file_ext", [None, "dta", "zip"])
def test_compression_dict(method, file_ext):
file_name = f"test.{file_ext}"
archive_name = "test.dta"
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df.index.name = "index"
with tm.ensure_clean(file_name) as path:
compression = {"method": method, "archive_name": archive_name}
df.to_stata(path, compression=compression)
if method == "zip" or file_ext == "zip":
zp = zipfile.ZipFile(path, "r")
assert len(zp.filelist) == 1
assert zp.filelist[0].filename == archive_name
fp = io.BytesIO(zp.read(zp.filelist[0]))
else:
fp = path
reread = read_stata(fp, index_col="index")
tm.assert_frame_equal(reread, df)
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
def test_chunked_categorical(version):
df = DataFrame({"cats": Series(["a", "b", "a", "b", "c"], dtype="category")})
df.index.name = "index"
with tm.ensure_clean() as path:
df.to_stata(path, version=version)
reader = StataReader(path, chunksize=2, order_categoricals=False)
for i, block in enumerate(reader):
block = block.set_index("index")
assert "cats" in block
tm.assert_series_equal(block.cats, df.cats.iloc[2 * i : 2 * (i + 1)])
def test_chunked_categorical_partial(dirpath):
dta_file = os.path.join(dirpath, "stata-dta-partially-labeled.dta")
values = ["a", "b", "a", "b", 3.0]
with StataReader(dta_file, chunksize=2) as reader:
with tm.assert_produces_warning(CategoricalConversionWarning):
for i, block in enumerate(reader):
assert list(block.cats) == values[2 * i : 2 * (i + 1)]
if i < 2:
idx = pd.Index(["a", "b"])
else:
idx = pd.Float64Index([3.0])
tm.assert_index_equal(block.cats.cat.categories, idx)
with tm.assert_produces_warning(CategoricalConversionWarning):
with StataReader(dta_file, chunksize=5) as reader:
large_chunk = reader.__next__()
direct = read_stata(dta_file)
tm.assert_frame_equal(direct, large_chunk)
def test_iterator_errors(dirpath):
dta_file = os.path.join(dirpath, "stata-dta-partially-labeled.dta")
with pytest.raises(ValueError, match="chunksize must be a positive"):
StataReader(dta_file, chunksize=-1)
with pytest.raises(ValueError, match="chunksize must be a positive"):
StataReader(dta_file, chunksize=0)
with pytest.raises(ValueError, match="chunksize must be a positive"):
StataReader(dta_file, chunksize="apple")
def test_iterator_value_labels():
# GH 31544
values = ["c_label", "b_label"] + ["a_label"] * 500
df = DataFrame({f"col{k}": pd.Categorical(values, ordered=True) for k in range(2)})
with tm.ensure_clean() as path:
df.to_stata(path, write_index=False)
expected = pd.Index(["a_label", "b_label", "c_label"], dtype="object")
with pd.read_stata(path, chunksize=100) as reader:
for j, chunk in enumerate(reader):
for i in range(2):
tm.assert_index_equal(chunk.dtypes[i].categories, expected)
tm.assert_frame_equal(chunk, df.iloc[j * 100 : (j + 1) * 100])
def test_precision_loss():
df = DataFrame(
[[sum(2 ** i for i in range(60)), sum(2 ** i for i in range(52))]],
columns=["big", "little"],
)
with tm.ensure_clean() as path:
with tm.assert_produces_warning(
PossiblePrecisionLoss, match="Column converted from int64 to float64"
):
df.to_stata(path, write_index=False)
reread = read_stata(path)
expected_dt = Series([np.float64, np.float64], index=["big", "little"])
tm.assert_series_equal(reread.dtypes, expected_dt)
assert reread.loc[0, "little"] == df.loc[0, "little"]
assert reread.loc[0, "big"] == float(df.loc[0, "big"])
| {
"content_hash": "8137bed09b21f65cd16d1f8c2ae7c75d",
"timestamp": "",
"source": "github",
"line_count": 1999,
"max_line_length": 88,
"avg_line_length": 39.09804902451226,
"alnum_prop": 0.5394270506800415,
"repo_name": "jreback/pandas",
"id": "24944281419c340aa764066cb8978843f8b00eb7",
"size": "78288",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas/tests/io/test_stata.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4879"
},
{
"name": "C",
"bytes": "406353"
},
{
"name": "C++",
"bytes": "17193"
},
{
"name": "HTML",
"bytes": "606963"
},
{
"name": "Makefile",
"bytes": "529"
},
{
"name": "Python",
"bytes": "14930989"
},
{
"name": "Shell",
"bytes": "29317"
},
{
"name": "Smarty",
"bytes": "2040"
}
],
"symlink_target": ""
} |
from framework.dependency_management.dependency_resolver import ServiceLocator
from framework.http.wafbypasser import wafbypasser
def format_args(args):
formatted_args = {
"target": None,
"payloads": None,
"headers": None,
"methods": None,
"data": None,
"contains": None,
"resp_code_det": None,
"reverse": None,
"fuzzing_signature": None,
"accepted_value": None,
"param_name": None,
"param_source": None,
"delay": None,
"follow_cookies": None,
"cookie": None,
"length": None,
"response_time": None,
"mode": None
}
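    # Map the plugin's upper-case argument names onto the lower-case keys
    # expected by the WAF bypasser (the codebase is Python 2, hence iteritems).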
for param, value in dict(args).iteritems():
formatted_args[param.lower()] = value
return formatted_args
DESCRIPTION = "WAF byppaser module plugin"
def run(PluginInfo):
Content = DESCRIPTION + " Results:<br />"
plugin_params = ServiceLocator.get_component("plugin_params")
args = {
'Description': DESCRIPTION,
'Mandatory': {'TARGET': None, 'MODE': None},
'Optional': {
'METHODS': None,
'COOKIE': None,
'HEADERS': None,
'LENGTH': None,
'DATA': None,
'CONTAINS': None,
'RESP_CODE_DET': None,
'RESPONSE_TIME': None,
'REVERSE': None,
'PAYLOADS': None,
'ACCEPTED_VALUE': None,
'PARAM_NAME': None,
'PARAM_SOURCE': None,
'DELAY': None,
'FOLLOW-COOKIES': None,
}
}
for Args in plugin_params.GetArgs(args, PluginInfo):
        ret = plugin_params.SetConfig(Args)  # Only now, after the args for this run have been resolved, update the config
wafbps = wafbypasser.WAFBypasser(Core)
wafbps.start(format_args(Args))
return Content
| {
"content_hash": "e3a139963a24bdd5e06c1a4d3df92000",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 103,
"avg_line_length": 29.661290322580644,
"alnum_prop": 0.5502990755845568,
"repo_name": "DarKnight24/owtf",
"id": "eab0a5b35a020cf015fa921e3350b17910828ef4",
"size": "1839",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "plugins/auxiliary/wafbypasser/WAF_Byppaser@OWTF-AWAF-001.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "165961"
},
{
"name": "JavaScript",
"bytes": "20557"
},
{
"name": "Python",
"bytes": "704531"
},
{
"name": "Shell",
"bytes": "58019"
}
],
"symlink_target": ""
} |
import os
from flask.cli import DispatchingApp
from werkzeug.debug import DebuggedApplication
from werkzeug.exceptions import NotFound
from werkzeug.middleware.dispatcher import DispatcherMiddleware
from werkzeug.middleware.proxy_fix import ProxyFix
from werkzeug.serving import WSGIRequestHandler, run_simple
from werkzeug.urls import url_parse
try:
import pywatchman
except ImportError:
pywatchman = None
def run_cmd(info, **kwargs):
if kwargs['reloader_type'] == 'watchman':
if pywatchman is None:
print('watchman is not available - you need to `pip install pywatchman`')
return
run_watchman()
return
run_server(info, **kwargs)
def run_watchman():
from .watchman import Watchman
try:
Watchman().run()
except pywatchman.WatchmanError as exc:
from indico.util.console import cformat
print(cformat('%{red!}watchman error: {}').format(exc))
def run_server(info, host, port, url, ssl, ssl_key, ssl_cert, quiet, proxy, enable_evalex, evalex_from, reloader_type):
if port is None:
port = 8443 if ssl else 8000
if not enable_evalex:
evalex_whitelist = False
elif evalex_from:
evalex_whitelist = evalex_from
else:
evalex_whitelist = True
if not ssl:
ssl_ctx = None
elif ssl_key and ssl_cert:
ssl_ctx = (ssl_cert, ssl_key)
else:
ssl_ctx = 'adhoc'
if not url:
proto = 'https' if ssl else 'http'
url_host = f'[{host}]' if ':' in host else host
if (port == 80 and not ssl) or (port == 443 and ssl):
url = f'{proto}://{url_host}'
else:
url = f'{proto}://{url_host}:{port}'
os.environ['INDICO_DEV_SERVER'] = '1'
os.environ.pop('FLASK_DEBUG', None)
os.environ['INDICO_CONF_OVERRIDE'] = repr({
'BASE_URL': url,
})
if os.environ.get('WERKZEUG_RUN_MAIN') != 'true':
print(f' * Serving Indico on {url}')
if evalex_whitelist:
print(f' * Werkzeug debugger console on {url}/console')
if evalex_whitelist is True: # noqa
print(' * Werkzeug debugger console is available to all clients!')
try:
from indico.core.config import get_config_path
extra_files = [get_config_path()]
except Exception:
extra_files = None
# our own logger initialization code runs earlier so werkzeug
# doesn't initialize its logger
import logging
werkzeug_logger = logging.getLogger('werkzeug')
werkzeug_logger.propagate = False
werkzeug_logger.setLevel(logging.INFO)
werkzeug_logger.addHandler(logging.StreamHandler())
app = _make_wsgi_app(info, url, evalex_whitelist, proxy)
run_simple(host, port, app,
reloader_type=reloader_type, use_reloader=(reloader_type != 'none'),
use_debugger=False, use_evalex=False, threaded=True, ssl_context=ssl_ctx,
extra_files=extra_files, request_handler=QuietWSGIRequestHandler if quiet else None)
def _reset_state():
# XXX: This hack is extremely awful, but when the reloader encounters
# an error during import / app creation time (e.g. saving a file with
# a syntax error) and the error is fixed later we still have the old
# data which then breaks things, so we clear them if possible (and not
# care about any exceptions while trying to import these things)
# The reason for this behavior is that a file that fails to import
# is not added to `sys.modules` so the reloader won't monitor it for
# changes.
from indico.core.celery import celery
celery.flask_app = None
def _make_wsgi_app(info, url, evalex_whitelist, proxy):
def _load_app():
_reset_state()
return info.load_app()
url_data = url_parse(url)
app = DispatchingApp(_load_app, use_eager_loading=False)
app = DebuggedIndico(app, evalex_whitelist)
app = _make_indico_dispatcher(app, url_data.path)
if proxy:
app = ProxyFix(app, x_for=1, x_proto=1, x_host=1)
QuietWSGIRequestHandler.INDICO_URL_PREFIX = url_data.path.rstrip('/')
return app
def _make_indico_dispatcher(wsgi_app, path):
path = path.rstrip('/')
if not path:
# Nothing to dispatch
return wsgi_app
else:
return DispatcherMiddleware(NotFound(), {
path: wsgi_app
})
class DebuggedIndico(DebuggedApplication):
def __init__(self, *args, **kwargs):
self._evalex_whitelist = None
self._request_ip = None
super().__init__(*args, **kwargs)
@property
def evalex(self):
if not self._evalex_whitelist:
return False
elif self._evalex_whitelist is True: # noqa
return True
else:
return self._request_ip in self._evalex_whitelist
@evalex.setter
def evalex(self, value):
self._evalex_whitelist = value
def __call__(self, environ, start_response):
self._request_ip = environ['REMOTE_ADDR']
if self._request_ip.startswith('::ffff:'):
# convert ipv6-style ipv4 to the regular ipv4 notation
self._request_ip = self._request_ip[7:]
return super().__call__(environ, start_response)
class QuietWSGIRequestHandler(WSGIRequestHandler):
INDICO_URL_PREFIX = '' # set from outside based on the url path prefix
IGNORED_PATH_PREFIXES = {'/vars.js', '/css/', '/fonts/', '/images/', '/dist/', '/assets/', '/static/'}
def log_request(self, code='-', size='-'):
if code not in (304, 200):
super().log_request(code, size)
elif '?__debugger__=yes&cmd=resource' in self.path:
pass # don't log debugger resources, they are quite uninteresting
elif not any(self.path.startswith(self.INDICO_URL_PREFIX + x) for x in self.IGNORED_PATH_PREFIXES):
super().log_request(code, size)
| {
"content_hash": "be05e52eae7e660eac5b342ddcd811b2",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 119,
"avg_line_length": 34.179190751445084,
"alnum_prop": 0.6350414341281921,
"repo_name": "DirkHoffmann/indico",
"id": "6555594ef1877560fced6be5294612df0434d64f",
"size": "6127",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "indico/cli/devserver.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "33249"
},
{
"name": "HTML",
"bytes": "1398354"
},
{
"name": "JavaScript",
"bytes": "2295843"
},
{
"name": "Mako",
"bytes": "1527"
},
{
"name": "Python",
"bytes": "5426206"
},
{
"name": "SCSS",
"bytes": "496904"
},
{
"name": "Shell",
"bytes": "3877"
},
{
"name": "TeX",
"bytes": "23435"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
} |
import sys
from telnet import Telnet
from handler import Handler
from managers import ListeningManager, ConnectionManager
# from _handlers import LogonHandler
class EchoHandler(Handler):
def __init__(self, conn):
Handler.__init__(self, conn)
def Enter(self):
self.SendString(self.connection, 'Enter Echo Handler\n')
def Leave(self):
self.SendString(self.connection, 'Leave Echo Handler\n')
def Handle(self, cmd):
if cmd != 'quit':
self.SendString(self.connection, 'Echo Command: %s\n' % cmd)
print 'Recv cmd: %s' % cmd
else:
self.SendString(self.connection, 'Good Bye!\n')
self.connection.Close()
# cm = ConnectionManager(LogonHandler, Telnet)
cm = ConnectionManager(EchoHandler, Telnet)
lm = ListeningManager(cm)
lm.AddPort(int(sys.argv[1]))
while True:
lm.Listen()
cm.Manage()
| {
"content_hash": "39a1ab05f15265502f9a7baece5ebf5e",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 72,
"avg_line_length": 25.714285714285715,
"alnum_prop": 0.6544444444444445,
"repo_name": "prim/SZMUD",
"id": "bf51d5539ea5b35d9f82956471151a9e713ff5c6",
"size": "919",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "network/example/echo_demo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "119898"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0002_auto_20150905_2016'),
]
operations = [
]
| {
"content_hash": "500184742b44fe88706eeeb52c6ed5ec",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 45,
"avg_line_length": 17.153846153846153,
"alnum_prop": 0.6502242152466368,
"repo_name": "sebastian-code/portal",
"id": "931780c635df64cf04ab1d0b57d65daf1aeee514",
"size": "247",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "djangocali-portal/users/migrations/0003_merge.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1670"
},
{
"name": "HTML",
"bytes": "55353"
},
{
"name": "JavaScript",
"bytes": "2394"
},
{
"name": "Nginx",
"bytes": "1095"
},
{
"name": "Python",
"bytes": "84625"
},
{
"name": "Shell",
"bytes": "7805"
}
],
"symlink_target": ""
} |
from django.conf import settings
import ldap
import ldap.modlist
import logging
logger = logging.getLogger("ldapproxy.models")
class LdapProxy:
'''
wrapper class to talk to the LDAP MASTER server.
'''
proxy = None
def __init__(self):
try:
if settings.LDAP_MASTER_DISABLE == True: return
except AttributeError: pass
try:
logger.debug("TLS AVAILABLE? %d" % (ldap.TLS_AVAIL))
print "LDAP SETTINGS->"+settings.LDAP_MASTER_URI
ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, settings.LDAP_MASTER_CA)
#ldap.set_option(ldap.OPT_X_TLS_CERTFILE, settings.LDAP_MASTER_CERT)
#ldap.set_option(ldap.OPT_X_TLS_KEYFILE, settings.LDAP_MASTER_KEY)
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, settings.LDAP_MASTER_REQCERT)
ldap.set_option(ldap.OPT_TIMEOUT, settings.LDAP_MASTER_TIMEOUT)
self.proxy = ldap.initialize (settings.LDAP_MASTER_URI)
if settings.AUTH_LDAP_START_TLS:
self.proxy.start_tls_s()
self.proxy.simple_bind_s(settings.LDAP_MASTER_DN, settings.LDAP_MASTER_PWD)
logger.debug ("LdapProxy.__init__: Connected to ldapserver %s as %s with CA in %s and with certificate %s and key %s" % (settings.LDAP_MASTER_URI, settings.LDAP_MASTER_DN, settings.LDAP_MASTER_CA, settings.LDAP_MASTER_CERT, settings.LDAP_MASTER_KEY))
except ldap.LDAPError, error_message:
logger.error ("LdapProxy.__init__: Failed connecting to ldapserver %s as %s with CA in %s and with certificate %s and key %s: %s" % (settings.LDAP_MASTER_URI, settings.LDAP_MASTER_DN, settings.LDAP_MASTER_CA, settings.LDAP_MASTER_CERT, settings.LDAP_MASTER_KEY, error_message))
raise
    # It is not really possible to lock objects in the LDAP backend.
    # However, as expedients in different islands may compete, this becomes relevant.
    # Test 1: editing/saving a project several times while the fetching of search results and the
    # (re)adding of the object are commented out does not cause problems. Probably repeating the delete
    # action also raises an ldap.NO_SUCH_OBJECT that is silently discarded. ==> OK
    # Test 2: similar, but now deleting the object is commented out. After the second time,
    # an ldap.ALREADY_EXISTS exception is raised. Either the calling code knows how to deal with this
    # or the user is confronted with an internal server error. ==> Good enough for now?
    # It is crucial that resubmitting does not destroy just-recently added data. But this
    # should be a concern anyway.
    # Not really guaranteed to be successful, but at least we try a couple of times to overcome this problem here.
def create_or_replace (self, dn, entry):
try:
if settings.LDAP_MASTER_DISABLE == True: return
except AttributeError: pass
count = 0
while 1:
try:
resultid = self.proxy.search(dn, ldap.SCOPE_BASE)
try:
t, data = self.proxy.result(resultid, 1)
logger.debug("LdapProxy.create_or_replace: dn %s exists and is going to be deleted before being inserted again" % (dn))
self.proxy.delete_s(dn)
except ldap.NO_SUCH_OBJECT:
pass
logger.debug("LdapProxy.create_or_replace: adding %s [%s]" % (dn, entry))
self.proxy.add_s(dn,ldap.modlist.addModlist(entry))
break
except ldap.ALREADY_EXISTS:
count = count + 1
if count < settings.LDAP_MASTER_RETRIES:
continue
else:
logger.error ("LdapProxy: tried %d time to replace %s in LDAP directory" % (settings.LDAP_MASTER_RETRIES, dn))
raise
except ldap.LDAPError, error_message:
logger.error ("ldapproxy: create or replace %s with %s failed: %s" % (dn, entry, error_message))
raise
def delete (self, dn):
try:
if settings.LDAP_MASTER_DISABLE == True: return
except AttributeError: pass
try:
resultid = self.proxy.search(dn, ldap.SCOPE_BASE)
try:
t, data = self.proxy.result(resultid, 1)
logger.debug("LdapProxy.delete: dn %s exists and is going to be deleted" % (dn))
self.proxy.delete_s(dn)
except ldap.NO_SUCH_OBJECT:
pass
except ldap.LDAPError, error_message:
logger.error ("ldapproxy: delete %s failed: %s" % (dn, error_message))
raise
| {
"content_hash": "7d3b3f3d9a76bc725f71a13d5288a105",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 289,
"avg_line_length": 51.544444444444444,
"alnum_prop": 0.6208234533304592,
"repo_name": "avlach/univbris-ocf",
"id": "c8be5ccc40f7508af0b238ebb1c8d1a0efbd8940",
"size": "4639",
"binary": false,
"copies": "4",
"ref": "refs/heads/ofelia.opticaldevelopment",
"path": "expedient/src/python/expedient/common/ldapproxy/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "127542"
},
{
"name": "JavaScript",
"bytes": "289680"
},
{
"name": "Perl",
"bytes": "4421"
},
{
"name": "Python",
"bytes": "3446617"
},
{
"name": "Racket",
"bytes": "32770"
},
{
"name": "Shell",
"bytes": "7609"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from envisage.resource.file_resource_protocol import *
| {
"content_hash": "84169598c7c07fd3edbc3ca683adf281",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 54,
"avg_line_length": 47,
"alnum_prop": 0.8085106382978723,
"repo_name": "enthought/etsproxy",
"id": "78fac251c73c33d084002bdd1de2252e769b1af5",
"size": "109",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "enthought/envisage/resource/file_resource_protocol.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "363714"
}
],
"symlink_target": ""
} |
"""
Composites are the **factories** and **decision makers** of a
behaviour tree. They are responsible for shaping the branches.
.. graphviz:: dot/composites.dot
.. tip:: You should never need to subclass or create new composites.
Most patterns can be achieved with a combination of the above. Adding to this
set exponentially increases the complexity and subsequently
making it more difficult to design, introspect, visualise and debug the trees. Always try
to find the combination you need to achieve your result before contemplating adding
to this set. Actually, scratch that...just don't contemplate it!
Composite behaviours typically manage children and apply some logic to the way
they execute and return a result, but generally don't do anything themselves.
Perform the checks or actions you need to do in the non-composite behaviours.
* :class:`~py_trees.composites.Sequence`: execute children sequentially
* :class:`~py_trees.composites.Selector`: select a path through the tree, interruptible by higher priorities
* :class:`~py_trees.composites.Chooser`: like a selector, but commits to a path once started until it finishes
* :class:`~py_trees.composites.Parallel`: manage children concurrently
"""
##############################################################################
# Imports
##############################################################################
import itertools
from . import common
from .behaviour import Behaviour
from .common import Status
##############################################################################
# Composites
##############################################################################
class Composite(Behaviour):
"""
The parent class to all composite behaviours, i.e. those that
have children.
Args:
name (:obj:`str`): the composite behaviour name
children ([:class:`~py_trees.behaviour.Behaviour`]): list of children to add
*args: variable length argument list
**kwargs: arbitrary keyword arguments
"""
def __init__(self, name="", children=None, *args, **kwargs):
super(Composite, self).__init__(name, *args, **kwargs)
if children is not None:
for child in children:
self.add_child(child)
else:
self.children = []
############################################
# Worker Overrides
############################################
def setup(self, timeout):
"""
        Relays to each child's :meth:`~py_trees.behaviour.Behaviour.setup` method in turn.
Args:
timeout (:obj:`float`): time to wait (0.0 is blocking forever)
Return:
            :obj:`bool`: success or failure of the operation
"""
self.logger.debug("%s.setup()" % (self.__class__.__name__))
result = True
for child in self.children:
new_result = child.setup(timeout)
if new_result is None:
# replace with py_trees exception!
self.logger.error("%s.setup()['%s'.setup() returned None (must be True||False)]" % (self.__class__.__name__, child.name))
result = result and new_result
if not result:
break
return result
def stop(self, new_status=Status.INVALID):
"""
        There are generally two use cases that must be supported here.
1) Whenever the composite has gone to a recognised state (i.e. :data:`~py_trees.common.Status.FAILURE` or SUCCESS),
or 2) when a higher level parent calls on it to truly stop (INVALID).
In only the latter case will children need to be forcibly stopped as well. In the first case, they will
have stopped themselves appropriately already.
Args:
new_status (:class:`~py_trees.common.Status`): behaviour will transition to this new status
"""
self.logger.debug("%s.stop()[%s]" % (self.__class__.__name__, "%s->%s" % (self.status, new_status) if self.status != new_status else "%s" % new_status))
if new_status == Status.INVALID:
for child in self.children:
child.stop(new_status)
# This part just replicates the Behaviour.stop function. We replicate it here so that
# the Behaviour logging doesn't duplicate the composite logging here, just a bit cleaner this way.
self.terminate(new_status)
self.status = new_status
self.iterator = self.tick()
def tip(self):
"""
Recursive function to extract the last running node of the tree.
Returns:
            :class:`~py_trees.behaviour.Behaviour`: the tip of the current child of this composite, or None
"""
return self.current_child.tip() if self.current_child is not None else None
############################################
# Children
############################################
def add_child(self, child):
"""
Adds a child.
Args:
child (:class:`~py_trees.behaviour.Behaviour`): child to add
Returns:
uuid.UUID: unique id of the child
"""
assert isinstance(child, Behaviour), "children must be behaviours, but you passed in %s" % type(child)
self.children.append(child)
child.parent = self
return child.id
def add_children(self, children):
"""
Append a list of children to the current list.
Args:
children ([:class:`~py_trees.behaviour.Behaviour`]): list of children to add
"""
for child in children:
self.add_child(child)
def remove_child(self, child):
"""
Remove the child behaviour from this composite.
Args:
child (:class:`~py_trees.behaviour.Behaviour`): child to delete
Returns:
:obj:`int`: index of the child that was removed
.. todo:: Error handling for when child is not in this list
"""
if child.status == Status.RUNNING:
child.stop(Status.INVALID)
child_index = self.children.index(child)
self.children.remove(child)
return child_index
def remove_all_children(self):
"""
Remove all children. Makes sure to stop each child if necessary.
"""
for child in self.children:
if child.status == Status.RUNNING:
child.stop(Status.INVALID)
# makes sure to delete it for this class and all references to it
# http://stackoverflow.com/questions/850795/clearing-python-lists
del self.children[:]
def replace_child(self, child, replacement):
"""
Replace the child behaviour with another.
Args:
child (:class:`~py_trees.behaviour.Behaviour`): child to delete
replacement (:class:`~py_trees.behaviour.Behaviour`): child to insert
"""
if child.status == Status.RUNNING:
child.stop(Status.INVALID)
child_index = self.children.index(child)
self.logger.debug("%s.replace_child()[%s->%s]" % (self.__class__.__name__, child.name, replacement.name))
self.children[child_index] = replacement
def remove_child_by_id(self, child_id):
"""
Remove the child with the specified id.
Args:
child_id (uuid.UUID): unique id of the child
Raises:
IndexError: if the child was not found
"""
child = next((c for c in self.children if c.id == child_id), None)
if child is not None:
if child.status == Status.RUNNING:
child.stop(Status.INVALID)
self.children.remove(child)
else:
raise IndexError('child was not found with the specified id [%s]' % child_id)
def prepend_child(self, child):
"""
Prepend the child before all other children.
Args:
child (:class:`~py_trees.behaviour.Behaviour`): child to insert
Returns:
uuid.UUID: unique id of the child
"""
self.children.insert(0, child)
child.parent = self
return child.id
def insert_child(self, child, index):
"""
Insert child at the specified index. This simply directly calls
the python list's :obj:`insert` method using the child and index arguments.
Args:
child (:class:`~py_trees.behaviour.Behaviour`): child to insert
index (:obj:`int`): index to insert it at
Returns:
uuid.UUID: unique id of the child
"""
self.children.insert(index, child)
child.parent = self
return child.id
##############################################################################
# Selector
##############################################################################
class Selector(Composite):
"""
Selectors are the Decision Makers
.. graphviz:: dot/selector.dot
A selector executes each of its child behaviours in turn until one of them
    succeeds (at which point it itself returns :data:`~py_trees.common.Status.RUNNING` or :data:`~py_trees.common.Status.SUCCESS`),
    or it runs out of children, at which point it itself returns :data:`~py_trees.common.Status.FAILURE`.
We usually refer to selecting children as a means of *choosing between priorities*.
Each child and its subtree represent a decreasingly lower priority path.
.. note::
Switching from a low -> high priority branch causes a `stop(INVALID)` signal to be sent to the previously
executing low priority branch. This signal will percolate down that child's own subtree. Behaviours
should make sure that they catch this and *destruct* appropriately.
Make sure you do your appropriate cleanup in the :meth:`terminate()` methods! e.g. cancelling a running goal, or restoring a context.
.. seealso:: The :ref:`py-trees-demo-selector-program` program demos higher priority switching under a selector.
Args:
name (:obj:`str`): the composite behaviour name
children ([:class:`~py_trees.behaviour.Behaviour`]): list of children to add
*args: variable length argument list
**kwargs: arbitrary keyword arguments
"""
def __init__(self, name="Selector", children=None, *args, **kwargs):
super(Selector, self).__init__(name, children, *args, **kwargs)
self.current_child = None
def tick(self):
"""
Run the tick behaviour for this selector. Note that the status
of the tick is always determined by its children, not
by the user customised update function.
Yields:
:class:`~py_trees.behaviour.Behaviour`: a reference to itself or one of its children
"""
self.logger.debug("%s.tick()" % self.__class__.__name__)
# Required behaviour for *all* behaviours and composites is
# for tick() to check if it isn't running and initialise
if self.status != Status.RUNNING:
            # selectors don't do anything specific on initialisation
# - the current child is managed by the update, never needs to be 'initialised'
# run subclass (user) handles
self.initialise()
# run any work designated by a customised instance of this class
self.update()
previous = self.current_child
for child in self.children:
for node in child.tick():
yield node
if node is child:
if node.status == Status.RUNNING or node.status == Status.SUCCESS:
self.current_child = child
self.status = node.status
if previous is None or previous != self.current_child:
# we interrupted, invalidate everything at a lower priority
passed = False
for child in self.children:
if passed:
if child.status != Status.INVALID:
child.stop(Status.INVALID)
passed = True if child == self.current_child else passed
yield self
return
# all children failed, set failure ourselves and current child to the last bugger who failed us
self.status = Status.FAILURE
try:
self.current_child = self.children[-1]
except IndexError:
self.current_child = None
yield self
def stop(self, new_status=Status.INVALID):
"""
Stopping a selector requires setting the current child to none. Note that it
is important to implement this here instead of terminate, so users are free
to subclass this easily with their own terminate and not have to remember
that they need to call this function manually.
Args:
new_status (:class:`~py_trees.common.Status`): the composite is transitioning to this new status
"""
# retain information about the last running child if the new status is
# SUCCESS or FAILURE
if new_status == Status.INVALID:
self.current_child = None
Composite.stop(self, new_status)
def __repr__(self):
"""
Simple string representation of the object.
Returns:
:obj:`str`: string representation
"""
s = "Name : %s\n" % self.name
s += " Status : %s\n" % self.status
s += " Current : %s\n" % (self.current_child.name if self.current_child is not None else "none")
s += " Children: %s\n" % [child.name for child in self.children]
return s
##############################################################################
# Chooser
##############################################################################
class Chooser(Selector):
"""
Choosers are Selectors with Commitment
.. graphviz:: dot/chooser.dot
A variant of the selector class. Once a child is selected, it
cannot be interrupted by higher priority siblings. As soon as the chosen child
itself has finished it frees the chooser for an alternative selection. i.e. priorities
only come into effect if the chooser wasn't running in the previous tick.
.. note::
This is the only composite in py_trees that is not a core composite in most behaviour tree implementations.
Nonetheless, this is useful in fields like robotics, where you have to ensure that your manipulator doesn't
    drop its payload mid-motion as soon as a higher priority interrupt arrives. Use this composite
sparingly and only if you can't find another way to easily create an elegant tree composition for your task.
Args:
name (:obj:`str`): the composite behaviour name
children ([:class:`~py_trees.behaviour.Behaviour`]): list of children to add
*args: variable length argument list
**kwargs: arbitrary keyword arguments
"""
def __init__(self, name="Chooser", children=None, *args, **kwargs):
super(Chooser, self).__init__(name, children, *args, **kwargs)
def tick(self):
"""
Run the tick behaviour for this chooser. Note that the status
of the tick is (for now) always determined by its children, not
by the user customised update function.
Yields:
:class:`~py_trees.behaviour.Behaviour`: a reference to itself or one of its children
"""
self.logger.debug("%s.tick()" % self.__class__.__name__)
# Required behaviour for *all* behaviours and composites is
# for tick() to check if it isn't running and initialise
if self.status != Status.RUNNING:
# chooser specific initialisation
# invalidate everything
for child in self.children:
child.stop(Status.INVALID)
self.current_child = None
# run subclass (user) initialisation
self.initialise()
# run any work designated by a customised instance of this class
self.update()
if self.current_child is not None:
# run our child, and invalidate anyone else who may have been ticked last run
# (bit wasteful always checking for the latter)
for child in self.children:
if child is self.current_child:
for node in self.current_child.tick():
yield node
elif child.status != Status.INVALID:
child.stop(Status.INVALID)
else:
for child in self.children:
for node in child.tick():
yield node
if child.status == Status.RUNNING or child.status == Status.SUCCESS:
self.current_child = child
break
new_status = self.current_child.status if self.current_child is not None else Status.FAILURE
self.stop(new_status)
yield self
##############################################################################
# Sequence
##############################################################################
class Sequence(Composite):
"""
Sequences are the factory lines of Behaviour Trees
.. graphviz:: dot/sequence.dot
A sequence will progressively tick over each of its children so long as
each child returns :data:`~py_trees.common.Status.SUCCESS`. If any child returns
:data:`~py_trees.common.Status.FAILURE` or :data:`~py_trees.common.Status.RUNNING` the sequence will halt and the parent will adopt
the result of this child. If it reaches the last child, it returns with
that result regardless.
.. note::
The sequence halts once it sees a child is RUNNING and then returns
the result. *It does not get stuck in the running behaviour*.
.. seealso:: The :ref:`py-trees-demo-sequence-program` program demos a simple sequence in action.
Args:
name (:obj:`str`): the composite behaviour name
children ([:class:`~py_trees.behaviour.Behaviour`]): list of children to add
*args: variable length argument list
**kwargs: arbitrary keyword arguments
"""
def __init__(self, name="Sequence", children=None, *args, **kwargs):
super(Sequence, self).__init__(name, children, *args, **kwargs)
self.current_index = -1 # -1 indicates uninitialised
def tick(self):
"""
Tick over the children.
Yields:
:class:`~py_trees.behaviour.Behaviour`: a reference to itself or one of its children
"""
self.logger.debug("%s.tick()" % self.__class__.__name__)
if self.status != Status.RUNNING:
self.logger.debug("%s.tick() [!RUNNING->resetting child index]" % self.__class__.__name__)
# sequence specific handling
self.current_index = 0
for child in self.children:
# reset the children, this helps when introspecting the tree
if child.status != Status.INVALID:
child.stop(Status.INVALID)
# subclass (user) handling
self.initialise()
# run any work designated by a customised instance of this class
self.update()
for child in itertools.islice(self.children, self.current_index, None):
for node in child.tick():
yield node
if node is child and node.status != Status.SUCCESS:
self.status = node.status
yield self
return
self.current_index += 1
# At this point, all children are happy with their SUCCESS, so we should be happy too
self.current_index -= 1 # went off the end of the list if we got to here
self.stop(Status.SUCCESS)
yield self
@property
def current_child(self):
"""
Have to check if there's anything actually running first.
Returns:
:class:`~py_trees.behaviour.Behaviour`: the child that is currently running, or None
"""
if self.current_index == -1:
return None
return self.children[self.current_index] if self.children else None
def stop(self, new_status=Status.INVALID):
"""
        Stopping a sequence requires taking care of the current index. Note that it
        is important to implement this here instead of terminate, so users are free
to subclass this easily with their own terminate and not have to remember
that they need to call this function manually.
Args:
new_status (:class:`~py_trees.common.Status`): the composite is transitioning to this new status
"""
# retain information about the last running child if the new status is
# SUCCESS or FAILURE
if new_status == Status.INVALID:
self.current_index = -1
Composite.stop(self, new_status)
##############################################################################
# Parallel
##############################################################################
class Parallel(Composite):
"""
Parallels enable a kind of concurrency
.. graphviz:: dot/parallel.dot
    Ticks every child every time the parallel is run (a poor man's form of parallelism).
* Parallels will return :data:`~py_trees.common.Status.FAILURE` if any child returns :py:data:`~py_trees.common.Status.FAILURE`
* Parallels with policy :data:`~py_trees.common.ParallelPolicy.SUCCESS_ON_ONE` return :py:data:`~py_trees.common.Status.SUCCESS` if **at least one** child returns :py:data:`~py_trees.common.Status.SUCCESS` and others are :py:data:`~py_trees.common.Status.RUNNING`.
* Parallels with policy :data:`~py_trees.common.ParallelPolicy.SUCCESS_ON_ALL` only returns :py:data:`~py_trees.common.Status.SUCCESS` if **all** children return :py:data:`~py_trees.common.Status.SUCCESS`
.. seealso:: The :ref:`py-trees-demo-context-switching-program` program demos a parallel used to assist in a context switching scenario.
Args:
name (:obj:`str`): the composite behaviour name
policy (:class:`~py_trees.common.ParallelPolicy`): policy to use for deciding success or otherwise
children ([:class:`~py_trees.behaviour.Behaviour`]): list of children to add
*args: variable length argument list
**kwargs: arbitrary keyword arguments
"""
def __init__(self, name="Parallel", policy=common.ParallelPolicy.SUCCESS_ON_ALL, children=None, *args, **kwargs):
super(Parallel, self).__init__(name, children, *args, **kwargs)
self.policy = policy
def tick(self):
"""
Tick over the children.
Yields:
:class:`~py_trees.behaviour.Behaviour`: a reference to itself or one of its children
"""
if self.status != Status.RUNNING:
# subclass (user) handling
self.initialise()
self.logger.debug("%s.tick()" % self.__class__.__name__)
# process them all first
for child in self.children:
for node in child.tick():
yield node
# new_status = Status.SUCCESS if self.policy == common.ParallelPolicy.SUCCESS_ON_ALL else Status.RUNNING
new_status = Status.RUNNING
if any([c.status == Status.FAILURE for c in self.children]):
new_status = Status.FAILURE
else:
if self.policy == common.ParallelPolicy.SUCCESS_ON_ALL:
if all([c.status == Status.SUCCESS for c in self.children]):
new_status = Status.SUCCESS
elif self.policy == common.ParallelPolicy.SUCCESS_ON_ONE:
if any([c.status == Status.SUCCESS for c in self.children]):
new_status = Status.SUCCESS
# special case composite - this parallel may have children that are still running
# so if the parallel itself has reached a final status, then these running children
# need to be made aware of it too
if new_status != Status.RUNNING:
for child in self.children:
if child.status == Status.RUNNING:
# interrupt it (exactly as if it was interrupted by a higher priority)
child.stop(Status.INVALID)
self.stop(new_status)
self.status = new_status
yield self
@property
def current_child(self):
"""
Have to check if there's anything actually running first.
Returns:
:class:`~py_trees.behaviour.Behaviour`: the child that is currently running, or None
"""
if self.status == Status.INVALID:
return None
if self.status == Status.FAILURE:
for child in self.children:
if child.status == Status.FAILURE:
return child
# shouldn't get here
elif self.status == Status.SUCCESS and self.policy == common.ParallelPolicy.SUCCESS_ON_ONE:
for child in self.children:
if child.status == Status.SUCCESS:
return child
else:
return self.children[-1]
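# A minimal usage sketch of the composites defined above, guarded so that importing
# this module is unaffected (it can be exercised with ``python -m py_trees.composites``).
# The ``Idle`` and ``Falter`` leaves below are hypothetical stand-ins for real
# behaviours; everything else uses only classes from this module.
if __name__ == "__main__":
    class Idle(Behaviour):
        """Toy leaf that always succeeds."""
        def update(self):
            return Status.SUCCESS
    class Falter(Behaviour):
        """Toy leaf that always fails."""
        def update(self):
            return Status.FAILURE
    # The selector tries the highest priority branch first; only when it fails does
    # the work sequence run, and the parallel gatherer is the lowest priority fallback.
    work = Sequence(name="Work", children=[Idle(name="Step 1"), Idle(name="Step 2")])
    gather = Parallel(name="Gather", children=[Idle(name="Sensor A"), Idle(name="Sensor B")])
    root = Selector(name="Root", children=[Falter(name="High Priority"), work, gather])
    # Ticking is generator based - consuming the generator ticks the whole tree once.
    for unused_node in root.tick():
        pass
    print("root: %s, current child: %s" % (root.status, root.current_child.name))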
| {
"content_hash": "44ba99e90c99961bef633180e72d165c",
"timestamp": "",
"source": "github",
"line_count": 609,
"max_line_length": 268,
"avg_line_length": 41.614121510673236,
"alnum_prop": 0.5892751450104565,
"repo_name": "stonier/py_trees_suite",
"id": "439b76c304a1e05941839a8ec6254f6d3995f136",
"size": "25628",
"binary": false,
"copies": "1",
"ref": "refs/heads/release/0.6.x",
"path": "py_trees/composites.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""
PostgreSQL database backend for Django.
Requires psycopg 2: http://initd.org/projects/psycopg2
"""
import warnings
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import DEFAULT_DB_ALIAS
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.utils import DatabaseError as WrappedDatabaseError
from django.utils import six
from django.utils.encoding import force_str
from django.utils.functional import cached_property
from django.utils.safestring import SafeBytes, SafeText
try:
import psycopg2 as Database
import psycopg2.extensions
import psycopg2.extras
except ImportError as e:
raise ImproperlyConfigured("Error loading psycopg2 module: %s" % e)
def psycopg2_version():
version = psycopg2.__version__.split(' ', 1)[0]
return tuple(int(v) for v in version.split('.') if v.isdigit())
PSYCOPG2_VERSION = psycopg2_version()
if PSYCOPG2_VERSION < (2, 4, 5):
raise ImproperlyConfigured("psycopg2_version 2.4.5 or newer is required; you have %s" % psycopg2.__version__)
# Some of these import psycopg2, so import them after checking if it's installed.
from .client import DatabaseClient # NOQA isort:skip
from .creation import DatabaseCreation # NOQA isort:skip
from .features import DatabaseFeatures # NOQA isort:skip
from .introspection import DatabaseIntrospection # NOQA isort:skip
from .operations import DatabaseOperations # NOQA isort:skip
from .schema import DatabaseSchemaEditor # NOQA isort:skip
from .utils import utc_tzinfo_factory # NOQA isort:skip
from .version import get_version # NOQA isort:skip
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
if six.PY2:
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)
psycopg2.extensions.register_adapter(SafeBytes, psycopg2.extensions.QuotedString)
psycopg2.extensions.register_adapter(SafeText, psycopg2.extensions.QuotedString)
psycopg2.extras.register_uuid()
# Register support for inet[] manually so we don't have to handle the Inet()
# object on load all the time.
INETARRAY_OID = 1041
INETARRAY = psycopg2.extensions.new_array_type(
(INETARRAY_OID,),
'INETARRAY',
psycopg2.extensions.UNICODE,
)
psycopg2.extensions.register_type(INETARRAY)
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'postgresql'
# This dictionary maps Field objects to their associated PostgreSQL column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
data_types = {
'AutoField': 'serial',
'BigAutoField': 'bigserial',
'BinaryField': 'bytea',
'BooleanField': 'boolean',
'CharField': 'varchar(%(max_length)s)',
'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
'DateField': 'date',
'DateTimeField': 'timestamp with time zone',
'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
'DurationField': 'interval',
'FileField': 'varchar(%(max_length)s)',
'FilePathField': 'varchar(%(max_length)s)',
'FloatField': 'double precision',
'IntegerField': 'integer',
'BigIntegerField': 'bigint',
'IPAddressField': 'inet',
'GenericIPAddressField': 'inet',
'NullBooleanField': 'boolean',
'OneToOneField': 'integer',
'PositiveIntegerField': 'integer',
'PositiveSmallIntegerField': 'smallint',
'SlugField': 'varchar(%(max_length)s)',
'SmallIntegerField': 'smallint',
'TextField': 'text',
'TimeField': 'time',
'UUIDField': 'uuid',
}
data_type_check_constraints = {
'PositiveIntegerField': '"%(column)s" >= 0',
'PositiveSmallIntegerField': '"%(column)s" >= 0',
}
operators = {
'exact': '= %s',
'iexact': '= UPPER(%s)',
'contains': 'LIKE %s',
'icontains': 'LIKE UPPER(%s)',
'regex': '~ %s',
'iregex': '~* %s',
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': 'LIKE %s',
'endswith': 'LIKE %s',
'istartswith': 'LIKE UPPER(%s)',
'iendswith': 'LIKE UPPER(%s)',
}
# The patterns below are used to generate SQL pattern lookup clauses when
# the right-hand side of the lookup isn't a raw string (it might be an expression
# or the result of a bilateral transformation).
# In those cases, special characters for LIKE operators (e.g. \, *, _) should be
# escaped on database side.
#
# Note: we use str.format() here for readability as '%' is used as a wildcard for
# the LIKE operator.
pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')"
pattern_ops = {
'contains': "LIKE '%%' || {} || '%%'",
'icontains': "LIKE '%%' || UPPER({}) || '%%'",
'startswith': "LIKE {} || '%%'",
'istartswith': "LIKE UPPER({}) || '%%'",
'endswith': "LIKE '%%' || {}",
'iendswith': "LIKE '%%' || UPPER({})",
}
Database = Database
SchemaEditorClass = DatabaseSchemaEditor
# Classes instantiated in __init__().
client_class = DatabaseClient
creation_class = DatabaseCreation
features_class = DatabaseFeatures
introspection_class = DatabaseIntrospection
ops_class = DatabaseOperations
def get_connection_params(self):
settings_dict = self.settings_dict
# None may be used to connect to the default 'postgres' db
if settings_dict['NAME'] == '':
raise ImproperlyConfigured(
"settings.DATABASES is improperly configured. "
"Please supply the NAME value.")
conn_params = {
'database': settings_dict['NAME'] or 'postgres',
}
conn_params.update(settings_dict['OPTIONS'])
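        # OPTIONS may carry 'isolation_level'; it is handled in get_new_connection()
        # below rather than being passed to psycopg2.connect(), which does not accept it.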
conn_params.pop('isolation_level', None)
if settings_dict['USER']:
conn_params['user'] = settings_dict['USER']
if settings_dict['PASSWORD']:
conn_params['password'] = force_str(settings_dict['PASSWORD'])
if settings_dict['HOST']:
conn_params['host'] = settings_dict['HOST']
if settings_dict['PORT']:
conn_params['port'] = settings_dict['PORT']
return conn_params
def get_new_connection(self, conn_params):
connection = Database.connect(**conn_params)
# self.isolation_level must be set:
# - after connecting to the database in order to obtain the database's
# default when no value is explicitly specified in options.
# - before calling _set_autocommit() because if autocommit is on, that
# will set connection.isolation_level to ISOLATION_LEVEL_AUTOCOMMIT.
options = self.settings_dict['OPTIONS']
try:
self.isolation_level = options['isolation_level']
except KeyError:
self.isolation_level = connection.isolation_level
else:
# Set the isolation level to the value from OPTIONS.
if self.isolation_level != connection.isolation_level:
connection.set_session(isolation_level=self.isolation_level)
return connection
def ensure_timezone(self):
self.ensure_connection()
conn_timezone_name = self.connection.get_parameter_status('TimeZone')
timezone_name = self.timezone_name
if timezone_name and conn_timezone_name != timezone_name:
with self.connection.cursor() as cursor:
cursor.execute(self.ops.set_time_zone_sql(), [timezone_name])
return True
return False
def init_connection_state(self):
self.connection.set_client_encoding('UTF8')
timezone_changed = self.ensure_timezone()
if timezone_changed:
# Commit after setting the time zone (see #17062)
if not self.get_autocommit():
self.connection.commit()
def create_cursor(self):
cursor = self.connection.cursor()
cursor.tzinfo_factory = utc_tzinfo_factory if settings.USE_TZ else None
return cursor
def _set_autocommit(self, autocommit):
with self.wrap_database_errors:
self.connection.autocommit = autocommit
def check_constraints(self, table_names=None):
"""
        To check constraints, we set constraints to immediate. Then, when we're done, we must ensure they
are returned to deferred.
"""
self.cursor().execute('SET CONSTRAINTS ALL IMMEDIATE')
self.cursor().execute('SET CONSTRAINTS ALL DEFERRED')
def is_usable(self):
try:
# Use a psycopg cursor directly, bypassing Django's utilities.
self.connection.cursor().execute("SELECT 1")
except Database.Error:
return False
else:
return True
@property
def _nodb_connection(self):
nodb_connection = super(DatabaseWrapper, self)._nodb_connection
try:
nodb_connection.ensure_connection()
except (DatabaseError, WrappedDatabaseError):
warnings.warn(
"Normally Django will use a connection to the 'postgres' database "
"to avoid running initialization queries against the production "
"database when it's not needed (for example, when running tests). "
"Django was unable to create a connection to the 'postgres' database "
"and will use the default database instead.",
RuntimeWarning
)
settings_dict = self.settings_dict.copy()
settings_dict['NAME'] = settings.DATABASES[DEFAULT_DB_ALIAS]['NAME']
nodb_connection = self.__class__(
                settings_dict,
alias=self.alias,
allow_thread_sharing=False)
return nodb_connection
@cached_property
def psycopg2_version(self):
return PSYCOPG2_VERSION
@cached_property
def pg_version(self):
with self.temporary_connection():
return get_version(self.connection)
| {
"content_hash": "845a70e1dc9de2a5f19698aea4f7bd87",
"timestamp": "",
"source": "github",
"line_count": 266,
"max_line_length": 113,
"avg_line_length": 39.54887218045113,
"alnum_prop": 0.6263307984790875,
"repo_name": "jarshwah/django",
"id": "1ab05d81f60d232d7037beb7f70f804b4954fec4",
"size": "10520",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/db/backends/postgresql/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "53023"
},
{
"name": "HTML",
"bytes": "172977"
},
{
"name": "JavaScript",
"bytes": "448123"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "12112516"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
"""Test the Trusted Networks auth provider."""
from ipaddress import ip_address, ip_network
from unittest.mock import Mock, patch
from hass_nabucasa import remote
import pytest
import voluptuous as vol
from homeassistant import auth
from homeassistant.auth import auth_store
from homeassistant.auth.providers import trusted_networks as tn_auth
from homeassistant.components.http import CONF_TRUSTED_PROXIES, CONF_USE_X_FORWARDED_FOR
from homeassistant.data_entry_flow import RESULT_TYPE_ABORT, RESULT_TYPE_CREATE_ENTRY
from homeassistant.setup import async_setup_component
@pytest.fixture
def store(hass):
"""Mock store."""
return auth_store.AuthStore(hass)
@pytest.fixture
def provider(hass, store):
"""Mock provider."""
return tn_auth.TrustedNetworksAuthProvider(
hass,
store,
tn_auth.CONFIG_SCHEMA(
{
"type": "trusted_networks",
"trusted_networks": [
"192.168.0.1",
"192.168.128.0/24",
"::1",
"fd00::/8",
],
}
),
)
@pytest.fixture
def provider_with_user(hass, store):
"""Mock provider with trusted users config."""
return tn_auth.TrustedNetworksAuthProvider(
hass,
store,
tn_auth.CONFIG_SCHEMA(
{
"type": "trusted_networks",
"trusted_networks": [
"192.168.0.1",
"192.168.128.0/24",
"::1",
"fd00::/8",
],
# user_id will be injected in test
"trusted_users": {
"192.168.0.1": [],
"192.168.128.0/24": [],
"fd00::/8": [],
},
}
),
)
@pytest.fixture
def provider_bypass_login(hass, store):
"""Mock provider with allow_bypass_login config."""
return tn_auth.TrustedNetworksAuthProvider(
hass,
store,
tn_auth.CONFIG_SCHEMA(
{
"type": "trusted_networks",
"trusted_networks": [
"192.168.0.1",
"192.168.128.0/24",
"::1",
"fd00::/8",
],
"allow_bypass_login": True,
}
),
)
@pytest.fixture
def manager(hass, store, provider):
"""Mock manager."""
return auth.AuthManager(hass, store, {(provider.type, provider.id): provider}, {})
@pytest.fixture
def manager_with_user(hass, store, provider_with_user):
"""Mock manager with trusted user."""
return auth.AuthManager(
hass,
store,
{(provider_with_user.type, provider_with_user.id): provider_with_user},
{},
)
@pytest.fixture
def manager_bypass_login(hass, store, provider_bypass_login):
"""Mock manager with allow bypass login."""
return auth.AuthManager(
hass,
store,
{(provider_bypass_login.type, provider_bypass_login.id): provider_bypass_login},
{},
)
async def test_trusted_networks_credentials(manager, provider):
"""Test trusted_networks credentials related functions."""
owner = await manager.async_create_user("test-owner")
tn_owner_cred = await provider.async_get_or_create_credentials({"user": owner.id})
assert tn_owner_cred.is_new is False
assert any(cred.id == tn_owner_cred.id for cred in owner.credentials)
user = await manager.async_create_user("test-user")
tn_user_cred = await provider.async_get_or_create_credentials({"user": user.id})
assert tn_user_cred.id != tn_owner_cred.id
assert tn_user_cred.is_new is False
assert any(cred.id == tn_user_cred.id for cred in user.credentials)
with pytest.raises(tn_auth.InvalidUserError):
await provider.async_get_or_create_credentials({"user": "invalid-user"})
async def test_validate_access(provider):
"""Test validate access from trusted networks."""
provider.async_validate_access(ip_address("192.168.0.1"))
provider.async_validate_access(ip_address("192.168.128.10"))
provider.async_validate_access(ip_address("::1"))
provider.async_validate_access(ip_address("fd01:db8::ff00:42:8329"))
with pytest.raises(tn_auth.InvalidAuthError):
provider.async_validate_access(ip_address("192.168.0.2"))
with pytest.raises(tn_auth.InvalidAuthError):
provider.async_validate_access(ip_address("127.0.0.1"))
with pytest.raises(tn_auth.InvalidAuthError):
provider.async_validate_access(ip_address("2001:db8::ff00:42:8329"))
async def test_validate_access_proxy(hass, provider):
"""Test validate access from trusted networks are blocked from proxy."""
await async_setup_component(
hass,
"http",
{
"http": {
CONF_TRUSTED_PROXIES: ["192.168.128.0/31", "fd00::1"],
CONF_USE_X_FORWARDED_FOR: True,
}
},
)
provider.async_validate_access(ip_address("192.168.128.2"))
provider.async_validate_access(ip_address("fd00::2"))
with pytest.raises(tn_auth.InvalidAuthError):
provider.async_validate_access(ip_address("192.168.128.0"))
with pytest.raises(tn_auth.InvalidAuthError):
provider.async_validate_access(ip_address("192.168.128.1"))
with pytest.raises(tn_auth.InvalidAuthError):
provider.async_validate_access(ip_address("fd00::1"))
async def test_validate_access_cloud(hass, provider):
"""Test validate access from trusted networks are blocked from cloud."""
await async_setup_component(
hass,
"http",
{
"http": {
CONF_TRUSTED_PROXIES: ["192.168.128.0/31", "fd00::1"],
CONF_USE_X_FORWARDED_FOR: True,
}
},
)
hass.config.components.add("cloud")
provider.async_validate_access(ip_address("192.168.128.2"))
remote.is_cloud_request.set(True)
with pytest.raises(tn_auth.InvalidAuthError):
provider.async_validate_access(ip_address("192.168.128.2"))
async def test_validate_refresh_token(provider):
"""Verify re-validation of refresh token."""
with patch.object(provider, "async_validate_access") as mock:
with pytest.raises(tn_auth.InvalidAuthError):
provider.async_validate_refresh_token(Mock(), None)
provider.async_validate_refresh_token(Mock(), "127.0.0.1")
mock.assert_called_once_with(ip_address("127.0.0.1"))
async def test_login_flow(manager, provider):
"""Test login flow."""
owner = await manager.async_create_user("test-owner")
user = await manager.async_create_user("test-user")
# not from trusted network
flow = await provider.async_login_flow({"ip_address": ip_address("127.0.0.1")})
step = await flow.async_step_init()
assert step["type"] == RESULT_TYPE_ABORT
assert step["reason"] == "not_allowed"
# from trusted network, list users
flow = await provider.async_login_flow({"ip_address": ip_address("192.168.0.1")})
step = await flow.async_step_init()
assert step["step_id"] == "init"
schema = step["data_schema"]
assert schema({"user": owner.id})
with pytest.raises(vol.Invalid):
assert schema({"user": "invalid-user"})
# login with valid user
step = await flow.async_step_init({"user": user.id})
assert step["type"] == RESULT_TYPE_CREATE_ENTRY
assert step["data"]["user"] == user.id
async def test_trusted_users_login(manager_with_user, provider_with_user):
"""Test available user list changed per different IP."""
owner = await manager_with_user.async_create_user("test-owner")
sys_user = await manager_with_user.async_create_system_user(
"test-sys-user"
) # system user will not be available to select
user = await manager_with_user.async_create_user("test-user")
# change the trusted users config
config = provider_with_user.config["trusted_users"]
assert ip_network("192.168.0.1") in config
config[ip_network("192.168.0.1")] = [owner.id]
assert ip_network("192.168.128.0/24") in config
config[ip_network("192.168.128.0/24")] = [sys_user.id, user.id]
# not from trusted network
flow = await provider_with_user.async_login_flow(
{"ip_address": ip_address("127.0.0.1")}
)
step = await flow.async_step_init()
assert step["type"] == RESULT_TYPE_ABORT
assert step["reason"] == "not_allowed"
# from trusted network, list users intersect trusted_users
flow = await provider_with_user.async_login_flow(
{"ip_address": ip_address("192.168.0.1")}
)
step = await flow.async_step_init()
assert step["step_id"] == "init"
schema = step["data_schema"]
# only owner listed
assert schema({"user": owner.id})
with pytest.raises(vol.Invalid):
assert schema({"user": user.id})
# from trusted network, list users intersect trusted_users
flow = await provider_with_user.async_login_flow(
{"ip_address": ip_address("192.168.128.1")}
)
step = await flow.async_step_init()
assert step["step_id"] == "init"
schema = step["data_schema"]
# only user listed
assert schema({"user": user.id})
with pytest.raises(vol.Invalid):
assert schema({"user": owner.id})
with pytest.raises(vol.Invalid):
assert schema({"user": sys_user.id})
# from trusted network, list users intersect trusted_users
flow = await provider_with_user.async_login_flow({"ip_address": ip_address("::1")})
step = await flow.async_step_init()
assert step["step_id"] == "init"
schema = step["data_schema"]
# both owner and user listed
assert schema({"user": owner.id})
assert schema({"user": user.id})
with pytest.raises(vol.Invalid):
assert schema({"user": sys_user.id})
# from trusted network, list users intersect trusted_users
flow = await provider_with_user.async_login_flow(
{"ip_address": ip_address("fd00::1")}
)
step = await flow.async_step_init()
assert step["step_id"] == "init"
schema = step["data_schema"]
# no user listed
with pytest.raises(vol.Invalid):
assert schema({"user": owner.id})
with pytest.raises(vol.Invalid):
assert schema({"user": user.id})
with pytest.raises(vol.Invalid):
assert schema({"user": sys_user.id})
async def test_trusted_group_login(manager_with_user, provider_with_user):
"""Test config trusted_user with group_id."""
owner = await manager_with_user.async_create_user("test-owner")
# create a user in user group
user = await manager_with_user.async_create_user("test-user")
await manager_with_user.async_update_user(
user, group_ids=[auth.const.GROUP_ID_USER]
)
# change the trusted users config
config = provider_with_user.config["trusted_users"]
assert ip_network("192.168.0.1") in config
config[ip_network("192.168.0.1")] = [{"group": [auth.const.GROUP_ID_USER]}]
assert ip_network("192.168.128.0/24") in config
config[ip_network("192.168.128.0/24")] = [
owner.id,
{"group": [auth.const.GROUP_ID_USER]},
]
# not from trusted network
flow = await provider_with_user.async_login_flow(
{"ip_address": ip_address("127.0.0.1")}
)
step = await flow.async_step_init()
assert step["type"] == RESULT_TYPE_ABORT
assert step["reason"] == "not_allowed"
# from trusted network, list users intersect trusted_users
flow = await provider_with_user.async_login_flow(
{"ip_address": ip_address("192.168.0.1")}
)
step = await flow.async_step_init()
assert step["step_id"] == "init"
schema = step["data_schema"]
# only user listed
assert schema({"user": user.id})
with pytest.raises(vol.Invalid):
assert schema({"user": owner.id})
# from trusted network, list users intersect trusted_users
flow = await provider_with_user.async_login_flow(
{"ip_address": ip_address("192.168.128.1")}
)
step = await flow.async_step_init()
assert step["step_id"] == "init"
schema = step["data_schema"]
# both owner and user listed
assert schema({"user": owner.id})
assert schema({"user": user.id})
async def test_bypass_login_flow(manager_bypass_login, provider_bypass_login):
"""Test login flow can be bypass if only one user available."""
owner = await manager_bypass_login.async_create_user("test-owner")
# not from trusted network
flow = await provider_bypass_login.async_login_flow(
{"ip_address": ip_address("127.0.0.1")}
)
step = await flow.async_step_init()
assert step["type"] == RESULT_TYPE_ABORT
assert step["reason"] == "not_allowed"
# from trusted network, only one available user, bypass the login flow
flow = await provider_bypass_login.async_login_flow(
{"ip_address": ip_address("192.168.0.1")}
)
step = await flow.async_step_init()
assert step["type"] == RESULT_TYPE_CREATE_ENTRY
assert step["data"]["user"] == owner.id
user = await manager_bypass_login.async_create_user("test-user")
# from trusted network, two available user, show up login form
flow = await provider_bypass_login.async_login_flow(
{"ip_address": ip_address("192.168.0.1")}
)
step = await flow.async_step_init()
schema = step["data_schema"]
# both owner and user listed
assert schema({"user": owner.id})
assert schema({"user": user.id})
| {
"content_hash": "4f678dc47b911da44e688df5d48ba1a2",
"timestamp": "",
"source": "github",
"line_count": 394,
"max_line_length": 88,
"avg_line_length": 34.464467005076145,
"alnum_prop": 0.6227262685028353,
"repo_name": "toddeye/home-assistant",
"id": "406e9a033da4511dbc52c455e6b5b9bae1090f5e",
"size": "13579",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "tests/auth/providers/test_trusted_networks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3005"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "47414832"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
from cloudify import ctx
# put the operation decorator on any function that is a task
from cloudify.decorators import operation
@operation
def my_task(str1, str2, **kwargs):
ctx.logger.info('str1 = '+str1)
ctx.logger.info('str2 = '+str2)
result = str1 + str2
# setting node instance runtime property
ctx.instance.runtime_properties['result'] = result
ctx.logger.info('result = '+result)
| {
"content_hash": "80f4fc3e3e9045c620abdcebd91fb9bf",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 60,
"avg_line_length": 27.933333333333334,
"alnum_prop": 0.6992840095465394,
"repo_name": "lj020326/cloudify3-training-labs",
"id": "15a5e359b85fefb735c478679e1a2f880a769950",
"size": "1104",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugin/tasks.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5672"
}
],
"symlink_target": ""
} |
import sys
import os
import shlex
from recommonmark.parser import CommonMarkParser
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
source_parsers = {
'.md': CommonMarkParser,
}
source_suffix = ['.rst', '.md']
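# With the two settings above, Sphinx feeds *.md sources (e.g. a hypothetical
# changelog.md) through CommonMarkParser, while *.rst files keep the default
# reStructuredText parser; both become ordinary documents in the build.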
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pyonep'
copyright = u'2015, Exosite'
author = u'Exosite'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
sys.path.append('..')
from pyonep import __version__ as pyonep_version
version = pyonep_version
# The full version, including alpha/beta/rc tags.
release = pyonep_version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyonepdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pyonep.tex', u'pyonep Documentation',
u'Exosite', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pyonep', u'pyonep Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pyonep', u'pyonep Documentation',
author, 'pyonep', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| {
"content_hash": "4128aa06f7ff4a905d63c6153d582399",
"timestamp": "",
"source": "github",
"line_count": 273,
"max_line_length": 79,
"avg_line_length": 32.201465201465204,
"alnum_prop": 0.7060630189967012,
"repo_name": "asolz/pyonep",
"id": "68806dcbbe8f74e7f0af9810636b63dede0eafeb",
"size": "9210",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "58949"
},
{
"name": "Shell",
"bytes": "1336"
}
],
"symlink_target": ""
} |
import os
import sys
try:
from setuptools import setup, Command
except ImportError:
from distutils.core import setup, Command
package_name = 'dpkt'
description = 'fast, simple packet creation / parsing, with definitions for the basic TCP/IP protocols'
readme = open('README.rst').read()
requirements = []
# PyPI Readme
long_description = readme  # reuse the README text read above
# Pull in the package
package = __import__(package_name)
package_version = package.__version__
if "bdist_msi" in sys.argv:
# The MSI build target does not support a 4 digit version, e.g. '1.2.3.4'
# therefore we remove the last digit.
package_version, _, _ = package_version.rpartition('.')
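    # Illustrative example (not in the original file):
    #   '1.2.3.4'.rpartition('.')  ->  ('1.2.3', '.', '4')
    # so only the leading '1.2.3' survives for the MSI build.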
setup(name=package_name,
version=package_version,
author=package.__author__,
author_email=package.__author_email__,
url=package.__url__,
description=description,
long_description=long_description,
packages=['dpkt'],
install_requires=requirements,
license='BSD',
zip_safe=False,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
]
)
| {
"content_hash": "682b95e06084efaf6cb1ba7f7c0cc1e3",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 103,
"avg_line_length": 32.46808510638298,
"alnum_prop": 0.6402359108781127,
"repo_name": "smutt/dpkt",
"id": "8af6f87ca28f9375a9c4f2a50b88a16207286c7e",
"size": "1526",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "466"
},
{
"name": "Python",
"bytes": "457209"
}
],
"symlink_target": ""
} |
import os
from opentamiltests import *
from solthiruthi.datastore import Trie
from solthiruthi.dictionary import *
# Test the canned English dictionary with data structure
class TestEnglishDictionary(unittest.TestCase):
def setUp(self):
self.ENG, self.ENG_size = DictionaryBuilder.create(EnglishLinux)
print(u"Loading EnglishLinux dictionary @ size = %d" % self.ENG_size)
def tearDown(self):
del self.ENG
def test_factory_EnglishLinux(self):
words = [u"Car", u"Rabbit", u"COdE", u"CoMpUtEr", u"UnIx", u"InDiA"]
for w in words:
print(u"Verifying ... %s" % w)
self.assertTrue(self.ENG.isWord(w.lower()))
self.assertTrue(self.ENG.isWord(w.upper()))
self.assertTrue(self.ENG.isWord(w))
self.assertFalse(self.ENG.isWord(w + "31"))
return
class TestParallelDictionary(unittest.TestCase):
def setUp(self):
self.paralleld, _ = DictionaryBuilder.create(ParallelDictionary)
def test_wordlist(self):
self.assertTrue(len(self.paralleld.synonym) > 0)
self.assertEqual(
len(self.paralleld.getAllWords()), -3 + len(self.paralleld.synonym)
)
def test_synonym(self):
word, syn = u"abundance-மிகுதியாக".split("-")
res = self.paralleld.getWordTranslation(word)
self.assertEqual(res, syn)
class TestDictionarySaveLoad(unittest.TestCase):
def setUp(self):
self.fname = "data.dot"
self.wl = [u"abelian", u"commutative", u"monoid", u"rings", u"groups"]
self.TMP, self.TMPVocabSize = DictionaryBuilder.createUsingWordList(self.wl)
Trie.serializeToFile(self.TMP, self.fname)
def tearDown(self):
os.unlink(self.fname)
def test_wordlist_dictionary(self):
self.assertEqual(self.TMPVocabSize, len(self.wl))
self.assertTrue(self.TMP.isWord(u"groups"))
self.assertTrue(self.TMP.isWord(u"rings"))
self.assertFalse(self.TMP.isWord(u"trefoil"))
def test_load_n_save(self):
reloadTMP = Trie.deserializeFromFile(self.fname)
self.assertEqual(reloadTMP.getSize(), len(self.wl))
self.assertTrue(reloadTMP.isWord(u"groups"))
self.assertTrue(reloadTMP.isWord(u"rings"))
self.assertEqual(list(reloadTMP.getAllWords()), list(self.TMP.getAllWords()))
for wl in reloadTMP.getAllWords():
print(wl)
return
# Test the canned dictionary and its backing data structure
class TestDictionary(unittest.TestCase):
def setUp(self):
self.TVU, self.TVU_size = DictionaryBuilder.create(TamilVU)
def tearDown(self):
del self.TVU
def test_wordlist_dictionary(self):
TMP, TMPVocabSize = DictionaryBuilder.createUsingWordList(
["word", "list", "wo", "rdli", "st"]
)
self.assertEqual(TMPVocabSize, 5)
self.assertTrue(TMP.isWord(u"word"))
self.assertFalse(TMP.isWord(u"wor"))
def test_factory_TVU(self):
TVU_agarathi, size = self.TVU, self.TVU_size
self.assertTrue(isinstance(TVU_agarathi, TamilVU))
self.assertEqual(size, 63896)
return
def test_isword(self):
words = u"தமிழ் நாட்டில் சங்ககாலத்திலேயே ஒட்டியாணம் போன்ற இடையணிகள் இருந்தமைக்கான சான்றுகளும் பெண்".split(
" "
)
self.assertEqual(len(list(filter(self.TVU.isWord, words))), 3)
self.assertTrue(self.TVU.isWord(u"தமிழ்"))
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "8cf311d1448800bc56c899e86b87b6da",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 114,
"avg_line_length": 34.245098039215684,
"alnum_prop": 0.6378471228170627,
"repo_name": "arcturusannamalai/open-tamil",
"id": "e6c39232e0c5d551d4676e78454357a62d096341",
"size": "3735",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tests/solthiruthi_canned_datastore.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "14505"
},
{
"name": "HTML",
"bytes": "5869"
},
{
"name": "Java",
"bytes": "35038"
},
{
"name": "JavaScript",
"bytes": "9250"
},
{
"name": "Makefile",
"bytes": "345"
},
{
"name": "Python",
"bytes": "603879"
},
{
"name": "Ruby",
"bytes": "26442"
},
{
"name": "Shell",
"bytes": "3686"
}
],
"symlink_target": ""
} |
import os, sys, signal, os.path
import subprocess
import time
from ipython1.kernel.scripts import ipcluster
from ipython1.kernel import task, controllerservice as cs, engineservice as es
from pebl import data, result
from pebl.learner import greedy
from pebl.taskcontroller import serial, multiprocess, ipy1
from pebl.test import testfile
# NOTE: The EC2 task controller is not tested automatically because:
# 1. it requires authentication credentials that we can't put in svn
# 2. we don't want to spend $$ every time we run pebl's unittests.
# So, it's in pebl/test.manual/test_ec2.py
class TestSerialTC:
tctype = serial.SerialController
args = ()
def setUp(self):
d = data.fromfile(testfile("testdata5.txt"))
d.discretize()
self.tc = self.tctype(*self.args)
self.tasks = [greedy.GreedyLearner(d, max_iterations=100) for i in xrange(6)]
def test_tc(self):
results = self.tc.run(self.tasks)
results = result.merge(results)
assert isinstance(results, result.LearnerResult)
class TestMultiProcessTC(TestSerialTC):
tctype = multiprocess.MultiProcessController
args = (2,)
class TestIPython1TC:
    # I've tried many ways of creating and terminating the cluster, but the
    # termination always fails. So, for now, you have to kill the cluster
    # manually.
def setUp(self):
d = data.fromfile(testfile("testdata5.txt"))
d.discretize()
self.proc = subprocess.Popen("ipcluster -n 2 </dev/null 1>&0 2>&0", shell=True)
time.sleep(5)
def tearDown(self):
os.kill(self.proc.pid, signal.SIGINT)
time.sleep(5)
def test_tc(self):
d = data.fromfile(testfile("testdata5.txt"))
d.discretize()
tasks = [greedy.GreedyLearner(d) for x in range(5)]
tc = ipy1.IPython1Controller("127.0.0.1:10113")
results = tc.run(tasks)
results = result.merge(results)
assert isinstance(results, result.LearnerResult)
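# Illustrative usage sketch (mirrors the test flow above; the worker count and
# iteration limit are arbitrary examples, not values prescribed by pebl):
#
#   dataset = data.fromfile(testfile("testdata5.txt"))
#   dataset.discretize()
#   learners = [greedy.GreedyLearner(dataset, max_iterations=100) for _ in range(4)]
#   merged = result.merge(multiprocess.MultiProcessController(2).run(learners))
#   # merged is a result.LearnerResult, as the tests above assert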
| {
"content_hash": "1d13a210f18f43bb03d3eed32e3ffef4",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 87,
"avg_line_length": 32.91803278688525,
"alnum_prop": 0.6757968127490039,
"repo_name": "abhik/pebl",
"id": "6b59e424d035923d49c9fe021d9631f1d1672ccf",
"size": "2008",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/pebl/test/test_taskcontroller.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9560"
},
{
"name": "JavaScript",
"bytes": "33578"
},
{
"name": "Python",
"bytes": "223655"
}
],
"symlink_target": ""
} |
import unittest
import numpy
import chainer
from chainer.backends import cuda
import chainer.functions as F
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product({
'shape': [(), (3, 2)],
}))
class Expm1FunctionTest(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(.5, 1, self.shape).astype(numpy.float32)
self.gy = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
self.ggx = \
numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
self.check_backward_options = {'atol': 1e-3, 'rtol': 1e-2}
self.check_double_backward_options = {'atol': 1e-3, 'rtol': 1e-2}
def check_forward(self, x_data):
x = chainer.Variable(x_data)
y = F.expm1(x)
testing.assert_allclose(
numpy.expm1(self.x), y.data, atol=1e-7, rtol=1e-7)
def test_expm1_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_expm1_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x_data, y_grad):
gradient_check.check_backward(
F.expm1, x_data, y_grad,
**self.check_backward_options)
def test_expm1_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
def test_expm1_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
def check_double_backward(self, x_data, y_grad, x_grad_grad):
gradient_check.check_double_backward(
F.expm1, x_data, y_grad, x_grad_grad,
**self.check_double_backward_options)
def test_expm1_double_backward_cpu(self):
self.check_double_backward(self.x, self.gy, self.ggx)
@attr.gpu
def test_expm1_double_backward_gpu(self):
self.check_double_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx))
def test_expm1(self):
self.assertEqual(F.Expm1().label, 'expm1')
testing.run_module(__name__, __file__)
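# Editorial note (not part of the test suite): expm1(x) computes exp(x) - 1
# without the cancellation that the naive subtraction suffers for small x,
# which is why a dedicated function (and test) exists. In double precision:
#   numpy.exp(1e-10) - 1   # ~1.000000082740e-10 (rounding error dominates)
#   numpy.expm1(1e-10)     # ~1.000000000050e-10 (close to the true value)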
| {
"content_hash": "720ed804dad2916286c230ee47be37aa",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 79,
"avg_line_length": 30.47826086956522,
"alnum_prop": 0.6367094626723728,
"repo_name": "aonotas/chainer",
"id": "b3373da03b1950008c1250fb2d7e29a76f3a0620",
"size": "2103",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/chainer_tests/functions_tests/math_tests/test_exponential_m1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3368"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "3357320"
}
],
"symlink_target": ""
} |
from lino.projects.std.settings import *
from lino.utils import i2d
class Site(Site):
title = "Lino Mini 9"
project_model = 'contacts.Person'
languages = 'en de fr'
user_types_module = 'lino_xl.lib.xl.user_types'
demo_fixtures = """std demo demo2 checkdata""".split()
default_build_method = 'weasy2pdf'
the_demo_date = i2d(20141023)
webdav_protocol = 'davlink'
def get_installed_apps(self):
yield super(Site, self).get_installed_apps()
# yield 'lino.modlib.users'
yield 'lino_book.projects.min9.modlib.contacts'
yield 'lino_xl.lib.excerpts'
yield 'lino_xl.lib.addresses'
yield 'lino_xl.lib.phones'
yield 'lino_xl.lib.reception'
yield 'lino_xl.lib.courses'
yield 'lino_xl.lib.sepa'
yield 'lino_xl.lib.notes'
# yield 'lino_xl.lib.projects'
yield 'lino_xl.lib.humanlinks'
yield 'lino_xl.lib.households'
yield 'lino_xl.lib.calview'
# yield 'lino_xl.lib.extensible'
yield 'lino_xl.lib.pages'
yield 'lino.modlib.export_excel'
yield 'lino_xl.lib.dupable_partners'
yield 'lino.modlib.checkdata'
yield 'lino.modlib.tinymce'
# yield 'lino.modlib.wkhtmltopdf'
yield 'lino.modlib.weasyprint'
yield 'lino_xl.lib.appypod'
yield 'lino.modlib.notify'
yield 'lino.modlib.changes'
yield 'lino.modlib.comments'
yield 'lino.modlib.uploads'
yield 'lino_xl.lib.properties'
yield 'lino_xl.lib.cv'
yield 'lino_xl.lib.b2c'
yield 'lino_xl.lib.sales'
yield 'lino_xl.lib.finan'
def get_plugin_configs(self):
"""
Change the default value of certain plugin settings.
"""
yield super(Site, self).get_plugin_configs()
yield ('countries', 'country_code', 'BE')
yield ('b2c', 'import_statements_path', self.project_dir.child('sepa_in'))
def do_site_startup(self):
# lino_xl.lib.reception requires some workflow to be imported
from lino_xl.lib.cal.workflows import feedback
super(Site, self).do_site_startup()
SITE = Site(globals())
# ALLOWED_HOSTS = ['*']
DEBUG = True
# SECRET_KEY = "20227" # see :djangoticket:`20227`
| {
"content_hash": "3874d111316218856671cd9c9bda96a5",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 82,
"avg_line_length": 33.83582089552239,
"alnum_prop": 0.6193206881340979,
"repo_name": "lino-framework/book",
"id": "269290e34528ef87283fafd4d5299bc7d0023a2c",
"size": "2375",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lino_book/projects/min9/settings.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "3668"
},
{
"name": "JavaScript",
"bytes": "7140"
},
{
"name": "Python",
"bytes": "991438"
},
{
"name": "Shell",
"bytes": "989"
}
],
"symlink_target": ""
} |
"""
This does a test drawing with lots of things in it, running
with and without attribute checking.
"""
__version__ = '''$Id$'''
from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, outputfile, printLocation
setOutDir(__name__)
import os, sys, time
import reportlab.rl_config
import unittest
from reportlab.lib import colors
from reportlab.lib.units import cm
from reportlab.pdfgen.canvas import Canvas
from reportlab.pdfbase.pdfmetrics import stringWidth
from reportlab.platypus import Flowable
from reportlab.graphics.shapes import *
from reportlab.graphics.charts.piecharts import Pie
class GraphicsSpeedTestCase(unittest.TestCase):
"Test speed of the graphics rendering process."
def test0(self, isFast=0):
"""Hello World, on a rectangular background.
The rectangle's fillColor is yellow.
The string's fillColor is red.
"""
reportlab.rl_config.shapeChecking = not isFast
pdfPath = outputfile('test_graphics_speed_fast.pdf')
c = Canvas(pdfPath)
t0 = time.time()
d = Drawing(400, 200)
num = 100
for i in range(num):
pc = Pie()
pc.x = 150
pc.y = 50
pc.data = [10,20,30,40,50,60]
pc.labels = ['a','b','c','d','e','f']
pc.slices.strokeWidth=0.5
pc.slices[3].popout = 20
pc.slices[3].strokeWidth = 2
pc.slices[3].strokeDashArray = [2,2]
pc.slices[3].labelRadius = 1.75
pc.slices[3].fontColor = colors.red
d.add(pc)
d.drawOn(c, 80, 500)
t1 = time.time()
result = 'drew %d pie charts in %0.4f' % (num, t1 - t0)
open(outputfile('test_graphics_speed_test%s.log' % (isFast+1)), 'w').write(result)
def test1(self, isFast=1):
"Same as test1(), but with shape checking turned on."
self.test0(isFast)
if False:
def test2(self):
"This is a profiled version of test1()."
try:
import profile
except ImportError:
return
fileName = outputfile('test_graphics_speed_profile.log')
# This runs ok, when only this test script is executed,
# but fails, when imported from runAll.py...
profile.run("t = GraphicsSpeedTestCase('test2')", fileName)
def makeSuite():
return makeSuiteForClasses(GraphicsSpeedTestCase)
#noruntests
if __name__ == "__main__":
unittest.TextTestRunner().run(makeSuite())
printLocation()
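# Note (editorial): the speed difference being measured comes from
# reportlab.rl_config.shapeChecking, which test0() toggles via "not isFast";
# with checking off, attribute validation in reportlab.graphics.shapes is
# skipped, which is what the "fast" variant relies on.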
| {
"content_hash": "65bb717ee252fc39269d812779ba37e4",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 92,
"avg_line_length": 31.85,
"alnum_prop": 0.6138147566718996,
"repo_name": "kanarelo/reportlab",
"id": "bf9346ac9a4b3c9409ef23fe604e812e2b8371f9",
"size": "2628",
"binary": false,
"copies": "15",
"ref": "refs/heads/master",
"path": "tests/test_graphics_speed.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "720286"
},
{
"name": "C++",
"bytes": "2140"
},
{
"name": "Java",
"bytes": "6333"
},
{
"name": "Python",
"bytes": "2937123"
},
{
"name": "Shell",
"bytes": "2506"
}
],
"symlink_target": ""
} |