"""Build in connectors.
Connectors have to implement `process_request`.
"""
from .errors import HTTPError
try:
import aiohttp
import asyncio
class AHConnector:
"""Connector implementation using aiohttp."""
def __init__(self, sess=None, loop=None):
self.loop = loop or asyncio.get_event_loop()
self.sess = sess or aiohttp.ClientSession(loop=self.loop)
self.closed = False
def __del__(self):
if not self.closed:
self.close()
def close(self):
self.closed = True
self.sess.close()
@asyncio.coroutine
def process_request(self, endpoint, data, type_):
"""Make and process the request.
Parameters
----------
endpoint : `str`
The HTTP endpoint to make a request to
data : `dict`
The parameters for making the HTTP request
type_ : `type`
A converter to which to pass the response json and return.
"""
resp = yield from self.sess.get(endpoint, params=data)
if resp.status != 200:
raise HTTPError(resp.status, resp.reason, (yield from resp.text()))
data = yield from resp.json()
resp.close()
return type_(data)
except ImportError:
pass
try:
import requests
class ReqConnector:
"""Connector implementation using requests."""
def __init__(self, sess=None):
self.sess = sess or requests.Session()
def __del__(self):
self.close()
def close(self):
self.sess.close()
def process_request(self, endpoint, data, type_):
"""Make and process the request.
Parameters
----------
endpoint : `str`
The HTTP endpoint to make a request to
data : `dict`
The parameters for making the HTTP request
type_ : `type`
A converter to which to pass the response json and return.
"""
resp = self.sess.get(endpoint, params=data)
if resp.status_code != 200:
raise HTTPError(resp.status_code, resp.reason, resp.text)
data = resp.json()
resp.close()
return type_(data)
except ImportError:
pass
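# Usage sketch (hypothetical endpoint and parameters, not part of the original
# module): either connector is constructed the same way and exposes
# process_request(endpoint, params, converter), e.g.
#
#     conn = ReqConnector()
#     user = conn.process_request(
#         'https://osu.ppy.sh/api/get_user',  # assumed osu! API endpoint
#         {'k': 'API_KEY', 'u': 'peppy'},     # query parameters
#         dict)                               # converter applied to the JSON
#     conn.close()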
# (End of osuapi/connectors.py, from the Phxntxm/osuapi repository, MIT license.)
"""
@package mi.instrument.rbr.xr-420_thermistor_24.ooicore.driver
@file /Users/Bill/WorkSpace/marine-integrations/mi/instrument/rbr/xr-420_thermistor_24/ooicore/driver.py
@author Bill Bollenbacher
@brief Driver for the RBR Thermistor String (24 thermistors)
Release notes:
initial release
"""
import time
import re
import ntplib
import struct
from mi.core.common import BaseEnum, Units
from mi.core.time_tools import get_timestamp_delayed
from mi.core.time_tools import timegm_to_float
from mi.core.instrument.instrument_driver import DriverParameter
from mi.core.instrument.instrument_protocol import CommandResponseInstrumentProtocol, InitializationType
from mi.core.instrument.instrument_driver import SingleConnectionInstrumentDriver
from mi.core.instrument.instrument_fsm import ThreadSafeFSM
from mi.core.instrument.instrument_driver import DriverProtocolState
from mi.core.instrument.instrument_driver import DriverEvent
from mi.core.instrument.instrument_driver import DriverAsyncEvent
from mi.core.instrument.driver_dict import DriverDictKey
from mi.core.instrument.protocol_param_dict import ParameterDictType
from mi.core.exceptions import InstrumentTimeoutException, InstrumentParameterException, InstrumentProtocolException, \
SampleException, InstrumentCommandException
from mi.core.instrument.protocol_param_dict import ParameterDictVisibility
from mi.core.instrument.protocol_param_dict import ProtocolParameterDict
from mi.core.instrument.chunker import StringChunker
from mi.core.instrument.data_particle import DataParticle, DataParticleKey, CommonDataParticleType
from mi.core.log import get_logger, get_logging_metaclass
__author__ = 'Bill Bollenbacher'
__license__ = 'Apache 2.0'
log = get_logger()
SAMPLE_DATA_PATTERN = (r'TIM (\d+)' +  # timestamp
r'\s+(-*\d+\.\d+)' +  # Channel 1
r'\s+(-*\d+\.\d+)' +  # Channel 2
r'\s+(-*\d+\.\d+)' +  # Channel 3
r'\s+(-*\d+\.\d+)' +  # Channel 4
r'\s+(-*\d+\.\d+)' +  # Channel 5
r'\s+(-*\d+\.\d+)' +  # Channel 6
r'\s+(-*\d+\.\d+)' +  # Channel 7
r'\s+(-*\d+\.\d+)' +  # Channel 8
r'\s+(-*\d+\.\d+)' +  # Channel 9
r'\s+(-*\d+\.\d+)' +  # Channel 10
r'\s+(-*\d+\.\d+)' +  # Channel 11
r'\s+(-*\d+\.\d+)' +  # Channel 12
r'\s+(-*\d+\.\d+)' +  # Channel 13
r'\s+(-*\d+\.\d+)' +  # Channel 14
r'\s+(-*\d+\.\d+)' +  # Channel 15
r'\s+(-*\d+\.\d+)' +  # Channel 16
r'\s+(-*\d+\.\d+)' +  # Channel 17
r'\s+(-*\d+\.\d+)' +  # Channel 18
r'\s+(-*\d+\.\d+)' +  # Channel 19
r'\s+(-*\d+\.\d+)' +  # Channel 20
r'\s+(-*\d+\.\d+)' +  # Channel 21
r'\s+(-*\d+\.\d+)' +  # Channel 22
r'\s+(-*\d+\.\d+)' +  # Channel 23
r'\s+(-*\d+\.\d+)' +  # Channel 24
r'\s+BV: (-*\d+\.\d+)' +  # battery voltage
r'\s+SN: (\d+) FET')  # serial number
SAMPLE_DATA_REGEX = re.compile(SAMPLE_DATA_PATTERN)
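# Illustrative only (values invented): a sample matched by SAMPLE_DATA_REGEX
# looks like
#     TIM 120821095155 20.1371 20.2037 ... 19.9866 BV: 11.43 SN: 21168 FET
# with one floating-point reading for each of the 24 channels.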
class ScheduledJob(BaseEnum):
ACQUIRE_STATUS = 'acquire_status'
CLOCK_SYNC = 'clock_sync'
class DataParticleType(BaseEnum):
RAW = CommonDataParticleType.RAW
SAMPLE = 'tmpsf_sample'
ENGINEERING = 'tmpsf_engineering'
NEWLINE = '\r\n'
WRITE_DELAY = 0
# default timeout.
INSTRUMENT_TIMEOUT = 5
# Device responses.
class InstrumentResponses(BaseEnum):
"""
XR-420 responses.
"""
GET_STATUS = 'Logger status '
GET_IDENTIFICATION = 'RBR XR-420 '
GET_LOGGER_DATE_AND_TIME = 'CTD\r\n'
GET_SAMPLE_INTERVAL = 'CSP\r\n'
GET_START_DATE_AND_TIME = 'CST\r\n'
GET_END_DATE_AND_TIME = 'CET\r\n'
GET_BATTERY_VOLTAGE = 'BAT\r\n'
GET_CHANNEL_CALIBRATION = 'CAL\r\n'
GET_ADVANCED_FUNCTIONS = 'STC\r\n'
UNKNOWN_COMMAND = '? Unknown command \r\n'
START_SAMPLING = 'Logger started in mode '
class InstrumentCommands(BaseEnum):
GET_IDENTIFICATION = 'A'
GET_LOGGER_DATE_AND_TIME = 'B'
GET_SAMPLE_INTERVAL = 'C'
GET_START_DATE_AND_TIME = 'D'
GET_END_DATE_AND_TIME = 'E'
GET_STATUS = 'T'
GET_CHANNEL_CALIBRATION = 'Z'
GET_BATTERY_VOLTAGE = '!D'
SET_LOGGER_DATE_AND_TIME = 'J'
SET_SAMPLE_INTERVAL = 'K'
SET_START_DATE_AND_TIME = 'L'
SET_END_DATE_AND_TIME = 'M'
TAKE_SAMPLE_IMMEDIATELY = 'F'
RESET_SAMPLING_ERASE_FLASH = 'N'
START_SAMPLING = 'P'
STOP_SAMPLING = '!9'
SUSPEND_SAMPLING = '!S'
RESUME_SAMPLING = '!R'
SET_ADVANCED_FUNCTIONS = '!1'
GET_ADVANCED_FUNCTIONS = '!2'
class ProtocolStates(BaseEnum):
"""
Protocol states for XR-420. Cherry picked from DriverProtocolState enum.
"""
UNKNOWN = DriverProtocolState.UNKNOWN
COMMAND = DriverProtocolState.COMMAND
AUTOSAMPLE = DriverProtocolState.AUTOSAMPLE
DIRECT_ACCESS = DriverProtocolState.DIRECT_ACCESS
class ProtocolEvent(BaseEnum):
"""
Protocol events for XR-420. Cherry picked from DriverEvent enum.
"""
ENTER = DriverEvent.ENTER
EXIT = DriverEvent.EXIT
GET = DriverEvent.GET
SET = DriverEvent.SET
DISCOVER = DriverEvent.DISCOVER
START_AUTOSAMPLE = DriverEvent.START_AUTOSAMPLE
STOP_AUTOSAMPLE = DriverEvent.STOP_AUTOSAMPLE
EXECUTE_DIRECT = DriverEvent.EXECUTE_DIRECT
START_DIRECT = DriverEvent.START_DIRECT
STOP_DIRECT = DriverEvent.STOP_DIRECT
CLOCK_SYNC = DriverEvent.CLOCK_SYNC
ACQUIRE_STATUS = DriverEvent.ACQUIRE_STATUS
SCHEDULED_CLOCK_SYNC = DriverEvent.SCHEDULED_CLOCK_SYNC
SCHEDULED_ACQUIRE_STATUS = 'PROTOCOL_EVENT_SCHEDULED_ACQUIRE_STATUS'
class Capability(BaseEnum):
"""
Capabilities that are exposed to the user (subset of above)
"""
START_AUTOSAMPLE = ProtocolEvent.START_AUTOSAMPLE
STOP_AUTOSAMPLE = ProtocolEvent.STOP_AUTOSAMPLE
CLOCK_SYNC = ProtocolEvent.CLOCK_SYNC
ACQUIRE_STATUS = ProtocolEvent.ACQUIRE_STATUS
GET = DriverEvent.GET
SET = DriverEvent.SET
START_DIRECT = DriverEvent.START_DIRECT
STOP_DIRECT = DriverEvent.STOP_DIRECT
DISCOVER = DriverEvent.DISCOVER
# Device specific parameters.
class InstrumentParameters(DriverParameter):
"""
Device parameters for XR-420.
"""
# main menu parameters
IDENTIFICATION = 'identification'
LOGGER_DATE_AND_TIME = 'logger_date_and_time'
SAMPLE_INTERVAL = 'sample_interval'
START_DATE_AND_TIME = 'start_date_and_time'
END_DATE_AND_TIME = 'end_date_and_time'
STATUS = 'status'
BATTERY_VOLTAGE = 'battery_voltage'
POWER_ALWAYS_ON = 'power_always_on'
SIX_HZ_PROFILING_MODE = 'six_hz_profiling_mode'
OUTPUT_INCLUDES_SERIAL_NUMBER = 'output_includes_serial_number'
OUTPUT_INCLUDES_BATTERY_VOLTAGE = 'output_includes_battery_voltage'
SAMPLING_LED = 'sampling_led'
ENGINEERING_UNITS_OUTPUT = 'engineering_units_output'
AUTO_RUN = 'auto_run'
INHIBIT_DATA_STORAGE = 'inhibit_data_storage'
CALIBRATION_COEFFICIENTS_CHANNEL_1 = 'calibration_coefficients_channel_1'
CALIBRATION_COEFFICIENTS_CHANNEL_2 = 'calibration_coefficients_channel_2'
CALIBRATION_COEFFICIENTS_CHANNEL_3 = 'calibration_coefficients_channel_3'
CALIBRATION_COEFFICIENTS_CHANNEL_4 = 'calibration_coefficients_channel_4'
CALIBRATION_COEFFICIENTS_CHANNEL_5 = 'calibration_coefficients_channel_5'
CALIBRATION_COEFFICIENTS_CHANNEL_6 = 'calibration_coefficients_channel_6'
CALIBRATION_COEFFICIENTS_CHANNEL_7 = 'calibration_coefficients_channel_7'
CALIBRATION_COEFFICIENTS_CHANNEL_8 = 'calibration_coefficients_channel_8'
CALIBRATION_COEFFICIENTS_CHANNEL_9 = 'calibration_coefficients_channel_9'
CALIBRATION_COEFFICIENTS_CHANNEL_10 = 'calibration_coefficients_channel_10'
CALIBRATION_COEFFICIENTS_CHANNEL_11 = 'calibration_coefficients_channel_11'
CALIBRATION_COEFFICIENTS_CHANNEL_12 = 'calibration_coefficients_channel_12'
CALIBRATION_COEFFICIENTS_CHANNEL_13 = 'calibration_coefficients_channel_13'
CALIBRATION_COEFFICIENTS_CHANNEL_14 = 'calibration_coefficients_channel_14'
CALIBRATION_COEFFICIENTS_CHANNEL_15 = 'calibration_coefficients_channel_15'
CALIBRATION_COEFFICIENTS_CHANNEL_16 = 'calibration_coefficients_channel_16'
CALIBRATION_COEFFICIENTS_CHANNEL_17 = 'calibration_coefficients_channel_17'
CALIBRATION_COEFFICIENTS_CHANNEL_18 = 'calibration_coefficients_channel_18'
CALIBRATION_COEFFICIENTS_CHANNEL_19 = 'calibration_coefficients_channel_19'
CALIBRATION_COEFFICIENTS_CHANNEL_20 = 'calibration_coefficients_channel_20'
CALIBRATION_COEFFICIENTS_CHANNEL_21 = 'calibration_coefficients_channel_21'
CALIBRATION_COEFFICIENTS_CHANNEL_22 = 'calibration_coefficients_channel_22'
CALIBRATION_COEFFICIENTS_CHANNEL_23 = 'calibration_coefficients_channel_23'
CALIBRATION_COEFFICIENTS_CHANNEL_24 = 'calibration_coefficients_channel_24'
class Status(BaseEnum):
NOT_ENABLED_FOR_SAMPLING = 0x00
ENABLED_SAMPLING_NOT_STARTED = 0x01
STARTED_SAMPLING = 0x02
STOPPED_SAMPLING = 0x04
TEMPORARILY_SUSPENDED_SAMPLING = 0x05
HIGH_SPEED_PROFILING_MODE = 0x06
ERASING_DATA_MEMORY = 0x7F
DATA_MEMORY_ERASE_FAILED = 0x80
PASSED_END_TIME = 0x01
RCVD_STOP_COMMAND = 0x02
DATA_MEMORY_FULL = 0x03
CONFIGURATION_ERROR = 0x05
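# The instrument reports status as two digits that the driver parses as hex
# (see _handler_unknown_discover and _reset_instrument below), so a
# 'Logger status 02' response maps to Status.STARTED_SAMPLING.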
class AdvancedFunctionsParameters(BaseEnum):
POWER_ALWAYS_ON = InstrumentParameters.POWER_ALWAYS_ON
SIX_HZ_PROFILING_MODE = InstrumentParameters.SIX_HZ_PROFILING_MODE
OUTPUT_INCLUDES_SERIAL_NUMBER = InstrumentParameters.OUTPUT_INCLUDES_SERIAL_NUMBER
OUTPUT_INCLUDES_BATTERY_VOLTAGE = InstrumentParameters.OUTPUT_INCLUDES_BATTERY_VOLTAGE
SAMPLING_LED = InstrumentParameters.SAMPLING_LED
ENGINEERING_UNITS_OUTPUT = InstrumentParameters.ENGINEERING_UNITS_OUTPUT
AUTO_RUN = InstrumentParameters.AUTO_RUN
INHIBIT_DATA_STORAGE = InstrumentParameters.INHIBIT_DATA_STORAGE
class AdvancedFunctionsBits(BaseEnum):
power_always_on = 0x8000
six_hz_profiling_mode = 0x4000
output_includes_serial_number = 0x20
output_includes_battery_voltage = 0x10
sampling_led = 0x8
engineering_units_output = 0x4
auto_run = 0x2
inhibit_data_storage = 0x1
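# A minimal decoding sketch (not part of the original driver; the example word
# is invented): these masks unpack the 16-bit advanced-functions word reported
# by the GET_ADVANCED_FUNCTIONS ('!2') command, e.g.
#     word = 0x8016
#     flags = {name: bool(word & mask)
#              for name, mask in AdvancedFunctionsBits.dict().items()}
#     # -> power_always_on, output_includes_battery_voltage,
#     #    engineering_units_output and auto_run read as True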
###############################################################################
# Driver for XR-420 Thermistor
###############################################################################
class InstrumentDriver(SingleConnectionInstrumentDriver):
"""
Instrument driver class for XR-420 driver.
Uses CommandResponseInstrumentProtocol to communicate with the device
"""
def _build_protocol(self):
"""
Construct the driver protocol state machine.
"""
self._protocol = InstrumentProtocol(InstrumentResponses, NEWLINE, self._driver_event)
###############################################################################
# Data particles
###############################################################################
class XR_420SampleDataParticleKey(BaseEnum):
TIMESTAMP = "timestamp"
TEMPERATURE01 = "temperature01"
TEMPERATURE02 = "temperature02"
TEMPERATURE03 = "temperature03"
TEMPERATURE04 = "temperature04"
TEMPERATURE05 = "temperature05"
TEMPERATURE06 = "temperature06"
TEMPERATURE07 = "temperature07"
TEMPERATURE08 = "temperature08"
TEMPERATURE09 = "temperature09"
TEMPERATURE10 = "temperature10"
TEMPERATURE11 = "temperature11"
TEMPERATURE12 = "temperature12"
TEMPERATURE13 = "temperature13"
TEMPERATURE14 = "temperature14"
TEMPERATURE15 = "temperature15"
TEMPERATURE16 = "temperature16"
TEMPERATURE17 = "temperature17"
TEMPERATURE18 = "temperature18"
TEMPERATURE19 = "temperature19"
TEMPERATURE20 = "temperature20"
TEMPERATURE21 = "temperature21"
TEMPERATURE22 = "temperature22"
TEMPERATURE23 = "temperature23"
TEMPERATURE24 = "temperature24"
BATTERY_VOLTAGE = "battery_voltage"
SERIAL_NUMBER = "serial_number"
class XR_420SampleDataParticle(DataParticle):
"""
Class for parsing sample data into a data particle structure for the XR-420 sensor.
"""
_data_particle_type = DataParticleType.SAMPLE
def _build_parsed_values(self):
"""
Take something in the data sample format and parse it into
values with appropriate tags.
@throws SampleException If there is a problem with sample creation
"""
temps = []
match = SAMPLE_DATA_REGEX.match(self.raw_data)
if not match:
raise SampleException("XR_420SampleDataParticle: No regex match of parsed sample data: [%r]", self.raw_data)
log.debug('_build_parsed_values: match=%s', match.group(0))
try:
log.debug('_build_parsed_values: group(1)=%s', match.group(1))
timestamp = time.strptime(match.group(1), "%y%m%d%H%M%S")
log.debug("_build_parsed_values: ts=%s", timestamp)
self.set_internal_timestamp(unix_time=timegm_to_float(timestamp))
ntp_timestamp = ntplib.system_to_ntp_time(timegm_to_float(timestamp))
for i in range(2, 26):
temps.append(float(match.group(i)))
battery_voltage = float(match.group(26))
serial_number = match.group(27)
except (ValueError, TypeError, IndexError) as ex:
raise SampleException("Error (%s) while decoding parameters in data: [%r]" % (ex, self.raw_data))
result = [{DataParticleKey.VALUE_ID: XR_420SampleDataParticleKey.TIMESTAMP,
DataParticleKey.VALUE: ntp_timestamp},
{DataParticleKey.VALUE_ID: XR_420SampleDataParticleKey.TEMPERATURE01,
DataParticleKey.VALUE: temps[0]},
{DataParticleKey.VALUE_ID: XR_420SampleDataParticleKey.TEMPERATURE02,
DataParticleKey.VALUE: temps[1]},
{DataParticleKey.VALUE_ID: XR_420SampleDataParticleKey.TEMPERATURE03,
DataParticleKey.VALUE: temps[2]},
{DataParticleKey.VALUE_ID: XR_420SampleDataParticleKey.TEMPERATURE04,
DataParticleKey.VALUE: temps[3]},
{DataParticleKey.VALUE_ID: XR_420SampleDataParticleKey.TEMPERATURE05,
DataParticleKey.VALUE: temps[4]},
{DataParticleKey.VALUE_ID: XR_420SampleDataParticleKey.TEMPERATURE06,
DataParticleKey.VALUE: temps[5]},
{DataParticleKey.VALUE_ID: XR_420SampleDataParticleKey.TEMPERATURE07,
DataParticleKey.VALUE: temps[6]},
{DataParticleKey.VALUE_ID: XR_420SampleDataParticleKey.TEMPERATURE08,
DataParticleKey.VALUE: temps[7]},
{DataParticleKey.VALUE_ID: XR_420SampleDataParticleKey.TEMPERATURE09,
DataParticleKey.VALUE: temps[8]},
{DataParticleKey.VALUE_ID: XR_420SampleDataParticleKey.TEMPERATURE10,
DataParticleKey.VALUE: temps[9]},
{DataParticleKey.VALUE_ID: XR_420SampleDataParticleKey.TEMPERATURE11,
DataParticleKey.VALUE: temps[10]},
{DataParticleKey.VALUE_ID: XR_420SampleDataParticleKey.TEMPERATURE12,
DataParticleKey.VALUE: temps[11]},
{DataParticleKey.VALUE_ID: XR_420SampleDataParticleKey.TEMPERATURE13,
DataParticleKey.VALUE: temps[12]},
{DataParticleKey.VALUE_ID: XR_420SampleDataParticleKey.TEMPERATURE14,
DataParticleKey.VALUE: temps[13]},
{DataParticleKey.VALUE_ID: XR_420SampleDataParticleKey.TEMPERATURE15,
DataParticleKey.VALUE: temps[14]},
{DataParticleKey.VALUE_ID: XR_420SampleDataParticleKey.TEMPERATURE16,
DataParticleKey.VALUE: temps[15]},
{DataParticleKey.VALUE_ID: XR_420SampleDataParticleKey.TEMPERATURE17,
DataParticleKey.VALUE: temps[16]},
{DataParticleKey.VALUE_ID: XR_420SampleDataParticleKey.TEMPERATURE18,
DataParticleKey.VALUE: temps[17]},
{DataParticleKey.VALUE_ID: XR_420SampleDataParticleKey.TEMPERATURE19,
DataParticleKey.VALUE: temps[18]},
{DataParticleKey.VALUE_ID: XR_420SampleDataParticleKey.TEMPERATURE20,
DataParticleKey.VALUE: temps[19]},
{DataParticleKey.VALUE_ID: XR_420SampleDataParticleKey.TEMPERATURE21,
DataParticleKey.VALUE: temps[20]},
{DataParticleKey.VALUE_ID: XR_420SampleDataParticleKey.TEMPERATURE22,
DataParticleKey.VALUE: temps[21]},
{DataParticleKey.VALUE_ID: XR_420SampleDataParticleKey.TEMPERATURE23,
DataParticleKey.VALUE: temps[22]},
{DataParticleKey.VALUE_ID: XR_420SampleDataParticleKey.TEMPERATURE24,
DataParticleKey.VALUE: temps[23]},
{DataParticleKey.VALUE_ID: XR_420SampleDataParticleKey.BATTERY_VOLTAGE,
DataParticleKey.VALUE: battery_voltage},
{DataParticleKey.VALUE_ID: XR_420SampleDataParticleKey.SERIAL_NUMBER,
DataParticleKey.VALUE: serial_number}]
log.debug('XR_420SampleDataParticle: particle=%r', result)
return result
class XR_420EngineeringDataParticleKey(BaseEnum):
CALIBRATION_COEFFICIENTS = 'tmpsf_cal_coeffs'
BATTERY_VOLTAGE = 'battery_voltage'
CALIBRATION_COEFFICIENTS_PARAMETERS = [
InstrumentParameters.CALIBRATION_COEFFICIENTS_CHANNEL_1,
InstrumentParameters.CALIBRATION_COEFFICIENTS_CHANNEL_2,
InstrumentParameters.CALIBRATION_COEFFICIENTS_CHANNEL_3,
InstrumentParameters.CALIBRATION_COEFFICIENTS_CHANNEL_4,
InstrumentParameters.CALIBRATION_COEFFICIENTS_CHANNEL_5,
InstrumentParameters.CALIBRATION_COEFFICIENTS_CHANNEL_6,
InstrumentParameters.CALIBRATION_COEFFICIENTS_CHANNEL_7,
InstrumentParameters.CALIBRATION_COEFFICIENTS_CHANNEL_8,
InstrumentParameters.CALIBRATION_COEFFICIENTS_CHANNEL_9,
InstrumentParameters.CALIBRATION_COEFFICIENTS_CHANNEL_10,
InstrumentParameters.CALIBRATION_COEFFICIENTS_CHANNEL_11,
InstrumentParameters.CALIBRATION_COEFFICIENTS_CHANNEL_12,
InstrumentParameters.CALIBRATION_COEFFICIENTS_CHANNEL_13,
InstrumentParameters.CALIBRATION_COEFFICIENTS_CHANNEL_14,
InstrumentParameters.CALIBRATION_COEFFICIENTS_CHANNEL_15,
InstrumentParameters.CALIBRATION_COEFFICIENTS_CHANNEL_16,
InstrumentParameters.CALIBRATION_COEFFICIENTS_CHANNEL_17,
InstrumentParameters.CALIBRATION_COEFFICIENTS_CHANNEL_18,
InstrumentParameters.CALIBRATION_COEFFICIENTS_CHANNEL_19,
InstrumentParameters.CALIBRATION_COEFFICIENTS_CHANNEL_20,
InstrumentParameters.CALIBRATION_COEFFICIENTS_CHANNEL_21,
InstrumentParameters.CALIBRATION_COEFFICIENTS_CHANNEL_22,
InstrumentParameters.CALIBRATION_COEFFICIENTS_CHANNEL_23,
InstrumentParameters.CALIBRATION_COEFFICIENTS_CHANNEL_24
]
class XR_420EngineeringDataParticle(DataParticle):
"""
Class for constructing engineering data into an engineering particle structure for the XR-420 sensor.
The raw_data variable in the DataParticle base class needs to be initialized to a reference to
a dictionary that contains the status parameters.
"""
_data_particle_type = DataParticleType.ENGINEERING
def _build_parsed_values(self):
"""
Build the status particle from a dictionary of parameters adding the appropriate tags.
NOTE: raw_data references a dictionary with the status parameters, not a line of input
@throws SampleException If there is a problem with particle creation
"""
result = []
if not isinstance(self.raw_data, dict):
raise SampleException("Error: raw_data is not a dictionary")
log.debug('XR_420EngineeringDataParticle: raw_data=%r', self.raw_data)
voltage = self.raw_data.get(InstrumentParameters.BATTERY_VOLTAGE)
if voltage is not None:
result.append(
{DataParticleKey.VALUE_ID: XR_420SampleDataParticleKey.BATTERY_VOLTAGE,
DataParticleKey.VALUE: voltage}
)
else:
raise SampleException("XR_420EngineeringDataParticle: missing battery voltage")
cals = []
for param in CALIBRATION_COEFFICIENTS_PARAMETERS:
value = self.raw_data.get(param)
if value is not None:
cals.append(value)
else:
raise SampleException("XR_420EngineeringDataParticle: missing calibration")
result.append({DataParticleKey.VALUE_ID: XR_420EngineeringDataParticleKey.CALIBRATION_COEFFICIENTS,
DataParticleKey.VALUE: cals})
log.debug('XR_420EngineeringDataParticle: particle=%r', result)
return result
###############################################################################
# Protocol for XR-420
###############################################################################
class InstrumentProtocol(CommandResponseInstrumentProtocol):
"""
This protocol implements a simple command-response interaction for the XR-420 instrument.
"""
__metaclass__ = get_logging_metaclass(log_level='debug')
def __init__(self, prompts, newline, driver_event):
"""
"""
self.write_delay = WRITE_DELAY
self._last_data_timestamp = None
self.advanced_functions_bits = AdvancedFunctionsBits.dict()
CommandResponseInstrumentProtocol.__init__(self, prompts, newline, driver_event)
self._protocol_fsm = ThreadSafeFSM(ProtocolStates,
ProtocolEvent,
ProtocolEvent.ENTER,
ProtocolEvent.EXIT)
# Add event handlers for protocol state machine.
self._protocol_fsm.add_handler(ProtocolStates.UNKNOWN, ProtocolEvent.ENTER, self._handler_unknown_enter)
self._protocol_fsm.add_handler(ProtocolStates.UNKNOWN, ProtocolEvent.EXIT, self._handler_unknown_exit)
self._protocol_fsm.add_handler(ProtocolStates.UNKNOWN, ProtocolEvent.DISCOVER, self._handler_unknown_discover)
self._protocol_fsm.add_handler(ProtocolStates.UNKNOWN, ProtocolEvent.GET, self._handler_get)
self._protocol_fsm.add_handler(ProtocolStates.COMMAND, ProtocolEvent.ENTER, self._handler_command_enter)
self._protocol_fsm.add_handler(ProtocolStates.COMMAND, ProtocolEvent.EXIT, self._handler_command_exit)
self._protocol_fsm.add_handler(ProtocolStates.COMMAND, ProtocolEvent.START_AUTOSAMPLE, self._handler_command_start_autosample)
self._protocol_fsm.add_handler(ProtocolStates.COMMAND, ProtocolEvent.SET, self._handler_command_set)
self._protocol_fsm.add_handler(ProtocolStates.COMMAND, ProtocolEvent.GET, self._handler_get)
self._protocol_fsm.add_handler(ProtocolStates.COMMAND, ProtocolEvent.START_DIRECT, self._handler_command_start_direct)
self._protocol_fsm.add_handler(ProtocolStates.COMMAND, ProtocolEvent.CLOCK_SYNC, self._handler_clock_sync)
self._protocol_fsm.add_handler(ProtocolStates.COMMAND, ProtocolEvent.ACQUIRE_STATUS, self._handler_acquire_status)
self._protocol_fsm.add_handler(ProtocolStates.AUTOSAMPLE, ProtocolEvent.ENTER, self._handler_autosample_enter)
self._protocol_fsm.add_handler(ProtocolStates.AUTOSAMPLE, ProtocolEvent.EXIT, self._handler_autosample_exit)
self._protocol_fsm.add_handler(ProtocolStates.AUTOSAMPLE, ProtocolEvent.GET, self._handler_get)
self._protocol_fsm.add_handler(ProtocolStates.AUTOSAMPLE, ProtocolEvent.SCHEDULED_ACQUIRE_STATUS, self._handler_acquire_status)
self._protocol_fsm.add_handler(ProtocolStates.AUTOSAMPLE, ProtocolEvent.SCHEDULED_CLOCK_SYNC, self._handler_clock_sync)
self._protocol_fsm.add_handler(ProtocolStates.AUTOSAMPLE, ProtocolEvent.STOP_AUTOSAMPLE, self._handler_autosample_stop_autosample)
self._protocol_fsm.add_handler(ProtocolStates.DIRECT_ACCESS, ProtocolEvent.ENTER, self._handler_direct_access_enter)
self._protocol_fsm.add_handler(ProtocolStates.DIRECT_ACCESS, ProtocolEvent.EXIT, self._handler_direct_access_exit)
self._protocol_fsm.add_handler(ProtocolStates.DIRECT_ACCESS, ProtocolEvent.EXECUTE_DIRECT, self._handler_direct_access_execute_direct)
self._protocol_fsm.add_handler(ProtocolStates.DIRECT_ACCESS, ProtocolEvent.STOP_DIRECT, self._handler_direct_access_stop_direct)
# Set state machine in UNKNOWN state.
self._protocol_fsm.start(ProtocolStates.UNKNOWN)
self._build_command_handlers()
# Construct the parameter dictionary containing device parameters,
# current parameter values, and set formatting functions.
self._build_param_dict()
self._build_command_dict()
self._build_driver_dict()
# create chunker for processing instrument samples.
self._chunker = StringChunker(InstrumentProtocol.chunker_sieve_function)
self.initialize_scheduler()
# TMPSF uses a menu-driven API
self._direct_commands = {
'Newline': self._newline
}
@staticmethod
def chunker_sieve_function(raw_data):
# Detect sample data structures in raw data received from the instrument.
return_list = []
for match in SAMPLE_DATA_REGEX.finditer(raw_data):
return_list.append((match.start(), match.end()))
return return_list
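# Illustrative: if raw_data holds one complete sample line, the sieve returns
# a single (start, end) span covering it, which the chunker then passes to
# _got_chunk() below.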
def _filter_capabilities(self, events):
"""
"""
events_out = [x for x in events if Capability.has(x)]
return events_out
def _got_chunk(self, structure, timestamp):
"""
The base class got_data has gotten a structure from the chunker. Pass it to extract_sample
with the appropriate particle objects and REGEXes.
"""
log.debug("_got_chunk: detected structure = <%s>", structure)
self._extract_sample(XR_420SampleDataParticle, SAMPLE_DATA_REGEX, structure, timestamp)
########################################################################
# overridden methods from base class.
########################################################################
def _get_response(self, timeout=10, expected_prompt=None):
"""
overridden to find expected prompt anywhere in buffer
Get a response from the instrument, but be a bit loose about what we
accept: allow for whitespace around prompts rather than requiring an
exact match, in case we are off by a little whitespace or not quite at
the end of a line.
@todo Consider cases with no prompt
@param timeout The timeout in seconds
@param expected_prompt Only consider the specific expected prompt as
presented by this string
@throw InstrumentProtocolException on timeout
"""
# Grab time for timeout and wait for prompt.
starttime = time.time()
if expected_prompt is None:
prompt_list = self._prompts.list()
else:
if isinstance(expected_prompt, basestring):
prompt_list = [expected_prompt]
else:
prompt_list = expected_prompt
while True:
for item in prompt_list:
if item in self._promptbuf:
return item, self._linebuf
else:
time.sleep(.1)
if time.time() > starttime + timeout:
raise InstrumentTimeoutException("in InstrumentProtocol._get_response()")
def _do_cmd_resp(self, cmd, *args, **kwargs):
"""
overridden to retrieve the expected response from the build handler
Perform a command-response on the device.
@param cmd The command to execute.
@param args positional arguments to pass to the build handler.
@param timeout=timeout optional wakeup and command timeout.
@retval resp_result The (possibly parsed) response result.
@raises InstrumentTimeoutException if the response did not occur in time.
@raises InstrumentProtocolException if command could not be built or if response
was not recognized.
"""
# Get timeout and initialize response.
timeout = kwargs.get('timeout', 10)
expected_prompt = kwargs.get('expected_prompt', None)
write_delay = kwargs.get('write_delay', 0)
# Get the build handler.
build_handler = self._build_handlers.get(cmd, None)
if not build_handler:
raise InstrumentProtocolException('Cannot build command: %s' % cmd)
(cmd_line, expected_response) = build_handler(command=cmd, **kwargs)
if expected_prompt is None:
expected_prompt = expected_response
# Wakeup the device, pass up exception if timeout
self._wakeup()
# Clear line and prompt buffers for result.
self._linebuf = ''
self._promptbuf = ''
# Send command.
log.debug('_do_cmd_resp: cmd=%r, timeout=%s, write_delay=%s, expected_prompt=%s,',
repr(cmd_line), timeout, write_delay, expected_prompt)
if write_delay == 0:
self._connection.send(cmd_line)
else:
for char in cmd_line:
self._connection.send(char)
time.sleep(write_delay)
# Wait for the prompt, prepare result and return, timeout exception
(prompt, result) = self._get_response(timeout, expected_prompt=expected_prompt)
resp_handler = self._response_handlers.get((self.get_current_state(), cmd), None) or \
self._response_handlers.get(cmd, None)
resp_result = None
if resp_handler:
resp_result = resp_handler(result, prompt, **kwargs)
return resp_result
def _wakeup(self, *args):
"""
overridden to find longest matching prompt anywhere in the buffer and to be
more responsive with its use of sleep()
Clear buffers and send a wakeup command to the instrument
@param timeout The timeout to wake the device.
@param delay The time to wait between consecutive wakeups.
@throw InstrumentTimeoutException if the device could not be woken.
"""
timeout = 5
response_delay = 1
# Clear the prompt buffer.
self._promptbuf = ''
# Grab start time for overall timeout.
start_time = time.time()
while True:
# Send 'get status' command.
log.debug('_wakeup: sending <%r>' % InstrumentCommands.GET_STATUS)
self._connection.send(InstrumentCommands.GET_STATUS)
# Grab send time for response timeout.
send_time = time.time()
while True:
time.sleep(.1)
# look for response
if InstrumentResponses.GET_STATUS in self._promptbuf:
log.debug('_wakeup got prompt: %r' % repr(InstrumentResponses.GET_STATUS))
return InstrumentResponses.GET_STATUS
time_now = time.time()
# check for overall timeout
if time_now > start_time + timeout:
raise InstrumentTimeoutException("Timeout waiting for instrument to wakeup")
# check for retry timeout
if time_now > send_time + response_delay:
break
########################################################################
# State Unknown handlers.
########################################################################
def _handler_unknown_enter(self, *args, **kwargs):
"""
Enter unknown state.
"""
# Tell driver superclass to send a state change event.
# Superclass will query the state.
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
def _handler_unknown_exit(self, *args, **kwargs):
"""
Exit unknown state.
"""
pass
def _handler_unknown_discover(self, *args, **kwargs):
"""
Discover current state; can be COMMAND or AUTOSAMPLE.
@raise InstrumentProtocolException if we fail to discover our state
"""
next_state = ProtocolStates.COMMAND
result = []
try:
self._wakeup()
except InstrumentTimeoutException:
# didn't get status response, so indicate that there is trouble with the instrument
raise InstrumentProtocolException('Failed to discover instrument state. Unable to wake up instrument.')
match = re.search(r'Logger status (\d{2})', self._promptbuf)
if match is not None:
# got status response, so determine what mode the instrument is in
status = int(match.group(1), 16)
log.debug("_handler_unknown_discover: parsed=%s, status=%d" % (match.group(1), status))
if status in [Status.STARTED_SAMPLING,
Status.TEMPORARILY_SUSPENDED_SAMPLING,
Status.HIGH_SPEED_PROFILING_MODE]:
next_state = ProtocolStates.AUTOSAMPLE
else:
raise InstrumentProtocolException('Failed to discover instrument state. Prompt mismatch.')
return next_state, (next_state, result)
########################################################################
# State Command handlers.
########################################################################
def _handler_command_enter(self, *args, **kwargs):
"""
Enter command state.
@throws InstrumentTimeoutException if the device cannot be woken.
@throws InstrumentProtocolException if the update commands are not recognized.
"""
# Command device to update parameters and send a config change event if needed.
if self._init_type != InitializationType.NONE:
self._update_params()
self._init_params()
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
def _handler_command_exit(self, *args, **kwargs):
"""
Exit command state.
"""
pass
def _handler_command_set(self, *args, **kwargs):
"""
Perform a set command.
@param args[0] parameter : value dict.
"""
next_state = None
result = []
self._verify_not_readonly(*args, **kwargs)
self._set_params(*args, **kwargs)
return next_state, (next_state, result)
def _set_params(self, *args, **kwargs):
try:
params_to_set = args[0]
except IndexError:
raise InstrumentParameterException('Set command requires a parameter dict.')
else:
if not isinstance(params_to_set, dict):
raise InstrumentParameterException('Set parameters not a dict.')
self._verify_not_readonly(*args, **kwargs)
update_params = False
set_advance_params = False
for (key, val) in params_to_set.iteritems():
try:
if key in AdvancedFunctionsParameters.list():
old_val = self._param_dict.get(key)
if old_val != val:
self._param_dict.set_value(key, val)
set_advance_params = True
else:
old_val = self._param_dict.format(key)
new_val = self._param_dict.format(key, params_to_set[key])
if old_val != new_val:
update_params = True
command = self._param_dict.get_submenu_write(key)
log.debug('set_params: cmd=%s, name=%s, value=%s', command, key, val)
self._do_cmd_no_resp(command, key, val, timeout=5)
except Exception:
log.debug('Unknown driver parameter %s', key)
raise InstrumentParameterException('Unknown driver parameter %s' % key)
if set_advance_params:
command = self._param_dict.get_submenu_write(AdvancedFunctionsParameters.POWER_ALWAYS_ON)
self._do_cmd_no_resp(command, AdvancedFunctionsParameters.POWER_ALWAYS_ON,
self._param_dict.get(AdvancedFunctionsParameters.POWER_ALWAYS_ON), timeout=5)
if update_params:
self._update_params()
self._check_for_set_failures(params_to_set)
def _handler_command_start_autosample(self, *args, **kwargs):
"""
Switch into autosample mode.
@throws InstrumentTimeoutException if device cannot be woken for command.
@throws InstrumentProtocolException if command could not be built or misunderstood.
"""
next_state = ProtocolStates.AUTOSAMPLE
result = []
# these calls will return if reset is successful or raise an exception otherwise
self._reset_instrument()
self._start_sampling()
return next_state, (next_state, result)
def _handler_command_start_direct(self):
next_state = ProtocolStates.DIRECT_ACCESS
result = []
return next_state, (next_state, result)
########################################################################
# Autosample handlers.
########################################################################
def _handler_autosample_enter(self, *args, **kwargs):
"""
Enter autosample state.
"""
if self._init_type != InitializationType.NONE:
self._reset_instrument()
self._update_params()
self._init_params()
self._start_sampling()
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
def _handler_autosample_exit(self, *args, **kwargs):
"""
Exit autosample state.
"""
pass
def _handler_autosample_stop_autosample(self, *args, **kwargs):
"""
Stop autosample and switch back to command mode.
@throws InstrumentTimeoutException if device cannot be woken for command.
@throws InstrumentProtocolException if command misunderstood or
incorrect prompt received.
"""
next_state = ProtocolStates.COMMAND
result = []
# this call will return if reset is successful or raise an exception otherwise
self._reset_instrument()
return next_state, (next_state, result)
########################################################################
# Direct access handlers.
########################################################################
def _handler_direct_access_enter(self, *args, **kwargs):
"""
Enter direct access state.
"""
# Tell driver superclass to send a state change event.
# Superclass will query the state.
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
self._sent_cmds = []
def _handler_direct_access_exit(self, *args, **kwargs):
"""
Exit direct access state.
"""
pass
def _handler_direct_access_execute_direct(self, data):
next_state = None
result = []
self._do_cmd_direct(data)
return next_state, (next_state, result)
def _handler_direct_access_stop_direct(self):
next_state = ProtocolStates.COMMAND
result = []
return next_state, (next_state, result)
########################################################################
# general handlers.
########################################################################
def _handler_clock_sync(self, *args, **kwargs):
"""
sync clock close to a second edge
@throws InstrumentTimeoutException if device cannot be woken for command.
@throws InstrumentProtocolException if command could not be built or misunderstood.
"""
next_state = None
result = []
if self.get_current_state() == ProtocolStates.AUTOSAMPLE:
try:
self._reset_instrument()
except InstrumentCommandException:
log.error('Unable to break out of sampling. Will not set clock!')
# get time in ION format so command builder method can convert it correctly
str_time = get_timestamp_delayed("%d %b %Y %H:%M:%S")
log.debug("_handler_command_clock_sync: time set to %s", str_time)
# set the time
command = self._param_dict.get_submenu_write(InstrumentParameters.LOGGER_DATE_AND_TIME)
self._do_cmd_no_resp(command, InstrumentParameters.LOGGER_DATE_AND_TIME, str_time, timeout=5)
# get the time to update the parameter value
command = self._param_dict.get_submenu_read(InstrumentParameters.LOGGER_DATE_AND_TIME)
self._do_cmd_resp(command)
if self.get_current_state() == ProtocolStates.AUTOSAMPLE:
try:
self._start_sampling()
except InstrumentCommandException:
log.error('Unable to go back to sampling, changing state to COMMAND')
return ProtocolStates.COMMAND, (ProtocolStates.COMMAND, None)
return next_state, (next_state, result)
def _handler_acquire_status(self, *args, **kwargs):
"""
Get device status
"""
next_state = None
# update battery voltage value
command = self._param_dict.get_submenu_read(InstrumentParameters.BATTERY_VOLTAGE)
self._do_cmd_resp(command, name=InstrumentParameters.BATTERY_VOLTAGE)
params = [InstrumentParameters.BATTERY_VOLTAGE] + CALIBRATION_COEFFICIENTS_PARAMETERS
status_params = {}
for name in params:
status_params[name] = self._param_dict.get(name)
log.debug("Add parameter %s: %r", name, status_params[name])
# Create status data particle, but pass in a reference to the dictionary just created as first parameter
# instead of the 'line'. The status data particle class will use the 'raw_data' variable as a reference to a
# dictionary object to get access to parameter values (see the Mavs4EngineeringDataParticle class).
particle = XR_420EngineeringDataParticle(status_params, preferred_timestamp=DataParticleKey.DRIVER_TIMESTAMP)
status = particle.generate()
self._driver_event(DriverAsyncEvent.SAMPLE, status)
return next_state, (next_state, [status])
########################################################################
# Private helpers.
########################################################################
def _check_for_set_failures(self, params_to_check):
device_parameters = self._param_dict.get_config()
for key in params_to_check.keys():
log.debug("Verify set, key: %s", key)
if params_to_check[key] != device_parameters[key]:
msg = "SET FAILURE: %s is %s and should have been set to %s" % (key, device_parameters[key], params_to_check[key])
log.debug("_check_for_set_failures: %s", msg)
raise InstrumentParameterException(msg)
def _start_sampling(self):
# now start sampling
status_response = self._do_cmd_resp(InstrumentCommands.START_SAMPLING)
log.debug('_start_sampling: status=%s', status_response)
status_as_int = int(status_response, 16)
if status_as_int not in [Status.ENABLED_SAMPLING_NOT_STARTED, Status.STARTED_SAMPLING]:
raise InstrumentCommandException("_handler_command_start_autosample: " +
"Failed to start sampling, status=%s"
% status_response)
def _reset_instrument(self):
ENABLING_SEQUENCE = '!U01N'
ERASE_TIMEOUT = 60
# Issue reset command and return if successful.
for i in range(2):
# Wakeup the device, pass up exception if timeout
self._wakeup()
# Send 'reset sampling' command.
log.debug('_reset_instrument: sending <%r>' % ENABLING_SEQUENCE)
self._connection.send(ENABLING_SEQUENCE)
time.sleep(1)
log.debug('_reset_instrument: sending <%r>' % InstrumentCommands.RESET_SAMPLING_ERASE_FLASH)
self._connection.send(InstrumentCommands.RESET_SAMPLING_ERASE_FLASH)
starttime = time.time()
# Erasing flash memory and resetting sampling mode
while True:
self._do_cmd_resp(InstrumentCommands.GET_STATUS)
status_as_int = int(self._param_dict.get(InstrumentParameters.STATUS), 16)
log.debug('_reset_instrument: status=%x', status_as_int)
if status_as_int == Status.NOT_ENABLED_FOR_SAMPLING:
# instrument is reset and ready
return
elif status_as_int == Status.ERASING_DATA_MEMORY:
# instrument is still busy
time.sleep(1)
elif status_as_int == Status.DATA_MEMORY_ERASE_FAILED:
# serious instrument failure
raise InstrumentCommandException("_reset_instrument: " +
"SERIOUS FAILURE to reset instrument! status=%s"
% Status.DATA_MEMORY_ERASE_FAILED)
if time.time() > starttime + ERASE_TIMEOUT:
break
raise InstrumentCommandException("_reset_instrument: " +
"Failed to reset instrument after 2 tries of %d seconds each, status=%s"
% (ERASE_TIMEOUT, self._param_dict.get(InstrumentParameters.STATUS)))
def _convert_battery_voltage(self, reported_battery_voltage):
battery_voltage = int(reported_battery_voltage, 16)
battery_voltage *= .0816485
battery_voltage += .25417
return battery_voltage
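# Worked example (reported value invented): a reported value of 'A0' is 160
# decimal, so the converted voltage is 160 * .0816485 + .25417 ~= 13.32 V.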
def _convert_xr_420_date_and_time(self, reported_date_and_time):
"""
convert string from XR-420 "yymmddhhmmss to ION "21 AUG 2012 09:51:55"
"""
return time.strftime("%d %b %Y %H:%M:%S", time.strptime(reported_date_and_time, "%y%m%d%H%M%S"))
def _convert_ion_date_time(self, ion_date_time_string):
"""
convert string from ION "21 AUG 2012 09:51:55" to XR-420 "yymmddhhmmss"
"""
return time.strftime("%y%m%d%H%M%S", time.strptime(ion_date_time_string, "%d %b %Y %H:%M:%S"))
def _convert_xr_420_time(self, reported_time):
"""
convert string from XR-420 "hhmmss to ION "09:51:55"
"""
return time.strftime("%H:%M:%S", time.strptime(reported_time, "%H%M%S"))
def _convert_ion_time(self, ion_date_time_string):
"""
convert string from ION "09:51:55" to XR-420 "hhmmss"
"""
return time.strftime("%H%M%S", time.strptime(ion_date_time_string, "%H:%M:%S"))
def _convert_calibration(self, calibration_string):
"""
convert calibration string from 32 hex byte values to 4 floating point values
"""
if len(calibration_string) != 64:
raise InstrumentParameterException('_convert_calibration: calibration response is not 64 characters in length.')
float_list = []
for index in range(4):
bytes_in_hex = calibration_string[0:16]
calibration_string = calibration_string[16:]
bytes_in_hex = bytes_in_hex.decode('hex')
float_value = struct.unpack('<d', bytes_in_hex)
float_list.append(float_value[0])
return float_list
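# Worked example (hex string invented): the 16 hex characters
# '0000000000405940' are the little-endian IEEE-754 encoding of 101.0, i.e.
#     struct.unpack('<d', '0000000000405940'.decode('hex'))[0] == 101.0
# and a 64-character calibration string packs four such doubles.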
def _check_bit_value(self, value):
if value in [0, 1]:
log.debug('_check_bit_value: value <%s> is binary', value)
return value
else:
log.debug('_check_bit_value: value <%s> is not binary - raising exception', value)
raise InstrumentParameterException('not a binary value.')
def _update_params(self, *args, **kwargs):
"""
Update the parameter dictionary.
"""
# Get old param dict config.
old_config = self._param_dict.get_config()
for key in InstrumentParameters.list():
if key != InstrumentParameters.ALL:
command = self._param_dict.get_submenu_read(key)
response = self._do_cmd_resp(command, name=key)
# Get new param dict config. If it differs from the old config,
# tell driver superclass to publish a config change event.
new_config = self._param_dict.get_config()
log.debug("Old Configuration: %s", old_config)
log.debug("New Configuration: %s", new_config)
if new_config != old_config:
log.debug("Configuration change detected!")
self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)
def _build_driver_dict(self):
"""
Populate the driver dictionary with options
"""
self._driver_dict.add(DriverDictKey.VENDOR_SW_COMPATIBLE, False)
def _build_command_dict(self):
"""
Populate the command dictionary with command.
"""
self._cmd_dict.add(Capability.ACQUIRE_STATUS, display_name="Acquire Status")
self._cmd_dict.add(Capability.CLOCK_SYNC, display_name='Synchronize Clock')
self._cmd_dict.add(Capability.START_AUTOSAMPLE, display_name="Start Autosample")
self._cmd_dict.add(Capability.STOP_AUTOSAMPLE, display_name="Stop Autosample")
self._cmd_dict.add(Capability.DISCOVER, display_name='Discover', timeout=30)
def _build_param_dict(self):
"""
Populate the parameter dictionary with XR-420 parameters.
For each parameter key add value formatting function for set commands.
"""
# The parameter dictionary.
self._param_dict = ProtocolParameterDict()
# Add parameter handlers to parameter dictionary for instrument configuration parameters.
self._param_dict.add(InstrumentParameters.STATUS,
r'Logger status (.*)\r\n',
lambda match : match.group(1),
str,
type=ParameterDictType.STRING,
display_name="Logger Status",
description="Current logging status of the instrument.",
visibility=ParameterDictVisibility.READ_ONLY,
submenu_read=InstrumentCommands.GET_STATUS)
self._param_dict.add(InstrumentParameters.IDENTIFICATION,
r'(RBR XR-420 .*)\r\n',
lambda match : match.group(1),
str,
type=ParameterDictType.STRING,
display_name="Identification",
description="Instrument identification.",
visibility=ParameterDictVisibility.READ_ONLY,
submenu_read=InstrumentCommands.GET_IDENTIFICATION)
self._param_dict.add(InstrumentParameters.LOGGER_DATE_AND_TIME,
r'(\d{12})CTD\r\n',
lambda match : self._convert_xr_420_date_and_time(match.group(1)),
str,
type=ParameterDictType.STRING,
display_name="Date/Time",
description="Timestamp of last get/set time.",
units="D M Y H:M:S",
visibility=ParameterDictVisibility.READ_ONLY,
submenu_read=InstrumentCommands.GET_LOGGER_DATE_AND_TIME,
submenu_write=InstrumentCommands.SET_LOGGER_DATE_AND_TIME)
self._param_dict.add(InstrumentParameters.SAMPLE_INTERVAL,
r'(\d{6})CSP\r\n',
lambda match : self._convert_xr_420_time(match.group(1)),
str,
startup_param=True,
default_value='00:00:12', # 12 seconds
type=ParameterDictType.STRING,
display_name="Sample Interval",
description="Sampling interval between samples taken.",
units="HH:MM:SS",
submenu_read=InstrumentCommands.GET_SAMPLE_INTERVAL,
submenu_write=InstrumentCommands.SET_SAMPLE_INTERVAL)
self._param_dict.add(InstrumentParameters.START_DATE_AND_TIME,
r'(\d{12})CST\r\n',
lambda match : self._convert_xr_420_date_and_time(match.group(1)),
str,
startup_param=True,
default_value='01 Jan 2000 00:00:00',
direct_access=False,
type=ParameterDictType.STRING,
display_name="Start Date and Time",
description="Date/time at which the logger starts sampling.",
units="D M Y H:M:S",
visibility=ParameterDictVisibility.IMMUTABLE,
submenu_read=InstrumentCommands.GET_START_DATE_AND_TIME,
submenu_write=InstrumentCommands.SET_START_DATE_AND_TIME)
self._param_dict.add(InstrumentParameters.END_DATE_AND_TIME,
r'(\d{12})CET\r\n',
lambda match : self._convert_xr_420_date_and_time(match.group(1)),
str,
default_value='01 Jan 2050 00:00:00',
startup_param=True,
direct_access=False,
type=ParameterDictType.STRING,
display_name="End Date and Time",
description="Date/time at which the logger stops sampling.",
units="D M Y H:M:S",
visibility=ParameterDictVisibility.IMMUTABLE,
submenu_read=InstrumentCommands.GET_END_DATE_AND_TIME,
submenu_write=InstrumentCommands.SET_END_DATE_AND_TIME)
self._param_dict.add(InstrumentParameters.BATTERY_VOLTAGE,
r'(\w{2})BAT\r\n',
lambda match : self._convert_battery_voltage(match.group(1)),
self._float_to_string,
type=ParameterDictType.FLOAT,
display_name="Battery Voltage",
description="Battery voltage of the instrument.",
units=Units.VOLT,
visibility=ParameterDictVisibility.READ_ONLY,
submenu_read=InstrumentCommands.GET_BATTERY_VOLTAGE)
self._param_dict.add(InstrumentParameters.POWER_ALWAYS_ON,
r'$^',
lambda value : self._check_bit_value(value),
None,
default_value=1, # 1 = True
type=ParameterDictType.INT,
display_name="Power Always On",
range={'True': 1, 'False': 0},
description="Enable instrument sleeping between samples: (1:True | 0:False)",
startup_param=True,
direct_access=False,
submenu_read=InstrumentCommands.GET_ADVANCED_FUNCTIONS,
submenu_write=InstrumentCommands.SET_ADVANCED_FUNCTIONS)
# Not available using this logger
self._param_dict.add(InstrumentParameters.SIX_HZ_PROFILING_MODE,
r'$^',
lambda value : self._check_bit_value(value),
None,
default_value=0, # 0 = False
type=ParameterDictType.INT,
display_name="6Hz Profiling Mode",
range={'True': 1, 'False': 0},
description="Enable profiling mode: (1:True | 0:False)",
startup_param=True,
direct_access=False,
visibility=ParameterDictVisibility.IMMUTABLE,
submenu_read=InstrumentCommands.GET_ADVANCED_FUNCTIONS,
submenu_write=InstrumentCommands.SET_ADVANCED_FUNCTIONS)
self._param_dict.add(InstrumentParameters.OUTPUT_INCLUDES_SERIAL_NUMBER,
r'$^',
lambda value : self._check_bit_value(value),
None,
default_value=1, # 1 = True
startup_param=True,
direct_access=True,
type=ParameterDictType.INT,
display_name="Output Includes Serial Number",
range={'True': 1, 'False': 0},
description="Enable serial number in output: (1:True | 0:False)",
visibility=ParameterDictVisibility.IMMUTABLE,
submenu_read=InstrumentCommands.GET_ADVANCED_FUNCTIONS,
submenu_write=InstrumentCommands.SET_ADVANCED_FUNCTIONS)
self._param_dict.add(InstrumentParameters.OUTPUT_INCLUDES_BATTERY_VOLTAGE,
r'$^',
lambda value : self._check_bit_value(value),
None,
default_value=1, # 1 = True
startup_param=True,
direct_access=True,
type=ParameterDictType.INT,
display_name="Output Includes Battery Voltage",
range={'True': 1, 'False': 0},
description="Enable battery voltage in output: (1:True | 0:False)",
visibility=ParameterDictVisibility.IMMUTABLE,
submenu_read=InstrumentCommands.GET_ADVANCED_FUNCTIONS,
submenu_write=InstrumentCommands.SET_ADVANCED_FUNCTIONS)
self._param_dict.add(InstrumentParameters.SAMPLING_LED,
r'$^',
lambda value : self._check_bit_value(value),
None,
default_value=0, # 0 = False
startup_param=True,
direct_access=False,
type=ParameterDictType.INT,
display_name="Sampling LED",
range={'True': 1, 'False': 0},
description="Enable sampling LED: (1:True | 0:False)",
visibility=ParameterDictVisibility.IMMUTABLE,
submenu_read=InstrumentCommands.GET_ADVANCED_FUNCTIONS,
submenu_write=InstrumentCommands.SET_ADVANCED_FUNCTIONS)
self._param_dict.add(InstrumentParameters.ENGINEERING_UNITS_OUTPUT,
r'$^',
lambda value : self._check_bit_value(value),
None,
default_value=1, # 1 = True
startup_param=True,
direct_access=True,
type=ParameterDictType.INT,
display_name="Engineering Units Output",
range={'True': 1, 'False': 0},
description="Enable engineering units in output: (1:True | 0:False)",
visibility=ParameterDictVisibility.IMMUTABLE,
submenu_read=InstrumentCommands.GET_ADVANCED_FUNCTIONS,
submenu_write=InstrumentCommands.SET_ADVANCED_FUNCTIONS)
self._param_dict.add(InstrumentParameters.AUTO_RUN,
r'$^',
lambda value : self._check_bit_value(value),
None,
default_value=1, # 1 = True
startup_param=True,
direct_access=False,
type=ParameterDictType.INT,
display_name="Auto Run",
range={'True': 1, 'False': 0},
description="Enable instrument to restart in sampling mode after power cycle: (1:True | 0:False)",
visibility=ParameterDictVisibility.IMMUTABLE,
submenu_read=InstrumentCommands.GET_ADVANCED_FUNCTIONS,
submenu_write=InstrumentCommands.SET_ADVANCED_FUNCTIONS)
self._param_dict.add(InstrumentParameters.INHIBIT_DATA_STORAGE,
r'$^',
lambda value : self._check_bit_value(value),
None,
default_value=1, # 1 = True
startup_param=True,
direct_access=False,
type=ParameterDictType.INT,
display_name="Inhibit Data Storage",
range={'True': 1, 'False': 0},
description="Disable data storage on instrument: (1:True | 0:False)",
submenu_read=InstrumentCommands.GET_ADVANCED_FUNCTIONS,
submenu_write=InstrumentCommands.SET_ADVANCED_FUNCTIONS)
# The 24 per-channel calibration parameters differ only in channel number,
# so add them in a loop.
# Why is each individual CC a list? IOS has a single list parameter
for index, key in enumerate(CALIBRATION_COEFFICIENTS_PARAMETERS, start=1):
    self._param_dict.add(key,
                         r'(\w{64})CAL\r\n',
                         lambda match: self._convert_calibration(match.group(1)),
                         str,
                         type=ParameterDictType.LIST,
                         display_name="Calibration Coefficients Channel %d" % index,
                         description="Current calibrations for channel %d." % index,
                         visibility=ParameterDictVisibility.READ_ONLY,
                         submenu_read=InstrumentCommands.GET_CHANNEL_CALIBRATION)
def _build_command_handlers(self):
# Add build handlers for device get commands.
# TODO - these can all be consolidated into a single build command that takes the command as the first
# argument like the rest of the drivers
self._add_build_handler(InstrumentCommands.GET_STATUS, self._build_get_status_command)
self._add_build_handler(InstrumentCommands.GET_IDENTIFICATION, self._build_get_identification_command)
self._add_build_handler(InstrumentCommands.GET_LOGGER_DATE_AND_TIME, self._build_get_logger_date_and_time_command)
self._add_build_handler(InstrumentCommands.GET_SAMPLE_INTERVAL, self._build_get_sample_interval_command)
self._add_build_handler(InstrumentCommands.GET_START_DATE_AND_TIME, self._build_get_start_date_and_time_command)
self._add_build_handler(InstrumentCommands.GET_END_DATE_AND_TIME, self._build_get_end_date_and_time_command)
self._add_build_handler(InstrumentCommands.GET_BATTERY_VOLTAGE, self._build_get_battery_voltage_command)
self._add_build_handler(InstrumentCommands.GET_CHANNEL_CALIBRATION, self._build_get_channel_calibration_command)
self._add_build_handler(InstrumentCommands.GET_ADVANCED_FUNCTIONS, self._build_get_advanced_functions_command)
self._add_build_handler(InstrumentCommands.START_SAMPLING, self._build_start_sampling_command)
# Add build handlers for device set commands.
self._add_build_handler(InstrumentCommands.SET_LOGGER_DATE_AND_TIME, self._build_set_date_time_command)
self._add_build_handler(InstrumentCommands.SET_START_DATE_AND_TIME, self._build_set_date_time_command)
self._add_build_handler(InstrumentCommands.SET_END_DATE_AND_TIME, self._build_set_date_time_command)
self._add_build_handler(InstrumentCommands.SET_SAMPLE_INTERVAL, self._build_set_time_command)
self._add_build_handler(InstrumentCommands.SET_ADVANCED_FUNCTIONS, self._build_set_advanced_functions_command)
# Add response handlers for device get commands.
self._add_response_handler(InstrumentCommands.GET_STATUS, self._parse_status_response)
self._add_response_handler(InstrumentCommands.GET_IDENTIFICATION, self._parse_identification_response)
self._add_response_handler(InstrumentCommands.GET_LOGGER_DATE_AND_TIME, self._parse_logger_date_and_time_response)
self._add_response_handler(InstrumentCommands.GET_SAMPLE_INTERVAL, self._parse_sample_interval_response)
self._add_response_handler(InstrumentCommands.GET_START_DATE_AND_TIME, self._parse_start_date_and_time_response)
self._add_response_handler(InstrumentCommands.GET_END_DATE_AND_TIME, self._parse_end_date_and_time_response)
self._add_response_handler(InstrumentCommands.GET_BATTERY_VOLTAGE, self._parse_battery_voltage_response)
self._add_response_handler(InstrumentCommands.GET_CHANNEL_CALIBRATION, self._parse_channel_calibration_response)
self._add_response_handler(InstrumentCommands.GET_ADVANCED_FUNCTIONS, self._parse_advanced_functions_response)
self._add_response_handler(InstrumentCommands.START_SAMPLING, self._parse_start_sampling_response)
##################################################################################################
# set command handlers
##################################################################################################
def _build_set_date_time_command(self, cmd, *args):
try:
[name, value] = args
time_str = self._convert_ion_date_time(value)
command = cmd + time_str
return command
except Exception as ex:
raise InstrumentParameterException('_build_set_date_time_command: %r.' % ex)
def _build_set_time_command(self, cmd, *args):
try:
[name, value] = args
time_str = self._convert_ion_time(value)
command = cmd + time_str
return command
except Exception as ex:
raise InstrumentParameterException('_build_set_time_command: %r.' % ex)
def _build_set_advanced_functions_command(self, cmd, *args):
try:
value = 0
for name in AdvancedFunctionsParameters.list():
if self._param_dict.get(name) == 1:
value = value | self.advanced_functions_bits[name]
log.debug("_build_set_advanced_functions_command: value=%x, a_f[%s]=%x", value, name, self.advanced_functions_bits[name])
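        # Shift the 16 advanced-function flag bits into the upper half of the
        # 32-bit command word before formatting it as eight hex digits.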
value *= 0x10000
value_str = '%08x' % value
command = cmd + value_str
return command
except Exception as ex:
raise InstrumentParameterException('_build_set_advanced_functions_command: %r.' % ex)
##################################################################################################
# get command handlers
##################################################################################################
def _build_get_status_command(self, **kwargs):
cmd_name = kwargs.get('command', None)
if cmd_name is None:
raise InstrumentParameterException('_build_get_status_command requires a command.')
return cmd_name, InstrumentResponses.GET_STATUS
def _build_get_identification_command(self, **kwargs):
cmd_name = kwargs.get('command', None)
if cmd_name is None:
raise InstrumentParameterException('_build_get_identification_command requires a command.')
return cmd_name, InstrumentResponses.GET_IDENTIFICATION
def _build_get_logger_date_and_time_command(self, **kwargs):
cmd_name = kwargs.get('command', None)
if cmd_name is None:
raise InstrumentParameterException('_build_get_logger_date_and_time_command requires a command.')
return cmd_name, InstrumentResponses.GET_LOGGER_DATE_AND_TIME
def _build_get_sample_interval_command(self, **kwargs):
cmd_name = kwargs.get('command', None)
if cmd_name is None:
raise InstrumentParameterException('_build_get_sample_interval_command requires a command.')
return cmd_name, InstrumentResponses.GET_SAMPLE_INTERVAL
def _build_get_start_date_and_time_command(self, **kwargs):
cmd_name = kwargs.get('command', None)
if cmd_name is None:
raise InstrumentParameterException('_build_get_start_date_and_time_command requires a command.')
return cmd_name, InstrumentResponses.GET_START_DATE_AND_TIME
def _build_get_end_date_and_time_command(self, **kwargs):
cmd_name = kwargs.get('command', None)
if cmd_name is None:
raise InstrumentParameterException('_build_get_end_date_and_time_command requires a command.')
return cmd_name, InstrumentResponses.GET_END_DATE_AND_TIME
def _build_get_battery_voltage_command(self, **kwargs):
cmd_name = kwargs.get('command', None)
if cmd_name is None:
raise InstrumentParameterException('_build_get_battery_voltage_command requires a command.')
return cmd_name, InstrumentResponses.GET_BATTERY_VOLTAGE
def _build_get_channel_calibration_command(self, **kwargs):
cmd_name = kwargs.get('command', None)
if cmd_name is None:
raise InstrumentParameterException('_build_get_channel_calibration_command requires a command.')
param_name = kwargs.get('name', None)
if param_name is None:
raise InstrumentParameterException('_build_get_channel_calibration_command requires a parameter name.')
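        # The channel number is appended to the command as a two-digit
        # uppercase hex suffix.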
channel_number = '%02X' % int(param_name.split('_')[-1])
cmd = cmd_name + channel_number
return cmd, InstrumentResponses.GET_CHANNEL_CALIBRATION
def _build_get_advanced_functions_command(self, **kwargs):
cmd_name = kwargs.get('command', None)
if cmd_name is None:
raise InstrumentParameterException('_build_get_advanced_functions_command requires a command.')
return cmd_name, InstrumentResponses.GET_ADVANCED_FUNCTIONS
def _build_start_sampling_command(self, **kwargs):
cmd_name = kwargs.get('command', None)
if cmd_name is None:
raise InstrumentParameterException('_build_start_sampling_command requires a command.')
return cmd_name, InstrumentResponses.START_SAMPLING
##################################################################################################
# response handlers
##################################################################################################
def _parse_status_response(self, response, prompt, **kwargs):
if InstrumentResponses.GET_STATUS in response:
self._param_dict.update(response)
else:
raise InstrumentParameterException('Get status response not correct: %r.' % response)
def _parse_identification_response(self, response, prompt, **kwargs):
if InstrumentResponses.GET_IDENTIFICATION in response:
self._param_dict.update(response)
else:
raise InstrumentParameterException('Get identification response not correct: %r.' % response)
def _parse_logger_date_and_time_response(self, response, prompt, **kwargs):
if InstrumentResponses.GET_LOGGER_DATE_AND_TIME in response:
self._param_dict.update(response)
else:
raise InstrumentParameterException('Get logger date and time response not correct: %r.' % response)
def _parse_sample_interval_response(self, response, prompt, **kwargs):
if InstrumentResponses.GET_SAMPLE_INTERVAL in response:
self._param_dict.update(response)
else:
raise InstrumentParameterException('Get sample interval response not correct: %r.' % response)
def _parse_start_date_and_time_response(self, response, prompt, **kwargs):
if InstrumentResponses.GET_START_DATE_AND_TIME in response:
self._param_dict.update(response)
else:
raise InstrumentParameterException('Get start date and time response not correct: %r.' % response)
def _parse_end_date_and_time_response(self, response, prompt, **kwargs):
if InstrumentResponses.GET_END_DATE_AND_TIME in response:
self._param_dict.update(response)
else:
raise InstrumentParameterException('Get end date and time response not correct: %r.' % response)
def _parse_battery_voltage_response(self, response, prompt, **kwargs):
if InstrumentResponses.GET_BATTERY_VOLTAGE in response:
self._param_dict.update(response)
else:
raise InstrumentParameterException('Get battery voltage response not correct: %r.' % response)
def _parse_channel_calibration_response(self, response, prompt, **kwargs):
param_name = kwargs.get('name', None)
if param_name is None:
raise InstrumentParameterException('_parse_channel_calibration_response requires a parameter name.')
if InstrumentResponses.GET_CHANNEL_CALIBRATION in response:
self._param_dict.update(response, param_name)
else:
raise InstrumentParameterException('Get channel calibration response not correct: %r.' % response)
def _get_bit_value(self, name, value, **kwargs):
bit_value = value & self.advanced_functions_bits[name]
log.debug("_get_bit_value: value=%x, a_f[%s]=%x, bit_value=%d", value, name, self.advanced_functions_bits[name], bit_value)
return 0 if bit_value == 0 else 1
def _parse_advanced_functions_response(self, response, prompt, **kwargs):
match = re.search('([0-9A-F]{4})[0-9A-F]{4}STC', response)
if match is not None:
hex_value = int(match.group(1), 16)
log.debug("_parse_advanced_functions_response: hex_str=%s, hex_value=%x", match.group(1), hex_value)
for name in AdvancedFunctionsParameters.list():
self._param_dict.set_value(name, self._get_bit_value(name, hex_value))
else:
raise InstrumentParameterException('Get advanced functions response not correct: %r.' % response)
def _parse_start_sampling_response(self, response, prompt, **kwargs):
        match = re.search(r'Logger started in mode (\d{2})', response)
if match is not None:
return match.group(1)
else:
raise InstrumentParameterException('Start sampling response not correct: %r.' % response)
def create_playback_protocol(callback):
return InstrumentProtocol(None, None, callback)
| janeen666/mi-instrument | mi/instrument/rbr/xr_420_thermistor_24/ooicore/driver.py | Python | bsd-2-clause | 90,876 |
#!/usr/bin/python
#
# Copyright (c) 2015, Arista Networks, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
DOCUMENTATION = """
---
module: eos_bgp_config
short_description: Manage BGP routing configuration in EOS
description:
- The eos_bgp_config module provides resource management of the
global BGP routing process for Arista EOS nodes
version_added: 1.1.0
category: BGP
author: Arista EOS+
requirements:
  - Arista EOS 4.13.7M or later with command API enabled
- Python Client for eAPI 0.3.1 or later
notes:
  - All configuration is idempotent unless otherwise specified
  - Supports eos metaparameters for using the eAPI transport
  - Supports stateful resource configuration
options:
bgp_as:
description:
- The BGP autonomous system number to be configured for the
local BGP routing instance. The value must be in the valid
BGP AS range of 1 to 65535.
required: true
default: null
choices: []
aliases: []
version_added: 1.1.0
enable:
description:
- Configures the administrative state for the global BGP routing
process. If enable is True then the BGP routing process is
        administratively enabled and if enable is False then
the BGP routing process is administratively disabled.
default: true
required: false
choices: ['True', 'False']
aliases: []
version_added: 1.1.0
router_id:
description:
- Configures the BGP routing process router-id value. The router
id must be in the form of A.B.C.D
default: false
required: false
choices: []
aliases: []
version_added: 1.1.0
"""
EXAMPLES = """
- name: enable BGP routing with AS 65535
eos_bgp_config: bgp_as=65535 state=present enable=yes
- name: disable the BGP routing process
eos_bgp_config: bgp_as=65535 enable=no
- name: configure the BGP router-id
eos_bgp_config: bgp_as=65535 router_id=1.1.1.1
"""
#<<EOS_COMMON_MODULE_START>>
import syslog
import collections
from ansible.module_utils.basic import *
try:
import pyeapi
PYEAPI_AVAILABLE = True
except ImportError:
PYEAPI_AVAILABLE = False
DEFAULT_SYSLOG_PRIORITY = syslog.LOG_NOTICE
DEFAULT_CONNECTION = 'localhost'
TRANSPORTS = ['socket', 'http', 'https', 'http_local']
class EosAnsibleModule(AnsibleModule):
meta_args = {
'config': dict(),
'username': dict(),
'password': dict(),
'host': dict(),
'connection': dict(default=DEFAULT_CONNECTION),
'transport': dict(choices=TRANSPORTS),
'port': dict(),
'debug': dict(type='bool', default='false'),
'logging': dict(type='bool', default='true')
}
stateful_args = {
'state': dict(default='present', choices=['present', 'absent']),
}
def __init__(self, stateful=True, *args, **kwargs):
kwargs['argument_spec'].update(self.meta_args)
self._stateful = stateful
if stateful:
kwargs['argument_spec'].update(self.stateful_args)
super(EosAnsibleModule, self).__init__(*args, **kwargs)
self.result = dict(changed=False, changes=dict())
self._debug = kwargs.get('debug') or self.boolean(self.params['debug'])
self._logging = kwargs.get('logging') or self.params['logging']
self.log('DEBUG flag is %s' % self._debug)
self.debug('pyeapi_version', self.check_pyeapi())
self.debug('stateful', self._stateful)
self.debug('params', self.params)
self._attributes = self.map_argument_spec()
self.validate()
self._node = self.connect()
self._instance = None
self.desired_state = self.params['state'] if self._stateful else None
self.exit_after_flush = kwargs.get('exit_after_flush')
@property
def instance(self):
if self._instance:
return self._instance
func = self.func('instance')
if not func:
self.fail('Module does not support "instance"')
try:
self._instance = func(self)
except Exception as exc:
self.fail('instance[error]: %s' % exc.message)
self.log("called instance: %s" % self._instance)
return self._instance
@property
def attributes(self):
return self._attributes
@property
def node(self):
if self._node:
return self._node
self._node = self.connect()
return self._node
def check_pyeapi(self):
if not PYEAPI_AVAILABLE:
self.fail('Unable to import pyeapi, is it installed?')
return pyeapi.__version__
def map_argument_spec(self):
"""map_argument_spec maps only the module argument spec to attrs
This method will map the argumentspec minus the meta_args to attrs
and return the attrs. This returns a dict object that includes only
the original argspec plus the stateful_args (if self._stateful=True)
Returns:
dict: Returns a dict object that includes the original
argument_spec plus stateful_args with values minus meta_args
"""
keys = set(self.params).difference(self.meta_args)
attrs = dict()
attrs = dict([(k, self.params[k]) for k in self.params if k in keys])
if 'CHECKMODE' in attrs:
del attrs['CHECKMODE']
return attrs
def validate(self):
for key, value in self.attributes.iteritems():
func = self.func('validate_%s' % key)
if func:
self.attributes[key] = func(value)
def create(self):
if not self.check_mode:
func = self.func('create')
if not func:
self.fail('Module must define "create" function')
return self.invoke(func, self)
def remove(self):
if not self.check_mode:
func = self.func('remove')
if not func:
                self.fail('Module must define "remove" function')
return self.invoke(func, self)
def flush(self, exit_after_flush=False):
self.exit_after_flush = exit_after_flush
if self.desired_state == 'present' or not self._stateful:
if self.instance.get('state') == 'absent':
changed = self.create()
self.result['changed'] = changed or True
self.refresh()
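            # Diff the desired attributes against the current instance state;
            # what remains is the set of (key, value) pairs still to apply.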
changeset = self.attributes.viewitems() - self.instance.viewitems()
if self._debug:
self.debug('desired_state', self.attributes)
self.debug('current_state', self.instance)
changes = self.update(changeset)
if changes:
self.result['changes'] = changes
self.result['changed'] = True
self._attributes.update(changes)
flush = self.func('flush')
if flush:
self.invoke(flush, self)
elif self.desired_state == 'absent' and self._stateful:
if self.instance.get('state') == 'present':
changed = self.remove()
self.result['changed'] = changed or True
elif self._stateful:
if self.desired_state != self.instance.get('state'):
changed = self.invoke(self.instance.get('state'))
self.result['changed'] = changed or True
self.refresh()
self.result['instance'] = self.instance
if self.exit_after_flush:
self.exit()
def update(self, changeset):
changes = dict()
for key, value in changeset:
if value is not None:
changes[key] = value
func = self.func('set_%s' % key)
if func and not self.check_mode:
try:
self.invoke(func, self)
except Exception as exc:
self.fail(exc.message)
return changes
def connect(self):
if self.params['config']:
pyeapi.load_config(self.params['config'])
config = dict()
if self.params['connection']:
config = pyeapi.config_for(self.params['connection'])
if not config:
msg = 'Connection name "%s" not found' % self.params['connection']
self.fail(msg)
if self.params['username']:
config['username'] = self.params['username']
if self.params['password']:
config['password'] = self.params['password']
if self.params['transport']:
config['transport'] = self.params['transport']
if self.params['port']:
config['port'] = self.params['port']
if self.params['host']:
config['host'] = self.params['host']
if 'transport' not in config:
self.fail('Connection must define a transport')
connection = pyeapi.client.make_connection(**config)
node = pyeapi.client.Node(connection, **config)
try:
resp = node.enable('show version')
self.debug('eos_version', resp[0]['result']['version'])
self.debug('eos_model', resp[0]['result']['modelName'])
except (pyeapi.eapilib.ConnectionError, pyeapi.eapilib.CommandError):
self.fail('unable to connect to %s' % node)
else:
self.log('Connected to node %s' % node)
self.debug('node', str(node))
return node
def config(self, commands):
self.result['changed'] = True
if not self.check_mode:
self.node.config(commands)
def api(self, module):
return self.node.api(module)
def func(self, name):
return globals().get(name)
def invoke(self, func, *args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as exc:
self.fail(exc.message)
def invoke_function(self, name, *args, **kwargs):
func = self.func(name)
if func:
return self.invoke(func, *args, **kwargs)
def fail(self, msg):
self.invoke_function('on_fail', self)
self.log('ERROR: %s' % msg, syslog.LOG_ERR)
self.fail_json(msg=msg)
def exit(self):
self.invoke_function('on_exit', self)
self.log('Module completed successfully')
self.exit_json(**self.result)
def refresh(self):
self._instance = None
def debug(self, key, value):
if self._debug:
if 'debug' not in self.result:
self.result['debug'] = dict()
self.result['debug'][key] = value
def log(self, message, priority=None):
if self._logging:
syslog.openlog('ansible-eos')
priority = priority or DEFAULT_SYSLOG_PRIORITY
syslog.syslog(priority, str(message))
@classmethod
def add_state(cls, name):
cls.stateful_args['state']['choices'].append(name)
#<<EOS_COMMON_MODULE_END>>
def instance(module):
"""Returns the BGP routing instance configuration
"""
bgp_as = module.attributes['bgp_as']
result = module.node.api('bgp').get()
_instance = dict(bgp_as=bgp_as, state='absent')
if result and bgp_as == str(result['bgp_as']):
_instance['state'] = 'present'
_instance['router_id'] = result['router_id']
_instance['enable'] = not result['shutdown']
return _instance
def create(module):
"""Creates a new isntance of BGP routing on the node
"""
bgp_as = module.attributes['bgp_as']
module.log('Invoked create for eos_bgp_config[{}]'.format(bgp_as))
module.node.api('bgp').create(bgp_as)
def remove(module):
"""Removes the BGP routing instance from the node
"""
bgp_as = module.attributes['bgp_as']
module.log('Invoked remove for eos_bgp_config[{}]'.format(bgp_as))
module.node.api('bgp').delete()
def set_enable(module):
"""Globally enables or disables the BGP process
"""
value = not module.attributes['enable']
bgp_as = module.attributes['bgp_as']
module.log('Invoked set_enable for eos_bgp_config[{}] '
'with value {}'.format(bgp_as, value))
module.node.api('bgp').set_shutdown(value)
def set_router_id(module):
"""Configures the BGP router-id
"""
value = module.attributes['router_id']
bgp_as = module.attributes['bgp_as']
module.log('Invoked set_router_id for eos_bgp_config[{}] '
'with value {}'.format(bgp_as, value))
module.node.api('bgp').set_router_id(value)
def main():
"""The main module routine called when the module is run by Ansible
"""
argument_spec = dict(
bgp_as=dict(required=True),
enable=dict(type='bool', default=True),
router_id=dict()
)
module = EosAnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
module.flush(True)
    main()
| steve-dodd/pynet | class6/library/eos_bgp_config.py | Python | gpl-2.0 | 14,323 |
"""Interactive map entities/players!
Note:
Could even be something like a sign! Or the human player.
"""
import pygame
from hypatia import constants
from hypatia import actor
class HumanPlayer(actor.Actor):
def __init__(self, *args, **kwargs):
actor.Actor.__init__(self, *args, **kwargs)
# NOTE: outdated/needs to be updated for velocity
def move(self, game, direction):
"""Modify human player's positional data legally (check
for collisions).
Note:
Will round down to nearest probable step
if full step is impassable.
Needs to use velocity instead...
Args:
direction (constants.Direction):
"""
self.walkabout.direction = direction
# hack for incorporating new velocity system, will update later
if direction in (constants.Direction.north, constants.Direction.south):
planned_movement_in_pixels = self.velocity.y
else:
planned_movement_in_pixels = self.velocity.x
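        # Scale movement by the elapsed frame time so speed is
        # framerate-independent.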
adj_speed = game.screen.time_elapsed_milliseconds / 1000.0
iter_pixels = max([1, int(planned_movement_in_pixels)])
# test a series of positions
for pixels in range(iter_pixels, 0, -1):
# create a rectangle at the new position
new_topleft_x, new_topleft_y = self.walkabout.topleft_float
            # NOTE: leftover hack -- when the tested step is exactly two
            # pixels, the time-based speed scaling is bypassed entirely.
            if pixels == 2:
                adj_speed = 1
if direction == constants.Direction.north:
new_topleft_y -= pixels * adj_speed
elif direction == constants.Direction.east:
new_topleft_x += pixels * adj_speed
elif direction == constants.Direction.south:
new_topleft_y += pixels * adj_speed
elif direction == constants.Direction.west:
new_topleft_x -= pixels * adj_speed
destination_rect = pygame.Rect((new_topleft_x, new_topleft_y),
self.walkabout.size)
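            # Collision-test the union of the current and destination rects
            # so the whole swept path is checked, not just the endpoint.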
collision_rect = self.walkabout.rect.union(destination_rect)
if not game.scene.collide_check(collision_rect):
# we're done, we can move!
new_topleft = (new_topleft_x, new_topleft_y)
self.walkabout.action = constants.Action.walk
animation = self.walkabout.current_animation()
self.walkabout.size = animation.getMaxSize()
self.walkabout.rect = destination_rect
self.walkabout.topleft_float = new_topleft
return True
# never found an applicable destination
self.walkabout.action = constants.Action.stand
return False
class Npc(actor.Actor):
def __init__(self, *args, **kwargs):
actor.Actor.__init__(self, *args, **kwargs)
| Applemann/hypatia | hypatia/player.py | Python | mit | 2,943 |
#!/usr/bin/env ambari-python-wrap
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import sys
import zipfile
import os
from ambari_server.ambariPath import AmbariPath
# Default values are hardcoded here
BACKUP_PROCESS = 'backup'
RESTORE_PROCESS = 'restore'
SUPPORTED_PROCESSES = [BACKUP_PROCESS, RESTORE_PROCESS]
# The list of files where the ambari server state is kept on the filesystem
AMBARI_FILESYSTEM_STATE = [AmbariPath.get("/etc/ambari-server/conf"),
AmbariPath.get("/var/lib/ambari-server/resources"),
AmbariPath.get("/var/run/ambari-server/bootstrap/"),
AmbariPath.get("/var/run/ambari-server/stack-recommendations")]
# What to use when no path/archive is specified
DEFAULT_ARCHIVE = AmbariPath.get("/var/lib/ambari-server/Ambari_State_Backup.zip")
# Responsible for managing the Backup/Restore process
class BackupRestore:
def __init__(self, state_file_list, zipname, zip_folder_path):
"""
Zip file creator
:param state_file_list: the list of files where the Ambari State is kept on the filesystem
:param zipname: the name of the archive to use
:param zip_folder_path: the path of the archive
:return:
"""
self.state_file_list = state_file_list
self.zipname = zipname
self.zip_folder_path = zip_folder_path
def perform_backup(self):
"""
Used to perform the actual backup, by creating the zip archive
:return:
"""
try:
print("Creating zip file...")
# Use allowZip64=True to allow sizes greater than 4GB
zipf = zipfile.ZipFile(self.zip_folder_path + self.zipname, 'w', allowZip64=True)
zipdir(zipf, self.state_file_list, self.zipname)
except Exception, e:
sys.exit("Could not create zip file. Details: " + str(e))
print("Zip file created at " + self.zip_folder_path + self.zipname)
def perform_restore(self):
"""
Used to perform the restore process
:return:
"""
try:
print("Extracting the archive " + self.zip_folder_path + self.zipname)
unzip(self.zip_folder_path + self.zipname, '/')
except Exception, e:
sys.exit("Could not extract the zipfile " + self.zip_folder_path + self.zipname
+ " Details: " + str(e))
def unzip(source_filename, dest_dir):
"""
Zip archive extractor
:param source_filename: the absolute path of the file to unzip
:param dest_dir: the destination of the zip content
:return:
"""
zf = zipfile.ZipFile(source_filename)
try:
zf.extractall(dest_dir)
except Exception, e:
print("A problem occurred while unzipping. Details: " + str(e))
raise e
finally:
zf.close()
def zipdir(zipf, state_file_list, zipname):
"""
Used to archive the specified directory
:param zipf: the zipfile
:param state_file_list: the file list to archive
:param zipname: the name of the zip
:return:
"""
try:
for path in state_file_list:
for root, dirs, files in os.walk(path):
for file in files:
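          # Skip the archive itself so the backup never includes its own zip file.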
if not file == zipname:
zipf.write(os.path.join(root, file))
except Exception, e:
print("A problem occurred while unzipping. Details: " + str(e))
raise e
finally:
zipf.close()
def print_usage():
"""
Usage instructions
:return:
"""
print("Usage: python BackupRestore.py <processType> [zip-folder-path|zip-file-path]\n\n"
+ " processType - backup : backs up the filesystem state of the Ambari server into a zip file\n"
+ " processType - restore : restores the filesystem state of the Ambari server\n"
+ " [zip-folder-path] used with backup specifies the path of the folder where the zip file to be created\n"
+ " [zip-folder-path] used with restore specifies the path of the Ambari folder where the zip file to restore from is located\n")
def validate_folders(folders):
"""
Used to validate folder existence on the machine
:param folders: folder list containing paths to validate
:return:
"""
for folder in folders:
if not os.path.isdir(folder):
sys.exit("Error while validating folders. Folder " + folder + " does not exist.")
def retrieve_path_and_zipname(archive_absolute_path):
target = {'path': None , 'zipname': None}
try:
elements = archive_absolute_path.split("/")
if elements is not None and len(elements)>0:
target['zipname'] = elements[len(elements)-1]
target['path'] = archive_absolute_path.replace(elements[len(elements)-1], "")
except Exception, e:
sys.exit("Could not retrieve path and zipname from the absolute path " + archive_absolute_path + ". Please check arguments."
+ " Details: " + str(e))
return target
def main(argv=None):
# Arg checks
if len(argv) != 3 and len(argv) != 2:
print_usage()
sys.exit("Invalid usage.")
else:
process_type = argv[1]
if not (SUPPORTED_PROCESSES.__contains__(process_type)):
sys.exit("Unsupported process type: " + process_type)
# if no archive is specified
if len(argv) == 2:
print "No path specified. Will use " + DEFAULT_ARCHIVE
location_data = retrieve_path_and_zipname(DEFAULT_ARCHIVE)
else:
location_data = retrieve_path_and_zipname(argv[2])
validate_folders([location_data['path']])
zip_file_path = location_data['path']
ambari_backup_zip_filename = location_data['zipname']
backup_restore = BackupRestore(AMBARI_FILESYSTEM_STATE, ambari_backup_zip_filename, zip_file_path)
print(process_type.title() + " process initiated.")
if process_type == BACKUP_PROCESS:
validate_folders(AMBARI_FILESYSTEM_STATE)
backup_restore.perform_backup()
print(BACKUP_PROCESS.title() + " complete.")
if process_type == RESTORE_PROCESS:
backup_restore.perform_restore()
print(RESTORE_PROCESS.title() + " complete.")
if __name__ == '__main__':
main(sys.argv)
| arenadata/ambari | ambari-server/src/main/python/ambari_server/BackupRestore.py | Python | apache-2.0 | 6,601 |
from dataclasses import dataclass
import dataclasses
from typing import Dict, Optional, Tuple
from frozendict import frozendict
from randovania.game_description.item.item_category import ItemCategory
from randovania.game_description.resources.pickup_index import PickupIndex
from randovania.games.game import RandovaniaGame
@dataclass(frozen=True)
class MajorItem:
game: RandovaniaGame
name: str
item_category: ItemCategory
broad_category: ItemCategory
model_name: str
progression: Tuple[str, ...]
ammo_index: Tuple[str, ...] = tuple()
unlocks_ammo: bool = False
required: bool = False
original_index: Optional[PickupIndex] = None
probability_offset: int = 0
probability_multiplier: float = 1
warning: Optional[str] = None
extra: frozendict = dataclasses.field(default_factory=frozendict)
def __post_init__(self):
if not self.progression and not self.ammo_index:
raise ValueError(f"Item {self.name} has no progression nor ammo.")
@classmethod
def from_json(cls, name: str, value: dict, game: RandovaniaGame,
item_categories: Dict[str, ItemCategory]) -> "MajorItem":
return cls(
game=game,
name=name,
item_category=item_categories[value["item_category"]],
broad_category=item_categories[value["broad_category"]],
model_name=value["model_name"],
progression=tuple(value["progression"]),
ammo_index=tuple(value.get("ammo", [])),
unlocks_ammo=value.get("unlocks_ammo", False),
required=value.get("required", False),
original_index=PickupIndex(value["original_index"]) if "original_index" in value else None,
probability_offset=value["probability_offset"],
probability_multiplier=value["probability_multiplier"],
warning=value.get("warning"),
extra=frozendict(value.get("extra", {}))
)
@property
def as_json(self) -> dict:
result = {
"item_category": self.item_category.name,
"broad_category": self.broad_category.name,
"model_name": self.model_name,
"progression": list(self.progression),
"ammo": list(self.ammo_index),
"unlocks_ammo": self.unlocks_ammo,
"required": self.required,
"probability_offset": self.probability_offset,
"probability_multiplier": self.probability_multiplier,
"extra": self.extra
}
if self.original_index is not None:
result["original_index"] = self.original_index.index
if self.warning is not None:
result["warning"] = self.warning
return result
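# A minimal usage sketch (hypothetical names and values, for illustration only;
# the category keys and the RandovaniaGame member are assumptions, not taken
# from this repository):
#
#   item = MajorItem.from_json(
#       "Missile Launcher",
#       {
#           "item_category": "missile",
#           "broad_category": "missile_related",
#           "model_name": "MissileLauncher",
#           "progression": ["MissileLauncher"],
#           "probability_offset": 0,
#           "probability_multiplier": 1,
#       },
#       game=some_game,                   # any RandovaniaGame member
#       item_categories=item_categories,  # Dict[str, ItemCategory]
#   )
#   assert item.as_json["model_name"] == "MissileLauncher"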
| henriquegemignani/randovania | randovania/game_description/item/major_item.py | Python | gpl-3.0 | 2,756 |
"""
.15925 Editor
Copyright 2014 TechInvestLab.ru dot15926@gmail.com
.15925 Editor is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 3.0 of the License, or (at your option) any later version.
.15925 Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with .15925 Editor.
"""
from xml.sax.saxutils import escape, unescape
import re
from iso15926.io.rdf_base import expand_uri
import iso15926.kb as kb
import gzip
re_qn = re.compile(r'qn(\d+)')
def split_uri(uri):
p = uri.rfind('#')
if p==-1:
p = uri.rfind('/')
if p==-1:
return ('', uri)
return (uri[:p+1], uri[p+1:])
class XmlWriter:
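    # Tiny writer DSL: x() emits a self-closing element, o()/c() open and
    # close a tag, t() writes escaped text, r() writes raw text, f() writes
    # formatted raw text, l() emits a leaf element with escaped literal
    # content, and e() returns a context manager that pairs o() with c().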
def x(self, descr, *t):
if t:
descr = descr.format(*t)
return self.r('<').r(descr).r(' />\n')
def o(self, descr, *t):
if t:
descr = descr.format(*t)
return self.r('<').r(descr).r('>\n')
def c(self, tag):
return self.r('</').r(tag).r('>\n')
def t(self, text):
return self.r(escape(text))
def r(self, raw_text):
self._out(raw_text)
return self
def f(self, raw_fmt, *t):
self.r(raw_fmt.format(*t))
return self
def l(self, descr, *t):
if len(t)>1:
descr = descr.format(*t[:-1])
literal = t[-1]
return self.r('<').r(descr).r('>').r(escape(literal)).r('</').r(descr.split(' ', 1)[0]).r('>\n')
def e(self, descr, *t):
if t:
descr = descr.format(*t)
return XmlTagContext(self, descr)
class XmlTagContext:
def __init__(self, writer, descr):
self.writer = writer
self.descr = descr
def __enter__(self):
self.writer.o(self.descr)
def __exit__(self, exc_type, exc_value, traceback):
self.writer.c(self.descr.split(' ', 1)[0])
class RdfXmlWriter(XmlWriter):
def SaveGraphToFile(self, graph, fname, use_gzip = False):
if use_gzip:
with gzip.open(fname, 'wb') as f:
self._out = f.write
self.write_begin(graph.basens, graph.nslist, graph.grGetUsedNamespaces())
self.write_graph(graph)
self.write_end()
else:
with open(fname, 'wb') as f:
self._out = f.write
self.write_begin(graph.basens, graph.nslist, graph.grGetUsedNamespaces())
self.write_graph(graph)
self.write_end()
self._out = None
public.collect_garbage()
def write_begin(self, basens=None, nslist={}, nsset=set()):
self.basens = basens
self.nslist = dict(nslist)
self.nsset = set(nsset)
names = set()
uris = set()
if self.basens:
self.nslist['basens'] = self.basens
for name, uri in self.nslist.iteritems():
names.add(name)
uris.add(uri)
unkns = []
for name in names:
if re_qn.match(name):
unkns.append(int(name[2:]))
if unkns:
unk = max(unkns)+1
else:
unk = 1
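        # Give every used-but-unnamed namespace a compact name, preferring
        # well-known prefixes and falling back to generated qnN names.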
for uri in self.nsset:
if uri not in uris and uri != '' and uri != '_#':
found = False
for k, v in kb.all_known_namespaces:
if v == uri and k not in names:
self.nslist[k] = uri
found = True
if not found:
self.nslist['qn{0}'.format(unk)] = uri
unk += 1
self.ns_compact_name = {}
for name, uri in self.nslist.iteritems():
self.ns_compact_name[uri] = name
self.r('<?xml version="1.0"?>\n\n')
self.r('<!DOCTYPE rdf:RDF [\n')
for n, uri in self.nslist.iteritems():
self.f(' <!ENTITY {0} "{1}" >\n', n, uri)
self.r(']>\n\n')
self.r('<rdf:RDF \n')
if self.basens:
self.f(' xmlns="{0}"\n', self.basens)
if self.basens[-1] == '#':
self.f(' xml:base="{0}"\n', self.basens[:-1])
for n, uri in self.nslist.iteritems():
self.f(' xmlns:{0}="{1}"\n', n, uri)
self.r('>\n')
def write_end(self):
self.r('</rdf:RDF>\n')
def to_tag(self, uri):
ns, name = split_uri(uri)
if self.basens==ns:
return name
return ':'.join((self.ns_compact_name[ns], name))
def to_ref(self, uri):
ns, name = split_uri(uri)
        if self.basens and self.basens == ns and self.basens[-1] == '#':
return '#'+name
ns = self.ns_compact_name.get(ns, None)
if ns is None:
return uri
return '&{0};{1}'.format(ns, name)
def entity(self, ent, type):
return self.x('{0} rdf:about="{1}"', type, self.to_ref(ent))
def in_entity(self, ent=None, type=None):
if type is None:
type = 'rdf:Description'
else:
type = self.to_tag(type)
if ent is None:
return self.e(type)
else:
return self.e('{0} rdf:about="{1}"', type, self.to_ref(ent))
def in_prop(self, property):
return self.e(self.to_tag(property))
def resource(self, property, value):
return self.x('{0} rdf:resource="{1}"', self.to_tag(property), self.to_ref(value))
def nontyped(self, property, value):
return self.l('{0}', self.to_tag(property), value)
def langtyped(self, property, value, lang):
return self.l('{0} xml:lang="{1}"', self.to_tag(property), lang, value)
def datatyped(self, property, value, datatype):
return self.l('{0} rdf:datatype="{1}"', self.to_tag(property), self.to_ref(datatype), value)
def write_entity(self, g, ent, triples):
if not triples:
return
typ = g.grObjects(ent, 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type')
if len(typ)==1:
if ent.startswith('_#'):
e = self.in_entity(None, typ[0])
else:
e = self.in_entity(ent, typ[0])
typ = True
else:
if ent.startswith('_#'):
e = self.in_entity(None)
else:
e = self.in_entity(ent)
typ = False
with e:
for t in triples:
p = t.p
if t.has_literal:
if t.has_lang:
self.langtyped(p, t.l, t.lang)
elif t.has_datatype:
self.datatyped(p, t.l, t.datatype)
else:
self.nontyped(p, t.l)
else:
if typ and p=='http://www.w3.org/1999/02/22-rdf-syntax-ns#type':
continue
o = t.o
if g.grIs_rdflist(o):
with self.e('{0} rdf:parseType="Collection"', self.to_tag(p)):
for a in g.grGet_rdflist(o):
if a.startswith('_#'):
self.write_entity(g, a, g.grTriplesForSubj(a))
else:
self.x('rdf:Description rdf:about="{0}"', self.to_ref(a))
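                # OWL lists serialize as a chain of OWLList nodes: each link
                # holds its item in hasContents and the rest of the list in
                # hasNext, terminated by an empty OWLList.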
elif g.grIs_owllist(o):
with self.e(self.to_tag(p)):
lst = g.grGet_owllist(o)
for a in lst:
if a.startswith('_#'):
self.r('<owllist:OWLList><owllist:hasContents>')
self.write_entity(g, a, g.grTriplesForSubj(a))
self.r('</owllist:hasContents><owllist:hasNext>')
else:
self.f('<owllist:OWLList><owllist:hasContents rdf:resource="{0}" /><owllist:hasNext>', self.to_ref(a))
self.r('<owllist:OWLList />')
for a in lst:
self.r('</owllist:hasNext></owllist:OWLList>')
self.r('\n')
elif o.startswith('_#'):
with self.e(self.to_tag(p)):
self.write_entity(g, o, g.grTriplesForSubj(o))
else:
self.resource(p, o)
def write_graph(self, g):
total = len(g.ks)
count = 0
for curi, triples in g.ks.iteritems():
uri = expand_uri(curi)
if uri.startswith('_#') and g.ko.get(curi) is not None:
continue
self.write_entity(g, uri, triples)
count += 1
if count % 1000 == 0:
st = getattr(g, 'AsyncChangeState', None)
if st:
# graph is Document
g.AsyncChangeState(g.state_saving, 100*count/total)
| TechInvestLab/dot15926 | editor_qt/iso15926/io/rdfxml_writer.py | Python | lgpl-3.0 | 9,297 |
"""Faça um programa que carregue uma lista com os modelos de cinco carros (exemplo de modelos: FUSCA, GOL, VECTRA etc).
Carregue uma outra lista com o consumo desses carros, isto é, quantos quilômetros cada um desses carros faz com um litro
de combustível. Calcule e mostre:
O modelo do carro mais econômico;
Quantos litros de combustível cada um dos carros cadastrados consome para percorrer uma distância de 1000 quilômetro
s
e quanto isto custará, considerando um que a gasolina custe R$ 2,25 o litro. Abaixo segue uma tela de exemplo. O dis
posição das informações deve ser o mais próxima possível ao exemplo. Os dados são fictícios e podem mudar a cada exe
cução do programa.
Comparativo de Consumo de Combustível
Veículo 1
Nome: fusca
Km por litro: 7
Veículo 2
Nome: gol
Km por litro: 10
Veículo 3
Nome: uno
Km por litro: 12.5
Veículo 4
Nome: Vectra
Km por litro: 9
Veículo 5
Nome: Peugeout
Km por litro: 14.5
Relatório Final
1 - fusca - 7.0 - 142.9 litros - R$ 321.43
2 - gol - 10.0 - 100.0 litros - R$ 225.00
3 - uno - 12.5 - 80.0 litros - R$ 180.00
4 - vectra - 9.0 - 111.1 litros - R$ 250.00
5 - peugeout - 14.5 - 69.0 litros - R$ 155.17
O menor consumo é do peugeout."""
cars = input().split()
km_per_liter = [float(x) for x in input().split()]
size = len(km_per_liter)

print("Final Report")
for i in range(size):
    # Liters needed to cover 1000 km, and the cost at R$ 2.25 per liter.
    liters = 1000 / km_per_liter[i]
    cost = liters * 2.25
    print(i + 1, "-", cars[i], "-", km_per_liter[i], "-",
          round(liters, 1), "liters - R$", format(cost, ".2f"))

# The most economical car is the one that travels the farthest per liter.
best = km_per_liter.index(max(km_per_liter))
print("The lowest consumption is the", cars[best] + ".")
| jucimarjr/IPC_2017-1 | lista06/lista06_lista01_questao21.py | Python | apache-2.0 | 2,450 |
"""
TestCases for checking set_get_returns_none.
"""
import sys, os, string
import tempfile
from pprint import pprint
import unittest
try:
# For Python 2.3
from bsddb import db
except ImportError:
# For earlier Pythons w/distutils pybsddb
from bsddb3 import db
from test_all import verbose
#----------------------------------------------------------------------
class GetReturnsNoneTestCase(unittest.TestCase):
def setUp(self):
self.filename = tempfile.mktemp()
def tearDown(self):
try:
os.remove(self.filename)
except os.error:
pass
def test01_get_returns_none(self):
d = db.DB()
d.open(self.filename, db.DB_BTREE, db.DB_CREATE)
d.set_get_returns_none(1)
for x in string.letters:
d.put(x, x * 40)
data = d.get('bad key')
assert data == None
data = d.get('a')
assert data == 'a'*40
count = 0
c = d.cursor()
rec = c.first()
while rec:
count = count + 1
rec = c.next()
assert rec == None
assert count == 52
c.close()
d.close()
def test02_get_raises_exception(self):
d = db.DB()
d.open(self.filename, db.DB_BTREE, db.DB_CREATE)
d.set_get_returns_none(0)
for x in string.letters:
d.put(x, x * 40)
self.assertRaises(db.DBNotFoundError, d.get, 'bad key')
self.assertRaises(KeyError, d.get, 'bad key')
data = d.get('a')
assert data == 'a'*40
count = 0
exceptionHappened = 0
c = d.cursor()
rec = c.first()
while rec:
count = count + 1
try:
rec = c.next()
except db.DBNotFoundError: # end of the records
exceptionHappened = 1
break
assert rec != None
assert exceptionHappened
assert count == 52
c.close()
d.close()
#----------------------------------------------------------------------
def test_suite():
return unittest.makeSuite(GetReturnsNoneTestCase)
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| trivoldus28/pulsarch-verilog | tools/local/bas-release/bas,3.9/lib/python/lib/python2.3/bsddb/test/test_get_none.py | Python | gpl-2.0 | 2,233 |
# -*- coding: utf-8 -*-
import random
import time
import gevent
from celery import Celery
from gevent.pool import Pool
from gevent.timeout import Timeout, with_timeout
import sys
class TimeOutException(Exception):
pass
app = Celery('with_celery', broker='amqp://guest@localhost//')
@app.task
def add(x, y):
print '%s + %s = %s' % (x, y, x+y)
return x + y
def sub_task(i):
c = gevent.getcurrent()
print '----------> %s' % c
random.seed(i)
r = random.randint(0, 5)
time.sleep(r)
# gevent.sleep(r)
print 'sub_task - %s(%ss)' % (i, r)
return r
@app.task
def test():
result = []
start = time.time()
print 'test start: %s' % start
try:
with Timeout(2, TimeOutException) as timeout:
for t in TaskPool.imap_unordered(sub_task, xrange(10)):
print t, time.time()
result.append(t)
except TimeOutException, e:
print '*************time out*************'
end = time.time()
print 'test end: %s, total: %s' % (end, end - start)
return result
@app.task
def test_async():
start = time.time()
print 'test_async start: %s' % start
threads = [gevent.spawn(sub_task, i) for i in xrange(10)]
try:
gevent.joinall(threads, 3)
result = [t.value for t in threads if t.successful()]
except Exception, e:
print 'test_async exception: %s' % e
end = time.time()
print 'test_async end: %s, total: %s' % (end, end - start)
return result
@app.task
def test_sync():
start = time.time()
print 'test start: %s' % start
result = map(sub_task, xrange(10))
end = time.time()
print 'test end: %s, total: %s' % (end, end - start)
return result
def gsleep(i):
print 'gsleep: %s' % i
gevent.sleep(i)
return i
@app.task
def test_with_timeout():
try:
result = with_timeout(1, gsleep, 3)
# result = with_timeout(1, test_with_timeout, 3, timeout_value=-1)
print 'test_with_timeout timeout_value = %s' % result
except Timeout:
print 'test_with_timeout timout exception'
@app.task
def test_timeout(seconds, default):
timeout = Timeout.start_new(seconds)
try:
try:
return gsleep(5)
except Timeout as t:
# if sys.exc_info()[1] is timeout:
if t is timeout:
print 'timeout instance sys.exc_info()[1] is timout: %s' % (sys.exc_info()[1] is timeout)
return default
raise # not my timeout
finally:
print 'test_timeout: cancel timeout'
timeout.cancel()
@app.task
def test_timeout1(seconds, default):
timeout = gevent.Timeout(seconds)
timeout.start()
t = gevent.spawn(gsleep, 1)
try:
try:
t.join(timeout=timeout)
            # started -- Boolean, indicates whether this Greenlet has been started
            # ready() -- Boolean, indicates whether this Greenlet has stopped
            # successful() -- Boolean, indicates whether this Greenlet stopped without raising
            # value -- any value, the value returned by this Greenlet's code
            # exception -- exception, the uncaught exception raised inside this Greenlet
if t.successful():
return t.value
except Timeout as t:
# if sys.exc_info()[1] is timeout:
if t is timeout:
print 'timeout instance is: %s' % sys.exc_info()[1]
return default
print 'test_timeout1: not my timeout'
raise # not my timeout
finally:
if t.ready():
print 'greenlet is stop.'
if t.exception:
print 'greenlet is stop exception'
if t.successful():
print 'greenlet is stop success.'
print 'test_timeout1: cancel timeout'
timeout.cancel()
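# Shared gevent pool: caps sub_task fan-out at five concurrent greenlets.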
TaskPool = Pool(5)
# celery -A with_celery worker -P gevent -c 10 --loglevel=info
if __name__ == '__main__':
# test()
add.delay(3, 4)
# test.delay()
test_sync.delay()
test_async.delay()
test_with_timeout.delay()
test_timeout.delay(4, -9999)
test_timeout1.delay(4, -9999)
| gmaclinuxer/pyabs | test/test_with_celery.py | Python | mit | 4,114 |
# -*- coding: utf-8 -*-
"""
This module contains tags for including react components into templates.
"""
import uuid
from django import template
from django.conf import settings
from django.template import Node
register = template.Library()
CONTEXT_KEY = "REACT_COMPONENTS"
CONTEXT_PROCESSOR = 'django_react_templatetags.context_processors.react_context_processor' # NOQA
def get_uuid():
return uuid.uuid4().hex
class ReactTagManager(Node):
"""
Handles the printing of react placeholders and queueing, is invoked by
react_render.
"""
def __init__(self, identifier, component, data=None):
component_prefix = ""
if hasattr(settings, "REACT_COMPONENT_PREFIX"):
component_prefix = settings.REACT_COMPONENT_PREFIX
self.identifier = identifier
self.component = "%s%s" % (component_prefix, component)
self.data = data
def _has_processor(self):
try:
status = CONTEXT_PROCESSOR in settings.TEMPLATES[0]['OPTIONS']['context_processors'] # NOQA
except Exception as e: # NOQA
status = False
return status
def render(self, context):
if not self._has_processor():
raise Exception('"react_context_processor must be added to TEMPLATE_CONTEXT_PROCESSORS"') # NOQA
components = context.get(CONTEXT_KEY, [])
try:
resolved_data = self.data.resolve(context)
except template.VariableDoesNotExist:
resolved_data = None
except AttributeError:
resolved_data = None
identifier = self.identifier
if isinstance(self.identifier, template.Variable):
identifier = self.identifier.resolve(context)
elif not identifier:
identifier = "%s_%s" % (self.component, get_uuid())
component = {
"identifier": identifier,
"component": self.component,
"data": resolved_data,
}
components.append(component)
context[CONTEXT_KEY] = components
return u'<div id="%s"></div>' % identifier
def _prepare_args(parses, token):
"""
Normalize token arguments that can be passed along to node renderer
"""
values = {
"identifier": None,
"data": None
}
args = token.split_contents()
method = args[0]
for arg in args[1:]:
        key, value = arg.split('=', 1)
if key == "id":
key = "identifier"
if value.startswith('"') or value.startswith('\''):
value = value[1:-1]
else:
value = template.Variable(value)
values[key] = value
assert "component" in values, "%s is missing component value" % method
return values
@register.tag
def react_render(parser, token):
"""
Renders a react placeholder and adds it to the global render queue.
Example:
{% react_render component="ListRestaurants" data=restaurants %}
"""
values = _prepare_args(parser, token)
return ReactTagManager(**values)
@register.inclusion_tag('react_print.html', takes_context=True)
def react_print(context):
"""
Generates ReactDOM.render calls based on REACT_COMPONENT queue,
this needs to be run after react has been loaded.
The queue will be cleared after beeing called.
Example:
{% react_print %}
"""
components = context[CONTEXT_KEY]
context[CONTEXT_KEY] = []
return {
"components": components
}
| ssteinerx/django-react-templatetags | django_react_templatetags/templatetags/react.py | Python | mit | 3,500 |
from django.core.urlresolvers import resolve
from django.http import HttpRequest
from django.http import QueryDict
from django.test import TestCase
from django.test import Client
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from landpage.views import terms
import json
from landpage.models import LandpageTeamMember
from landpage.models import LandpageCoursePreview
from registrar.models import Course
from registrar.models import Student
from registrar.models import Teacher
TEST_USER_EMAIL = "ledo@gah.com"
TEST_USER_USERNAME = "Ledo"
TEST_USER_PASSWORD = "password"
class TermsTest(TestCase):
def tearDown(self):
pass
def setUp(self):
pass
def test_url_resolves_to_terms_page(self):
        found = resolve('/terms')
self.assertEqual(found.func,terms.terms_page)
def test_terms_page_returns_correct_html(self):
parameters = {"course_id":1}
client = Client()
response = client.post(
'/terms',
data=parameters,
)
self.assertEqual(response.status_code, 200)
self.assertIn(b'Terms',response.content)
# self.assertIn(b'The definitive course on comics!',response.content)
| AcademicsToday/py-academicstoday | academicstoday_project/landpage/tests/test_terms.py | Python | apache-2.0 | 1,259 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of level.
# https://github.com/heynemann/level
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT-license
# Copyright (c) 2016, Bernardo Heynemann <heynemann@gmail.com>
from unittest import TestCase as PythonTestCase
from tornado.ioloop import IOLoop
from level.testing import LevelTestCase
def async_case(f, *args, **kw):
def handle_method(*args, **kw):
async def go():
await f(*args, **kw)
loop = IOLoop.instance()
loop.run_sync(go)
handle_method.__name__ = f.__name__
return handle_method
class TestCase(PythonTestCase):
def setUp(self):
super(TestCase, self).setUp()
self.io_loop = IOLoop()
self.io_loop.make_current()
class WebTestCase(LevelTestCase):
pass
| heynemann/level | tests/unit/base.py | Python | mit | 845 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
components/tools/OmeroPy/omero/util/UploadMask.py
-----------------------------------------------------------------------------
Copyright (C) 2006-2009 University of Dundee. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
------------------------------------------------------------------------------
@author Jean-Marie Burel
<a href="mailto:j.burel@dundee.ac.uk">j.burel@dundee.ac.uk</a>
@author Donald MacDonald
<a href="mailto:donald@lifesci.dundee.ac.uk">donald@lifesci.dundee.ac.uk</a>
@version 3.0
<small>
(<b>Internal version:</b> $Revision: $Date: $)
</small>
@since 3.0-Beta4.1
"""
from OmeroPopo import MaskData
from OmeroPopo import ROIData
from OmeroPopo import ImageData
from OmeroPopo import ROICoordinate
import math
class uploadMask():
##
# Instantiate the uploadMask Object.
#
def __init__(self):
## Map of colour, roiclass this will store all the masks in the image, and
# all the roi with a particular colour. */
self.roiMap = {};
##
# Add a Mask Shape to the appropriate ROIClass, creating one if neccessary.
# @param image The Image containing the mask data.
# @param z The Z Section of the image.
# @param t The Time point of the image.
#
def addMaskShape(self, image, z, t):
maskMap = self.createMasks(image, z, t);
        for mask in maskMap.values():
roiClass = None;
if(self.roiMap.has_key(mask.getColour())):
roiClass = self.roiMap[mask.getColour()];
else:
roiClass = ROIClass();
self.roiMap[mask.getColour()] = roiClass;
roiClass.addMask(mask, z, t);
##
# Get all the masks for the image.
# @param image The Image containing the mask data.
# @param z The Z Section of the image.
# @param t The Time point of the image.
# @return See above.
#
def createMasks(self, inputImage, z, t):
        value = None;
        mask = None;
        masks = {};
        for x in range(inputImage.getWidth()):
            for y in range(inputImage.getHeight()):
                value = inputImage.getRGB(x, y);
                # Black pixels are treated as background and skipped; Color is
                # assumed to be supplied by the caller (this module is a direct
                # port of Java code and does not import it here).
                if(value==Color.black.getRGB()):
                    continue;
                if (not masks.has_key(value)):
                    mask = MaskClass(value);
                    masks[value] = mask;
                else:
                    mask = masks[value];
                mask.add(Point(x, y));
        return masks;
##
# Return all the roi for the image.
# @param image See above.
# @return See above.
#
def getROIForImage(self, image):
roiList = []
        for roiClass in self.roiMap.values():
            roiList.append(roiClass.getROI(image));
return roiList;
class MaskClass():
##
# Instantiate a new mask object with colour value.
# @param value The colour of the mask as packedInt
#
def __init__(self, value):
## The points in the mask. These points are in the image coordinates.
self.points = {};
## The colour of the mask.
self.colour = value;
## The min(x,y) and max(x,y) coordinates. */
self.min = Point();
self.max = Point();
## The width of the mask.
self.width = 0;
## The height of the mask.
self.height = 0;
##
# Get the colour of the mask.
# @return See above.
#
def getColour(self):
return self.colour;
##
# Get the Points in the mask as a bytestream that can be used to
# make an image.
# @return See above.
#
def asBytes(self):
        import array
        # One bit per pixel of the bounding box, packed into bytes.
        bytesArray = array.array('B');
        for cnt in range(int(math.ceil(self.width*self.height/8.0))):
            bytesArray.append(0);
        position = 0;
        for y in range(self.min.y, self.max.y+1):
            for x in range(self.min.x, self.max.x+1):
                if self.points.has_key(Point(x, y)):
                    self.setBit(bytesArray, position, 1)
                else:
                    self.setBit(bytesArray, position, 0)
                position = position + 1;
        return bytesArray.tostring();
##
# Add Point p to the list of points in the mask.
# @param p See above.
#
def add(self, p):
        if(len(self.points) == 0):
            self.min = Point(p);
            self.max = Point(p);
        else:
            self.min.x = min(p.x, self.min.x);
            self.min.y = min(p.y, self.min.y);
            self.max.x = max(p.x, self.max.x);
            self.max.y = max(p.y, self.max.y);
        self.width = self.max.x-self.min.x+1;
        self.height = self.max.y-self.min.y+1;
        self.points[p] = True;
##
# Create a MaskData Object from the mask.
# @param z The Z section the mask data is on.
# @param t The T section the mask data is on.
# @return See above.
#
def asMaskData(self, z, t):
mask = MaskData();
mask.setX(self.min.x);
mask.setY(self.min.y);
mask.setWidth(self.width);
mask.setHeight(self.height);
mask.setFill(self.colour);
mask.setT(t);
mask.setZ(z);
mask.setMask(self.asBytes());
return mask;
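    ##
    # Set a single bit in the packed byte array.
    # @param data The byte array. @param bit The global bit index.
    # @param val 0 or 1.
    #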
def setBit(self, data, bit, val):
bytePosition = bit/8;
bitPosition = bit%8;
data[bytePosition] = data[bytePosition] & ~(0x1<<bitPosition) | (val<<bitPosition);
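    ##
    # Read a single bit from the packed byte array.
    # @param data The byte array. @param bit The global bit index.
    # @return 1 if the bit is set, 0 otherwise.
    #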
def getBit(self, data, bit):
bytePosition = bit/8;
bitPosition = bit%8;
if ((data[bytePosition] & (0x1<<bitPosition))!=0):
return 1
else:
return 0
class ROIClass():
##
# Instantiate the ROIClass and create the maskMap.
#
def __init__(self):
##
# Map of the coordinates and mask objects, may have more than one
# mask on one plane.
#
self.maskMap = {};
##
# Add a mask to the ROIMap, this will store the mask and it's z,t
# @param mask See above.
# @param z See above.
# @param t See above.
#
def addMask(self, mask, z, t):
        coord = ROICoordinate(z, t);
        if(self.maskMap.has_key(coord)):
            maskList = self.maskMap[coord];
        else:
            maskList = [];
            self.maskMap[coord] = maskList;
        maskList.append(mask);
##
# Create the roi for the
# @param image
# @return See above.
#
def getROI(self, image):
roi = ROIData();
roi.setId(image.getId());
for coord in self.maskMap:
maskList = self.maskMap[coord];
for mask in maskList:
toSaveMask = mask.asMaskData(coord.getZSection(), coord.getTimePoint());
roi.addShapeData(toSaveMask);
return roi;
##
# Point class with x, y values
#
class Point():
    ##
    # Initialise point class
    # @param x An x coordinate, or another Point to copy.
    # @param y The y coordinate (ignored when copying a Point).
    #
    def __init__(self, x = 0, y = 0):
        if(isinstance(x, Point)):
            self.x = x.x;
            self.y = x.y;
        else:
            self.x = x;
            self.y = y;
    ##
    # Points are stored as dictionary keys, so value equality and a
    # matching hash are required.
    #
    def __eq__(self, other):
        return isinstance(other, Point) and self.x == other.x and self.y == other.y;
    def __hash__(self):
        return hash((self.x, self.y));
##
# Get the x value of the point
# @return See Above.
#
def getX(self):
return self.x;
##
# Set the x value of the point
# @param x See Above.
#
def setX(self, x):
self.x = x;
##
# Get the y value of the point
# @return See Above.
#
def getY(self):
return self.y;
##
# Set the y value of the point
# @param y See Above.
#
def setY(self, y):
self.y = y;
| bramalingam/openmicroscopy | components/tools/OmeroPy/src/omero/util/uploadMask.py | Python | gpl-2.0 | 8,488 |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import timedelta
from st2client.client import Client
from st2client.models import KeyValuePair
from st2common.services.access import create_token
from st2common.util.api import get_full_public_api_url
from st2common.util.date import get_datetime_utc_now
from st2common.constants.keyvalue import DATASTORE_KEY_SEPARATOR, SYSTEM_SCOPE
class DatastoreService(object):
"""
Class provides public methods for accessing datastore items.
"""
DATASTORE_NAME_SEPARATOR = DATASTORE_KEY_SEPARATOR
def __init__(self, logger, pack_name, class_name, api_username):
self._api_username = api_username
self._pack_name = pack_name
self._class_name = class_name
self._logger = logger
self._client = None
self._token_expire = get_datetime_utc_now()
##################################
# Methods for datastore management
##################################
def list_values(self, local=True, prefix=None):
"""
Retrieve all the datastores items.
:param local: List values from a namespace local to this pack/class. Defaults to True.
        :type local: ``bool``
:param prefix: Optional key name prefix / startswith filter.
:type prefix: ``str``
:rtype: ``list`` of :class:`KeyValuePair`
"""
client = self._get_api_client()
self._logger.audit('Retrieving all the value from the datastore')
key_prefix = self._get_full_key_prefix(local=local, prefix=prefix)
kvps = client.keys.get_all(prefix=key_prefix)
return kvps
def get_value(self, name, local=True, scope=SYSTEM_SCOPE, decrypt=False):
"""
Retrieve a value from the datastore for the provided key.
By default, value is retrieved from the namespace local to the pack/class. If you want to
retrieve a global value from a datastore, pass local=False to this method.
:param name: Key name.
:type name: ``str``
        :param local: Retrieve value from a namespace local to the pack/class. Defaults to True.
        :type local: ``bool``
        :param scope: Scope under which item is saved. Defaults to system scope.
        :type scope: ``str``
        :param decrypt: Return the decrypted value. Defaults to False.
        :type decrypt: ``bool``
:rtype: ``str`` or ``None``
"""
if scope != SYSTEM_SCOPE:
raise ValueError('Scope %s is unsupported.' % scope)
name = self._get_full_key_name(name=name, local=local)
client = self._get_api_client()
self._logger.audit('Retrieving value from the datastore (name=%s)', name)
try:
params = {'decrypt': str(decrypt).lower(), 'scope': scope}
kvp = client.keys.get_by_id(id=name, params=params)
except Exception as e:
self._logger.exception(
'Exception retrieving value from datastore (name=%s): %s',
name,
e
)
return None
if kvp:
return kvp.value
return None
def set_value(self, name, value, ttl=None, local=True, scope=SYSTEM_SCOPE, encrypt=False):
"""
Set a value for the provided key.
By default, value is set in a namespace local to the pack/class. If you want to
set a global value, pass local=False to this method.
:param name: Key name.
:type name: ``str``
:param value: Key value.
:type value: ``str``
:param ttl: Optional TTL (in seconds).
:type ttl: ``int``
        :param local: Set value in a namespace local to the pack/class. Defaults to True.
        :type local: ``bool``
        :param scope: Scope under which to place the item. Defaults to system scope.
        :type scope: ``str``
        :param encrypt: Encrypt the value when saving. Defaults to False.
        :type encrypt: ``bool``
:return: ``True`` on success, ``False`` otherwise.
:rtype: ``bool``
"""
if scope != SYSTEM_SCOPE:
            raise ValueError('Scope %s is unsupported.' % scope)
name = self._get_full_key_name(name=name, local=local)
value = str(value)
client = self._get_api_client()
self._logger.audit('Setting value in the datastore (name=%s)', name)
instance = KeyValuePair()
instance.id = name
instance.name = name
instance.value = value
instance.scope = scope
if encrypt:
instance.secret = True
if ttl:
instance.ttl = ttl
client.keys.update(instance=instance)
return True
def delete_value(self, name, local=True, scope=SYSTEM_SCOPE):
"""
Delete the provided key.
By default, value is deleted from a namespace local to the pack/class. If you want to
delete a global value, pass local=False to this method.
:param name: Name of the key to delete.
:type name: ``str``
        :param local: Delete a value in a namespace local to the pack/class. Defaults to True.
        :type local: ``bool``
        :param scope: Scope under which item is saved. Defaults to system scope.
        :type scope: ``str``
:return: ``True`` on success, ``False`` otherwise.
:rtype: ``bool``
"""
if scope != SYSTEM_SCOPE:
            raise ValueError('Scope %s is unsupported.' % scope)
name = self._get_full_key_name(name=name, local=local)
client = self._get_api_client()
instance = KeyValuePair()
instance.id = name
instance.name = name
self._logger.audit('Deleting value from the datastore (name=%s)', name)
try:
params = {'scope': scope}
client.keys.delete(instance=instance, params=params)
except Exception as e:
self._logger.exception(
'Exception deleting value from datastore (name=%s): %s',
name,
e
)
return False
return True
def _get_api_client(self):
"""
Retrieve API client instance.
"""
token_expire = self._token_expire <= get_datetime_utc_now()
if not self._client or token_expire:
self._logger.audit('Creating new Client object.')
ttl = (24 * 60 * 60)
self._token_expire = get_datetime_utc_now() + timedelta(seconds=ttl)
temporary_token = create_token(username=self._api_username, ttl=ttl)
api_url = get_full_public_api_url()
self._client = Client(api_url=api_url, token=temporary_token.token)
return self._client
def _get_full_key_name(self, name, local):
"""
Retrieve a full key name.
:rtype: ``str``
"""
if local:
name = self._get_key_name_with_prefix(name=name)
return name
def _get_full_key_prefix(self, local, prefix=None):
if local:
key_prefix = self._get_local_key_name_prefix()
if prefix:
key_prefix += prefix
else:
key_prefix = prefix
return key_prefix
def _get_local_key_name_prefix(self):
"""
Retrieve key prefix which is local to this pack/class.
"""
key_prefix = self._get_datastore_key_prefix() + self.DATASTORE_NAME_SEPARATOR
return key_prefix
def _get_key_name_with_prefix(self, name):
"""
Retrieve a full key name which is local to the current pack/class.
:param name: Base datastore key name.
:type name: ``str``
:rtype: ``str``
"""
prefix = self._get_datastore_key_prefix()
full_name = prefix + self.DATASTORE_NAME_SEPARATOR + name
return full_name
def _get_datastore_key_prefix(self):
prefix = '%s.%s' % (self._pack_name, self._class_name)
return prefix
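# Example usage (hypothetical; assumes a configured StackStorm deployment, a
# standard ``logging.Logger`` and an existing system user; the key below ends
# up as "my_pack.MySensor" + DATASTORE_KEY_SEPARATOR + "counter"):
#
#     datastore = DatastoreService(logger, 'my_pack', 'MySensor', 'st2admin')
#     datastore.set_value('counter', '1', ttl=3600)
#     datastore.get_value('counter')    # -> '1'
#     datastore.delete_value('counter')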
| tonybaloney/st2 | st2common/st2common/services/datastore.py | Python | apache-2.0 | 8,753 |
# coding: utf-8
#
# Copyright 2021 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Beam DoFns and PTransforms to provide validation of question models."""
from __future__ import annotations
from core.domain import question_domain
from core.jobs import job_utils
from core.jobs.decorators import validation_decorators
from core.jobs.transforms.validation import base_validation
from core.platform import models
(question_models, skill_models) = models.Registry.import_models(
[models.NAMES.question, models.NAMES.skill])
@validation_decorators.AuditsExisting(
question_models.QuestionSnapshotMetadataModel)
class ValidateQuestionSnapshotMetadataModel(
base_validation.BaseValidateCommitCmdsSchema):
"""Overrides _get_change_domain_class for QuestionSnapshotMetadataModel."""
def _get_change_domain_class(self, input_model): # pylint: disable=unused-argument
"""Returns a change domain class.
Args:
input_model: datastore_services.Model. Entity to validate.
Returns:
question_domain.QuestionChange. A domain object class for the
changes made by commit commands of the model.
"""
return question_domain.QuestionChange
@validation_decorators.RelationshipsOf(question_models.QuestionSkillLinkModel)
def question_skill_link_model_relationships(model):
"""Yields how the properties of the model relates to the ID of others."""
yield model.id, [question_models.QuestionModel]
yield model.skill_id, [skill_models.SkillModel]
@validation_decorators.RelationshipsOf(
question_models.QuestionCommitLogEntryModel)
def question_commit_log_entry_model_relationships(model):
"""Yields how the properties of the model relates to the ID of others."""
yield model.question_id, [question_models.QuestionModel]
@validation_decorators.RelationshipsOf(question_models.QuestionSummaryModel)
def question_summary_model_relationships(model):
"""Yields how the properties of the model relates to the ID of others."""
yield model.id, [question_models.QuestionModel]
@validation_decorators.AuditsExisting(
question_models.QuestionCommitLogEntryModel)
class ValidateQuestionCommitLogEntryModel(
base_validation.BaseValidateCommitCmdsSchema):
"""Overrides _get_change_domain_class for QuestionCommitLogEntryModel."""
def _get_change_domain_class(self, input_model): # pylint: disable=unused-argument
"""Returns a change domain class.
Args:
input_model: datastore_services.Model. Entity to validate.
Returns:
question_domain.QuestionChange. A domain object class for the
changes made by commit commands of the model.
"""
model = job_utils.clone_model(input_model)
if model.id.startswith('question'):
return question_domain.QuestionChange
else:
return None
| brianrodri/oppia | core/jobs/transforms/validation/question_validation.py | Python | apache-2.0 | 3,458 |
from setuptools import setup, find_packages
setup(name='alfred',
version="0.3",
author="Mike Spindel",
author_email="mike@spindel.is",
license="MIT",
keywords="alfred alfredapp script filter",
url="http://github.com/deactivated/python-alfred",
description='Utilities for Alfred script filters.',
install_requires=['lxml'],
packages=find_packages(),
zip_safe=False,
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Intended Audience :: Developers",
"Natural Language :: English",
"Programming Language :: Python"])
| deactivated/python-alfred | setup.py | Python | mit | 674 |
import angr
from angr.sim_type import SimTypeFd, SimTypeTop
from ..libc import io_file_data_for_arch
import logging
l = logging.getLogger("angr.procedures.posix.fileno")
######################################
# fileno
######################################
class fileno(angr.SimProcedure):
#pylint:disable=arguments-differ
def run(self, f):
self.argument_types = {0: self.ty_ptr(SimTypeTop())}
self.return_type = SimTypeFd()
# Get FILE struct
io_file_data = io_file_data_for_arch(self.state.arch)
# Get the file descriptor from FILE struct
fd = self.state.se.eval(self.state.memory.load(f + io_file_data['fd'],
4 * 8, # int
endness=self.state.arch.memory_endness))
return fd
| f-prettyland/angr | angr/procedures/posix/fileno.py | Python | bsd-2-clause | 865 |
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import webob.exc
from neutron.api.v2 import attributes
from neutron.extensions import securitygroup as ext_sg
from neutron.plugins.mlnx.db import mlnx_db_v2 as mlnx_db
from neutron.tests.unit import test_extension_security_group as test_sg
from neutron.tests.unit import test_security_groups_rpc as test_sg_rpc
PLUGIN_NAME = ('neutron.plugins.mlnx.'
'mlnx_plugin.MellanoxEswitchPlugin')
AGENT_NAME = ('neutron.plugins.mlnx.'
'agent.eswitch_neutron_agent.MlnxEswitchNeutronAgent')
NOTIFIER = ('neutron.plugins.mlnx.'
'agent_notify_api.AgentNotifierApi')
class MlnxSecurityGroupsTestCase(test_sg.SecurityGroupDBTestCase):
_plugin_name = PLUGIN_NAME
def setUp(self, plugin=None):
test_sg_rpc.set_firewall_driver(test_sg_rpc.FIREWALL_IPTABLES_DRIVER)
notifier_p = mock.patch(NOTIFIER)
notifier_cls = notifier_p.start()
self.notifier = mock.Mock()
notifier_cls.return_value = self.notifier
self._attribute_map_bk_ = {}
for item in attributes.RESOURCE_ATTRIBUTE_MAP:
self._attribute_map_bk_[item] = (attributes.
RESOURCE_ATTRIBUTE_MAP[item].
copy())
super(MlnxSecurityGroupsTestCase, self).setUp(PLUGIN_NAME)
self.addCleanup(mock.patch.stopall)
def tearDown(self):
attributes.RESOURCE_ATTRIBUTE_MAP = self._attribute_map_bk_
super(MlnxSecurityGroupsTestCase, self).tearDown()
class TestMlnxSecurityGroups(MlnxSecurityGroupsTestCase,
test_sg.TestSecurityGroups,
test_sg_rpc.SGNotificationTestMixin):
pass
class TestMlnxSecurityGroupsXML(TestMlnxSecurityGroups):
fmt = 'xml'
class TestMlnxSecurityGroupsDB(MlnxSecurityGroupsTestCase):
def test_security_group_get_port_from_device(self):
with self.network() as n:
with self.subnet(n):
with self.security_group() as sg:
security_group_id = sg['security_group']['id']
res = self._create_port(self.fmt, n['network']['id'])
port = self.deserialize(self.fmt, res)
fixed_ips = port['port']['fixed_ips']
data = {'port': {'fixed_ips': fixed_ips,
'name': port['port']['name'],
ext_sg.SECURITYGROUPS:
[security_group_id]}}
req = self.new_update_request('ports', data,
port['port']['id'])
if res.status_int >= 400:
raise webob.exc.HTTPClientError(code=res.status_int)
res = self.deserialize(self.fmt,
req.get_response(self.api))
port_id = res['port']['id']
device_id = port_id[:8]
port_dict = mlnx_db.get_port_from_device(device_id)
self.assertEqual(port_id, port_dict['id'])
self.assertEqual([security_group_id],
port_dict[ext_sg.SECURITYGROUPS])
self.assertEqual([], port_dict['security_group_rules'])
self.assertEqual([fixed_ips[0]['ip_address']],
port_dict['fixed_ips'])
self._delete('ports', port['port']['id'])
def test_security_group_get_port_from_device_with_no_port(self):
port_dict = mlnx_db.get_port_from_device('bad_device_id')
self.assertEqual(None, port_dict)
class TestMlnxSecurityGroupsDBXML(TestMlnxSecurityGroupsDB):
fmt = 'xml'
| netscaler/neutron | neutron/tests/unit/mlnx/test_mlnx_security_group.py | Python | apache-2.0 | 4,391 |
#!/usr/bin/env python3
import re
from collections import Counter
def minutes_per_guard():
"""Return a dictionary with each minute spent sleeping for each guard.
Repeating
E.g. {
10: [5, 6, 7, ..., 53, 54, 24, 25, 26, 27, 28],
99: [40, 41, 42, ..., 48, 49, 36, 37, ..., 48, 49, 50, 51, 52, 53, 54]}
"""
data = sorted(open('input.txt').read().splitlines())
mpg = dict()
current_guard, start = 0, 0
r_cycle = lambda s: rf".*:(\d\d).*{s}" # falls or wakes
for line in data:
guard = re.findall(r".*#(\d+).*", line)
if guard:
current_guard = int(guard[0])
falls = re.findall(r_cycle('falls'), line)
if falls:
start = int(falls[0])
wakes = re.findall(r_cycle('wakes'), line)
if wakes:
finish = int(wakes[0])
new_mins = list(range(start, finish))
current_mins = mpg.get(current_guard, [])
mpg[current_guard] = current_mins + new_mins
return mpg
def part1(minutes_per_guard):
sleeper_id = max(minutes_per_guard, key=lambda x: len(minutes_per_guard[x]))
# eg Counter({24: 2, 5: 1, 6: 1, ...}) (k = minute, v = how many times)
frequencies = Counter(minutes_per_guard[sleeper_id])
# eg 24 (minute where guard was mostly asleep)
minute = max(frequencies, key=frequencies.get)
return sleeper_id * minute
def part2(minutes_per_guard):
# eg: {10: (24, 2), 99: (45, 3)}
most_commons = {k: Counter(v).most_common(1)[0] for (k, v) in minutes_per_guard.items()}
# eg: 99 (because 45 appears 3 times)
sleeper_id = max(most_commons, key=lambda k: most_commons[k][1])
# eg: 45
minute = most_commons[sleeper_id][0]
return sleeper_id * minute
mpg = minutes_per_guard()
print(part1(mpg))
print(part2(mpg))
| YouriAckx/AdventOfCode | 2018/day04/day04.py | Python | gpl-3.0 | 1,826 |
#!/usr/bin/env python
# Licensed under the Apache 2.0 License
'''
Deletes a user from the database
Usage: deleteuser username
The environment variable LIGHTS_WEB_DATABASE must be set to the path of the database
Created on Nov 13, 2014
@author: Gary O'Neall
'''
import sys
import sqlite3
from hashlib import sha256
from os import path
DB_PATH = path.expandvars('$LIGHTS_WEB_DATABASE')
def usage():
''' Prints the usage to the console
'''
print "Usage:"
print "deleteuser username"
if __name__ == '__main__':
if len(sys.argv) != 2:
usage()
sys.exit(1)
username = sys.argv[1].strip()
if not path.isfile(DB_PATH):
print "Database is not initialized"
sys.exit(1)
con = sqlite3.connect(DB_PATH)
try:
cursor = con.execute('select id from users where username=?', [username])
row = cursor.fetchone()
if not row:
print "User does not exist"
sys.exit(1)
con.execute('delete from users where username=?', [username])
print 'user deleted'
except Exception as ex:
print "Error updating database: "+str(ex)
finally:
con.commit()
con.close()
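# Example invocation (paths are illustrative; assumes the user "alice" exists):
#   $ export LIGHTS_WEB_DATABASE=/var/lib/lights/lights.db
#   $ ./deleteuser.py alice
#   user deleted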
| goneall/PiLightsWebServer | src/deleteuser.py | Python | apache-2.0 | 1,246 |
def extractForthemoneyTranslations(item):
"""
Forthemoney Translations
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
return False
| fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractForthemoneyTranslations.py | Python | bsd-3-clause | 249 |
#!/usr/bin/python3
# counting_sheeps.py
# Returns the count of true values of an array.
#
# Written by Billy Wilson Arante <arantebillywilson@gmail.com>
# Last updated on 2017/11/24 PHT
def counting_sheeps(arr_of_sheeps):
count = 0
for i in arr_of_sheeps:
        if i:
count += 1
return count
def test():
arr = [True, False, True, True, False]
print(counting_sheeps(arr))
if __name__ == '__main__':
test()
| arantebillywilson/python-snippets | codewars/counting_sheeps.py | Python | mit | 459 |
import time
import RPi.GPIO as GPIO
import sys
# the red led cathode is wired to pin 22 of the expansion header
# the yellow led cathode is wired to pin 18 of the expansion header
# the green led cathode is wired to pin 16 of the expansion header
REDLED = 22
YELLOWLED = 18
GREENLED = 16
# set up control for the channel and pull to a known state
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BOARD)
GPIO.setup(REDLED, GPIO.OUT)
GPIO.output(REDLED, GPIO.LOW)
GPIO.setup(YELLOWLED, GPIO.OUT)
GPIO.output(YELLOWLED, GPIO.LOW)
GPIO.setup(GREENLED, GPIO.OUT)
GPIO.output(GREENLED, GPIO.LOW)
# expect to be invoked as led.py {red|yellow|green}
# with no arguments defaults to turning off all led
if len(sys.argv) == 2:
if sys.argv[1] == "red":
GPIO.output(REDLED, GPIO.HIGH)
if sys.argv[1] == "yellow":
GPIO.output(YELLOWLED, GPIO.HIGH)
if sys.argv[1] == "green":
GPIO.output(GREENLED, GPIO.HIGH)
| timroster/gamma-rover | led.py | Python | mit | 910 |
def counter():
print 'counter: starting counter'
i = -3
while i < 3:
i = i + 1
print 'counter: yield', i
yield i
if __name__ == '__main__':
print "the generator function:"
print repr(counter)
print "call generator function"
c = counter()
print "the generator:"
print repr(c)
print 'iterate'
for item in c:
print 'received:', item
| AmandaMoen/AmandaMoen | notes/resources/UW_IntroClass/class08/code/yield_example.py | Python | gpl-2.0 | 411 |
"""This module only works as a central point for importing logging that outputs to stdout
"""
"""
Copyright (c) 2015, Are Hansen - Honeypot Development.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are
permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or other
materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND AN
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import logging
import sys
log = logging.getLogger(__name__)
out_hdlr = logging.StreamHandler(sys.stdout)
out_hdlr.setFormatter(
logging.Formatter('%(asctime)s %(module)s[%(process)d]: %(msg)s', datefmt="%b %d %T")
)
out_hdlr.setLevel(logging.INFO)
log.addHandler(out_hdlr)
log.setLevel(logging.INFO) | ZombieNinjaPirate/HonSSH-utilities | bifrozt/functions/std.py | Python | gpl-2.0 | 1,728 |
#Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/graphics/charts/linecharts.py
__version__=''' $Id: linecharts.py 3604 2009-11-27 16:35:29Z meitham $ '''
__doc__="""This modules defines a very preliminary Line Chart example."""
from reportlab.lib import colors
from reportlab.lib.validators import isNumber, isColor, isColorOrNone, isListOfStrings, \
isListOfStringsOrNone, SequenceOf, isBoolean, NoneOr, \
isListOfNumbersOrNone, isStringOrNone
from reportlab.lib.attrmap import *
from reportlab.graphics.widgetbase import Widget, TypedPropertyCollection, PropHolder
from reportlab.graphics.shapes import Line, Rect, Group, Drawing, Polygon, PolyLine
from reportlab.graphics.widgets.signsandsymbols import NoEntry
from reportlab.graphics.charts.axes import XCategoryAxis, YValueAxis
from reportlab.graphics.charts.textlabels import Label
from reportlab.graphics.widgets.markers import uSymbol2Symbol, isSymbol, makeMarker
from reportlab.graphics.charts.areas import PlotArea
from reportlab.graphics.charts.legends import _objStr
class LineChartProperties(PropHolder):
_attrMap = AttrMap(
strokeWidth = AttrMapValue(isNumber, desc='Width of a line.'),
strokeColor = AttrMapValue(isColorOrNone, desc='Color of a line.'),
strokeDashArray = AttrMapValue(isListOfNumbersOrNone, desc='Dash array of a line.'),
symbol = AttrMapValue(NoneOr(isSymbol), desc='Widget placed at data points.',advancedUsage=1),
shader = AttrMapValue(None, desc='Shader Class.',advancedUsage=1),
filler = AttrMapValue(None, desc='Filler Class.',advancedUsage=1),
name = AttrMapValue(isStringOrNone, desc='Name of the line.'),
)
class AbstractLineChart(PlotArea):
def makeSwatchSample(self,rowNo, x, y, width, height):
baseStyle = self.lines
styleIdx = rowNo % len(baseStyle)
style = baseStyle[styleIdx]
color = style.strokeColor
y = y+height/2.
if self.joinedLines:
dash = getattr(style, 'strokeDashArray', getattr(baseStyle,'strokeDashArray',None))
            strokeWidth = getattr(style, 'strokeWidth', getattr(baseStyle, 'strokeWidth', None))
L = Line(x,y,x+width,y,strokeColor=color,strokeLineCap=0)
if strokeWidth: L.strokeWidth = strokeWidth
if dash: L.strokeDashArray = dash
else:
L = None
if hasattr(style, 'symbol'):
S = style.symbol
elif hasattr(baseStyle, 'symbol'):
S = baseStyle.symbol
else:
S = None
if S: S = uSymbol2Symbol(S,x+width/2.,y,color)
if S and L:
g = Group()
g.add(L)
g.add(S)
return g
return S or L
def getSeriesName(self,i,default=None):
'''return series name i or default'''
return _objStr(getattr(self.lines[i],'name',default))
class LineChart(AbstractLineChart):
pass
# This is conceptually similar to the VerticalBarChart.
# Still it is better named HorizontalLineChart... :-/
class HorizontalLineChart(LineChart):
"""Line chart with multiple lines.
A line chart is assumed to have one category and one value axis.
Despite its generic name this particular line chart class has
a vertical value axis and a horizontal category one. It may
evolve into individual horizontal and vertical variants (like
with the existing bar charts).
Available attributes are:
x: x-position of lower-left chart origin
y: y-position of lower-left chart origin
width: chart width
height: chart height
useAbsolute: disables auto-scaling of chart elements (?)
lineLabelNudge: distance of data labels to data points
lineLabels: labels associated with data values
lineLabelFormat: format string or callback function
groupSpacing: space between categories
joinedLines: enables drawing of lines
strokeColor: color of chart lines (?)
fillColor: color for chart background (?)
lines: style list, used cyclically for data series
valueAxis: value axis object
categoryAxis: category axis object
categoryNames: category names
data: chart data, a list of data series of equal length
"""
_attrMap = AttrMap(BASE=LineChart,
useAbsolute = AttrMapValue(isNumber, desc='Flag to use absolute spacing values.',advancedUsage=1),
lineLabelNudge = AttrMapValue(isNumber, desc='Distance between a data point and its label.',advancedUsage=1),
lineLabels = AttrMapValue(None, desc='Handle to the list of data point labels.'),
lineLabelFormat = AttrMapValue(None, desc='Formatting string or function used for data point labels.'),
lineLabelArray = AttrMapValue(None, desc='explicit array of line label values, must match size of data if present.'),
groupSpacing = AttrMapValue(isNumber, desc='? - Likely to disappear.'),
joinedLines = AttrMapValue(isNumber, desc='Display data points joined with lines if true.'),
lines = AttrMapValue(None, desc='Handle of the lines.'),
valueAxis = AttrMapValue(None, desc='Handle of the value axis.'),
categoryAxis = AttrMapValue(None, desc='Handle of the category axis.'),
categoryNames = AttrMapValue(isListOfStringsOrNone, desc='List of category names.'),
data = AttrMapValue(None, desc='Data to be plotted, list of (lists of) numbers.'),
inFill = AttrMapValue(isBoolean, desc='Whether infilling should be done.',advancedUsage=1),
reversePlotOrder = AttrMapValue(isBoolean, desc='If true reverse plot order.',advancedUsage=1),
annotations = AttrMapValue(None, desc='list of callables, will be called with self, xscale, yscale.',advancedUsage=1),
)
def __init__(self):
LineChart.__init__(self)
# Allow for a bounding rectangle.
self.strokeColor = None
self.fillColor = None
# Named so we have less recoding for the horizontal one :-)
self.categoryAxis = XCategoryAxis()
self.valueAxis = YValueAxis()
        # This defines two series of 4 points. Just an example.
self.data = [(100,110,120,130),
(70, 80, 80, 90)]
self.categoryNames = ('North','South','East','West')
self.lines = TypedPropertyCollection(LineChartProperties)
self.lines.strokeWidth = 1
self.lines[0].strokeColor = colors.red
self.lines[1].strokeColor = colors.green
self.lines[2].strokeColor = colors.blue
# control spacing. if useAbsolute = 1 then
# the next parameters are in points; otherwise
# they are 'proportions' and are normalized to
# fit the available space.
self.useAbsolute = 0 #- not done yet
self.groupSpacing = 1 #5
self.lineLabels = TypedPropertyCollection(Label)
self.lineLabelFormat = None
self.lineLabelArray = None
# This says whether the origin is above or below
# the data point. +10 means put the origin ten points
# above the data point if value > 0, or ten
# points below if data value < 0. This is different
# to label dx/dy which are not dependent on the
# sign of the data.
self.lineLabelNudge = 10
# If you have multiple series, by default they butt
# together.
# New line chart attributes.
self.joinedLines = 1 # Connect items with straight lines.
self.inFill = 0
self.reversePlotOrder = 0
def demo(self):
"""Shows basic use of a line chart."""
drawing = Drawing(200, 100)
data = [
(13, 5, 20, 22, 37, 45, 19, 4),
(14, 10, 21, 28, 38, 46, 25, 5)
]
lc = HorizontalLineChart()
lc.x = 20
lc.y = 10
lc.height = 85
lc.width = 170
lc.data = data
lc.lines.symbol = makeMarker('Circle')
drawing.add(lc)
return drawing
def calcPositions(self):
"""Works out where they go.
Sets an attribute _positions which is a list of
lists of (x, y) matching the data.
"""
self._seriesCount = len(self.data)
self._rowLength = max(map(len,self.data))
if self.useAbsolute:
# Dimensions are absolute.
normFactor = 1.0
else:
# Dimensions are normalized to fit.
normWidth = self.groupSpacing
availWidth = self.categoryAxis.scale(0)[1]
normFactor = availWidth / normWidth
self._positions = []
for rowNo in range(len(self.data)):
lineRow = []
for colNo in range(len(self.data[rowNo])):
datum = self.data[rowNo][colNo]
if datum is not None:
(groupX, groupWidth) = self.categoryAxis.scale(colNo)
x = groupX + (0.5 * self.groupSpacing * normFactor)
y = self.valueAxis.scale(0)
height = self.valueAxis.scale(datum) - y
lineRow.append((x, y+height))
self._positions.append(lineRow)
def _innerDrawLabel(self, rowNo, colNo, x, y):
"Draw a label for a given item in the list."
labelFmt = self.lineLabelFormat
labelValue = self.data[rowNo][colNo]
if labelFmt is None:
labelText = None
elif type(labelFmt) is str:
if labelFmt == 'values':
try:
labelText = self.lineLabelArray[rowNo][colNo]
except:
labelText = None
else:
labelText = labelFmt % labelValue
elif callable(labelFmt):
labelText = labelFmt(labelValue)
else:
raise ValueError("Unknown formatter type %s, expected string or function"%labelFmt)
if labelText:
label = self.lineLabels[(rowNo, colNo)]
if not label.visible: return
# Make sure labels are some distance off the data point.
if y > 0:
label.setOrigin(x, y + self.lineLabelNudge)
else:
label.setOrigin(x, y - self.lineLabelNudge)
label.setText(labelText)
else:
label = None
return label
def drawLabel(self, G, rowNo, colNo, x, y):
'''Draw a label for a given item in the list.
G must have an add method'''
G.add(self._innerDrawLabel(rowNo,colNo,x,y))
def makeLines(self):
g = Group()
labelFmt = self.lineLabelFormat
P = range(len(self._positions))
if self.reversePlotOrder: P.reverse()
inFill = self.inFill
if inFill:
inFillY = self.categoryAxis._y
inFillX0 = self.valueAxis._x
inFillX1 = inFillX0 + self.categoryAxis._length
inFillG = getattr(self,'_inFillG',g)
# Iterate over data rows.
for rowNo in P:
row = self._positions[rowNo]
styleCount = len(self.lines)
styleIdx = rowNo % styleCount
rowStyle = self.lines[styleIdx]
rowColor = rowStyle.strokeColor
dash = getattr(rowStyle, 'strokeDashArray', None)
if hasattr(self.lines[styleIdx], 'strokeWidth'):
strokeWidth = self.lines[styleIdx].strokeWidth
elif hasattr(self.lines, 'strokeWidth'):
strokeWidth = self.lines.strokeWidth
else:
strokeWidth = None
# Iterate over data columns.
if self.joinedLines:
points = []
for colNo in range(len(row)):
points += row[colNo]
if inFill:
points = points + [inFillX1,inFillY,inFillX0,inFillY]
inFillG.add(Polygon(points,fillColor=rowColor,strokeColor=rowColor,strokeWidth=0.1))
else:
line = PolyLine(points,strokeColor=rowColor,strokeLineCap=0,strokeLineJoin=1)
if strokeWidth:
line.strokeWidth = strokeWidth
if dash:
line.strokeDashArray = dash
g.add(line)
if hasattr(self.lines[styleIdx], 'symbol'):
uSymbol = self.lines[styleIdx].symbol
elif hasattr(self.lines, 'symbol'):
uSymbol = self.lines.symbol
else:
uSymbol = None
if uSymbol:
for colNo in range(len(row)):
x1, y1 = row[colNo]
symbol = uSymbol2Symbol(uSymbol,x1,y1,rowStyle.strokeColor)
if symbol: g.add(symbol)
# Draw item labels.
for colNo in range(len(row)):
x1, y1 = row[colNo]
self.drawLabel(g, rowNo, colNo, x1, y1)
return g
def draw(self):
"Draws itself."
vA, cA = self.valueAxis, self.categoryAxis
vA.setPosition(self.x, self.y, self.height)
if vA: vA.joinAxis = cA
if cA: cA.joinAxis = vA
vA.configure(self.data)
# If zero is in chart, put x axis there, otherwise
# use bottom.
xAxisCrossesAt = vA.scale(0)
if ((xAxisCrossesAt > self.y + self.height) or (xAxisCrossesAt < self.y)):
y = self.y
else:
y = xAxisCrossesAt
cA.setPosition(self.x, y, self.width)
cA.configure(self.data)
self.calcPositions()
g = Group()
g.add(self.makeBackground())
if self.inFill:
self._inFillG = Group()
g.add(self._inFillG)
g.add(cA)
g.add(vA)
cAdgl = getattr(cA,'drawGridLast',False)
vAdgl = getattr(vA,'drawGridLast',False)
if not cAdgl: cA.makeGrid(g,parent=self,dim=vA.getGridDims)
if not vAdgl: vA.makeGrid(g,parent=self,dim=cA.getGridDims)
g.add(self.makeLines())
if cAdgl: cA.makeGrid(g,parent=self,dim=vA.getGridDims)
if vAdgl: vA.makeGrid(g,parent=self,dim=cA.getGridDims)
for a in getattr(self,'annotations',()): g.add(a(self,cA.scale,vA.scale))
return g
def _cmpFakeItem(a,b):
'''t, z0, z1, x, y = a[:5]'''
return cmp((-a[1],a[3],a[0],-a[4]),(-b[1],b[3],b[0],-b[4]))
class _FakeGroup:
def __init__(self):
self._data = []
def add(self,what):
if what: self._data.append(what)
def value(self):
return self._data
def sort(self):
self._data.sort(_cmpFakeItem)
#for t in self._data: print t
class HorizontalLineChart3D(HorizontalLineChart):
_attrMap = AttrMap(BASE=HorizontalLineChart,
theta_x = AttrMapValue(isNumber, desc='dx/dz'),
theta_y = AttrMapValue(isNumber, desc='dy/dz'),
zDepth = AttrMapValue(isNumber, desc='depth of an individual series'),
zSpace = AttrMapValue(isNumber, desc='z gap around series'),
)
theta_x = .5
theta_y = .5
zDepth = 10
zSpace = 3
def calcPositions(self):
HorizontalLineChart.calcPositions(self)
nSeries = self._seriesCount
zSpace = self.zSpace
zDepth = self.zDepth
if self.categoryAxis.style=='parallel_3d':
_3d_depth = nSeries*zDepth+(nSeries+1)*zSpace
else:
_3d_depth = zDepth + 2*zSpace
self._3d_dx = self.theta_x*_3d_depth
self._3d_dy = self.theta_y*_3d_depth
def _calc_z0(self,rowNo):
zSpace = self.zSpace
if self.categoryAxis.style=='parallel_3d':
z0 = rowNo*(self.zDepth+zSpace)+zSpace
else:
z0 = zSpace
return z0
def _zadjust(self,x,y,z):
return x+z*self.theta_x, y+z*self.theta_y
def makeLines(self):
labelFmt = self.lineLabelFormat
P = range(len(self._positions))
if self.reversePlotOrder: P.reverse()
inFill = self.inFill
assert not inFill, "inFill not supported for 3d yet"
#if inFill:
#inFillY = self.categoryAxis._y
#inFillX0 = self.valueAxis._x
#inFillX1 = inFillX0 + self.categoryAxis._length
#inFillG = getattr(self,'_inFillG',g)
zDepth = self.zDepth
_zadjust = self._zadjust
theta_x = self.theta_x
theta_y = self.theta_y
F = _FakeGroup()
from utils3d import _make_3d_line_info
tileWidth = getattr(self,'_3d_tilewidth',None)
if not tileWidth and self.categoryAxis.style!='parallel_3d': tileWidth = 1
# Iterate over data rows.
for rowNo in P:
row = self._positions[rowNo]
n = len(row)
styleCount = len(self.lines)
styleIdx = rowNo % styleCount
rowStyle = self.lines[styleIdx]
rowColor = rowStyle.strokeColor
dash = getattr(rowStyle, 'strokeDashArray', None)
z0 = self._calc_z0(rowNo)
z1 = z0 + zDepth
if hasattr(self.lines[styleIdx], 'strokeWidth'):
strokeWidth = self.lines[styleIdx].strokeWidth
elif hasattr(self.lines, 'strokeWidth'):
strokeWidth = self.lines.strokeWidth
else:
strokeWidth = None
# Iterate over data columns.
if self.joinedLines:
if n:
x0, y0 = row[0]
for colNo in xrange(1,n):
x1, y1 = row[colNo]
_make_3d_line_info( F, x0, x1, y0, y1, z0, z1,
theta_x, theta_y,
rowColor, fillColorShaded=None, tileWidth=tileWidth,
strokeColor=None, strokeWidth=None, strokeDashArray=None,
shading=0.1)
x0, y0 = x1, y1
if hasattr(self.lines[styleIdx], 'symbol'):
uSymbol = self.lines[styleIdx].symbol
elif hasattr(self.lines, 'symbol'):
uSymbol = self.lines.symbol
else:
uSymbol = None
if uSymbol:
for colNo in xrange(n):
x1, y1 = row[colNo]
x1, y1 = _zadjust(x1,y1,z0)
symbol = uSymbol2Symbol(uSymbol,x1,y1,rowColor)
if symbol: F.add((2,z0,z0,x1,y1,symbol))
# Draw item labels.
for colNo in xrange(n):
x1, y1 = row[colNo]
x1, y1 = _zadjust(x1,y1,z0)
L = self._innerDrawLabel(rowNo, colNo, x1, y1)
if L: F.add((2,z0,z0,x1,y1,L))
F.sort()
g = Group()
map(lambda x,a=g.add: a(x[-1]),F.value())
return g
class VerticalLineChart(LineChart):
pass
def sample1():
drawing = Drawing(400, 200)
data = [
(13, 5, 20, 22, 37, 45, 19, 4),
(5, 20, 46, 38, 23, 21, 6, 14)
]
lc = HorizontalLineChart()
lc.x = 50
lc.y = 50
lc.height = 125
lc.width = 300
lc.data = data
lc.joinedLines = 1
lc.lines.symbol = makeMarker('FilledDiamond')
lc.lineLabelFormat = '%2.0f'
catNames = 'Jan Feb Mar Apr May Jun Jul Aug'.split(' ')
lc.categoryAxis.categoryNames = catNames
lc.categoryAxis.labels.boxAnchor = 'n'
lc.valueAxis.valueMin = 0
lc.valueAxis.valueMax = 60
lc.valueAxis.valueStep = 15
drawing.add(lc)
return drawing
class SampleHorizontalLineChart(HorizontalLineChart):
"Sample class overwriting one method to draw additional horizontal lines."
def demo(self):
"""Shows basic use of a line chart."""
drawing = Drawing(200, 100)
data = [
(13, 5, 20, 22, 37, 45, 19, 4),
(14, 10, 21, 28, 38, 46, 25, 5)
]
lc = SampleHorizontalLineChart()
lc.x = 20
lc.y = 10
lc.height = 85
lc.width = 170
lc.data = data
lc.strokeColor = colors.white
lc.fillColor = colors.HexColor(0xCCCCCC)
drawing.add(lc)
return drawing
def makeBackground(self):
g = Group()
g.add(HorizontalLineChart.makeBackground(self))
valAxis = self.valueAxis
valTickPositions = valAxis._tickValues
for y in valTickPositions:
y = valAxis.scale(y)
g.add(Line(self.x, y, self.x+self.width, y,
strokeColor = self.strokeColor))
return g
def sample1a():
drawing = Drawing(400, 200)
data = [
(13, 5, 20, 22, 37, 45, 19, 4),
(5, 20, 46, 38, 23, 21, 6, 14)
]
lc = SampleHorizontalLineChart()
lc.x = 50
lc.y = 50
lc.height = 125
lc.width = 300
lc.data = data
lc.joinedLines = 1
lc.strokeColor = colors.white
lc.fillColor = colors.HexColor(0xCCCCCC)
lc.lines.symbol = makeMarker('FilledDiamond')
lc.lineLabelFormat = '%2.0f'
catNames = 'Jan Feb Mar Apr May Jun Jul Aug'.split(' ')
lc.categoryAxis.categoryNames = catNames
lc.categoryAxis.labels.boxAnchor = 'n'
lc.valueAxis.valueMin = 0
lc.valueAxis.valueMax = 60
lc.valueAxis.valueStep = 15
drawing.add(lc)
return drawing
def sample2():
drawing = Drawing(400, 200)
data = [
(13, 5, 20, 22, 37, 45, 19, 4),
(5, 20, 46, 38, 23, 21, 6, 14)
]
lc = HorizontalLineChart()
lc.x = 50
lc.y = 50
lc.height = 125
lc.width = 300
lc.data = data
lc.joinedLines = 1
lc.lines.symbol = makeMarker('Smiley')
lc.lineLabelFormat = '%2.0f'
lc.strokeColor = colors.black
lc.fillColor = colors.lightblue
catNames = 'Jan Feb Mar Apr May Jun Jul Aug'.split(' ')
lc.categoryAxis.categoryNames = catNames
lc.categoryAxis.labels.boxAnchor = 'n'
lc.valueAxis.valueMin = 0
lc.valueAxis.valueMax = 60
lc.valueAxis.valueStep = 15
drawing.add(lc)
return drawing
def sample3():
drawing = Drawing(400, 200)
data = [
(13, 5, 20, 22, 37, 45, 19, 4),
(5, 20, 46, 38, 23, 21, 6, 14)
]
lc = HorizontalLineChart()
lc.x = 50
lc.y = 50
lc.height = 125
lc.width = 300
lc.data = data
lc.joinedLines = 1
lc.lineLabelFormat = '%2.0f'
lc.strokeColor = colors.black
lc.lines[0].symbol = makeMarker('Smiley')
lc.lines[1].symbol = NoEntry
lc.lines[0].strokeWidth = 2
lc.lines[1].strokeWidth = 4
catNames = 'Jan Feb Mar Apr May Jun Jul Aug'.split(' ')
lc.categoryAxis.categoryNames = catNames
lc.categoryAxis.labels.boxAnchor = 'n'
lc.valueAxis.valueMin = 0
lc.valueAxis.valueMax = 60
lc.valueAxis.valueStep = 15
drawing.add(lc)
return drawing
| fergalmoran/Chrome2Kindle | server/reportlab/graphics/charts/linecharts.py | Python | mit | 23,114 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-19 14:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('landing', '0004_plan_unlimited_users'),
]
operations = [
migrations.AlterField(
model_name='plan',
name='users',
field=models.IntegerField(default=0, verbose_name='número de usuários'),
),
]
| HeaTSolutions/CatracaDigital | CatracaDigital/landing/migrations/0005_auto_20160619_1124.py | Python | mit | 485 |
from django.conf.urls import patterns, include, url
from consumption import views
urlpatterns = patterns(
'',
url(r'^/?$', views.main),
)
| tbarbette/monitoring | consumption/urls.py | Python | gpl-2.0 | 159 |
#!/usr/bin/python3
# Copyright 2018 The Tulsi Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the schema generated for the shared database."""
import unittest
from symbol_cache_schema import SymbolCacheSchema
SHARED_MEMORY_DB = 'file::memory:?cache=shared'
class TestSymbolCacheSchema(unittest.TestCase):
def testSchemaColumns(self):
uuid = 'E56A19D3-CA4C-3760-8855-26C98A9E1865'
dsym_path = '/usr/bin' # Using a directory in place of dSYM.
arch = 'x86_64'
symbol_cache = SymbolCacheSchema(SHARED_MEMORY_DB)
connection = symbol_cache.connection
cursor = connection.cursor()
cursor.execute('INSERT INTO symbol_cache '
'VALUES("%s", "%s", "%s");' %
(uuid, dsym_path, arch))
connection.commit()
cursor.execute('SELECT uuid, dsym_path, architecture FROM symbol_cache;')
rows_inserted = cursor.fetchall()
self.assertEqual(len(rows_inserted), 1)
self.assertEqual(rows_inserted[0][0], uuid)
self.assertEqual(rows_inserted[0][1], dsym_path)
self.assertEqual(rows_inserted[0][2], arch)
if __name__ == '__main__':
unittest.main()
| bazelbuild/tulsi | src/TulsiGenerator/Scripts/symbol_cache_schema_tests.py | Python | apache-2.0 | 1,667 |
from .util import escape, escape_html
class BaseRenderer(object):
NAME = 'base'
def __init__(self):
self._methods = {}
def register(self, name, method):
self._methods[name] = method
def _get_method(self, name):
try:
return object.__getattribute__(self, name)
except AttributeError:
method = self._methods.get(name)
if not method:
                raise AttributeError('No renderer {!r}'.format(name))
return method
def finalize(self, data):
raise NotImplementedError(
'The renderer needs to implement the finalize method.')
class AstRenderer(BaseRenderer):
NAME = 'ast'
def text(self, text):
return {'type': 'text', 'text': text}
def link(self, link, children=None, title=None):
if isinstance(children, str):
children = [{'type': 'text', 'text': children}]
return {
'type': 'link',
'link': link,
'children': children,
'title': title,
}
def image(self, src, alt="", title=None):
return {'type': 'image', 'src': src, 'alt': alt, 'title': title}
def codespan(self, text):
return {'type': 'codespan', 'text': text}
def linebreak(self):
return {'type': 'linebreak'}
def inline_html(self, html):
return {'type': 'inline_html', 'text': html}
def heading(self, children, level):
return {'type': 'heading', 'children': children, 'level': level}
def newline(self):
return {'type': 'newline'}
def thematic_break(self):
return {'type': 'thematic_break'}
def block_code(self, children, info=None):
return {
'type': 'block_code',
'text': children,
'info': info
}
def block_html(self, children):
return {'type': 'block_html', 'text': children}
def list(self, children, ordered, level, start=None):
token = {
'type': 'list',
'children': children,
'ordered': ordered,
'level': level,
}
if start is not None:
token['start'] = start
return token
def list_item(self, children, level):
return {'type': 'list_item', 'children': children, 'level': level}
def _create_default_method(self, name):
def __ast(children):
return {'type': name, 'children': children}
return __ast
def _get_method(self, name):
try:
return super(AstRenderer, self)._get_method(name)
except AttributeError:
return self._create_default_method(name)
def finalize(self, data):
return list(data)
class HTMLRenderer(BaseRenderer):
NAME = 'html'
HARMFUL_PROTOCOLS = {
'javascript:',
'vbscript:',
'data:',
}
def __init__(self, escape=True, allow_harmful_protocols=None):
super(HTMLRenderer, self).__init__()
self._escape = escape
self._allow_harmful_protocols = allow_harmful_protocols
def _safe_url(self, url):
if self._allow_harmful_protocols is None:
schemes = self.HARMFUL_PROTOCOLS
elif self._allow_harmful_protocols is True:
schemes = None
else:
allowed = set(self._allow_harmful_protocols)
schemes = self.HARMFUL_PROTOCOLS - allowed
if schemes:
for s in schemes:
if url.lower().startswith(s):
url = '#harmful-link'
break
return url
def text(self, text):
if self._escape:
return escape(text)
return escape_html(text)
def link(self, link, text=None, title=None):
if text is None:
text = link
s = '<a href="' + self._safe_url(link) + '"'
if title:
s += ' title="' + escape_html(title) + '"'
return s + '>' + (text or link) + '</a>'
def image(self, src, alt="", title=None):
src = self._safe_url(src)
alt = escape_html(alt)
s = '<img src="' + src + '" alt="' + alt + '"'
if title:
s += ' title="' + escape_html(title) + '"'
return s + ' />'
def emphasis(self, text):
return '<em>' + text + '</em>'
def strong(self, text):
return '<strong>' + text + '</strong>'
def codespan(self, text):
return '<code>' + escape(text) + '</code>'
def linebreak(self):
return '<br />\n'
def inline_html(self, html):
if self._escape:
return escape(html)
return html
def paragraph(self, text):
return '<p>' + text + '</p>\n'
def heading(self, text, level):
tag = 'h' + str(level)
return '<' + tag + '>' + text + '</' + tag + '>\n'
def newline(self):
return ''
def thematic_break(self):
return '<hr />\n'
def block_text(self, text):
return text
def block_code(self, code, info=None):
html = '<pre><code'
if info is not None:
info = info.strip()
if info:
lang = info.split(None, 1)[0]
lang = escape_html(lang)
html += ' class="language-' + lang + '"'
return html + '>' + escape(code) + '</code></pre>\n'
def block_quote(self, text):
return '<blockquote>\n' + text + '</blockquote>\n'
def block_html(self, html):
if not self._escape:
return html + '\n'
return '<p>' + escape(html) + '</p>\n'
def block_error(self, html):
return '<div class="error">' + html + '</div>\n'
def list(self, text, ordered, level, start=None):
if ordered:
html = '<ol'
if start is not None:
html += ' start="' + str(start) + '"'
return html + '>\n' + text + '</ol>\n'
return '<ul>\n' + text + '</ul>\n'
def list_item(self, text, level):
return '<li>' + text + '</li>\n'
def finalize(self, data):
return ''.join(data)
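# Example (hypothetical, exercising only the methods defined above):
#
#     renderer = HTMLRenderer()
#     renderer.heading('Hello', 2)                  # -> '<h2>Hello</h2>\n'
#     renderer.link('https://example.com', 'site')  # -> '<a href="https://example.com">site</a>'
#     renderer.image('x.png', alt='x')              # -> '<img src="x.png" alt="x" />'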
| lepture/mistune | mistune/renderers.py | Python | bsd-3-clause | 6,095 |
# -*- coding: utf-8 -*-
########################################
### Minecraft Server Maker #############
### Create Your Own Minecraft Server ###
########################################
from commun import *
import os
import subprocess as sub
if os.path.isfile("install.py"):
os.remove("install.py")
else:
pass
bcl = 1
nbnotfound = 1
clear()
while bcl:
print("###############################\n### Minecraft Server Maker ###\n##############################")
a=input("[1] Create Server [2] Quit : ")
if a == "1":
clear()
print("Starting creation of the server")
load(4)
from base import main
main()
bcl = 0
elif a == "2":
print("Good Bye ;)")
wait(1)
clear()
exit(0)
else:
clear()
print("Command not found\n:(")
print("Error #", nbnotfound)
nbnotfound += 1
if nbnotfound > 20:
print("Sorry but the program gona shutdown beceause you enter 20 false result !!!!! :/")
wait(2)
clear()
print("Good Bye")
exit(0)
| dinnozap/MinecraftServerMaker | main.py | Python | apache-2.0 | 973 |
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 26 19:32:49 2018
@author: JinJheng
"""
x,y=map(int,input().split(','))
m,n=map(int,input().split(','))
if n > y:
greater = n
else:
greater = y
while(True):
if((greater % n == 0) and (greater % y == 0)):
q = greater
break
greater += 1
# scale both numerators to the common denominator q
o = q//y*x
r = q//n*m
p = o + r
def compute():
    # reduce p/q by the greatest common divisor of its numerator and denominator
    if p > q:
        small = q
    else:
        small = p
    ans = 1
    for i in range(1, small+1):
        if (p % i == 0) and (q % i == 0):
            ans = i
    # e.g. inputs "1,2" and "1,3" print "1/2 + 1/3 = 5/6"
    print('{}/{} + {}/{} = {}/{}'.format(x, y, m, n, p//ans, q//ans))
compute() | KuKuKai/104asiajava | 509.py | Python | mit | 712 |
import gameconfig
from random import randint
class SoundPlayer:
def __init__(self, sound_arg, **kwargs):
if gameconfig.SOUND:
from pygame import mixer
self.mixer = mixer
self.mixer.init()
init_loop = kwargs.get('loop', False)
if type(sound_arg) is dict:
sound = sound_arg['name'] + str(randint(1, sound_arg['number']))
else:
sound = sound_arg
if init_loop is not True:
self.volume = gameconfig.VOLUME['SOUND_FX']
self.loop = 0
else:
self.volume = gameconfig.VOLUME['MUSIC']
self.loop = -1
gameconfig.CURRENT_TRACK = self
self.sound_path = 'data/sound/' + sound + '.wav'
self.sound = self.mixer.Sound(self.sound_path)
self.sound.set_volume(self.volume)
else:
            # if sound is disabled, use this empty object as the current track
gameconfig.CURRENT_TRACK = self
def play(self):
if hasattr(self, 'sound'):
self.sound.play(loops = self.loop)
def stop(self):
if hasattr(self, 'sound'):
self.sound.stop()
def fadeout(self, time):
if hasattr(self, 'sound'):
self.sound.fadeout(time)
def switch_track(self, new_track):
if hasattr(self, 'sound'):
self.sound.fadeout(2000)
self.sound_path = 'data/sound/' + new_track + '.wav'
self.sound = self.mixer.Sound(self.sound_path)
self.sound.set_volume(self.volume)
self.play()
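# Example usage (hypothetical; assumes pygame is installed, gameconfig.SOUND is
# True and the WAV files exist under data/sound/):
#
#     music = SoundPlayer('theme', loop=True)   # loops data/sound/theme.wav
#     music.play()
#     music.switch_track('battle')              # fades out, then plays battle.wav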
| nerdyLawman/officeHack | src/sound/SoundPlayer.py | Python | gpl-3.0 | 1,632 |
"""
Sarsa is a online updating method for Reinforcement learning.
Unlike Q learning which is a offline updating method, Sarsa is updating while in the current trajectory.
You will see the sarsa is more coward when punishment is close because it cares about all behaviours,
while q learning is more brave because it only cares about maximum behaviour.
"""
from maze_env import Maze
from RL_brain import DeepQNetwork
def run_maze():
step = 0
for episode in range(10):
# initial observation
print "episode===============================================",episode
observation = env.reset()
print "observation",observation
while True:
# fresh env
env.render()
# RL choose action based on observation
action = RL.choose_action(observation)
print "action",action
# RL take action and get next observation and reward
observation_, reward, done = env.step(action)
print "observation_, reward, done",observation_, reward, done
RL.store_transition(observation, action, reward, observation_)
if (step > 200) and (step % 5 == 0):
RL.learn()
# swap observation
observation = observation_
# break while loop when end of this episode
if done:
break
step += 1
# end of game
print('game over')
env.destroy()
if __name__ == "__main__":
# maze game
env = Maze()
RL = DeepQNetwork(env.n_actions, env.n_features,
learning_rate=0.01,
reward_decay=0.9,
e_greedy=0.9,
replace_target_iter=200,
memory_size=2000,
# output_graph=True
)
env.after(100, run_maze)
env.mainloop()
    RL.plot_cost()
| DaniellaAngel/MachineLearning | DQN/run_this.py | Python | apache-2.0 | 1,923 |
#! /usr/bin/env python
# Test program for jaxml
#
# (C) Jerome Alet <alet@librelogiciel.com> 2000
# You're welcome to redistribute this software under the
# terms of the GNU General Public Licence version 2.0
# or, at your option, any higher version.
#
# You can read the complete GNU GPL in the file COPYING
# which should come along with this software, or visit
# the Free Software Foundation's WEB site http://www.fsf.org
#
# $Id: test.py,v 1.13 2003/02/13 14:36:13 jerome Exp $
#
import sys
# import the jaxml module from the parent directory
sys.path.insert(0, "..")
import jaxml
print "\n\n==== TESTING XML ====\n"
# now we create an instance
# we may optionally pass a version and an encoding arguments.
x = jaxml.XML_document()
# first tag, with different attributes
# numeric values are automatically quoted
x.sometag(yes = "NO", some = "a bit", value = 5)
# this one, and till the end will be inside the previous one
x.anothertag("this tag and till the end will be inside the <sometag> ... </sometag>")
# here we save the current position
x._push()
# here some nested tags
x.whatever()
x.ilikepython()
x._text("Here we are inside <whatever><ilikepython> ... </ilikepython></whatever>")
# the following tag has nothing but attributes, we must save and restore its
# position because it is followed by another tag (we don't want to enclose the following tag)
x._push()
x.someattributetag(attr = "Hey ! I'm the attribute !")
x._pop()
x.justatest("This is just a test", dummy="YES")
# here we want to continue our document
# at the same indentation level as <whatever>
x._pop()
x.dummytag("we have just escaped", value = "Fine !")
x.dummytwo("Since the previous tag and this one were called with an unnamed first parameter\nwe didn't need _push() nor _pop()")
# here we insert plain text
x._text("Near the end")
# here we insert some text just like:
# <mytag>Some dummy text</mytag>
x.mytag("Some dummy text, and no tag attributes")
# here some beautiful tag nesting
x.onetag(message="This is").anotherone("a beautiful").deeper(message = "tag nesting possibility")
# here the naming space notation for <Space:Tag>...</Space:Tag>
x.namingspace.onetag("This is how to use the naming space notation Space:Tag", wonderful="YES")
# here just a tag with attributes, but nothing in it
# we don't need to _push() and _pop() because it isn't followed by anything
x.attributetag(content = "I've got nothing enclosed in me", index = 9)
# here we save to a file
x._output("sampleXML.xml")
# but we may as well output it to the screen
print x
# test the new templating facility
# I urge you to read the following lines and look carefully at the result
# to see how this beautiful thing works !
x._text("Now we will replace some content with the new possibility of using a document as a mapping.")
x._text("This may be useful for templating without a template file, or replacing some chars with their equivalent SGML entities for example:")
x._text("Here are three accented characters, two of them which will be replaced\nwith their equivalent SGML entities: àéè")
x["nothing enclosed"] = "something enclosed"
x["SGML"] = "XML"
x["attributetag"] = "modifiedattributename"
x["é"] = "é";
x["è"] = "è";
x["à"] = "à";
# this is also available as readable attributes
sys.stderr.write('x["è"] = %s\n' % x["è"])
# and we can also delete them
del x["è"]
# or use the str() or repr() builtin functions
mydoc = "With str() or repr(), my modified document looks like:\n" + str(x) + "And that's all folks !"
print mydoc
# Now we want to test the HTML output
print "\n\n==== TESTING HTML ====\n"
page = jaxml.HTML_document()
# here we begin our html document
page.html()
# we must do a push and a pop in order for the <body> tags
# to not be enclosed between <head> and </head>
page._push()
# build the head of the document
page.head()
#
#
# Other meta tags should work fine
page._meta(name="GENERATOR", content="jaxml.py v2.24 from Jerome Alet - alet@librelogiciel.com")
page._meta(name="DESCRIPTION", content="A CGI document, to test jaxml.py")
page._meta(name="KEYWORDS", content="python, jaxml, linux")
page.title("A CGI test document")
# here we exit from the <head> ... </head>
page._pop()
# we begin the body
page.body(bgcolor="pink")
# here we insert a dumb text
page._text("A small text")
# we do a push to be able to exit from the <form> ... </form>
page._push()
page.form(action="/cgi-bin/jerome/ok.py", method="POST")
page.h1("Form's title")
# to be able to exit from <select> ... </select>
page._push()
page.select(name="choice", size="1", multiple="multiple")
page.option("Choice number 1")
page.option("Choice number 2", selected="selected")
page.option("Choice number 3")
# exit from <select> ... </select>
page._pop()
page.h3("Second part of the Form")
page._br()
page._textinput(name="dumbstring", size="50")
page._submit()
page._reset()
# here we exit from the <form> ... </form>
page._pop()
page._text("here we should be outside of the form")
page._text("and there we should be one the same line visually but on two different lines in the html file")
page.a("Click on Me", href="http://www.slashdot.org")
page.pre("Hello !!!\n\t\tBye Bye\n\n")
page._text("Here we should be outside of the PRE.../PRE tag")
# then we insert some text
page._text("Just below you will see some lines of text which are included from a template file, with variables substitution:")
page._br()
# then we include the template file
page._template("template.htt", font_color='red', link_to_my_homepage="<a href='http://www.librelogiciel.com/'>My website</a>", another_variable="<br /><center>Thank you for trying</center>")
# then some separation
page.hr(width="33%", noshade="noshade")
# here we do the output to the screen
page._output()
# and here we do the output to a file
page._output("sampleHTML.html")
# Now we want to test the CGI/HTML output
print "\n\n==== TESTING CGI ====\n"
# just some dummy values
page = jaxml.CGI_document(encoding = "utf-8", content_type="text/html", version = "3.0")
# to do a redirection, just do
# page.set_redirect("http://www.librelogiciel.com/")
# then just call page.output("")
# here again we can do that whenever we want (before output)
# text/html is the default for _set_content_type()
#page._set_content_type("application/pdf")
# to define a pragma, just use:
# page._set_pragma("pragma_name")
# we can do that whenever we want, (before output)
# to define an expiration date, just use:
# page._set_expires("expiration_date")
# we can do that whenever we want, (before output)
# Maybe this should be done by the class's __init__ function
# but I don't think so in order for us to have more control
page._default_header(title = 'a CGI document')
# we begin the body
page.body(bgcolor="pink")
# here we insert a dumb text
page._text("A small text")
# we do a push to be able to exit from the <form> ... </form>
page._push()
page.form(action="/cgi-bin/jerome/ok.py", method="POST")
page.h1("Form's title")
# to be able to exit from <select> ... </select>
page._push()
page.select(name="choice", size="1")
page.option("Choice number 1")
page.option("Choice number 2")
page.option("Choice number 3", selected="selected")
# exit from <select> ... </select>
page._pop()
page.h3("Second part of the Form")
page._br()
page._textinput(name="dumbstring", size="50")
page._submit()
page._reset()
# here we exit from the <form> ... </form>
page._pop()
page._text("here we should be outside of the form")
page._text("and there we should be one the same line visually but on two different lines in the html file")
page.a("Click on Me", href="http://www.slashdot.org")
page.pre("Hello !!!\n\t\tBye Bye\n\n")
page._text("Here we should be outside of the PRE.../PRE tag")
# here we define a debug file which will receive the CGI output too
page._set_debug("CGI_debug.html")
# here we do the output
# for a CGI script, give an empty string (for stdout)
# or None, or nothing, unless you want to debug (give a filename) or a file object
page._output("")
# Now we want to test the arithmetic operations
print "\n\n==== TESTING ARITHMETIC ====\n"
print "page + page = %s" % (page + page)
print "page + 'string' = %s" % (page + 'string')
print "'string' + page = %s" % ('string' + page)
print "page * 2 = %s" % (page * 2)
print "2 * page = %s" % (2 * page)
# new name spaces support
x = jaxml.XML_document()
x.tag("hello", name="blah")
x.another("bloum",
{ "xmlns": { "tal" : "http://xml.zope.org/namespaces/tal",
"metal": "http://xml.zope.org/namespaces/metal"},
"metal": {"use-macro" : "here/StandardLookAndFeel/macros/master"},
"" : { "class" : "header"}
})
x._push("save")
x.otherone({ "xmlns": { "tal" : "http://xml.zope.org/namespaces/tal",
"metal": "http://xml.zope.org/namespaces/metal"},
"metal": {"use-macro" : "here/StandardLookAndFeel/macros/master"},
"" : { "class" : "header"}
})
x._push()
x.inside(name="inside")
x.atag()
x._text("blah")
x._pop("save")
x.outside(attrib="None")
print x
| denys-duchier/Scolar | config/softs/jaxml-3.01/test/test.py | Python | gpl-2.0 | 9,215 |
# -*- coding: utf-8 -*-
# Scrapy settings for maiziedu_spider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'maiziedu_spider'
SPIDER_MODULES = ['maiziedu_spider.spiders']
NEWSPIDER_MODULE = 'maiziedu_spider.spiders'
ITEM_PIPELINES = {
# 'scrapy.pipelines.images.ImagesPipeline': 100,
# 'maiziedu_spider.pipelines.JsonWriterPipeline': 300,
'maiziedu_spider.pipelines.MysqlPipeline': 600,
'maiziedu_spider.pipelines.MyImagesPipeline': 1,
}
IMAGES_URLS_FIELD = 'img_url'
IMAGES_STORE = r'download_images'
USER_AGENTS = [
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
"Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
"Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
"Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
"Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
"Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
"Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
"Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
]
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'maiziedu_spider (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'maiziedu_spider.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
'maiziedu_spider.middlewares.RandomUserAgent': 1,
}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'maiziedu_spider.pipelines.SomePipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| ejoful/scrapy_example | maiziedu_spider/maiziedu_spider/settings.py | Python | gpl-3.0 | 5,476 |
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_user_quarantine
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_user_quarantine.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_user_quarantine_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'user_quarantine': {
'quarantine': 'enable',
},
'vdom': 'root'}
is_error, changed, response = fortios_user_quarantine.fortios_user(input_data, fos_instance)
expected_data = {
'quarantine': 'enable',
}
set_method_mock.assert_called_with('user', 'quarantine', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_user_quarantine_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'user_quarantine': {
'quarantine': 'enable',
},
'vdom': 'root'}
is_error, changed, response = fortios_user_quarantine.fortios_user(input_data, fos_instance)
expected_data = {
'quarantine': 'enable',
}
set_method_mock.assert_called_with('user', 'quarantine', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_user_quarantine_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'user_quarantine': {
'quarantine': 'enable',
},
'vdom': 'root'}
is_error, changed, response = fortios_user_quarantine.fortios_user(input_data, fos_instance)
expected_data = {
'quarantine': 'enable',
}
set_method_mock.assert_called_with('user', 'quarantine', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_user_quarantine_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'user_quarantine': {
'random_attribute_not_valid': 'tag',
'quarantine': 'enable',
},
'vdom': 'root'}
is_error, changed, response = fortios_user_quarantine.fortios_user(input_data, fos_instance)
expected_data = {
'quarantine': 'enable',
}
set_method_mock.assert_called_with('user', 'quarantine', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
| Dhivyap/ansible | test/units/modules/network/fortios/test_fortios_user_quarantine.py | Python | gpl-3.0 | 5,350 |
# -*- coding: utf-8 -*-
"""
Optimization algorithms for OT
"""
# Author: Remi Flamary <remi.flamary@unice.fr>
# Titouan Vayer <titouan.vayer@irisa.fr>
#
# License: MIT License
import numpy as np
from scipy.optimize.linesearch import scalar_search_armijo
from .lp import emd
from .bregman import sinkhorn
# The corresponding scipy function does not work for matrices
def line_search_armijo(f, xk, pk, gfk, old_fval,
args=(), c1=1e-4, alpha0=0.99):
"""
Armijo linesearch function that works with matrices
    find an approximate minimum of f(xk+alpha*pk) that satisfies the
    Armijo conditions.
Parameters
----------
f : callable
loss function
xk : ndarray
initial position
pk : ndarray
descent direction
gfk : ndarray
gradient of f at xk
old_fval : float
loss value at xk
args : tuple, optional
arguments given to f
c1 : float, optional
c1 const in armijo rule (>0)
alpha0 : float, optional
initial step (>0)
Returns
-------
alpha : float
step that satisfy armijo conditions
fc : int
nb of function call
fa : float
loss value at step alpha
"""
xk = np.atleast_1d(xk)
fc = [0]
def phi(alpha1):
fc[0] += 1
return f(xk + alpha1 * pk, *args)
if old_fval is None:
phi0 = phi(0.)
else:
phi0 = old_fval
derphi0 = np.sum(pk * gfk) # Quickfix for matrices
alpha, phi1 = scalar_search_armijo(
phi, phi0, derphi0, c1=c1, alpha0=alpha0)
return alpha, fc[0], phi1
def solve_linesearch(cost, G, deltaG, Mi, f_val,
armijo=True, C1=None, C2=None, reg=None, Gc=None, constC=None, M=None):
"""
Solve the linesearch in the FW iterations
Parameters
----------
cost : method
Cost in the FW for the linesearch
G : ndarray, shape(ns,nt)
The transport map at a given iteration of the FW
deltaG : ndarray (ns,nt)
Difference between the optimal map found by linearization in the FW algorithm and the value at a given iteration
Mi : ndarray (ns,nt)
Cost matrix of the linearized transport problem. Corresponds to the gradient of the cost
f_val : float
Value of the cost at G
armijo : bool, optional
        If True the step of the line-search is found via an Armijo search. Else a closed form is used.
        If there are convergence issues use False.
C1 : ndarray (ns,ns), optional
Structure matrix in the source domain. Only used and necessary when armijo=False
C2 : ndarray (nt,nt), optional
Structure matrix in the target domain. Only used and necessary when armijo=False
reg : float, optional
Regularization parameter. Only used and necessary when armijo=False
Gc : ndarray (ns,nt)
Optimal map found by linearization in the FW algorithm. Only used and necessary when armijo=False
constC : ndarray (ns,nt)
Constant for the gromov cost. See [24]. Only used and necessary when armijo=False
M : ndarray (ns,nt), optional
Cost matrix between the features. Only used and necessary when armijo=False
Returns
-------
alpha : float
The optimal step size of the FW
fc : int
nb of function call. Useless here
f_val : float
The value of the cost for the next iteration
References
----------
.. [24] Vayer Titouan, Chapel Laetitia, Flamary R{\'e}mi, Tavenard Romain
and Courty Nicolas
"Optimal Transport for structured data with application on graphs"
International Conference on Machine Learning (ICML). 2019.
"""
if armijo:
alpha, fc, f_val = line_search_armijo(cost, G, deltaG, Mi, f_val)
    else:  # requires symmetric matrices
dot1 = np.dot(C1, deltaG)
dot12 = dot1.dot(C2)
a = -2 * reg * np.sum(dot12 * deltaG)
b = np.sum((M + reg * constC) * deltaG) - 2 * reg * (np.sum(dot12 * G) + np.sum(np.dot(C1, G).dot(C2) * deltaG))
c = cost(G)
alpha = solve_1d_linesearch_quad(a, b, c)
fc = None
f_val = cost(G + alpha * deltaG)
return alpha, fc, f_val
def cg(a, b, M, reg, f, df, G0=None, numItermax=200, numItermaxEmd=100000,
stopThr=1e-9, stopThr2=1e-9, verbose=False, log=False, **kwargs):
"""
Solve the general regularized OT problem with conditional gradient
The function solves the following optimization problem:
.. math::
\gamma = arg\min_\gamma <\gamma,M>_F + reg*f(\gamma)
s.t. \gamma 1 = a
\gamma^T 1= b
\gamma\geq 0
where :
- M is the (ns,nt) metric cost matrix
- :math:`f` is the regularization term ( and df is its gradient)
- a and b are source and target weights (sum to 1)
The algorithm used for solving the problem is conditional gradient as discussed in [1]_
Parameters
----------
a : ndarray, shape (ns,)
samples weights in the source domain
b : ndarray, shape (nt,)
        samples weights in the target domain
M : ndarray, shape (ns, nt)
loss matrix
reg : float
Regularization term >0
G0 : ndarray, shape (ns,nt), optional
initial guess (default is indep joint density)
numItermax : int, optional
Max number of iterations
numItermaxEmd : int, optional
Max number of iterations for emd
stopThr : float, optional
        Stop threshold on the relative variation (>0)
    stopThr2 : float, optional
        Stop threshold on the absolute variation (>0)
verbose : bool, optional
Print information along iterations
log : bool, optional
record log if True
**kwargs : dict
Parameters for linesearch
Returns
-------
gamma : (ns x nt) ndarray
Optimal transportation matrix for the given parameters
log : dict
log dictionary return only if log==True in parameters
References
----------
.. [1] Ferradans, S., Papadakis, N., Peyré, G., & Aujol, J. F. (2014). Regularized discrete optimal transport. SIAM Journal on Imaging Sciences, 7(3), 1853-1882.
See Also
--------
    ot.lp.emd : Unregularized optimal transport
ot.bregman.sinkhorn : Entropic regularized optimal transport
"""
loop = 1
if log:
log = {'loss': []}
if G0 is None:
G = np.outer(a, b)
else:
G = G0
def cost(G):
return np.sum(M * G) + reg * f(G)
f_val = cost(G)
if log:
log['loss'].append(f_val)
it = 0
if verbose:
print('{:5s}|{:12s}|{:8s}|{:8s}'.format(
'It.', 'Loss', 'Relative loss', 'Absolute loss') + '\n' + '-' * 48)
print('{:5d}|{:8e}|{:8e}|{:8e}'.format(it, f_val, 0, 0))
while loop:
it += 1
old_fval = f_val
# problem linearization
Mi = M + reg * df(G)
# set M positive
Mi += Mi.min()
# solve linear program
Gc = emd(a, b, Mi, numItermax=numItermaxEmd)
deltaG = Gc - G
# line search
alpha, fc, f_val = solve_linesearch(cost, G, deltaG, Mi, f_val, reg=reg, M=M, Gc=Gc, **kwargs)
G = G + alpha * deltaG
# test convergence
if it >= numItermax:
loop = 0
abs_delta_fval = abs(f_val - old_fval)
relative_delta_fval = abs_delta_fval / abs(f_val)
if relative_delta_fval < stopThr or abs_delta_fval < stopThr2:
loop = 0
if log:
log['loss'].append(f_val)
if verbose:
if it % 20 == 0:
print('{:5s}|{:12s}|{:8s}|{:8s}'.format(
'It.', 'Loss', 'Relative loss', 'Absolute loss') + '\n' + '-' * 48)
print('{:5d}|{:8e}|{:8e}|{:8e}'.format(it, f_val, relative_delta_fval, abs_delta_fval))
if log:
return G, log
else:
return G
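# Minimal usage sketch for cg() (hypothetical data; f/df implement the squared
# Frobenius norm regularizer R(G) = 0.5 * ||G||_F^2 and its gradient):
#   import numpy as np
#   a = np.array([0.5, 0.5])
#   b = np.array([0.5, 0.5])
#   M = np.array([[0., 1.], [1., 0.]])
#   G = cg(a, b, M, reg=1e-1, f=lambda G: 0.5 * np.sum(G ** 2), df=lambda G: G)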
def gcg(a, b, M, reg1, reg2, f, df, G0=None, numItermax=10,
numInnerItermax=200, stopThr=1e-9, stopThr2=1e-9, verbose=False, log=False):
"""
Solve the general regularized OT problem with the generalized conditional gradient
The function solves the following optimization problem:
.. math::
\gamma = arg\min_\gamma <\gamma,M>_F + reg1\cdot\Omega(\gamma) + reg2\cdot f(\gamma)
s.t. \gamma 1 = a
\gamma^T 1= b
\gamma\geq 0
where :
- M is the (ns,nt) metric cost matrix
- :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
- :math:`f` is the regularization term ( and df is its gradient)
- a and b are source and target weights (sum to 1)
The algorithm used for solving the problem is the generalized conditional gradient as discussed in [5,7]_
Parameters
----------
a : ndarray, shape (ns,)
samples weights in the source domain
    b : ndarray, shape (nt,)
        samples weights in the target domain
M : ndarray, shape (ns, nt)
loss matrix
reg1 : float
Entropic Regularization term >0
reg2 : float
Second Regularization term >0
G0 : ndarray, shape (ns, nt), optional
initial guess (default is indep joint density)
numItermax : int, optional
Max number of iterations
numInnerItermax : int, optional
Max number of iterations of Sinkhorn
stopThr : float, optional
        Stop threshold on the relative variation (>0)
    stopThr2 : float, optional
        Stop threshold on the absolute variation (>0)
verbose : bool, optional
Print information along iterations
log : bool, optional
record log if True
Returns
-------
gamma : ndarray, shape (ns, nt)
Optimal transportation matrix for the given parameters
log : dict
log dictionary return only if log==True in parameters
References
----------
.. [5] N. Courty; R. Flamary; D. Tuia; A. Rakotomamonjy, "Optimal Transport for Domain Adaptation," in IEEE Transactions on Pattern Analysis and Machine Intelligence , vol.PP, no.99, pp.1-1
.. [7] Rakotomamonjy, A., Flamary, R., & Courty, N. (2015). Generalized conditional gradient: analysis of convergence and applications. arXiv preprint arXiv:1510.06567.
See Also
--------
ot.optim.cg : conditional gradient
"""
loop = 1
if log:
log = {'loss': []}
if G0 is None:
G = np.outer(a, b)
else:
G = G0
def cost(G):
return np.sum(M * G) + reg1 * np.sum(G * np.log(G)) + reg2 * f(G)
f_val = cost(G)
if log:
log['loss'].append(f_val)
it = 0
if verbose:
print('{:5s}|{:12s}|{:8s}|{:8s}'.format(
'It.', 'Loss', 'Relative loss', 'Absolute loss') + '\n' + '-' * 48)
print('{:5d}|{:8e}|{:8e}|{:8e}'.format(it, f_val, 0, 0))
while loop:
it += 1
old_fval = f_val
# problem linearization
Mi = M + reg2 * df(G)
# solve linear program with Sinkhorn
# Gc = sinkhorn_stabilized(a,b, Mi, reg1, numItermax = numInnerItermax)
Gc = sinkhorn(a, b, Mi, reg1, numItermax=numInnerItermax)
deltaG = Gc - G
# line search
        dcost = Mi + reg1 * (1 + np.log(G))  # gradient of the full objective, including the entropic term
alpha, fc, f_val = line_search_armijo(cost, G, deltaG, dcost, f_val)
G = G + alpha * deltaG
# test convergence
if it >= numItermax:
loop = 0
abs_delta_fval = abs(f_val - old_fval)
relative_delta_fval = abs_delta_fval / abs(f_val)
if relative_delta_fval < stopThr or abs_delta_fval < stopThr2:
loop = 0
if log:
log['loss'].append(f_val)
if verbose:
if it % 20 == 0:
print('{:5s}|{:12s}|{:8s}|{:8s}'.format(
'It.', 'Loss', 'Relative loss', 'Absolute loss') + '\n' + '-' * 48)
print('{:5d}|{:8e}|{:8e}|{:8e}'.format(it, f_val, relative_delta_fval, abs_delta_fval))
if log:
return G, log
else:
return G
def solve_1d_linesearch_quad(a, b, c):
"""
For any convex or non-convex 1d quadratic function f, solve on [0,1] the following problem:
.. math::
        \arg\min_{x \in [0,1]} f(x) = ax^{2} + bx + c
Parameters
----------
a,b,c : float
The coefficients of the quadratic function
Returns
-------
x : float
The optimal value which leads to the minimal cost
"""
f0 = c
df0 = b
f1 = a + f0 + df0
if a > 0: # convex
minimum = min(1, max(0, np.divide(-b, 2.0 * a)))
return minimum
else: # non convex
if f0 > f1:
return 1
else:
return 0
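# Illustrative values: f(x) = x**2 - x (a=1, b=-1, c=0) is convex, so the
# clipped interior minimizer 0.5 is returned; f(x) = -x**2 (a=-1, b=0, c=0)
# is concave with f(0)=0 > f(1)=-1, so the boundary point 1 is returned.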
| rflamary/POT | ot/optim.py | Python | mit | 12,806 |
"""
A simple console for playing with the tango parser
"""
import sys
import os
if __name__ == "__main__":
tangodir = os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir)
sys.path.append(tangodir)
import tangolib.parser
def process_command(cmd):
if cmd == ":quit":
print("bye bye !")
sys.exit(0)
else:
print("Unknown command '{}'".format(cmd))
def parse_line(line):
p = tangolib.parser.Parser()
try:
result = p.parse_from_string(line)
print(result)
except tangolib.parser.ParseError as e:
print("Parsing failed:")
print(e)
if __name__ == "__main__":
print("Tango parser console")
while True:
line = input("> ")
if line.startswith(":"):
process_command(line)
else:
parse_line(line)
| fredokun/tango | src/tangolib/parser_console.py | Python | mit | 862 |
from kivy.uix.bubble import Bubble
from kivy.properties import StringProperty
from kivy.clock import Clock
from kivy.animation import Animation
from kivy.core.window import Window
class InfoBubble(Bubble):
'''Bubble to be used to display short Help Information'''
message = StringProperty('')
'''Message to be displayed
:data:`message` is a :class:`~kivy.properties.StringProperty`
'''
def show(self, pos, duration, width=None):
'''Animate the bubble into position'''
if width:
self.width = width
        # wait for the bubble to adjust its size to the text, then animate
Clock.schedule_once(lambda dt: self._show(pos, duration))
def _show(self, pos, duration):
        '''Show the InfoBubble at pos, animated over the given duration.
        '''
def on_stop(*l):
if duration:
Clock.schedule_once(self.hide, duration + .5)
self.opacity = 0
arrow_pos = self.arrow_pos
if arrow_pos[0] in ('l', 'r'):
pos = pos[0], pos[1] - (self.height/2)
else:
pos = pos[0] - (self.width/2), pos[1]
self.limit_to = Window
self.pos = pos
Window.add_widget(self)
anim = Animation(opacity=1, d=0.75)
anim.bind(on_complete=on_stop)
anim.cancel_all(self)
anim.start(self)
def hide(self, *dt):
''' Auto fade out the Bubble
'''
def on_stop(*l):
Window.remove_widget(self)
anim = Animation(opacity=0, d=0.75)
anim.bind(on_complete=on_stop)
anim.cancel_all(self)
anim.start(self)
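    # Illustrative usage (coordinates and timings are made up):
    #   bubble = InfoBubble(message='Saved!')
    #   bubble.show(pos=(400, 300), duration=2)  # fades in, auto-hides ~2.5 s later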
| 5y/kivy-designer | designer/uix/info_bubble.py | Python | mit | 1,646 |
import json
from util import hook, http
@hook.command(autohelp=False)
def mcstatus(inp):
"""mcstatus -- Checks the status of various Mojang (the creators of Minecraft) servers."""
try:
request = http.get("http://status.mojang.com/check")
except (http.URLError, http.HTTPError) as e:
return "Unable to get Minecraft server status: {}".format(e)
    # let's just reformat this data to get it into a nice format
data = json.loads(request.replace("}", "").replace("{", "").replace("]", "}").replace("[", "{"))
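    # e.g. '[{"minecraft.net":"green"},{"session.minecraft.net":"yellow"}]'
    # becomes '{"minecraft.net":"green","session.minecraft.net":"yellow"}' --
    # one flat dict keyed by server name (payload shown is illustrative).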
    out = ""
# use a loop so we don't have to update it if they add more servers
green = []
yellow = []
red = []
for server, status in data.items():
if status == "green":
green.append(server)
elif status == "yellow":
yellow.append(server)
else:
red.append(server)
    if green:
        out = "\x033\x02Online\x02\x0f: " + ", ".join(green)
    if yellow:
        out += " "
        out += "\x02Issues\x02: " + ", ".join(yellow)
    if red:
        out += " "
        out += "\x034\x02Offline\x02\x0f: " + ", ".join(red)
return "\x0f" + out.replace(".mojang.com", ".mj") \
.replace(".minecraft.net", ".mc")
| Red-M/CloudBot-legacy | plugins/minecraft_status.py | Python | gpl-3.0 | 1,296 |
from csc.divisi.tensor import Tensor
import tables
from itertools import izip, count
from csc.divisi.pyt_utils import get_pyt_handle
# I noticed the following in the user manual: "Note that, from
# PyTables 1.1 on, you can nest several iterators over the same
# table." This looks worrisome; we may want to avoid returning
# generators.
class PTTensor(Tensor):
is_sparse = True
@classmethod
def open(cls, filename, pt_path='/', pt_name='tensor'):
'''
Open an existing PyTables tensor.
pt_path and pt_name are the "path" and "filename" within the
PyTables file.
Raises a tables.NoSuchNodeError if the table doesn't exist.
(FIXME: it will still create a new, empty file.)
'''
fileh = get_pyt_handle(filename)
table = fileh.getNode(pt_path, pt_name)
return cls(table)
@classmethod
def create(cls, filename, ndim, pt_path='/', pt_name='tensor', filters=None):
'''
Create a new PyTables tensor.
pt_path and pt_name are the "path" and "filename" within the
PyTables file.
Raises tables.NodeError if the table already exists.
'''
fileh = get_pyt_handle(filename)
table = fileh.createTable(pt_path, pt_name, cls.descriptor(ndim), filters=filters)
return cls(table)
def __init__(self, table):
'''
This tensor stores its data in a densely-packed PyTables table.
Generally, you will call the :meth:`create` or :meth:`open`
class methods rather than calling the constructor
directly.
Let's create a new tensor:
>>> from tempfile import mktemp # for uniqueness, not security
>>> filename = mktemp()
>>> t = PTTensor.create(filename=filename, ndim=2)
It obeys (most of) the same dict-like interface as other tensors:
>>> t[0,0] = 1.5
>>> t[0,0]
1.5
But the fastest way to get data in is by using :meth:`update`
(just like a Python dict):
>>> t.update([((0,0), 0.), ((0,1), 0.01), ((1,0), 0.10), ((1,1), 0.11)])
>>> t[0,0]
0.0
>>> t[1,1]
0.11
If you're adding to a value that you think already exists, use
the :meth:`inc` method:
>>> t.inc((0,0), 1)
>>> t[0,0]
1.0
Containment is supported, though it currently requires a
linear scan, so it's a bit slow:
>>> (0,1) in t
True
It also supports other tensor functions, including the very
useful :meth:`iteritems`:
>>> (0,1) in t.keys()
True
>>> sorted(t.iteritems())[0]
((0L, 0L), 1.0)
Like other tensors, you can query its shape:
>>> t.shape
(2L, 2L)
You can poke around at the PyTables table underneath this if
you want, but be warned that the PyTables API is kinda
obfuscated because they want to do "natural naming" for
data. Variables are prefixed by ``_v_``, file functions by
``_f_``, etc. To show that the stuff above actually stored
data on disk, let's close the file and re-open the
tensor. Don't try this at home, folks:
>>> t.table._v_file.close()
>>> t = PTTensor.open(filename)
>>> t.shape
(2L, 2L)
Internally, data is stored in a densely packed table of ndim+1
columns, one entry per row. ndim UInt32 columns store the
"key" (the indices for each mode), and the final Float64
column stores the value. The _last_ occurrence of a key is
used as the current value; this allows updates to be
constant-time. This property means that the table may gather
"junk" over time; TODO(kcarnold) implement a garbage collector
to delete the unused rows.
The unordered disk structure means that a sequential scan is
necessary to find any arbitrary key (though iteration through
all items is very fast). If you want fast key access, sort the
table and use the :mod:`bisect` module.
'''
self.table = table
table.flush()
self.ndim = len([x for x in table.colnames if x.startswith('i')])
self._col_names = map(self._column_name, xrange(self.ndim))
# Precompute the query for getting a single item.
self._getitem_key = ' & '.join('(%s==%%d)' % name for name in self._col_names)
# Compute what keys are present for each index.
dim_entries = [set() for _ in xrange(self.ndim)]
for key in self:
for dim_ent, idx in izip(dim_entries, key):
dim_ent.add(idx)
self._dim_entries = dim_entries
# Compute the shape
if all(dim_entries):
self._shape = [max(dim_ent) + 1 for dim_ent in dim_entries]
else:
# No contents.
self._shape = [0L for _ in xrange(self.ndim)]
def __repr__(self):
return '<PTTensor shape: %r; %d items>' % (self.shape, len(self))
def update(self, entries):
row = self.table.row
dim_entries = self._dim_entries
col_names = self._col_names
shape = self._shape
for key, val in entries:
for idx, col_name, ent, dim_ent in izip(count(), col_names, key, dim_entries):
row[col_name] = ent
dim_ent.add(ent)
shape[idx] = max(shape[idx], ent + 1L)
row['v'] = val
row.append()
self.table.flush()
def _idx_for_key(self, key):
indices = self.table.getWhereList(self._getitem_key % key)
if len(indices) == 0:
return None
return indices[-1] # only the last one is valid.
def update_existing_key(self, key, val):
self.table.cols.v[self._idx_for_key(key)] = val
#
# Tensor API
#
def inc(self, key, amt):
'''
Add amt to key.
'''
idx = self._idx_for_key(key)
if idx is None:
self[key] = amt
else:
self.table.cols.v[idx] += amt
def __getitem__(self, key):
idx = self._idx_for_key(key)
if idx is None:
raise KeyError(key)
return self.table.cols.v[idx]
def __setitem__(self, key, val):
self.update([(key, val)])
def __delitem__(self, key):
raise NotImplementedError('not yet implemented.')
def __iter__(self):
'''Iterate over keys.
Note: if the table is not garbage-collected, the same index may be iterated over multiple times.
'''
col_names = self._col_names
return (tuple(r[name] for name in col_names) for r in self.table)
def iteritems(self):
'''Iterate over key-value pairs.
Note: if the table is not garbage-collected, the same index
may be iterated over multiple times. The last value returned
is the correct one.
'''
col_names = self._col_names
return ((tuple(r[name] for name in col_names), r['v']) for r in self.table)
def has_key(self, key):
if any(kent not in dim_ent for kent, dim_ent in izip(key, self._dim_entries)):
# Optimization: we don't have that key
return False
return len(self.table.getWhereList(self._getitem_key % key)) == 1
@property
def shape(self):
return tuple(self._shape)
def __len__(self):
'''
Return the number of specified items in the tensor.
Note: if the table is not garbage-collected, this count may be
overestimated.
'''
return self.table.shape[0]
@classmethod
def descriptor(cls, ndim):
'''
Construct the "descriptor" that PyTables uses to define the kind of table to create.
'''
desc = dict((cls._column_name(idx), tables.UInt32Col())
for idx in range(ndim))
desc['v'] = tables.Float64Col()
return type('PyT', (tables.IsDescription,), desc)
@classmethod
def _column_name(cls, idx):
return 'i%d' % (idx,)
| commonsense/divisi | csc/divisi/pt_tensor.py | Python | gpl-3.0 | 8,119 |
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cells RPC Communication Driver
"""
from oslo.config import cfg
from oslo import messaging
from nova.cells import driver
from nova import rpc
cell_rpc_driver_opts = [
cfg.StrOpt('rpc_driver_queue_base',
default='cells.intercell',
help="Base queue name to use when communicating between "
"cells. Various topics by message type will be "
"appended to this.")]
CONF = cfg.CONF
CONF.register_opts(cell_rpc_driver_opts, group='cells')
CONF.import_opt('call_timeout', 'nova.cells.opts', group='cells')
rpcapi_cap_opt = cfg.StrOpt('intercell',
help='Set a version cap for messages sent between cells services')
CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels')
class CellsRPCDriver(driver.BaseCellsDriver):
"""Driver for cell<->cell communication via RPC. This is used to
setup the RPC consumers as well as to send a message to another cell.
One instance of this class will be created for every neighbor cell
that we find in the DB and it will be associated with the cell in
its CellState.
One instance is also created by the cells manager for setting up
the consumers.
"""
def __init__(self, *args, **kwargs):
super(CellsRPCDriver, self).__init__(*args, **kwargs)
self.rpc_servers = []
self.intercell_rpcapi = InterCellRPCAPI()
def start_servers(self, msg_runner):
"""Start RPC servers.
Start up 2 separate servers for handling inter-cell
communication via RPC. Both handle the same types of
messages, but requests/replies are separated to solve
potential deadlocks. (If we used the same queue for both,
it's possible to exhaust the RPC thread pool while we wait
for replies.. such that we'd never consume a reply.)
"""
topic_base = CONF.cells.rpc_driver_queue_base
proxy_manager = InterCellRPCDispatcher(msg_runner)
for msg_type in msg_runner.get_message_types():
target = messaging.Target(topic='%s.%s' % (topic_base, msg_type),
server=CONF.host)
# NOTE(comstud): We do not need to use the object serializer
# on this because object serialization is taken care for us in
# the nova.cells.messaging module.
server = rpc.get_server(target, endpoints=[proxy_manager])
server.start()
self.rpc_servers.append(server)
def stop_servers(self):
"""Stop RPC servers.
        NOTE: Currently there are no hooks when stopping services
to have managers cleanup, so this is not currently called.
"""
for server in self.rpc_servers:
server.stop()
def send_message_to_cell(self, cell_state, message):
"""Use the IntercellRPCAPI to send a message to a cell."""
self.intercell_rpcapi.send_message_to_cell(cell_state, message)
class InterCellRPCAPI(object):
"""Client side of the Cell<->Cell RPC API.
The CellsRPCDriver uses this to make calls to another cell.
API version history:
1.0 - Initial version.
... Grizzly supports message version 1.0. So, any changes to existing
methods in 2.x after that point should be done such that they can
handle the version_cap being set to 1.0.
"""
VERSION_ALIASES = {
'grizzly': '1.0',
}
def __init__(self):
super(InterCellRPCAPI, self).__init__()
self.version_cap = (
self.VERSION_ALIASES.get(CONF.upgrade_levels.intercell,
CONF.upgrade_levels.intercell))
self.transports = {}
def _get_client(self, next_hop, topic):
"""Turn the DB information for a cell into a messaging.RPCClient."""
transport = self._get_transport(next_hop)
target = messaging.Target(topic=topic, version='1.0')
serializer = rpc.RequestContextSerializer(None)
return messaging.RPCClient(transport,
target,
version_cap=self.version_cap,
serializer=serializer)
def _get_transport(self, next_hop):
"""NOTE(belliott) Each Transport object contains connection pool
state. Maintain references to them to avoid continual reconnects
to the message broker.
"""
transport_url = next_hop.db_info['transport_url']
if transport_url not in self.transports:
transport = messaging.get_transport(cfg.CONF, transport_url,
rpc.TRANSPORT_ALIASES)
self.transports[transport_url] = transport
else:
transport = self.transports[transport_url]
return transport
def send_message_to_cell(self, cell_state, message):
"""Send a message to another cell by JSON-ifying the message and
making an RPC cast to 'process_message'. If the message says to
fanout, do it. The topic that is used will be
'CONF.rpc_driver_queue_base.<message_type>'.
"""
topic_base = CONF.cells.rpc_driver_queue_base
topic = '%s.%s' % (topic_base, message.message_type)
cctxt = self._get_client(cell_state, topic)
if message.fanout:
cctxt = cctxt.prepare(fanout=message.fanout)
return cctxt.cast(message.ctxt, 'process_message',
message=message.to_json())
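    # For example, with the default queue base 'cells.intercell', a message
    # whose message_type is, say, 'targeted' is cast to the
    # 'cells.intercell.targeted' topic, with fanout if the message requests it.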
class InterCellRPCDispatcher(object):
"""RPC Dispatcher to handle messages received from other cells.
All messages received here have come from a sibling cell. Depending
on the ultimate target and type of message, we may process the message
in this cell, relay the message to another sibling cell, or both. This
logic is defined by the message class in the nova.cells.messaging module.
"""
target = messaging.Target(version='1.0')
def __init__(self, msg_runner):
"""Init the Intercell RPC Dispatcher."""
self.msg_runner = msg_runner
def process_message(self, _ctxt, message):
"""We received a message from another cell. Use the MessageRunner
to turn this from JSON back into an instance of the correct
Message class. Then process it!
"""
message = self.msg_runner.message_from_json(message)
message.process()
| redhat-openstack/nova | nova/cells/rpc_driver.py | Python | apache-2.0 | 7,115 |
# open the genomic dna file and read the contents
genomic_dna = open("genomic_dna.txt").read()
# open the exons locations file
exon_locations = open("exons.txt")
# create a variable to hold the coding sequence
coding_sequence = ""
# go through each line in the exon locations file
for line in exon_locations:
# split the line using a comma
positions = line.split(',')
# get the start and stop positions
start = int(positions[0])
stop = int(positions[1])
# extract the exon from the genomic dna
exon = genomic_dna[start:stop]
# append the exon to the end of the current coding sequence
coding_sequence = coding_sequence + exon
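# e.g. an exons.txt line "5,14" appends genomic_dna[5:14] to the coding
# sequence: nine bases, zero-based start, stop position excluded
# (the coordinates here are illustrative).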
# write the coding sequence to an output file
output = open("coding_sequence.txt", "w")
output.write(coding_sequence)
output.close()
| ianmisner/BIO309_Spring2017 | week4/from_the_book/multiple_exons_from_genomic_dna.py | Python | gpl-3.0 | 803 |
import os
import unittest
from vsg.rules import variable
from vsg import vhdlFile
from vsg.tests import utils
sTestDir = os.path.dirname(__file__)
lFile, eError = vhdlFile.utils.read_vhdlfile(os.path.join(sTestDir, 'rule_011_test_input.vhd'))
lExpected = []
lExpected.append('')
utils.read_file(os.path.join(sTestDir, 'rule_011_test_input.fixed.vhd'), lExpected)
class test_variable_rule(unittest.TestCase):
def setUp(self):
self.oFile = vhdlFile.vhdlFile(lFile)
self.assertIsNone(eError)
def test_rule_011(self):
oRule = variable.rule_011()
self.assertTrue(oRule)
self.assertEqual(oRule.name, 'variable')
self.assertEqual(oRule.identifier, '011')
lExpected = [16, 18, 19, 21, 24, 24, 24, 24]
oRule.analyze(self.oFile)
self.assertEqual(lExpected, utils.extract_violation_lines_from_violation_object(oRule.violations))
def test_fix_rule_011(self):
oRule = variable.rule_011()
oRule.fix(self.oFile)
lActual = self.oFile.get_lines()
self.assertEqual(lExpected, lActual)
oRule.analyze(self.oFile)
self.assertEqual(oRule.violations, [])
| jeremiah-c-leary/vhdl-style-guide | vsg/tests/variable/test_rule_011.py | Python | gpl-3.0 | 1,178 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Model subclassing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import test
from tensorflow.python.training.tracking import data_structures
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
# pylint: disable=not-callable
class SimpleTestModel(keras.Model):
def __init__(self, use_bn=False, use_dp=False, num_classes=10):
super(SimpleTestModel, self).__init__(name='test_model')
self.use_bn = use_bn
self.use_dp = use_dp
self.num_classes = num_classes
self.dense1 = keras.layers.Dense(32, activation='relu')
self.dense2 = keras.layers.Dense(num_classes, activation='softmax')
if self.use_dp:
self.dp = keras.layers.Dropout(0.5)
if self.use_bn:
self.bn = keras.layers.BatchNormalization(axis=-1)
def call(self, x):
x = self.dense1(x)
if self.use_dp:
x = self.dp(x)
if self.use_bn:
x = self.bn(x)
return self.dense2(x)
class SimpleConvTestModel(keras.Model):
def __init__(self, num_classes=10):
super(SimpleConvTestModel, self).__init__(name='test_model')
self.num_classes = num_classes
self.conv1 = keras.layers.Conv2D(32, (3, 3), activation='relu')
self.flatten = keras.layers.Flatten()
self.dense1 = keras.layers.Dense(num_classes, activation='softmax')
def call(self, x):
x = self.conv1(x)
x = self.flatten(x)
return self.dense1(x)
class MultiIOTestModel(keras.Model):
def __init__(self, use_bn=False, use_dp=False, num_classes=(2, 3)):
super(MultiIOTestModel, self).__init__(name='test_model')
self.use_bn = use_bn
self.use_dp = use_dp
self.num_classes = num_classes
self.dense1 = keras.layers.Dense(32, activation='relu')
self.dense2 = keras.layers.Dense(num_classes[0], activation='softmax')
self.dense3 = keras.layers.Dense(num_classes[1], activation='softmax')
if use_dp:
self.dp = keras.layers.Dropout(0.5)
if use_bn:
self.bn = keras.layers.BatchNormalization()
def call(self, inputs):
x1, x2 = inputs
x1 = self.dense1(x1)
x2 = self.dense1(x2)
if self.use_dp:
x1 = self.dp(x1)
if self.use_bn:
x2 = self.bn(x2)
return [self.dense2(x1), self.dense3(x2)]
class NestedTestModel1(keras.Model):
"""A model subclass nested inside a model subclass.
"""
def __init__(self, num_classes=2):
super(NestedTestModel1, self).__init__(name='nested_model_1')
self.num_classes = num_classes
self.dense1 = keras.layers.Dense(32, activation='relu')
self.dense2 = keras.layers.Dense(num_classes, activation='relu')
self.bn = keras.layers.BatchNormalization()
self.test_net = SimpleTestModel(num_classes=4,
use_bn=True,
use_dp=True)
def call(self, inputs):
x = self.dense1(inputs)
x = self.bn(x)
x = self.test_net(x)
return self.dense2(x)
def get_functional_graph_model(input_dim, num_classes):
# A simple functional-API model (a.k.a. graph network)
inputs = keras.Input(shape=(input_dim,))
x = keras.layers.Dense(32, activation='relu')(inputs)
x = keras.layers.BatchNormalization()(x)
outputs = keras.layers.Dense(num_classes)(x)
return keras.Model(inputs, outputs)
class NestedTestModel2(keras.Model):
"""A model subclass with a functional-API graph network inside.
"""
def __init__(self, num_classes=2):
super(NestedTestModel2, self).__init__(name='nested_model_2')
self.num_classes = num_classes
self.dense1 = keras.layers.Dense(32, activation='relu')
self.dense2 = keras.layers.Dense(num_classes, activation='relu')
    self.bn = keras.layers.BatchNormalization()
self.test_net = get_functional_graph_model(32, 4)
def call(self, inputs):
x = self.dense1(inputs)
x = self.bn(x)
x = self.test_net(x)
return self.dense2(x)
def get_nested_model_3(input_dim, num_classes):
# A functional-API model with a subclassed model inside.
# NOTE: this requires the inner subclass to implement `compute_output_shape`.
inputs = keras.Input(shape=(input_dim,))
x = keras.layers.Dense(32, activation='relu')(inputs)
x = keras.layers.BatchNormalization()(x)
class Inner(keras.Model):
def __init__(self):
super(Inner, self).__init__()
self.dense1 = keras.layers.Dense(32, activation='relu')
self.dense2 = keras.layers.Dense(5, activation='relu')
self.bn = keras.layers.BatchNormalization()
def call(self, inputs):
x = self.dense1(inputs)
x = self.dense2(x)
return self.bn(x)
test_model = Inner()
x = test_model(x)
outputs = keras.layers.Dense(num_classes)(x)
return keras.Model(inputs, outputs, name='nested_model_3')
@keras_parameterized.run_all_keras_modes
class ModelSubclassingTest(keras_parameterized.TestCase):
def test_custom_build(self):
class DummyModel(keras.Model):
def __init__(self):
super(DummyModel, self).__init__()
self.dense1 = keras.layers.Dense(32, activation='relu')
self.uses_custom_build = False
def call(self, inputs):
return self.dense1(inputs)
def build(self, input_shape):
self.uses_custom_build = True
test_model = DummyModel()
dummy_data = array_ops.ones((32, 50))
test_model(dummy_data)
self.assertTrue(test_model.uses_custom_build, 'Model should use user '
'defined build when called.')
def test_attribute_conflict_error(self):
class ModelWithProperty(keras.Model):
@property
def read_only(self):
return 1.
m = ModelWithProperty()
with self.assertRaisesRegexp(AttributeError, 'read_only'):
m.read_only = 2.
def test_custom_build_with_fit(self):
class DummyModel(keras.Model):
def __init__(self):
super(DummyModel, self).__init__()
self.layer1 = keras.layers.Dense(10, activation='relu')
def build(self, input_shape):
self.layer2 = keras.layers.Dense(1, activation='relu')
def call(self, inputs):
return self.layer2(self.layer1(inputs))
model = DummyModel()
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
model.fit(np.ones((10, 10)), np.ones((10, 1)), batch_size=2, epochs=2)
self.assertLen(model.layers, 2)
self.assertLen(model.trainable_variables, 4)
def test_invalid_input_shape_build(self):
num_classes = 2
input_dim = 50
model = SimpleTestModel(num_classes=num_classes,
use_dp=True,
use_bn=True)
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
with self.assertRaisesRegexp(
ValueError, 'input shape is not one of the valid types'):
model.build(input_shape=tensor_shape.Dimension(input_dim))
def test_embed_dtype_with_subclass_build(self):
class Embedding(keras.layers.Layer):
"""An Embedding layer."""
def __init__(self, vocab_size, embedding_dim, **kwargs):
super(Embedding, self).__init__(**kwargs)
self.vocab_size = vocab_size
self.embedding_dim = embedding_dim
def build(self, _):
self.embedding = self.add_variable(
'embedding_kernel',
shape=[self.vocab_size, self.embedding_dim],
dtype=np.float32,
initializer=init_ops.random_uniform_initializer(-0.1, 0.1),
trainable=True)
def call(self, x):
return embedding_ops.embedding_lookup(self.embedding, x)
class EmbedModel(keras.Model):
def __init__(self, vocab_size, embed_size):
super(EmbedModel, self).__init__()
self.embed1 = Embedding(vocab_size, embed_size)
def call(self, inputs):
return self.embed1(inputs)
model = EmbedModel(100, 20)
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
with self.assertRaisesRegexp(
ValueError, 'if your layers do not support float type inputs'):
model.build(input_shape=(35, 20))
def test_single_time_step_rnn_build(self):
dim = 4
timesteps = 1
batch_input_shape = (None, timesteps, dim)
units = 3
class SimpleRNNModel(keras.Model):
def __init__(self):
super(SimpleRNNModel, self).__init__()
self.lstm = keras.layers.LSTM(units)
def call(self, inputs):
return self.lstm(inputs)
model = SimpleRNNModel()
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
model.build(batch_input_shape)
self.assertTrue(model.weights, ('Model should have weights now that it '
'has been properly built.'))
self.assertTrue(model.built, 'Model should be built after calling `build`.')
model(array_ops.ones((32, timesteps, dim)))
def test_single_io_subclass_build(self):
num_classes = 2
input_dim = 50
batch_size = None
model = SimpleTestModel(num_classes=num_classes,
use_dp=True,
use_bn=True)
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
model.build(input_shape=(batch_size, input_dim))
self.assertTrue(model.weights, ('Model should have weights now that it '
'has been properly built.'))
self.assertTrue(model.built, 'Model should be built after calling `build`.')
model(array_ops.ones((32, input_dim)))
def test_single_io_dimension_subclass_build(self):
num_classes = 2
input_dim = tensor_shape.Dimension(50)
batch_size = tensor_shape.Dimension(None)
model = SimpleTestModel(num_classes=num_classes,
use_dp=True,
use_bn=True)
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
model.build(input_shape=(batch_size, input_dim))
self.assertTrue(model.weights, ('Model should have weights now that it '
'has been properly built.'))
self.assertTrue(model.built, 'Model should be built after calling `build`.')
model(array_ops.ones((32, input_dim)))
def test_multidim_io_subclass_build(self):
num_classes = 10
# Input size, e.g. image
batch_size = 32
input_shape = (32, 32, 3)
model = SimpleConvTestModel(num_classes)
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
batch_input_shape = (batch_size,) + input_shape
model.build(input_shape=batch_input_shape)
self.assertTrue(model.weights, ('Model should have weights now that it '
'has been properly built.'))
self.assertTrue(model.built, 'Model should be built after calling `build`.')
model(array_ops.ones(batch_input_shape))
def test_tensorshape_io_subclass_build(self):
num_classes = 10
# Input size, e.g. image
batch_size = None
input_shape = (32, 32, 3)
model = SimpleConvTestModel(num_classes)
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
model.build(
input_shape=tensor_shape.TensorShape((batch_size,) + input_shape))
self.assertTrue(model.weights, ('Model should have weights now that it '
'has been properly built.'))
self.assertTrue(model.built, 'Model should be built after calling `build`.')
model(array_ops.ones((32,) + input_shape))
def test_subclass_save_model(self):
num_classes = 10
# Input size, e.g. image
batch_size = None
input_shape = (32, 32, 3)
model = SimpleConvTestModel(num_classes)
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
model.build(
input_shape=tensor_shape.TensorShape((batch_size,) + input_shape))
self.assertTrue(model.weights, ('Model should have weights now that it '
'has been properly built.'))
self.assertTrue(model.built, 'Model should be built after calling `build`.')
weights = model.get_weights()
tf_format_name = os.path.join(self.get_temp_dir(), 'ckpt')
model.save_weights(tf_format_name)
if h5py is not None:
hdf5_format_name = os.path.join(self.get_temp_dir(), 'weights.h5')
model.save_weights(hdf5_format_name)
model = SimpleConvTestModel(num_classes)
model.build(
input_shape=tensor_shape.TensorShape((batch_size,) + input_shape))
if h5py is not None:
model.load_weights(hdf5_format_name)
self.assertAllClose(weights, model.get_weights())
model.load_weights(tf_format_name)
self.assertAllClose(weights, model.get_weights())
def test_multi_io_subclass_build(self):
batch_size = None
num_samples = 1000
input_dim = 50
model = MultiIOTestModel()
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
batch_input_shape = tensor_shape.TensorShape((batch_size, input_dim))
model.build(
input_shape=[batch_input_shape, batch_input_shape])
self.assertTrue(model.weights, ('Model should have weights now that it '
'has been properly built.'))
self.assertTrue(model.built, 'Model should be built after calling `build`.')
x1 = array_ops.ones((num_samples, input_dim))
x2 = array_ops.ones((num_samples, input_dim))
model([x1, x2])
def test_summary(self):
class ToString(object):
def __init__(self):
self.contents = ''
def __call__(self, msg):
self.contents += msg + '\n'
# Single-io
model = SimpleTestModel(num_classes=4, use_bn=True, use_dp=True)
model._set_inputs(np.ones((3, 4))) # need to build model first
print_fn = ToString()
model.summary(print_fn=print_fn)
self.assertTrue('Trainable params: 356' in print_fn.contents)
# Multi-io
model = MultiIOTestModel(num_classes=(5, 6), use_bn=True, use_dp=True)
model._set_inputs([np.ones((3, 4)),
np.ones((3, 4))]) # need to build model first
print_fn = ToString()
model.summary(print_fn=print_fn)
self.assertTrue('Trainable params: 587' in print_fn.contents)
def test_no_dependency(self):
class Foo(keras.Model):
def __init__(self):
super(Foo, self).__init__()
self.isdep = keras.layers.Dense(1)
self.notdep = data_structures.NoDependency(keras.layers.Dense(2))
self.notdep_var = data_structures.NoDependency(
resource_variable_ops.ResourceVariable(1., name='notdep_var'))
m = Foo()
self.assertEqual([m.isdep, m.notdep], m.layers)
self.assertEqual(1, len(m._checkpoint_dependencies))
self.assertIs(m.isdep, m._checkpoint_dependencies[0].ref)
self.assertEqual('notdep_var:0', m.notdep_var.name)
def test_extra_variable(self):
class ExtraVar(keras.Model):
def __init__(self):
super(ExtraVar, self).__init__()
self.dense = keras.layers.Dense(1)
self.var = resource_variable_ops.ResourceVariable(1.)
self.not_trainable_var = resource_variable_ops.ResourceVariable(
2., trainable=False)
def call(self, inputs):
return self.dense(inputs + self.var)
m = ExtraVar()
self.assertTrue(m.trainable)
self.assertEqual([m.dense], m.layers)
self.assertEqual([m.var, m.not_trainable_var], m.variables)
self.assertEqual([m.var], m.trainable_variables)
self.assertEqual([m.not_trainable_var], m.non_trainable_variables)
self.assertLen(m.get_weights(), 2)
m.trainable = False
self.assertEqual([m.var, m.not_trainable_var], m.variables)
self.assertEqual([], m.trainable_variables)
self.assertEqual([m.var, m.not_trainable_var], m.non_trainable_variables)
self.assertLen(m.get_weights(), 2)
m.trainable = True
m(array_ops.ones([1, 1]))
self.assertEqual([m.dense.kernel, m.dense.bias], m.dense.variables)
self.assertEqual([m.dense.kernel, m.dense.bias], m.dense.weights)
self.assertLen(m.get_weights(), 4)
self.assertEqual([m.dense.kernel, m.dense.bias, m.var, m.not_trainable_var],
m.variables)
self.assertEqual([m.dense.kernel, m.dense.bias, m.var],
m.trainable_variables)
self.assertEqual([m.not_trainable_var], m.non_trainable_variables)
m.dense.trainable = False
self.assertEqual(
[m.dense.kernel, m.dense.bias, m.var, m.not_trainable_var],
m.variables)
self.assertEqual([m.var], m.trainable_variables)
self.assertEqual([m.dense.kernel, m.dense.bias, m.not_trainable_var],
m.non_trainable_variables)
self.assertLen(m.get_weights(), 4)
def test_add_weight_in_model(self):
class MyModel(keras.Model):
def __init__(self):
super(MyModel, self).__init__()
self.b = self.add_weight('bias', (10,))
self.c = self.add_weight('bias2', (10,), trainable=False)
def call(self, inputs):
return inputs + self.b + self.c
x = ops.convert_to_tensor(np.ones((10, 10), 'float32'))
model = MyModel()
model(x)
self.assertEqual(1, len(model.trainable_weights))
self.assertEqual(1, len(model.non_trainable_weights))
self.assertEqual(2, len(model.weights))
class MyModelCustomBuild(keras.Model):
def build(self, input_shape):
self.b = self.add_weight('bias', (10,))
self.c = self.add_weight('bias2', (10,), trainable=False)
def call(self, inputs):
return inputs + self.b + self.c
x = ops.convert_to_tensor(np.ones((10, 10), 'float32'))
model = MyModelCustomBuild()
model(x)
self.assertEqual(1, len(model.trainable_weights))
self.assertEqual(1, len(model.non_trainable_weights))
self.assertEqual(2, len(model.weights))
def test_add_update_in_model(self):
class MyModel(keras.Model):
def __init__(self):
super(MyModel, self).__init__()
self.b = self.add_weight('bias', (10,))
self.c = self.add_weight('bias2', (10,))
def call(self, inputs):
# Unconditional
self.add_update(self.b.assign(self.b * 2))
# Conditional
self.add_update(self.c.assign(inputs[1, :]), inputs)
return inputs + self.b + self.c
x = ops.convert_to_tensor(np.ones((10, 10), 'float32'))
model = MyModel()
model(x)
if context.executing_eagerly():
self.assertEqual(0, len(model.updates))
else:
self.assertEqual(2, len(model.updates))
self.assertEqual(1, len(model.get_updates_for(None)))
self.assertEqual(1, len(model.get_updates_for(x)))
@keras_parameterized.run_all_keras_modes
class ModelSubclassCompiledTest(keras_parameterized.TestCase):
def test_single_io_workflow_with_np_arrays(self):
num_classes = 2
num_samples = 100
input_dim = 50
model = SimpleTestModel(num_classes=num_classes,
use_dp=True,
use_bn=True)
model.compile(
loss='mse',
optimizer='rmsprop',
metrics=['acc', keras.metrics.CategoricalAccuracy()],
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones((num_samples, input_dim))
y = np.zeros((num_samples, num_classes))
model.fit(x, y, epochs=2, batch_size=32, verbose=0)
_ = model.evaluate(x, y, verbose=0)
def test_multi_io_workflow_with_np_arrays(self):
num_classes = (2, 3)
num_samples = 1000
input_dim = 50
model = MultiIOTestModel(num_classes=num_classes,
use_dp=True,
use_bn=True)
model.compile(
loss='mse',
optimizer='rmsprop',
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly())
x1 = np.ones((num_samples, input_dim))
x2 = np.ones((num_samples, input_dim))
y1 = np.zeros((num_samples, num_classes[0]))
y2 = np.zeros((num_samples, num_classes[1]))
model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0)
_ = model.evaluate([x1, x2], [y1, y2], verbose=0)
def test_single_io_workflow_with_dataset_iterators(self):
num_classes = 2
num_samples = 10
input_dim = 50
with self.cached_session():
model = SimpleTestModel(num_classes=num_classes, use_dp=True, use_bn=True)
model.compile(
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones((num_samples, input_dim), dtype=np.float32)
y = np.zeros((num_samples, num_classes), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
iterator = dataset_ops.make_one_shot_iterator(dataset)
model.fit(iterator, epochs=2, steps_per_epoch=10, verbose=0)
_ = model.evaluate(iterator, steps=10, verbose=0)
def test_attributes(self):
# layers, weights, trainable_weights, non_trainable_weights, inputs, outputs
num_classes = (2, 3)
num_samples = 100
input_dim = 50
model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
x1 = np.ones((num_samples, input_dim))
x2 = np.ones((num_samples, input_dim))
y1 = np.zeros((num_samples, num_classes[0]))
y2 = np.zeros((num_samples, num_classes[1]))
self.assertEqual(model.name, 'test_model')
self.assertEqual(model.built, False)
self.assertEqual(len(model.weights), 0)
model.compile(
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch([x1, x2], [y1, y2])
self.assertEqual(model.built, True)
self.assertEqual(len(model.layers), 4)
self.assertEqual(len(model.weights), 10)
self.assertEqual(len(model.trainable_weights), 8)
self.assertEqual(len(model.non_trainable_weights), 2)
self.assertEqual(len(model.inputs), 2)
self.assertEqual(len(model.outputs), 2)
def test_updates(self):
# test that updates get run during training
num_samples = 100
input_dim = 50
class BNNet(keras.Model):
def __init__(self):
super(BNNet, self).__init__()
self.bn = keras.layers.BatchNormalization(beta_initializer='ones',
gamma_initializer='ones')
def call(self, inputs):
return self.bn(inputs)
x = np.ones((num_samples, input_dim))
y = np.ones((num_samples, input_dim))
model = BNNet()
model.compile(
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
y_ref = model.predict(x)
model.train_on_batch(x, y)
y_new = model.predict(x)
self.assertGreater(np.sum(np.abs(y_ref - y_new)), 0.1)
def test_training_and_inference_behavior(self):
# test that dropout is applied in training and not inference
num_samples = 100
input_dim = 50
class DPNet(keras.Model):
def __init__(self):
super(DPNet, self).__init__()
self.dp = keras.layers.Dropout(0.5)
self.dense = keras.layers.Dense(1,
use_bias=False,
kernel_initializer='ones')
def call(self, inputs):
x = self.dp(inputs)
return self.dense(x)
model = DPNet()
x = np.ones((num_samples, input_dim))
y = model.predict(x)
self.assertEqual(np.sum(y), np.sum(x))
model.compile(
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
loss = model.train_on_batch(x, y)
self.assertGreater(loss, 0.1)
def test_training_methods(self):
# test fit, train_on_batch
# on different input types: list, dict
num_classes = (2, 3)
num_samples = 100
input_dim = 50
x1 = np.ones((num_samples, input_dim))
x2 = np.ones((num_samples, input_dim))
y1 = np.zeros((num_samples, num_classes[0]))
y2 = np.zeros((num_samples, num_classes[1]))
model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
model.compile(
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0)
model.fit({'input_1': x1, 'input_2': x2},
{'output_1': y1, 'output_2': y2},
epochs=2, batch_size=32)
model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0,
validation_data=([x1, x2], [y1, y2]))
model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
model.compile(
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch([x1, x2], [y1, y2])
model.train_on_batch({'input_1': x1, 'input_2': x2},
{'output_1': y1, 'output_2': y2})
def test_inference_methods(self):
# test predict, evaluate, test_on_batch, predict_on_batch
# on different input types: list, dict
num_classes = (2, 3)
num_samples = 100
input_dim = 50
x1 = np.ones((num_samples, input_dim))
x2 = np.ones((num_samples, input_dim))
y1 = np.zeros((num_samples, num_classes[0]))
y2 = np.zeros((num_samples, num_classes[1]))
model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
model.compile(
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
model.evaluate([x1, x2], [y1, y2])
model.test_on_batch([x1, x2], [y1, y2])
model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
model.predict([x1, x2])
model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
model.predict_on_batch([x1, x2])
def test_saving(self):
num_classes = (2, 3)
num_samples = 100
input_dim = 50
x1 = np.ones((num_samples, input_dim))
x2 = np.ones((num_samples, input_dim))
y1 = np.zeros((num_samples, num_classes[0]))
y2 = np.zeros((num_samples, num_classes[1]))
model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
model.compile(
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0)
y_ref_1, y_ref_2 = model.predict([x1, x2])
tf_format_name = os.path.join(self.get_temp_dir(), 'ckpt')
model.save_weights(tf_format_name)
if h5py is not None:
hdf5_format_name = os.path.join(self.get_temp_dir(), 'weights.h5')
model.save_weights(hdf5_format_name)
model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
if h5py is not None:
with self.assertRaises(ValueError):
model.load_weights(hdf5_format_name)
model.load_weights(tf_format_name)
y1, y2 = model.predict([x1, x2])
self.assertAllClose(y_ref_1, y1, atol=1e-5)
self.assertAllClose(y_ref_2, y2, atol=1e-5)
if h5py is not None:
model.load_weights(hdf5_format_name)
y1, y2 = model.predict([x1, x2])
self.assertAllClose(y_ref_1, y1, atol=1e-5)
self.assertAllClose(y_ref_2, y2, atol=1e-5)
def test_subclass_nested_in_subclass(self):
num_classes = 2
num_samples = 100
input_dim = 50
model = NestedTestModel1(num_classes=num_classes)
model.compile(
loss='mse',
optimizer='rmsprop',
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones((num_samples, input_dim))
y = np.zeros((num_samples, num_classes))
model.fit(x, y, epochs=2, batch_size=32, verbose=0)
_ = model.evaluate(x, y, verbose=0)
self.assertEqual(len(model.weights), 8 + len(model.test_net.weights))
self.assertEqual(len(model.non_trainable_weights),
2 + len(model.test_net.non_trainable_weights))
self.assertEqual(len(model.trainable_weights),
6 + len(model.test_net.trainable_weights))
def test_graph_nested_in_subclass(self):
num_classes = 2
num_samples = 100
input_dim = 50
model = NestedTestModel2(num_classes=num_classes)
model.compile(
loss='mse',
optimizer='rmsprop',
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones((num_samples, input_dim))
y = np.zeros((num_samples, num_classes))
model.fit(x, y, epochs=2, batch_size=32, verbose=0)
_ = model.evaluate(x, y, verbose=0)
self.assertEqual(len(model.weights), 8 + len(model.test_net.weights))
self.assertEqual(len(model.non_trainable_weights),
2 + len(model.test_net.non_trainable_weights))
self.assertEqual(len(model.trainable_weights),
6 + len(model.test_net.trainable_weights))
def test_subclass_nested_in_graph(self):
num_classes = 2
num_samples = 100
input_dim = 50
model = get_nested_model_3(input_dim=input_dim, num_classes=num_classes)
model.compile(
loss='mse',
optimizer='rmsprop',
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones((num_samples, input_dim))
y = np.zeros((num_samples, num_classes))
model.fit(x, y, epochs=2, batch_size=32, verbose=0)
_ = model.evaluate(x, y, verbose=0)
self.assertEqual(len(model.weights), 16)
self.assertEqual(len(model.non_trainable_weights), 4)
self.assertEqual(len(model.trainable_weights), 12)
def test_subclass_nested_in_sequential(self):
num_classes = 2
num_samples = 100
input_dim = 50
class Inner(keras.Model):
def __init__(self):
super(Inner, self).__init__()
self.dense1 = keras.layers.Dense(32, activation='relu')
self.dense2 = keras.layers.Dense(num_classes, activation='relu')
self.bn = keras.layers.BatchNormalization()
def call(self, inputs):
x = self.dense1(inputs)
x = self.dense2(x)
return self.bn(x)
model = keras.Sequential([Inner()])
model.compile(
loss='mse',
optimizer='rmsprop',
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones((num_samples, input_dim))
y = np.zeros((num_samples, num_classes))
model.fit(x, y, epochs=2, batch_size=32, verbose=0)
_ = model.evaluate(x, y, verbose=0)
self.assertEqual(len(model.weights), 8)
self.assertEqual(len(model.non_trainable_weights), 2)
self.assertEqual(len(model.trainable_weights), 6)
def test_support_for_manual_training_arg(self):
# In most cases, the `training` argument is left unspecified, in which
# case it defaults to value corresponding to the Model method being used
# (fit -> True, predict -> False, etc).
# If the user writes their model `call` method to take
# an explicit `training` argument, we must check that the correct value
# is being passed to the model for each method call.
class DPNet(keras.Model):
def __init__(self):
super(DPNet, self).__init__()
self.dp = keras.layers.Dropout(0.5)
self.dense = keras.layers.Dense(1,
use_bias=False,
kernel_initializer='ones')
def call(self, inputs, training=False):
x = self.dp(inputs, training=training)
return self.dense(x)
model = DPNet()
x = np.ones((10, 10))
y = model.predict(x)
self.assertEqual(np.sum(y), np.sum(x))
model.compile(
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
loss = model.train_on_batch(x, y)
self.assertGreater(loss, 0.1)
def test_no_loss_in_compile(self):
class InternalLossModel(keras.Model):
def __init__(self):
super(InternalLossModel, self).__init__()
self.dense = keras.layers.Dense(1)
def call(self, inputs):
out = self.dense(inputs)
self.add_loss(math_ops.reduce_sum(out))
return out
model = InternalLossModel()
x = np.ones((10, 10))
model.predict(x)
model.compile(
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
model.fit(x)
model.evaluate(x)
class GraphSpecificModelSubclassingTests(test.TestCase):
@test_util.run_deprecated_v1
def test_single_io_workflow_with_tensors(self):
num_classes = 2
num_samples = 10
input_dim = 50
with self.cached_session():
model = SimpleTestModel(num_classes=num_classes,
use_dp=True,
use_bn=True)
model.compile(loss='mse', optimizer='rmsprop')
x = array_ops.ones((num_samples, input_dim))
y = array_ops.zeros((num_samples, num_classes))
model.fit(x, y, epochs=2, steps_per_epoch=10, verbose=0)
_ = model.evaluate(steps=10, verbose=0)
@test_util.run_deprecated_v1
def test_multi_io_workflow_with_tensors(self):
num_classes = (2, 3)
num_samples = 10
input_dim = 50
with self.cached_session():
model = MultiIOTestModel(num_classes=num_classes,
use_dp=True,
use_bn=True)
model.compile(loss='mse', optimizer='rmsprop')
x1 = array_ops.ones((num_samples, input_dim))
x2 = array_ops.ones((num_samples, input_dim))
y1 = array_ops.zeros((num_samples, num_classes[0]))
y2 = array_ops.zeros((num_samples, num_classes[1]))
model.fit([x1, x2], [y1, y2], epochs=2, steps_per_epoch=10, verbose=0)
_ = model.evaluate(steps=10, verbose=0)
@test_util.run_deprecated_v1
def test_updates_and_losses_for_nested_models_in_subclassed_model(self):
# Case 1: deferred-build sequential nested in subclass.
class TestModel1(keras.Model):
def __init__(self):
super(TestModel1, self).__init__()
self.fc = keras.layers.Dense(10, input_shape=(784,),
activity_regularizer='l1')
self.bn = keras.Sequential([keras.layers.BatchNormalization(axis=1)])
def call(self, x):
return self.bn(self.fc(x))
with self.cached_session():
model = TestModel1()
x = array_ops.ones(shape=[100, 784], dtype='float32')
model(x)
self.assertEqual(len(model.get_updates_for(x)), 2)
self.assertEqual(len(model.get_losses_for(x)), 1)
# Case 2: placeholder-sequential nested in subclass.
class TestModel2(keras.Model):
def __init__(self):
super(TestModel2, self).__init__()
self.fc = keras.layers.Dense(10, input_shape=(784,),
activity_regularizer='l1')
self.bn = keras.Sequential(
[keras.layers.BatchNormalization(axis=1, input_shape=(10,))])
def call(self, x):
return self.bn(self.fc(x))
with self.cached_session():
model = TestModel2()
x = array_ops.ones(shape=[100, 784], dtype='float32')
model(x)
self.assertEqual(len(model.get_updates_for(x)), 2)
self.assertEqual(len(model.get_losses_for(x)), 1)
# Case 3: functional-API model nested in subclass.
inputs = keras.Input((10,))
outputs = keras.layers.BatchNormalization(axis=1)(inputs)
bn = keras.Model(inputs, outputs)
class TestModel3(keras.Model):
def __init__(self):
super(TestModel3, self).__init__()
self.fc = keras.layers.Dense(10, input_shape=(784,),
activity_regularizer='l1')
self.bn = bn
def call(self, x):
return self.bn(self.fc(x))
with self.cached_session():
model = TestModel3()
x = array_ops.ones(shape=[100, 784], dtype='float32')
model(x)
self.assertEqual(len(model.get_updates_for(x)), 2)
self.assertEqual(len(model.get_losses_for(x)), 1)
@test_util.run_deprecated_v1
def test_multi_io_workflow_with_numpy_arrays_and_custom_placeholders(self):
num_classes = (2, 3)
num_samples = 1000
input_dim = 50
with self.cached_session():
model = MultiIOTestModel(num_classes=num_classes,
use_dp=True,
use_bn=True)
model.compile(loss='mse', optimizer='rmsprop')
x1 = np.ones((num_samples, input_dim))
x2 = np.ones((num_samples, input_dim))
y1 = np.zeros((num_samples, num_classes[0]))
y2 = np.zeros((num_samples, num_classes[1]))
x2_placeholder = array_ops.placeholder(
dtype='float32', shape=(None, input_dim))
model._set_inputs([x1, x2_placeholder])
model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0)
_ = model.evaluate([x1, x2], [y1, y2], verbose=0)
class CustomCallModel(keras.Model):
def __init__(self):
super(CustomCallModel, self).__init__()
self.dense1 = keras.layers.Dense(1, activation='relu')
self.dense2 = keras.layers.Dense(1, activation='softmax')
def call(self, first, second, fiddle_with_output='no', training=True):
combined = self.dense1(first) + self.dense2(second)
if fiddle_with_output == 'yes':
return 10. * combined
else:
return combined
class TrainingNoDefaultModel(keras.Model):
def __init__(self):
super(TrainingNoDefaultModel, self).__init__()
self.dense1 = keras.layers.Dense(1)
def call(self, x, training):
return self.dense1(x)
class TrainingMaskingModel(keras.Model):
def __init__(self):
super(TrainingMaskingModel, self).__init__()
self.dense1 = keras.layers.Dense(1)
def call(self, x, training=False, mask=None):
return self.dense1(x)
@test_util.run_all_in_graph_and_eager_modes
class CustomCallSignatureTests(test.TestCase):
def test_no_inputs_in_signature(self):
model = CustomCallModel()
first = array_ops.ones([2, 3])
second = array_ops.ones([2, 5])
output = model(first, second)
self.evaluate([v.initializer for v in model.variables])
expected_output = self.evaluate(model.dense1(first) + model.dense2(second))
self.assertAllClose(expected_output, self.evaluate(output))
output = model(first, second, fiddle_with_output='yes')
self.assertAllClose(10. * expected_output, self.evaluate(output))
output = model(first, second=second, training=False)
self.assertAllClose(expected_output, self.evaluate(output))
def test_training_args_call_build(self):
input_dim = 2
model = TrainingNoDefaultModel()
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
model.build((None, input_dim))
self.assertTrue(model.weights, ('Model should have weights now that it '
'has been properly built.'))
self.assertTrue(model.built, 'Model should be built after calling `build`.')
def test_training_and_mask_args_call_build(self):
input_dim = 2
model = TrainingMaskingModel()
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
model.build((None, input_dim))
self.assertTrue(model.weights, ('Model should have weights now that it '
'has been properly built.'))
self.assertTrue(model.built, 'Model should be built after calling `build`.')
def test_custom_call_kwargs_and_build(self):
first_input_shape = (2, 3)
second_input_shape = (2, 5)
model = CustomCallModel()
self.assertFalse(model.built, 'Model should not have been built')
self.assertFalse(model.weights, ('Model should have no weights since it '
'has not been built.'))
with self.assertRaisesRegexp(
ValueError, 'cannot build your model if it has positional'):
model.build(input_shape=[first_input_shape, second_input_shape])
def test_inputs_in_signature(self):
class HasInputsAndOtherPositional(keras.Model):
def call(self, inputs, some_other_arg, training=False):
return inputs
def compute_output_shape(self, input_shape):
return input_shape
model = HasInputsAndOtherPositional()
with self.assertRaisesRegexp(
TypeError, 'everything else as a keyword argument'):
x1, x2 = keras.Input((1, 1)), keras.Input((1, 1))
model(x1, x2)
def test_kwargs_in_signature(self):
class HasKwargs(keras.Model):
def call(self, x, y=3, **kwargs):
return x
model = HasKwargs()
arg = array_ops.ones([1])
model(arg, a=3)
if not context.executing_eagerly():
self.assertEqual(len(model.inputs), 1)
def test_args_in_signature(self):
class HasArgs(keras.Model):
def call(self, x, *args, **kwargs):
return [x] + list(args)
def compute_output_shape(self, input_shape):
return input_shape
model = HasArgs()
x1, x2, x3 = keras.Input((1, 1)), keras.Input((1, 1)), keras.Input((1, 1))
model(x1, x2, x3, a=3)
self.assertEqual(len(model.inputs), 3)
def test_args_and_keywords_in_signature(self):
class HasArgs(keras.Model):
def call(self, x, training=True, *args, **kwargs): # pylint:disable=keyword-arg-before-vararg
return x
model = HasArgs()
x1, x2, x3 = keras.Input((1, 1)), keras.Input((1, 1)), keras.Input((1, 1))
with self.assertRaisesRegexp(
TypeError, 'may not accept both positional arguments and '):
model(x1, x2, x3, a=3)
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def test_training_no_default(self):
if context.executing_eagerly():
self.skipTest('b/120997007')
model = TrainingNoDefaultModel()
arg = array_ops.ones([1, 1])
model(arg, True)
self.assertEqual(len(model.inputs), 1)
def test_training_no_default_with_positional(self):
class TrainingNoDefaultWithPositional(keras.Model):
def call(self, x, training, positional):
return x
model = TrainingNoDefaultWithPositional()
x1, x2, x3 = keras.Input((1, 1)), keras.Input((1, 1)), keras.Input((1, 1))
with self.assertRaisesRegexp(TypeError, 'after a non-input'):
model(x1, x2, x3)
if __name__ == '__main__':
test.main()
| ghchinoy/tensorflow | tensorflow/python/keras/model_subclassing_test.py | Python | apache-2.0 | 44,884 |
__author__ = 'jwright'
# Text and pictures tutorial
# http://pythonprogramming.net/tkinter-adding-text-images/
import Tkinter as tk
import Tkconstants, tkFileDialog
from Tkinter import *
class Tkbrut(tk.Frame):
    def __init__(self, brut):
        tk.Frame.__init__(self, brut)
        self.brut = brut
        # 'Back' button lives inside the frame and closes the window
        QuitButton = tk.Button(self, text='Back', command=self.quit_win)
        QuitButton.pack()
    def quit_win(self):
        self.brut.destroy()
if __name__=='__main__':
brut = tk.Tk()
brut.geometry("400x300")
brut.title("Brute Force")
Tkbrut(brut).pack()
brut.mainloop() | COCS4950G7/COSC4950 | Source/GUI/frameGUI_BRUT.py | Python | gpl-3.0 | 544 |
import pygame
import pygameMenu
from game_logic import helper
class SettingsMenu(object):
def __init__(self, screen):
"""
Constructor. Receives the screen to be able to draw
the menu and creates the different configuration widgets
"""
# Settings menu
self.__settings_menu = pygameMenu.Menu(
screen,
bgfun=lambda: screen.fill(helper.WHITE),
color_selected=helper.BLACK,
font=pygameMenu.font.FONT_HELVETICA,
font_color=helper.BLACK,
font_size=15,
font_size_title=35,
menu_alpha=100,
menu_color=helper.WHITE,
menu_height=int(helper.HEIGHT * 0.85),
menu_width=int(helper.WIDTH * 0.9),
# onclose=pygameMenu.events.DISABLE_CLOSE,
title='Settings',
widget_alignment=pygameMenu.locals.ALIGN_LEFT,
window_height=helper.HEIGHT,
window_width=helper.WIDTH
)
self.__trials_widg = self.__settings_menu.add_text_input(
'Trials: ',
default=1000,
maxchar=6,
textinput_id='trials',
input_type=pygameMenu.locals.INPUT_INT,
enable_selection=False)
        # Create selector with the two opponent options
self.__opponent_widg = self.__settings_menu.add_selector(
'Select opponent: ',
[('Computer', 'COMPUTER'),
('Human', 'HUMAN')],
selector_id='opponent',
default=0)
        # Create selector with the two player symbols
self.__player_widg = self.__settings_menu.add_selector(
'Choose Player:',
[('O', 'O'),
('X', 'X')],
selector_id='player',
default=0)
self.__settings_menu.add_option(
'Back', pygameMenu.events.CLOSE,
align=pygameMenu.locals.ALIGN_CENTER)
def __call__(self):
"""
Bring the menu to life. Listen to events
and store the values set
"""
# Loop until back button is pressed
self.__settings_menu.mainloop(disable_loop=False)
# print('Settings data:')
# data = self.__settings_menu.get_input_data()
# for k in data.keys():
# print(u'\t{0}\t=>\t{1}'.format(k, data[k]))
return self.__settings_menu.get_input_data()
def load_settings(self, settings):
"""
Load the current settings of the game
and set them as the values of the configuration
widgets
"""
self.__trials_widg.set_value(settings.trials)
self.__opponent_widg.set_value(settings.opponent)
self.__player_widg.set_value(settings.player)
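# A minimal usage sketch, run only when this module is executed directly.
# It assumes pygame is installed and that game_logic.helper defines WIDTH and
# HEIGHT (both are already used above); real callers would also pass the
# game's settings object to load_settings() before showing the menu.
if __name__ == '__main__':
    pygame.init()
    screen = pygame.display.set_mode((helper.WIDTH, helper.HEIGHT))
    menu = SettingsMenu(screen)
    # mainloop blocks until 'Back' is pressed, then the chosen values are returned
    print(menu())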
| juangallostra/TicTacToe | src/game_logic/settings_menu.py | Python | mit | 2,753 |
'''
In this exercise you need to know how to set joint commands.
* Tasks:
1. set stiffness of LShoulderPitch to 0
2. set speed of HeadYaw to 0.1
* Hint: The commands are stored in action (class Action in spark_agent.py)
'''
# add PYTHONPATH
import os
import sys
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', 'software_installation'))
from spark_agent import SparkAgent
class MyAgent(SparkAgent):
def think(self, perception):
action = super(MyAgent, self).think(perception)
# YOUR CODE HERE
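        # A possible sketch, left commented out so the exercise stays open.
        # The attribute names below are hypothetical -- check the Action class
        # in spark_agent.py for the real field names:
        #   action.stiffness['LShoulderPitch'] = 0.0   # task 1: zero stiffness
        #   action.speed['HeadYaw'] = 0.1              # task 2: set joint speed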
return action
if '__main__' == __name__:
agent = MyAgent()
agent.run()
| DAInamite/programming-humanoid-robot-in-python | introduction/set_joint_commands.py | Python | gpl-2.0 | 647 |
# ===========================================================================
# Copyright 2013 University of Limerick
#
# This file is part of DREAM.
#
# DREAM is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DREAM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DREAM. If not, see <http://www.gnu.org/licenses/>.
# ===========================================================================
'''
Created on 12 Jul 2012
@author: George
'''
'''
Class that acts as an abstract base class. It should have no instances; all core objects should inherit from it.
'''
# from SimPy.Simulation import Process, Resource, now, SimEvent, waitevent
import simpy
from ManPyObject import ManPyObject
# ===========================================================================
# the core object
# ===========================================================================
class CoreObject(ManPyObject):
class_name = 'Dream.CoreObject'
def __init__(self, id, name, **kw):
ManPyObject.__init__(self,id,name)
self.objName = name
# lists that hold the previous and next objects in the flow
self.next=[] #list with the next objects in the flow
self.previous=[] #list with the previous objects in the flow
self.nextIds=[] #list with the ids of the next objects in the flow
self.previousIds=[] #list with the ids of the previous objects in the flow
#lists to hold statistics of multiple runs
self.Failure=[]
self.Working=[]
self.Blockage=[]
self.Waiting=[]
self.OffShift=[]
self.WaitingForOperator=[]
self.WaitingForLoadOperator=[]
self.Loading = []
self.SettingUp =[]
# list that holds the objectInterruptions that have this element as victim
self.objectInterruptions=[]
#default attributes set so that the CoreObject has them
self.isPreemptive=False
self.resetOnPreemption=False
self.interruptCause=None
self.gatherWipStat=False
# flag used to signal that the station waits for removeEntity event
self.waitEntityRemoval=False
# attributes/indices used for printing the route, hold the cols corresponding to the object (entities route and operators route)
self.station_col_inds=[]
self.op_col_indx=None
# if there is input in a dictionary parse from it
from Globals import G
G.ObjList.append(self) # add object to ObjList
        # list of expected signals of a station (values can be used as flags to inform on which signals the station is currently yielding)
self.expectedSignals={
"isRequested":0,
"canDispose":0,
"interruptionStart":0,
"interruptionEnd":0,
"loadOperatorAvailable":0,
"initialWIP":0,
"brokerIsSet":0,
"preemptQueue":0,
"entityRemoved":0,
"entityCreated":0,
"moveEnd":0,
"processOperatorUnavailable":0
}
        # flag notifying that the station can deliver entities that ended their processing while interrupted
self.canDeliverOnInterruption=False
# keep wip stats for every replication
self.WipStat=[]
def initialize(self):
from Globals import G
self.env=G.env
self.Up=True #Boolean that shows if the object is in failure ("Down") or not ("up")
self.onShift=True
self.currentEntity=None
# ============================== total times ===============================================
        self.totalOperationTime=0               #dummy variable to hold totalWorking/SetupTime during an interruption (yield ... self.operation('setup'))
self.totalBlockageTime=0 #holds the total blockage time
self.totalFailureTime=0 #holds the total failure time
self.totalWaitingTime=0 #holds the total waiting time
self.totalWorkingTime=0 #holds the total working time
self.totalOffShiftTime=0 #holds the total off-shift time
self.completedJobs=0 #holds the number of completed jobs
# ============================== Entity related attributes =================================
self.timeLastEntityEnded=0 #holds the last time that an entity ended processing in the object
self.nameLastEntityEnded="" #holds the name of the last entity that ended processing in the object
self.timeLastEntityEntered=0 #holds the last time that an entity entered in the object
self.nameLastEntityEntered="" #holds the name of the last entity that entered in the object
# ============================== shift related times =====================================
self.timeLastShiftStarted=0 #holds the time that the last shift of the object started
self.timeLastShiftEnded=0 #holds the time that the last shift of the object ended
self.offShiftTimeTryingToReleaseCurrentEntity=0 #holds the time that the object was off-shift while trying
#to release the current entity
# ============================== failure related times =====================================
self.timeLastFailure=0 #holds the time that the last failure of the object started
self.timeLastFailureEnded=0 #holds the time that the last failure of the object ended
#processing the current entity
self.downTimeInTryingToReleaseCurrentEntity=0 #holds the time that the object was down while trying
                                                        #to release the current entity. This might be due to failure, off-shift, etc.
self.timeLastEntityLeft=0 #holds the last time that an entity left the object
self.processingTimeOfCurrentEntity=0 #holds the total processing time that the current entity required
# ============================== waiting flag ==============================================
self.waitToDispose=False #shows if the object waits to dispose an entity
self.isWorkingOnTheLast=False #shows if the object is performing the last processing before scheduled interruption
# ============================== the below are currently used in Jobshop =======================
self.giver=None #the CoreObject that the activeObject will take an Entity from
if len(self.previous)>0:
self.giver=self.previous[0]
self.receiver=None #the CoreObject that the activeObject will give an Entity to
if len(self.next)>0:
self.receiver=self.next[0]
# ============================== variable that is used for the loading of objects =============
self.exitAssignedToReceiver = None # by default the objects are not blocked
# when the entities have to be loaded to operated objects
# then the giverObjects have to be blocked for the time
# that the object is being loaded
# ============================== variable that is used signalling of objects ==================
self.entryAssignedToGiver = None # by default the objects are not blocked
# when the entities have to be received by objects
# then the objects have to be blocked after the first signal they receive
# in order to avoid signalling the same object
# while it has not received the entity it has been originally signalled for
# ============================== lists to hold statistics of multiple runs =====================
self.totalTimeWaitingForOperator=0
self.operatorWaitTimeCurrentEntity=0
self.totalTimeInCurrentEntity=0
self.totalProcessingTimeInCurrentEntity=0
# self.failureTimeInCurrentEntity=0
self.setupTimeCurrentEntity=0
# the time that the object started/ended its wait for the operator
self.timeWaitForOperatorStarted=0
self.timeWaitForOperatorEnded=0
# the time that the object started/ended its wait for the operator
self.timeWaitForLoadOperatorStarted=0
self.timeWaitForLoadOperatorEnded=0
self.totalTimeWaitingForLoadOperator=0
# the time that the operator started/ended loading the object
self.timeLoadStarted=0
self.timeLoadEnded=0
self.totalLoadTime=0
# the time that the operator started/ended setting-up the object
self.timeSetupStarted=0
self.timeSetupEnded=0
self.totalSetupTime=0
# Current entity load/setup/loadOperatorwait/operatorWait related times
self.operatorWaitTimeCurrentEntity=0 # holds the time that the object was waiting for the operator
self.loadOperatorWaitTimeCurrentEntity = 0 # holds the time that the object waits for operator to load the it
self.loadTimeCurrentEntity = 0 # holds the time to load the current entity
self.setupTimeCurrentEntity = 0 # holds the time to setup the object before processing the current entity
self.shouldPreempt=False #flag that shows that the object should preempt or not
self.isProcessingInitialWIP=False #flag that is used only when a object has initial wip
self.lastGiver=None # variable that holds the last giver of the object, used by object in case of preemption
# initialize the wipStatList -
# TODO, think what to do in multiple runs
# TODO, this should be also updated in Globals.setWIP (in case we have initial wip)
import numpy as np
self.wipStatList=np.array([[0,0]])
self.isRequested=self.env.event()
self.canDispose=self.env.event()
self.interruptionEnd=self.env.event()
self.interruptionStart=self.env.event()
self.interruptedBy=None
self.entityRemoved=self.env.event()
self.initialWIP=self.env.event()
# flag used to signal that the station waits for removeEntity event
self.waitEntityRemoval=False
# attributes/indices used for printing the route, hold the cols corresponding to the object (entities route and operators route)
self.station_col_inds=[]
self.op_col_indx=None
# flag that locks the entry of an object so that it cannot receive entities
self.isLocked=False
# flag that shows if the object is processing state at any given time
self.isProcessing=False
# variable that shows what kind of operation is the station performing at the moment
'''
it can be Processing or Setup
XXX: others not yet implemented
'''
self.currentlyPerforming=None
# flag that shows if the object is blocked state at any given time
self.isBlocked=False
self.timeLastBlockageStarted=None
        # list of expected signals of a station (values can be used as flags to inform on which signals the station is currently yielding)
self.expectedSignals={
"isRequested":0,
"canDispose":0,
"interruptionStart":0,
"interruptionEnd":0,
"loadOperatorAvailable":0,
"initialWIP":0,
"brokerIsSet":0,
"preemptQueue":0,
"entityRemoved":0,
"entityCreated":0,
"moveEnd":0
}
# lists that keep the start/endShiftTimes of the victim
self.endShiftTimes=[]
self.startShiftTimes=[]
# =======================================================================
# the main process of the core object
# this is dummy, every object must have its own implementation
# =======================================================================
def run(self):
raise NotImplementedError("Subclass must define 'run' method")
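    # A minimal subclass sketch (hypothetical, for illustration only; real
    # stations such as machines or queues implement far richer run() logic):
    #
    #   class DummyStation(CoreObject):
    #       def run(self):
    #           while 1:
    #               # wait until a giver signals that an entity is ready
    #               yield self.isRequested
    #               self.isRequested=self.env.event()
    #               self.getEntity()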
# =======================================================================
# sets the routing in and out elements for the Object
# =======================================================================
def defineRouting(self, predecessorList=[], successorList=[]):
self.next=successorList
self.previous=predecessorList
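    # Example wiring (hypothetical object names, shown for illustration):
    #   source.defineRouting(successorList=[machine])
    #   machine.defineRouting(predecessorList=[source], successorList=[exitObject])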
# =======================================================================
# checks if there is anything set as WIP at the begging of the simulation
# and sends an event to initialize the simulation
# =======================================================================
def initialSignalReceiver(self):
if self.haveToDispose():
self.signalReceiver()
def initialAllocationRequest(self):
# TODO if the station is operated, and the operators have skills defined then the SkilledOperatorRouter should be signalled
# XXX: there may be a case where one object is not assigned an operator, in that case we do not want to invoke the allocation routine
if self.checkForDedicatedOperators():
allocationNeeded=False
from Globals import G
for obj in G.MachineList:
if obj.operatorPool!='None':
if obj.operatorPool.operators:
allocationNeeded=False
break
else:
allocationNeeded=True
if allocationNeeded:
self.requestAllocation()
# =======================================================================
# removes an Entity from the Object the Entity to be removed is passed
# as argument by getEntity of the receiver
# =======================================================================
def removeEntity(self, entity=None, resetFlags=True, addBlockage=True):
if addBlockage and self.isBlocked:
# add the blocking time
self.addBlockage()
# reset flags
if resetFlags:
self.isBlocked=False
self.isProcessing=False
activeObjectQueue=self.Res.users
activeObjectQueue.remove(entity) #remove the Entity from the queue
if self.receiver:
self.receiver.appendEntity(entity)
self.downTimeInTryingToReleaseCurrentEntity=0
self.offShiftTimeTryingToReleaseCurrentEntity=0
self.timeLastEntityLeft=self.env.now
self.outputTrace(entity.name, "released "+self.objName)
#append the time to schedule so that it can be read in the result
        #remember that every entity has its schedule which is supposed to be updated every time
        # the entity enters a new object
if entity.schedule:
entity.schedule[-1]["exitTime"] = self.env.now
# update wipStatList
if self.gatherWipStat:
import numpy
self.wipStatList=numpy.concatenate((self.wipStatList,[[self.env.now, len(activeObjectQueue)]]))
if self.expectedSignals['entityRemoved']:
self.printTrace(self.id, signal='(removedEntity)')
self.sendSignal(receiver=self, signal=self.entityRemoved)
return entity
#===========================================================================
# appends entity to the receiver object. to be called by the removeEntity of the giver
# this method is created to be overridden by the Assembly class in its getEntity where Frames are loaded
#===========================================================================
def appendEntity(self,entity=None):
activeObjectQueue=self.Res.users
activeObjectQueue.append(entity)
# =======================================================================
    # called by getEntity; it identifies the Entity
# to be obtained so that
# getEntity gives it to removeEntity as argument
# =======================================================================
def identifyEntityToGet(self):
giverObjectQueue=self.getGiverObjectQueue()
return giverObjectQueue[0]
# =======================================================================
# adds the blockage time to totalBlockageTime
# each time an Entity is removed
# =======================================================================
def addBlockage(self):
if self.timeLastBlockageStarted:
self.totalBlockageTime+=self.env.now-self.timeLastBlockageStarted
# =======================================================================
# gets an entity from the giver
# =======================================================================
def getEntity(self):
# get active object and its queue, as well as the active (to be) entity
#(after the sorting of the entities in the queue of the giver object)
# activeObject=self.getActiveObject()
activeObjectQueue=self.Res.users
# get giver object, its queue, and sort the entities according to this object priorities
giverObject=self.giver
giverObject.sortEntities() #sort the Entities of the giver
#according to the scheduling rule if applied
giverObject.sortEntitiesForReceiver(self)
giverObjectQueue=giverObject.Res.users
# if the giverObject is blocked then unBlock it
if giverObject.exitIsAssignedTo():
giverObject.unAssignExit()
# if the activeObject entry is blocked then unBlock it
if self.entryIsAssignedTo():
self.unAssignEntry()
activeEntity=self.identifyEntityToGet()
activeEntity.currentStation=self
# update the receiver of the giverObject
giverObject.receiver=self
# remove entity from the giver
activeEntity = giverObject.removeEntity(entity=self.identifyEntityToGet())
# variable that holds the last giver; used in case of preemption
self.lastGiver=self.giver
# #get the entity from the previous object and put it in front of the activeQ
# activeObjectQueue.append(activeEntity)
#append the time to schedule so that it can be read in the result
#remember that every entity has it's schedule which is supposed to be updated every time
# the entity enters a new object
activeEntity.schedule.append({"station": self,
"entranceTime": self.env.now})
#update variables
activeEntity.currentStation=self
self.timeLastEntityEntered=self.env.now
        self.nameLastEntityEntered=activeEntity.name # this holds the name of the last entity that got into the object
# update the next list of the object
self.updateNext(activeEntity)
self.outputTrace(activeEntity.name, "got into "+self.objName)
self.printTrace(activeEntity.name, enter=self.id)
# # if there are entities with requiredParts then check whether the requirements are fulfilled for them to proceed
# # ass soon as a "buffer" receives an entity it controls if the entity is requested elsewhere,
# # then it checks if there other requested entities by the same requesting entity.
# # Finally, it is controlled whether all the requested parts have concluded
# # their sequences for the requesting entity
# from Globals import G
# # for all the entities in the EntityList
# for entity in G.EntityList:
# requiredParts=entity.getRequiredParts()
# if requiredParts:
# # if the activeEntity is in the requierdParts of the entity
# if activeEntity in requiredParts:
# # if the entity that requires the activeEntity can proceed then signal the currentStation of the entity
# if entity.checkIfRequiredPartsReady() and entity.currentStation.expectedSignals['canDispose']:
# entity.mayProceed=True
# self.sendSignal(receiver=entity.currentStation, signal=entity.currentStation.canDispose)
# if the object (eg Queue) canAccept then signal the Giver
if self.canAccept():
self.signalGiver()
return activeEntity
#===========================================================================
# updates the next list of the object
#===========================================================================
def updateNext(self, entity=None):
pass
#===========================================================================
# check whether there is a critical entity to be disposed
# and if preemption is required
#===========================================================================
def preemptReceiver(self):
activeObjectQueue=self.Res.users
# find a critical order if any
critical=False
for entity in activeObjectQueue:
if entity.isCritical:
activeEntity=entity
critical=True
break
if critical:
# pick a receiver
receiver=None
if any(object for object in self.next if object.isPreemptive and object.checkIfActive()):
receiver=next(object for object in self.next if object.isPreemptive and object.checkIfActive())
# if there is any receiver that can be preempted check if it is operated
if receiver:
receiverOperated=False # local variable to inform if the receiver is operated for Loading
try:
from MachineJobShop import MachineJobShop
from MachineManagedJob import MachineManagedJob
# TODO: implement preemption for simple machines
if receiver.operatorPool\
and isinstance(receiver, MachineJobShop) or\
isinstance(receiver, MachineManagedJob):
# and the operationType list contains Load, the receiver is operated
if (receiver.operatorPool!="None")\
and any(type=="Load" for type in receiver.multOperationTypeList):
receiverOperated=True
except:
pass
# if the obtained Entity is critical and the receiver is preemptive and not operated
# in the case that the receiver is operated the preemption is performed by the operators
# if the receiver is not Up then no preemption will be performed
if not receiverOperated and len(receiver.Res.users)>0:
#if the receiver does not hold an Entity that is also critical
if not receiver.Res.users[0].isCritical:
receiver.shouldPreempt=True
self.printTrace(self.id, preempt=receiver.id)
receiver.preempt()
                    receiver.timeLastEntityEnded=self.env.now     #required to count blockage correctly in the preempted station
# sort so that the critical entity is placed in front
activeObjectQueue.sort(key=lambda x: x==activeEntity, reverse=True)
# if there is a critical entity and the possible receivers are operated then signal the Router
elif receiverOperated:
self.signalRouter(receiver)
activeObjectQueue.sort(key=lambda x: x==activeEntity, reverse=True)
# update wipStatList
if self.gatherWipStat:
import numpy
self.wipStatList=numpy.concatenate((self.wipStatList,[[self.env.now, len(activeObjectQueue)]]))
#===========================================================================
# find possible receivers
#===========================================================================
@staticmethod
def findReceiversFor(activeObject):
receivers=[]
for object in [x for x in activeObject.next if x.canAccept(activeObject) and not x.isRequested.triggered and x.expectedSignals['isRequested']]:
receivers.append(object)
return receivers
# =======================================================================
# signal the successor that the object can dispose an entity
# =======================================================================
def signalReceiver(self):
possibleReceivers=self.findReceiversFor(self)
if possibleReceivers:
receiver=self.selectReceiver(possibleReceivers)
receiversGiver=self
# perform the checks that canAcceptAndIsRequested used to perform and update activeCallersList or assignExit and operatorPool
while not receiver.canAcceptAndIsRequested(receiversGiver):
possibleReceivers.remove(receiver)
if not possibleReceivers:
receiversGiver=None
receiver=None
                # if no receiver can accept then try to preempt a receiver if the station holds a critical order
self.preemptReceiver()
return False
receiver=self.selectReceiver(possibleReceivers)
receiversGiver=self
# sorting the entities of the object for the receiver
self.sortEntitiesForReceiver(receiver)
# signalling the Router if the receiver is operated and not assigned an operator
if self.signalRouter(receiver):
return False
self.receiver=receiver
self.receiver.giver=self
self.printTrace(self.id, signalReceiver=self.receiver.id)
# assign the entry of the receiver
self.receiver.assignEntryTo()
# assign the exit of the current object to the receiver
self.assignExitTo(self.receiver)
if self.receiver.expectedSignals['isRequested']:
self.sendSignal(receiver=self.receiver, signal=self.receiver.isRequested)
return True
        # if no receiver can accept then try to preempt a receiver if the station holds a critical order
self.preemptReceiver()
return False
# =======================================================================
# select a receiver Object
# =======================================================================
@staticmethod
def selectReceiver(possibleReceivers=[]):
candidates=possibleReceivers
        # dummy variables that help prioritize the candidate receivers of the object (activeObject)
maxTimeWaiting=0 # dummy variable counting the time a successor is waiting
receiver=None
from Globals import G
for object in candidates:
timeWaiting=G.env.now-object.timeLastEntityLeft # the time it has been waiting is updated and stored in dummy variable timeWaiting
if(timeWaiting>maxTimeWaiting or maxTimeWaiting==0):# if the timeWaiting is the maximum among the ones of the successors
maxTimeWaiting=timeWaiting
receiver=object # set the receiver as the longest waiting possible receiver
return receiver
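    # Worked example (hypothetical values): at G.env.now == 10, with candidate
    # stations A (timeLastEntityLeft == 3) and B (timeLastEntityLeft == 7),
    # A has been idle for 7 time units and B for only 3, so A is returned.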
#===========================================================================
# sort the entities of the queue for the receiver
#===========================================================================
def sortEntitiesForReceiver(self, receiver=None):
pass
#===========================================================================
# find possible givers
#===========================================================================
@staticmethod
def findGiversFor(activeObject):
givers=[]
        for object in [x for x in activeObject.previous if (x is not activeObject) and not x.canDispose.triggered and
                        (x.expectedSignals['canDispose'] or
                        (x.canDeliverOnInterruption and x.timeLastShiftEnded==x.env.now))]: # extra check: if the shift ended right now
                                                                                            # and the object can unload, relax the canDispose flag
if object.haveToDispose(activeObject):
givers.append(object)
return givers
# =======================================================================
# signal the giver that the entity is removed from its internalQueue
# =======================================================================
def signalGiver(self):
possibleGivers=self.findGiversFor(self)
if possibleGivers:
giver=self.selectGiver(possibleGivers)
giversReceiver=self
# perform the checks that canAcceptAndIsRequested used to perform and update activeCallersList or assignExit and operatorPool
while not self.canAcceptAndIsRequested(giver):
possibleGivers.remove(giver)
if not possibleGivers:
return False
giver=self.selectGiver(possibleGivers)
giversReceiver=self
self.giver=giver
self.giver.receiver=self
            if self.giver.expectedSignals['canDispose'] or (self.giver.canDeliverOnInterruption
                                                            and self.giver.timeLastShiftEnded==self.env.now): # extra check: if the shift ended right now
                                                            # and the object can unload, relax the canDispose flag
self.sendSignal(receiver=self.giver, signal=self.giver.canDispose)
self.printTrace(self.id, signalGiver=self.giver.id)
return True
return False
# =======================================================================
# select a giver Object
# =======================================================================
@staticmethod
    def selectGiver(possibleGivers=None):
        candidates=possibleGivers or []  # avoid a shared mutable default argument
# dummy variables that help prioritize the objects requesting to give objects to the object (activeObject)
maxTimeWaiting=0 # dummy variable counting the time a predecessor is blocked
giver=None
from Globals import G
# loop through the possible givers to see which have to dispose and which is the one blocked for longer
for object in candidates:
# calculate how much the giver is waiting
timeWaiting=G.env.now-object.timeLastEntityEnded
if(timeWaiting>=maxTimeWaiting):
giver=object # the object to deliver the Entity to the activeObject is set to the ith member of the previous list
maxTimeWaiting=timeWaiting
return giver
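    # Note the asymmetry with selectReceiver: the >= comparison means a tie is
    # won by the candidate appearing later in possibleGivers, e.g. with givers
    # A and B both blocked for 5 time units, B is returned.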
# =======================================================================
# actions to be taken after the simulation ends
# =======================================================================
    def postProcessing(self, MaxSimtime=None):
        from Globals import G  # needed further below even when MaxSimtime is supplied
        if MaxSimtime is None:
            MaxSimtime=G.maxSimTime
activeObject=self.getActiveObject()
activeObjectQueue=self.getActiveObjectQueue()
import numpy
self.wipStatList=numpy.concatenate((self.wipStatList,[[self.env.now, len(activeObjectQueue)]]))
#calculate the offShift time for current entity
offShiftTimeInCurrentEntity=0
if self.interruptedBy:
if self.onShift==False: # and self.interruptedBy=='ShiftScheduler':
offShiftTimeInCurrentEntity=self.env.now-activeObject.timeLastShiftEnded
if self.isBlocked:
self.addBlockage()
#if object is currently processing an entity we should count this working time
if self.isProcessing:
'''XXX currentlyPerforming can be Setup or Processing '''
if self.currentlyPerforming:
if self.currentlyPerforming=='Setup':
activeObject.totalSetupTime+=self.env.now-self.timeLastOperationStarted
else:
activeObject.totalWorkingTime+=self.env.now-self.timeLastOperationStarted
else:
activeObject.totalWorkingTime+=self.env.now-self.timeLastProcessingStarted
# activeObject.totalTimeWaitingForOperator+=activeObject.operatorWaitTimeCurrentEntity
# if object is down we have to add this failure time to its total failure time
if self.Up==False:
if self.onShift:
activeObject.totalFailureTime+=self.env.now-activeObject.timeLastFailure
# if object is off shift add only the fail time before the shift ended
if not self.onShift and self.timeLastFailure < self.timeLastShiftEnded:
self.totalFailureTime+=self.timeLastShiftEnded-self.timeLastFailure
#if the object is off shift,add this to the off-shift time
if activeObject.onShift==False:
# if we ran the simulation for infinite time we have to identify the last event
now=self.env.now
if now==float('inf'):
now=0
lastExits=[]
for object in G.ExitList:
lastExits.append(object.timeLastEntityEntered)
if lastExits:
now=max(lastExits)
self.totalOffShiftTime+=now-self.timeLastShiftEnded
#object was idle when it was not in any other state
activeObject.totalWaitingTime=MaxSimtime-activeObject.totalWorkingTime-activeObject.totalBlockageTime-activeObject.totalFailureTime-activeObject.totalLoadTime-activeObject.totalSetupTime-self.totalOffShiftTime
        if activeObject.totalBlockageTime<0 and activeObject.totalBlockageTime>-0.00001: # guard against slightly negative values caused by rounding precision
            self.totalBlockageTime=0
        if activeObject.totalWaitingTime<0 and activeObject.totalWaitingTime>-0.00001: # guard against slightly negative values caused by rounding precision
            self.totalWaitingTime=0
activeObject.Failure.append(100*self.totalFailureTime/MaxSimtime)
activeObject.Blockage.append(100*self.totalBlockageTime/MaxSimtime)
activeObject.Waiting.append(100*self.totalWaitingTime/MaxSimtime)
activeObject.Working.append(100*self.totalWorkingTime/MaxSimtime)
activeObject.WaitingForOperator.append(100*self.totalTimeWaitingForOperator/MaxSimtime)
activeObject.WaitingForLoadOperator.append(100*self.totalTimeWaitingForLoadOperator/MaxSimtime)
activeObject.Loading.append(100*self.totalLoadTime/MaxSimtime)
activeObject.SettingUp.append(100*self.totalSetupTime/MaxSimtime)
activeObject.OffShift.append(100*self.totalOffShiftTime/MaxSimtime)
activeObject.WipStat.append(self.wipStatList.tolist())
# =======================================================================
# outputs results to JSON File
# =======================================================================
def outputResultsJSON(self):
pass
# =======================================================================
# checks if the Object can dispose an entity to the following object
# =======================================================================
def haveToDispose(self, callerObject=None):
activeObjectQueue=self.Res.users
return len(activeObjectQueue)>0
# =======================================================================
# checks if the Object can accept an entity and there is an entity
# in some possible giver waiting for it
# =======================================================================
def canAcceptAndIsRequested(self,callerObject=None):
pass
# =======================================================================
# checks if the Object can accept an entity
# =======================================================================
def canAccept(self, callerObject=None):
pass
#===========================================================================
# method used to check whether the station is a successor of the caller
#===========================================================================
def isInRouteOf(self, callerObject=None):
thecaller=callerObject
# if the caller is not defined then return True. We are only interested in checking whether
# the station can accept whatever entity from whichever giver
if not thecaller:
return True
        #check if the caller object is a predecessor of the activeObject
if thecaller in self.previous:
return True
return False
# =======================================================================
# sorts the Entities in the activeQ of the objects
# =======================================================================
def sortEntities(self):
pass
# =======================================================================
# get the active object. This always returns self
# =======================================================================
def getActiveObject(self):
return self
# =======================================================================
# get the activeQ of the active object.
# =======================================================================
def getActiveObjectQueue(self):
return self.Res.users
# =======================================================================
# get the giver object in a getEntity transaction.
# =======================================================================
def getGiverObject(self):
return self.giver
# =======================================================================
# get the giver object queue in a getEntity transaction.
# =======================================================================
def getGiverObjectQueue(self):
return self.giver.Res.users
# =======================================================================
# get the receiver object in a removeEntity transaction.
# =======================================================================
def getReceiverObject(self):
return self.receiver
# =======================================================================
# get the receiver object queue in a removeEntity transaction.
# =======================================================================
def getReceiverObjectQueue(self):
return self.receiver.Res.users
# =======================================================================
# calculates the processing time
# =======================================================================
def calculateProcessingTime(self):
# this is only for processing of the initial wip
if self.isProcessingInitialWIP:
activeEntity=self.getActiveObjectQueue()[0]
if activeEntity.remainingProcessingTime:
remainingProcessingTime=activeEntity.remainingProcessingTime
from RandomNumberGenerator import RandomNumberGenerator
initialWIPrng=RandomNumberGenerator(self, remainingProcessingTime)
return initialWIPrng.generateNumber()
return self.rng.generateNumber() # this is if we have a default processing time for all the entities
#===========================================================================
# calculates time (running through a dictionary) according to the type of processing given as argument
#===========================================================================
def calculateTime(self,type='Processing'):
return {
'Load': self.loadRng.generateNumber,
'Setup': self.stpRng.generateNumber,
'Processing': self.calculateProcessingTime
}[type]()
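    # Usage sketch: calculateTime(type='Setup') dispatches to
    # self.stpRng.generateNumber(), while the default 'Processing' goes through
    # calculateProcessingTime so that initial-WIP remainders are honoured.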
# =======================================================================
# checks if the object is blocked
# =======================================================================
def exitIsAssignedTo(self):
return self.exitAssignedToReceiver
# =======================================================================
# assign Exit of the object
# =======================================================================
def assignExitTo(self, callerObject=None):
self.exitAssignedToReceiver=callerObject
# =======================================================================
# unblock the object
# =======================================================================
def unAssignExit(self):
self.exitAssignedToReceiver = None
# =======================================================================
# checks if the object is blocked
# =======================================================================
def entryIsAssignedTo(self):
return self.entryAssignedToGiver
# =======================================================================
# assign Exit of the object
# =======================================================================
def assignEntryTo(self):
self.entryAssignedToGiver = self.giver
# =======================================================================
# unblock the object
# =======================================================================
def unAssignEntry(self):
self.entryAssignedToGiver = None
# =======================================================================
# actions to be carried whenever the object is interrupted
# (failure, break, preemption, etc)
# =======================================================================
def interruptionActions(self):
pass
# =======================================================================
# actions to be carried whenever the object recovers
# control after an interruption (failure, break, preemption, etc)
# =======================================================================
def postInterruptionActions(self):
pass
# =======================================================================
# method to execute preemption
# =======================================================================
def preempt(self):
        # TODO: make a generic method
pass
# =======================================================================
# checks if the object is in an active position
# =======================================================================
def checkIfActive(self):
return self.Up and self.onShift
#===========================================================================
# filter that returns True if the activeObject Queue is empty and
# false if object holds entities in its queue
#===========================================================================
def activeQueueIsEmpty(self):
return len(self.Res.users)==0
# =======================================================================
# actions to be carried out when the processing of an Entity ends
# =======================================================================
def endOperationActions(self):
pass
#===========================================================================
# check if an entity is in the internal Queue of the object
#===========================================================================
def isInActiveQueue(self, entity=None):
activeObjectQueue = self.Res.users
return any(x==entity for x in activeObjectQueue)
| jerome-nexedi/dream | dream/simulation/CoreObject.py | Python | gpl-3.0 | 47,086 |
from vt_manager.communication.sfa.rspecs.elements.element import Element
class Execute(Element):
fields = [
'shell',
'command',
]
| dana-i2cat/felix | vt_manager/src/python/vt_manager/communication/sfa/rspecs/elements/execute.py | Python | apache-2.0 | 155 |
#!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
#
import os, subprocess, sys, glob, string, optparse
import zipfile
from datetime import date
cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
os.chdir(cwd)
required_module_keys = ['name','version','moduleid','description','copyright','license','platform','minsdk']
module_defaults = {
'description':'My module',
'author': 'Your Name',
'license' : 'Specify your license',
'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year),
}
module_license_default = "TODO: place your license here and we'll include it in the module distribution"
def find_sdk(config):
sdk = config['TITANIUM_SDK']
return os.path.expandvars(os.path.expanduser(sdk))
def replace_vars(config,token):
idx = token.find('$(')
while idx != -1:
idx2 = token.find(')',idx+2)
if idx2 == -1: break
key = token[idx+2:idx2]
if not config.has_key(key): break
token = token.replace('$(%s)' % key, config[key])
idx = token.find('$(')
return token
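# Worked example (hypothetical config): with config = {'TITANIUM_SDK': '/opt/titanium'},
# replace_vars(config, '$(TITANIUM_SDK)/iphone') returns '/opt/titanium/iphone';
# a token with an unknown key is returned unchanged because the loop breaks.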
def read_ti_xcconfig():
contents = open(os.path.join(cwd,'titanium.xcconfig')).read()
config = {}
for line in contents.splitlines(False):
line = line.strip()
if line[0:2]=='//': continue
idx = line.find('=')
if idx > 0:
key = line[0:idx].strip()
value = line[idx+1:].strip()
config[key] = replace_vars(config,value)
return config
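# Example (hypothetical file contents): a titanium.xcconfig line such as
# 'TITANIUM_SDK = /Library/Titanium/mobilesdk/osx/$(TITANIUM_VERSION)' is stored
# as config['TITANIUM_SDK'], with $(TITANIUM_VERSION) expanded via replace_vars
# from a key parsed on an earlier line.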
def generate_doc(config):
docdir = os.path.join(cwd,'documentation')
if not os.path.exists(docdir):
warn("Couldn't find documentation file at: %s" % docdir)
return None
try:
import markdown2 as markdown
except ImportError:
import markdown
documentation = []
for file in os.listdir(docdir):
if file in ignoreFiles or os.path.isdir(os.path.join(docdir, file)):
continue
md = open(os.path.join(docdir,file)).read()
html = markdown.markdown(md)
		documentation.append({file:html})
return documentation
def compile_js(manifest,config):
js_file = os.path.join(cwd,'assets','ti.touchid.js')
if not os.path.exists(js_file): return
from compiler import Compiler
try:
import json
except:
import simplejson as json
compiler = Compiler(cwd, manifest['moduleid'], manifest['name'], 'commonjs')
root_asset, module_assets = compiler.compile_module()
root_asset_content = """
%s
return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[0]);
""" % root_asset
module_asset_content = """
%s
NSNumber *index = [map objectForKey:path];
if (index == nil) {
return nil;
}
return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[index.integerValue]);
""" % module_assets
from tools import splice_code
assets_router = os.path.join(cwd,'Classes','TiTouchidModuleAssets.m')
splice_code(assets_router, 'asset', root_asset_content)
splice_code(assets_router, 'resolve_asset', module_asset_content)
# Generate the exports after crawling all of the available JS source
exports = open('metadata.json','w')
json.dump({'exports':compiler.exports }, exports)
exports.close()
def die(msg):
print msg
sys.exit(1)
def info(msg):
print "[INFO] %s" % msg
def warn(msg):
print "[WARN] %s" % msg
def validate_license():
c = open(os.path.join(cwd,'LICENSE')).read()
if c.find(module_license_default)!=-1:
warn('please update the LICENSE file with your license text before distributing')
def validate_manifest():
	path = os.path.join(cwd,'manifest')
	if not os.path.exists(path): die("missing %s" % path)
	f = open(path)
manifest = {}
for line in f.readlines():
line = line.strip()
if line[0:1]=='#': continue
if line.find(':') < 0: continue
		key,value = line.split(':',1) # split on the first colon only so values may contain ':'
manifest[key.strip()]=value.strip()
for key in required_module_keys:
if not manifest.has_key(key): die("missing required manifest key '%s'" % key)
if module_defaults.has_key(key):
defvalue = module_defaults[key]
curvalue = manifest[key]
if curvalue==defvalue: warn("please update the manifest key: '%s' to a non-default value" % key)
return manifest,path
ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README']
ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT']
def zip_dir(zf,dir,basepath,ignoreExt=[]):
if not os.path.exists(dir): return
for root, dirs, files in os.walk(dir):
for name in ignoreDirs:
if name in dirs:
dirs.remove(name) # don't visit ignored directories
for file in files:
if file in ignoreFiles: continue
e = os.path.splitext(file)
if len(e) == 2 and e[1] in ignoreExt: continue
from_ = os.path.join(root, file)
to_ = from_.replace(dir, '%s/%s'%(basepath,dir), 1)
zf.write(from_, to_)
def glob_libfiles():
files = []
for libfile in glob.glob('build/**/*.a'):
if libfile.find('Release-')!=-1:
files.append(libfile)
return files
def build_module(manifest,config):
from tools import ensure_dev_path
ensure_dev_path()
rc = os.system("xcodebuild -sdk iphoneos -configuration Release OTHER_CFLAGS=\"-fembed-bitcode\"")
if rc != 0:
die("xcodebuild failed")
rc = os.system("xcodebuild -sdk iphonesimulator -configuration Release OTHER_CFLAGS=\"-fembed-bitcode\"")
if rc != 0:
die("xcodebuild failed")
# build the merged library using lipo
moduleid = manifest['moduleid']
libpaths = ''
for libfile in glob_libfiles():
libpaths+='%s ' % libfile
os.system("lipo %s -create -output build/lib%s.a" %(libpaths,moduleid))
def generate_apidoc(apidoc_build_path):
global options
if options.skip_docs:
info("Skipping documentation generation.")
return False
else:
info("Module apidoc generation can be skipped using --skip-docs")
apidoc_path = os.path.join(cwd, "apidoc")
if not os.path.exists(apidoc_path):
warn("Skipping apidoc generation. No apidoc folder found at: %s" % apidoc_path)
return False
if not os.path.exists(apidoc_build_path):
os.makedirs(apidoc_build_path)
ti_root = string.strip(subprocess.check_output(["echo $TI_ROOT"], shell=True))
if not len(ti_root) > 0:
warn("Not generating documentation from the apidoc folder. The titanium_mobile repo could not be found.")
warn("Set the TI_ROOT environment variable to the parent folder where the titanium_mobile repo resides (eg.'export TI_ROOT=/Path').")
return False
docgen = os.path.join(ti_root, "titanium_mobile", "apidoc", "docgen.py")
if not os.path.exists(docgen):
warn("Not generating documentation from the apidoc folder. Couldn't find docgen.py at: %s" % docgen)
return False
info("Generating documentation from the apidoc folder.")
rc = os.system("\"%s\" --format=jsca,modulehtml --css=styles.css -o \"%s\" -e \"%s\"" % (docgen, apidoc_build_path, apidoc_path))
if rc != 0:
die("docgen failed")
return True
def package_module(manifest,mf,config):
name = manifest['name'].lower()
moduleid = manifest['moduleid'].lower()
version = manifest['version']
modulezip = '%s-iphone-%s.zip' % (moduleid,version)
if os.path.exists(modulezip): os.remove(modulezip)
zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED)
modulepath = 'modules/iphone/%s/%s' % (moduleid,version)
zf.write(mf,'%s/manifest' % modulepath)
libname = 'lib%s.a' % moduleid
zf.write('build/%s' % libname, '%s/%s' % (modulepath,libname))
docs = generate_doc(config)
	if docs is not None:
for doc in docs:
for file, html in doc.iteritems():
filename = string.replace(file,'.md','.html')
zf.writestr('%s/documentation/%s'%(modulepath,filename),html)
apidoc_build_path = os.path.join(cwd, "build", "apidoc")
if generate_apidoc(apidoc_build_path):
for file in os.listdir(apidoc_build_path):
if file in ignoreFiles or os.path.isdir(os.path.join(apidoc_build_path, file)):
continue
zf.write(os.path.join(apidoc_build_path, file), '%s/documentation/apidoc/%s' % (modulepath, file))
zip_dir(zf,'assets',modulepath,['.pyc','.js'])
zip_dir(zf,'example',modulepath,['.pyc'])
zip_dir(zf,'platform',modulepath,['.pyc','.js'])
zf.write('LICENSE','%s/LICENSE' % modulepath)
zf.write('module.xcconfig','%s/module.xcconfig' % modulepath)
exports_file = 'metadata.json'
if os.path.exists(exports_file):
zf.write(exports_file, '%s/%s' % (modulepath, exports_file))
zf.close()
if __name__ == '__main__':
global options
parser = optparse.OptionParser()
parser.add_option("-s", "--skip-docs",
dest="skip_docs",
action="store_true",
help="Will skip building documentation in apidoc folder",
default=False)
(options, args) = parser.parse_args()
manifest,mf = validate_manifest()
validate_license()
config = read_ti_xcconfig()
sdk = find_sdk(config)
sys.path.insert(0,os.path.join(sdk,'iphone'))
sys.path.append(os.path.join(sdk, "common"))
compile_js(manifest,config)
build_module(manifest,config)
package_module(manifest,mf,config)
sys.exit(0)
| titanium-forks/appcelerator-modules.ti.touchid | ios/build.py | Python | apache-2.0 | 8,826 |
import pyxel
from pyxel.ui.constants import WIDGET_HOLD_TIME, WIDGET_REPEAT_TIME
class FieldCursor:
def __init__(
self,
data_getter,
pre_history_setter,
post_history_setter,
data_max_length,
data_view_length,
data_count,
):
self._get_data = data_getter
self._add_pre_history = pre_history_setter
self._add_post_history = post_history_setter
self._data_max_length = data_max_length
self._data_view_length = data_view_length
self._data_count = data_count
self._x = 0
self._y = 0
@property
def x(self):
return min(self._x, len(self.data), self._data_max_length - 1)
@property
def _max_x(self):
return min(len(self.data), self._data_max_length - 1)
@property
def y(self):
return self._y
@property
def data(self):
return self._get_data(self._y)
def move(self, x, y):
self._x = x
self._y = y
def move_left(self):
if self.x > 0:
self._x = self.x - 1
def move_right(self):
if self.x < self._max_x:
self._x += 1
def move_up(self):
cursor_view_y = self._x // self._data_view_length
if cursor_view_y > 0:
self._x -= self._data_view_length
elif self._y > 0:
self._y -= 1
data_view_y = self._max_x // self._data_view_length
self._x = (
self._data_view_length * data_view_y + self._x % self._data_view_length
)
def move_down(self):
cursor_view_y = self._x // self._data_view_length
data_view_y = self._max_x // self._data_view_length
if cursor_view_y < data_view_y:
self._x += self._data_view_length
elif self._y < self._data_count - 1:
self._y += 1
self._x %= self._data_view_length
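    # Worked example (hypothetical sizes): with _data_view_length == 16 and a
    # 20-item data line, x == 18 sits on view row 1, so move_up() subtracts 16
    # and lands on x == 2 of row 0; move_down() from row 0 adds 16 whenever the
    # target view row exists for the current data line.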
def insert(self, value):
x = self.x
data = self.data
self._add_pre_history(self.x, self.y)
data.insert(x, value)
data[:] = data[: self._data_max_length]
self._x = x
self.move_right()
self._add_post_history(self.x, self.y)
def backspace(self):
x = self.x
data = self.data
if x == 0:
return
self._add_pre_history(self.x, self.y)
del data[x - 1]
if self._x <= self._max_x:
self.move_left()
self._add_post_history(self.x, self.y)
def delete(self):
x = self.x
data = self.data
if x >= len(data):
return
self._add_pre_history(self.x, self.y)
del data[x]
self._add_post_history(self.x, self.y)
def process_input(self):
if (
pyxel.btn(pyxel.KEY_SHIFT)
or pyxel.btn(pyxel.KEY_CONTROL)
or pyxel.btn(pyxel.KEY_ALT)
or pyxel.btn(pyxel.KEY_SUPER)
):
return
if pyxel.btnp(pyxel.KEY_LEFT, WIDGET_HOLD_TIME, WIDGET_REPEAT_TIME):
self.move_left()
if pyxel.btnp(pyxel.KEY_RIGHT, WIDGET_HOLD_TIME, WIDGET_REPEAT_TIME):
self.move_right()
if pyxel.btnp(pyxel.KEY_UP, WIDGET_HOLD_TIME, WIDGET_REPEAT_TIME):
self.move_up()
if pyxel.btnp(pyxel.KEY_DOWN, WIDGET_HOLD_TIME, WIDGET_REPEAT_TIME):
self.move_down()
if pyxel.btnp(pyxel.KEY_BACKSPACE, WIDGET_HOLD_TIME, WIDGET_REPEAT_TIME):
self.backspace()
if pyxel.btnp(pyxel.KEY_DELETE, WIDGET_HOLD_TIME, WIDGET_REPEAT_TIME):
self.delete()
| ferriman/SSandSP | pyxel-test/venv/lib/python3.8/site-packages/pyxel/editor/field_cursor.py | Python | gpl-3.0 | 3,620 |
#!/usr/bin/python3
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk, Gdk, GLib
from SettingsWidgets import SidePage
from xapp.GSettingsWidgets import *
class Module:
comment = _("Control mouse and touchpad settings")
name = "mouse"
category = "hardware"
def __init__(self, content_box):
keywords = _("mouse, touchpad, synaptic, double-click")
sidePage = SidePage(_("Mouse and Touchpad"), "cs-mouse", keywords, content_box, module=self)
self.sidePage = sidePage
def on_module_selected(self):
if not self.loaded:
print("Loading Mouse module")
self.sidePage.stack = SettingsStack()
self.sidePage.add_widget(self.sidePage.stack)
# Mouse
page = SettingsPage()
settings = page.add_section(_("General"))
switch = GSettingsSwitch(_("Left handed (mouse buttons inverted)"), "org.cinnamon.settings-daemon.peripherals.mouse", "left-handed")
settings.add_row(switch)
switch = GSettingsSwitch(_("Reverse scrolling direction"), "org.cinnamon.settings-daemon.peripherals.mouse", "natural-scroll")
settings.add_row(switch)
switch = GSettingsSwitch(_("Show position of pointer when the Control key is pressed"), "org.cinnamon.settings-daemon.peripherals.mouse", "locate-pointer")
settings.add_row(switch)
switch = GSettingsSwitch(_("Emulate middle click by clicking both left and right buttons"), "org.cinnamon.settings-daemon.peripherals.mouse", "middle-button-enabled")
settings.add_row(switch)
spin = GSettingsSpinButton(_("Drag-and-drop threshold"), "org.cinnamon.settings-daemon.peripherals.mouse", "drag-threshold", _("pixels"), 1, 400)
settings.add_row(spin)
settings = page.add_section(_("Pointer size and speed"))
widget = GSettingsRange(_("Size"), "org.cinnamon.desktop.interface", "cursor-size", _("Smaller"), _("Larger"), 5, 50, show_value=False)
widget.add_mark(24.0, Gtk.PositionType.TOP, None)
settings.add_row(widget)
widget = GSettingsSwitch(_("Custom Acceleration"), "org.cinnamon.settings-daemon.peripherals.mouse", "custom-acceleration")
settings.add_row(widget)
slider = GSettingsRange(_("Acceleration"), "org.cinnamon.settings-daemon.peripherals.mouse", "motion-acceleration", _("Slow"), _("Fast"), 1, 10, show_value=False)
settings.add_reveal_row(slider, "org.cinnamon.settings-daemon.peripherals.mouse", "custom-acceleration")
widget = GSettingsSwitch(_("Custom Sensitivity"), "org.cinnamon.settings-daemon.peripherals.mouse", "custom-threshold")
settings.add_row(widget)
slider = GSettingsRange(_("Sensitivity"), "org.cinnamon.settings-daemon.peripherals.mouse", "motion-threshold", _("Low"), _("High"), 1, 10, show_value=False, flipped=True)
settings.add_reveal_row(slider, "org.cinnamon.settings-daemon.peripherals.mouse", "custom-threshold")
settings = page.add_section(_("Double-Click timeout"))
slider = GSettingsRange(_("Timeout"), "org.cinnamon.settings-daemon.peripherals.mouse", "double-click", _("Short"), _("Long"), 100, 1000, show_value=False)
settings.add_row(slider)
box = SettingsWidget()
widget = Gtk.Button.new_with_label(_("Double-click test"))
widget.connect("button-press-event", self.test_button_clicked)
box.pack_start(widget, True, True, 0)
settings.add_row(box)
self.sidePage.stack.add_titled(page, "mouse", _("Mouse"))
# Touchpad
page = SettingsPage()
switch = GSettingsSwitch("", "org.cinnamon.settings-daemon.peripherals.touchpad", "touchpad-enabled")
switch.label.set_markup("<b>%s</b>" % _("Enable touchpad"))
switch.fill_row()
page.pack_start(switch, False, True, 0)
revealer = SettingsRevealer("org.cinnamon.settings-daemon.peripherals.touchpad", "touchpad-enabled")
page.pack_start(revealer, False, True, 0)
settings = SettingsSection(_("General"))
revealer.add(settings)
switch = GSettingsSwitch(_("Tap to click"), "org.cinnamon.settings-daemon.peripherals.touchpad", "tap-to-click")
settings.add_row(switch)
switch = GSettingsSwitch(_("Disable touchpad when a mouse is attached"), "org.cinnamon.settings-daemon.peripherals.touchpad", "disable-with-external-mouse")
settings.add_row(switch)
switch = GSettingsSwitch(_("Disable touchpad while typing"), "org.cinnamon.settings-daemon.peripherals.touchpad", "disable-while-typing")
settings.add_row(switch)
clickpad_list = [[0, _("Left click only")], [3, _("Automatic")], [1, _("Emulate mouse buttons")], [2, _("Use multiple fingers for right and middle click")]]
combo = GSettingsComboBox(_("Click actions"), "org.cinnamon.settings-daemon.peripherals.touchpad", "clickpad-click", clickpad_list, valtype=int)
settings.add_row(combo)
settings = SettingsSection(_("Scrolling"))
revealer.add(settings)
switch = GSettingsSwitch(_("Reverse scrolling direction"), "org.cinnamon.settings-daemon.peripherals.touchpad", "natural-scroll")
settings.add_row(switch)
clickpad_list = [[0, _("No scrolling")], [3, _("Automatic")], [1, _("Two-finger scrolling")], [2, _("Edge scrolling")]]
combo = GSettingsComboBox(_("Scrolling method"), "org.cinnamon.settings-daemon.peripherals.touchpad", "scrolling-method", clickpad_list, valtype=int)
settings.add_row(combo)
switch = GSettingsSwitch(_("Horizontal scrolling"), "org.cinnamon.settings-daemon.peripherals.touchpad", "horizontal-scrolling")
settings.add_row(switch)
settings = SettingsSection(_("Pointer speed"))
revealer.add(settings)
switch = GSettingsSwitch(_("Custom Acceleration"), "org.cinnamon.settings-daemon.peripherals.touchpad", "custom-acceleration")
settings.add_row(switch)
slider = GSettingsRange(_("Acceleration"), "org.cinnamon.settings-daemon.peripherals.touchpad", "motion-acceleration", _("Slow"), _("Fast"), 1, 10, show_value=False)
settings.add_reveal_row(slider, "org.cinnamon.settings-daemon.peripherals.touchpad", "custom-acceleration")
switch = GSettingsSwitch(_("Custom Sensitivity"), "org.cinnamon.settings-daemon.peripherals.touchpad", "custom-threshold")
settings.add_row(switch)
slider = GSettingsRange(_("Sensitivity"), "org.cinnamon.settings-daemon.peripherals.touchpad", "motion-threshold", _("Low"), _("High"), 1, 10, show_value=False, flipped=True)
settings.add_reveal_row(slider, "org.cinnamon.settings-daemon.peripherals.touchpad", "custom-threshold")
self.sidePage.stack.add_titled(page, "touchpad", _("Touchpad"))
def test_button_clicked(self, widget, event):
if event.type == Gdk.EventType._2BUTTON_PRESS:
widget.set_label(_("Success!"))
GLib.timeout_add(1000, self.reset_test_button, widget)
return True
def reset_test_button(self, widget):
widget.set_label(_("Double-click test"))
return False
| glls/Cinnamon | files/usr/share/cinnamon/cinnamon-settings/modules/cs_mouse.py | Python | gpl-2.0 | 7,459 |
# each GM builds a Team() to gain them the most points possible this season
from sys import exit
class Team(list):
"""Busiest Class in the program. Subclass of list() Object.
The application's goal is to make the best Team() (i.e. the team with
the most points by the end of the season). Object holds roster
(selected players), quota (minimum for each position), and logic for how
to select the next player in a draft.
"""
quota = {"QB": 1, "RB": 2, "WR": 2, "TE": 1, "K": 1, "DEF": 1}
positions = ["QB", "RB", "WR", "TE", "K", "DEF"]
strategies = ["ba", "sd", "r"]
accepted_inputs = ["SD", "BA", "R", "DRAFT"]
def __init__(self, owner, bot=True, test=False):
"""Bot value for automated picking. test value added to avoid loops
when testing.
"""
self.owner = owner
self.pool = None
self.bot = bot
self.turn = False
self.response_dict = {}
self.test = test
self.draftee = None
self.action_param = None
def check_quota(self):
"""Checks self against Team.quota and returns any position where
quota is not met.
"""
quota_not_met = []
for pos in Team.positions:
            # list.count() tests equality against its argument, so passing a
            # lambda always yielded 0; count matching positions explicitly
            num = sum(1 for player in self if player.position == pos)
if num < Team.quota[pos]:
quota_not_met.append(pos)
return quota_not_met
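    # Worked example (hypothetical roster): with Team.quota requiring 2 RBs,
    # a roster holding 1 QB, 1 RB, 2 WRs, 1 TE, 1 K and 1 DEF returns ['RB'],
    # the only position still under quota.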
def draft_player(self, chosen_one):
"""Marks player as drafted and ends turn."""
self.append(chosen_one)
chosen_one.get_drafted()
self.turn = False
return self
def view_options(self, options):
"""Returns top players from list parameter options. Need to be revised
to take altered lists and return options.
"""
# TODO: Update this to take a specified position
options = options[:10]
#for o in options:
# print(options.index(o), o.name, o.position, o.points)
return options
def remove_player(self, tup, p_list):
"""Method for live drafts. Removes player from available list without
actually drafting player to a team.
"""
        # next() with a default avoids StopIteration when no player matches,
        # which keeps the `if p:` guard below meaningful
        p = next((x for x in p_list if (
            x.name.upper(), x.position) == (tup[0].upper(), tup[1].upper())), None)
if p:
p.get_drafted()
return self
def take_turn(self, pool):
"""Loop method to keep prompting user for input until user drafts."""
full_player_list = pool
parameter = "quota"
self.turn = True
"""Logic for automated picking"""
if self.bot:
self.auto_strategy(pool, "sd")
elif self.test:
param = input("> ")
self.actions(full_player_list, param)
self.turn = False
else:
while self.turn:
print("Valid actions: quota, options, sd, filter, find, draft, \
remove, end turn")
param = input("> ")
self.actions(full_player_list, param)
return self
def actions(self, pool, response):
"""Sloppy method to handle user input and return or print
appropriate values.
This method needs to be simplified and needs proper unit tests
Additionally, input needs to be normalized for capitalization.
"""
val = None
# TODO: Make a function that can use default values when testing
# response = "sd" # input("'quota', 'options', 'sd', or 'draft'\n> ") # test with "sd"
if response == 'quota':
val = self.check_quota()
print("Quota not met for:")
print(val)
return val
# TODO: Make this show only players that meet certain restrictions
# TODO: Currently shows best of all players only
elif response == 'options':
val = self.view_options(pool)
for v in val:
print(val.index(v), v.name, v.position, v.points)
elif response == 'sd':
val = []
for p in self.positions:
val.append((p, pool.standard_deviation(p)))
# TODO: added sort. need to add test for the sort
val.sort(key=lambda x: x[1], reverse=True)
for v in val:
print(v[0], v[1])
elif response == 'filter':
position = input("Select position to choose from\n> ")
print("selecting from", position)
options = pool.filter_by_position(position)
for o in options:
print(options.index(o), o.name, o.position, o.points)
elif response == 'find':
self.draftee = pool.return_player(self.find_player())
elif response == 'draft':
print("drafting", self.draftee.name, self.draftee.position)
self.draft_player(self.draftee)
self.reset_draftee()
elif response == 'remove':
print("removing", self.draftee.name, self.draftee.position)
self.draftee.get_drafted()
self.reset_draftee()
elif response == 'end turn':
self.reset_draftee()
self.turn = False
else:
print("Action not recognized. Please enter a valid action.")
# self.turn = False # turn on for testing
if self.draftee:
print("You currently have draftee", self.draftee.name, self.draftee.position)
# print("drafting", draftee.name, draftee.position)
# self.draft_player(draftee)
return val
def auto_strategy(self, player_list, strategy):
"""Defaults to a standard deviation, best available strategy."""
# TODO: Add new auto strategies and a way to select them each turn
new_list = []
# does not use check_quota for now
need_position = self.check_quota()
if strategy == "sd":
for p in Team.positions:
new_list.append((p, player_list.standard_deviation(p)))
position = max(new_list, key=lambda x: x[1])
options = player_list.filter_by_position(position[0])
draftee = options[0]
self.draft_player(draftee)
return self
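    # Strategy sketch: "sd" ranks positions by the standard deviation of the
    # remaining players' points (a wide spread means a steep drop-off from the
    # best available player), then drafts the top player at the widest
    # position, e.g. a QB when QB scores vary far more than K scores.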
# TODO: This module is unused (except for its test)
def user_input(self, prompt):
user_input = prompt.upper()
if user_input == "Q":
exit(0)
else:
return user_input
def find_player(self):
"""Returns tuple(name, position) value to PlayerRepo.return_player()"""
print('Which player would you like to find?')
print('Enter "FIRSTNAME LASTNAME", press enter, then enter "POSITION"')
if self.test:
name = "Drew Brees"
position = "QB"
else:
name = input("name: ")
position = input("position: ")
n_and_p = (name, position)
print(n_and_p)
return n_and_p
def reset_draftee(self):
"""self.draftee is used to control some logic flow and, as such
needs to be reset after each turn.
"""
self.draftee = None
return self
| aspic2/firstDraft | firstDraft/team.py | Python | gpl-3.0 | 7,183 |
from VanishingPoint import *
import os
import time
from dronekit import connect, VehicleMode, LocationGlobal, LocationGlobalRelative
from pymavlink import mavutil # Needed for command message definitions
def arm_and_takeoff(aTargetAltitude):
"""
Arms vehicle and fly to aTargetAltitude.
"""
print "Basic pre-arm checks"
# Don't let the user try to arm until autopilot is ready
while not vehicle.is_armable:
print " Waiting for vehicle to initialise..."
time.sleep(1)
print "Arming motors"
# Copter should arm in GUIDED mode
vehicle.mode = VehicleMode("GUIDED")
vehicle.armed = True
while not vehicle.armed:
print " Waiting for arming..."
time.sleep(1)
print "Taking off!"
vehicle.simple_takeoff(aTargetAltitude) # Take off to target altitude
# Wait until the vehicle reaches a safe height before processing the goto (otherwise the command
# after Vehicle.simple_takeoff will execute immediately).
while True:
print " Altitude: ", vehicle.location.global_relative_frame.alt
if vehicle.location.global_relative_frame.alt>=aTargetAltitude*0.95: #Trigger just below target alt.
print "Reached target altitude"
break
time.sleep(1)
def direction(img,xa,ya,xb,yb,width,height):
#xa,ya,xb,yb=point[0],point[1],point[2],point[3]
cenx = xa+(xb-xa)/2
ceny = ya+(yb-ya)/2
    centerx=width/2
    centery = height/2
    txt = None # make sure txt exists even if no region below matches
    timeout = time.time() + 5
    #while True:
        #_,img = cam.read()
    print("image center is:", centerx, centery)
#img=cv2.blur(img, (3,3))
cv2.circle(img, (centerx,centery), 4, (0,0,123), thickness=3, lineType=7, shift=0) #Screen centre
cv2.circle(img, (cenx,ceny), 4, (0,232,123), thickness=1, lineType=7, shift=0) #new box centre
cv2.rectangle(img, (int(centerx-centery/4), centery+centerx/5), (int(centerx+centery/4), centery-centerx/5), (255, 123, 11), 3) #range rectangle
#cv2.rectangle(img, (xa,ya), (xb,yb), (34, 123, 145), 1) #Object rectangle
#if time.time() > timeout:
# break
if cenx < centerx-centery/4: #for left
#print ("Go Right")
print ("left and right boundaries",(centerx-centery/3,centerx+centery/3))
txt='Left'
loc = (xa+2,ya+(yb-ya)/2)
cv2.putText(img, txt, loc , cv2.FONT_HERSHEY_SIMPLEX, 1, (123,255,255), 3)
if (cenx > centerx-centery/4 and cenx < centerx+centery/4): # For Centre
#print ("center")
txt = "Center"
loc = (xa,ya+2)
cv2.putText(img, txt, loc , cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,123), 3)
if (cenx > centerx+centery/4): # For Right
#print ("Go Left")
txt = "Right"
loc = (xa,ya+2)
cv2.putText(img, txt, loc , cv2.FONT_HERSHEY_SIMPLEX, 1, (255,123,255), 3)
#cv2.imshow("tracker", img)
cv2.imwrite("C:/Users/naiti/Desktop/vanishingpoint/pictures/output/final.jpg", img)
cv2.waitKey(1)
if not txt:
txt = "none"
return txt
#return img
def direction1(cell_num):
"""
Based on block number received from grid(between 1 to 56) we can estimate the poistion for drone to be heading towards
here return is text where vanishing point detected , need to send copter towards that direction.
"""
if cell_num in [3,4,5,10,11,12,17,18,19]:
txt = 0 #left
#print txt
if cell_num in [38,39,40,45,46,47,52,53,54]:
txt = 1 #Right
#print txt
if cell_num in [24,25,26,31,32,33]:
txt = 2 #Center
#print txt
if cell_num in [1,2,8,9,15,16]:
txt = 5 #Top Left
#print txt
if cell_num in [22,23,29,30]:
txt = 3 #Top
#print txt
if cell_num in [36,37,43,44,50,51]:
txt = 6 #Top-Right
#print txt
if cell_num in [6,7,13,14,20,21]:
txt = 7 #Bottom-Left
#print txt
if cell_num in [27,28,34,35]:
txt = 4 #Bottom
#print txt
if cell_num in [41,42,48,49,55,56]:
txt = 8 #Bottom-Right
#print txt
    if txt is None: # 0 ("left") is falsy, so an explicit None test is required
        txt = 99 #none
    return txt
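# Worked example: cell_num == 25 falls in [24,25,26,31,32,33], so direction1()
# returns 2 (center); an unmapped cell such as 0 falls through to 99 (none).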
def condition_yaw(heading, relative=False):
"""
Send MAV_CMD_CONDITION_YAW message to point vehicle at a specified heading (in degrees).
This method sets an absolute heading by default, but you can set the `relative` parameter
to `True` to set yaw relative to the current yaw heading.
By default the yaw of the vehicle will follow the direction of travel. After setting
the yaw using this function there is no way to return to the default yaw "follow direction
of travel" behaviour (https://github.com/diydrones/ardupilot/issues/2427)
For more information see:
http://copter.ardupilot.com/wiki/common-mavlink-mission-command-messages-mav_cmd/#mav_cmd_condition_yaw
"""
if relative:
is_relative = 1 #yaw relative to direction of travel
else:
is_relative = 0 #yaw is an absolute angle
# create the CONDITION_YAW command using command_long_encode()
msg = vehicle.message_factory.command_long_encode(
0, 0, # target system, target component
mavutil.mavlink.MAV_CMD_CONDITION_YAW, #command
0, #confirmation
heading, # param 1, yaw in degrees
0, # param 2, yaw speed deg/s
1, # param 3, direction -1 ccw, 1 cw
is_relative, # param 4, relative offset 1, absolute angle 0
0, 0, 0) # param 5 ~ 7 not used
# send command to vehicle
vehicle.send_mavlink(msg)
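# Usage sketch: condition_yaw(90, relative=True) yaws the copter 90 degrees
# clockwise from its current heading, while condition_yaw(0) points it at an
# absolute heading of 0 degrees (north), per MAV_CMD_CONDITION_YAW semantics.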
"""
Functions that move the vehicle by specifying the velocity components in each direction.
The two functions use different MAVLink commands. The main difference is
that depending on the frame used, the NED velocity can be relative to the vehicle
orientation.
The methods include:
* send_ned_velocity - Sets velocity components using SET_POSITION_TARGET_LOCAL_NED command
* send_global_velocity - Sets velocity components using SET_POSITION_TARGET_GLOBAL_INT command
"""
def send_ned_velocity(velocity_x, velocity_y, velocity_z, duration):
"""
Move vehicle in direction based on specified velocity vectors and
for the specified duration.
This uses the SET_POSITION_TARGET_LOCAL_NED command with a type mask enabling only
velocity components
(http://dev.ardupilot.com/wiki/copter-commands-in-guided-mode/#set_position_target_local_ned).
Note that from AC3.3 the message should be re-sent every second (after about 3 seconds
with no message the velocity will drop back to zero). In AC3.2.1 and earlier the specified
velocity persists until it is canceled. The code below should work on either version
(sending the message multiple times does not cause problems).
See the above link for information on the type_mask (0=enable, 1=ignore).
At time of writing, acceleration and yaw bits are ignored.
"""
msg = vehicle.message_factory.set_position_target_local_ned_encode(
0, # time_boot_ms (not used)
0, 0, # target system, target component
mavutil.mavlink.MAV_FRAME_LOCAL_NED, # frame
0b0000111111000111, # type_mask (only speeds enabled)
0, 0, 0, # x, y, z positions (not used)
velocity_x, velocity_y, velocity_z, # x, y, z velocity in m/s
0, 0, 0, # x, y, z acceleration (not supported yet, ignored in GCS_Mavlink)
0, 0) # yaw, yaw_rate (not supported yet, ignored in GCS_Mavlink)
# send command to vehicle on 1 Hz cycle
for x in range(0,duration):
vehicle.send_mavlink(msg)
time.sleep(1)
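# Usage sketch: send_ned_velocity(2, 0, 0, 5) flies north at ~2 m/s for 5
# seconds (the message is re-sent once per second, as AC3.3+ expects); the
# helpers below follow each move with send_ned_velocity(0, 0, 0, 1) to stop.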
def center(NORTH,SOUTH,EAST,WEST,UP,DOWN,DURATION):
print("Velocity strict North")
send_ned_velocity(NORTH,0,0,DURATION)
send_ned_velocity(0,0,0,1)
def left(NORTH,SOUTH,EAST,WEST,UP,DOWN,DURATION):
print("Velocity strict left")
send_ned_velocity(NORTH,WEST,0,DURATION)
send_ned_velocity(0,0,0,1)
def right(NORTH,SOUTH,EAST,WEST,UP,DOWN,DURATION):
print("Velocity strict right")
send_ned_velocity(NORTH,EAST,0,DURATION)
send_ned_velocity(0,0,0,1)
def top(NORTH,SOUTH,EAST,WEST,UP,DOWN,DURATION):
print("Velocity strict UP")
send_ned_velocity(NORTH,0,UP,DURATION)
send_ned_velocity(0,0,0,1)
def bottom(NORTH,SOUTH,EAST,WEST,UP,DOWN,DURATION):
print("Velocity strict down")
send_ned_velocity(NORTH,0,DOWN,DURATION)
send_ned_velocity(0,0,0,1)
def topl(NORTH,SOUTH,EAST,WEST,UP,DOWN,DURATION):
print("Velocity strict Top Left")
send_ned_velocity(NORTH,WEST,UP,DURATION)
send_ned_velocity(0,0,0,1)
def topr(NORTH,SOUTH,EAST,WEST,UP,DOWN,DURATION):
print("Velocity strict Top right")
send_ned_velocity(NORTH,EAST,UP,DURATION)
send_ned_velocity(0,0,0,1)
def bottoml(NORTH,SOUTH,EAST,WEST,UP,DOWN,DURATION):
print("Velocity strict bottom Left")
send_ned_velocity(NORTH,WEST,DOWN,DURATION)
send_ned_velocity(0,0,0,1)
def bottomr(NORTH,SOUTH,EAST,WEST,UP,DOWN,DURATION):
print("Velocity strict bottom right")
send_ned_velocity(NORTH,EAST,DOWN,DURATION)
send_ned_velocity(0,0,0,1)
def none(NORTH,SOUTH,EAST,WEST,UP,DOWN,DURATION):
print("Velocity strict stated")
#send_ned_velocity(NORTH,EAST,UP,DURATION)
send_ned_velocity(0,0,0,1)
def main():
#Arm and take of to altitude of 5 meters
arm_and_takeoff(5)
filepath = os.path.abspath("C:/Users/naiti/Desktop/vanishingpoint/pictures/input/corridor_6.jpg")
img = cv2.imread(filepath)
img = cv2.resize(img, (640, 480))
hough_lines = hough_transform(img)
if hough_lines:
random_sample = sample_lines(hough_lines, 100)
intersections = find_intersections(random_sample, img)
for x,y in intersections:
cv2.circle(img,(x,y), 5, (124,0,255), -1)
cv2.imwrite("C:/Users/naiti/Desktop/vanishingpoint/pictures/output/circle.jpg", img)
if intersections:
grid_size = min(img.shape[0], img.shape[1]) // 9
print img.shape[0],img.shape[1],img.shape[0]//9,grid_size
best_cell,best_cell_num = find_vanishing_point(img, grid_size, intersections)
#print vanishing_point[0],vanishing_point[1]
rx1 = best_cell[0] - grid_size / 2
ry1 = best_cell[1] - grid_size / 2
rx2 = best_cell[0] + grid_size / 2
ry2 = best_cell[1] + grid_size / 2
cv2.circle(img,(rx1,ry1), 5, (124,111,255), 2) #left point on best cell
cv2.circle(img,(rx2,ry2), 5, (124,111,255), 2) # right point on best cell
cv2.circle(img,(best_cell[0],best_cell[1]), 5, (124,111,255), 2) # centre point on best cell
#cv2.imshow("vanishing_point",best_cell)
#cv2.waitKey(10)
filename = "C:/Users/naiti/Desktop/vanishingpoint/pictures/output/corridor_6.jpg"
cv2.imwrite(filename, img)
direction_point = direction(img,rx1,ry1,rx2,ry2,img.shape[1],img.shape[0])
direction_block = direction1(best_cell_num)
# map the inputs to the function blocks
options ={ 0 : left,
1 : right,
2 : center,
3 : top,
4 : bottom,
5 : topl,
6 : topr,
7 : bottoml,
8 : bottomr,
99: none
}
print "Direction via point method: ", direction_point
print "Direction via block method: ", str(options[direction_block])
"""
Code starts for drone direction heading
Fly the vehicle in a SQUARE path using velocity vectors (the underlying code calls the
SET_POSITION_TARGET_LOCAL_NED command with the velocity parameters enabled).
The thread sleeps for a time (DURATION) which defines the distance that will be travelled.
The code also sets the yaw (MAV_CMD_CONDITION_YAW) using the `set_yaw()` method in each segment
so that the front of the vehicle points in the direction of travel
"""
#Set up velocity vector to map to each direction.
# vx > 0 => fly North
# vx < 0 => fly South
NORTH = 2
SOUTH = -1
# Note for vy:
# vy > 0 => fly East
# vy < 0 => fly West
EAST = 1
WEST = -1
# Note for vz:
# vz < 0 => ascend
# vz > 0 => descend
UP = -0.5
DOWN = 0.5
DURATION = 2 # 2 secs
# Calling direction functions.
options[direction_block](NORTH,SOUTH,EAST,WEST,UP,DOWN,DURATION)
import dronekit_sitl
sitl = dronekit_sitl.start_default()
connection_string = sitl.connection_string()
# Connect to the Vehicle
print 'Connecting to vehicle on: %s' % connection_string
vehicle = connect(connection_string, wait_ready=True)
main()
| naitikshukla/drone | vanish_point/mainv1.py | Python | unlicense | 11,986 |
import multiprocessing as mp
import pytest
from manageiq_client.api import ManageIQClient as MiqApi
from cfme import test_requirements
from cfme.infrastructure.provider import InfraProvider
from cfme.markers.env_markers.provider import ONE
from cfme.rest.gen_data import automation_requests_data as _automation_requests_data
from cfme.rest.gen_data import vm as _vm
from cfme.utils.rest import assert_response
from cfme.utils.rest import query_resource_attributes
from cfme.utils.wait import wait_for
pytestmark = [
test_requirements.rest,
pytest.mark.provider(classes=[InfraProvider], selector=ONE),
pytest.mark.usefixtures('setup_provider')
]
@pytest.fixture(scope='function')
def vm(request, provider, appliance):
return _vm(request, provider, appliance)
def wait_for_requests(requests):
def _finished():
for request in requests:
request.reload()
if request.request_state != 'finished':
return False
return True
wait_for(_finished, num_sec=600, delay=5, message="automation_requests finished")
def gen_pending_requests(collection, rest_api, vm, requests=False):
requests_data = _automation_requests_data(vm, approve=False, requests_collection=requests)
response = collection.action.create(*requests_data[:2])
assert_response(rest_api)
assert len(response) == 2
for resource in response:
assert resource.request_state == 'pending'
return response
def create_requests(collection, rest_api, automation_requests_data, multiple):
if multiple:
requests = collection.action.create(*automation_requests_data)
else:
requests = collection.action.create(
automation_requests_data[0])
assert_response(rest_api)
wait_for_requests(requests)
for request in requests:
assert request.approval_state == 'approved'
resource = collection.get(id=request.id)
assert resource.type == 'AutomationRequest'
def create_pending_requests(collection, rest_api, requests_pending):
# The `approval_state` is `pending_approval`. Wait to see that
# it does NOT change - that would mean the request was auto-approved.
# The `wait_for` is expected to fail.
# It's enough to wait just for the first request, it gives
# other requests the same amount of time to change state.
waiting_request = requests_pending[0]
wait_for(
lambda: waiting_request.approval_state != 'pending_approval',
fail_func=waiting_request.reload,
num_sec=30,
delay=10,
silent_failure=True)
for request in requests_pending:
request.reload()
assert request.approval_state == 'pending_approval'
resource = collection.get(id=request.id)
assert_response(rest_api)
assert resource.type == 'AutomationRequest'
def approve_requests(collection, rest_api, requests_pending, from_detail):
if from_detail:
for request in requests_pending:
request.action.approve(reason="I said so")
else:
collection.action.approve(
reason="I said so", *requests_pending)
assert_response(rest_api)
wait_for_requests(requests_pending)
for request in requests_pending:
request.reload()
assert request.approval_state == 'approved'
def deny_requests(collection, rest_api, requests_pending, from_detail):
if from_detail:
for request in requests_pending:
request.action.deny(reason="I said so")
else:
collection.action.deny(
reason="I said so", *requests_pending)
assert_response(rest_api)
wait_for_requests(requests_pending)
for request in requests_pending:
request.reload()
assert request.approval_state == 'denied'
def edit_requests(collection, rest_api, requests_pending, from_detail):
body = {'options': {'arbitrary_key_allowed': 'test_rest'}}
if from_detail:
# testing BZ 1418331
for request in requests_pending:
request.action.edit(**body)
assert_response(rest_api)
else:
identifiers = []
for i, resource in enumerate(requests_pending):
loc = ({'id': resource.id}, {'href': '{}/{}'.format(collection._href, resource.id)})
identifiers.append(loc[i % 2])
collection.action.edit(*identifiers, **body)
assert_response(rest_api)
for request in requests_pending:
request.reload()
assert request.options['arbitrary_key_allowed'] == 'test_rest'
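# The collection-level branch above alternates identifier styles on purpose:
# for two pending requests with ids 7 and 8 it sends
# [{'id': 7}, {'href': '<collection href>/8'}], exercising both ways the API
# accepts resource references in a single call.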
class TestAutomationRequestsRESTAPI(object):
"""Tests using /api/automation_requests."""
@pytest.fixture(scope='function')
def collection(self, appliance):
return appliance.rest_api.collections.automation_requests
@pytest.fixture(scope='function')
def automation_requests_data(self, vm):
return _automation_requests_data(vm)
@pytest.fixture(scope='function')
def requests_pending(self, appliance, vm):
return gen_pending_requests(
appliance.rest_api.collections.automation_requests, appliance.rest_api, vm)
@pytest.mark.tier(3)
def test_query_request_attributes(self, requests_pending, soft_assert):
"""Tests access to attributes of automation request using /api/automation_requests.
Metadata:
test_flag: rest
Polarion:
assignee: pvala
casecomponent: Rest
initialEstimate: 1/4h
"""
query_resource_attributes(requests_pending[0], soft_assert=soft_assert)
@pytest.mark.tier(3)
@pytest.mark.parametrize(
'multiple', [False, True],
ids=['one_request', 'multiple_requests'])
def test_create_requests(self, collection, appliance, automation_requests_data, multiple):
"""Test adding the automation request using /api/automation_requests.
Metadata:
test_flag: rest, requests
Polarion:
assignee: pvala
casecomponent: Rest
caseimportance: medium
initialEstimate: 1/5h
"""
create_requests(collection, appliance.rest_api, automation_requests_data, multiple)
@pytest.mark.tier(3)
def test_create_pending_requests(self, appliance, requests_pending, collection):
"""Tests creating pending requests using /api/automation_requests.
Metadata:
test_flag: rest, requests
Polarion:
assignee: pvala
casecomponent: Rest
caseimportance: medium
initialEstimate: 1/5h
"""
create_pending_requests(collection, appliance.rest_api, requests_pending)
@pytest.mark.tier(3)
@pytest.mark.parametrize(
'from_detail', [True, False],
ids=['from_detail', 'from_collection'])
def test_approve_requests(self, collection, appliance, requests_pending, from_detail):
"""Tests approving automation requests using /api/automation_requests.
Metadata:
test_flag: rest, requests
Polarion:
assignee: pvala
casecomponent: Rest
caseimportance: medium
initialEstimate: 1/5h
"""
approve_requests(collection, appliance.rest_api, requests_pending, from_detail)
@pytest.mark.tier(3)
@pytest.mark.parametrize(
'from_detail', [True, False],
ids=['from_detail', 'from_collection'])
def test_deny_requests(self, collection, appliance, requests_pending, from_detail):
"""Tests denying automation requests using /api/automation_requests.
Metadata:
test_flag: rest, requests
Polarion:
assignee: pvala
casecomponent: Rest
caseimportance: medium
initialEstimate: 1/5h
"""
deny_requests(collection, appliance.rest_api, requests_pending, from_detail)
@pytest.mark.tier(3)
@pytest.mark.parametrize(
'from_detail', [True, False],
ids=['from_detail', 'from_collection'])
def test_edit_requests(self, collection, appliance, requests_pending, from_detail):
"""Tests editing requests using /api/automation_requests.
Metadata:
test_flag: rest, requests
Bugzilla:
1418338
Polarion:
assignee: pvala
casecomponent: Rest
caseimportance: medium
initialEstimate: 1/6h
"""
# testing BZ 1418338
edit_requests(collection, appliance.rest_api, requests_pending, from_detail)
class TestAutomationRequestsCommonRESTAPI(object):
"""Tests using /api/requests (common collection for all requests types)."""
@pytest.fixture(scope='function')
def collection(self, appliance):
return appliance.rest_api.collections.requests
@pytest.fixture(scope='function')
def automation_requests_data(self, vm):
return _automation_requests_data(vm, requests_collection=True)
@pytest.fixture(scope='function')
def requests_pending(self, appliance, vm):
return gen_pending_requests(
appliance.rest_api.collections.requests, appliance.rest_api, vm, requests=True)
@pytest.mark.tier(3)
def test_query_request_attributes(self, requests_pending, soft_assert):
"""Tests access to attributes of automation request using /api/requests.
Metadata:
test_flag: rest
Polarion:
assignee: pvala
casecomponent: Rest
caseimportance: medium
initialEstimate: 1/6h
"""
query_resource_attributes(requests_pending[0], soft_assert=soft_assert)
@pytest.mark.tier(3)
@pytest.mark.parametrize(
'multiple', [False, True],
ids=['one_request', 'multiple_requests'])
def test_create_requests(self, collection, appliance, automation_requests_data, multiple):
"""Test adding the automation request using /api/requests.
Metadata:
test_flag: rest, requests
Polarion:
assignee: pvala
casecomponent: Rest
caseimportance: medium
initialEstimate: 1/6h
"""
create_requests(collection, appliance.rest_api, automation_requests_data, multiple)
@pytest.mark.tier(3)
def test_create_pending_requests(self, collection, appliance, requests_pending):
"""Tests creating pending requests using /api/requests.
Metadata:
test_flag: rest, requests
Polarion:
assignee: pvala
casecomponent: Rest
caseimportance: medium
initialEstimate: 1/6h
"""
create_pending_requests(collection, appliance.rest_api, requests_pending)
@pytest.mark.tier(3)
@pytest.mark.parametrize(
'from_detail', [True, False],
ids=['from_detail', 'from_collection'])
def test_approve_requests(self, collection, appliance, requests_pending, from_detail):
"""Tests approving automation requests using /api/requests.
Metadata:
test_flag: rest, requests
Polarion:
assignee: pvala
casecomponent: Rest
caseimportance: medium
initialEstimate: 1/6h
"""
approve_requests(collection, appliance.rest_api, requests_pending, from_detail)
@pytest.mark.tier(3)
@pytest.mark.parametrize(
'from_detail', [True, False],
ids=['from_detail', 'from_collection'])
def test_deny_requests(self, collection, appliance, requests_pending, from_detail):
"""Tests denying automation requests using /api/requests.
Metadata:
test_flag: rest, requests
Polarion:
assignee: pvala
casecomponent: Rest
caseimportance: medium
initialEstimate: 1/6h
"""
deny_requests(collection, appliance.rest_api, requests_pending, from_detail)
@pytest.mark.tier(3)
@pytest.mark.parametrize(
'from_detail', [True, False],
ids=['from_detail', 'from_collection'])
def test_edit_requests(self, collection, appliance, requests_pending, from_detail):
"""Tests editing requests using /api/requests.
Metadata:
test_flag: rest, requests
Polarion:
assignee: pvala
casecomponent: Rest
caseimportance: medium
initialEstimate: 1/6h
"""
edit_requests(collection, appliance.rest_api, requests_pending, from_detail)
def test_create_requests_parallel(self, appliance):
"""Create automation requests in parallel.
Metadata:
test_flag: rest, requests
Polarion:
assignee: pvala
casecomponent: Rest
caseimportance: medium
initialEstimate: 1/6h
"""
output = mp.Queue()
entry_point = appliance.rest_api._entry_point
auth = appliance.rest_api._auth
def _gen_automation_requests(output):
api = MiqApi(entry_point, auth, verify_ssl=False)
requests_data = _automation_requests_data(
'nonexistent_vm', requests_collection=True, approve=False)
api.collections.requests.action.create(*requests_data[:2])
result = (api.response.status_code, api.response.json())
output.put(result)
processes = [
mp.Process(target=_gen_automation_requests, args=(output,))
for _ in range(4)]
for proc in processes:
proc.start()
# wait for all processes to finish
for proc in processes:
proc.join()
        for _ in processes:
status, response = output.get()
assert status == 200
for result in response['results']:
assert result['request_type'] == 'automation'
| RedHatQE/cfme_tests | cfme/tests/infrastructure/test_rest_automation_request.py | Python | gpl-2.0 | 13,910 |
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2016 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2016 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2016 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2016 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext as _
from django.utils import timezone
from taiga.base import response
from taiga.base.decorators import detail_route
from taiga.base.api import ReadOnlyListViewSet
from taiga.mdrender.service import render as mdrender
from . import permissions
from . import serializers
from . import services
class HistoryViewSet(ReadOnlyListViewSet):
serializer_class = serializers.HistoryEntrySerializer
content_type = None
def get_content_type(self):
app_name, model = self.content_type.split(".", 1)
return ContentType.objects.get_by_natural_key(app_name, model)
def get_queryset(self):
ct = self.get_content_type()
model_cls = ct.model_class()
qs = model_cls.objects.all()
filtered_qs = self.filter_queryset(qs)
return filtered_qs
def response_for_queryset(self, queryset):
# Switch between paginated or standard style responses
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_pagination_serializer(page)
else:
serializer = self.get_serializer(queryset, many=True)
return response.Ok(serializer.data)
@detail_route(methods=['get'])
def comment_versions(self, request, pk):
obj = self.get_object()
history_entry_id = request.QUERY_PARAMS.get('id', None)
history_entry = services.get_history_queryset_by_model_instance(obj).filter(id=history_entry_id).first()
if history_entry is None:
return response.NotFound()
self.check_permissions(request, 'comment_versions', history_entry)
history_entry.attach_user_info_to_comment_versions()
return response.Ok(history_entry.comment_versions)
@detail_route(methods=['post'])
def edit_comment(self, request, pk):
obj = self.get_object()
history_entry_id = request.QUERY_PARAMS.get('id', None)
history_entry = services.get_history_queryset_by_model_instance(obj).filter(id=history_entry_id).first()
if history_entry is None:
return response.NotFound()
obj = services.get_instance_from_key(history_entry.key)
comment = request.DATA.get("comment", None)
self.check_permissions(request, 'edit_comment', history_entry)
if comment is None:
return response.BadRequest({"error": _("comment is required")})
if history_entry.delete_comment_date or history_entry.delete_comment_user:
return response.BadRequest({"error": _("deleted comments can't be edited")})
# comment_versions can be None if there are no historic versions of the comment
comment_versions = history_entry.comment_versions or []
comment_versions.append({
"date": history_entry.created_at,
"comment": history_entry.comment,
"comment_html": history_entry.comment_html,
"user": {
"id": request.user.pk,
}
})
history_entry.edit_comment_date = timezone.now()
history_entry.comment = comment
history_entry.comment_html = mdrender(obj.project, comment)
history_entry.comment_versions = comment_versions
history_entry.save()
return response.Ok()
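    # Illustrative sketch (not part of the original code): after a single
    # edit, comment_versions holds the superseded revision, shaped like
    # [{"date": <created_at>, "comment": "old text",
    #   "comment_html": "<p>old text</p>", "user": {"id": <editor pk>}}].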
@detail_route(methods=['post'])
def delete_comment(self, request, pk):
obj = self.get_object()
history_entry_id = request.QUERY_PARAMS.get('id', None)
history_entry = services.get_history_queryset_by_model_instance(obj).filter(id=history_entry_id).first()
if history_entry is None:
return response.NotFound()
self.check_permissions(request, 'delete_comment', history_entry)
if history_entry.delete_comment_date or history_entry.delete_comment_user:
return response.BadRequest({"error": _("Comment already deleted")})
history_entry.delete_comment_date = timezone.now()
history_entry.delete_comment_user = {"pk": request.user.pk, "name": request.user.get_full_name()}
history_entry.save()
return response.Ok()
@detail_route(methods=['post'])
def undelete_comment(self, request, pk):
obj = self.get_object()
history_entry_id = request.QUERY_PARAMS.get('id', None)
history_entry = services.get_history_queryset_by_model_instance(obj).filter(id=history_entry_id).first()
if history_entry is None:
return response.NotFound()
self.check_permissions(request, 'undelete_comment', history_entry)
if not history_entry.delete_comment_date and not history_entry.delete_comment_user:
return response.BadRequest({"error": _("Comment not deleted")})
history_entry.delete_comment_date = None
history_entry.delete_comment_user = None
history_entry.save()
return response.Ok()
    # Just for restframework! Because it raises
    # 404 on the main api root if this method does not exist.
def list(self, request):
return response.NotFound()
def retrieve(self, request, pk):
obj = self.get_object()
self.check_permissions(request, "retrieve", obj)
qs = services.get_history_queryset_by_model_instance(obj)
qs = services.prefetch_owners_in_history_queryset(qs)
return self.response_for_queryset(qs)
class EpicHistory(HistoryViewSet):
content_type = "epics.epic"
permission_classes = (permissions.EpicHistoryPermission,)
class UserStoryHistory(HistoryViewSet):
content_type = "userstories.userstory"
permission_classes = (permissions.UserStoryHistoryPermission,)
class TaskHistory(HistoryViewSet):
content_type = "tasks.task"
permission_classes = (permissions.TaskHistoryPermission,)
class IssueHistory(HistoryViewSet):
content_type = "issues.issue"
permission_classes = (permissions.IssueHistoryPermission,)
class WikiHistory(HistoryViewSet):
content_type = "wiki.wikipage"
permission_classes = (permissions.WikiHistoryPermission,)
| xdevelsistemas/taiga-back-community | taiga/projects/history/api.py | Python | agpl-3.0 | 7,344 |
"""
This module converts requested URLs to callback view functions.
RegexURLResolver is the main class here. Its resolve() method takes a URL (as
a string) and returns a ResolverMatch object which provides access to all
attributes of the resolved URL match.
"""
import functools
import re
import threading
from importlib import import_module
from urllib.parse import quote
from django.conf import settings
from django.core.checks import Warning
from django.core.checks.urls import check_resolver
from django.core.exceptions import ImproperlyConfigured
from django.utils.datastructures import MultiValueDict
from django.utils.functional import cached_property
from django.utils.http import RFC3986_SUBDELIMS
from django.utils.regex_helper import normalize
from django.utils.translation import get_language
from .exceptions import NoReverseMatch, Resolver404
from .utils import get_callable
class ResolverMatch:
def __init__(self, func, args, kwargs, url_name=None, app_names=None, namespaces=None):
self.func = func
self.args = args
self.kwargs = kwargs
self.url_name = url_name
        # If a RegexURLResolver doesn't have a namespace or app_name, it
        # passes in an empty value.
self.app_names = [x for x in app_names if x] if app_names else []
self.app_name = ':'.join(self.app_names)
self.namespaces = [x for x in namespaces if x] if namespaces else []
self.namespace = ':'.join(self.namespaces)
if not hasattr(func, '__name__'):
# A class-based view
self._func_path = func.__class__.__module__ + '.' + func.__class__.__name__
else:
# A function-based view
self._func_path = func.__module__ + '.' + func.__name__
view_path = url_name or self._func_path
self.view_name = ':'.join(self.namespaces + [view_path])
def __getitem__(self, index):
return (self.func, self.args, self.kwargs)[index]
def __repr__(self):
return "ResolverMatch(func=%s, args=%s, kwargs=%s, url_name=%s, app_names=%s, namespaces=%s)" % (
self._func_path, self.args, self.kwargs, self.url_name,
self.app_names, self.namespaces,
)
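# Usage sketch (not from the original file): __getitem__ above makes a match
# unpackable like a 3-tuple, e.g.
#   view_func, args, kwargs = resolver_match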
@functools.lru_cache(maxsize=None)
def get_resolver(urlconf=None):
if urlconf is None:
from django.conf import settings
urlconf = settings.ROOT_URLCONF
return RegexURLResolver(r'^/', urlconf)
@functools.lru_cache(maxsize=None)
def get_ns_resolver(ns_pattern, resolver):
# Build a namespaced resolver for the given parent URLconf pattern.
# This makes it possible to have captured parameters in the parent
# URLconf pattern.
ns_resolver = RegexURLResolver(ns_pattern, resolver.url_patterns)
return RegexURLResolver(r'^/', [ns_resolver])
class LocaleRegexDescriptor:
def __get__(self, instance, cls=None):
"""
Return a compiled regular expression based on the active language.
"""
if instance is None:
return self
# As a performance optimization, if the given regex string is a regular
# string (not a lazily-translated string proxy), compile it once and
# avoid per-language compilation.
if isinstance(instance._regex, str):
instance.__dict__['regex'] = self._compile(instance._regex)
return instance.__dict__['regex']
language_code = get_language()
if language_code not in instance._regex_dict:
instance._regex_dict[language_code] = self._compile(str(instance._regex))
return instance._regex_dict[language_code]
def _compile(self, regex):
"""
Compile and return the given regular expression.
"""
try:
return re.compile(regex)
except re.error as e:
raise ImproperlyConfigured(
'"%s" is not a valid regular expression: %s' % (regex, e)
)
class LocaleRegexProvider:
"""
A mixin to provide a default regex property which can vary by active
language.
"""
def __init__(self, regex):
# regex is either a string representing a regular expression, or a
# translatable string (using gettext_lazy) representing a regular
# expression.
self._regex = regex
self._regex_dict = {}
regex = LocaleRegexDescriptor()
def describe(self):
"""
Format the URL pattern for display in warning messages.
"""
description = "'{}'".format(self.regex.pattern)
if getattr(self, 'name', False):
description += " [name='{}']".format(self.name)
return description
def _check_pattern_startswith_slash(self):
"""
Check that the pattern does not begin with a forward slash.
"""
regex_pattern = self.regex.pattern
if not settings.APPEND_SLASH:
# Skip check as it can be useful to start a URL pattern with a slash
# when APPEND_SLASH=False.
return []
if (regex_pattern.startswith('/') or regex_pattern.startswith('^/')) and not regex_pattern.endswith('/'):
warning = Warning(
"Your URL pattern {} has a regex beginning with a '/'. Remove this "
"slash as it is unnecessary. If this pattern is targeted in an "
"include(), ensure the include() pattern has a trailing '/'.".format(
self.describe()
),
id="urls.W002",
)
return [warning]
else:
return []
class RegexURLPattern(LocaleRegexProvider):
def __init__(self, regex, callback, default_args=None, name=None):
LocaleRegexProvider.__init__(self, regex)
self.callback = callback # the view
self.default_args = default_args or {}
self.name = name
def __repr__(self):
return '<%s %s %s>' % (self.__class__.__name__, self.name, self.regex.pattern)
def check(self):
warnings = self._check_pattern_name()
if not warnings:
warnings = self._check_pattern_startswith_slash()
return warnings
def _check_pattern_name(self):
"""
Check that the pattern name does not contain a colon.
"""
if self.name is not None and ":" in self.name:
warning = Warning(
"Your URL pattern {} has a name including a ':'. Remove the colon, to "
"avoid ambiguous namespace references.".format(self.describe()),
id="urls.W003",
)
return [warning]
else:
return []
def resolve(self, path):
match = self.regex.search(path)
if match:
# If there are any named groups, use those as kwargs, ignoring
# non-named groups. Otherwise, pass all non-named arguments as
# positional arguments.
kwargs = match.groupdict()
args = () if kwargs else match.groups()
# In both cases, pass any extra_kwargs as **kwargs.
kwargs.update(self.default_args)
return ResolverMatch(self.callback, args, kwargs, self.name)
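    # Example of the rule above (patterns are illustrative):
    #   r'^(?P<year>[0-9]{4})/$' on '2017/' -> args=(), kwargs={'year': '2017'}
    #   r'^([0-9]{4})/$'         on '2017/' -> args=('2017',), kwargs={}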
@cached_property
def lookup_str(self):
"""
A string that identifies the view (e.g. 'path.to.view_function' or
'path.to.ClassBasedView').
"""
callback = self.callback
# Python 3.5 collapses nested partials, so can change "while" to "if"
# when it's the minimum supported version.
while isinstance(callback, functools.partial):
callback = callback.func
if not hasattr(callback, '__name__'):
return callback.__module__ + "." + callback.__class__.__name__
return callback.__module__ + "." + callback.__qualname__
class RegexURLResolver(LocaleRegexProvider):
def __init__(self, regex, urlconf_name, default_kwargs=None, app_name=None, namespace=None):
LocaleRegexProvider.__init__(self, regex)
# urlconf_name is the dotted Python path to the module defining
# urlpatterns. It may also be an object with an urlpatterns attribute
# or urlpatterns itself.
self.urlconf_name = urlconf_name
self.callback = None
self.default_kwargs = default_kwargs or {}
self.namespace = namespace
self.app_name = app_name
self._reverse_dict = {}
self._namespace_dict = {}
self._app_dict = {}
# set of dotted paths to all functions and classes that are used in
# urlpatterns
self._callback_strs = set()
self._populated = False
self._local = threading.local()
def __repr__(self):
if isinstance(self.urlconf_name, list) and len(self.urlconf_name):
# Don't bother to output the whole list, it can be huge
urlconf_repr = '<%s list>' % self.urlconf_name[0].__class__.__name__
else:
urlconf_repr = repr(self.urlconf_name)
return '<%s %s (%s:%s) %s>' % (
self.__class__.__name__, urlconf_repr, self.app_name,
self.namespace, self.regex.pattern,
)
def check(self):
warnings = self._check_include_trailing_dollar()
for pattern in self.url_patterns:
warnings.extend(check_resolver(pattern))
if not warnings:
warnings = self._check_pattern_startswith_slash()
return warnings
def _check_include_trailing_dollar(self):
"""
Check that include is not used with a regex ending with a dollar.
"""
regex_pattern = self.regex.pattern
if regex_pattern.endswith('$') and not regex_pattern.endswith(r'\$'):
warning = Warning(
"Your URL pattern {} uses include with a regex ending with a '$'. "
"Remove the dollar from the regex to avoid problems including "
"URLs.".format(self.describe()),
id="urls.W001",
)
return [warning]
else:
return []
def _populate(self):
# Short-circuit if called recursively in this thread to prevent
# infinite recursion. Concurrent threads may call this at the same
# time and will need to continue, so set 'populating' on a
# thread-local variable.
if getattr(self._local, 'populating', False):
return
self._local.populating = True
lookups = MultiValueDict()
namespaces = {}
apps = {}
language_code = get_language()
for pattern in reversed(self.url_patterns):
if isinstance(pattern, RegexURLPattern):
self._callback_strs.add(pattern.lookup_str)
p_pattern = pattern.regex.pattern
if p_pattern.startswith('^'):
p_pattern = p_pattern[1:]
if isinstance(pattern, RegexURLResolver):
if pattern.namespace:
namespaces[pattern.namespace] = (p_pattern, pattern)
if pattern.app_name:
apps.setdefault(pattern.app_name, []).append(pattern.namespace)
else:
parent_pat = pattern.regex.pattern
for name in pattern.reverse_dict:
for matches, pat, defaults in pattern.reverse_dict.getlist(name):
new_matches = normalize(parent_pat + pat)
lookups.appendlist(
name,
(
new_matches,
p_pattern + pat,
dict(defaults, **pattern.default_kwargs),
)
)
for namespace, (prefix, sub_pattern) in pattern.namespace_dict.items():
namespaces[namespace] = (p_pattern + prefix, sub_pattern)
for app_name, namespace_list in pattern.app_dict.items():
apps.setdefault(app_name, []).extend(namespace_list)
if not getattr(pattern._local, 'populating', False):
pattern._populate()
self._callback_strs.update(pattern._callback_strs)
else:
bits = normalize(p_pattern)
lookups.appendlist(pattern.callback, (bits, p_pattern, pattern.default_args))
if pattern.name is not None:
lookups.appendlist(pattern.name, (bits, p_pattern, pattern.default_args))
self._reverse_dict[language_code] = lookups
self._namespace_dict[language_code] = namespaces
self._app_dict[language_code] = apps
self._populated = True
self._local.populating = False
@property
def reverse_dict(self):
language_code = get_language()
if language_code not in self._reverse_dict:
self._populate()
return self._reverse_dict[language_code]
@property
def namespace_dict(self):
language_code = get_language()
if language_code not in self._namespace_dict:
self._populate()
return self._namespace_dict[language_code]
@property
def app_dict(self):
language_code = get_language()
if language_code not in self._app_dict:
self._populate()
return self._app_dict[language_code]
def _is_callback(self, name):
if not self._populated:
self._populate()
return name in self._callback_strs
def resolve(self, path):
path = str(path) # path may be a reverse_lazy object
tried = []
match = self.regex.search(path)
if match:
new_path = path[match.end():]
for pattern in self.url_patterns:
try:
sub_match = pattern.resolve(new_path)
except Resolver404 as e:
sub_tried = e.args[0].get('tried')
if sub_tried is not None:
tried.extend([pattern] + t for t in sub_tried)
else:
tried.append([pattern])
else:
if sub_match:
# Merge captured arguments in match with submatch
sub_match_dict = dict(match.groupdict(), **self.default_kwargs)
sub_match_dict.update(sub_match.kwargs)
# If there are *any* named groups, ignore all non-named groups.
# Otherwise, pass all non-named arguments as positional arguments.
sub_match_args = sub_match.args
if not sub_match_dict:
sub_match_args = match.groups() + sub_match.args
return ResolverMatch(
sub_match.func,
sub_match_args,
sub_match_dict,
sub_match.url_name,
[self.app_name] + sub_match.app_names,
[self.namespace] + sub_match.namespaces,
)
tried.append([pattern])
raise Resolver404({'tried': tried, 'path': new_path})
raise Resolver404({'path': path})
@cached_property
def urlconf_module(self):
if isinstance(self.urlconf_name, str):
return import_module(self.urlconf_name)
else:
return self.urlconf_name
@cached_property
def url_patterns(self):
# urlconf_module might be a valid set of patterns, so we default to it
patterns = getattr(self.urlconf_module, "urlpatterns", self.urlconf_module)
try:
iter(patterns)
except TypeError:
msg = (
"The included URLconf '{name}' does not appear to have any "
"patterns in it. If you see valid patterns in the file then "
"the issue is probably caused by a circular import."
)
raise ImproperlyConfigured(msg.format(name=self.urlconf_name))
return patterns
def resolve_error_handler(self, view_type):
callback = getattr(self.urlconf_module, 'handler%s' % view_type, None)
if not callback:
# No handler specified in file; use lazy import, since
# django.conf.urls imports this file.
from django.conf import urls
callback = getattr(urls, 'handler%s' % view_type)
return get_callable(callback), {}
def reverse(self, lookup_view, *args, **kwargs):
return self._reverse_with_prefix(lookup_view, '', *args, **kwargs)
def _reverse_with_prefix(self, lookup_view, _prefix, *args, **kwargs):
if args and kwargs:
raise ValueError("Don't mix *args and **kwargs in call to reverse()!")
text_args = [str(v) for v in args]
text_kwargs = {k: str(v) for (k, v) in kwargs.items()}
if not self._populated:
self._populate()
possibilities = self.reverse_dict.getlist(lookup_view)
for possibility, pattern, defaults in possibilities:
for result, params in possibility:
if args:
if len(args) != len(params):
continue
candidate_subs = dict(zip(params, text_args))
else:
if set(kwargs).symmetric_difference(params).difference(defaults):
continue
matches = True
for k, v in defaults.items():
if kwargs.get(k, v) != v:
matches = False
break
if not matches:
continue
candidate_subs = text_kwargs
# WSGI provides decoded URLs, without %xx escapes, and the URL
# resolver operates on such URLs. First substitute arguments
# without quoting to build a decoded URL and look for a match.
# Then, if we have a match, redo the substitution with quoted
# arguments in order to return a properly encoded URL.
candidate_pat = _prefix.replace('%', '%%') + result
if re.search('^%s%s' % (re.escape(_prefix), pattern), candidate_pat % candidate_subs):
# safe characters from `pchar` definition of RFC 3986
url = quote(candidate_pat % candidate_subs, safe=RFC3986_SUBDELIMS + '/~:@')
# Don't allow construction of scheme relative urls.
if url.startswith('//'):
url = '/%%2F%s' % url[2:]
return url
# lookup_view can be URL name or callable, but callables are not
# friendly in error messages.
m = getattr(lookup_view, '__module__', None)
n = getattr(lookup_view, '__name__', None)
if m is not None and n is not None:
lookup_view_s = "%s.%s" % (m, n)
else:
lookup_view_s = lookup_view
patterns = [pattern for (possibility, pattern, defaults) in possibilities]
if patterns:
if args:
arg_msg = "arguments '%s'" % (args,)
elif kwargs:
arg_msg = "keyword arguments '%s'" % (kwargs,)
else:
arg_msg = "no arguments"
msg = (
"Reverse for '%s' with %s not found. %d pattern(s) tried: %s" %
(lookup_view_s, arg_msg, len(patterns), patterns)
)
else:
msg = (
"Reverse for '%(view)s' not found. '%(view)s' is not "
"a valid view function or pattern name." % {'view': lookup_view_s}
)
raise NoReverseMatch(msg)
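    # Reverse-lookup sketch (names are hypothetical): for a pattern registered
    # as url(r'^articles/(?P<year>[0-9]{4})/$', views.year_archive,
    # name='archive'), resolver.reverse('archive', year='2017') yields
    # 'articles/2017/'.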
class LocaleRegexURLResolver(RegexURLResolver):
"""
A URL resolver that always matches the active language code as URL prefix.
Rather than taking a regex argument, we just override the ``regex``
function to always return the active language-code as regex.
"""
def __init__(
self, urlconf_name, default_kwargs=None, app_name=None, namespace=None,
prefix_default_language=True,
):
super().__init__(None, urlconf_name, default_kwargs, app_name, namespace)
self.prefix_default_language = prefix_default_language
@property
def regex(self):
language_code = get_language() or settings.LANGUAGE_CODE
if language_code not in self._regex_dict:
if language_code == settings.LANGUAGE_CODE and not self.prefix_default_language:
regex_string = ''
else:
regex_string = '^%s/' % language_code
self._regex_dict[language_code] = re.compile(regex_string)
return self._regex_dict[language_code]
| ifduyue/django | django/urls/resolvers.py | Python | bsd-3-clause | 21,041 |
# -*- coding: utf-8 -*-
from PIL import Image
def get_target_size(img_size, size, exact_size=False):
assert img_size[0] and img_size[1]
assert size[0] or size[1]
size = list(size)
if not size[0]:
size[0] = size[1] * img_size[0] // img_size[1]
if not size[1]:
size[1] = size[0] * img_size[1] // img_size[0]
if not exact_size:
return min(img_size[0], size[0]), min(img_size[1], size[1])
else:
return tuple(size)
def crop_by_aspect_ratio(image, aspect_ratio):
"""crop image by scale without aspect ratio distortion
:param image: a PIL image object
:param aspect_ratio: aspect ratio, as a 2-tuple: (width, height).
:returns: An :py:class:`~PIL.Image.Image` object.
"""
size = image.size
size1 = (size[0], size[0] * aspect_ratio[1] // aspect_ratio[0])
size2 = (size[1] * aspect_ratio[0] // aspect_ratio[1], size[1])
new_size = min(size1, size2)
if new_size == image.size:
return image
# calc left, upper, right, lower
left = (size[0] - new_size[0]) // 2
right = left + new_size[0]
upper = (size[1] - new_size[1]) // 2
lower = upper + new_size[1]
return image.crop((left, upper, right, lower))
def crop_resize(image, size, exact_size=False):
"""Crop out the proportional middle of the image and set to the desired size.
:param image: a PIL image object
:param size: a 2-tuple of (width,height); at least one must be specified
:param exact_size: whether to scale up for smaller images.
Defaults to ``False``.
If the image is bigger than the sizes passed,
this works as expected.
If the image is smaller than the sizes passed,
then behavior is dictated by the ``exact_size`` flag.
If the ``exact_size`` flag is false,
the image will be returned unmodified.
If the ``exact_size`` flag is true,
the image will be scaled up to the required size.
:return: An :py:class:`~PIL.Image.Image` object.
"""
target_size = get_target_size(image.size, size, exact_size)
img2 = crop_by_aspect_ratio(image, target_size)
return img2.resize(target_size, Image.ANTIALIAS)
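# Usage sketch (not part of the original module; the file names are
# hypothetical):
if __name__ == '__main__':
    im = Image.open('input.jpg')
    # crop the proportional middle, then scale to exactly 200x100;
    # exact_size=True also scales up sources smaller than the target
    thumb = crop_resize(im, (200, 100), exact_size=True)
    thumb.save('thumb.png')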
| codeif/crimg | crimg/api.py | Python | mit | 2,205 |
import pytest
import pytz
from dateutil import parser
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.log_validator import LogValidator
@pytest.mark.tier(2)
@pytest.mark.uncollectif(lambda appliance: appliance.version < '5.9')
def test_configure_vmdb_last_start_time(appliance):
"""
Go to Settings -> Configure -> Database
Compare Vmdb Last Start Time with output of command
"journalctl -u rh-postgresql{}-postgresql.service --boot=0 | sed '4!d'"
"""
view = navigate_to(appliance.server, 'DatabaseSummary')
for item in view.summary('Properties').get_text_of('Data Directory').split('/'):
if 'rh-postgresql' in item:
logs_last_start_time = appliance.ssh_client.run_command(
"journalctl -u {}-postgresql.service --boot=0 | sed '4!d'".format(item))
ui_last_start_time = parser.parse(view.summary('Properties').get_text_of('Last Start Time'))
# timedatectl is used here as we will get full timezone name, like 'US/Eastern',
# which is easier and safer(to omit UnknownTimeZoneError) to use later
tz = pytz.timezone(appliance.ssh_client.run_command("timedatectl | grep 'Time zone'")
.output.strip().split(' ')[2])
    ui_last_start_updated = ui_last_start_time.astimezone(tz)
assert ui_last_start_updated.strftime('%Y-%m-%d %H:%M:%S %Z') in logs_last_start_time.output
@pytest.mark.tier(1)
def test_configuration_database_garbage_collection(appliance):
"""
Navigate to Settings -> Configuration -> Diagnostics -> CFME Region -> Database
Submit Run database Garbage Collection Now a check UI/logs for errors.
"""
evm_tail = LogValidator('/var/www/miq/vmdb/log/evm.log',
matched_patterns=[
'.*Queued the action: \[Database GC\] being run for user:.*'],
failure_patterns=['.*ERROR.*'])
evm_tail.fix_before_start()
view = navigate_to(appliance.server.zone.region, 'Database')
view.submit_db_garbage_collection_button.click()
view.flash.assert_message('Database Garbage Collection successfully initiated')
evm_tail.validate_logs()
| lkhomenk/integration_tests | cfme/tests/configure/test_database_ui.py | Python | gpl-2.0 | 2,269 |
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2014 Dean Jackson <deanishe@deanishe.net>
#
# MIT Licence. See http://opensource.org/licenses/MIT
#
# Created on 2014-03-07
#
"""
Generate a ReST table of icons in :mod:`workflow.workflow` with previews.
"""
from __future__ import print_function, unicode_literals
import os
import subprocess
import workflow
outdir = os.path.join(os.path.dirname(__file__), 'doc', '_static')
def make_thumbnail(infile, outfile):
cmd = ['sips', '-Z', '64', '-s', 'format', 'png', infile, '--out', outfile]
# print(cmd)
subprocess.call(cmd)
entries = []
col1 = col2 = 0
for name in dir(workflow):
if name.startswith('ICON_'):
const = getattr(workflow, name)
# print('{} : {}'.format(name, const))
filename = '{}.png'.format(name)
make_thumbnail(const, os.path.join(outdir, filename))
image = '.. image:: _static/{}'.format(filename)
entries.append((name, image))
if len(name) > col1:
col1 = len(name)
if len(image) > col2:
col2 = len(image)
col1 += 5
print('+' + ('-' * col1) + '+' + ('-' * col2) + '+')
print('| Name'.ljust(col1 + 1) + '| Preview'.ljust(col2 + 1) + '|')
print('+' + ('=' * col1) + '+' + ('=' * col2) + '+')
for name, image in entries:
    print('|``{}``'.format(name).ljust(col1 + 1) + '|' + image.ljust(col2) + '|')
    # ReST grid tables need a border line after every row, not just at the end
    print('+' + ('-' * col1) + '+' + ('-' * col2) + '+')
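# With a border after every row, the emitted ReST grid table looks like
# (widths and rows abbreviated; ICON_ACCOUNT is one of the workflow constants):
# +--------------------+-------------------------------------+
# | Name               | Preview                             |
# +====================+=====================================+
# |``ICON_ACCOUNT``    |.. image:: _static/ICON_ACCOUNT.png  |
# +--------------------+-------------------------------------+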
| JT5D/Alfred-Popclip-Sublime | latest/alfred-workflow-master/gen_icon_table.py | Python | gpl-2.0 | 1,451 |
""" import the necessary modules """
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.sql import text
# Create a class that will give us an object that we can use to connect to a database
class MySQLConnection(object):
def __init__(self, app, db):
config = {
'host': 'localhost',
'database': db, # we got db as an argument
'user': 'root',
'password': 'root',
'port': '3306' # change the port to match the port your SQL server is running on
}
# this will use the above values to generate the path to connect to your sql database
DATABASE_URI = "mysql://{}:{}@127.0.0.1:{}/{}".format(config['user'], config['password'], config['port'], config['database'])
app.config['SQLALCHEMY_DATABASE_URI'] = DATABASE_URI
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
# establish the connection to database
self.db = SQLAlchemy(app)
# this is the method we will use to query the database
def query_db(self, query, data=None):
result = self.db.session.execute(text(query), data)
if query[0:6].lower() == 'select':
# if the query was a select
# convert the result to a list of dictionaries
list_result = [dict(r) for r in result]
# return the results as a list of dictionaries
return list_result
elif query[0:6].lower() == 'insert':
            # if the query was an insert, commit the changes and
            # return the id of the row that was inserted
            self.db.session.commit()
            return result.lastrowid
else:
# if the query was an update or delete, return nothing and commit changes
self.db.session.commit()
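# Usage sketch in server.py (names are hypothetical); note the named bind
# parameters consumed by sqlalchemy.sql.text above:
#   mysql = MySQLConnector(app, 'mydb')
#   users = mysql.query_db("SELECT * FROM users WHERE id = :id", {'id': 1})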
# This is the module method to be called by the user in server.py. Make sure to provide the db name!
def MySQLConnector(app, db):
    return MySQLConnection(app, db)
| authman/Python201609 | Guerrero_Melissa/flask_mysql/mysqlconnection.py | Python | mit | 1,977 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.core.platform.power_monitor import android_ds2784_power_monitor
class DS2784PowerMonitorMonitorTest(unittest.TestCase):
    def testEnergyConsumption(self):
data = ('0000 1000 -10 12\n'
'1800 1000 -10 11\n'
'3600 1000 -10 09\n'
'5400 0000 -20 08\n'
'7200 0000 -20 11\n'
'9000 0000 -20 11\n')
results = (
android_ds2784_power_monitor.DS2784PowerMonitor.ParseSamplingOutput(
data))
self.assertEqual(results['power_samples_mw'], [1.2e-07, 1.1e-07, 9e-08,
1.6e-07, 2.2e-07, 2.2e-07])
self.assertEqual(results['energy_consumption_mwh'], 2.1e-07)
| sgraham/nope | tools/telemetry/telemetry/core/platform/power_monitor/android_ds2784_power_monitor_unittest.py | Python | bsd-3-clause | 884 |
from Tkinter import *
import PIL.Image
from PIL import ImageTk
WIDTH = 1366#3286#
HEIGHT = 768#1440#
# TODO: add multi-monitor support (figure out dimension of current monitor)
def init(window):
'''
global WIDTH, HEIGHT
WIDTH = window.winfo_screenwidth() #1366
HEIGHT = window.winfo_screenheight() #768
print "VALUES:", WIDTH, HEIGHT
'''
return
# supply vector of n colours and their centers and linearly combine them
def shadeN(colours, centers, v):
if len(colours) == 1:
return colours[0]
elif len(colours) == 0:
return (0,0,0)
# centers must be sorted
if v < min(centers): v = min(centers)
if v > max(centers): v = max(centers)
# figure out which range v is in
r = (0,1)
rIndex=0
for i in range(len(centers)-1):
m = centers[i]
M = centers[i+1]
if v >= m and v <= M:
r = (m, M)
rIndex=i
break
# now just return the shade in that range
vp = (1.0*v - 1.0*r[0])/(1.0*r[1]-1.0*r[0])
return shade(colours[rIndex], colours[rIndex+1], vp)
def shade(cA, cB, v):
# combine the colours cA and cB with proportions
# specified by v
# v in [0, 1]
# cA/cB given as 255RGB values
return combineColours(Iw=v, Is=1.0, LISC=(0,0,0), HIWC=cB, LIWC=cA)
def combineColours(Iw, Is, LISC, HIWC, LIWC):
A = [v*Iw for v in HIWC]
B = [v*(1.0-Iw) for v in LIWC]
ApB = [a+b for (a,b) in zip(A, B)]
C = [v*Is for v in ApB]
D = [v*(1.0-Is) for v in LISC]
WC = [a+b for (a,b) in zip(C, D)]
#WC_255 = [int(v*255) for v in WC]
#print WC
#return WC_255
return WC
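# Worked example (values are illustrative): blend red -> white -> blue over
# centers [0.0, 0.5, 1.0]; v=0.25 falls in the first range with vp=0.5, so
#   shadeN([(1, 0, 0), (1, 1, 1), (0, 0, 1)], [0.0, 0.5, 1.0], 0.25)
# returns (1.0, 0.5, 0.5), halfway between red and white.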
def toFloatfHex(colour):
if colour[0]=='#':
colour=colour[1:]
r = int(colour[:2], 16)
g = int(colour[2:4], 16)
b = int(colour[4:], 16)
ret = toFloatf255((r, g, b))
#print ret
return ret
def toHexf255(colour):
return '#%02x%02x%02x'%tuple([int(v) for v in colour])
def toHex(colour):
return toHexf255([int(255*v) for v in colour])
def toFloatf255(colour):
return tuple([i/255.0 for i in colour])
BG_DARK_f = (0.1, 0.1, 0.1)
BG_DARK = toHex(BG_DARK_f)
BG_LIGHT_f= (0.2, 0.2, 0.2)
BG_LIGHT= toHex(BG_LIGHT_f)
PRIMARY_f = (0.258824, 0.521569, 0.956863)#(0.0, 0.65, 1.0)
#PRIMARY_f = (0.0, 1.0, 0.2)
PRIMARY = toHex(PRIMARY_f)
LIGHT_PRIMARY_f = tuple([v*0.4 + 0.6*1 for v in PRIMARY_f])
LIGHT_PRIMARY = toHex(LIGHT_PRIMARY_f)
DARK_PRIMARY_f = tuple([v*0.5 + 0.5*0 for v in PRIMARY_f])
DARK_PRIMARY = toHex(DARK_PRIMARY_f)
COMPLEMENT_f = tuple([1.0-v for v in PRIMARY_f])
COMPLEMENT = toHex(COMPLEMENT_f)
#LIGHT_PRIMARY = toHex((1.0, 0.85, 0.5))
#PRIMARY = toHex((1.0, 0.5, 0.0))
#DARK_PRIMARY = toHex((0.5, 0.25, 0.0))
mainFont = "Lucida Sans"
#mainFont = "Droid Sans"
SMALL_FONT = (mainFont, 8)
SMALL_BOLD_FONT = (mainFont, 8, "bold")
FONT = (mainFont, 10)
BOLD_FONT = (mainFont, 10, "bold")
MED_FONT = (mainFont, 12)
LARGE_FONT = (mainFont, 18, "bold")
'''
# ALTERNATIVE QUICKREADER COLOUR SCHEME
tk_FGC = (1.0, 1.0, 1.0) # foreground
tk_BGC = (0.0, 0.0, 0.0)
tk_FGStr = "white"
tk_BGStr = "black"
tk_LHLC = (0.1, 0.1, 0.1) # tag highlight colour on mouseover
tk_LISC = (0.0, 0.35, 0.54) # colour of low-importance sentences
tk_HIWC = (1.0, 0.6, 0.0) # colour of high-importance words
tk_LIWC = (0.85, 0.9, 1.0) # colour of low-importance words
tk_BHC = (0.0, 0.65, 1.0) # bar highlight colour (for sentence importance)
tk_BPC = tk_FGC # bar position colour
'''
def loadImage(fileName, size, imageList, root, background=BG_DARK):
#global IMG_LIST
tk_image=[]
with open(fileName,"rb") as fp:
original = PIL.Image.open(fp)
resized = original.resize(size,PIL.Image.ANTIALIAS)
image = ImageTk.PhotoImage(resized)
tk_image = Label(root, image=image, background=background, cursor='hand1')
imageList.append(image)
    return tk_image, imageList
| tannerbohn/RandomAttraction | graphicsTools.py | Python | gpl-2.0 | 3,697 |
"""
Tiny framework used to power LWR application, nothing in here is specific to running
or staging jobs. Mostly deals with routing web traffic and parsing parameters.
"""
from webob import Request
from webob import Response
from webob import exc
import inspect
from os.path import exists
import re
from json import dumps
from six import Iterator
class RoutingApp(object):
"""
Abstract definition for a python web application.
"""
def __init__(self):
self.routes = []
def add_route(self, route, controller, **args):
route_regex = self.__template_to_regex(route)
self.routes.append((route_regex, controller, args))
def __call__(self, environ, start_response):
req = Request(environ)
req.app = self
for route, controller, args in self.routes:
match = route.match(req.path_info)
if match:
request_args = dict(args)
route_args = match.groupdict()
request_args.update(route_args)
return controller(environ, start_response, **request_args)
return exc.HTTPNotFound()(environ, start_response)
def __template_to_regex(self, template):
var_regex = re.compile(r'''
\{ # The exact character "{"
(\w+) # The variable name (restricted to a-z, 0-9, _)
(?::([^}]+))? # The optional :regex part
\} # The exact character "}"
''', re.VERBOSE)
regex = ''
last_pos = 0
for match in var_regex.finditer(template):
regex += re.escape(template[last_pos:match.start()])
var_name = match.group(1)
expr = match.group(2) or '[^/]+'
expr = '(?P<%s>%s)' % (var_name, expr)
regex += expr
last_pos = match.end()
regex += re.escape(template[last_pos:])
regex = '^%s$' % regex
return re.compile(regex)
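# Route-template sketch (controller names are hypothetical): add_route turns
# '/jobs/{job_id}' into r'^/jobs/(?P<job_id>[^/]+)$' and '/files/{path:.+}'
# into r'^/files/(?P<path>.+)$', so matched segments reach the controller as
# keyword arguments.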
def build_func_args(func, *arg_dicts):
args = {}
def add_args(func_args, arg_values):
for func_arg in func_args:
if func_arg not in args and func_arg in arg_values:
args[func_arg] = arg_values[func_arg]
func_args = inspect.getargspec(func).args
for arg_dict in arg_dicts:
add_args(func_args, arg_dict)
return args
class Controller(object):
"""
Wraps python functions into controller methods.
"""
def __init__(self, response_type='OK'):
self.response_type = response_type
def __get_client_address(self, environ):
"""
http://stackoverflow.com/questions/7835030/obtaining-client-ip-address-from-a-wsgi-app-using-eventlet
"""
try:
return environ['HTTP_X_FORWARDED_FOR'].split(',')[-1].strip()
except KeyError:
return environ['REMOTE_ADDR']
def __add_args(self, args, func_args, arg_values):
for func_arg in func_args:
if func_arg not in args and func_arg in arg_values:
args[func_arg] = arg_values[func_arg]
def __handle_access(self, req, environ, start_response):
access_response = None
if hasattr(self, '_check_access'):
access_response = self._check_access(req, environ, start_response)
return access_response
def __build_args(self, func, args, req, environ):
args = build_func_args(func, args, req.GET, self._app_args(args, req))
func_args = inspect.getargspec(func).args
for func_arg in func_args:
if func_arg == "ip":
args["ip"] = self.__get_client_address(environ)
if 'body' in func_args:
args['body'] = req.body_file
return args
def __execute_request(self, func, args, req, environ):
args = self.__build_args(func, args, req, environ)
try:
result = func(**args)
except exc.HTTPException as e:
result = e
return result
def __build_response(self, result):
if self.response_type == 'file':
resp = file_response(result)
else:
resp = Response(body=self.body(result))
return resp
def __call__(self, func):
def controller_replacement(environ, start_response, **args):
req = Request(environ)
access_response = self.__handle_access(req, environ, start_response)
if access_response:
return access_response
result = self.__execute_request(func, args, req, environ)
resp = self.__build_response(result)
return resp(environ, start_response)
controller_replacement.func = func
controller_replacement.response_type = self.response_type
controller_replacement.body = self.body
controller_replacement.__name__ = func.__name__
controller_replacement.__controller__ = True
return controller_replacement
def body(self, result):
body = 'OK'
if self.response_type == 'json':
body = dumps(result)
return body
def _prepare_controller_args(self, req, args):
pass
def file_response(path):
resp = Response()
if exists(path):
resp.app_iter = FileIterator(path)
else:
raise exc.HTTPNotFound("No file found with path %s." % path)
return resp
class FileIterator(Iterator):
def __init__(self, path):
self.input = open(path, 'rb')
def __iter__(self):
return self
def __next__(self):
buffer = self.input.read(1024)
if(buffer == ""):
raise StopIteration
return buffer
| jmchilton/lwr | lwr/web/framework.py | Python | apache-2.0 | 5,653 |
'''
Created by auto_sdk on 2015.06.23
'''
from aliyun.api.base import RestApi
class Rds20130528DescribeSecurityIpsRequest(RestApi):
def __init__(self,domain='rds.aliyuncs.com',port=80):
RestApi.__init__(self,domain, port)
self.DBInstanceId = None
def getapiname(self):
return 'rds.aliyuncs.com.DescribeSecurityIps.2013-05-28'
| francisar/rds_manager | aliyun/api/rest/Rds20130528DescribeSecurityIpsRequest.py | Python | mit | 335 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-13 06:57
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('clothing', '0002_auto_20161112_2036'),
]
operations = [
migrations.RemoveField(
model_name='stormchaser',
name='zipcode',
),
migrations.RemoveField(
model_name='wardrobe',
name='inLaundry',
),
migrations.AddField(
model_name='stormchaser',
name='city',
field=models.CharField(blank=True, max_length=30),
),
migrations.AddField(
model_name='stormchaser',
name='state',
field=models.CharField(blank=True, max_length=2),
),
migrations.AddField(
model_name='stormchaser',
name='wardrobe',
field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to='clothing.Wardrobe'),
),
migrations.AddField(
model_name='wardrobe',
name='for_chilly',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='wardrobe',
name='for_cold',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='wardrobe',
name='for_hot',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='wardrobe',
name='for_mild',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='wardrobe',
name='for_warm',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='wardrobe',
name='in_laundry',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='wardrobe',
name='cloth_name',
field=models.IntegerField(choices=[(1, 'T-SHIRT'), (2, 'POLO'), (3, 'BUTTONED_SHIRT'), (4, 'JEANS'), (5, 'SHORTS'), (6, 'SWEATS'), (7, 'COAT'), (8, 'HOODIE'), (9, 'SWEATER'), (10, 'EARMUFFS'), (11, 'SCARF'), (12, 'GLOVES')]),
),
]
| c-pari22/dressme | dressme/clothing/migrations/0003_auto_20161113_0657.py | Python | gpl-3.0 | 2,405 |
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from utils import HTML5RequiredMixin
from models import BucketList, BucketListItem
class SignupForm(HTML5RequiredMixin, UserCreationForm):
"""
Form that creates a user from the given username and password
"""
password2 = forms.CharField(label="Confirm Password",
widget=forms.PasswordInput,
help_text="Enter the same password as above, for verification.")
class SigninForm(HTML5RequiredMixin, AuthenticationForm):
"""
Form for authenticating users with their username and password
"""
def confirm_login_allowed(self, user):
"""
overrides the default method to ensure that users can still
log in with is_active=False
"""
pass
class BucketListForm(HTML5RequiredMixin, forms.ModelForm):
"""
    Form for creating and updating a bucketlist from the given
    name and description.
"""
class Meta:
model = BucketList
fields = ('name', 'description',)
def __init__(self, *args, **kwargs):
super(BucketListForm, self).__init__(*args, **kwargs)
self.fields['name'].required = True
class BucketListItemForm(HTML5RequiredMixin, forms.ModelForm):
"""
    Form for creating and updating a bucketlist item from the given
    name and done status.
"""
class Meta:
model = BucketListItem
fields = ('name', 'done')
| andela-uawili/django-bucketlist-application | bucketlist/dashboard/forms.py | Python | mit | 1,530 |
#!/usr/bin/env python3
"""Project Euler - Problem 11 Module"""
from numpy import *
def problem11(grid, nr_adjacent):
"""Problem 11 - Largest product in a grid"""
result = 0
GRID_SHAPE = grid.shape
for row in range(0, GRID_SHAPE[0] - (nr_adjacent-1)):
for col in range(0, GRID_SHAPE[1] - (nr_adjacent-1)):
            subgrid = grid[row:row+nr_adjacent, col:col+nr_adjacent]
# Check n rows and columns
maxval = 0
for i in range(0, nr_adjacent):
maxval = max(maxval, prod(subgrid[i, :]), prod(subgrid[:, i]))
# Check diagonals
maxval = max(maxval, prod(diag(subgrid)), prod(diag(fliplr(subgrid))))
result = max(result, maxval)
return result
GRID = array([\
[8,2,22,97,38,15,0,40,0,75,4,5,7,78,52,12,50,77,91,8],\
[49,49,99,40,17,81,18,57,60,87,17,40,98,43,69,48,4,56,62,0],\
[81,49,31,73,55,79,14,29,93,71,40,67,53,88,30,3,49,13,36,65],\
[52,70,95,23,4,60,11,42,69,24,68,56,1,32,56,71,37,2,36,91],\
[22,31,16,71,51,67,63,89,41,92,36,54,22,40,40,28,66,33,13,80],\
[24,47,32,60,99,3,45,2,44,75,33,53,78,36,84,20,35,17,12,50],\
[32,98,81,28,64,23,67,10,26,38,40,67,59,54,70,66,18,38,64,70],\
[67,26,20,68,2,62,12,20,95,63,94,39,63,8,40,91,66,49,94,21],\
[24,55,58,5,66,73,99,26,97,17,78,78,96,83,14,88,34,89,63,72],\
[21,36,23,9,75,0,76,44,20,45,35,14,0,61,33,97,34,31,33,95],\
[78,17,53,28,22,75,31,67,15,94,3,80,4,62,16,14,9,53,56,92],\
[16,39,5,42,96,35,31,47,55,58,88,24,0,17,54,24,36,29,85,57],\
[86,56,0,48,35,71,89,7,5,44,44,37,44,60,21,58,51,54,17,58],\
[19,80,81,68,5,94,47,69,28,73,92,13,86,52,17,77,4,89,55,40],\
[4,52,8,83,97,35,99,16,7,97,57,32,16,26,26,79,33,27,98,66],\
[88,36,68,87,57,62,20,72,3,46,33,67,46,55,12,32,63,93,53,69],\
[4,42,16,73,38,25,39,11,24,94,72,18,8,46,29,32,40,62,76,36],\
[20,69,36,41,72,30,23,88,34,62,99,69,82,67,59,85,74,4,36,16],\
[20,73,35,29,78,31,90,1,74,31,49,71,48,86,81,16,23,57,5,54],\
[1,70,54,71,83,51,54,69,16,92,33,48,61,43,52,1,89,19,67,48],\
])
def run():
"""Default Run Method"""
return problem11(GRID, 4)
if __name__ == '__main__':
print("Result: ", run())
| rado0x54/project-euler | python/problem0011.py | Python | mit | 2,145 |
""" Sahana Eden Automated Test - HRM001 Create Volunteer Certificate
@copyright: 2011-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from tests.web2unittest import SeleniumUnitTest
class CreateStaffCertificate(SeleniumUnitTest):
def test_hrm001_create_volunteer_certificate(self):
"""
@case: HRM001
@description: Create Volunteer Certificate
@TestDoc: https://docs.google.com/spreadsheet/ccc?key=0AmB3hMcgB-3idG1XNGhhRG9QWF81dUlKLXpJaFlCMFE
@Test Wiki: http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/Testing
"""
print "\n"
self.login(account="admin", nexturl="hrm/certificate/create")
self.create("hrm_certificate",
[( "name",
"Basics of First Aid ATEST"
),
( "organisation_id",
"Timor-Leste Red Cross Society",
"autocomplete"),
( "expiry",
"12"
),
]
)
| vgupta6/Project-2 | modules/tests/staff/create_staff_certificate.py | Python | mit | 2,237 |
from django.conf.urls import include
from django.urls import path
from rest_framework_extensions.routers import ExtendedSimpleRouter
from rest_framework_jwt.views import obtain_jwt_token
from api import viewsets
from api.views import root_route, landing, dashboard, logout
router = ExtendedSimpleRouter()
(
router.register("bucketlists", viewsets.BucketlistViewset).register(
"items",
viewsets.ItemViewset,
base_name="bucketlists-item",
parents_query_lookups=["parent_bucketlist"],
)
)
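# The nested registration above should expose routes of the form
# (primary keys illustrative):
#   /api/bucketlists/
#   /api/bucketlists/1/items/
#   /api/bucketlists/1/items/2/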
urlpatterns = [
path("", landing),
path("dashboard/", dashboard),
path("logout/", logout),
path("api/", include(router.urls)),
path("api/", root_route),
path("auth/login/", obtain_jwt_token),
path("blst/", include("rest_framework.urls", namespace="rest_framework")),
# path("docs/", include("rest_framework_swagger.urls")),
]
handler404 = "api.views.custom_404"
| andela-cmutembei/III | api/urls.py | Python | mit | 935 |
from .tap import (TAPVizieR, TAP_Service, GaiaArchive, QueryStr, TAP_AsyncQuery,
resolve, timeit)
| mfouesneau/iasbs2017 | tap/__init__.py | Python | mit | 106 |
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
#DS adding:
#failed import sphinx_rtd_theme
# -- Project information -----------------------------------------------------
project = 'pyscripts'
copyright = '2019, Dave Simpson'
author = 'Dave Simpson'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = ''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.mathjax',
'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
#DS pygments_style = None
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = 'classic'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyscriptsdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pyscripts.tex', 'pyscripts Documentation',
'Dave Simpson', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pyscripts', 'pyscripts Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pyscripts', 'pyscripts Documentation',
author, 'pyscripts', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
| mifads/pyscripts | doc/conf.py | Python | gpl-3.0 | 5,381 |
#
# Copyright 2003,2004 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
# misc utilities
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import types
class seq_with_cursor (object):
__slots__ = [ 'items', 'index' ]
def __init__ (self, items, initial_index = None, initial_value = None):
assert len (items) > 0, "seq_with_cursor: len (items) == 0"
self.items = items
self.set_index (initial_index)
if initial_value is not None:
self.set_index_by_value(initial_value)
def set_index (self, initial_index):
if initial_index is None:
            self.index = len (self.items) // 2  # integer division; '/' would yield a float index
elif initial_index >= 0 and initial_index < len (self.items):
self.index = initial_index
else:
raise ValueError
def set_index_by_value(self, v):
"""
Set index to the smallest value such that items[index] >= v.
If there is no such item, set index to the maximum value.
"""
self.set_index(0) # side effect!
cv = self.current()
more = True
while cv < v and more:
cv, more = next(self) # side effect!
def __next__ (self):
new_index = self.index + 1
if new_index < len (self.items):
self.index = new_index
return self.items[new_index], True
else:
return self.items[self.index], False
def prev (self):
new_index = self.index - 1
if new_index >= 0:
self.index = new_index
return self.items[new_index], True
else:
return self.items[self.index], False
def current (self):
return self.items[self.index]
def get_seq (self):
return self.items[:] # copy of items
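if __name__ == '__main__':
    # Minimal usage sketch; the values are illustrative, not from the original
    # module. set_index_by_value(3) lands on the smallest item >= 3.
    cursor = seq_with_cursor([1, 2, 4, 8], initial_value=3)
    assert cursor.current() == 4
    assert cursor.prev() == (2, True)
    assert next(cursor) == (4, True)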
| jdemel/gnuradio | gnuradio-runtime/python/gnuradio/gru/seq_with_cursor.py | Python | gpl-3.0 | 1,933 |
from __future__ import print_function
from upower import UPowerManager
if __name__ == "__main__":
pwrMan = UPowerManager()
battery_device = None
print('Devices list:')
for dev in pwrMan.detect_devices():
print('\t', dev)
if 'battery' in dev:
battery_device = dev
print('Display Devices:\n\t', pwrMan.get_display_device())
print('Battery Devices:\n\t', battery_device)
    if battery_device is not None:
        print('Battery State:', pwrMan.get_state(battery_device))
    else:
        print('Battery State: no battery device found')
| corerd/PyDomo | powerman/upower_demo.py | Python | mit | 485 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
r_info.py
---------------------
Date : December 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'December 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from processing.grass.ext import HtmlReportPostProcessor
def postProcessResults(alg):
HtmlReportPostProcessor.postProcessResults(alg)
| mweisman/QGIS | python/plugins/processing/grass/ext/r_info.py | Python | gpl-2.0 | 1,238 |
"""Main functions for user interaction. All of these are imported into the
top-level namespace."""
import flavio
import numpy as np
from collections import defaultdict
from multiprocessing import Pool
from functools import partial
import warnings
def np_prediction(obs_name, wc_obj, *args, **kwargs):
"""Get the central value of the new physics prediction of an observable.
Parameters
----------
- `obs_name`: name of the observable as a string
- `wc_obj`: an instance of `flavio.WilsonCoefficients`
Additional arguments are passed to the observable and are necessary,
depending on the observable (e.g. $q^2$-dependent observables).
"""
obs = flavio.classes.Observable[obs_name]
return obs.prediction_central(flavio.default_parameters, wc_obj, *args, **kwargs)
def sm_prediction(obs_name, *args, **kwargs):
"""Get the central value of the Standard Model prediction of an observable.
Parameters
----------
- `obs_name`: name of the observable as a string
Additional arguments are passed to the observable and are necessary,
depending on the observable (e.g. $q^2$-dependent observables).
"""
obs = flavio.classes.Observable[obs_name]
wc_sm = flavio.physics.eft._wc_sm
return obs.prediction_central(flavio.default_parameters, wc_sm, *args, **kwargs)
def _obs_prediction_par(par, obs_name, wc_obj, *args, **kwargs):
obs = flavio.classes.Observable.get_instance(obs_name)
return obs.prediction_par(par, wc_obj, *args, **kwargs)
def np_uncertainty(obs_name, wc_obj, *args, N=100, threads=1, **kwargs):
"""Get the uncertainty of the prediction of an observable in the presence
of new physics.
Parameters
----------
- `obs_name`: name of the observable as a string
- `wc_obj`: an instance of `flavio.WilsonCoefficients`
- `N` (optional): number of random evaluations of the observable.
The relative accuracy of the uncertainty returned is given by $1/\sqrt{2N}$.
- `threads` (optional): if bigger than one, number of threads for parallel
computation of the uncertainty.
Additional arguments are passed to the observable and are necessary,
depending on the observable (e.g. $q^2$-dependent observables).
"""
par_random = flavio.default_parameters.get_random_all(size=N)
par_random = [{k: v[i] for k, v in par_random.items()} for i in range(N)]
if threads == 1:
# not parallel
all_pred = np.array([_obs_prediction_par(par, obs_name, wc_obj, *args, **kwargs) for par in par_random])
else:
# parallel
pool = Pool(threads)
# convert args to kwargs
_kwargs = kwargs.copy()
obs_args = flavio.Observable[obs_name].arguments
for i, a in enumerate(args):
_kwargs[obs_args[i]] = a
all_pred = np.array(
pool.map(
partial(_obs_prediction_par,
obs_name=obs_name, wc_obj=wc_obj, **_kwargs),
par_random))
pool.close()
pool.join()
return np.std(all_pred)
def sm_uncertainty(obs_name, *args, N=100, threads=1, **kwargs):
"""Get the uncertainty of the Standard Model prediction of an observable.
Parameters
----------
- `obs_name`: name of the observable as a string
- `N` (optional): number of random evaluations of the observable.
The relative accuracy of the uncertainty returned is given by $1/\sqrt{2N}$.
- `threads` (optional): if bigger than one, number of threads for parallel
computation of the uncertainty.
Additional arguments are passed to the observable and are necessary,
depending on the observable (e.g. $q^2$-dependent observables).
"""
wc_sm = flavio.physics.eft._wc_sm
return np_uncertainty(obs_name, wc_sm, *args, N=N, threads=threads, **kwargs)
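# A hedged usage sketch (doctest-style; the observable name is an assumption
# about what is available in the loaded observable database):
#
# >>> import flavio
# >>> flavio.sm_prediction('BR(Bs->mumu)')
# >>> flavio.sm_uncertainty('BR(Bs->mumu)', N=500, threads=4)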
class AwareDict(dict):
"""Generalization of dictionary that adds the key to the
set `akeys` upon getting an item."""
def __init__(self, d):
"""Initialize the instance."""
super().__init__(d)
self.akeys = set()
self.d = d
def __getitem__(self, key):
"""Get an item, adding the key to the `pcalled` set."""
self.akeys.add(key)
return dict.__getitem__(self, key)
def __copy__(self):
cp = type(self)(self.d)
cp.akeys = self.akeys
return cp
def copy(self):
return self.__copy__()
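# Small sketch of the key tracking (parameter names and values are illustrative):
#
# >>> d = AwareDict({'alpha_s': 0.1181, 'm_b': 4.18})
# >>> d['m_b']
# 4.18
# >>> d.akeys
# {'m_b'}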
class AwareWilson(flavio.WilsonCoefficients):
"""Subclass of `flavio.WilsonCoefficients` that adds the arguments of calls
to its `match_run` method to `atuples` attribute."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.atuples = set()
def match_run(self, scale, eft, basis, sectors='all'):
self.atuples.add((scale, eft, basis, sectors))
return super().match_run(scale, eft, basis, sectors)
def get_dependent_parameters_sm(obs_name, *args, **kwargs):
"""Get the set of parameters the SM prediction of the observable depends on."""
obs = flavio.classes.Observable[obs_name]
wc_sm = flavio.physics.eft._wc_sm
par_central = flavio.default_parameters.get_central_all()
apar_central = AwareDict(par_central)
obs.prediction_par(apar_central, wc_sm, *args, **kwargs)
# return all observed keys except the ones that don't actually correspond
# to existing parameter names (this might happen by user functions modifying
# the dictionaries)
return {p for p in apar_central.akeys
if p in flavio.Parameter.instances.keys()}
def get_dependent_wcs(obs_name, *args, **kwargs):
"""Get the EFT, basis, scale, and sector of Wilson coefficients
the NP prediction of the observable depends on.
Returns a set of tuples of the form
`(scale, eft, basis, sectors)`,
where sectors is a tuple of WCxf sectors or 'all'.
Note that this function simply checks the arguments with which the
`match_run` method of the underlying `wilson.Wilson` instance is called.
Thus it is only guaranteed that the Wilson coefficients the observable
actually depends on are contained in these sectors."""
awc = AwareWilson()
# need at least one non-zero WC to make sure match_run is called at all
awc.set_initial({'G': 1e-30}, 91.1876, 'SMEFT', 'Warsaw')
np_prediction(obs_name, awc, *args, **kwargs)
return awc.atuples
def sm_error_budget(obs_name, *args, N=50, **kwargs):
"""Get the *relative* uncertainty of the Standard Model prediction due to
variation of individual observables.
Parameters
----------
- `obs_name`: name of the observable as a string
- `N` (optional): number of random evaluations of the observable.
The relative accuracy of the uncertainties returned is given by $1/\sqrt{2N}$.
Additional arguments are passed to the observable and are necessary,
depending on the observable (e.g. $q^2$-dependent observables).
"""
obs = flavio.classes.Observable[obs_name]
wc_sm = flavio.physics.eft._wc_sm
par_central = flavio.default_parameters.get_central_all()
par_random = [flavio.default_parameters.get_random_all() for i in range(N)]
pred_central = obs.prediction_par(par_central, wc_sm, *args, **kwargs)
# Step 1: determine the parameters the observable depends on at all.
dependent_par = get_dependent_parameters_sm(obs_name, *args, **kwargs)
# Step 2: group parameters if correlated
par_constraint = {p: id(flavio.default_parameters._parameters[p][1]) for p in dependent_par}
v = defaultdict(list)
for key, value in par_constraint.items():
v[value].append(key)
dependent_par_lists = list(v.values())
# Step 3: for each of the (groups of) dependent parameters, determine the error
# analogous to the sm_uncertainty function. Normalize to the central
# prediction (so relative errors are returned)
individual_errors = {}
def make_par_random(keys, par_random):
par_tmp = par_central.copy()
for key in keys:
par_tmp[key] = par_random[key]
return par_tmp
for p in dependent_par_lists:
par_random_p = [make_par_random(p, pr) for pr in par_random]
all_pred = np.array([
obs.prediction_par(par, wc_sm, *args, **kwargs)
for par in par_random_p
])
# for the dictionary key, use the list element if there is only 1,
# otherwise use a tuple (which is hashable)
if len(p) == 1:
key = p[0]
else:
key = tuple(p)
individual_errors[key] = np.std(all_pred)/abs(pred_central)
return individual_errors
def _get_prediction_array_sm(par, obs_list):
wc_sm = flavio.physics.eft._wc_sm
def get_prediction_sm(obs, par):
obs_dict = flavio.classes.Observable.argument_format(obs, 'dict')
obs_obj = flavio.classes.Observable[obs_dict.pop('name')]
return obs_obj.prediction_par(par, wc_sm, **obs_dict)
return np.array([get_prediction_sm(obs, par) for obs in obs_list])
def sm_covariance(obs_list, N=100, par_vary='all', par_obj=None, threads=1,
**kwargs):
r"""Get the covariance matrix of the Standard Model predictions for a
list of observables.
Parameters
----------
- `obs_list`: a list of observables that should be given either as a string
name (for observables that do not depend on any arguments) or as a tuple
      of a string and values for the arguments the observable depends on (e.g.
the values of `q2min` and `q2max` for a binned observable)
- `N` (optional): number of random evaluations of the observables.
The relative accuracy of the uncertainties returned is given
by $1/\sqrt{2N}$.
- `par_vary` (optional): a list of parameters to vary. Defaults to 'all', i.e. all
parameters are varied according to their probability distributions.
- `par_obj` (optional): an instance of ParameterConstraints, defaults to
flavio.default_parameters.
- `threads` (optional): number of CPU threads to use for the computation.
Defaults to 1, i.e. serial computation.
"""
par_obj = par_obj or flavio.default_parameters
par_central_all = par_obj.get_central_all()
par_random_all = par_obj.get_random_all(size=N)
def par_random_some(par_random, par_central):
# take the central values for the parameters not to be varied (N times)
par1 = {k: np.full(N, v) for k, v in par_central.items() if k not in par_vary}
# take the random values for the parameters to be varied
par2 = {k: v for k, v in par_random.items() if k in par_vary}
par1.update(par2) # merge them
return par1
    if par_vary == 'all':
        par_random = par_random_all
    else:
        par_random = par_random_some(par_random_all, par_central_all)
    par_random = [{k: v[i] for k, v in par_random.items()} for i in range(N)]
func_map = partial(_get_prediction_array_sm, obs_list=obs_list)
if threads == 1:
pred_map = map(func_map, par_random)
else:
pool = Pool(threads)
pred_map = pool.map(func_map, par_random)
pool.close()
pool.join()
all_pred = np.array(list(pred_map))
return np.cov(all_pred.T)
def combine_measurements(observable, include_measurements=None,
**kwargs):
"""Combine all existing measurements of a particular observable.
Returns a one-dimensional instance of `ProbabilityDistribution`.
    Correlations with other observables are ignored.
Parameters:
- `observable`: observable name
- `include_measurements`: iterable of measurement names to be included
(default: all)
Observable arguments have to be specified as keyword arguments, e.g.
`combine_measurements('<dBR/dq2>(B+->Kmumu)', q2min=1, q2max=6)`.
Note that this function returns inconsistent results (and a corresponding
warning is issued) if an observable is constrained by more than one
multivariate measurement.
"""
if not kwargs:
obs = observable
else:
args = flavio.Observable[observable].arguments
obs = (observable, ) + tuple(kwargs[a] for a in args)
constraints = []
_n_multivariate = 0 # number of multivariate constraints
for name, m in flavio.Measurement.instances.items():
if name.split(' ')[0] == 'Pseudo-measurement':
continue
elif include_measurements is not None and name not in include_measurements:
continue
elif obs not in m.all_parameters:
continue
num, constraint = m._parameters[obs]
if not np.isscalar(constraint.central_value):
_n_multivariate += 1
# for multivariate PDFs, reduce to 1D PDF
exclude = tuple([i for i, _ in enumerate(constraint.central_value)
if i != num]) # exclude all i but num
constraint1d = constraint.reduce_dimension(exclude=exclude)
constraints.append(constraint1d)
else:
constraints.append(constraint)
if _n_multivariate > 1:
warnings.warn(("{} of the measurements of '{}' are multivariate. "
"This can lead to inconsistent results as the other "
"observables are profiled over. "
"To be consistent, you should perform a multivariate "
"combination that is not yet supported by `combine_measurements`."
).format(_n_multivariate, obs))
if not constraints:
raise ValueError("No experimental measurements found for this observable.")
elif len(constraints) == 1:
return constraints[0]
else:
return flavio.statistics.probability.combine_distributions(constraints)
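# A hedged example call, mirroring the docstring above (assumes measurements
# of this observable are loaded):
#
# >>> combine_measurements('<dBR/dq2>(B+->Kmumu)', q2min=1, q2max=6)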
| flav-io/flavio | flavio/functions.py | Python | mit | 13,966 |
# Converts a METIS-graph into the DIMACS format
import sys, os, re
filename = sys.argv[1]
if not os.path.isfile(filename):
print "File not found."
sys.exit(0)
number_nodes = 0
number_edges = 0
edges_counted = 0
adjacency = []
print "Reading the file."
with open(filename) as f:
node = 0
for line in f:
args = line.strip().split()
if node == 0:
number_nodes = int(args[0])
number_edges = int(args[1])
print "Graph has " + str(number_nodes) + " nodes and " + str(number_edges) + " edges"
adjacency = [[] for _ in range(0, number_nodes + 1)]
adjacency[0] = args
else:
adjacency[node] = args
edges_counted += len(args)
node += 1
print "Writing new file"
filepath = os.path.splitext(filename)
new_file = filepath[0] + '.dimacs'
with open(new_file, 'w') as f:
node = 0
for neighbors in adjacency:
if node == 0:
spec = ' '.join(neighbors)
f.write('p ' + spec)
f.write('\n')
else:
for neighbor_node in neighbors:
if neighbor_node != '':
f.write('e ' + str(node) + ' ' + neighbor_node)
f.write('\n')
if str(node) in adjacency[int(neighbor_node)]:
adjacency[int(neighbor_node)].remove(str(node))
node += 1
print "Finished converting."
| sebalamm/KaMIS | misc/conversion/metis_to_dimacs.py | Python | gpl-2.0 | 1,447 |
'''This module contains a wrapper for C{os.environ} that deals with
proper encoding / decoding of values
When this module is loaded it will try to set proper values
for C{HOME} and C{USER} if they are not set and C{APPDATA} on windows.
'''
import os
import logging
import collections
logger = logging.getLogger('zim')
from zim.fs import ENCODING, isdir
class Environ(collections.MutableMapping):
def __getitem__(self, k):
# Do NOT use zim.fs.decode here, we want real decoding on windows,
# not just convert to unicode
v = os.environ[k]
if isinstance(v, str):
return v.decode(ENCODING)
else:
return v
def __setitem__(self, k, v):
if isinstance(v, unicode):
v = v.encode(ENCODING)
os.environ[k] = v
def __delitem__(self, k):
del os.environ[k]
def __iter__(self):
return iter(os.environ)
def __len__(self):
return len(os.environ)
def get(self, k, default=None):
'''Get a parameter from the environment. Like C{os.environ.get()}
but does decoding for non-ascii characters.
@param k: the parameter to get
@param default: the default if C{param} does not exist
@returns: a unicode string or C{default}
'''
try:
v = self[k]
except KeyError:
return default
else:
if not v or v.isspace(): # existing but empty is edge case in environ
return default
else:
return v
def get_list(self, k, default=None, sep=None):
'''Get a parameter from the environment and convert to a list.
@param k: the parameter to get
@param default: the default if C{param} does not exist
@param sep: optional seperator, defaults to C{os.pathsep} if not given
@returns: a list or the default
'''
v = self.get(k, default)
if v is None:
return []
elif isinstance(v, basestring):
if sep is None:
sep = os.pathsep
return v.split(sep)
else:
assert isinstance(v, (list, tuple))
return v
environ = Environ() # Singleton
## Check environment
if os.name == 'nt':
# Windows specific environment variables
# os.environ does not support setdefault() ...
if not 'USER' in environ or not environ['USER']:
environ['USER'] = environ['USERNAME']
if not 'HOME' in environ or not environ['HOME']:
if 'USERPROFILE' in environ:
environ['HOME'] = environ['USERPROFILE']
elif 'HOMEDRIVE' in environ and 'HOMEPATH' in environ:
environ['HOME'] = \
environ['HOMEDRIVE'] + environ['HOMEPATH']
if not 'APPDATA' in environ or not environ['APPDATA']:
environ['APPDATA'] = environ['HOME'] + '\\Application Data'
assert isdir(environ['HOME']), \
	'ERROR: environment variable $HOME not set correctly, value is "%s"' % environ['HOME']
# using our own environ here to ensure encoding
if not 'USER' in environ or not environ['USER']:
# E.g. Maemo doesn't define $USER
environ['USER'] = os.path.basename(environ['HOME'])
logger.info('Environment variable $USER was not set, set to "%s"', environ['USER'])
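# A hedged usage sketch of the singleton (variable names are illustrative):
#
# notebook = environ.get('ZIM_NOTEBOOK', default='~/Notes')
# data_dirs = environ.get_list('XDG_DATA_DIRS')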
| gratteur/zim-desktop | zim/environ.py | Python | gpl-2.0 | 2,881 |
#
# Elastic Model: Testelastic
#
from elasticsearch_dsl import Date, Boolean, Text, Integer, Byte, Float, Keyword
from {{appname}}.models.elastic.dsl_basemodel import ElasticDSLBaseModel
from {{appname}}.lib.powlib import relation
from elasticsearch_dsl import DocType
from {{appname}}.database.elasticdblib import dbname
from datetime import datetime
@relation.setup_elastic_dsl_schema()
class {{model_class_name}}(ElasticDSLBaseModel):
#
# Use the cerberus schema style
# which offer you an ElasticDSL schema and
# immediate validation with cerberus
#
class Meta:
index = dbname
schema = {
'title': {
'type': 'string',
"elastic" : {
"analyzer" : "snowball",
"fields" : {'raw': Keyword()}
}
},
'body': {
'type': 'string', 'maxlength' : 235,
"elastic" : {
"analyzer" : "snowball"
}
},
'tags': {
'type': 'list',
"elastic" : {
"index" : "not_analyzed"
}
},
'published_from' : { "type": 'date' },
'lines' : { "type": 'integer' }
}
#
# your model's methods down here
# (the two below are just examples from the elasticsearch_dsl py documentation)
#
def save(self, ** kwargs):
self.lines = len(self.body.split())
self.upsert()
def is_published(self):
return datetime.now() < self.published_from
| pythononwheels/pow_devel | pythononwheels/start/stubs/elasticdsl_model_template.py | Python | mit | 1,539 |
from __future__ import absolute_import
from .utils import *
| mfixstsci/peewee4cosmo | cosmo_peewee/utils/__init__.py | Python | bsd-3-clause | 61 |
import glob
import json
import os
import typing
import zipfile
from argparse import ArgumentParser
from datetime import datetime
import cauldron
from cauldron import cli
from cauldron import environ
from cauldron.cli import sync
from cauldron.environ import Response
from cauldron.session import projects
NAME = 'save'
DESCRIPTION = """
Saves the current project's notebook as a Cauldron Display File (CAULDRON)
for viewing in the Cauldron reader application.
"""
def populate(
parser: ArgumentParser,
raw_args: typing.List[str],
assigned_args: dict
):
"""..."""
parser.add_argument(
'path',
default=None,
nargs='?',
help=cli.reformat("""
The file path to the Cauldron file to be saved. If the file
extension is missing it will be appended to the end of the path's
filename. If a directory is specified instead of a file, the
            Cauldron file will be saved into that directory using the name of the
project as the filename.
""")
)
def get_default_path() -> str:
"""..."""
project = cauldron.project.internal_project
if not project or not project.remote_source_directory:
return os.path.abspath(os.path.expanduser('~'))
downloads_directory = os.path.realpath(os.path.join(
project.source_directory,
'..',
'__cauldron_downloads'
))
    if not os.path.exists(downloads_directory):
        os.makedirs(downloads_directory)
    count = len(os.listdir(downloads_directory))
return os.path.join(downloads_directory, '{}.cauldron'.format(count))
def clean_path(project_title: str, path: str) -> str:
"""..."""
cleaned = environ.paths.clean(path)
if os.path.isdir(cleaned):
return os.path.join(cleaned, '{}.cauldron'.format(project_title))
if not cleaned.endswith('.cauldron'):
return '{}.cauldron'.format(cleaned)
return cleaned
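# Behavior sketch for clean_path (the paths are illustrative assumptions):
#
#   clean_path('demo', '/tmp/out')             -> '/tmp/out/demo.cauldron'  (existing dir)
#   clean_path('demo', '/tmp/report')          -> '/tmp/report.cauldron'
#   clean_path('demo', '/tmp/report.cauldron') -> '/tmp/report.cauldron'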
def create_settings(project: 'projects.Project') -> dict:
"""..."""
return dict(
title=project.title,
version=environ.notebook_version,
timestamp=datetime.now().isoformat(),
steps=[
{'name': s.name, 'title': s.definition.title}
for s in project.steps
]
)
def make_directory(path: str):
"""..."""
save_directory = os.path.dirname(path)
if not os.path.exists(save_directory):
os.makedirs(save_directory)
def write_file(project: 'projects.Project', path: str) -> str:
"""
:param project:
:param path:
:return:
"""
save_path = path if path else get_default_path()
save_path = clean_path(project.title, save_path)
make_directory(save_path)
z = zipfile.ZipFile(save_path, 'w', allowZip64=True)
root_folder = project.output_directory
globber = glob.iglob('{}/**/*.*'.format(root_folder), recursive=True)
def add(file_path: str) -> str:
slug = file_path[len(root_folder):].strip(os.sep)
z.write(file_path, slug)
return slug
slugs = [add(match) for match in globber]
settings = create_settings(project)
settings['files'] = slugs
z.writestr('reader_configs.json', json.dumps(settings))
z.close()
return save_path
def execute_remote(context: cli.CommandContext, path: str = None) -> Response:
"""..."""
thread = sync.send_remote_command(
command='save',
remote_connection=context.remote_connection,
show_logs=False,
asynchronous=False
)
thread.join()
save_response = thread.responses[-1]
if save_response.failed:
save_response.log_notifications()
return context.response.consume(save_response)
filename = os.path.basename(save_response.data.get('path'))
project_title = save_response.data.get('project_title', 'Project')
save_path = clean_path(
project_title,
path if path else get_default_path()
)
make_directory(save_path)
download_response = sync.comm.download_file(
filename=filename,
save_path=save_path,
remote_connection=context.remote_connection
)
if download_response.success:
download_response.notify(
kind='SAVED',
code='DOWNLOAD_SAVED',
message='Project has been saved to: {}'.format(save_path)
).console(whitespace=1)
return context.response.consume(download_response)
def execute(context: cli.CommandContext, path: str = None) -> Response:
response = context.response
project = cauldron.project.internal_project
if not project:
return response.fail(
code='NO_PROJECT',
message='No project is open. Nothing to save'
).console(whitespace=1).response
try:
saved_path = write_file(project, path)
except Exception as error:
return response.fail(
code='WRITE_SAVE_ERROR',
message='Unable to write the Cauldron file output',
error=error
).console(whitespace=1).response
return response.update(
path=saved_path,
project_title=project.title
).notify(
kind='SUCCESS',
code='SAVED',
message='The project has been saved to: {}'.format(saved_path),
).console(whitespace=1).response
| sernst/cauldron | cauldron/cli/commands/save.py | Python | mit | 5,246 |
"""
I'm a service module without a handler.
""" | elishacook/mabruk | mabruk/tests/mock/bad_service_missing_handler.py | Python | mit | 47 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('transit_indicators', '0026_auto_20140911_2016'),
]
operations = [
migrations.AddField(
model_name='indicatorjob',
name='city_name',
field=models.CharField(default=b'My City', max_length=255),
preserve_default=True,
),
]
| flibbertigibbet/open-transit-indicators | python/django/transit_indicators/migrations/0027_auto_20140916_2114.py | Python | gpl-3.0 | 476 |
import io
import base64
import gevent
from Tkinter import Label
from PIL import ImageTk, Image
class AnimatedImgLabel(Label):
# http://stackoverflow.com/questions/7960600/python-tkinter-display-animated-gif-using-pil
def __init__(self, master, data, encoding='base64', **kwargs):
if encoding == 'base64':
data = base64.b64decode(data)
self.img = Image.open(io.BytesIO(data))
seq = list()
try:
while True:
seq.append(self.img.copy())
self.img.seek(len(seq)) # skip to next frame
except EOFError:
pass # we're done
try:
self.delay = float(self.img.info['duration'])/1000
except KeyError:
self.delay = 0.200
self.frames = list()
for frame in seq:
#frame = frame.convert('RGBA')
self.frames.append(ImageTk.PhotoImage(frame))
self.idx = 0
self.first = self.frames[0]
Label.__init__(self, master, image=self.first, **kwargs)
self.greenlet = gevent.spawn_later(self.delay, self.play)
def destroy(self):
self.greenlet.kill()
Label.destroy(self)
def play(self):
try:
self.config(image=self.frames[self.idx])
self.master.update()
self.idx += 1
if self.idx == len(self.frames):
self.idx = 0
self.greenlet = gevent.spawn_later(self.delay, self.play)
except:
import traceback
traceback.print_exc()
raise
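# A hedged usage sketch (the file name is an assumption; note that the host
# application must run a gevent-friendly Tk loop so the greenlets spawned to
# advance the frames actually get scheduled):
#
# import Tkinter as tk
# root = tk.Tk()
# with open('spinner.gif', 'rb') as f:
#     label = AnimatedImgLabel(root, base64.b64encode(f.read()))
# label.pack()
# root.mainloop()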
| MoroGasper/client | client/plugins/ui/tk/animate.py | Python | gpl-3.0 | 1,581 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Tracker',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('goal', models.CharField(max_length=100)),
('date', models.DateField()),
('title', models.CharField(max_length=140)),
('content', models.TextField()),
('hours', models.DecimalField(max_digits=5, decimal_places=2)),
('remark', models.TextField(null=True, blank=True)),
('pubtime', models.DateTimeField(auto_now_add=True)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['goal', 'pubtime'],
},
bases=(models.Model,),
),
]
| kwailamchan/programming-languages | python/django/elf/elf/src/hours10k/migrations/0001_initial.py | Python | mit | 1,153 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division
import theano
import theano.tensor as T
import numpy as np
from .. import activations, initializations
from ..utils.theano_utils import shared_zeros, floatX
from ..utils.generic_utils import make_tuple
from ..regularizers import ActivityRegularizer
from .. import constraints
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from six.moves import zip
srng = RandomStreams(seed=np.random.randint(int(10e6)))
class Layer(object):
def __init__(self):
self.params = []
def set_previous(self, layer):
if not self.supports_masked_input() and layer.get_output_mask() is not None:
raise Exception("Attached non-masking layer to layer with masked output")
self.previous = layer
def get_output(self, train=False):
return self.get_input(train)
def get_input(self, train=False):
if hasattr(self, 'previous'):
return self.previous.get_output(train=train)
else:
return self.input
def supports_masked_input(self):
        ''' Whether or not this layer respects the output mask of its previous layer
        in its calculations. Attaching a layer that does *not* support masked input
        to a layer whose output_mask() is not None is an error.'''
return False
def get_output_mask(self, train=None):
'''
For some models (such as RNNs) you want a way of being able to mark some output data-points as
"masked", so they are not used in future calculations. In such a model, get_output_mask() should return a mask
of one less dimension than get_output() (so if get_output is (nb_samples, nb_timesteps, nb_dimensions), then the mask
is (nb_samples, nb_timesteps), with a one for every unmasked datapoint, and a zero for every masked one.
If there is *no* masking then it shall return None. For instance if you attach an Activation layer (they support masking)
to a layer with an output_mask, then that Activation shall also have an output_mask. If you attach it to a layer with no
such mask, then the Activation's get_output_mask shall return None.
Some layers have an output_mask even if their input is unmasked, notably Embedding which can turn the entry "0" into
a mask.
'''
return None
def set_weights(self, weights):
for p, w in zip(self.params, weights):
if p.eval().shape != w.shape:
raise Exception("Layer shape %s not compatible with weight shape %s." % (p.eval().shape, w.shape))
p.set_value(floatX(w))
def get_weights(self):
weights = []
for p in self.params:
weights.append(p.get_value())
return weights
def get_config(self):
return {"name":self.__class__.__name__}
def get_params(self):
consts = []
if hasattr(self, 'regularizers'):
regularizers = self.regularizers
else:
regularizers = []
if hasattr(self, 'constraints') and len(self.constraints) == len(self.params):
for c in self.constraints:
if c:
consts.append(c)
else:
consts.append(constraints.identity())
elif hasattr(self, 'constraint') and self.constraint:
consts += [self.constraint for _ in range(len(self.params))]
else:
consts += [constraints.identity() for _ in range(len(self.params))]
return self.params, regularizers, consts
class MaskedLayer(Layer):
'''
If your layer trivially supports masking (by simply copying the input mask to the output), then subclass MaskedLayer
instead of Layer, and make sure that you incorporate the input mask into your calculation of get_output()
'''
def supports_masked_input(self):
return True
def get_input_mask(self, train=False):
if hasattr(self, 'previous'):
return self.previous.get_output_mask(train)
else:
return None
def get_output_mask(self, train=False):
''' The default output mask is just the input mask unchanged. Override this in your own
implementations if, for instance, you are reshaping the input'''
return self.get_input_mask(train)
class Merge(object):
def __init__(self, layers, mode='sum'):
''' Merge the output of a list of layers or containers into a single tensor.
mode: {'sum', 'concat'}
'''
if len(layers) < 2:
raise Exception("Please specify two or more input layers (or containers) to merge")
self.mode = mode
self.layers = layers
self.params = []
self.regularizers = []
self.constraints = []
for l in self.layers:
params, regs, consts = l.get_params()
self.regularizers += regs
# params and constraints have the same size
for p, c in zip(params, consts):
if not p in self.params:
self.params.append(p)
self.constraints.append(c)
def get_params(self):
return self.params, self.regularizers, self.constraints
def get_output(self, train=False):
if self.mode == 'sum':
s = self.layers[0].get_output(train)
for i in range(1, len(self.layers)):
s += self.layers[i].get_output(train)
return s
elif self.mode == 'concat':
inputs = [self.layers[i].get_output(train) for i in range(len(self.layers))]
return T.concatenate(inputs, axis=-1)
else:
raise Exception('Unknown merge mode')
def get_input(self, train=False):
res = []
for i in range(len(self.layers)):
o = self.layers[i].get_input(train)
if not type(o) == list:
o = [o]
for output in o:
if output not in res:
res.append(output)
return res
@property
def input(self):
return self.get_input()
def supports_masked_input(self):
return False
def get_output_mask(self, train=None):
return None
def get_weights(self):
weights = []
for l in self.layers:
weights += l.get_weights()
return weights
def set_weights(self, weights):
for i in range(len(self.layers)):
nb_param = len(self.layers[i].params)
self.layers[i].set_weights(weights[:nb_param])
weights = weights[nb_param:]
def get_config(self):
return {"name":self.__class__.__name__,
"layers":[l.get_config() for l in self.layers],
"mode":self.mode}
class Dropout(MaskedLayer):
'''
Hinton's dropout.
'''
def __init__(self, p):
super(Dropout, self).__init__()
self.p = p
def get_output(self, train=False):
X = self.get_input(train)
if self.p > 0.:
retain_prob = 1. - self.p
if train:
X *= srng.binomial(X.shape, p=retain_prob, dtype=theano.config.floatX)
else:
X *= retain_prob
return X
def get_config(self):
return {"name":self.__class__.__name__,
"p":self.p}
class Activation(MaskedLayer):
'''
Apply an activation function to an output.
'''
def __init__(self, activation, target=0, beta=0.1):
super(Activation, self).__init__()
self.activation = activations.get(activation)
self.target = target
self.beta = beta
def get_output(self, train=False):
X = self.get_input(train)
return self.activation(X)
def get_config(self):
return {"name":self.__class__.__name__,
"activation":self.activation.__name__,
"target":self.target,
"beta":self.beta}
class Reshape(Layer):
'''
Reshape an output to a certain shape.
Can't be used as first layer in a model (no fixed input!)
First dimension is assumed to be nb_samples.
'''
def __init__(self, *dims):
super(Reshape, self).__init__()
self.dims = dims
def get_output(self, train=False):
X = self.get_input(train)
nshape = make_tuple(X.shape[0], *self.dims)
return theano.tensor.reshape(X, nshape)
def get_config(self):
return {"name":self.__class__.__name__,
"dims":self.dims}
class Flatten(Layer):
'''
Reshape input to flat shape.
First dimension is assumed to be nb_samples.
'''
def __init__(self):
super(Flatten, self).__init__()
def get_output(self, train=False):
X = self.get_input(train)
size = theano.tensor.prod(X.shape) // X.shape[0]
nshape = (X.shape[0], size)
return theano.tensor.reshape(X, nshape)
class RepeatVector(Layer):
'''
Repeat input n times.
Dimensions of input are assumed to be (nb_samples, dim).
Return tensor of shape (nb_samples, n, dim).
'''
def __init__(self, n):
super(RepeatVector, self).__init__()
self.n = n
def get_output(self, train=False):
X = self.get_input(train)
tensors = [X]*self.n
stacked = theano.tensor.stack(*tensors)
return stacked.dimshuffle((1, 0, 2))
def get_config(self):
return {"name":self.__class__.__name__,
"n":self.n}
class Dense(Layer):
'''
Just your regular fully connected NN layer.
'''
def __init__(self, input_dim, output_dim, init='glorot_uniform', activation='linear', weights=None, name=None,
W_regularizer=None, b_regularizer=None, activity_regularizer=None, W_constraint=None, b_constraint=None):
super(Dense, self).__init__()
self.init = initializations.get(init)
self.activation = activations.get(activation)
self.input_dim = input_dim
self.output_dim = output_dim
self.input = T.matrix()
self.W = self.init((self.input_dim, self.output_dim))
self.b = shared_zeros((self.output_dim))
self.params = [self.W, self.b]
self.regularizers = []
if W_regularizer:
W_regularizer.set_param(self.W)
self.regularizers.append(W_regularizer)
if b_regularizer:
b_regularizer.set_param(self.b)
self.regularizers.append(b_regularizer)
if activity_regularizer:
activity_regularizer.set_layer(self)
self.regularizers.append(activity_regularizer)
self.constraints = [W_constraint, b_constraint]
if weights is not None:
self.set_weights(weights)
if name is not None:
self.set_name(name)
def set_name(self, name):
self.W.name = '%s_W' % name
self.b.name = '%s_b' % name
def get_output(self, train=False):
X = self.get_input(train)
output = self.activation(T.dot(X, self.W) + self.b)
return output
def get_config(self):
return {"name":self.__class__.__name__,
"input_dim":self.input_dim,
"output_dim":self.output_dim,
"init":self.init.__name__,
"activation":self.activation.__name__}
class ActivityRegularization(Layer):
'''
Layer that passes through its input unchanged, but applies an update
to the cost function based on the activity.
'''
def __init__(self, l1=0., l2=0.):
super(ActivityRegularization, self).__init__()
self.l1 = l1
self.l2 = l2
activity_regularizer = ActivityRegularizer(l1=l1, l2=l2)
activity_regularizer.set_layer(self)
self.regularizers = [activity_regularizer]
def get_output(self, train=False):
return self.get_input(train)
def get_config(self):
return {"name":self.__class__.__name__,
"l1":self.l1,
"l2":self.l2}
class TimeDistributedDense(MaskedLayer):
'''
Apply a same DenseLayer for each dimension[1] (shared_dimension) input
Especially useful after a recurrent network with 'return_sequence=True'
Tensor input dimensions: (nb_sample, shared_dimension, input_dim)
Tensor output dimensions: (nb_sample, shared_dimension, output_dim)
'''
def __init__(self, input_dim, output_dim, init='glorot_uniform', activation='linear', weights=None,
W_regularizer=None, b_regularizer=None, activity_regularizer=None, W_constraint=None, b_constraint=None):
super(TimeDistributedDense, self).__init__()
self.init = initializations.get(init)
self.activation = activations.get(activation)
self.input_dim = input_dim
self.output_dim = output_dim
self.input = T.tensor3()
self.W = self.init((self.input_dim, self.output_dim))
self.b = shared_zeros((self.output_dim))
self.params = [self.W, self.b]
self.regularizers = []
if W_regularizer:
W_regularizer.set_param(self.W)
self.regularizers.append(W_regularizer)
if b_regularizer:
b_regularizer.set_param(self.b)
self.regularizers.append(b_regularizer)
if activity_regularizer:
activity_regularizer.set_layer(self)
self.regularizers.append(activity_regularizer)
self.constraints = [W_constraint, b_constraint]
if weights is not None:
self.set_weights(weights)
def get_output(self, train=False):
X = self.get_input(train)
output = self.activation(T.dot(X.dimshuffle(1, 0, 2), self.W) + self.b)
return output.dimshuffle(1, 0, 2)
def get_config(self):
return {"name":self.__class__.__name__,
"input_dim":self.input_dim,
"output_dim":self.output_dim,
"init":self.init.__name__,
"activation":self.activation.__name__}
class AutoEncoder(Layer):
'''
A customizable autoencoder model.
If output_reconstruction then dim(input) = dim(output)
else dim(output) = dim(hidden)
'''
def __init__(self, encoder, decoder, output_reconstruction=True, tie_weights=False, weights=None):
super(AutoEncoder, self).__init__()
self.output_reconstruction = output_reconstruction
self.tie_weights = tie_weights
self.encoder = encoder
self.decoder = decoder
self.decoder.set_previous(self.encoder)
self.params = []
self.regularizers = []
self.constraints = []
for layer in [self.encoder, self.decoder]:
self.params += layer.params
if hasattr(layer, 'regularizers'):
self.regularizers += layer.regularizers
if hasattr(layer, 'constraints'):
self.constraints += layer.constraints
if weights is not None:
self.set_weights(weights)
def set_previous(self, node):
self.encoder.set_previous(node)
def get_weights(self):
weights = []
for layer in [self.encoder, self.decoder]:
weights += layer.get_weights()
return weights
def set_weights(self, weights):
nb_param = len(self.encoder.params)
self.encoder.set_weights(weights[:nb_param])
self.decoder.set_weights(weights[nb_param:])
def get_input(self, train=False):
return self.encoder.get_input(train)
@property
def input(self):
return self.encoder.input
def _get_hidden(self, train=False):
return self.encoder.get_output(train)
def get_output(self, train=False):
if not train and not self.output_reconstruction:
return self.encoder.get_output(train)
decoded = self.decoder.get_output(train)
if self.tie_weights:
encoder_params = self.encoder.get_weights()
decoder_params = self.decoder.get_weights()
            # write the transposed decoder kernels back into the encoder so the
            # weights are actually shared (bias vectors keep the encoder's own)
            tied = [dec_param.T if len(dec_param.shape) > 1 else enc_param
                    for dec_param, enc_param in zip(decoder_params, encoder_params)]
            self.encoder.set_weights(tied)
return decoded
def get_config(self):
return {"name":self.__class__.__name__,
"encoder_config":self.encoder.get_config(),
"decoder_config":self.decoder.get_config(),
"output_reconstruction":self.output_reconstruction,
"tie_weights":self.tie_weights}
class MaxoutDense(Layer):
'''
Max-out layer, nb_feature is the number of pieces in the piecewise linear approx.
Refer to http://arxiv.org/pdf/1302.4389.pdf
'''
def __init__(self, input_dim, output_dim, nb_feature=4, init='glorot_uniform', weights=None,
W_regularizer=None, b_regularizer=None, activity_regularizer=None, W_constraint=None, b_constraint=None):
super(MaxoutDense, self).__init__()
self.init = initializations.get(init)
self.input_dim = input_dim
self.output_dim = output_dim
self.nb_feature = nb_feature
self.input = T.matrix()
self.W = self.init((self.nb_feature, self.input_dim, self.output_dim))
self.b = shared_zeros((self.nb_feature, self.output_dim))
self.params = [self.W, self.b]
self.regularizers = []
if W_regularizer:
W_regularizer.set_param(self.W)
self.regularizers.append(W_regularizer)
if b_regularizer:
b_regularizer.set_param(self.b)
self.regularizers.append(b_regularizer)
if activity_regularizer:
activity_regularizer.set_layer(self)
self.regularizers.append(activity_regularizer)
self.constraints = [W_constraint, b_constraint]
if weights is not None:
self.set_weights(weights)
def get_output(self, train=False):
X = self.get_input(train)
# -- don't need activation since it's just linear.
output = T.max(T.dot(X, self.W) + self.b, axis=1)
return output
def get_config(self):
return {"name":self.__class__.__name__,
"input_dim":self.input_dim,
"output_dim":self.output_dim,
"init":self.init.__name__,
"nb_feature" : self.nb_feature}
| kfoss/keras | keras/layers/core.py | Python | mit | 18,396 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mps.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| UPDDI/mps-database-server | manage.py | Python | mit | 246 |
#!/home/sam/p/drumm-farm/drumm_env/bin/python
from __future__ import print_function
import base64
import os
import sys
if __name__ == "__main__":
# create font data chunk for embedding
font = "Tests/images/courB08"
print(" f._load_pilfont_data(")
print(" # %s" % os.path.basename(font))
print(" BytesIO(base64.decodestring(b'''")
base64.encode(open(font + ".pil", "rb"), sys.stdout)
print("''')), Image.open(BytesIO(base64.decodestring(b'''")
base64.encode(open(font + ".pbm", "rb"), sys.stdout)
print("'''))))")
# End of file
| samdowd/drumm-farm | drumm_env/bin/createfontdatachunk.py | Python | mit | 584 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# IkaLog
# ======
# Copyright (C) 2015 Takeshi HASEGAWA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from datetime import datetime
import time
from ikalog.utils import *
# Needed in GUI mode
try:
import wx
except:
pass
# @package IkaOutput_CSV
# IkaOutput_CSV: IkaLog CSV Output Plugin
#
# Log Splatoon game results as CSV format.
class CSV(object):
def apply_ui(self):
self.enabled = self.checkEnable.GetValue()
self.csv_filename = self.editCsvFilename.GetValue()
def refresh_ui(self):
self._internal_update = True
self.checkEnable.SetValue(self.enabled)
if not self.csv_filename is None:
self.editCsvFilename.SetValue(self.csv_filename)
else:
self.editCsvFilename.SetValue('')
def on_config_reset(self, context=None):
self.enabled = False
self.csv_filename = os.path.join(os.getcwd(), 'ika.csv')
def on_config_load_from_context(self, context):
self.on_config_reset(context)
try:
conf = context['config']['csv']
except:
conf = {}
if 'Enable' in conf:
self.enabled = conf['Enable']
if 'CsvFilename' in conf:
self.csv_filename = conf['CsvFilename']
self.refresh_ui()
return True
def on_config_save_to_context(self, context):
context['config']['csv'] = {
'Enable': self.enabled,
'CsvFilename': self.csv_filename,
}
def on_config_apply(self, context):
self.apply_ui()
def on_option_tab_create(self, notebook):
self.panel = wx.Panel(notebook, wx.ID_ANY)
self.page = notebook.InsertPage(0, self.panel, 'CSV')
self.layout = wx.BoxSizer(wx.VERTICAL)
self.panel.SetSizer(self.layout)
self.checkEnable = wx.CheckBox(
self.panel, wx.ID_ANY, u'CSVファイルへ戦績を出力する')
self.editCsvFilename = wx.TextCtrl(self.panel, wx.ID_ANY, u'hoge')
self.layout.Add(wx.StaticText(self.panel, wx.ID_ANY, u'CSV保存先ファイル'))
self.layout.Add(self.editCsvFilename, flag=wx.EXPAND)
self.layout.Add(self.checkEnable)
##
# Write a line to text file.
# @param self The Object Pointer.
# @param record Record (text)
#
def write_record(self, record):
try:
csv_file = open(self.csv_filename, "a")
csv_file.write(record)
            csv_file.close()
except:
print("CSV: Failed to write CSV File")
##
# Generate a message for on_game_individual_result.
# @param self The Object Pointer.
# @param context IkaLog context
#
def get_record_game_individual_result(self, context):
map = IkaUtils.map2text(context['game']['map'])
rule = IkaUtils.rule2text(context['game']['rule'])
won = IkaUtils.getWinLoseText(
context['game']['won'], win_text="勝ち", lose_text="負け", unknown_text="不明")
t = datetime.now()
t_str = t.strftime("%Y,%m,%d,%H,%M")
t_unix = int(time.mktime(t.timetuple()))
return "%s,%s,%s,%s,%s\n" % (t_unix, t_str, map, rule, won)
##
# on_game_individual_result Hook
# @param self The Object Pointer
# @param context IkaLog context
#
def on_game_individual_result(self, context):
IkaUtils.dprint('%s (enabled = %s)' % (self, self.enabled))
if not self.enabled:
return
record = self.get_record_game_individual_result(context)
self.write_record(record)
##
# Constructor
# @param self The Object Pointer.
# @param csv_filename CSV log file name
#
def __init__(self, csv_filename=None):
self.enabled = (not csv_filename is None)
self.csv_filename = csv_filename
| mzsm/IkaLog | ikalog/outputs/csv.py | Python | apache-2.0 | 4,550 |
import re
import sublime
import sublime_plugin
try:
from .settings import *
from .tools import *
except ValueError:
from settings import *
from tools import *
class PhpSettings():
def getConsoleFunc(self):
return settings().get('php').get('consoleFunc', ['print_r'])
def getPreTag(self):
return settings().get('php').get('preTag', True)
def getDieAfterLog(self):
return settings().get('php').get('dieAfterLog', False)
def getConsoleLogTypes(self):
return []
def getConsoleStr(self):
return "{variable}"
def getConsoleSingleQuotes(self):
return True
class PhpWrapp(PhpSettings):
def create(self, view, edit, cursor, insert_before):
line_region = view.line(cursor)
string = view.substr(line_region)
match = re.search(r"(\s*)", string)
end = 0
if self.is_log_string(string):
return end
if match:
# check if cursor is on the word and trying to get that word
if cursor.begin() == cursor.end():
word = view.word(cursor)
else:
word = cursor
var_text = view.substr(word).strip()
# if selection is empty and there is no word under cursor use clipboard
if not var_text:
var_text = sublime.get_clipboard()
if var_text[-1:] == ";":
var_text = var_text[:-1]
if len(var_text) == 0:
return sublime.status_message('Console Wrap: Please make a selection or copy something.')
else:
indent_str = self.get_indent(view, line_region,insert_before)
text = self.get_wrapper(view, var_text, indent_str, insert_before)
# msg('text', text)
if insert_before:
lineReg = line_region.begin()
else:
lineReg = line_region.end()
view.insert(edit, lineReg, text)
end = view.line(lineReg + 1).end()
return end
def is_log_string(self, line):
logFunc = self.getConsoleFunc()[0]
return re.match(r"((\/\/\s)|(;\s))?(echo '<pre>';\s?)?(.+)?("+logFunc+")(\.?)(\w+)?\((.+)?\);( echo '<\/pre>';)?", line.strip())
def get_indent(self, view, region, insert_before):
matches = re.findall(r'^(\s*)[^\s]', view.substr(region))
indent_str = matches and len(matches) and matches[0] or ''
if insert_before:
return indent_str
indent_line = view.substr(self.find_next_line(view, region)).strip()
need_indent = [True for i in ['{', '=', ':', '->', '=>'] if indent_line.endswith(i)]
indent_line.lstrip('{}[]() \t')
if need_indent:
indent_str += '\t'
return indent_str
def find_next_line(self, view, region):
while 0 < region.a and region.b < view.size() and view.classify(region.a) is sublime.CLASS_EMPTY_LINE:
region = view.line(region.a - 1)
return region
def get_wrapper(self, view, var, indent_str, insert_before):
consoleStr = self.getConsoleStr()
consoleFunc = self.getConsoleFunc()
preTag = self.getPreTag()
dieAfterLog = self.getDieAfterLog()
separator = ", "
consoleArr = consoleStr.split(separator)
t = consoleArr[0]
if len(consoleArr) >= 2:
v = ', '.join(consoleArr[1:])
else:
v = t
tmpl = indent_str if insert_before else ("\n" + indent_str)
openPre = "echo '<pre>'; " if preTag else ""
closePre = " echo '</pre>';" if preTag else ""
dieStr = " die();" if dieAfterLog else ""
a = "{2}{0}({1});{3}{4}".format("->".join(consoleFunc), v, openPre, closePre, dieStr)
a = a.format(variable=var)
tmpl += a
tmpl += "\n" if insert_before else ""
return tmpl
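    # With the default settings above, the generated PHP line looks like this
    # (the variable name is illustrative):
    #
    #   echo '<pre>'; print_r($user); echo '</pre>';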
def comment(self, view, edit, cursor):
logFunc = self.getConsoleFunc()[0]
get_selections(view, sublime)
cursor = view.sel()[0]
line_region = view.line(cursor)
string = view.substr(line_region)
rgx = re.compile('^[ \t]*((echo|'+logFunc+').*;)', re.MULTILINE)
for line in rgx.finditer(string):
string = string.replace(line.group(1), "// "+line.group(1))
# remove duplicate
        for match in re.finditer(r"((\/\/\s?){2,})((echo|" + logFunc + r").*;)", string, re.MULTILINE):
string = string.replace(match.group(1), "// ")
view.replace(edit, line_region, string)
view.sel().clear()
def remove(self, view, edit, cursor):
logFunc = self.getConsoleFunc()[0]
get_selections(view, sublime)
cursor = view.sel()[0]
line_region = view.line(cursor)
string = view.substr(line_region)
newstring = re.sub(r"((\/\/\s)|(;\s))?(echo '<pre>';\s?)?(.+)?("+logFunc+")(\.?)(\w+)?\((.+)?\);( echo '<\/pre>';)?", '', string)
view.replace(edit, line_region, newstring)
view.sel().clear()
def quick_nav_done(self, view, index, regions, showOnly=False):
region = sublime.Region(regions[index].b)
if not showOnly:
view.sel().add(region)
view.show_at_center(region)
def show_quick_nav(self, view, edit, cursor):
tags = []
regions = []
logFunc = self.getConsoleFunc()[0]
get_selections(view, sublime)
counter = 1
regex = re.compile(r""+logFunc+"(\.?)(\w+)?\((.+)?\);?", re.UNICODE|re.DOTALL)
for comment_region in view.sel():
for splited_region in view.split_by_newlines(comment_region):
m = regex.search(view.substr(splited_region))
if m:
# Add a counter for faster selection
tag = m.group(0)
tags.append(str(counter) + '. ' + tag)
regions.append(splited_region)
counter += 1
if (len(tags)):
view.window().show_quick_panel(tags, lambda index : self.quick_nav_done(view, index, regions), 0, 0, lambda index : self.quick_nav_done(view, index, regions, True))
else:
sublime.status_message("Console Wrap: No 'logs' found")
view.sel().clear()
def remove_commented(self, view, edit, cursor):
logFunc = self.getConsoleFunc()[0]
get_selections(view, sublime)
cursor = view.sel()[0]
line_region = view.line(cursor)
string = view.substr(line_region)
newstring = re.sub(r"(\/\/\s)(echo '<pre>';\s?)?(.+)?("+logFunc+")(\.?)(\w+)?\((.+)?\);( echo '<\/pre>';)?", '', string)
view.replace(edit, line_region, newstring)
view.sel().clear()
| unknownuser88/consolewrap | core/php_wrapper.py | Python | mit | 5,774 |
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Deploys autobuild engineering release to shona and neo.
- Log basic info to ~/autobuild/status/deploy.syslog
- Grab deployment lock (~/autobuild/status/deploy.lock) but only with 2 minute
timeout. If can't grab it, then exit (this prevents us from doing
the disk-intensive searches for latest passed/deployed build)
- Find the latest passed build (unless --revision)
- Find the latest deployed build (~/autobuild/status/deploy.latest)
- If the latest deployed build is >= latest passed build, then exit (other
deploy scripts are only locked out for two minutes)
- Re-grab deployment lock with timeout of for 6 hours, so deployment happens at most every 6 hours.
- Write latest deployed build file (~/autobuild/status/deploy.latest)
- Deploy on neo and shona
(note: if deployment on neo/shona fails, that build will not be recopied
because deployed build file is already written)
Has options for:
- specifying a specific tarball to deploy (--tarball)
- specifying a specific revision number to deploy (--revision)
- deploying even if there is an unexpired lock (--force)
- using a link name other than "current"
"""
import sys
import os
import time
import getopt
import autobuild
mydir = sys.path[0]
buildSystemDir = os.path.abspath(os.path.normpath(os.path.join(mydir)))
sys.path.append(buildSystemDir)
import pybuild.utils as utils
import pybuild.test_release as test
testOnly = False
# Initially grab the lock only for two minutes
initialLockTime = 120
# If we decide to deploy, grab it for 6 hours to reduce frequency of copying
deployLockTime = 6 * 3600
def getLatestPassedRevision(releasesdir):
dirs = [f for f in os.listdir(releasesdir) if f.startswith("r")]
  dirs = sorted(dirs, key=lambda d: int(d[1:]), reverse=True)  # sort numerically, not lexicographically
found = False
for dir in dirs:
# strip off "r"
revision = int(dir[1:])
build = getTarballFromRevision(revision)
    if os.path.exists(build):  # getTarballFromRevision already returns the full path
found = True
break
  if not found:
raise Exception("No passed builds found")
return revision
def getLatestDeployedRevision(filename):
if not os.path.exists(filename):
return 0
lines = open(filename).readlines()
if len(lines) == 0:
try:
os.remove(filename)
except:
pass
raise Exception("getLatestDeployedRevision: filename %s is empty - deleting" % filename)
revision = int(lines[0].strip())
return revision
def setLatestDeployedRevision(filename, revision):
print "Setting latest deployed revision to %s" % revision
open(filename, "w").write(str(revision))
def getTarballFromRevision(revision):
return os.path.join(releasesdir,
"r%s" % revision,
"nupic-npp-r%s-linux64.tgz" % revision)
def deploy(tarball, host, label):
print "Deploying tarball %s to host %s with label %s" % (tarball, host, label)
if testOnly:
return
tarballFilename = os.path.basename(tarball)
tarballBasename, ext = os.path.splitext(tarballFilename)
print "Copying build %s to %s" % (tarballBasename, host)
utils.copyFilesToDirOnRemoteHost(tarball, host, "/tmp", "buildaccount")
command = 'ssh %s "cd /neo/nta; ' % host
command = command + "rm -rf %s; " % tarballBasename
command = command + "tar xzf /tmp/%s; " % tarballFilename
command = command + "rm -f %s; " % label
command = command + "ln -s %s %s; " % (tarballBasename, label)
command = command + "rm -f /tmp/%s; " % tarballFilename
command = command + '"'
print "Extracting tarball on host %s" % host
print "Running command: %s" % command
utils.runCommand(command)
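# The assembled remote command is a single line of the form (illustrative
# revision number and label):
#   ssh host "cd /neo/nta; rm -rf nupic-npp-r1234-linux64;
#             tar xzf /tmp/nupic-npp-r1234-linux64.tgz; rm -f current;
#             ln -s nupic-npp-r1234-linux64 current;
#             rm -f /tmp/nupic-npp-r1234-linux64.tgz; "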
def syslog(filename, message):
  """
  Append a single date-stamped message to the given file.
  Used for build system startup/shutdown messages.
  Heavyweight, because it opens and closes the file each time.
  All other messages go into the build logs via the
  logger methods (INFO, DEBUG, WARN, ERROR).
  """
  out = "%s %s\n" % (time.strftime("%m/%d-%H:%M:%S "), message)
  f = open(filename, "a")
  f.write(out)
  print out,
  f.close()
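# A deploylog line produced by syslog() looks like (illustrative timestamp):
#   04/01-12:30:45  Deploying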
def usage():
print "usage: %s [--force] [[--revision <revision>] | [--tarball <tarball>]] [--label <label>]" % sys.argv[0]
sys.exit(1)
options = ["force", "revision=", "tarball=", "label="]
if __name__ == "__main__":
try:
opts, args = getopt.getopt(sys.argv[1:], "", options)
except getopt.GetoptError:
usage()
if len(args) > 0:
usage()
force = False
revision = None
tarball = None
label = "current"
  for o, a in opts:
    if o == "--force":
      force = True
    elif o == "--revision":
      revision = int(a)
    elif o == "--tarball":
      tarball = a
    elif o == "--label":
      label = a
  # --revision and --tarball are mutually exclusive, in whichever order
  # they were given.
  if revision is not None and tarball is not None:
    print "Both --revision and --tarball specified. Only one allowed"
    usage()
rootdir = os.path.expanduser("~/autobuild")
statusdir = os.path.join(rootdir, "status")
releasesdir = os.path.join(rootdir, "releases")
latestDeployFile = os.path.join(statusdir, "deploy.latest")
utils.createDir(statusdir, True)
syslogFile = os.path.join(statusdir, "deploylog")
syslog(syslogFile, "Deploying")
deploylockfile = os.path.join(rootdir, "status", "deploy.lock")
try:
lock = utils.getLock(deploylockfile, initialLockTime, processFree=True, force=force)
if not lock:
raise Exception("Unable to get deployment lock %s. Use --force to override" % deploylockfile)
if tarball is None:
if revision is None:
revision = getLatestPassedRevision(releasesdir)
if revision is None:
raise Exception("Unable to get latest passed revision")
deployedRevision = getLatestDeployedRevision(latestDeployFile)
if revision <= deployedRevision:
raise Exception("Latest passed revision %d is not greater than latest deployed revision %d" % (revision, deployedRevision))
tarball = getTarballFromRevision(revision)
lock = utils.getLock(deploylockfile, deployLockTime, processFree=True, force=True)
if revision is not None:
setLatestDeployedRevision(latestDeployFile, revision)
# deploy(tarball, "shona1", label)
# syslog(syslogFile, "Deployed %s with label %s to shona1" % (tarball, label))
deploy(tarball, "matrix.numenta.com", label)
syslog(syslogFile, "Deployed %s with label %s to neo" % (tarball, label))
except Exception, e:
tb = sys.exc_info()[2]
import traceback
lineNumber = traceback.extract_tb(tb)[-1][1]
syslog(syslogFile, "Exception (line %d): %s" % (lineNumber, e))
# sys.exc_info()[2] is traceback object
# traceback.extract_tb(traceback,limit=1)[0] -> [filename, line number, function name, text]
| tkaitchuck/nupic | build_system/autobuild/deploy.py | Python | gpl-3.0 | 7,654 |
import praw, cPickle as pickle
from collections import Counter
from nltk import word_tokenize,sent_tokenize
def unpickle(filename):
    with open(filename, "rb") as f:
        return pickle.load(f)
def writePickle(struct, filename):
    with open(filename, "wb") as f:
        pickle.dump(struct, f)
r = praw.Reddit(user_agent='getting political stuff')
r.login('css-throwaway','csspassword')
linebreak = '-----==----==---==-----'
# this may take a while. start early.
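# getThreads returns [comments, questionComment]; each questionComment entry
# is a dict of the form (illustrative values):
#   {'Request': '<previous sentence> <question sentence>',
#    'id': 'abc123',
#    'score': 42}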
def getThreads(subreddit,num_comments=10,max_threads=5000,max_comments=100,min_comments=10,verbose=False):
comment_counter = 0
already_done = [] #keep track of threads you've already seen (you can get them twice)
subred = r.get_subreddit(subreddit) #get a subreddit
comments = []
questionComment = []
for sub in subred.get_top_from_all(limit=max_threads):
if sub.id not in already_done and comment_counter < num_comments:
already_done.append(sub.id)
sub.replace_more_comments(limit=None, threshold=1)
flat_comments = praw.helpers.flatten_tree(sub.comments)
            for comment in flat_comments:
                # diff_comment is True only while we are on the first sentence
                # of a comment, so that comments[-2] below is guaranteed to be
                # an earlier sentence of the same comment.
                diff_comment = True
                for sentence in sent_tokenize(comment.body.encode('utf-8')):
                    if '[deleted]' in sentence:
                        break
                    comments.append(sentence)
                    if '?' in sentence and not diff_comment:
                        s = {}
                        s['Request'] = comments[-2] + ' ' + sentence
                        s['id'] = comment.id
                        s['score'] = comment.score
                        questionComment.append(s)
                        comment_counter += 1
                        print 'Added question. Comment counter', comment_counter
                    # Clear the flag once the first sentence has been seen, so
                    # later question sentences in this comment get captured.
                    diff_comment = False
                    if comment_counter > num_comments:
                        return [comments, questionComment]
    return [comments, questionComment]
thread_names = ['Progressive','Socialism']
reddit_data = {}
for thread_name in thread_names:
print thread_name
reddit_data[thread_name] = getThreads(thread_name,num_comments=1000,max_threads=40000,max_comments=1000,min_comments=0,verbose=True)
writePickle(reddit_data,"reddit_data_ps.pickle") | nidishrajendran/computational-politeness | data/reddit/threadGetter_countries.py | Python | apache-2.0 | 2,361 |
# This file is part of sydpy.
#
# Copyright (C) 2014-2015 Bogdan Vukobratovic
#
# sydpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# sydpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General
# Public License along with sydpy. If not, see
# <http://www.gnu.org/licenses/>.
"""Module implements the basic sequencer module."""
from sydpy import Module, arch_def, always
from sydpy._simulator import simwait
from sydpy._delay import Delay
class BasicSeq(Module):
"""Basic sequence module that sends the transactions created by the
supplied generator function.
Example instantiation:
self.inst(BasicSeq, seq_o, gen, flow_ctrl, intfs={'seq_o' : tlm(bit).master})
seq_o - The output interface
gen - The supplied generator function should return two values:
next_seqi - The transaction that should be written to channel
next_delay - The delay before writing the transaction.
flow_ctrl - Can have the following values:
True - Transaction is not sent until the channel is empty
False - Transaction is sent regardless of the channel
intfs - Declares the interface type of the output. Interface has to be a subclass
of tlm and it has to be master.
"""
#@arch_def
def tlm(self, seq_o, gen=None, flow_ctrl=True):
@always(self)
def main():
for next_seqi, next_delay in gen:
if next_delay:
simwait(Delay(next_delay))
if flow_ctrl:
seq_o.blk_next = next_seqi
else:
seq_o.next = next_seqi
| bogdanvuk/sydpy | sydpy/verif/basic_seq.py | Python | lgpl-2.1 | 2,192 |
from __future__ import absolute_import
from __future__ import unicode_literals
import logging
import re
from collections import OrderedDict
from docker.errors import NotFound
from docker.types import IPAMConfig
from docker.types import IPAMPool
from docker.utils import version_gte
from docker.utils import version_lt
from . import __version__
from .config import ConfigurationError
from .const import LABEL_NETWORK
from .const import LABEL_PROJECT
from .const import LABEL_VERSION
log = logging.getLogger(__name__)
OPTS_EXCEPTIONS = [
'com.docker.network.driver.overlay.vxlanid_list',
'com.docker.network.windowsshim.hnsid',
'com.docker.network.windowsshim.networkname'
]
class Network(object):
def __init__(self, client, project, name, driver=None, driver_opts=None,
ipam=None, external=False, internal=False, enable_ipv6=False,
labels=None, custom_name=False):
self.client = client
self.project = project
self.name = name
self.driver = driver
self.driver_opts = driver_opts
self.ipam = create_ipam_config_from_dict(ipam)
self.external = external
self.internal = internal
self.enable_ipv6 = enable_ipv6
self.labels = labels
self.custom_name = custom_name
self.legacy = None
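    # Typical construction (illustrative values; normally built for you by
    # build_networks() at the bottom of this module):
    #   Network(client, project='myapp', name='backend', driver='bridge',
    #           ipam={'config': [{'subnet': '172.28.0.0/16'}]})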
def ensure(self):
if self.external:
if self.driver == 'overlay':
# Swarm nodes do not register overlay networks that were
# created on a different node unless they're in use.
# See docker/compose#4399
return
try:
self.inspect()
log.debug(
'Network {0} declared as external. No new '
'network will be created.'.format(self.name)
)
except NotFound:
raise ConfigurationError(
'Network {name} declared as external, but could'
' not be found. Please create the network manually'
' using `{command} {name}` and try again.'.format(
name=self.full_name,
command='docker network create'
)
)
return
self._set_legacy_flag()
try:
data = self.inspect(legacy=self.legacy)
check_remote_network_config(data, self)
except NotFound:
driver_name = 'the default driver'
if self.driver:
driver_name = 'driver "{}"'.format(self.driver)
log.info(
'Creating network "{}" with {}'.format(self.full_name, driver_name)
)
self.client.create_network(
name=self.full_name,
driver=self.driver,
options=self.driver_opts,
ipam=self.ipam,
internal=self.internal,
enable_ipv6=self.enable_ipv6,
labels=self._labels,
attachable=version_gte(self.client._version, '1.24') or None,
check_duplicate=True,
)
def remove(self):
if self.external:
log.info("Network %s is external, skipping", self.true_name)
return
log.info("Removing network {}".format(self.true_name))
self.client.remove_network(self.true_name)
def inspect(self, legacy=False):
if legacy:
return self.client.inspect_network(self.legacy_full_name)
return self.client.inspect_network(self.full_name)
@property
def legacy_full_name(self):
if self.custom_name:
return self.name
return '{0}_{1}'.format(
re.sub(r'[_-]', '', self.project), self.name
)
@property
def full_name(self):
if self.custom_name:
return self.name
return '{0}_{1}'.format(self.project, self.name)
@property
def true_name(self):
self._set_legacy_flag()
if self.legacy:
return self.legacy_full_name
return self.full_name
@property
def _labels(self):
if version_lt(self.client._version, '1.23'):
return None
labels = self.labels.copy() if self.labels else {}
labels.update({
LABEL_PROJECT: self.project,
LABEL_NETWORK: self.name,
LABEL_VERSION: __version__,
})
return labels
def _set_legacy_flag(self):
if self.legacy is not None:
return
try:
data = self.inspect(legacy=True)
self.legacy = data is not None
except NotFound:
self.legacy = False
def create_ipam_config_from_dict(ipam_dict):
if not ipam_dict:
return None
return IPAMConfig(
driver=ipam_dict.get('driver') or 'default',
pool_configs=[
IPAMPool(
subnet=config.get('subnet'),
iprange=config.get('ip_range'),
gateway=config.get('gateway'),
aux_addresses=config.get('aux_addresses'),
)
for config in ipam_dict.get('config', [])
],
options=ipam_dict.get('options')
)
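# The compose-file style dict accepted above has this shape (illustrative
# values, not from a real project):
#   create_ipam_config_from_dict({
#       'driver': 'default',
#       'config': [{
#           'subnet': '172.28.0.0/16',
#           'ip_range': '172.28.5.0/24',
#           'gateway': '172.28.0.1',
#           'aux_addresses': {'host1': '172.28.1.5'},
#       }],
#   })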
class NetworkConfigChangedError(ConfigurationError):
def __init__(self, net_name, property_name):
super(NetworkConfigChangedError, self).__init__(
'Network "{}" needs to be recreated - {} has changed'.format(
net_name, property_name
)
)
def check_remote_ipam_config(remote, local):
    remote_ipam = remote.get('IPAM')
    # local.ipam was already converted to an IPAMConfig in Network.__init__,
    # so read it with the docker-py style capitalized keys.
    ipam_dict = local.ipam
    if ipam_dict.get('Driver') and ipam_dict.get('Driver') != remote_ipam.get('Driver'):
        raise NetworkConfigChangedError(local.true_name, 'IPAM driver')
    if len(ipam_dict['Config']) != 0:
        if len(ipam_dict['Config']) != len(remote_ipam['Config']):
            raise NetworkConfigChangedError(local.true_name, 'IPAM configs')
        # sorted() takes a callable key; sort both sides by subnet so the
        # pool configs can be compared pairwise.
        remote_configs = sorted(remote_ipam['Config'], key=lambda c: c.get('Subnet') or '')
        local_configs = sorted(ipam_dict['Config'], key=lambda c: c.get('Subnet') or '')
        while local_configs:
            lc = local_configs.pop()
            rc = remote_configs.pop()
            if lc.get('Subnet') != rc.get('Subnet'):
                raise NetworkConfigChangedError(local.true_name, 'IPAM config subnet')
            if lc.get('Gateway') is not None and lc.get('Gateway') != rc.get('Gateway'):
                raise NetworkConfigChangedError(local.true_name, 'IPAM config gateway')
            if lc.get('IPRange') != rc.get('IPRange'):
                raise NetworkConfigChangedError(local.true_name, 'IPAM config ip_range')
            if sorted(lc.get('AuxiliaryAddresses') or []) != sorted(rc.get('AuxiliaryAddresses') or []):
                raise NetworkConfigChangedError(local.true_name, 'IPAM config aux_addresses')
remote_opts = remote_ipam.get('Options') or {}
local_opts = local.ipam.get('Options') or {}
for k in set.union(set(remote_opts.keys()), set(local_opts.keys())):
if remote_opts.get(k) != local_opts.get(k):
raise NetworkConfigChangedError(local.true_name, 'IPAM option "{}"'.format(k))
def check_remote_network_config(remote, local):
if local.driver and remote.get('Driver') != local.driver:
raise NetworkConfigChangedError(local.true_name, 'driver')
local_opts = local.driver_opts or {}
remote_opts = remote.get('Options') or {}
for k in set.union(set(remote_opts.keys()), set(local_opts.keys())):
if k in OPTS_EXCEPTIONS:
continue
if remote_opts.get(k) != local_opts.get(k):
raise NetworkConfigChangedError(local.true_name, 'option "{}"'.format(k))
if local.ipam is not None:
check_remote_ipam_config(remote, local)
if local.internal is not None and local.internal != remote.get('Internal', False):
raise NetworkConfigChangedError(local.true_name, 'internal')
if local.enable_ipv6 is not None and local.enable_ipv6 != remote.get('EnableIPv6', False):
raise NetworkConfigChangedError(local.true_name, 'enable_ipv6')
local_labels = local.labels or {}
remote_labels = remote.get('Labels', {})
for k in set.union(set(remote_labels.keys()), set(local_labels.keys())):
if k.startswith('com.docker.'): # We are only interested in user-specified labels
continue
if remote_labels.get(k) != local_labels.get(k):
log.warn(
'Network {}: label "{}" has changed. It may need to be'
' recreated.'.format(local.true_name, k)
)
def build_networks(name, config_data, client):
network_config = config_data.networks or {}
networks = {
network_name: Network(
client=client, project=name,
name=data.get('name', network_name),
driver=data.get('driver'),
driver_opts=data.get('driver_opts'),
ipam=data.get('ipam'),
external=bool(data.get('external', False)),
internal=data.get('internal'),
enable_ipv6=data.get('enable_ipv6'),
labels=data.get('labels'),
custom_name=data.get('name') is not None,
)
for network_name, data in network_config.items()
}
if 'default' not in networks:
networks['default'] = Network(client, name, 'default')
return networks
class ProjectNetworks(object):
def __init__(self, networks, use_networking):
self.networks = networks or {}
self.use_networking = use_networking
@classmethod
def from_services(cls, services, networks, use_networking):
service_networks = {
network: networks.get(network)
for service in services
for network in get_network_names_for_service(service)
}
unused = set(networks) - set(service_networks) - {'default'}
if unused:
log.warn(
"Some networks were defined but are not used by any service: "
"{}".format(", ".join(unused)))
return cls(service_networks, use_networking)
def remove(self):
if not self.use_networking:
return
for network in self.networks.values():
try:
network.remove()
except NotFound:
log.warn("Network %s not found.", network.true_name)
def initialize(self):
if not self.use_networking:
return
for network in self.networks.values():
network.ensure()
def get_network_defs_for_service(service_dict):
if 'network_mode' in service_dict:
return {}
networks = service_dict.get('networks', {'default': None})
return dict(
(net, (config or {}))
for net, config in networks.items()
)
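# e.g. a service declaring networks {'front': {'priority': 10}, 'back': None}
# yields {'front': {'priority': 10}, 'back': {}}, while a service that sets
# 'network_mode' yields {} (illustrative values).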
def get_network_names_for_service(service_dict):
return get_network_defs_for_service(service_dict).keys()
def get_networks(service_dict, network_definitions):
networks = {}
for name, netdef in get_network_defs_for_service(service_dict).items():
network = network_definitions.get(name)
if network:
networks[network.true_name] = netdef
else:
raise ConfigurationError(
'Service "{}" uses an undefined network "{}"'
.format(service_dict['name'], name))
return OrderedDict(sorted(
networks.items(),
key=lambda t: t[1].get('priority') or 0, reverse=True
))
| dnephin/compose | compose/network.py | Python | apache-2.0 | 11,573 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry.page import legacy_page_test
from telemetry.timeline.model import TimelineModel
from telemetry.timeline import tracing_config
from telemetry.value import list_of_scalar_values
from telemetry.value import scalar
_CR_RENDERER_MAIN = 'CrRendererMain'
_RUN_SMOOTH_ACTIONS = 'RunSmoothAllActions'
def _AddTracingResults(thread, results):
_GC_REASONS = ['precise', 'conservative', 'idle', 'forced']
_GC_STAGES = ['mark', 'lazy_sweep', 'complete_sweep']
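  # One list of GC durations is kept per metric; names are built below as
  # 'oilpan_<reason>_<stage>' (e.g. 'oilpan_precise_mark') plus 'oilpan_coalesce'.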
def GetGcReason(event, async_slices):
args = event.args
# Old format
if 'precise' in args:
if args['forced']:
return 'forced'
return 'precise' if args['precise'] else 'conservative'
    # Use .get() so traces without a 'gcReason' argument fall through to the
    # unknown case instead of raising KeyError.
    reason = args.get('gcReason')
    if reason == 'ConservativeGC':
      return 'conservative'
    if reason == 'PreciseGC':
      return 'precise'
    if reason == 'ForcedGCForTesting':
      for s in async_slices:
        if s.start <= event.start and event.end <= s.end:
          return 'forced'
      # Ignore this forced GC being out of target ranges
      return None
    if reason == 'IdleGC':
      return 'idle'
    return None  # Unknown
def DumpMetric(page, name, values, unit, results):
if values[name]:
results.AddValue(list_of_scalar_values.ListOfScalarValues(
page, name, unit, values[name]))
results.AddValue(scalar.ScalarValue(
page, name + '_max', unit, max(values[name])))
results.AddValue(scalar.ScalarValue(
page, name + '_total', unit, sum(values[name])))
events = thread.all_slices
async_slices = [s for s in thread.async_slices
if s.name == 'BlinkGCTimeMeasurement']
# Prepare
values = {'oilpan_coalesce': []}
for reason in _GC_REASONS:
for stage in _GC_STAGES:
values['oilpan_%s_%s' % (reason, stage)] = []
# Parse trace events
reason = None
mark_time = 0
lazy_sweep_time = 0
complete_sweep_time = 0
for event in events:
duration = event.thread_duration or event.duration
if event.name == 'ThreadHeap::coalesce':
values['oilpan_coalesce'].append(duration)
continue
if event.name == 'BlinkGCMarking':
if reason is not None:
values['oilpan_%s_mark' % reason].append(mark_time)
values['oilpan_%s_lazy_sweep' % reason].append(lazy_sweep_time)
values['oilpan_%s_complete_sweep' % reason].append(complete_sweep_time)
reason = GetGcReason(event, async_slices)
mark_time = duration
lazy_sweep_time = 0
complete_sweep_time = 0
continue
if event.name == 'ThreadHeap::lazySweepPages':
lazy_sweep_time += duration
continue
if event.name == 'ThreadState::completeSweep':
complete_sweep_time += duration
continue
if reason is not None:
values['oilpan_%s_mark' % reason].append(mark_time)
values['oilpan_%s_lazy_sweep' % reason].append(lazy_sweep_time)
values['oilpan_%s_complete_sweep' % reason].append(complete_sweep_time)
page = results.current_page
unit = 'ms'
# Dump each metric
DumpMetric(page, 'oilpan_coalesce', values, unit, results)
for reason in _GC_REASONS:
for stage in _GC_STAGES:
DumpMetric(page, 'oilpan_%s_%s' % (reason, stage), values, unit, results)
# Summarize each stage
for stage in _GC_STAGES:
total_time = 0
for reason in _GC_REASONS:
total_time += sum(values['oilpan_%s_%s' % (reason, stage)])
results.AddValue(
scalar.ScalarValue(page, 'oilpan_%s' % stage, unit, total_time))
# Summarize sweeping time
total_sweep_time = 0
for stage in ['lazy_sweep', 'complete_sweep']:
sweep_time = 0
for reason in _GC_REASONS:
sweep_time += sum(values['oilpan_%s_%s' % (reason, stage)])
key = 'oilpan_%s' % stage
results.AddValue(scalar.ScalarValue(page, key, unit, sweep_time))
total_sweep_time += sweep_time
results.AddValue(
scalar.ScalarValue(page, 'oilpan_sweep', unit, total_sweep_time))
gc_time = 0
for key in values:
gc_time += sum(values[key])
results.AddValue(scalar.ScalarValue(page, 'oilpan_gc', unit, gc_time))
class _OilpanGCTimesBase(legacy_page_test.LegacyPageTest):
def __init__(self, action_name=''):
super(_OilpanGCTimesBase, self).__init__(action_name)
def WillNavigateToPage(self, page, tab):
del page # unused
# FIXME: Remove webkit.console when blink.console lands in chromium and
# the ref builds are updated. crbug.com/386847
config = tracing_config.TracingConfig()
for c in ['webkit.console', 'blink.console', 'blink_gc']:
config.chrome_trace_config.category_filter.AddIncludedCategory(c)
config.enable_chrome_trace = True
tab.browser.platform.tracing_controller.StartTracing(config, timeout=1000)
def ValidateAndMeasurePage(self, page, tab, results):
del page # unused
timeline_data = tab.browser.platform.tracing_controller.StopTracing()
timeline_model = TimelineModel(timeline_data)
threads = timeline_model.GetAllThreads()
for thread in threads:
if thread.name == _CR_RENDERER_MAIN:
_AddTracingResults(thread, results)
def DidRunPage(self, platform):
if platform.tracing_controller.is_tracing_running:
platform.tracing_controller.StopTracing()
class OilpanGCTimesForSmoothness(_OilpanGCTimesBase):
def __init__(self):
super(OilpanGCTimesForSmoothness, self).__init__()
self._interaction = None
def DidNavigateToPage(self, page, tab):
del page # unused
self._interaction = tab.action_runner.CreateInteraction(_RUN_SMOOTH_ACTIONS)
self._interaction.Begin()
def ValidateAndMeasurePage(self, page, tab, results):
self._interaction.End()
super(OilpanGCTimesForSmoothness, self).ValidateAndMeasurePage(
page, tab, results)
class OilpanGCTimesForBlinkPerf(_OilpanGCTimesBase):
def __init__(self):
super(OilpanGCTimesForBlinkPerf, self).__init__()
with open(os.path.join(os.path.dirname(__file__), '..', 'benchmarks',
'blink_perf.js'), 'r') as f:
self._blink_perf_js = f.read()
def WillNavigateToPage(self, page, tab):
page.script_to_evaluate_on_commit = self._blink_perf_js
super(OilpanGCTimesForBlinkPerf, self).WillNavigateToPage(page, tab)
def ValidateAndMeasurePage(self, page, tab, results):
tab.WaitForJavaScriptExpression('testRunner.isDone', 600)
super(OilpanGCTimesForBlinkPerf, self).ValidateAndMeasurePage(
page, tab, results)
class OilpanGCTimesForInternals(OilpanGCTimesForBlinkPerf):
def __init__(self):
super(OilpanGCTimesForInternals, self).__init__()
@classmethod
def CustomizeBrowserOptions(cls, options):
# 'expose-internals-for-testing' can be enabled on content shell.
assert 'content-shell' in options.browser_type
options.AppendExtraBrowserArgs(['--expose-internals-for-testing',
'--js-flags=--expose-gc'])
| danakj/chromium | tools/perf/measurements/oilpan_gc_times.py | Python | bsd-3-clause | 7,075 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
parser = argparse.ArgumentParser( description = 'Generate term-freqs.txt from term-counts.csv in an STMT model output folder.' )
parser.add_argument( 'path', type = str, help = 'Path of STMT model output' )
args = parser.parse_args()
path = args.path
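# Keeps the first two comma-separated fields of each line, re-delimited by a
# tab; e.g. an input line "apple,42,0.017" becomes "apple\t42" (illustrative).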
lines = open( '{}/term-counts.csv'.format( path ) ).read().splitlines()
writer = open( '{}/term-freqs.txt'.format( path ), 'w' )
for line in lines :
values = line.split( ',' )
writer.write( '{}\t{}\n'.format( values[0], values[1] ) )
writer.close()
| StanfordHCI/termite | pipeline/stmt/extract-term-freqs.py | Python | bsd-3-clause | 563 |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Windows constants for IOCP
"""
# this stuff should really be gotten from Windows headers via pyrex, but it
# probably is not going to change
ERROR_PORT_UNREACHABLE = 1234
ERROR_NETWORK_UNREACHABLE = 1231
ERROR_CONNECTION_REFUSED = 1225
ERROR_IO_PENDING = 997
ERROR_OPERATION_ABORTED = 995
WAIT_TIMEOUT = 258
ERROR_NETNAME_DELETED = 64
ERROR_HANDLE_EOF = 38
INFINITE = -1
SO_UPDATE_CONNECT_CONTEXT = 0x7010
SO_UPDATE_ACCEPT_CONTEXT = 0x700B
| hlzz/dotfiles | graphics/VTK-7.0.0/ThirdParty/Twisted/twisted/internet/iocpreactor/const.py | Python | bsd-3-clause | 550 |