code | repo_name | path | language | license | size
---|---|---|---|---|---
stringlengths 3–1.05M | stringlengths 5–104 | stringlengths 4–251 | stringclasses 1 value | stringclasses 15 values | int64 3–1.05M
# Copyright 2020 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface for end-to-end tests."""
import collections
import inspect
import os
import json
import time
import traceback
import unittest
import opensearchpy
import opensearchpy.helpers
import pandas as pd
from timesketch_api_client import client as api_client
from timesketch_import_client import importer
# Default values based on Docker config.
TEST_DATA_DIR = '/usr/local/src/timesketch/end_to_end_tests/test_data'
HOST_URI = 'http://127.0.0.1'
OPENSEARCH_HOST = 'opensearch'
OPENSEARCH_PORT = 9200
OPENSEARCH_MAPPINGS_FILE = '/etc/timesketch/plaso.mappings'
USERNAME = 'test'
PASSWORD = 'test'
class BaseEndToEndTest(object):
"""Base class for end to end tests.
Attributes:
api: Instance of an API client
sketch: Instance of Sketch object
assertions: Instance of unittest.TestCase
"""
NAME = 'name'
_ANALYZERS_COMPLETE_SET = frozenset(['ERROR', 'DONE'])
def __init__(self):
"""Initialize the end-to-end test object."""
self.api = api_client.TimesketchApi(
host_uri=HOST_URI, username=USERNAME, password=PASSWORD)
self.sketch = self.api.create_sketch(name=self.NAME)
self.assertions = unittest.TestCase()
self._counter = collections.Counter()
self._imported_files = []
def import_timeline(self, filename):
"""Import a Plaso, CSV or JSONL file.
Args:
filename (str): Filename of the file to be imported.
Raises:
            TimeoutError: If the import takes too long.
"""
if filename in self._imported_files:
return
file_path = os.path.join(TEST_DATA_DIR, filename)
print('Importing: {0:s}'.format(file_path))
with importer.ImportStreamer() as streamer:
streamer.set_sketch(self.sketch)
streamer.set_timeline_name(file_path)
streamer.add_file(file_path)
timeline = streamer.timeline
# Poll the timeline status and wait for the timeline to be ready
max_time_seconds = 600 # Timeout after 10min
sleep_time_seconds = 5 # Sleep between API calls
max_retries = max_time_seconds / sleep_time_seconds
retry_count = 0
while True:
if retry_count >= max_retries:
                raise TimeoutError(
                    'Timed out waiting for the timeline to become ready.')
_ = timeline.lazyload_data(refresh_cache=True)
status = timeline.status
# TODO: Do something with other statuses? (e.g. failed)
if status == 'ready' and timeline.index.status == 'ready':
break
retry_count += 1
time.sleep(sleep_time_seconds)
# Adding in one more sleep for good measure (preventing flaky tests).
time.sleep(sleep_time_seconds)
self._imported_files.append(filename)
def import_directly_to_opensearch(self, filename, index_name):
"""Import a CSV file directly into OpenSearch.
Args:
filename (str): Filename of the file to be imported.
index_name (str): The OpenSearch index to store the documents in.
Raises:
ValueError: In case the file cannot be ingested, does not exist or
is faulty.
"""
if filename in self._imported_files:
return
file_path = os.path.join(TEST_DATA_DIR, filename)
print('Importing: {0:s}'.format(file_path))
if not os.path.isfile(file_path):
raise ValueError('File [{0:s}] does not exist.'.format(file_path))
es = opensearchpy.OpenSearch(
[{'host': OPENSEARCH_HOST, 'port': OPENSEARCH_PORT}],
http_compress=True)
        # pandas removed the error_bad_lines flag in 2.0; on_bad_lines='skip'
        # keeps the original behaviour of silently dropping malformed rows.
        df = pd.read_csv(file_path, on_bad_lines='skip')
if 'datetime' in df:
df['datetime'] = pd.to_datetime(df['datetime'])
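        # Generator producing one bulk-indexing action per CSV row; NaN
        # columns are dropped so empty cells are not indexed as null values.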
def _pandas_to_opensearch(data_frame):
for _, row in data_frame.iterrows():
row.dropna(inplace=True)
yield {
'_index': index_name,
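                    # '_type' is deprecated in recent OpenSearch releases;
                    # kept here for compatibility with older clusters.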
'_type': '_doc',
'_source': row.to_dict()
}
if os.path.isfile(OPENSEARCH_MAPPINGS_FILE):
mappings = {}
with open(OPENSEARCH_MAPPINGS_FILE, 'r') as file_object:
mappings = json.load(file_object)
if not es.indices.exists(index_name):
es.indices.create(
body={'mappings': mappings}, index=index_name)
opensearchpy.helpers.bulk(es, _pandas_to_opensearch(df))
# Introduce a short break to allow data to be indexed.
time.sleep(3)
self._imported_files.append(filename)
def _get_test_methods(self):
"""Inspect class and list all methods that matches the criteria.
Yields:
Function name and bound method.
"""
for name, func in inspect.getmembers(self, predicate=inspect.ismethod):
if name.startswith('test_'):
yield name, func
def run_analyzer(self, timeline_name, analyzer_name):
"""Run an analyzer on an imported timeline.
Args:
timeline_name (str): the name of the imported timeline.
analyzer_name (str): the name of the analyzer to run.
"""
timeline = None
for time_obj in self.sketch.list_timelines():
if time_obj.name == timeline_name:
timeline = time_obj
break
if not timeline:
            print(
                f'Unable to run analyzer {analyzer_name} on {timeline_name}: '
                'timeline not found. Is the timeline name correct?')
return
results = timeline.run_analyzer(analyzer_name)
        # Poll the analyzer status to see when the analyzer completes its run.
max_time_seconds = 600 # Timeout after 10 min
sleep_time_seconds = 5 # Sleep between API calls
max_retries = max_time_seconds / sleep_time_seconds
retry_count = 0
while True:
if retry_count >= max_retries:
raise TimeoutError('Unable to wait for analyzer run to end.')
status_set = set()
            for line in results.status.split('\n'):
                if line.strip():
                    status_set.add(line.split()[-1])
if status_set.issubset(self._ANALYZERS_COMPLETE_SET):
break
retry_count += 1
time.sleep(sleep_time_seconds)
def setup(self):
"""Setup function that is run before any tests.
This is a good place to import any data that is needed.
"""
        raise NotImplementedError
def run_tests(self):
"""Run all test functions from the class.
Returns:
Counter of number of tests and errors.
"""
print('*** {0:s} ***'.format(self.NAME))
for test_name, test_func in self._get_test_methods():
self._counter['tests'] += 1
print('Running test: {0:s} ...'.format(
test_name), end='', flush=True)
try:
test_func()
except Exception: # pylint: disable=broad-except
# TODO: Change to logging module instead of prints
print(traceback.format_exc())
self._counter['errors'] += 1
continue
print('[OK]')
return self._counter
| google/timesketch | end_to_end_tests/interface.py | Python | apache-2.0 | 7,957 |
import sublime_plugin
from ..libs.global_vars import get_language_service_enabled
from ..libs.view_helpers import is_typescript, active_view
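# Each base class below gates its command on the relevant view containing
# TypeScript and on the plugin's language service being enabled.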
class TypeScriptBaseTextCommand(sublime_plugin.TextCommand):
def is_enabled(self):
return is_typescript(self.view) and get_language_service_enabled()
class TypeScriptBaseWindowCommand(sublime_plugin.WindowCommand):
def is_enabled(self):
return is_typescript(self.window.active_view()) and get_language_service_enabled()
class TypeScriptBaseApplicationCommand(sublime_plugin.ApplicationCommand):
def is_enabled(self):
return is_typescript(active_view()) and get_language_service_enabled()
| Microsoft/TypeScript-Sublime-Plugin | typescript/commands/base_command.py | Python | apache-2.0 | 671 |
from btserver import BTServer
from btserver import BTError
from sensor import SensorServer
import argparse
import asyncore
import json
import logging
import sqlite3
from threading import Thread
from time import sleep, time
logger = logging.getLogger(__name__)
if __name__ == '__main__':
    # Create the argument parser
    parser = argparse.ArgumentParser(usage="usage: %(prog)s [options] arg")
parser.add_argument("--output", dest="output_format", default="csv",
help="set output format: csv, json")
parser.add_argument("--database", dest="database_name", default="air_pollution_data.db",
help="specify database file")
args = parser.parse_args()
# Create a BT server
uuid = "94f39d29-7d6d-437d-973b-fba39e49d4ee"
bt_service_name = "Air Pollution Sensor"
bt_server = BTServer(uuid, bt_service_name)
# Create the server thread and run it
bt_server_thread = Thread(target=asyncore.loop, name="Gossip BT Server Thread")
bt_server_thread.daemon = True
bt_server_thread.start()
sensor_server = SensorServer(database_name=args.database_name)
sensor_server.daemon = True
sensor_server.start()
try:
db_conn = sqlite3.connect(args.database_name)
db_cur = db_conn.cursor()
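        # NOTE: presumably the SensorServer thread owns all database writes;
        # this cursor mainly verifies that the database file can be opened.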
except Exception as e:
logger.error("Error connecting the database {}, reason: {}".format(args.database_name, e.message))
while True:
for client_handler in bt_server.active_client_handlers.copy():
# Use a copy() to get the copy of the set, avoiding 'set change size during iteration' error
# Create CSV message "'realtime', time, temp, SN1, SN2, SN3, SN4, PM25\n"
sensor_output = sensor_server.get_sensor_output()
#raw = sensor_output.get('Temp', -1)
#v = 5./4096 * raw
#t = (1000 * v) - 277
#temp = t
temp = sensor_output.get('Temp', -1)
epoch_time = int(time()) # epoch time
#real_time = time.localtime() # real time
SN1 = sensor_output.get('SN1', -1) # real NO2 value
SN2 = sensor_output.get('SN2', -1) # real O3 value
SN3 = sensor_output.get('SN3', -1) # real CO value
SN4 = sensor_output.get('SN4', -1) # real SO2 value
PM25 = sensor_output.get('PM25', -1) # real PM25 value
msg = ""
if args.output_format == "csv":
msg = "realtime, %d, %f, %f, %f, %f, %f, %f" % (epoch_time, temp, SN1, SN2, SN3, SN4, PM25)
elif args.output_format == "json":
output = {'type': '0',
'time': epoch_time,
'temp': temp,
'NO2': SN1,
'O3': SN2,
'CO': SN3,
'SO2': SN4,
'PM25': PM25}
msg = json.dumps(output)
try:
client_handler.send(msg + '\n')
except Exception as e:
BTError.print_error(handler=client_handler, error=BTError.ERR_WRITE, error_message=repr(e))
client_handler.handle_close()
# Sleep for 3 seconds
sleep(3)
| JungInkyo/Qualcomm_sensor | pa10a/Team_C_sensor.py | Python | gpl-2.0 | 3,324 |
import bpy
from mathutils import *
from math import *
from . import collada as c, fix, armatures
from bpy.props import StringProperty
import xml.etree.cElementTree as etree
try:
from . import simox
use_simox = True
except ImportError:
use_simox = False
try:
from . import mmm
use_mmm = True
except ImportError:
use_mmm = False
def parseTree(tree, parentName):
print("parsetree")
armName = bpy.context.active_object.name
armatures.createBone(armName, tree.name, parentName)
bpy.ops.roboteditor.selectbone(boneName = tree.name)
print (tree.name)
boneProp = bpy.context.active_bone.RobotEditor
m = Matrix()
print(tree.transformations)
for i in tree.transformations:
# We expect a matrix here!
# Todo accept rotation and translations too!
if type(i[0]) is list:
m=m*Matrix(i)
elif len(i)==3:
#TODO
pass
elif len(i)==4:
#TODO
pass
else:
raise Exception("ParsingError")
print(m)
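    # The translation is divided by 1000 below, presumably converting
    # millimetre units from the source file into Blender's metre units.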
bpy.context.active_bone.RobotEditor.Euler.x.value = m.translation[0]/1000
bpy.context.active_bone.RobotEditor.Euler.y.value = m.translation[1]/1000
bpy.context.active_bone.RobotEditor.Euler.z.value = m.translation[2]/1000
bpy.context.active_bone.RobotEditor.Euler.gamma.value = degrees(m.to_euler().z)
bpy.context.active_bone.RobotEditor.Euler.beta.value = degrees(m.to_euler().y)
bpy.context.active_bone.RobotEditor.Euler.alpha.value = degrees(m.to_euler().x)
    if tree.axis_type == 'revolute':
        bpy.context.active_bone.RobotEditor.jointMode = 'REVOLUTE'
        #boneProp.theta.value = float(tree.initialValue)
bpy.context.active_bone.RobotEditor.theta.max = float(tree.max)
bpy.context.active_bone.RobotEditor.theta.min = float(tree.min)
else:
bpy.context.active_bone.RobotEditor.jointMode = 'PRISMATIC'
#boneProp.d.value = float(tree.initialValue)
bpy.context.active_bone.RobotEditor.d.max = float(tree.max)
bpy.context.active_bone.RobotEditor.d.min = float(tree.min)
if tree.axis is not None:
for i,axis in enumerate(tree.axis):
if axis == -1.0:
bpy.context.active_bone.RobotEditor.axis_revert = True
tree.axis[i]=1.0
if tree.axis==[1.0,0.0,0.0]:
bpy.context.active_bone.RobotEditor.axis = 'X'
elif tree.axis==[0.0,1.0,0.0]:
bpy.context.active_bone.RobotEditor.axis = 'Y'
elif tree.axis==[0.0,0.0,1.0]:
bpy.context.active_bone.RobotEditor.axis = 'Z'
print("parsetree done")
for child in tree.children:
parseTree(child, tree.name)
def extractData(boneName):
tree = c.Tree()
arm = bpy.context.active_object
bpy.ops.roboteditor.selectbone(boneName = boneName)
currentBone = bpy.context.active_bone
tree.name = boneName
if currentBone.parent:
parentName = currentBone.parent.name
else:
parentName = None
if currentBone.RobotEditor.axis_revert:
inverted = -1
else:
inverted = 1
axis = ["0", "0", "0"]
if currentBone.RobotEditor.axis == 'X':
axis[0] = str(inverted)
elif currentBone.RobotEditor.axis == 'Y':
axis[1] = str(inverted)
elif currentBone.RobotEditor.axis == 'Z':
axis[2] = str(inverted)
tree.axis = axis
trafo, dummy = currentBone.RobotEditor.getTransform()
# translation
tree.addTrafo([str(e) for e in trafo.translation])
# rotation
rotation = trafo.to_euler()
tree.addTrafo([str(e) for e in [0,0,1, rotation.z]])
tree.addTrafo([str(e) for e in [0,1,0, rotation.y]])
tree.addTrafo([str(e) for e in [1,0,0, rotation.x]])
    if currentBone.RobotEditor.jointMode == 'REVOLUTE':
tree.initialValue = str(currentBone.RobotEditor.theta.offset)
tree.min = str(currentBone.RobotEditor.theta.min)
tree.max = str(currentBone.RobotEditor.theta.max)
tree.axis_type = 'revolute'
else:
tree.initialValue = str(currentBone.RobotEditor.d.offset)
tree.min = str(currentBone.RobotEditor.d.min)
tree.max = str(currentBone.RobotEditor.d.max)
tree.axis_type = 'prismatic'
children = [child.name for child in currentBone.children]
tree.meshes = [mesh.name for mesh in bpy.data.objects if mesh.type == 'MESH' and mesh.parent_bone == boneName]
markers = [m for m in bpy.data.objects if m.RobotEditor.tag == 'MARKER' and m.parent_bone == boneName]
#tree.markers = [(m.name,(currentBone.matrix_local.inverted()*m.matrix_world.translation).to_tuple()) for m in markers]
#tree.markers = [(m.name,(m.matrix_parent_inverse*m.matrix_world.translation).to_tuple()) for m in markers]
poseBone = arm.pose.bones[boneName]
tree.markers = [(m.name, (poseBone.matrix.inverted()*arm.matrix_world.inverted()*m.matrix_world.translation).to_tuple() ) for m in markers]
for child in children:
tree.addChild(extractData(child))
return tree
# operator to export an armature to COLLADA 1.5
class RobotEditor_exportCollada(bpy.types.Operator):
bl_idname = "roboteditor.colladaexport"
bl_label = "Export to COLLADA 1.5"
filepath = StringProperty(subtype = 'FILE_PATH')
def execute(self, context):
bpy.ops.wm.collada_export(filepath=self.filepath, \
check_existing=False, filter_blender=False,\
filter_image=False, filter_movie=False, \
filter_python=False, filter_font=False, \
filter_sound=False, filter_text=False,\
filter_btx=False, filter_collada=True, \
filter_folder=True)
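        # Blender itself appears to export COLLADA 1.4; the exported file is
        # patched, re-parsed, and augmented with the kinematics tree before
        # being written out again as COLLADA 1.5.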
fix.fixCollada(self.filepath, self.filepath)
handler = c.COLLADA()
handler.import14(self.filepath)
arm = context.active_object
baseBoneName = arm.data.bones[0].name
tree = c.Tree()
tree.name = arm.name
tree.addChild(extractData(baseBoneName))
handler.attach(tree)
        massFrames = [obj for obj in bpy.data.objects if obj.RobotEditor.tag == 'PHYSICS_FRAME' and obj.parent_bone != '']
for frame in massFrames:
#transform = frame.parent.data.bones[frame.parent_bone].matrix_local.inverted() * frame.matrix_local
boneName = frame.parent.data.bones[frame.parent_bone].name
poseBone = arm.pose.bones[boneName]
transform = poseBone.matrix.inverted()*arm.matrix_world.inverted()*frame.matrix_world
frameTrafos = []
frameTrafos.append(tuple(v for v in transform.translation))
frameRotation = transform.to_euler()
frameTrafos.append(tuple([0,0,1,frameRotation.z]))
frameTrafos.append(tuple([0,1,0,frameRotation.y]))
frameTrafos.append(tuple([1,0,0,frameRotation.x]))
collisionModels = []
collisionModelTransformations = {}
for model in [i for i in bpy.data.objects if i.parent == frame]:
modelName = model.data.name.replace('.','_')+'-mesh'
collisionModels.append(modelName)
#matrix = model.parent.data.bones[model.parent_bone].matrix_local.inverted() * model.matrix_local
matrix = model.matrix_local
collisionModelTransformations[modelName]=[tuple(v for v in matrix.translation)]
rotation = matrix.to_euler()
collisionModelTransformations[modelName].append(tuple([0,0,1,rotation.z]))
collisionModelTransformations[modelName].append(tuple([0,1,0,rotation.y]))
collisionModelTransformations[modelName].append(tuple([1,0,0,rotation.x]))
#TODO also bring the matrix_local to all collisionmodels
print ("mass frames", frame.name, collisionModels)
handler.addMassObject(frame.name, frameTrafos, tuple(v for v in frame.RobotEditor.dynamics.inertiaTensor), frame.RobotEditor.dynamics.mass, collisionModels,collisionModelTransformations)
handler.write(self.filepath)
        return {'FINISHED'}
def invoke(self, context, event):
context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
# operator to import an MMM-Motion
class RobotEditor_importMMM(bpy.types.Operator):
bl_idname = "roboteditor.mmmimport"
bl_label = "Import MMM"
filepath = StringProperty(subtype = 'FILE_PATH')
def execute(self, context):
mmm.read(self.filepath)
        return {'FINISHED'}
def invoke(self, context, event):
context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
def draw(layout, context):
layout.operator("roboteditor.colladaexport")
layout.operator("roboteditor.mmmimport")
# operator to import the kinematics in a SIMOX-XML file
class RobotEditor_importSIMOX(bpy.types.Operator):
bl_idname = "roboteditor.simoximport"
bl_label = "Import SIMOX XML"
filepath = StringProperty(subtype = 'FILE_PATH')
def execute(self, context):
tree=simox.read(self.filepath)
parseTree(tree,None)
        return {'FINISHED'}
def invoke(self, context, event):
context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
def draw(layout, context):
layout.operator("roboteditor.colladaexport")
if use_simox:
layout.operator("roboteditor.simoximport")
if use_mmm:
layout.operator("roboteditor.mmmimport")
def register():
bpy.utils.register_class(RobotEditor_exportCollada)
if use_simox:
bpy.utils.register_class(RobotEditor_importSIMOX)
if use_mmm:
bpy.utils.register_class(RobotEditor_importMMM)
def unregister():
bpy.utils.unregister_class(RobotEditor_exportCollada)
if use_simox:
bpy.utils.unregister_class(RobotEditor_importSIMOX)
if use_mmm:
bpy.utils.unregister_class(RobotEditor_importMMM)
| StefanUlbrich/RobotEditor | files.py | Python | gpl-2.0 | 9,977 |
# Copyright 2007 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
""" Contains functions and classes related to fields.
"""
import datetime, fnmatch, re, struct, sys
from array import array
from decimal import Decimal
from whoosh import analysis, columns, formats
from whoosh.compat import u, b, PY3
from whoosh.compat import with_metaclass
from whoosh.compat import itervalues, xrange
from whoosh.compat import bytes_type, string_type, integer_types, text_type
from whoosh.system import emptybytes
from whoosh.system import pack_byte, unpack_byte
from whoosh.util.numeric import to_sortable, from_sortable
from whoosh.util.numeric import typecode_max, NaN
from whoosh.util.text import utf8encode, utf8decode
from whoosh.util.times import datetime_to_long, long_to_datetime
# Exceptions
class FieldConfigurationError(Exception):
pass
class UnknownFieldError(Exception):
pass
# Field Types
class FieldType(object):
"""Represents a field configuration.
The FieldType object supports the following attributes:
* format (formats.Format): the storage format for the field's contents.
* analyzer (analysis.Analyzer): the analyzer to use to turn text into
terms.
* vector (formats.Format): the storage format for the field's vectors
(forward index), or None if the field should not store vectors.
* scorable (boolean): whether searches against this field may be scored.
This controls whether the index stores per-document field lengths for
this field.
* stored (boolean): whether the content of this field is stored for each
document. For example, in addition to indexing the title of a document,
you usually want to store the title so it can be presented as part of
the search results.
* unique (boolean): whether this field's value is unique to each document.
For example, 'path' or 'ID'. IndexWriter.update_document() will use
fields marked as 'unique' to find the previous version of a document
being updated.
* multitoken_query is a string indicating what kind of query to use when
a "word" in a user query parses into multiple tokens. The string is
interpreted by the query parser. The strings understood by the default
query parser are "first" (use first token only), "and" (join the tokens
with an AND query), "or" (join the tokens with OR), "phrase" (join
the tokens with a phrase query), and "default" (use the query parser's
default join type).
The constructor for the base field type simply lets you supply your own
configured field format, vector format, and scorable and stored values.
Subclasses may configure some or all of this for you.
"""
analyzer = format = vector = scorable = stored = unique = None
indexed = True
multitoken_query = "default"
sortable_typecode = None
spelling = False
column_type = None
def __init__(self, format, analyzer, vector=None, scorable=False,
stored=False, unique=False, multitoken_query="default",
sortable=False):
assert isinstance(format, formats.Format)
self.format = format
self.analyzer = analyzer
self.vector = vector
self.scorable = scorable
self.stored = stored
self.unique = unique
self.multitoken_query = multitoken_query
self.set_sortable(sortable)
def __repr__(self):
temp = "%s(format=%r, vector=%r, scorable=%s, stored=%s, unique=%s)"
return temp % (self.__class__.__name__, self.format, self.vector,
self.scorable, self.stored, self.unique)
def __eq__(self, other):
return all((isinstance(other, FieldType),
(self.format == other.format),
(self.vector == other.vector),
(self.scorable == other.scorable),
(self.stored == other.stored),
(self.unique == other.unique),
(self.column_type == other.column_type)))
def __ne__(self, other):
return not(self.__eq__(other))
# Column methods
def set_sortable(self, sortable):
if sortable:
if isinstance(sortable, columns.Column):
self.column_type = sortable
else:
self.column_type = self.default_column()
else:
self.column_type = None
def default_column(self):
return columns.VarBytesColumn()
# Methods for converting input into indexing information
def index(self, value, **kwargs):
"""Returns an iterator of (btext, frequency, weight, encoded_value)
tuples for each unique word in the input value.
The default implementation uses the ``analyzer`` attribute to tokenize
the value into strings, then encodes them into bytes using UTF-8.
"""
if not self.format:
raise Exception("%s field %r cannot index without a format"
% (self.__class__.__name__, self))
if not isinstance(value, (text_type, list, tuple)):
raise ValueError("%r is not unicode or sequence" % value)
assert isinstance(self.format, formats.Format)
if "mode" not in kwargs:
kwargs["mode"] = "index"
word_values = self.format.word_values
ana = self.analyzer
for tstring, freq, wt, vbytes in word_values(value, ana, **kwargs):
yield (utf8encode(tstring)[0], freq, wt, vbytes)
def process_text(self, qstring, mode='', **kwargs):
"""Analyzes the given string and returns an iterator of token texts.
>>> field = fields.TEXT()
>>> list(field.process_text("The ides of March"))
["ides", "march"]
"""
if not self.format:
raise Exception("%s field has no format" % self)
return (t.text for t in self.tokenize(qstring, mode=mode, **kwargs))
def tokenize(self, value, **kwargs):
"""Analyzes the given string and returns an iterator of Token objects
(note: for performance reasons, actually the same token yielded over
and over with different attributes).
"""
if not self.analyzer:
raise Exception("%s field has no analyzer" % self.__class__)
return self.analyzer(value, **kwargs)
def to_bytes(self, value):
"""Returns a bytes representation of the given value, appropriate to be
written to disk. The default implementation assumes a unicode value and
encodes it using UTF-8.
"""
if isinstance(value, (list, tuple)):
value = value[0]
if not isinstance(value, bytes_type):
value = utf8encode(value)[0]
return value
def to_column_value(self, value):
"""Returns an object suitable to be inserted into the document values
column for this field. The default implementation simply calls
``self.to_bytes(value)``.
"""
return self.to_bytes(value)
def from_column_value(self, value):
return self.from_bytes(value)
def from_bytes(self, bs):
return utf8decode(bs)[0]
# Methods related to query parsing
def self_parsing(self):
"""Subclasses should override this method to return True if they want
the query parser to call the field's ``parse_query()`` method instead
of running the analyzer on text in this field. This is useful where
the field needs full control over how queries are interpreted, such
as in the numeric field type.
"""
return False
def parse_query(self, fieldname, qstring, boost=1.0):
"""When ``self_parsing()`` returns True, the query parser will call
this method to parse basic query text.
"""
raise NotImplementedError(self.__class__.__name__)
def parse_range(self, fieldname, start, end, startexcl, endexcl,
boost=1.0):
"""When ``self_parsing()`` returns True, the query parser will call
this method to parse range query text. If this method returns None
instead of a query object, the parser will fall back to parsing the
start and end terms using process_text().
"""
return None
# Methods related to sortings
def sortable_terms(self, ixreader, fieldname):
"""Returns an iterator of the "sortable" tokens in the given reader and
field. These values can be used for sorting. The default implementation
simply returns all tokens in the field.
This can be overridden by field types such as NUMERIC where some values
in a field are not useful for sorting.
"""
return ixreader.lexicon(fieldname)
# Methods related to spelling
def separate_spelling(self):
"""Returns True if this field requires special handling of the words
that go into the field's word graph.
The default behavior is to return True if the field is "spelled" but
not indexed, or if the field is indexed but the analyzer has
morphological transformations (e.g. stemming). Exotic field types may
need to override this behavior.
This method should return False if the field does not support spelling
(i.e. the ``spelling`` attribute is False).
"""
return self.spelling and self.analyzer.has_morph()
def spellable_words(self, value):
"""Returns an iterator of each unique word (in sorted order) in the
input value, suitable for inclusion in the field's word graph.
The default behavior is to call the field analyzer with the keyword
argument ``no_morph=True``, which should make the analyzer skip any
morphological transformation filters (e.g. stemming) to preserve the
original form of the words. Exotic field types may need to override
this behavior.
"""
if isinstance(value, (list, tuple)):
words = value
else:
words = [token.text for token
in self.analyzer(value, no_morph=True)]
return iter(sorted(set(words)))
def has_morph(self):
"""Returns True if this field by default performs morphological
transformations on its terms, e.g. stemming.
"""
if self.analyzer:
return self.analyzer.has_morph()
else:
return False
# Methods related to the posting/vector formats
def supports(self, name):
"""Returns True if the underlying format supports the given posting
value type.
>>> field = TEXT()
>>> field.supports("positions")
True
>>> field.supports("characters")
False
"""
return self.format.supports(name)
def clean(self):
"""Clears any cached information in the field and any child objects.
"""
if self.format and hasattr(self.format, "clean"):
self.format.clean()
if self.vector and hasattr(self.vector, "clean"):
self.vector.clean()
# Event methods
def on_add(self, schema, fieldname):
pass
def on_remove(self, schema, fieldname):
pass
class ID(FieldType):
"""Configured field type that indexes the entire value of the field as one
token. This is useful for data you don't want to tokenize, such as the path
of a file.
"""
__inittypes__ = dict(stored=bool, unique=bool, field_boost=float)
def __init__(self, stored=False, unique=False, field_boost=1.0,
spelling=False, sortable=False, analyzer=None):
"""
:param stored: Whether the value of this field is stored with the
document.
"""
self.analyzer = analyzer or analysis.IDAnalyzer()
self.format = formats.Existence(field_boost=field_boost)
self.stored = stored
self.unique = unique
self.spelling = spelling
self.set_sortable(sortable)
class IDLIST(FieldType):
"""Configured field type for fields containing IDs separated by whitespace
and/or punctuation (or anything else, using the expression param).
"""
__inittypes__ = dict(stored=bool, unique=bool, expression=bool,
field_boost=float)
def __init__(self, stored=False, unique=False, expression=None,
field_boost=1.0, spelling=False):
"""
:param stored: Whether the value of this field is stored with the
document.
:param unique: Whether the value of this field is unique per-document.
:param expression: The regular expression object to use to extract
tokens. The default expression breaks tokens on CRs, LFs, tabs,
spaces, commas, and semicolons.
"""
expression = expression or re.compile(r"[^\r\n\t ,;]+")
self.analyzer = analysis.RegexAnalyzer(expression=expression)
self.format = formats.Existence(field_boost=field_boost)
self.stored = stored
self.unique = unique
self.spelling = spelling
class NUMERIC(FieldType):
"""Special field type that lets you index integer or floating point
numbers in relatively short fixed-width terms. The field converts numbers
to sortable bytes for you before indexing.
You specify the numeric type of the field (``int`` or ``float``) when you
create the ``NUMERIC`` object. The default is ``int``. For ``int``, you can
specify a size in bits (``32`` or ``64``). For both ``int`` and ``float``
you can specify a ``signed`` keyword argument (default is ``True``).
>>> schema = Schema(path=STORED, position=NUMERIC(int, 64, signed=False))
>>> ix = storage.create_index(schema)
>>> with ix.writer() as w:
... w.add_document(path="/a", position=5820402204)
...
You can also use the NUMERIC field to store Decimal instances by specifying
a type of ``int`` or ``long`` and the ``decimal_places`` keyword argument.
This simply multiplies each number by ``(10 ** decimal_places)`` before
    storing it as an integer. Of course this may throw away decimal precision
(by truncating, not rounding) and imposes the same maximum value limits as
``int``/``long``, but these may be acceptable for certain applications.
>>> from decimal import Decimal
>>> schema = Schema(path=STORED, position=NUMERIC(int, decimal_places=4))
>>> ix = storage.create_index(schema)
>>> with ix.writer() as w:
... w.add_document(path="/a", position=Decimal("123.45")
...
"""
def __init__(self, numtype=int, bits=32, stored=False, unique=False,
field_boost=1.0, decimal_places=0, shift_step=4, signed=True,
sortable=False, default=None):
"""
:param numtype: the type of numbers that can be stored in this field,
either ``int``, ``float``. If you use ``Decimal``,
use the ``decimal_places`` argument to control how many decimal
places the field will store.
:param bits: When ``numtype`` is ``int``, the number of bits to use to
store the number: 8, 16, 32, or 64.
:param stored: Whether the value of this field is stored with the
document.
:param unique: Whether the value of this field is unique per-document.
:param decimal_places: specifies the number of decimal places to save
when storing Decimal instances. If you set this, you will always
get Decimal instances back from the field.
        :param shift_step: The number of bits of precision to shift away at
each tiered indexing level. Values should generally be 1-8. Lower
values yield faster searches but take up more space. A value
of `0` means no tiered indexing.
:param signed: Whether the numbers stored in this field may be
negative.
"""
# Allow users to specify strings instead of Python types in case
# docstring isn't clear
if numtype == "int":
numtype = int
if numtype == "float":
numtype = float
# Raise an error if the user tries to use a type other than int or
# float
if numtype is Decimal:
numtype = int
if not decimal_places:
raise TypeError("To store Decimal instances, you must set the "
"decimal_places argument")
elif numtype not in (int, float):
raise TypeError("Can't use %r as a type, use int or float"
% numtype)
# Sanity check
if numtype is float and decimal_places:
raise Exception("A float type and decimal_places argument %r are "
"incompatible" % decimal_places)
intsizes = [8, 16, 32, 64]
intcodes = ["B", "H", "I", "Q"]
# Set up field configuration based on type and size
if numtype is float:
bits = 64 # Floats are converted to 64 bit ints
else:
if bits not in intsizes:
raise Exception("Invalid bits %r, use 8, 16, 32, or 64"
% bits)
# Type code for the *sortable* representation
self.sortable_typecode = intcodes[intsizes.index(bits)]
self._struct = struct.Struct(">" + self.sortable_typecode)
self.numtype = numtype
self.bits = bits
self.stored = stored
self.unique = unique
self.decimal_places = decimal_places
self.shift_step = shift_step
self.signed = signed
self.analyzer = analysis.IDAnalyzer()
self.format = formats.Existence(field_boost=field_boost)
self.min_value, self.max_value = self._min_max()
# Column configuration
if default is None:
if numtype is int:
default = typecode_max[self.sortable_typecode]
else:
default = NaN
elif not self.is_valid(default):
raise Exception("The default %r is not a valid number for this "
"field" % default)
self.default = default
self.set_sortable(sortable)
def __getstate__(self):
d = self.__dict__.copy()
if "_struct" in d:
del d["_struct"]
return d
def __setstate__(self, d):
self.__dict__.update(d)
self._struct = struct.Struct(">" + self.sortable_typecode)
if "min_value" not in d:
d["min_value"], d["max_value"] = self._min_max()
def _min_max(self):
numtype = self.numtype
bits = self.bits
signed = self.signed
# Calculate the minimum and maximum possible values for error checking
min_value = from_sortable(numtype, bits, signed, 0)
max_value = from_sortable(numtype, bits, signed, 2 ** bits - 1)
return min_value, max_value
def default_column(self):
return columns.NumericColumn(self.sortable_typecode,
default=self.default)
def is_valid(self, x):
try:
x = self.to_bytes(x)
except ValueError:
return False
except OverflowError:
return False
return True
def index(self, num, **kwargs):
# If the user gave us a list of numbers, recurse on the list
if isinstance(num, (list, tuple)):
for n in num:
for item in self.index(n):
yield item
return
# word, freq, weight, valuestring
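        # Tiered indexing: emit one term per precision tier (each tier shifts
        # away shift_step bits) so range queries can cover wide spans with a
        # few low-precision terms.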
if self.shift_step:
for shift in xrange(0, self.bits, self.shift_step):
yield (self.to_bytes(num, shift), 1, 1.0, emptybytes)
else:
yield (self.to_bytes(num), 1, 1.0, emptybytes)
def prepare_number(self, x):
if x == emptybytes or x is None:
return x
dc = self.decimal_places
if dc and isinstance(x, (string_type, Decimal)):
x = Decimal(x) * (10 ** dc)
elif isinstance(x, Decimal):
raise TypeError("Can't index a Decimal object unless you specified "
"decimal_places on the field")
try:
x = self.numtype(x)
except OverflowError:
raise ValueError("Value %r overflowed number type %r"
% (x, self.numtype))
if x < self.min_value or x > self.max_value:
raise ValueError("Numeric field value %s out of range [%s, %s]"
% (x, self.min_value, self.max_value))
return x
def unprepare_number(self, x):
dc = self.decimal_places
if dc:
s = str(x)
x = Decimal(s[:-dc] + "." + s[-dc:])
return x
def to_column_value(self, x):
if isinstance(x, (list, tuple, array)):
x = x[0]
x = self.prepare_number(x)
return to_sortable(self.numtype, self.bits, self.signed, x)
def from_column_value(self, x):
x = from_sortable(self.numtype, self.bits, self.signed, x)
return self.unprepare_number(x)
def to_bytes(self, x, shift=0):
# Try to avoid re-encoding; this sucks because on Python 2 we can't
# tell the difference between a string and encoded bytes, so we have
# to require the user use unicode when they mean string
if isinstance(x, bytes_type):
return x
if x == emptybytes or x is None:
return self.sortable_to_bytes(0)
x = self.prepare_number(x)
x = to_sortable(self.numtype, self.bits, self.signed, x)
return self.sortable_to_bytes(x, shift)
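    # The encoded term is one shift byte followed by the big-endian packed
    # value, so terms sort first by precision tier, then by value.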
def sortable_to_bytes(self, x, shift=0):
if shift:
x >>= shift
return pack_byte(shift) + self._struct.pack(x)
def from_bytes(self, bs):
x = self._struct.unpack(bs[1:])[0]
x = from_sortable(self.numtype, self.bits, self.signed, x)
x = self.unprepare_number(x)
return x
def process_text(self, text, **kwargs):
return (self.to_bytes(text),)
def self_parsing(self):
return True
def parse_query(self, fieldname, qstring, boost=1.0):
from whoosh import query
from whoosh.qparser.common import QueryParserError
if qstring == "*":
return query.Every(fieldname, boost=boost)
if not self.is_valid(qstring):
raise QueryParserError("%r is not a valid number" % qstring)
token = self.to_bytes(qstring)
return query.Term(fieldname, token, boost=boost)
def parse_range(self, fieldname, start, end, startexcl, endexcl,
boost=1.0):
from whoosh import query
from whoosh.qparser.common import QueryParserError
if start is not None:
if not self.is_valid(start):
raise QueryParserError("Range start %r is not a valid number"
% start)
start = self.prepare_number(start)
if end is not None:
if not self.is_valid(end):
raise QueryParserError("Range end %r is not a valid number"
% end)
end = self.prepare_number(end)
return query.NumericRange(fieldname, start, end, startexcl, endexcl,
boost=boost)
def sortable_terms(self, ixreader, fieldname):
zero = b("\x00")
for token in ixreader.lexicon(fieldname):
if token[0:1] != zero:
# Only yield the full-precision values
break
yield token
class DATETIME(NUMERIC):
"""Special field type that lets you index datetime objects. The field
converts the datetime objects to sortable text for you before indexing.
Since this field is based on Python's datetime module it shares all the
limitations of that module, such as the inability to represent dates before
year 1 in the proleptic Gregorian calendar. However, since this field
stores datetimes as an integer number of microseconds, it could easily
represent a much wider range of dates if the Python datetime implementation
ever supports them.
>>> schema = Schema(path=STORED, date=DATETIME)
>>> ix = storage.create_index(schema)
>>> w = ix.writer()
>>> w.add_document(path="/a", date=datetime.now())
>>> w.commit()
"""
__inittypes__ = dict(stored=bool, unique=bool)
def __init__(self, stored=False, unique=False, sortable=False):
"""
:param stored: Whether the value of this field is stored with the
document.
:param unique: Whether the value of this field is unique per-document.
"""
super(DATETIME, self).__init__(int, 64, stored=stored,
unique=unique, shift_step=8,
sortable=sortable)
def prepare_datetime(self, x):
from whoosh.util.times import floor
if isinstance(x, text_type):
# For indexing, support same strings as for query parsing --
# convert unicode to datetime object
x = self._parse_datestring(x)
x = floor(x) # this makes most sense (unspecified = lowest)
if isinstance(x, datetime.datetime):
return datetime_to_long(x)
elif isinstance(x, bytes_type):
return x
else:
raise Exception("%r is not a datetime" % (x,))
def to_column_value(self, x):
if isinstance(x, bytes_type):
raise Exception("%r is not a datetime" % (x,))
if isinstance(x, (list, tuple)):
x = x[0]
return self.prepare_datetime(x)
def from_column_value(self, x):
return long_to_datetime(x)
def to_bytes(self, x, shift=0):
x = self.prepare_datetime(x)
return NUMERIC.to_bytes(self, x, shift=shift)
def from_bytes(self, bs):
x = NUMERIC.from_bytes(self, bs)
return long_to_datetime(x)
def _parse_datestring(self, qstring):
# This method parses a very simple datetime representation of the form
# YYYY[MM[DD[hh[mm[ss[uuuuuu]]]]]]
from whoosh.util.times import adatetime, fix, is_void
qstring = qstring.replace(" ", "").replace("-", "").replace(".", "")
year = month = day = hour = minute = second = microsecond = None
if len(qstring) >= 4:
year = int(qstring[:4])
if len(qstring) >= 6:
month = int(qstring[4:6])
if len(qstring) >= 8:
day = int(qstring[6:8])
if len(qstring) >= 10:
hour = int(qstring[8:10])
if len(qstring) >= 12:
minute = int(qstring[10:12])
if len(qstring) >= 14:
second = int(qstring[12:14])
if len(qstring) == 20:
microsecond = int(qstring[14:])
at = fix(adatetime(year, month, day, hour, minute, second,
microsecond))
if is_void(at):
raise Exception("%r is not a parseable date" % qstring)
return at
def parse_query(self, fieldname, qstring, boost=1.0):
from whoosh import query
from whoosh.util.times import is_ambiguous
try:
at = self._parse_datestring(qstring)
        except Exception:
e = sys.exc_info()[1]
return query.error_query(e)
if is_ambiguous(at):
startnum = datetime_to_long(at.floor())
endnum = datetime_to_long(at.ceil())
return query.NumericRange(fieldname, startnum, endnum)
else:
return query.Term(fieldname, at, boost=boost)
def parse_range(self, fieldname, start, end, startexcl, endexcl,
boost=1.0):
from whoosh import query
if start is None and end is None:
return query.Every(fieldname, boost=boost)
if start is not None:
startdt = self._parse_datestring(start).floor()
start = datetime_to_long(startdt)
if end is not None:
enddt = self._parse_datestring(end).ceil()
end = datetime_to_long(enddt)
return query.NumericRange(fieldname, start, end, boost=boost)
class BOOLEAN(FieldType):
"""Special field type that lets you index boolean values (True and False).
The field converts the boolean values to text for you before indexing.
>>> schema = Schema(path=STORED, done=BOOLEAN)
>>> ix = storage.create_index(schema)
>>> w = ix.writer()
>>> w.add_document(path="/a", done=False)
>>> w.commit()
"""
bytestrings = (b("f"), b("t"))
trues = frozenset(u("t true yes 1").split())
falses = frozenset(u("f false no 0").split())
__inittypes__ = dict(stored=bool, field_boost=float)
def __init__(self, stored=False, field_boost=1.0):
"""
:param stored: Whether the value of this field is stored with the
document.
"""
self.stored = stored
self.field_boost = field_boost
self.format = formats.Existence(field_boost=field_boost)
def _obj_to_bool(self, x):
# We special case strings such as "true", "false", "yes", "no", but
# otherwise call bool() on the query value. This lets you pass objects
# as query values and do the right thing.
if isinstance(x, string_type) and x.lower() in self.trues:
x = True
elif isinstance(x, string_type) and x.lower() in self.falses:
x = False
else:
x = bool(x)
return x
def to_bytes(self, x):
if isinstance(x, bytes_type):
return x
elif isinstance(x, string_type):
x = x.lower() in self.trues
else:
x = bool(x)
bs = self.bytestrings[int(x)]
return bs
def index(self, bit, **kwargs):
if isinstance(bit, string_type):
bit = bit.lower() in self.trues
else:
bit = bool(bit)
# word, freq, weight, valuestring
return [(self.bytestrings[int(bit)], 1, 1.0, emptybytes)]
def self_parsing(self):
return True
def parse_query(self, fieldname, qstring, boost=1.0):
from whoosh import query
if qstring == "*":
return query.Every(fieldname, boost=boost)
return query.Term(fieldname, self._obj_to_bool(qstring), boost=boost)
class STORED(FieldType):
"""Configured field type for fields you want to store but not index.
"""
indexed = False
stored = True
def __init__(self):
pass
class COLUMN(FieldType):
"""Configured field type for fields you want to store as a per-document
value column but not index.
"""
indexed = False
stored = False
def __init__(self, columnobj=None):
if columnobj is None:
columnobj = columns.VarBytesColumn()
if not isinstance(columnobj, columns.Column):
raise TypeError("%r is not a column object" % (columnobj,))
self.column_type = columnobj
def to_bytes(self, v):
return v
def from_bytes(self, b):
return b
class KEYWORD(FieldType):
"""Configured field type for fields containing space-separated or
comma-separated keyword-like data (such as tags). The default is to not
store positional information (so phrase searching is not allowed in this
field) and to not make the field scorable.
"""
__inittypes__ = dict(stored=bool, lowercase=bool, commas=bool,
scorable=bool, unique=bool, field_boost=float)
def __init__(self, stored=False, lowercase=False, commas=False,
vector=None, scorable=False, unique=False, field_boost=1.0,
spelling=False, sortable=False):
"""
:param stored: Whether to store the value of the field with the
document.
        :param commas: Whether this is a comma-separated field. If this is False
(the default), it is treated as a space-separated field.
:param scorable: Whether this field is scorable.
"""
self.analyzer = analysis.KeywordAnalyzer(lowercase=lowercase,
commas=commas)
self.format = formats.Frequency(field_boost=field_boost)
self.scorable = scorable
self.stored = stored
self.unique = unique
self.spelling = spelling
if vector:
if type(vector) is type:
vector = vector()
elif isinstance(vector, formats.Format):
pass
else:
vector = self.format
else:
vector = None
self.vector = vector
if sortable:
self.column_type = self.default_column()
class TEXT(FieldType):
"""Configured field type for text fields (for example, the body text of an
article). The default is to store positional information to allow phrase
searching. This field type is always scorable.
"""
__inittypes__ = dict(analyzer=analysis.Analyzer, phrase=bool,
vector=object, stored=bool, field_boost=float)
def __init__(self, analyzer=None, phrase=True, chars=False, vector=None,
stored=False, field_boost=1.0, multitoken_query="default",
spelling=False, sortable=False, lang=None):
"""
:param analyzer: The analysis.Analyzer to use to index the field
contents. See the analysis module for more information. If you omit
this argument, the field uses analysis.StandardAnalyzer.
        :param phrase: Whether to store positional information to allow phrase
searching.
:param chars: Whether to store character ranges along with positions.
If this is True, "phrase" is also implied.
:param vector: A :class:`whoosh.formats.Format` object to use to store
term vectors, or ``True`` to store vectors using the same format as
the inverted index, or ``None`` or ``False`` to not store vectors.
By default, fields do not store term vectors.
:param stored: Whether to store the value of this field with the
document. Since this field type generally contains a lot of text,
you should avoid storing it with the document unless you need to,
for example to allow fast excerpts in the search results.
:param spelling: Whether to generate word graphs for this field to make
spelling suggestions much faster.
:param sortable: If True, make this field sortable using the default
column type. If you pass a :class:`whoosh.columns.Column` instance
instead of True, the field will use the given column type.
        :param lang: automatically configure a
:class:`whoosh.analysis.LanguageAnalyzer` for the given language.
This is ignored if you also specify an ``analyzer``.
"""
if analyzer:
self.analyzer = analyzer
elif lang:
self.analyzer = analysis.LanguageAnalyzer(lang)
else:
self.analyzer = analysis.StandardAnalyzer()
if chars:
formatclass = formats.Characters
elif phrase:
formatclass = formats.Positions
else:
formatclass = formats.Frequency
self.format = formatclass(field_boost=field_boost)
if vector:
if type(vector) is type:
vector = vector()
elif isinstance(vector, formats.Format):
pass
else:
vector = formatclass()
else:
vector = None
self.vector = vector
if sortable:
if isinstance(sortable, columns.Column):
self.column_type = sortable
else:
self.column_type = columns.VarBytesColumn()
else:
self.column_type = None
self.multitoken_query = multitoken_query
self.scorable = True
self.stored = stored
self.spelling = spelling
class NGRAM(FieldType):
"""Configured field that indexes text as N-grams. For example, with a field
type NGRAM(3,4), the value "hello" will be indexed as tokens
"hel", "hell", "ell", "ello", "llo". This field type chops the entire text
into N-grams, including whitespace and punctuation. See :class:`NGRAMWORDS`
for a field type that breaks the text into words first before chopping the
words into N-grams.
"""
__inittypes__ = dict(minsize=int, maxsize=int, stored=bool,
field_boost=float, queryor=bool, phrase=bool)
scorable = True
def __init__(self, minsize=2, maxsize=4, stored=False, field_boost=1.0,
queryor=False, phrase=False, sortable=False):
"""
:param minsize: The minimum length of the N-grams.
:param maxsize: The maximum length of the N-grams.
:param stored: Whether to store the value of this field with the
document. Since this field type generally contains a lot of text,
you should avoid storing it with the document unless you need to,
for example to allow fast excerpts in the search results.
:param queryor: if True, combine the N-grams with an Or query. The
default is to combine N-grams with an And query.
:param phrase: store positions on the N-grams to allow exact phrase
searching. The default is off.
"""
formatclass = formats.Frequency
if phrase:
formatclass = formats.Positions
self.analyzer = analysis.NgramAnalyzer(minsize, maxsize)
self.format = formatclass(field_boost=field_boost)
self.stored = stored
self.queryor = queryor
self.set_sortable(sortable)
def self_parsing(self):
return True
def parse_query(self, fieldname, qstring, boost=1.0):
from whoosh import query
terms = [query.Term(fieldname, g)
for g in self.process_text(qstring, mode='query')]
cls = query.Or if self.queryor else query.And
return cls(terms, boost=boost)
class NGRAMWORDS(NGRAM):
"""Configured field that chops text into words using a tokenizer,
lowercases the words, and then chops the words into N-grams.
"""
__inittypes__ = dict(minsize=int, maxsize=int, stored=bool,
field_boost=float, tokenizer=analysis.Tokenizer,
at=str, queryor=bool)
scorable = True
def __init__(self, minsize=2, maxsize=4, stored=False, field_boost=1.0,
tokenizer=None, at=None, queryor=False, sortable=False):
"""
:param minsize: The minimum length of the N-grams.
:param maxsize: The maximum length of the N-grams.
:param stored: Whether to store the value of this field with the
document. Since this field type generally contains a lot of text,
you should avoid storing it with the document unless you need to,
for example to allow fast excerpts in the search results.
:param tokenizer: an instance of :class:`whoosh.analysis.Tokenizer`
used to break the text into words.
:param at: if 'start', only takes N-grams from the start of the word.
If 'end', only takes N-grams from the end. Otherwise the default
is to take all N-grams from each word.
:param queryor: if True, combine the N-grams with an Or query. The
default is to combine N-grams with an And query.
"""
self.analyzer = analysis.NgramWordAnalyzer(minsize, maxsize, tokenizer,
at=at)
self.format = formats.Frequency(field_boost=field_boost)
self.stored = stored
self.queryor = queryor
self.set_sortable(sortable)
# Schema class
class MetaSchema(type):
def __new__(cls, name, bases, attrs):
super_new = super(MetaSchema, cls).__new__
if not any(b for b in bases if isinstance(b, MetaSchema)):
# If this isn't a subclass of MetaSchema, don't do anything special
return super_new(cls, name, bases, attrs)
# Create the class
special_attrs = {}
for key in list(attrs.keys()):
if key.startswith("__"):
special_attrs[key] = attrs.pop(key)
new_class = super_new(cls, name, bases, special_attrs)
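        # Collect field definitions from base classes first so that this
        # class's own attributes can override inherited fields.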
fields = {}
for b in bases:
if hasattr(b, "_clsfields"):
fields.update(b._clsfields)
fields.update(attrs)
new_class._clsfields = fields
return new_class
def schema(self):
return Schema(**self._clsfields)
class Schema(object):
"""Represents the collection of fields in an index. Maps field names to
FieldType objects which define the behavior of each field.
Low-level parts of the index use field numbers instead of field names for
compactness. This class has several methods for converting between the
field name, field number, and field object itself.
"""
def __init__(self, **fields):
""" All keyword arguments to the constructor are treated as fieldname =
fieldtype pairs. The fieldtype can be an instantiated FieldType object,
or a FieldType sub-class (in which case the Schema will instantiate it
with the default constructor before adding it).
For example::
s = Schema(content = TEXT,
title = TEXT(stored = True),
tags = KEYWORD(stored = True))
"""
self._fields = {}
self._dyn_fields = {}
for name in sorted(fields.keys()):
self.add(name, fields[name])
def copy(self):
"""Returns a shallow copy of the schema. The field instances are not
deep copied, so they are shared between schema copies.
"""
return self.__class__(**self._fields)
def __eq__(self, other):
return (other.__class__ is self.__class__
and list(self.items()) == list(other.items()))
def __ne__(self, other):
return not(self.__eq__(other))
def __repr__(self):
return "<%s: %r>" % (self.__class__.__name__, self.names())
def __iter__(self):
"""Returns the field objects in this schema.
"""
return iter(self._fields.values())
def __getitem__(self, name):
"""Returns the field associated with the given field name.
"""
if name in self._fields:
return self._fields[name]
for expr, fieldtype in itervalues(self._dyn_fields):
if expr.match(name):
return fieldtype
raise KeyError("No field named %r" % (name,))
def __len__(self):
"""Returns the number of fields in this schema.
"""
return len(self._fields)
def __contains__(self, fieldname):
"""Returns True if a field by the given name is in this schema.
"""
# Defined in terms of __getitem__ so that there's only one method to
# override to provide dynamic fields
try:
field = self[fieldname]
return field is not None
except KeyError:
return False
def items(self):
"""Returns a list of ("fieldname", field_object) pairs for the fields
in this schema.
"""
return sorted(self._fields.items())
def names(self, check_names=None):
"""Returns a list of the names of the fields in this schema.
:param check_names: (optional) sequence of field names to check
whether the schema accepts them as (dynamic) field names -
acceptable names will also be in the result list.
            Note: check_names may also contain static field names; these do
            not create duplicates in the result list. Unsupported names will
            not be in the result list.
"""
fieldnames = set(self._fields.keys())
if check_names is not None:
check_names = set(check_names) - fieldnames
fieldnames.update(fieldname for fieldname in check_names
if fieldname in self)
return sorted(fieldnames)
def clean(self):
for field in self:
field.clean()
def add(self, name, fieldtype, glob=False):
"""Adds a field to this schema.
:param name: The name of the field.
:param fieldtype: An instantiated fields.FieldType object, or a
FieldType subclass. If you pass an instantiated object, the schema
will use that as the field configuration for this field. If you
pass a FieldType subclass, the schema will automatically
instantiate it with the default constructor.
"""
# Check field name
if name.startswith("_"):
raise FieldConfigurationError("Field names cannot start with an "
"underscore")
if " " in name:
raise FieldConfigurationError("Field names cannot contain spaces")
if name in self._fields or (glob and name in self._dyn_fields):
raise FieldConfigurationError("Schema already has a field %r"
% name)
# If the user passed a type rather than an instantiated field object,
# instantiate it automatically
if type(fieldtype) is type:
try:
fieldtype = fieldtype()
            except Exception:
e = sys.exc_info()[1]
raise FieldConfigurationError("Error: %s instantiating field "
"%r: %r" % (e, name, fieldtype))
if not isinstance(fieldtype, FieldType):
raise FieldConfigurationError("%r is not a FieldType object"
% fieldtype)
if glob:
expr = re.compile(fnmatch.translate(name))
self._dyn_fields[name] = (expr, fieldtype)
else:
fieldtype.on_add(self, name)
self._fields[name] = fieldtype
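    # Sketch: with glob=True the name is treated as an fnmatch pattern, and
    # any matching lookup resolves to the dynamic field:
    #
    #     s = Schema(content=TEXT)
    #     s.add("*_tag", KEYWORD(stored=True), glob=True)
    #     s["foo_tag"]        # -> the KEYWORD field, via __getitem__
    #     "foo_tag" in s      # -> True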
def remove(self, fieldname):
if fieldname in self._fields:
self._fields[fieldname].on_remove(self, fieldname)
del self._fields[fieldname]
elif fieldname in self._dyn_fields:
del self._dyn_fields[fieldname]
else:
raise KeyError("No field named %r" % fieldname)
def has_vectored_fields(self):
"""Returns True if any of the fields in this schema store term vectors.
"""
return any(ftype.vector for ftype in self)
def has_scorable_fields(self):
return any(ftype.scorable for ftype in self)
def stored_names(self):
"""Returns a list of the names of fields that are stored.
"""
return [name for name, field in self.items() if field.stored]
def scorable_names(self):
"""Returns a list of the names of fields that store field
lengths.
"""
return [name for name, field in self.items() if field.scorable]
def vector_names(self):
"""Returns a list of the names of fields that store vectors.
"""
return [name for name, field in self.items() if field.vector]
def separate_spelling_names(self):
"""Returns a list of the names of fields that require special handling
for generating spelling graphs... either because they store graphs but
aren't indexed, or because the analyzer is stemmed.
"""
return [name for name, field in self.items()
if field.spelling and field.separate_spelling()]
class SchemaClass(with_metaclass(MetaSchema, Schema)):
"""Allows you to define a schema using declarative syntax, similar to
Django models::
class MySchema(SchemaClass):
path = ID
date = DATETIME
content = TEXT
You can use inheritance to share common fields between schemas::
class Parent(SchemaClass):
path = ID(stored=True)
date = DATETIME
class Child1(Parent):
content = TEXT(positions=False)
class Child2(Parent):
tags = KEYWORD
This class overrides ``__new__`` so instantiating your sub-class always
results in an instance of ``Schema``.
>>> class MySchema(SchemaClass):
... title = TEXT(stored=True)
... content = TEXT
...
>>> s = MySchema()
>>> type(s)
<class 'whoosh.fields.Schema'>
"""
def __new__(cls, *args, **kwargs):
obj = super(Schema, cls).__new__(Schema)
        # Copy the class-level field dict so that updating it with call-time
        # kwargs does not mutate the shared _clsfields mapping.
        kw = dict(getattr(cls, "_clsfields", {}))
        kw.update(kwargs)
obj.__init__(*args, **kw)
return obj
def ensure_schema(schema):
if isinstance(schema, type) and issubclass(schema, Schema):
schema = schema.schema()
if not isinstance(schema, Schema):
raise FieldConfigurationError("%r is not a Schema" % schema)
return schema
def merge_fielddict(d1, d2):
keyset = set(d1.keys()) | set(d2.keys())
out = {}
for name in keyset:
field1 = d1.get(name)
field2 = d2.get(name)
if field1 and field2 and field1 != field2:
raise Exception("Inconsistent field %r: %r != %r"
% (name, field1, field2))
out[name] = field1 or field2
return out
def merge_schema(s1, s2):
schema = Schema()
schema._fields = merge_fielddict(s1._fields, s2._fields)
schema._dyn_fields = merge_fielddict(s1._dyn_fields, s2._dyn_fields)
return schema
def merge_schemas(schemas):
schema = schemas[0]
for i in xrange(1, len(schemas)):
schema = merge_schema(schema, schemas[i])
return schema
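# Sketch: merging unions the field dicts; identical duplicates are tolerated,
# conflicting definitions for the same name raise.
#
#     s1 = Schema(path=ID(stored=True))
#     s2 = Schema(content=TEXT)
#     merge_schemas([s1, s2]).names()   # -> ["content", "path"]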
| cortext/crawtextV2 | ~/venvs/crawler/lib/python2.7/site-packages/whoosh/fields.py | Python | mit | 51,656 |
import time
import traceback
from couchpotato.core.helpers.variable import getImdb, md5, cleanHost
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.base import YarrProvider
from couchpotato.environment import Env
log = CPLog(__name__)
class TorrentProvider(YarrProvider):
protocol = 'torrent'
proxy_domain = None
proxy_list = []
def imdbMatch(self, url, imdbId):
if getImdb(url) == imdbId:
return True
if url[:4] == 'http':
try:
cache_key = md5(url)
data = self.getCache(cache_key, url)
except IOError:
log.error('Failed to open %s.', url)
return False
return getImdb(data) == imdbId
return False
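    # Sketch: imdbMatch('http://example.org/release', 'tt0111161') first
    # checks the URL itself for an IMDB id, and only for http(s) URLs falls
    # back to fetching the page and scanning its contents.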
def getDomain(self, url = ''):
forced_domain = self.conf('domain')
if forced_domain:
return cleanHost(forced_domain).rstrip('/') + url
if not self.proxy_domain:
for proxy in self.proxy_list:
prop_name = 'proxy.%s' % proxy
last_check = float(Env.prop(prop_name, default = 0))
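                # 1209600 s = 14 days: skip proxies re-checked within two weeks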
if last_check > time.time() - 1209600:
continue
data = ''
try:
data = self.urlopen(proxy, timeout = 3, show_error = False)
except:
log.debug('Failed %s proxy %s: %s', (self.getName(), proxy, traceback.format_exc()))
if self.correctProxy(data):
log.debug('Using proxy for %s: %s', (self.getName(), proxy))
self.proxy_domain = proxy
break
Env.prop(prop_name, time.time())
if not self.proxy_domain:
log.error('No %s proxies left, please add one in settings, or let us know which one to add on the forum.', self.getName())
return None
return cleanHost(self.proxy_domain).rstrip('/') + url
def correctProxy(self, data):
return True
class TorrentMagnetProvider(TorrentProvider):
protocol = 'torrent_magnet'
download = None
| koomik/CouchPotatoServer | couchpotato/core/media/_base/providers/torrent/base.py | Python | gpl-3.0 | 2,163 |
from __future__ import (absolute_import, print_function, division)
import os
import select
import socket
import sys
import threading
import time
import traceback
import binascii
from six.moves import range
import certifi
from backports import ssl_match_hostname
import six
import OpenSSL
from OpenSSL import SSL
from netlib import certutils
from netlib import version_check
from netlib import basetypes
from netlib import exceptions
from netlib import basethread
# This is a rather hackish way to make sure that
# the latest version of pyOpenSSL is actually installed.
version_check.check_pyopenssl_version()
if six.PY2:
socket_fileobject = socket._fileobject
else:
socket_fileobject = socket.SocketIO
EINTR = 4
if os.environ.get("NO_ALPN"):
HAS_ALPN = False
else:
HAS_ALPN = OpenSSL._util.lib.Cryptography_HAS_ALPN
# To enable all SSL methods use: SSLv23
# then add options to disable certain methods
# https://bugs.launchpad.net/pyopenssl/+bug/1020632/comments/3
SSL_BASIC_OPTIONS = (
SSL.OP_CIPHER_SERVER_PREFERENCE
)
if hasattr(SSL, "OP_NO_COMPRESSION"):
SSL_BASIC_OPTIONS |= SSL.OP_NO_COMPRESSION
SSL_DEFAULT_METHOD = SSL.SSLv23_METHOD
SSL_DEFAULT_OPTIONS = (
SSL.OP_NO_SSLv2 |
SSL.OP_NO_SSLv3 |
SSL_BASIC_OPTIONS
)
if hasattr(SSL, "OP_NO_COMPRESSION"):
SSL_DEFAULT_OPTIONS |= SSL.OP_NO_COMPRESSION
"""
Map a reasonable SSL version specification into the format OpenSSL expects.
Don't ask...
https://bugs.launchpad.net/pyopenssl/+bug/1020632/comments/3
"""
sslversion_choices = {
"all": (SSL.SSLv23_METHOD, SSL_BASIC_OPTIONS),
# SSLv23_METHOD + NO_SSLv2 + NO_SSLv3 == TLS 1.0+
# TLSv1_METHOD would be TLS 1.0 only
"secure": (SSL.SSLv23_METHOD, (SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3 | SSL_BASIC_OPTIONS)),
"SSLv2": (SSL.SSLv2_METHOD, SSL_BASIC_OPTIONS),
"SSLv3": (SSL.SSLv3_METHOD, SSL_BASIC_OPTIONS),
"TLSv1": (SSL.TLSv1_METHOD, SSL_BASIC_OPTIONS),
"TLSv1_1": (SSL.TLSv1_1_METHOD, SSL_BASIC_OPTIONS),
"TLSv1_2": (SSL.TLSv1_2_METHOD, SSL_BASIC_OPTIONS),
}
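# Sketch of how this mapping is consumed (built by hand for illustration; the
# real code paths go through _create_ssl_context below):
#
#     method, options = sslversion_choices["secure"]
#     ctx = SSL.Context(method)
#     ctx.set_options(options)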
class SSLKeyLogger(object):
def __init__(self, filename):
self.filename = filename
self.f = None
self.lock = threading.Lock()
# required for functools.wraps, which pyOpenSSL uses.
__name__ = "SSLKeyLogger"
def __call__(self, connection, where, ret):
if where == SSL.SSL_CB_HANDSHAKE_DONE and ret == 1:
with self.lock:
if not self.f:
d = os.path.dirname(self.filename)
if not os.path.isdir(d):
os.makedirs(d)
self.f = open(self.filename, "ab")
self.f.write(b"\r\n")
client_random = binascii.hexlify(connection.client_random())
masterkey = binascii.hexlify(connection.master_key())
self.f.write(b"CLIENT_RANDOM %s %s\r\n" % (client_random, masterkey))
self.f.flush()
def close(self):
with self.lock:
if self.f:
self.f.close()
@staticmethod
def create_logfun(filename):
if filename:
return SSLKeyLogger(filename)
return False
log_ssl_key = SSLKeyLogger.create_logfun(
os.getenv("MITMPROXY_SSLKEYLOGFILE") or os.getenv("SSLKEYLOGFILE"))
class _FileLike(object):
BLOCKSIZE = 1024 * 32
def __init__(self, o):
self.o = o
self._log = None
self.first_byte_timestamp = None
def set_descriptor(self, o):
self.o = o
def __getattr__(self, attr):
return getattr(self.o, attr)
def start_log(self):
"""
Starts or resets the log.
This will store all bytes read or written.
"""
self._log = []
def stop_log(self):
"""
Stops the log.
"""
self._log = None
def is_logging(self):
return self._log is not None
def get_log(self):
"""
Returns the log as a string.
"""
if not self.is_logging():
raise ValueError("Not logging!")
return b"".join(self._log)
def add_log(self, v):
if self.is_logging():
self._log.append(v)
def reset_timestamps(self):
self.first_byte_timestamp = None
class Writer(_FileLike):
def flush(self):
"""
May raise exceptions.TcpDisconnect
"""
if hasattr(self.o, "flush"):
try:
self.o.flush()
except (socket.error, IOError) as v:
raise exceptions.TcpDisconnect(str(v))
def write(self, v):
"""
May raise exceptions.TcpDisconnect
"""
if v:
self.first_byte_timestamp = self.first_byte_timestamp or time.time()
try:
if hasattr(self.o, "sendall"):
self.add_log(v)
return self.o.sendall(v)
else:
r = self.o.write(v)
self.add_log(v[:r])
return r
except (SSL.Error, socket.error) as e:
raise exceptions.TcpDisconnect(str(e))
class Reader(_FileLike):
def read(self, length):
"""
If length is -1, we read until connection closes.
"""
result = b''
start = time.time()
while length == -1 or length > 0:
if length == -1 or length > self.BLOCKSIZE:
rlen = self.BLOCKSIZE
else:
rlen = length
try:
data = self.o.read(rlen)
except SSL.ZeroReturnError:
# TLS connection was shut down cleanly
break
except (SSL.WantWriteError, SSL.WantReadError):
# From the OpenSSL docs:
# If the underlying BIO is non-blocking, SSL_read() will also return when the
# underlying BIO could not satisfy the needs of SSL_read() to continue the
# operation. In this case a call to SSL_get_error with the return value of
# SSL_read() will yield SSL_ERROR_WANT_READ or SSL_ERROR_WANT_WRITE.
if (time.time() - start) < self.o.gettimeout():
time.sleep(0.1)
continue
else:
raise exceptions.TcpTimeout()
except socket.timeout:
raise exceptions.TcpTimeout()
except socket.error as e:
raise exceptions.TcpDisconnect(str(e))
except SSL.SysCallError as e:
if e.args == (-1, 'Unexpected EOF'):
break
raise exceptions.TlsException(str(e))
except SSL.Error as e:
raise exceptions.TlsException(str(e))
self.first_byte_timestamp = self.first_byte_timestamp or time.time()
if not data:
break
result += data
if length != -1:
length -= len(data)
self.add_log(result)
return result
def readline(self, size=None):
result = b''
bytes_read = 0
while True:
if size is not None and bytes_read >= size:
break
ch = self.read(1)
bytes_read += 1
if not ch:
break
else:
result += ch
if ch == b'\n':
break
return result
def safe_read(self, length):
"""
Like .read, but is guaranteed to either return length bytes, or
raise an exception.
"""
result = self.read(length)
if length != -1 and len(result) != length:
if not result:
raise exceptions.TcpDisconnect()
else:
raise exceptions.TcpReadIncomplete(
"Expected %s bytes, got %s" % (length, len(result))
)
return result
def peek(self, length):
"""
Tries to peek into the underlying file object.
Returns:
Up to the next N bytes if peeking is successful.
Raises:
exceptions.TcpException if there was an error with the socket
TlsException if there was an error with pyOpenSSL.
NotImplementedError if the underlying file object is not a [pyOpenSSL] socket
"""
if isinstance(self.o, socket_fileobject):
try:
return self.o._sock.recv(length, socket.MSG_PEEK)
except socket.error as e:
raise exceptions.TcpException(repr(e))
elif isinstance(self.o, SSL.Connection):
try:
if tuple(int(x) for x in OpenSSL.__version__.split(".")[:2]) > (0, 15):
return self.o.recv(length, socket.MSG_PEEK)
else:
# TODO: remove once a new version is released
# Polyfill for pyOpenSSL <= 0.15.1
# Taken from https://github.com/pyca/pyopenssl/commit/1d95dea7fea03c7c0df345a5ea30c12d8a0378d2
buf = SSL._ffi.new("char[]", length)
result = SSL._lib.SSL_peek(self.o._ssl, buf, length)
self.o._raise_ssl_error(self.o._ssl, result)
return SSL._ffi.buffer(buf, result)[:]
except SSL.Error as e:
six.reraise(exceptions.TlsException, exceptions.TlsException(str(e)), sys.exc_info()[2])
else:
raise NotImplementedError("Can only peek into (pyOpenSSL) sockets")
class Address(basetypes.Serializable):
"""
This class wraps an IPv4/IPv6 tuple to provide named attributes and
ipv6 information.
"""
def __init__(self, address, use_ipv6=False):
self.address = tuple(address)
self.use_ipv6 = use_ipv6
def get_state(self):
return {
"address": self.address,
"use_ipv6": self.use_ipv6
}
def set_state(self, state):
self.address = state["address"]
self.use_ipv6 = state["use_ipv6"]
@classmethod
def from_state(cls, state):
return Address(**state)
@classmethod
def wrap(cls, t):
if isinstance(t, cls):
return t
else:
return cls(t)
def __call__(self):
return self.address
@property
def host(self):
return self.address[0]
@property
def port(self):
return self.address[1]
@property
def use_ipv6(self):
return self.family == socket.AF_INET6
@use_ipv6.setter
def use_ipv6(self, b):
self.family = socket.AF_INET6 if b else socket.AF_INET
def __repr__(self):
return "{}:{}".format(self.host, self.port)
def __eq__(self, other):
if not other:
return False
other = Address.wrap(other)
return (self.address, self.family) == (other.address, other.family)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.address) ^ 42 # different hash than the tuple alone.
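# Sketch:
#
#     addr = Address.wrap(("127.0.0.1", 8080))
#     addr.host, addr.port   # -> "127.0.0.1", 8080
#     addr()                 # -> the raw tuple, as socket APIs expect it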
def ssl_read_select(rlist, timeout):
"""
This is a wrapper around select.select() which also works for SSL.Connections
by taking ssl_connection.pending() into account.
Caveats:
If .pending() > 0 for any of the connections in rlist, we avoid the select syscall
and **will not include any other connections which may or may not be ready**.
Args:
rlist: wait until ready for reading
Returns:
subset of rlist which is ready for reading.
"""
return [
conn for conn in rlist
if isinstance(conn, SSL.Connection) and conn.pending() > 0
] or select.select(rlist, (), (), timeout)[0]
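# e.g. (sketch): ready = ssl_read_select([conn_a, conn_b], timeout=0.1)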
def close_socket(sock):
"""
Does a hard close of a socket, without emitting a RST.
"""
try:
# We already indicate that we close our end.
# may raise "Transport endpoint is not connected" on Linux
sock.shutdown(socket.SHUT_WR)
# Section 4.2.2.13 of RFC 1122 tells us that a close() with any pending
# readable data could lead to an immediate RST being sent (which is the
# case on Windows).
# http://ia600609.us.archive.org/22/items/TheUltimateSo_lingerPageOrWhyIsMyTcpNotReliable/the-ultimate-so_linger-page-or-why-is-my-tcp-not-reliable.html
#
# This in turn results in the following issue: If we send an error page
# to the client and then close the socket, the RST may be received by
# the client before the error page and the users sees a connection
# error rather than the error page. Thus, we try to empty the read
# buffer on Windows first. (see
# https://github.com/mitmproxy/mitmproxy/issues/527#issuecomment-93782988)
#
if os.name == "nt": # pragma: no cover
# We cannot rely on the shutdown()-followed-by-read()-eof technique
# proposed by the page above: Some remote machines just don't send
# a TCP FIN, which would leave us in the unfortunate situation that
# recv() would block infinitely. As a workaround, we set a timeout
# here even if we are in blocking mode.
sock.settimeout(sock.gettimeout() or 20)
            # limit the total amount read (up to a gigabyte here) so that
            # we don't read infinitely
for _ in range(1024 ** 3 // 4096):
# may raise a timeout/disconnect exception.
if not sock.recv(4096):
break
# Now we can close the other half as well.
sock.shutdown(socket.SHUT_RD)
except socket.error:
pass
sock.close()
class _Connection(object):
rbufsize = -1
wbufsize = -1
def _makefile(self):
"""
Set up .rfile and .wfile attributes from .connection
"""
# Ideally, we would use the Buffered IO in Python 3 by default.
# Unfortunately, the implementation of .peek() is broken for n>1 bytes,
# as it may just return what's left in the buffer and not all the bytes we want.
# As a workaround, we just use unbuffered sockets directly.
# https://mail.python.org/pipermail/python-dev/2009-June/089986.html
if six.PY2:
self.rfile = Reader(self.connection.makefile('rb', self.rbufsize))
self.wfile = Writer(self.connection.makefile('wb', self.wbufsize))
else:
self.rfile = Reader(socket.SocketIO(self.connection, "rb"))
self.wfile = Writer(socket.SocketIO(self.connection, "wb"))
def __init__(self, connection):
if connection:
self.connection = connection
self.ip_address = Address(connection.getpeername())
self._makefile()
else:
self.connection = None
self.ip_address = None
self.rfile = None
self.wfile = None
self.ssl_established = False
self.finished = False
def get_current_cipher(self):
if not self.ssl_established:
return None
name = self.connection.get_cipher_name()
bits = self.connection.get_cipher_bits()
version = self.connection.get_cipher_version()
return name, bits, version
def finish(self):
self.finished = True
# If we have an SSL connection, wfile.close == connection.close
# (We call _FileLike.set_descriptor(conn))
# Closing the socket is not our task, therefore we don't call close
# then.
if not isinstance(self.connection, SSL.Connection):
if not getattr(self.wfile, "closed", False):
try:
self.wfile.flush()
self.wfile.close()
except exceptions.TcpDisconnect:
pass
self.rfile.close()
else:
try:
self.connection.shutdown()
except SSL.Error:
pass
def _create_ssl_context(self,
method=SSL_DEFAULT_METHOD,
options=SSL_DEFAULT_OPTIONS,
verify_options=SSL.VERIFY_NONE,
ca_path=None,
ca_pemfile=None,
cipher_list=None,
alpn_protos=None,
alpn_select=None,
alpn_select_callback=None,
):
"""
Creates an SSL Context.
:param method: One of SSLv2_METHOD, SSLv3_METHOD, SSLv23_METHOD, TLSv1_METHOD, TLSv1_1_METHOD, or TLSv1_2_METHOD
:param options: A bit field consisting of OpenSSL.SSL.OP_* values
:param verify_options: A bit field consisting of OpenSSL.SSL.VERIFY_* values
:param ca_path: Path to a directory of trusted CA certificates prepared using the c_rehash tool
:param ca_pemfile: Path to a PEM formatted trusted CA certificate
:param cipher_list: A textual OpenSSL cipher list, see https://www.openssl.org/docs/apps/ciphers.html
:rtype : SSL.Context
"""
context = SSL.Context(method)
# Options (NO_SSLv2/3)
if options is not None:
context.set_options(options)
# Verify Options (NONE/PEER and trusted CAs)
if verify_options is not None:
def verify_cert(conn, x509, errno, err_depth, is_cert_verified):
if not is_cert_verified:
self.ssl_verification_error = dict(errno=errno,
depth=err_depth)
return is_cert_verified
context.set_verify(verify_options, verify_cert)
if ca_path is None and ca_pemfile is None:
ca_pemfile = certifi.where()
context.load_verify_locations(ca_pemfile, ca_path)
# Workaround for
# https://github.com/pyca/pyopenssl/issues/190
# https://github.com/mitmproxy/mitmproxy/issues/472
# Options already set before are not cleared.
context.set_mode(SSL._lib.SSL_MODE_AUTO_RETRY)
# Cipher List
if cipher_list:
try:
context.set_cipher_list(cipher_list)
# TODO: maybe change this to with newer pyOpenSSL APIs
context.set_tmp_ecdh(OpenSSL.crypto.get_elliptic_curve('prime256v1'))
except SSL.Error as v:
raise exceptions.TlsException("SSL cipher specification error: %s" % str(v))
# SSLKEYLOGFILE
if log_ssl_key:
context.set_info_callback(log_ssl_key)
if HAS_ALPN:
if alpn_protos is not None:
# advertise application layer protocols
context.set_alpn_protos(alpn_protos)
elif alpn_select is not None and alpn_select_callback is None:
# select application layer protocol
def alpn_select_callback(conn_, options):
if alpn_select in options:
return bytes(alpn_select)
else: # pragma no cover
return options[0]
context.set_alpn_select_callback(alpn_select_callback)
elif alpn_select_callback is not None and alpn_select is None:
context.set_alpn_select_callback(alpn_select_callback)
elif alpn_select_callback is not None and alpn_select is not None:
raise exceptions.TlsException("ALPN error: only define alpn_select (string) OR alpn_select_callback (method).")
return context
class ConnectionCloser(object):
def __init__(self, conn):
self.conn = conn
self._canceled = False
def pop(self):
"""
Cancel the current closer, and return a fresh one.
"""
self._canceled = True
return ConnectionCloser(self.conn)
def __enter__(self):
return self
def __exit__(self, *args):
if not self._canceled:
self.conn.close()
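# Sketch of the intended pattern (host hypothetical): TCPClient.connect()
# below returns a ConnectionCloser, so the socket is closed on scope exit
# unless .pop() transfers ownership to a fresh closer:
#
#     tc = TCPClient(("example.com", 80))
#     with tc.connect():
#         tc.wfile.write(b"GET / HTTP/1.1\r\nHost: example.com\r\n\r\n")
#         tc.wfile.flush()
#         print(tc.rfile.readline())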
class TCPClient(_Connection):
def __init__(self, address, source_address=None):
super(TCPClient, self).__init__(None)
self.address = address
self.source_address = source_address
self.cert = None
self.server_certs = []
self.ssl_verification_error = None
self.sni = None
@property
def address(self):
return self.__address
@address.setter
def address(self, address):
if address:
self.__address = Address.wrap(address)
else:
self.__address = None
@property
def source_address(self):
return self.__source_address
@source_address.setter
def source_address(self, source_address):
if source_address:
self.__source_address = Address.wrap(source_address)
else:
self.__source_address = None
def close(self):
# Make sure to close the real socket, not the SSL proxy.
# OpenSSL is really good at screwing up, i.e. when trying to recv from a failed connection,
# it tries to renegotiate...
if isinstance(self.connection, SSL.Connection):
close_socket(self.connection._socket)
else:
close_socket(self.connection)
def create_ssl_context(self, cert=None, alpn_protos=None, **sslctx_kwargs):
context = self._create_ssl_context(
alpn_protos=alpn_protos,
**sslctx_kwargs)
# Client Certs
if cert:
try:
context.use_privatekey_file(cert)
context.use_certificate_file(cert)
except SSL.Error as v:
raise exceptions.TlsException("SSL client certificate error: %s" % str(v))
return context
def convert_to_ssl(self, sni=None, alpn_protos=None, **sslctx_kwargs):
"""
cert: Path to a file containing both client cert and private key.
options: A bit field consisting of OpenSSL.SSL.OP_* values
verify_options: A bit field consisting of OpenSSL.SSL.VERIFY_* values
ca_path: Path to a directory of trusted CA certificates prepared using the c_rehash tool
ca_pemfile: Path to a PEM formatted trusted CA certificate
"""
verification_mode = sslctx_kwargs.get('verify_options', None)
if verification_mode == SSL.VERIFY_PEER and not sni:
raise exceptions.TlsException("Cannot validate certificate hostname without SNI")
context = self.create_ssl_context(
alpn_protos=alpn_protos,
**sslctx_kwargs
)
self.connection = SSL.Connection(context, self.connection)
if sni:
self.sni = sni
self.connection.set_tlsext_host_name(sni.encode("idna"))
self.connection.set_connect_state()
try:
self.connection.do_handshake()
except SSL.Error as v:
if self.ssl_verification_error:
raise exceptions.InvalidCertificateException("SSL handshake error: %s" % repr(v))
else:
raise exceptions.TlsException("SSL handshake error: %s" % repr(v))
else:
# Fix for pre v1.0 OpenSSL, which doesn't throw an exception on
# certificate validation failure
if verification_mode == SSL.VERIFY_PEER and self.ssl_verification_error is not None:
raise exceptions.InvalidCertificateException("SSL handshake error: certificate verify failed")
self.cert = certutils.SSLCert(self.connection.get_peer_certificate())
# Keep all server certificates in a list
for i in self.connection.get_peer_cert_chain():
self.server_certs.append(certutils.SSLCert(i))
# Validate TLS Hostname
try:
crt = dict(
subjectAltName=[("DNS", x.decode("ascii", "strict")) for x in self.cert.altnames]
)
if self.cert.cn:
crt["subject"] = [[["commonName", self.cert.cn.decode("ascii", "strict")]]]
if sni:
hostname = sni
else:
hostname = "no-hostname"
ssl_match_hostname.match_hostname(crt, hostname)
except (ValueError, ssl_match_hostname.CertificateError) as e:
self.ssl_verification_error = dict(depth=0, errno="Invalid Hostname")
if verification_mode == SSL.VERIFY_PEER:
raise exceptions.InvalidCertificateException("Presented certificate for {} is not valid: {}".format(sni, str(e)))
self.ssl_established = True
self.rfile.set_descriptor(self.connection)
self.wfile.set_descriptor(self.connection)
def connect(self):
try:
connection = socket.socket(self.address.family, socket.SOCK_STREAM)
if self.source_address:
connection.bind(self.source_address())
connection.connect(self.address())
self.source_address = Address(connection.getsockname())
except (socket.error, IOError) as err:
raise exceptions.TcpException(
'Error connecting to "%s": %s' %
(self.address.host, err)
)
self.connection = connection
self.ip_address = Address(connection.getpeername())
self._makefile()
return ConnectionCloser(self)
def settimeout(self, n):
self.connection.settimeout(n)
def gettimeout(self):
return self.connection.gettimeout()
def get_alpn_proto_negotiated(self):
if HAS_ALPN and self.ssl_established:
return self.connection.get_alpn_proto_negotiated()
else:
return b""
class BaseHandler(_Connection):
"""
The instantiator is expected to call the handle() and finish() methods.
"""
def __init__(self, connection, address, server):
super(BaseHandler, self).__init__(connection)
self.address = Address.wrap(address)
self.server = server
self.clientcert = None
def create_ssl_context(self,
cert, key,
handle_sni=None,
request_client_cert=None,
chain_file=None,
dhparams=None,
extra_chain_certs=None,
**sslctx_kwargs):
"""
cert: A certutils.SSLCert object or the path to a certificate
chain file.
handle_sni: SNI handler, should take a connection object. Server
name can be retrieved like this:
connection.get_servername()
And you can specify the connection keys as follows:
new_context = Context(TLSv1_METHOD)
new_context.use_privatekey(key)
new_context.use_certificate(cert)
connection.set_context(new_context)
The request_client_cert argument requires some explanation. We're
supposed to be able to do this with no negative effects - if the
client has no cert to present, we're notified and proceed as usual.
Unfortunately, Android seems to have a bug (tested on 4.2.2) - when
an Android client is asked to present a certificate it does not
have, it hangs up, which is frankly bogus. Some time down the track
we may be able to make the proper behaviour the default again, but
until then we're conservative.
"""
context = self._create_ssl_context(**sslctx_kwargs)
context.use_privatekey(key)
if isinstance(cert, certutils.SSLCert):
context.use_certificate(cert.x509)
else:
context.use_certificate_chain_file(cert)
if extra_chain_certs:
for i in extra_chain_certs:
context.add_extra_chain_cert(i.x509)
if handle_sni:
# SNI callback happens during do_handshake()
context.set_tlsext_servername_callback(handle_sni)
if request_client_cert:
def save_cert(conn_, cert, errno_, depth_, preverify_ok_):
self.clientcert = certutils.SSLCert(cert)
# Return true to prevent cert verification error
return True
context.set_verify(SSL.VERIFY_PEER, save_cert)
# Cert Verify
if chain_file:
context.load_verify_locations(chain_file)
if dhparams:
SSL._lib.SSL_CTX_set_tmp_dh(context._context, dhparams)
return context
def convert_to_ssl(self, cert, key, **sslctx_kwargs):
"""
Convert connection to SSL.
For a list of parameters, see BaseHandler._create_ssl_context(...)
"""
context = self.create_ssl_context(
cert,
key,
**sslctx_kwargs)
self.connection = SSL.Connection(context, self.connection)
self.connection.set_accept_state()
try:
self.connection.do_handshake()
except SSL.Error as v:
raise exceptions.TlsException("SSL handshake error: %s" % repr(v))
self.ssl_established = True
self.rfile.set_descriptor(self.connection)
self.wfile.set_descriptor(self.connection)
def handle(self): # pragma: no cover
raise NotImplementedError
def settimeout(self, n):
self.connection.settimeout(n)
def get_alpn_proto_negotiated(self):
if HAS_ALPN and self.ssl_established:
return self.connection.get_alpn_proto_negotiated()
else:
return b""
class Counter:
def __init__(self):
self._count = 0
self._lock = threading.Lock()
@property
def count(self):
with self._lock:
return self._count
def __enter__(self):
with self._lock:
self._count += 1
def __exit__(self, *args):
with self._lock:
self._count -= 1
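# Counter is used as a context manager around each connection handler below,
# so wait_for_silence() can poll how many handlers are still running.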
class TCPServer(object):
request_queue_size = 20
def __init__(self, address):
self.address = Address.wrap(address)
self.__is_shut_down = threading.Event()
self.__shutdown_request = False
self.socket = socket.socket(self.address.family, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(self.address())
self.address = Address.wrap(self.socket.getsockname())
self.socket.listen(self.request_queue_size)
self.handler_counter = Counter()
def connection_thread(self, connection, client_address):
with self.handler_counter:
client_address = Address(client_address)
try:
self.handle_client_connection(connection, client_address)
except:
self.handle_error(connection, client_address)
finally:
close_socket(connection)
def serve_forever(self, poll_interval=0.1):
self.__is_shut_down.clear()
try:
while not self.__shutdown_request:
try:
r, w_, e_ = select.select(
[self.socket], [], [], poll_interval)
except select.error as ex: # pragma: no cover
if ex[0] == EINTR:
continue
else:
raise
if self.socket in r:
connection, client_address = self.socket.accept()
t = basethread.BaseThread(
"TCPConnectionHandler (%s: %s:%s -> %s:%s)" % (
self.__class__.__name__,
client_address[0],
client_address[1],
self.address.host,
self.address.port
),
target=self.connection_thread,
args=(connection, client_address),
)
t.setDaemon(1)
try:
t.start()
except threading.ThreadError:
self.handle_error(connection, Address(client_address))
connection.close()
finally:
self.__shutdown_request = False
self.__is_shut_down.set()
def shutdown(self):
self.__shutdown_request = True
self.__is_shut_down.wait()
self.socket.close()
self.handle_shutdown()
def handle_error(self, connection_, client_address, fp=sys.stderr):
"""
Called when handle_client_connection raises an exception.
"""
# If a thread has persisted after interpreter exit, the module might be
# none.
if traceback and six:
exc = six.text_type(traceback.format_exc())
print(u'-' * 40, file=fp)
print(
u"Error in processing of request from %s" % repr(client_address), file=fp)
print(exc, file=fp)
print(u'-' * 40, file=fp)
def handle_client_connection(self, conn, client_address): # pragma: no cover
"""
Called after client connection.
"""
raise NotImplementedError
def handle_shutdown(self):
"""
Called after server shutdown.
"""
def wait_for_silence(self, timeout=5):
start = time.time()
while 1:
if time.time() - start >= timeout:
raise exceptions.Timeout(
"%s service threads still alive" %
self.handler_counter.count
)
if self.handler_counter.count == 0:
return
| dufferzafar/mitmproxy | netlib/tcp.py | Python | mit | 34,025 |
#!/usr/bin/env python
import vtk
def main():
colors = vtk.vtkNamedColors()
points = vtk.vtkPoints()
points.InsertNextPoint(0.0, 0.0, 0.0)
points.InsertNextPoint(1.0, 0.0, 0.0)
points.InsertNextPoint(2.0, 0.0, 0.0)
points.InsertNextPoint(3.0, 0.0, 0.0)
points.InsertNextPoint(4.0, 0.0, 0.0)
lines = vtk.vtkCellArray()
line = vtk.vtkLine()
line.GetPointIds().SetId(0, 0)
line.GetPointIds().SetId(1, 1)
lines.InsertNextCell(line)
line.GetPointIds().SetId(0, 1)
line.GetPointIds().SetId(1, 2)
lines.InsertNextCell(line)
line.GetPointIds().SetId(0, 2)
line.GetPointIds().SetId(1, 3)
lines.InsertNextCell(line)
line.GetPointIds().SetId(0, 3)
line.GetPointIds().SetId(1, 4)
lines.InsertNextCell(line)
warpData = vtk.vtkDoubleArray()
warpData.SetNumberOfComponents(3)
warpData.SetName("warpData")
warp = [0.0, 0.0, 0.0]
warp[1] = 0.0
warpData.InsertNextTuple(warp)
warp[1] = 0.1
warpData.InsertNextTuple(warp)
warp[1] = 0.3
warpData.InsertNextTuple(warp)
warp[1] = 0.0
warpData.InsertNextTuple(warp)
warp[1] = 0.1
warpData.InsertNextTuple(warp)
polydata = vtk.vtkPolyData()
polydata.SetPoints(points)
polydata.SetLines(lines)
polydata.GetPointData().AddArray(warpData)
polydata.GetPointData().SetActiveVectors(warpData.GetName())
# WarpVector will use the array marked as active vector in polydata
# it has to be a 3 component array
# with the same number of tuples as points in polydata
warpVector = vtk.vtkWarpVector()
warpVector.SetInputData(polydata)
warpVector.Update()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(warpVector.GetPolyDataOutput())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
renderer = vtk.vtkRenderer()
renderer.AddActor(actor)
renderer.SetBackground(colors.GetColor3d('cobalt_green'))
renderWindow = vtk.vtkRenderWindow()
renderWindow.AddRenderer(renderer)
renderWindowInteractor = vtk.vtkRenderWindowInteractor()
renderWindowInteractor.SetRenderWindow(renderWindow)
renderWindow.Render()
renderWindowInteractor.Start()
if __name__ == '__main__':
main()
| lorensen/VTKExamples | src/Python/PolyData/WarpVector.py | Python | apache-2.0 | 2,235 |
import logging
import plugins
logger = logging.getLogger(__name__)
bot = plugins.tracking.bot
@bot.message_handler(commands=['echo'])
def echo_message(message):
    logger.info("{} - {}".format(message.chat.id, message.text))
    parts = message.text.split(" ")
    mes = " ".join(parts[1:]) if len(parts) > 1 else ""
    bot.reply_to(message, "Reply: {}".format(mes))
| ssdoz2sk/Telegram_ENL-Bot | plugins/echo.py | Python | agpl-3.0 | 426 |
"""
REST player
"""
from .player import Player
class RESTPlayer(Player):
"""Implementation of Player which gets moves moves from a REST Api"""
def __init__(self, state):
super(RESTPlayer, self).__init__(state)
self.move = None
def __str__(self):
return 'REST {}'.format(super(RESTPlayer, self).__str__())
def queue_move(self, move):
"""Set the move in the user so that if can be pulled out again using
get_move"""
self.move = move
def get_move(self, dummy):
"""Returns the move that was queued"""
return self.move
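# Usage sketch (the state object is whatever Player expects): a REST handler
# queues the move, and the game loop later pulls it back out:
#
#     player = RESTPlayer(state)
#     player.queue_move((0, 1))
#     player.get_move(None)   # -> (0, 1)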
| christianreimer/raottt | raottt/player/rest.py | Python | mit | 606 |
class BrokenRequestMiddleware(object):
def process_request(self, request):
raise ImportError('request')
class BrokenResponseMiddleware(object):
def process_response(self, request, response):
raise ImportError('response')
class BrokenViewMiddleware(object):
def process_view(self, request, func, args, kwargs):
raise ImportError('view')
class FilteringMiddleware(object):
def process_exception(self, request, exception):
if isinstance(exception, IOError):
            exception.skip_sentry = True
 | dirtycoder/opbeat_python | tests/contrib/django/testapp/middleware.py | Python | bsd-3-clause | 549 |
#!/usr/bin/python
# Copyright (C) Vladimir Prus 2003. Permission to copy, use, modify, sell and
# distribute this software is granted provided this copyright notice appears in
# all copies. This software is provided "as is" without express or implied
# warranty, and with no claim as to its suitability for any purpose.
# Regression test: virtual targets with different dependency properties
# were considered different by 'virtual-target.register', but the code
# which determined target paths ignored dependency properties --- so both
# targets used to be placed to the same location.
from BoostBuild import Tester, List
from string import find
t = Tester()
t.write("project-root.jam", "")
t.write("Jamfile", """
lib foo : foo.cpp ;
exe hello : hello.cpp ;
exe hello2 : hello.cpp : <library>foo ;
""")
t.write("hello.cpp", """
int main()
{
return 0;
}
""")
t.write("foo.cpp", """
#ifdef _WIN32
__declspec(dllexport)
#endif
void foo() {}
""")
t.run_build_system("--no-error-backtrace", status=1)
t.fail_test(find(t.stdout(), "Duplicate name of actual target") == -1)
t.cleanup()
| pixelspark/corespark | Libraries/Spirit/boost/miniboost/tools/build/v2/test/dependency_property.py | Python | lgpl-3.0 | 1,139 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
ogr2ogrclip.py
---------------------
Date : November 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import str
__author__ = 'Victor Olaya'
__date__ = 'November 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterString
from processing.core.outputs import OutputVector
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.algs.gdal.GdalUtils import GdalUtils
from processing.tools import dataobjects
from processing.tools.system import isWindows
from processing.tools.vector import ogrConnectionString, ogrLayerName
class Ogr2OgrClip(GdalAlgorithm):
OUTPUT_LAYER = 'OUTPUT_LAYER'
INPUT_LAYER = 'INPUT_LAYER'
CLIP_LAYER = 'CLIP_LAYER'
OPTIONS = 'OPTIONS'
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Clip vectors by polygon')
self.group, self.i18n_group = self.trAlgorithm('[OGR] Geoprocessing')
self.addParameter(ParameterVector(self.INPUT_LAYER,
self.tr('Input layer')))
self.addParameter(ParameterVector(self.CLIP_LAYER,
self.tr('Clip layer'), [dataobjects.TYPE_VECTOR_POLYGON]))
self.addParameter(ParameterString(self.OPTIONS,
self.tr('Additional creation options'), '', optional=True))
self.addOutput(OutputVector(self.OUTPUT_LAYER, self.tr('Clipped (polygon)'), datatype=[dataobjects.TYPE_VECTOR_POLYGON]))
def getConsoleCommands(self):
inLayer = self.getParameterValue(self.INPUT_LAYER)
ogrLayer = ogrConnectionString(inLayer)[1:-1]
clipLayer = self.getParameterValue(self.CLIP_LAYER)
ogrClipLayer = ogrConnectionString(clipLayer)[1:-1]
output = self.getOutputFromName(self.OUTPUT_LAYER)
outFile = output.value
output = ogrConnectionString(outFile)
options = str(self.getParameterValue(self.OPTIONS))
arguments = []
arguments.append('-clipsrc')
arguments.append(ogrClipLayer)
arguments.append("-clipsrclayer")
arguments.append(ogrLayerName(clipLayer))
if len(options) > 0:
arguments.append(options)
arguments.append(output)
arguments.append(ogrLayer)
arguments.append(ogrLayerName(inLayer))
commands = []
if isWindows():
commands = ['cmd.exe', '/C ', 'ogr2ogr.exe',
GdalUtils.escapeAndJoin(arguments)]
else:
commands = ['ogr2ogr', GdalUtils.escapeAndJoin(arguments)]
return commands
def commandName(self):
return "ogr2ogr"
| wonder-sk/QGIS | python/plugins/processing/algs/gdal/ogr2ogrclip.py | Python | gpl-2.0 | 3,659 |
import sys, argparse
import nose
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument('-p', '--public', action='store_true')
parser.add_argument('-t', '--tests', action='store_true')
if 'runtests' in sys.argv[0]:
args = parser.parse_args(sys.argv[1:])
else:
args = parser.parse_args(sys.argv)
argv = ['', '--with-coverage', '--cover-erase',
'--cover-tests', '--cover-html', '--cover-package=simpleml']
if args.verbose:
argv.append('-v')
if args.tests:
argv.append('--cover-package=tests')
if args.public:
argv.append('./tests/public_tests')
else:
argv.append('./tests/')
nose.main(argv=argv)
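# e.g.: python runtests.py -v        (verbose run with coverage)
#       python runtests.py -p -t     (public tests, also cover the tests pkg)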
| rogerfan/simpleml | runtests.py | Python | mpl-2.0 | 693 |
from kivy.app import App
from kivy.uix.floatlayout import FloatLayout
from kivy.lang import Builder
from kivy.resources import resource_find
from kivy.clock import Clock
import timeit
Builder.load_string('''
<PerfApp>:
value: 0
but: but.__self__
slider: slider
text_input: text_input
BoxLayout:
orientation: 'vertical'
TextInput:
id: text_input
BoxLayout:
orientation: 'vertical'
size_hint: 1, .2
BoxLayout:
Button:
id: but
text: 'Start Test'
on_release: root.start_test() if self.text == 'Start Test'\
else ''
Slider:
id: slider
min: 0
max: 100
value: root.value
''')
class PerfApp(App, FloatLayout):
def build(self):
return self
def __init__(self, **kwargs):
super(PerfApp, self).__init__(**kwargs)
self.tests = []
tests = (self.load_large_text, self.stress_insert,
self.stress_del, self.stress_selection)
for test in tests:
but = type(self.but)(text=test.__name__)
self.but.parent.add_widget(but)
but.test = test
self.tests.append(but)
self.test_done = True
def load_large_text(self, *largs):
print('loading uix/textinput.py....')
self.test_done = False
fd = open(resource_find('uix/textinput.py'), 'r')
print('putting text in textinput')
def load_text(*l):
self.text_input.text = fd.read()
t = timeit.Timer(load_text)
ttk = t.timeit(1)
fd.close()
import resource
print('mem usage after test')
print(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024, 'MB')
print('------------------------------------------')
print('Loaded', len(self.text_input._lines), 'lines', ttk, 'secs')
print('------------------------------------------')
self.test_done = True
def stress_del(self, *largs):
self.test_done = False
text_input = self.text_input
self.lt = len_text = len(text_input.text)
target = len_text - (210 * 9)
self.tot_time = 0
def dlt(*l):
if len(text_input.text) <= target:
Clock.unschedule(dlt)
print('Done!')
m_len = len(text_input._lines)
print('deleted 210 characters 9 times')
import resource
print('mem usage after test')
print(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss\
/ 1024, 'MB')
print('total lines in text input:', m_len)
print('--------------------------------------')
print('total time elapsed:', self.tot_time)
print('--------------------------------------')
self.test_done = True
return
text_input.select_text(self.lt - 220, self.lt - 10)
text_input.delete_selection()
self.lt -= 210
text_input.scroll_y -= 100
self.tot_time += l[0]
Clock.schedule_once(dlt)
Clock.schedule_once(dlt)
def stress_insert(self, *largs):
self.test_done = False
text_input = self.text_input
text_input.select_all()
text_input._copy(text_input.selection_text)
text_input.cursor = text_input.get_cursor_from_index(
text_input.selection_to)
len_text = len(text_input._lines)
self.tot_time = 0
def pste(*l):
if len(text_input._lines) >= (len_text) * 9:
Clock.unschedule(pste)
print('Done!')
m_len = len(text_input._lines)
print('pasted', len_text, 'lines', \
round((m_len - len_text) / len_text), 'times')
import resource
print('mem usage after test')
print(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss\
/ 1024, 'MB')
print('total lines in text input:', m_len)
print('--------------------------------------')
print('total time elapsed:', self.tot_time)
print('--------------------------------------')
self.test_done = True
return
self.tot_time += l[0]
text_input._paste()
Clock.schedule_once(pste)
Clock.schedule_once(pste)
def stress_selection(self, *largs):
self.test_done = False
text_input = self.text_input
self.tot_time = 0
old_selection_from = text_input.selection_from - 210
def pste(*l):
            # stop once the selection has grown by 210 characters; the
            # original `>=` comparison would have terminated immediately
            if text_input.selection_from <= old_selection_from:
Clock.unschedule(pste)
print('Done!')
import resource
print('mem usage after test')
print(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss\
/ 1024, 'MB')
print('--------------------------------------')
print('total time elapsed:', self.tot_time)
print('--------------------------------------')
self.test_done = True
return
text_input.select_text(text_input.selection_from - 1,
text_input.selection_to)
Clock.schedule_once(pste)
Clock.schedule_once(pste)
def start_test(self, *largs):
self.but.text = 'test started'
self.slider.max = len(self.tests)
def test(*l):
if self.test_done:
try:
but = self.tests[int(self.slider.value)]
self.slider.value += 1
but.state = 'down'
print('=====================')
print('Test:', but.text)
print('=====================')
but.test(but)
except IndexError:
for but in self.tests:
but.state = 'normal'
self.but.text = 'Start Test'
self.slider.value = 0
print('===================')
print('All Tests Completed')
print('===================')
Clock.unschedule(test)
Clock.schedule_interval(test, 1)
if __name__ in ('__main__', ):
PerfApp().run()
| JulienMcJay/eclock | windows/kivy/kivy/tests/perf_test_textinput.py | Python | gpl-2.0 | 6,601 |
# toontown.toontowngui.IntuitiveColorPicker
from panda3d.core import Texture
from direct.gui.DirectGui import *
from otp.otpgui.ColorPicker import ColorPicker
from toontown.toonbase import TTLocalizer, ToontownGlobals
import math, colorsys
class IntuitiveColorPicker(DirectFrame):
def __init__(self, parent, minSat, maxSat, minVal, maxVal, callback, colors, pos = (0, 0, 0)):
self.colors = colors[:]
self.colors.sort(key=lambda rgb: colorsys.rgb_to_hsv(*rgb[:-1]))
self.autoPress = False
self.callback = callback
self.part = 0
self.row = 0
DirectFrame.__init__(self, parent, relief=None, image=Preloaded['squareBox'], image_color=(0.16, 0.69, 1, 1), image_scale=(0.9, 1, 0.8), pos=pos)
self.initialiseoptions(IntuitiveColorPicker)
self.colorPicker = ColorPicker(self, minSat, maxSat, minVal, maxVal, callback, (0.135, 0, 0))
self.switchButton = DirectButton(self, relief=None, image=Preloaded['squareBox'], image_color=(0, 1, 0, 1), image_scale=(0.9, 0, 0.1), pos=(0, 0, -0.41), text=TTLocalizer.ColorPicker, text_scale=0.055, text_pos=(0, -0.0185), command=self.__switch, pressEffect=False)
self.regularFrame = self.attachNewNode('regularFrame')
self.regularButtons = []
for row in xrange(5):
self.regularButtons.append([ self.getRegularButton(row, column) for column in xrange(6) ])
self.regularIncButton = DirectButton(self.regularFrame, relief=None, image=Preloaded['scrollBlueArrow'], pos=(0, 0, 0.32), command=self.__incrementRow, extraArgs=[-1])
self.regularDecButton = DirectButton(self.regularFrame, relief=None, image=Preloaded['scrollBlueArrow'], pos=(0, 0, -0.32), hpr=(0, 0, 180), command=self.__incrementRow, extraArgs=[1])
self.partButtons = []
self.colorPicker.hide()
self.__updateRows()
return
def setAutoPress(self, autoPress, button = None):
self.autoPress = autoPress
if button is not None:
self.__chooseRegular(button.color)
return
def removeNode(self):
self.destroy()
def destroy(self):
if not self.switchButton:
return
else:
DirectFrame.destroy(self)
for button in self.partButtons:
button.destroy()
for buttons in self.regularButtons:
for button in buttons:
button.destroy()
self.switchButton.destroy()
self.colorPicker.removeNode()
self.regularFrame.removeNode()
self.regularIncButton.destroy()
self.regularDecButton.destroy()
self.switchButton = None
self.colorPicker = None
self.regularFrame = None
self.partButtons = []
self.regularButtons = []
self.regularIncButton = None
self.regularDecButton = None
self.callback = None
return
def setPartButtons(self, partButtons, xStart, xIncrement):
self.partButtons = [ self.getPartButton(xStart, xIncrement, *button) for button in partButtons ]
self.__choosePart(0)
def getPartButton(self, xStart, xIncrement, part, text):
return DirectButton(self, relief=None, image=Preloaded['squareBox'], image_scale=(0.2, 1, 0.1), pos=(xStart + part * xIncrement, 0, 0.435), text=text, text_scale=0.055, text_pos=(0, -0.0185), command=self.__choosePart, extraArgs=[part])
def getRegularButton(self, row, column):
button = DirectButton(self.regularFrame, relief=None, image=ToontownGlobals.getWhiteTexture(), image_scale=0.06, pos=(-0.305 + column * 0.12, 0, 0.24 - row * 0.12), pressEffect=False)
button.bind(DGG.B1PRESS, lambda event: self.setAutoPress(True, button))
button.bind(DGG.B1RELEASE, lambda event: self.setAutoPress(False))
button.bind(DGG.WITHIN, lambda event: self.__checkAutoPress(button))
return button
def getMaxRow(self):
return int(math.ceil((len(self.colors) - 30) / 5.0)) - 1
def isPartChosen(self, part):
return self.part in (0, part)
def __checkAutoPress(self, button):
if self.autoPress:
self.__chooseRegular(button.color)
def __choosePart(self, part):
for i, button in enumerate(self.partButtons):
button['image_color'] = (1, 0.5, 0, 1) if i == part else (1, 1, 0, 1)
self.part = part
def __chooseRegular(self, regular):
if regular != -1:
self.callback(self.colors[regular])
def __switch(self):
if self.colorPicker.isHidden():
self.colorPicker.show()
self.regularFrame.hide()
self.switchButton['text'] = TTLocalizer.ColorRegular
else:
self.colorPicker.hide()
self.regularFrame.show()
self.switchButton['text'] = TTLocalizer.ColorPicker
def __updateRows(self):
        if self.row == 0:
            self.regularIncButton.hide()
        else:
            self.regularIncButton.show()
        if self.row == self.getMaxRow():
            self.regularDecButton.hide()
        else:
            self.regularDecButton.show()
start = self.row * 6
for buttons in self.regularButtons:
for button in buttons:
button.color = -1
button['image_color'] = (0, 0.2, 0.4, 1)
for i, color in enumerate(self.colors[start:start + 30]):
button = self.regularButtons[i / 6][i % 6]
button.color = start + i
button['image_color'] = color
def __incrementRow(self, increment):
self.row += increment
        self.__updateRows()
 | DedMemez/ODS-August-2017 | toontowngui/IntuitiveColorPicker.py | Python | apache-2.0 | 5821 |
__author__ = 'laharah'
import gtk
import os
import time
import webbrowser
from twisted.internet import defer
from deluge.ui.client import client
import deluge.component as component
from filebottool.common import get_resource
from filebottool.common import LOG
from filebottool.gtkui.common import EditableList
from filebottool.gtkui.handler_editor import HandlerEditor
import filebottool.auto_sort
import user_messenger
SORT_OPERATORS = filebottool.auto_sort.OPERATOR_MAP.keys()
VALID_FIELDS = filebottool.auto_sort.VALID_FIELDS
FilterRule = filebottool.auto_sort.FilterRule
log = LOG
class ConfigUI(object):
"""handles the UI portion of getting and setting preferences"""
def __init__(self, settings=None):
self.glade = gtk.glade.XML(get_resource("config.glade"))
self.config_page = self.glade.get_widget("prefs_box")
self.pref_dialog = component.get("Preferences").pref_dialog
fb_icon = self.glade.get_widget("fb_icon")
image = get_resource("fb_icon16.png")
fb_icon.set_from_file(image)
model = gtk.ListStore(str)
view = self.glade.get_widget('saved_handlers_listview')
renderer = gtk.CellRendererText()
column = gtk.TreeViewColumn("Profile Name", renderer, text=0)
view.append_column(column)
model.set_sort_column_id(0, gtk.SORT_ASCENDING)
self.handlers_list = EditableList(view, model)
model = gtk.ListStore(str, str, str, str)
view = self.glade.get_widget('rule_listview')
options = [
("Field:", VALID_FIELDS),
("Comparison Operator:", SORT_OPERATORS),
]
for col_index, tup in enumerate(options):
name, items = tup
combo_model = gtk.ListStore(str)
for item in items:
combo_model.append([item])
cb = build_combo_renderer_cb(model, col_index, items)
renderer = build_combo_cellrenderer(combo_model, cb)
column = gtk.TreeViewColumn(name, renderer, text=col_index)
view.append_column(column)
renderer = gtk.CellRendererText()
renderer.set_property("editable", True)
def text_edited(widget, path, text):
model[path][2] = text
renderer.connect("edited", text_edited)
column = gtk.TreeViewColumn("Pattern to Match:", renderer, text=2)
view.append_column(column)
self.rules_list = EditableList(view, model)
self.glade.signal_autoconnect({
"on_add_handler": self.on_add_handler,
"on_remove_handler": self.handlers_list.remove,
"on_edit_handler": self.on_edit_handler,
"on_move_rule_up": self.rules_list.move_up,
"on_move_rule_down": self.rules_list.move_down,
"on_remove_rule": self.rules_list.remove,
"on_add_rule": self.on_add_rule,
"on_auto_sort_help_clicked": self.on_auto_sort_help_clicked,
"on_debug_button_clicked": self.on_debug_button_clicked,
"on_license_button_clicked": self.on_license_button_clicked,
})
self.gather_time = None
if settings:
self.populate_settings(settings)
def populate_settings(self, settings):
"""populates the UI widgets with the given settings"""
# workaround for new settings being overwritten by previous settings
if self.gather_time:
if time.time() - self.gather_time < 1:
return
self.config = settings
self.saved_handlers = settings["saved_handlers"]
self.handlers_list.clear()
for name in self.saved_handlers:
self.handlers_list.add([name])
rules = settings["auto_sort_rules"]
if len(self.rules_list.view.get_columns()) == 4: # force refresh
self.rules_list.view.remove_column(self.rules_list.view.get_column(3))
self.rule_handler_combo = build_combo_cellrenderer(
self.handlers_list.model, self.on_rule_handler_combo_changed)
column_name = "Profile to Use:"
column = gtk.TreeViewColumn(column_name, self.rule_handler_combo, text=3)
self.rules_list.view.append_column(column)
self.rules_list.clear()
for rule in rules:
self.rules_list.add(rule[1:])
for column in self.rules_list.view.get_columns():
column.set_sizing(gtk.TREE_VIEW_COLUMN_AUTOSIZE)
column.set_resizable(True)
if not rules:
for column in self.rules_list.view.get_columns():
column.set_expand(True)
def gather_settings(self):
"""
Updates the given config dictionary and updates the appropriate
settings.
"""
self.gather_time = time.time()
handlers = {}
for row in self.handlers_list.get_data():
handlers[row[0]] = self.saved_handlers[row[0]]
self.saved_handlers = handlers
self.config["saved_handlers"] = self.saved_handlers
rules = []
log.debug(self.rules_list.get_data())
for index, row in enumerate(self.rules_list.get_data()):
field, op, pat, handler = row
rules.append([index, field, op, pat, handler])
self.config['auto_sort_rules'] = rules
return self.config
#########
# Section: signal handlers
#########
def on_add_handler(self, widget):
def new_handler_cb(id, handlers):
self.handlers_list.add([id])
self.saved_handlers = handlers
log.debug(self.saved_handlers)
HandlerEditor(handlers=self.saved_handlers, cb=new_handler_cb,
parent=self.pref_dialog)
def on_edit_handler(self, widget):
handler_name = self.handlers_list.get_row()[0]
def edited_cb(id, handlers):
self.saved_handlers = handlers
if id != handler_name:
del self.saved_handlers[handler_name]
self.handlers_list.clear()
for name in self.saved_handlers:
self.handlers_list.add([name])
HandlerEditor(handlers=self.saved_handlers, initial=handler_name,
cb=edited_cb, parent=self.pref_dialog)
def on_add_rule(self, *args):
self.rules_list.add(['', "is exactly", '', ''])
path = self.rules_list.model.get_string_from_iter(self.rules_list.model[-1].iter)
self.rules_list.view.set_cursor(path)
def on_rule_handler_combo_changed(self, widget, path, text):
self.rules_list.model[path][3] = text
def on_auto_sort_help_clicked(self, *args):
webbrowser.open('https://github.com/Laharah/deluge-FileBotTool/wiki/Auto-Sorting',
new=2)
@defer.inlineCallbacks
def on_debug_button_clicked(self, button):
log.debug("Sending request for FileBot debug info...")
button.set_sensitive(False)
info = yield client.filebottool.get_filebot_debug()
log.debug("Displaying debug info")
dialog = user_messenger.UserMessenger()
dialog.display_text("Filebot Debug Info", info)
button.set_sensitive(True)
@defer.inlineCallbacks
def on_license_button_clicked(self, button):
log.debug("License button clicked.")
chooser = gtk.FileChooserDialog(_("Choose your FileBot license file"),
None,
gtk.FILE_CHOOSER_ACTION_OPEN,
buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN,
gtk.RESPONSE_OK))
chooser.set_transient_for(self.pref_dialog)
chooser.set_property("skip-taskbar-hint", True)
chooser.set_local_only(False)
file_filter = gtk.FileFilter()
file_filter.set_name(_("FileBot license files"))
file_filter.add_pattern("*." + "psm")
chooser.add_filter(file_filter)
file_filter = gtk.FileFilter()
file_filter.set_name(_("All files"))
file_filter.add_pattern("*")
chooser.add_filter(file_filter)
# Run the dialog
response = chooser.run()
if response == gtk.RESPONSE_OK:
license = chooser.get_filenames()[0]
else:
chooser.destroy()
return
chooser.destroy()
        # License file should definitely be under 10K
size = os.stat(license).st_size
if size > 10*1000:
e = user_messenger.InfoDialog("Error", "License file is too big.")
e.resize(220, 125)
e.run_async()
            # returnValue requires an explicit value; bare returnValue() raises
            defer.returnValue(None)
with open(license, 'rb') as l:
license_data = l.read()
log.debug("Sending license data to server.")
result = yield client.filebottool.activate_filebot_license(license_data)
log.debug("Recieved reply from server: %s", result)
if result.startswith("FilebotLicenseError: "):
title = "Error with License File"
msg = result[21:]
else:
title = "Success!"
msg = result
dialog = user_messenger.InfoDialog(title, msg)
dialog.resize(220, 125)
dialog.run_async()
#########
# Section: Utilities
#########
def build_combo_renderer_cb(list_store, column_number, allowed=None):
def cb(widget, path, text):
if allowed:
if text not in allowed:
return
log.debug('{0} {1} {2}'.format(widget, path, text))
list_store[path][column_number] = text
return cb
def build_combo_cellrenderer(model, cb):
renderer = gtk.CellRendererCombo()
if model:
renderer.set_property("model", model)
renderer.set_property("editable", True)
renderer.set_property("text-column", 0)
renderer.connect("edited", cb)
return renderer
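# Illustrative wiring of the two helpers above (store and column names are
# hypothetical):
#   op_store = gtk.ListStore(str)
#   for op in ("is exactly", "contains"):
#       op_store.append([op])
#   cb = build_combo_renderer_cb(rules_store, 1, allowed=("is exactly", "contains"))
#   renderer = build_combo_cellrenderer(op_store, cb)
# The renderer can then be packed into a gtk.TreeViewColumn over rules_store.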
| Laharah/deluge-FileBotTool | filebottool/gtkui/config_ui.py | Python | gpl-3.0 | 9,780 |
'''
A package of utilities for exporting NEURON models to NeuroML 2 & for analysing/comparing NEURON models to NeuroML versions
Will use some utilities from https://github.com/OpenSourceBrain/NEURONShowcase
'''
from pyneuroml.pynml import validate_neuroml1
from pyneuroml.pynml import validate_neuroml2
from pyneuroml.pynml import print_comment, print_comment_v
import os
from pyneuroml.neuron.nrn_export_utils import set_erev_for_mechanism
from neuron import *
from nrn import *
def export_to_neuroml2(hoc_or_python_file,
nml2_file_name,
includeBiophysicalProperties=True,
separateCellFiles=False,
known_rev_potentials={},
validate=True):
if hoc_or_python_file is not None:
if hoc_or_python_file.endswith(".py"):
print_comment_v("***************\nImporting Python scripts not yet implemented...\n***************")
else:
if not os.path.isfile(hoc_or_python_file):
print_comment_v("***************\nProblem importing file %s (%s)..\n***************"%(hoc_or_python_file, os.path.abspath(hoc_or_python_file)))
h.load_file(1, hoc_or_python_file) # Using 1 to force loading of the file, in case file with same name was loaded before...
else:
print_comment_v("hoc_or_python_file variable is None; exporting what's currently in memory...")
for ion in known_rev_potentials.keys():
set_erev_for_mechanism(ion,known_rev_potentials[ion])
print_comment_v("Loaded NEURON file: %s"%hoc_or_python_file)
h.load_file("mview.hoc")
h('objref mv')
h('mv = new ModelView(0)')
h.load_file("%s/mview_neuroml2.hoc"%(os.path.dirname(__file__)))
h('objref mvnml')
h('mvnml = new ModelViewNeuroML2(mv)')
nml2_level = 2 if includeBiophysicalProperties else 1
h.mvnml.exportNeuroML2(nml2_file_name, nml2_level, int(separateCellFiles))
if validate:
validate_neuroml2(nml2_file_name)
h('mv.destroy()')
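# A minimal usage sketch (file names and potentials are hypothetical):
#   export_to_neuroml2("cells/pyramidal.hoc", "pyramidal.net.nml",
#                      known_rev_potentials={"k": -85.0, "na": 50.0})
# This loads the hoc file, fixes the reversal potentials of the 'k' and 'na'
# mechanisms, exports a NeuroML2 Level 2 description and validates it.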
def export_to_neuroml1(hoc_file, nml1_file_name, level=1, validate=True):
if not (level==1 or level == 2):
print_comment_v("Only options for Levels in NeuroMLv1.8.1 are 1 or 2")
return None
h.load_file(hoc_file)
print_comment_v("Loaded NEURON file: %s"%hoc_file)
h.load_file("mview.hoc")
h('objref mv')
h('mv = new ModelView()')
h.load_file("%s/mview_neuroml1.hoc"%(os.path.dirname(__file__)))
h('objref mvnml1')
h('mvnml1 = new ModelViewNeuroML1(mv)')
h.mvnml1.exportNeuroML(nml1_file_name, level)
if validate:
validate_neuroml1(nml1_file_name)
| rgerkin/pyNeuroML | pyneuroml/neuron/__init__.py | Python | lgpl-3.0 | 2,783 |
import sys
import lexer
from ast import *
debug = False
class SymbolTable(dict):
"""
Class representing a symbol table. It should
provide functionality for adding and looking
up nodes associated with identifiers.
"""
def __init__(self, decl=None):
super().__init__()
self.decl = decl
def add(self, name, value):
self[name] = value
def lookup(self, name):
return self.get(name, None)
def return_type(self):
if self.decl:
return self.decl
return None
class ExprType(object):
def __init__(self, type, unary_ops, binary_ops, closed_dyadic_ops, default_value):
super().__init__()
self.type = type
self.unary_ops = unary_ops
self.binary_ops = binary_ops
self.closed_dyadic_ops = closed_dyadic_ops
self.default_value = default_value
def __str__(self):
return self.type
relational_ops = ["!=", "==", ">", ">=", "<", "<="]
membership_ops = ["in"]
int_type = ExprType("int",["-"],["+", "-", "*", "/", "%", "!=", "==", ">", ">=", "<", "<="],['+=','-=','*=','/=','%='],0)
bool_type = ExprType("bool",["!"],["==", "!=", "&&", "||"],[],False)
char_type = ExprType("char",[],[],[],"")
string_type = ExprType("string",[],["+", "==", "!="],['+='],"")
void_type = ExprType("void",[],[],[],"")
class Environment(object):
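    """
    Stack of SymbolTables modelling nested scopes. The auxiliary stacks
    (parameter_space_stack, procedure_scope_stack, etc.) carry per-procedure
    bookkeeping while the visitor walks procedure definitions.
    """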
def __init__(self):
self.stack = []
self.scope_stack = []
self.root = SymbolTable()
self.stack.append(self.root)
self.offset = 0
self.current_scope = -1
self.scope_offset = dict()
self.parameter_space_stack = []
self.procedure_scope_stack = []
self.procedure_offset_stack = []
self.procedure_is_loc_stack = []
self.procedure_has_returns_stack = []
self.expected_return_stack = []
self.root.update({
"int": int_type,
"char": char_type,
"string": string_type,
"bool": bool_type
})
def get_current_scope(self):
return self.scope_stack[-1]
def push(self, enclosure):
self.current_scope += 1
self.stack.append(SymbolTable(decl=enclosure))
self.scope_stack.append(self.current_scope)
self.offset = 0
self.scope_offset[self.current_scope] = 0
def pop(self):
self.scope_stack.pop()
self.stack.pop()
def peek(self):
return self.stack[-1]
def parent_scope(self):
return self.stack[-2]
def scope_level(self):
return len(self.stack)
def add_local(self, name, value):
self.peek().add(name, value)
def add_parent(self, name, value):
self.parent_scope().add(name, value)
def add_root(self, name, value):
self.root.add(name, value)
def lookup(self, name):
for scope in reversed(self.stack):
hit = scope.lookup(name)
if hit is not None:
return hit
return None
def find(self, name):
        return name in self.stack[-1]
def printStack(self):
print("Printing environment scope stack:")
for table in self.stack:
print(table)
def getOffset(self):
return self.offset
class Visitor(NodeVisitor):
"""
Program Visitor class. This class uses the visitor pattern as
    described in lya_ast.py. It defines methods of the form
visit_NodeName() for each kind of AST node that we want to process.
Note: You will need to adjust the names of the AST nodes if you
picked different names.
"""
def __init__(self):
self.environment = Environment()
self.typemap = {
"int": int_type,
"char": char_type,
"string": string_type,
"bool": bool_type,
"void": void_type
}
self.assign = '='
self.array_symbol = '$'
self.ref_symbol = '&'
self.loc_symbol = '!'
self.semantic_error = False
self.string_literals = []
self.string_literals_ascii = []
def executeBinaryOperation(self, a, b, operation, lineno):
try:
if operation == '+':
return a+b
elif operation == '-':
return a-b
elif operation == '*':
return a*b
elif operation == '/':
return int(a/b)
elif operation == '%':
return a%b
elif operation == '>':
return a > b
elif operation == '>=':
return a >= b
elif operation == '<':
return a < b
elif operation == '<=':
return a <= b
elif operation == '==':
return a == b
elif operation == '!=':
return a != b
elif operation == '&&':
return a and b
elif operation == '||':
return a or b
except:
self.print_error(lineno, "Attempted binary operation on mismatching types ({}{}{})".format(a, operation, b))
def executeUnaryOperation(self, a, operation, lineno):
try:
if operation == '-':
return -a
elif operation == '!':
return not a
except:
self.print_error(lineno, "Attempted binary operation on mismatching types ({}{})".format(operation, a))
def print_error(self, lineno, text):
if lineno is None:
e = "ERROR: "
else:
e = "ERROR (line " + str(lineno) + "): "
print(e + text)
self.semantic_error = True
def get_exprType(self, raw_type, lineno):
if raw_type in self.typemap:
return self.typemap[raw_type]
self.print_error(lineno, "Type {} not found".format(raw_type))
return self.typemap["void"]
def raw_type_unary(self, node, op, val):
if hasattr(val, "raw_type") and (val.raw_type != None):
val_type = self.get_exprType(val.raw_type, node.lineno)
if op not in val_type.unary_ops:
self.print_error(node.lineno,
"Unary operator {} not supported".format(op))
return val.raw_type
return None
def raw_type_binary(self, node, op, left, right):
if hasattr(left, "raw_type") and hasattr(right, "raw_type") and (left.raw_type != None) and (right.raw_type != None):
left_type = self.get_exprType(left.raw_type, node.lineno)
right_type = self.get_exprType(right.raw_type, node.lineno)
if left_type != right_type:
self.print_error(node.lineno,
"Binary operator {} does not have matching types: {} and {}".format(op, left_type, right_type))
return left_type
errside = None
if op not in left_type.binary_ops:
errside = "LHS"
if op not in right_type.binary_ops:
errside = "RHS"
if errside is not None:
self.print_error(node.lineno,
"Binary operator {} not supported on {} of expression".format(op, errside))
if op in relational_ops:
return 'bool'
return left.raw_type
if(not hasattr(left, "raw_type")):
self.print_error(node.lineno,
"Operand {} has no type".format(left))
else:
self.print_error(node.lineno,
"Operand {} has no type".format(right))
return None
def visit_Program(self,node):
self.environment.push(node)
node.environment = self.environment
node.symtab = self.environment.peek()
# Visit all of the statements
if not node.stmts is None:
for stmts in node.stmts: self.visit(stmts)
node.scope_offset = self.environment.scope_offset
for strin in self.string_literals:
s = ''
for c in strin:
s += chr(c)
self.string_literals_ascii.append(s)
def visit_Declaration_Statement(self,node):
# Visit all of the declarations
if not node.declaration_list is None:
for dcl in node.declaration_list: self.visit(dcl)
def visit_Declaration(self,node):
self.visit(node.mode)
if not node.initialization is None:
self.visit(node.initialization)
if(node.mode.raw_type != node.initialization.raw_type):
self.print_error(node.lineno, "Mismatched type initialization, expected " + str(node.mode.raw_type) + ", found " + str(node.initialization.raw_type))
# Visit all of the identifiers
if not node.identifier_list is None:
for ident in node.identifier_list:
aux_type = self.environment.lookup(ident.ID)
if not aux_type is None and aux_type[0] == 'var' and aux_type[5] == self.environment.get_current_scope():
self.print_error(node.lineno,
"Identifier " + str(ident.ID) + " already declared as {} {}".format(aux_type[0],
aux_type[1]))
else:
node.scope = self.environment.get_current_scope()
node.offset = self.environment.scope_offset[node.scope]
self.environment.scope_offset[node.scope] += node.mode.size
self.environment.add_local(ident.ID, ['var',
node.mode.raw_type,
False,
node.mode.size,
node.offset,
node.scope,
node.mode.upper_bound_value,
node.mode.lower_bound_value])
self.visit(ident)
def visit_Identifier(self, node):
node.type = self.environment.lookup(node.ID)
node.raw_type = '^UNDEFINED^'
node.dcl_type = '^UNDEFINED^'
node.loc = False
node.offset = 0
node.scope = 0
node.lower_bound_value = 0
node.upper_bound_value = 0
if(node.type != None):
node.dcl_type = node.type[0] # declaration type (var, proc, synonym, label, etc.)
node.raw_type = node.type[1] # raw type (int, bool, char, etc.)
node.loc = node.type[2] # loc
if node.dcl_type != 'proc':
node.size = node.type[3]
node.offset = node.type[4]
node.scope = node.type[5]
if node.dcl_type == 'synonym':
node.value = node.type[6]
else:
node.offset = node.type[4]
node.scope = node.type[5]
if node.raw_type[0] == '$' or node.raw_type == 'string': # this is an array
node.lower_bound_value = node.type[-1]
node.upper_bound_value = node.type[-2]
else:
self.print_error(node.lineno,
"Identifier {} was not defined".format(node.ID))
def visit_Synonym_Statement(self, node):
# Visit all of the synonyms
if not node.synonym_list is None:
for syn in node.synonym_list: self.visit(syn)
def visit_Synonym_Definition(self, node):
self.visit(node.constant_expression)
if not node.mode is None:
self.visit(node.mode)
if (node.mode.raw_type != node.constant_expression.raw_type):
self.print_error(node.lineno,
"Mismatched type initialization, expected " + node.mode.raw_type + ", found " + node.initialization.type)
for ident in node.identifier_list:
aux_type = self.environment.lookup(ident.ID)
if not aux_type is None:
self.print_error(node.lineno,
"Identifier " + str(ident.ID) + " already declared as {} {}".format(aux_type[0], aux_type[1]))
else:
self.environment.add_local(ident.ID, ['synonym',
node.constant_expression.raw_type,
False,
node.constant_expression.size,
node.constant_expression.size,
self.environment.get_current_scope(),
node.constant_expression.value,
node.constant_expression.upper_bound_value,
node.constant_expression.lower_bound_value])
def visit_Constant_Expression(self, node):
self.visit(node.expression)
if node.expression.value == None:
self.print_error(node.lineno,
"Expression in synonym declaration is not constant")
node.value = node.expression.value
node.raw_type = node.expression.raw_type
node.size = node.expression.size
node.lower_bound_value = 0
node.upper_bound_value = 0
def visit_Newmode_Statement(self, node):
if not node.newmode_list is None:
for newmode in node.newmode_list: self.visit(newmode)
def visit_Mode_Definition(self, node):
self.visit(node.mode)
if not node.identifier_list is None:
for ident in node.identifier_list:
aux_type = self.environment.lookup(ident.ID)
if not aux_type is None:
self.print_error(node.lineno,
"Identifier " + str(ident.ID) + " already declared as {} {}".format(aux_type[0],
aux_type[1]))
else:
self.environment.add_local(ident.ID, ['mode',
node.mode.raw_type,
False,
node.mode.size,
node.mode.size,
self.environment.get_current_scope(),
node.mode.upper_bound_value,
node.mode.lower_bound_value])
def visit_Integer_Mode(self, node):
node.raw_type = 'int'
node.size = 1
node.lower_bound_value = 0
node.upper_bound_value = 0
def visit_Boolean_Mode(self, node):
node.raw_type = 'bool'
node.size = 1
node.lower_bound_value = 0
node.upper_bound_value = 0
def visit_Character_Mode(self, node):
node.raw_type = 'char'
node.size = 1
node.lower_bound_value = 0
node.upper_bound_value = 0
def visit_Discrete_Range_Mode(self, node):
self.visit(node.literal_range)
node.lower_bound_value = 0
node.upper_bound_value = 0
raw_type = None
params = []
if node.identifier is not None:
self.visit(node.identifier)
raw_type = node.identifier.raw_type
else:
self.visit(node.discrete_mode)
raw_type = node.discrete_mode.raw_type
if hasattr(node.discrete_mode, "params"):
params = node.discrete_mode.params
node.raw_type = raw_type
        params.append(node.literal_range.raw_type)
        node.params = params
def visit_Mode_Name(self, node):
self.visit(node.identifier)
if node.identifier.type is None or (node.identifier.type is not None and node.identifier.type[0] != 'mode'):
self.print_error(node.lineno, "{} is not a valid mode.".format(node.identifier.ID))
node.raw_type = node.identifier.raw_type
node.size = node.identifier.size
node.lower_bound_value = node.identifier.lower_bound_value
node.upper_bound_value = node.identifier.upper_bound_value
def visit_Literal_Range(self, node):
self.visit(node.lower_bound.expression)
self.visit(node.upper_bound.expression)
node.size = 1
if node.lower_bound.expression.raw_type != None and node.upper_bound.expression.raw_type != None:
if node.lower_bound.expression.raw_type != node.upper_bound.expression.raw_type:
self.print_error(node.lineno, "Mismatching bound types in literal range")
else:
node.size = node.upper_bound.expression.value - node.lower_bound.expression.value + 1
if node.size < 0:
self.print_error(node.lineno, "Upper bound must be greater than lower bound")
else:
self.print_error(node.lineno, "Improper literal range")
node.raw_type = node.lower_bound.expression.raw_type
node.lower_bound_value = node.lower_bound.expression.value
node.upper_bound_value = node.upper_bound.expression.value
def visit_Reference_Mode(self, node):
self.visit(node.mode)
node.raw_type = self.ref_symbol + node.mode.raw_type
node.size = node.mode.size
node.lower_bound_value = node.mode.lower_bound_value
node.upper_bound_value = node.mode.upper_bound_value
def visit_String_Mode(self, node):
node.raw_type = 'string'
node.size += 1
node.lower_bound_value = 0
node.upper_bound_value = node.size
def visit_Array_Mode(self, node):
node.size = 1
if not node.index_mode_list is None:
for index_mode in node.index_mode_list:
self.visit(index_mode)
node.size *= index_mode.size
self.visit(node.element_mode)
node.raw_type = self.array_symbol + node.element_mode.raw_type
node.size *= node.element_mode.size
node.lower_bound_value = node.index_mode_list[0].lower_bound_value
node.upper_bound_value = node.index_mode_list[0].upper_bound_value
def visit_Element_Mode(self, node):
self.visit(node.mode)
node.raw_type = node.mode.raw_type
def visit_Integer_Expression(self, node):
self.visit(node.expression)
exp_type = node.expression.raw_type
if (exp_type != 'int'):
self.print_error(node.lineno, "Expected integer expression, found {}".format(exp_type))
else:
node.value = node.expression.value
node.raw_type = exp_type
# location
def visit_Dereferenced_Reference(self, node):
self.visit(node.location)
raw_type = None
if node.location.raw_type is not None and node.location.raw_type[0] == self.ref_symbol:
raw_type = node.location.raw_type.replace(self.ref_symbol, '', 1)
else:
self.print_error(node.lineno, "Attempted to dereference in non-reference element")
node.raw_type = raw_type
node.dcl_type = node.location.dcl_type
node.ID = node.location.ID
node.loc = node.location.loc
# expression_list
def visit_Array_Element(self, node):
self.visit(node.array_location)
raw_type = None
if node.array_location.raw_type is not None:
if node.array_location.raw_type == 'string':
raw_type = 'char'
elif node.array_location.raw_type[0] == self.array_symbol:
raw_type = node.array_location.raw_type.replace(self.array_symbol,'',1)
else:
self.print_error(node.lineno, "Attempted subscript in non-array element")
if not node.expression_list is None:
for expression in node.expression_list: self.visit(expression)
node.raw_type = raw_type
node.dcl_type = node.array_location.dcl_type
node.ID = node.array_location.ID
node.loc = node.array_location.loc
node.lower_bound_value = node.array_location.lower_bound_value
node.upper_bound_value = node.array_location.upper_bound_value
def visit_Array_Slice(self, node):
self.visit(node.array_location)
self.visit(node.lower_bound)
self.visit(node.upper_bound)
node.raw_type = node.array_location.raw_type
node.dcl_type = node.array_location.dcl_type
node.ID = node.array_location.ID
node.loc = node.array_location.loc
if (node.lower_bound.raw_type != node.upper_bound.raw_type):
self.print_error(node.lineno, "Mismatching bound types {} and {} in array slice".format(node.lower_bound.raw_type, node.upper_bound.raw_type))
def visit_Array_Location(self, node):
self.visit(node.location)
node.raw_type = node.location.raw_type
node.dcl_type = node.location.dcl_type
node.ID = node.location.ID
node.loc = node.location.loc
node.lower_bound_value = 0
node.upper_bound_value = 0
if hasattr(node.location, "lower_bound_value"):
node.lower_bound_value = node.location.lower_bound_value
node.upper_bound_value = node.location.upper_bound_value
else:
self.print_error(node.lineno, "Array must have a lower bound")
def visit_Integer_Literal(self, node):
node.raw_type = 'int'
node.dcl_type = 'literal'
node.ID = None
node.loc = False
def visit_Boolean_Literal(self, node):
node.raw_type = 'bool'
node.dcl_type = 'literal'
node.ID = None
node.loc = False
def visit_Character_Literal(self, node):
node.raw_type = 'char'
node.dcl_type = 'literal'
node.ID = None
node.loc = False
def visit_Empty_Literal(self, node):
node.raw_type = 'void'
node.dcl_type = 'literal'
node.ID = None
node.loc = False
def visit_Character_String_Literal(self, node):
node.raw_type = 'string'
node.dcl_type = 'literal'
node.ID = None
node.loc = False
node.heap_index = len(self.string_literals)
st = node.value[1:-1]
nt = []
f = True
for i in range(len(st)):
if(st[i] == ord('\\') and st[i + 1] == ord('n')):
nt = nt + [ord('\n')]
i += 1
f = False
elif f:
nt = nt + [st[i]]
else:
f = True
self.string_literals.append(nt)
node.value = nt
def visit_Value_Array_Element(self, node):
self.visit(node.array_primitive_value)
self.visit(node.integer_expression)
node.raw_type = node.array_primitive_value.raw_type
node.dcl_type = 'literal'
node.ID = None
node.loc = False
def visit_Value_Array_Slice(self, node):
self.visit(node.array_primitive_value)
self.visit(node.lower_bound)
self.visit(node.upper_bound)
node.raw_type = node.array_primitive_value.raw_type
node.dcl_type = 'literal'
node.ID = None
node.loc = False
def visit_Array_Primitive_Value(self, node):
self.visit(node.primitive_value)
        node.raw_type = node.primitive_value.raw_type
# expression
def visit_Conditional_Expression(self, node):
self.visit(node.boolean_expression)
self.visit(node.then_expression)
then_type = node.then_expression.raw_type
elsif_type = then_type
if not node.elsif_expression is None:
self.visit(node.elsif_expression)
elsif_type = node.elsif_expression.raw_type
self.visit(node.else_expression)
else_type = node.else_expression.raw_type
if not (then_type == elsif_type and elsif_type == else_type):
aux_msg = "Mismatching types in conditional expression, found {}".format(then_type)
if not node.elsif_expression is None:
aux_msg += ", {}".format(elsif_type)
aux_msg += " and {}".format(else_type)
self.print_error(node.lineno, aux_msg)
else:
if node.boolean_expression.value != None:
if node.boolean_expression.value:
node.value = node.then_expression.value
elif node.elsif_expression != None:
choice = node.elsif_expression.was_chosen
if choice != None:
if choice:
node.value = node.elsif_expression.value
else:
node.value = node.else_expression.value
node.raw_type = then_type
node.dcl_type = 'conditional expression'
node.ID = None
node.loc = False
def visit_Boolean_Expression(self, node):
self.visit(node.expression)
exp_type = None
if node.expression.raw_type != None:
exp_type = node.expression.raw_type
if (exp_type != 'bool'):
self.print_error(node.lineno, "Expected boolean expression, found {}".format(exp_type))
else:
node.value = node.expression.value
node.raw_type = exp_type
def visit_Then_Expression(self, node):
self.visit(node.expression)
exp_type = node.expression.raw_type
node.raw_type = exp_type
node.value = node.expression.value
def visit_Else_Expression(self, node):
self.visit(node.expression)
exp_type = node.expression.raw_type
node.raw_type = exp_type
node.value = node.expression.value
def visit_Elsif_Expression(self, node):
node.was_chosen = None
self.visit(node.boolean_expression)
self.visit(node.then_expression)
then_type = node.then_expression.raw_type
if node.elsif_expression == None:
if node.boolean_expression.value != None:
if node.boolean_expression.value:
node.value = node.then_expression.value
node.was_chosen = node.boolean_expression.value
else:
self.visit(node.elsif_expression)
elsif_type = node.elsif_expression.raw_type
if(then_type != elsif_type):
self.print_error(node.lineno, "Mismatching types in Elsif expression {} and {}".format(then_type, elsif_type))
else:
if node.elsif_expression.was_chosen:
node.value = node.elsif_expression.value
node.was_chosen = True
elif node.elsif_expression.was_chosen == False:
node.was_chosen = node.boolean_expression.value
if node.was_chosen:
node.value = node.then_expression.value
node.raw_type = then_type
def visit_Rel_Mem_Expression(self, node):
self.visit(node.operand0)
self.visit(node.operand1)
node.raw_type = self.raw_type_binary(node, node.operator1, node.operand0, node.operand1)
if node.operand0.value != None and node.operand1.value != None and not self.semantic_error:
node.value = self.executeBinaryOperation(node.operand0.value, node.operand1.value, node.operator1, node.lineno)
node.dcl_type = 'relational expression'
node.ID = None
node.loc = False
# operator1
# relational_operator
# membership_operator
def visit_Binary_Expression(self, node):
self.visit(node.operand1)
self.visit(node.operand2)
node.raw_type = self.raw_type_binary(node, node.operator2, node.operand1, node.operand2)
if node.operand1.value != None and node.operand2.value != None and not self.semantic_error:
if node.operand1.raw_type != 'string':
node.value = self.executeBinaryOperation(node.operand1.value, node.operand2.value, node.operator2, node.lineno)
else:
node.value = node.operand1.value + node.operand2.value
node.heap_index = len(self.string_literals)
self.string_literals.append(node.value)
node.dcl_type = 'binary expression'
node.ID = None
node.loc = False
# operator2
# arithmetic_additive_operator
# string_concatenation_operator
# operand2
# arithmetic_multiplicative_operator
def visit_Unary_Expression(self, node):
self.visit(node.operand4)
node.raw_type = self.raw_type_unary(node, node.monadic_operator, node.operand4)
if node.operand4.value != None and not self.semantic_error:
node.value = self.executeUnaryOperation(node.operand4.value, node.monadic_operator, node.lineno)
node.dcl_type = 'unary expression'
node.ID = None
node.loc = False
# monadic_operator
# operand4
def visit_Referenced_Location(self, node):
self.visit(node.location)
node.raw_type = self.ref_symbol + node.location.raw_type
node.dcl_type = node.location.dcl_type
node.ID = node.location.ID
node.loc = node.location.loc
def visit_Action_Statement(self, node):
self.visit(node.label_id)
self.visit(node.action)
def visit_Label_Id(self, node):
ident = node.identifier
aux_type = self.environment.lookup(ident.ID)
if not aux_type is None:
self.print_error(node.lineno,
"Identifier " + str(ident.ID) + " already declared as {} {}".format(aux_type[0],
aux_type[1]))
else:
self.environment.add_local(ident.ID, ['label',
'void',
False,
0,
0,
self.environment.get_current_scope(),
0])
# action
# bracketed_action
def visit_Assignment_Action(self, node):
self.visit(node.location)
self.visit(node.expression)
if(hasattr(node.location, "dcl_type") and hasattr(node.location, "raw_type") and hasattr(node.expression, "raw_type")):
if node.location.dcl_type is None:
self.print_error(node.lineno, "Assigning to undefined location")
return
if node.location.dcl_type != 'var' and node.location.dcl_type != 'proc':
self.print_error(node.lineno, "Assignment to unsupported dcl_type {}".format(node.location.dcl_type))
return
if node.location.dcl_type == 'proc' and not node.location.loc:
self.print_error(node.lineno, "Assignment to unsupported dcl_type {}".format(node.location.dcl_type))
return
if node.expression.raw_type is None:
self.print_error(node.lineno, "Assigning from undefined location")
return
exp_type = node.expression.raw_type
if(node.location.raw_type != exp_type):
self.print_error(node.lineno,
"Mismatched assignment types {} and {}".format(node.location.raw_type, exp_type))
if(node.assigning_operator != self.assign):
loc_type = self.get_exprType(node.location.raw_type, node.lineno)
if not (node.assigning_operator in loc_type.closed_dyadic_ops):
self.print_error(node.lineno, "Assignment operator {} not supported".format(node.assigning_operator))
if(not hasattr(node.location, "dcl_type")):
self.print_error(node.lineno,
"Location {} has no dcl_type".format(node.location))
if (not hasattr(node.location, "raw_type")):
self.print_error(node.lineno,
"Location {} has no type".format(node.location))
if(not hasattr(node.expression, "raw_type")):
self.print_error(node.lineno,
"Expression {} has no type".format(node.expression))
# assigning_operator
# closed_dyadic_operator
def visit_If_Action(self, node):
self.visit(node.boolean_expression)
self.environment.push("IF_ACTION.THEN_CLAUSE")
self.visit(node.then_clause)
self.environment.pop()
if(node.else_clause != None):
self.environment.push("IF_ACTION.ELSE_CLAUSE")
self.visit(node.else_clause)
self.environment.pop()
def visit_Then_Clause(self, node):
if not node.action_statement_list is None:
for action_statement in node.action_statement_list: self.visit(action_statement)
# action_statement_list
def visit_Else_Clause(self, node):
if not node.action_statement_list is None:
for action_statement in node.action_statement_list: self.visit(action_statement)
self.visit(node.boolean_expression)
self.visit(node.then_clause)
self.visit(node.else_clause)
def visit_Do_Action(self, node):
self.environment.push("DO_ACTION")
self.visit(node.control_part)
if not node.action_statement_list is None:
for action_statement in node.action_statement_list: self.visit(action_statement)
self.environment.pop()
def visit_Control_Part(self, node):
self.visit(node.for_control)
self.visit(node.while_control)
# for_control
# iteration
def visit_Step_Enumeration(self, node):
self.visit(node.loop_counter)
self.visit(node.start_value)
self.visit(node.step_value)
self.visit(node.end_value)
def visit_Loop_Counter(self, node):
self.visit(node.identifier)
if node.identifier.dcl_type != 'var':
self.print_error(node.lineno, "Loop counter is not variable.")
# start_value
# step_value
# end_value
# discrete_expression
def visit_Range_Enumeration(self, node):
self.visit(node.loop_counter)
self.visit(node.discrete_mode)
def visit_While_Control(self, node):
self.visit(node.boolean_expression)
def visit_Procedure_Call(self, node):
self.visit(node.identifier)
# type is a matrix.
# type[0] is the dcl_type, expected to be 'proc'.
# type[1] is the raw return type.
# type[2] is a boolean value representing LOC
# type[3] is a list of parameters. each parameter is a list by itself:
# type[3][i][0] is the parameter type.
        # type[3][i][1] is the parameter size in memory.
# type[3][i][2] is a boolean value representing LOC
# type[4] is the return offset (in the procedure scope, so it's something like
### offset = -2 (this is the base) - total parameter size - return size =~ -3 or lower
# type[5] is the procedure scope
# now go forth and conquer the beast!
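        # For instance, a procedure taking one int parameter and returning an
        # int might be stored as (values illustrative only):
        #   ['proc', 'int', False, [['int', 1, False]], -4, 1]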
type = self.environment.lookup(node.identifier.ID)
if type is None:
self.print_error(node.lineno,"Procedure {} not found".format(node.identifier.ID))
return
node.dcl_type = type[0]
node.raw_type = type[1]
node.ID = node.identifier.ID
node.loc = node.identifier.loc
node.offset = node.identifier.offset
node.scope = node.identifier.scope
node.return_size = 0
if type[1] != None:
node.return_size = 1
if (type[0] != 'proc'):
self.print_error(node.lineno, "Expected Procedure call {}, found {} {}".format(node.identifier.ID, type[0], type[1]))
else:
parameter_count = 0
if not node.parameter_list is None:
parameter_count = len(node.parameter_list)
expected_count = len(type[3])
if (parameter_count != expected_count):
self.print_error(node.lineno, "Incorrect parameter count at Procedure {}; Expected {}, found {}".format(node.identifier.ID, expected_count, parameter_count))
elif not node.parameter_list is None:
for i, param in enumerate(node.parameter_list, start=0):
self.visit(param)
param.is_reference = False
if (param.raw_type != type[3][i][0]):
self.print_error(node.lineno,
"Incorrect parameter type at position i={}; Expected {}, found {}".format(
i, type[3][i][0], param.raw_type))
elif type[3][i][2]:
if param.dcl_type != 'var' and param.dcl_type != 'proc':
self.print_error(node.lineno,
"Expected location at position i={}; Found {} instead".format(
i, param.dcl_type))
elif param.dcl_type == 'proc' and not param.loc:
self.print_error(node.lineno,
"Expected location at position i={}; Found {} instead".format(
i, param.dcl_type))
else:
param.is_reference = True
# parameter_list
def visit_Parameter(self, node):
self.visit(node.expression)
node.raw_type = node.expression.raw_type
node.dcl_type = node.expression.dcl_type
node.ID = node.expression.ID
node.loc = node.expression.loc
def visit_Exit_Action(self, node):
self.visit(node.exit_label_id)
def visit_Exit_Label_ID(self, node):
node.type = self.environment.lookup(node.ID)
node.raw_type = None
node.dcl_type = None
node.loc = False
node.offset = 0
node.scope = 0
if(node.type != None):
node.dcl_type = node.type[0]
node.raw_type = node.type[1]
node.loc = node.type[2]
        if node.dcl_type != 'label':
self.print_error(node.lineno,
"Label {} was not defined".format(node.ID))
def visit_Return_Action(self, node):
self.environment.procedure_has_returns_stack[-1] = True
node.scope = self.environment.procedure_scope_stack[-1]
node.parameter_space = self.environment.parameter_space_stack[-1]
node.offset = self.environment.procedure_offset_stack[-1]
node.loc = False
found_type = 'void'
if not node.result is None:
self.visit(node.result)
found_type = node.result.raw_type
if self.environment.expected_return_stack[-1] is None:
if not (found_type is None or found_type == 'void'):
self.print_error(node.lineno, "Expected void return, found {}".format(found_type))
else:
node.loc = self.environment.expected_return_stack[-1].loc
if (found_type != self.environment.expected_return_stack[-1].mode.raw_type):
self.print_error(node.lineno, "Expected {} return, found {}".format(self.environment.expected_return_stack[-1].mode.raw_type, found_type))
def visit_Result_Action(self, node):
self.environment.procedure_has_returns_stack[-1] = True
node.scope = self.environment.procedure_scope_stack[-1]
node.parameter_space = self.environment.parameter_space_stack[-1]
node.offset = self.environment.procedure_offset_stack[-1]
        expected = self.environment.expected_return_stack[-1]
        node.loc = expected.loc if expected is not None else False
self.visit(node.result)
found_type = node.result.raw_type
if self.environment.expected_return_stack[-1] is None:
self.print_error(node.lineno, "Expected void result, found {}".format(found_type))
else:
if (found_type != self.environment.expected_return_stack[-1].mode.raw_type):
self.print_error(node.lineno, "Expected {} result, found {}".format(self.environment.expected_return_stack[-1].mode.raw_type, found_type))
def visit_Builtin_Call(self, node):
self.visit(node.builtin_name)
if not node.parameter_list is None:
for param in node.parameter_list: self.visit(param)
node.raw_type = 'void'
node.dcl_type = 'proc'
node.ID = node.builtin_name.name
node.loc = False
if node.ID in ['asc','lower','upper']:
node.raw_type = 'char'
elif node.ID in ['num', 'abs','length']:
node.raw_type = 'int'
def visit_Builtin_Name(self, node):
return
def visit_Procedure_Statement(self, node):
proc_name = node.label_id.identifier.ID
self.environment.push('PROCEDURE DECLARATION '+ proc_name)
self.visit(node.procedure_definition)
self.environment.pop()
self.environment.parameter_space_stack.pop()
self.environment.procedure_scope_stack.pop()
self.environment.procedure_offset_stack.pop()
node.scope = node.procedure_definition.scope
def visit_Procedure_Definition(self, node):
self.visit(node.formal_procedure_head)
node.scope = node.formal_procedure_head.scope
node.parameter_space = node.formal_procedure_head.parameter_space
self.environment.parameter_space_stack.append(node.parameter_space)
self.environment.procedure_scope_stack.append(node.scope)
self.environment.procedure_offset_stack.append(node.formal_procedure_head.offset)
self.environment.procedure_has_returns_stack.append(False)
self.environment.expected_return_stack.append(node.formal_procedure_head.result_spec)
if not node.statement_list is None:
for statement in node.statement_list:
self.visit(statement)
if not self.environment.procedure_has_returns_stack[-1] and self.environment.expected_return_stack[-1] is not None:
print(self.environment.procedure_has_returns_stack, self.environment.expected_return_stack[-1])
proc_name = self.environment.peek().return_type().replace("PROCEDURE DECLARATION ", "")
self.print_error(node.lineno, "Procedure {} has no return".format(proc_name))
self.environment.procedure_has_returns_stack.pop()
self.environment.expected_return_stack.pop()
def visit_Formal_Procedure_Head(self, node):
node.param_types = []
node.scope = self.environment.get_current_scope()
self.environment.offset = -2
node.offset = -2
if not node.formal_parameter_list is None:
for formal_param in node.formal_parameter_list:
self.visit(formal_param)
node.param_types += formal_param.param_list
param_list = node.param_types
if node.result_spec is None:
result_type = 'void'
result_loc = False
else:
self.visit(node.result_spec)
result_type = node.result_spec.mode.raw_type
result_loc = node.result_spec.loc
node.offset = self.environment.offset - node.result_spec.mode.size
node.parameter_space = (-2) - self.environment.offset
proc_name = self.environment.peek().return_type().replace("PROCEDURE DECLARATION ","")
aux_type = self.environment.lookup(proc_name)
if not aux_type is None:
self.print_error(node.lineno,
"Identifier " + str(proc_name) + " already declared as {} {}".format(aux_type[0],
aux_type[1]))
else:
self.environment.add_parent(proc_name, ['proc',
result_type,
result_loc,
node.param_types,
node.offset,
node.scope])
node.raw_type = result_type
node.loc = result_loc
self.environment.offset = 0
# formal_parameter_list
def visit_Formal_Parameter(self, node):
self.visit(node.parameter_spec)
node.mode = node.parameter_spec.mode
node.raw_type = node.mode.raw_type
node.loc = node.parameter_spec.loc
node.param_list = []
if not node.identifier_list is None:
for ident in node.identifier_list:
aux_type = self.environment.lookup(ident.ID)
if not aux_type is None and aux_type[0] == 'var' and aux_type[5] == self.environment.get_current_scope():
self.print_error(node.lineno,
"Identifier " + str(ident.ID) + " already declared as {} {}".format(aux_type[0],aux_type[1]))
else:
self.environment.offset -= 1
offset = self.environment.offset
self.environment.add_local(ident.ID, ['var',
node.mode.raw_type,
node.loc,
node.mode.size,
offset,
self.environment.get_current_scope(),
node.mode.upper_bound_value,
node.mode.lower_bound_value])
node.param_list.append([node.raw_type, node.mode.size, node.loc])
def visit_Parameter_Spec(self, node):
self.visit(node.mode)
def visit_Result_Spec(self, node):
self.visit(node.mode)
| gmCrivelli/Lya-Compiler | semantic.py | Python | mit | 46,544 |
import sys
from pyspark.sql import SparkSession, functions, types
spark = SparkSession.builder.appName('reddit averages').getOrCreate()
assert sys.version_info >= (3, 4) # make sure we have Python 3.4+
assert spark.version >= '2.1' # make sure we have Spark 2.1+
schema = types.StructType([ # commented-out fields won't be read
#types.StructField('archived', types.BooleanType(), False),
#types.StructField('author', types.StringType(), False),
#types.StructField('author_flair_css_class', types.StringType(), False),
#types.StructField('author_flair_text', types.StringType(), False),
#types.StructField('body', types.StringType(), False),
#types.StructField('controversiality', types.LongType(), False),
#types.StructField('created_utc', types.StringType(), False),
#types.StructField('distinguished', types.StringType(), False),
#types.StructField('downs', types.LongType(), False),
#types.StructField('edited', types.StringType(), False),
#types.StructField('gilded', types.LongType(), False),
#types.StructField('id', types.StringType(), False),
#types.StructField('link_id', types.StringType(), False),
#types.StructField('name', types.StringType(), False),
#types.StructField('parent_id', types.StringType(), True),
#types.StructField('retrieved_on', types.LongType(), False),
types.StructField('score', types.LongType(), False),
#types.StructField('score_hidden', types.BooleanType(), False),
types.StructField('subreddit', types.StringType(), False),
#types.StructField('subreddit_id', types.StringType(), False),
#types.StructField('ups', types.LongType(), False),
])
def main():
in_directory = sys.argv[1]
out_directory = sys.argv[2]
comments = spark.read.json(in_directory, schema=schema)
# TODO: calculate averages, sort by subreddit. Sort by average score and output that too.
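    # One possible sketch (not the only solution; names chosen to match the
    # commented-out writes below):
    #   averages = comments.groupBy('subreddit').agg(
    #       functions.avg('score').alias('average_score'))
    #   averages_by_subreddit = averages.sort('subreddit')
    #   averages_by_score = averages.sort('average_score', ascending=False)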
#averages_by_subreddit.write.csv(out_directory + '-subreddit', mode='overwrite')
#averages_by_score.write.csv(out_directory + '-score', mode='overwrite')
if __name__=='__main__':
main() | MockyJoke/numbers | ex10/code/reddit_averages_hint.py | Python | mit | 2,099 |
# Copyright Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import xml.parsers.expat
from tests.tools import *
from azurelinuxagent.common.protocol.wire import *
from azurelinuxagent.common.osutil import get_osutil
class TestRemoteAccess(AgentTestCase):
def test_parse_remote_access(self):
data_str = load_data('wire/remote_access_single_account.xml')
remote_access = RemoteAccess(data_str)
self.assertNotEquals(None, remote_access)
self.assertEquals("1", remote_access.incarnation)
self.assertEquals(1, len(remote_access.user_list.users), "User count does not match.")
self.assertEquals("testAccount", remote_access.user_list.users[0].name, "Account name does not match")
self.assertEquals("encryptedPasswordString", remote_access.user_list.users[0].encrypted_password, "Encrypted password does not match.")
self.assertEquals("2019-01-01", remote_access.user_list.users[0].expiration, "Expiration does not match.")
@patch('azurelinuxagent.common.protocol.wire.WireClient.get_goal_state',
return_value=GoalState(load_data('wire/goal_state.xml')))
def test_update_remote_access_conf_no_remote_access(self, _):
protocol = WireProtocol('12.34.56.78')
goal_state = protocol.client.get_goal_state()
protocol.client.update_remote_access_conf(goal_state)
def test_parse_two_remote_access_accounts(self):
data_str = load_data('wire/remote_access_two_accounts.xml')
remote_access = RemoteAccess(data_str)
self.assertNotEquals(None, remote_access)
self.assertEquals("1", remote_access.incarnation)
self.assertEquals(2, len(remote_access.user_list.users), "User count does not match.")
self.assertEquals("testAccount1", remote_access.user_list.users[0].name, "Account name does not match")
self.assertEquals("encryptedPasswordString", remote_access.user_list.users[0].encrypted_password, "Encrypted password does not match.")
self.assertEquals("2019-01-01", remote_access.user_list.users[0].expiration, "Expiration does not match.")
self.assertEquals("testAccount2", remote_access.user_list.users[1].name, "Account name does not match")
self.assertEquals("encryptedPasswordString", remote_access.user_list.users[1].encrypted_password, "Encrypted password does not match.")
self.assertEquals("2019-01-01", remote_access.user_list.users[1].expiration, "Expiration does not match.")
def test_parse_ten_remote_access_accounts(self):
data_str = load_data('wire/remote_access_10_accounts.xml')
remote_access = RemoteAccess(data_str)
self.assertNotEquals(None, remote_access)
self.assertEquals(10, len(remote_access.user_list.users), "User count does not match.")
def test_parse_duplicate_remote_access_accounts(self):
data_str = load_data('wire/remote_access_duplicate_accounts.xml')
remote_access = RemoteAccess(data_str)
self.assertNotEquals(None, remote_access)
self.assertEquals(2, len(remote_access.user_list.users), "User count does not match.")
self.assertEquals("testAccount", remote_access.user_list.users[0].name, "Account name does not match")
self.assertEquals("encryptedPasswordString", remote_access.user_list.users[0].encrypted_password, "Encrypted password does not match.")
self.assertEquals("2019-01-01", remote_access.user_list.users[0].expiration, "Expiration does not match.")
self.assertEquals("testAccount", remote_access.user_list.users[1].name, "Account name does not match")
self.assertEquals("encryptedPasswordString", remote_access.user_list.users[1].encrypted_password, "Encrypted password does not match.")
self.assertEquals("2019-01-01", remote_access.user_list.users[1].expiration, "Expiration does not match.")
def test_parse_zero_remote_access_accounts(self):
data_str = load_data('wire/remote_access_no_accounts.xml')
remote_access = RemoteAccess(data_str)
self.assertNotEquals(None, remote_access)
self.assertEquals(0, len(remote_access.user_list.users), "User count does not match.")
@patch('azurelinuxagent.common.protocol.wire.WireClient.get_goal_state',
return_value=GoalState(load_data('wire/goal_state_remote_access.xml')))
@patch('azurelinuxagent.common.protocol.wire.WireClient.fetch_config',
return_value=load_data('wire/remote_access_single_account.xml'))
@patch('azurelinuxagent.common.protocol.wire.WireClient.get_header_for_cert')
def test_update_remote_access_conf_remote_access(self, _1, _2, _3):
protocol = WireProtocol('12.34.56.78')
goal_state = protocol.client.get_goal_state()
protocol.client.update_remote_access_conf(goal_state)
self.assertNotEquals(None, protocol.client.remote_access)
self.assertEquals(1, len(protocol.client.remote_access.user_list.users))
self.assertEquals('testAccount', protocol.client.remote_access.user_list.users[0].name)
self.assertEquals('encryptedPasswordString', protocol.client.remote_access.user_list.users[0].encrypted_password)
def test_parse_bad_remote_access_data(self):
data = "foobar"
self.assertRaises(xml.parsers.expat.ExpatError, RemoteAccess, data) | hglkrijger/WALinuxAgent | tests/ga/test_remoteaccess.py | Python | apache-2.0 | 5,846 |
#!/usr/bin/env python
# coding=UTF-8
__author__ = "Pierre-Yves Langlois"
__copyright__ = "https://github.com/pylanglois/uwsa/blob/master/LICENCE"
__credits__ = ["Pierre-Yves Langlois"]
__license__ = "BSD"
__maintainer__ = "Pierre-Yves Langlois"
from uwsas.common import *
from uwsas.core import L
from uwsas.commands.command_manager import cmanager
from uwsas.commands.install import InstallCommand
class InstallAWStatsCommand(InstallCommand):
NAME = 'awstats'
def __init__(self):
InstallCommand.__init__(self)
self.packages = "awstats"
self.add_package(self.packages)
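# Importing this module has the side effect of registering the command below,
# presumably exposing "awstats" as an install target.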
cmanager.get(InstallCommand.NAME).register(InstallAWStatsCommand.NAME, InstallAWStatsCommand())
| pylanglois/uwsa | uwsas/commands/install_awstats.py | Python | bsd-3-clause | 704 |
#encoding: utf8
import struct
import functools
import os
import rejit.common
import rejit.x86encoder
from rejit.x86encoder import int32bin, Scale, Reg, Opcode
class CompilationError(rejit.common.RejitError): pass
class JITCompiler:
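    """Compiles a linear IR to x86-32 or x86-64 machine code by threading
    (ir, compilation_data) through an ordered sequence of passes."""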
def __init__(self):
pass
def compile_to_x86_32(self, ir, args, var_sizes, save_hex_file=None):
# used to relay information between passes (other than transformed IR)
compilation_data = {'args': args, 'var_sizes':var_sizes}
compilation_data['encoder'] = rejit.x86encoder.Encoder32()
# apply compilation passes in this order
ir_transformed, compilation_data = functools.reduce(lambda ir_data, ir_pass: ir_pass(ir_data),
[
JITCompiler._find_vars_pass,
JITCompiler._allocate_vars_pass,
JITCompiler._add_function_prologue_pass,
JITCompiler._replace_vars_pass,
JITCompiler._replace_values_pass,
JITCompiler._impl_cmp_pass,
JITCompiler._impl_mov_pass,
JITCompiler._impl_inc_pass,
JITCompiler._impl_set_pass,
JITCompiler._impl_ret_pass,
JITCompiler._find_labels_pass,
JITCompiler._impl_jmps_ins_placeholder_pass,
JITCompiler._impl_jmps_pass,
JITCompiler._purge_labels_pass,
],
(ir, compilation_data))
# merge generated x86 instructions to create final binary
x86_code = JITCompiler._merge_binary_instructions(ir_transformed)
if save_hex_file:
with open(save_hex_file, 'wt') as output:
for b in x86_code:
output.write('{:02x} '.format(b))
return x86_code, compilation_data
def compile_to_x86_64(self, ir, args, var_sizes, save_hex_file=None):
# used to relay information between passes (other than transformed IR)
compilation_data = {'args': args, 'var_sizes':var_sizes}
compilation_data['encoder'] = rejit.x86encoder.Encoder64()
# apply compilation passes in this order
ir_transformed, compilation_data = functools.reduce(lambda ir_data, ir_pass: ir_pass(ir_data),
[
JITCompiler._find_vars_pass,
JITCompiler._allocate_vars_pass_64,
JITCompiler._add_function_prologue_pass_64,
JITCompiler._replace_vars_pass,
JITCompiler._replace_values_pass,
JITCompiler._impl_cmp_pass,
JITCompiler._impl_mov_pass,
JITCompiler._impl_inc_pass_64,
JITCompiler._impl_set_pass,
JITCompiler._impl_ret_pass,
JITCompiler._find_labels_pass,
JITCompiler._impl_jmps_ins_placeholder_pass,
JITCompiler._impl_jmps_pass,
JITCompiler._purge_labels_pass,
],
(ir, compilation_data))
# merge generated x86 instructions to create final binary
x86_code = JITCompiler._merge_binary_instructions(ir_transformed)
if save_hex_file:
with open(save_hex_file, 'wt') as output:
for b in x86_code:
output.write('{:02x} '.format(b))
return x86_code, compilation_data
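    # A rough usage sketch (the IR below is an assumption inferred from the
    # passes in this file, not a documented format):
    #   ir = [('cmp value', 'ch', 'a'),
    #         ('jump ne', 'fail'),
    #         ('ret', True),
    #         ('label', 'fail'),
    #         ('ret', False)]
    #   code, data = JITCompiler().compile_to_x86_64(
    #       ir, args=['ch'], var_sizes={'ch': 'int'})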
@staticmethod
def _find_vars_pass(ir_data):
ir, data = ir_data
names_read = set()
names_written = set()
# find variables referenced by IR instructions
for inst in ir:
if inst[0] == 'cmp name':
names_read.add(inst[1])
names_read.add(inst[2])
elif inst[0] == 'cmp value':
names_read.add(inst[1])
elif inst[0] == 'set':
names_written.add(inst[1])
elif inst[0] == 'inc':
names_read.add(inst[1])
elif inst[0] == 'move':
names_written.add(inst[1])
names_read.add(inst[2])
elif inst[0] == 'move indexed':
names_written.add(inst[1])
names_read.add(inst[2])
names_read.add(inst[3])
# probably could skip instructions which only use write-only vars...
# and do: vars_to_allocate = set(names_read)
vars_to_allocate = names_read | names_written
data['names_read'] = names_read
data['names_written'] = names_written
data['vars_to_allocate'] = vars_to_allocate
return ir_data
@staticmethod
def _allocate_vars_pass(ir_data):
ir, data = ir_data
vars_to_allocate = data['vars_to_allocate']
# registers available for variables
# can't access ESI EDI lowest byte in 32bit mode
reg_list = [Reg.EAX, Reg.ECX, Reg.EDX, Reg.EBX]
# currently variables can be stored in registers only
if len(reg_list) < len(vars_to_allocate):
raise CompilationError('Not enough registers')
var_regs = dict(zip(vars_to_allocate, reg_list))
used_regs = set(var_regs.values())
        # callee-saved registers
calle_saved = [Reg.EBX, Reg.ESI, Reg.EDI, Reg.EBP]
# find registers which have to be restored
regs_to_restore = list(used_regs & set(calle_saved))
data['var_regs'] = var_regs
data['used_regs'] = used_regs
data['regs_to_restore'] = regs_to_restore
return ir_data
@staticmethod
def _allocate_vars_pass_64(ir_data):
ir, data = ir_data
args = data['args']
vars_to_allocate = data['vars_to_allocate']
if os.name == 'nt':
# first arguments are passed in registers (and we don't support more)
arg_regs = [Reg.ECX, Reg.EDX, Reg.R8, Reg.R9]
# caller-saved registers - no need to restore them
scratch_regs = [Reg.EAX, Reg.ECX, Reg.EDX, Reg.R8, Reg.R9, Reg.R10, Reg.R11]
elif os.name == 'posix':
arg_regs = [Reg.EDI, Reg.ESI, Reg.EDX, Reg.ECX, Reg.R8, Reg.R9]
scratch_regs = [Reg.EAX, Reg.ECX, Reg.EDX, Reg.ESI, Reg.EDI, Reg.R8, Reg.R9, Reg.R10, Reg.R11]
else:
raise CompilationError('Not supported system: {}'.format(os.name))
if len(args) > len(arg_regs):
raise CompilationError('More than {} args currently not supported on this platform'.format(len(args)))
# arguments are allocated to their registers
var_regs = dict(zip(args, arg_regs))
# registers which are available for variables which aren't arguments
reg_list = set(scratch_regs) - set(var_regs.values())
not_allocated = vars_to_allocate - set(args)
# currently variables can be stored in registers only
if len(reg_list) < len(not_allocated):
            raise CompilationError('Not enough registers')
var_regs.update(dict(zip(not_allocated, reg_list)))
used_regs = set(var_regs.values())
        # only caller-saved registers are used, so nothing needs restoring in 64bit mode
regs_to_restore = []
data['var_regs'] = var_regs
data['used_regs'] = used_regs
data['regs_to_restore'] = regs_to_restore
return ir_data
@staticmethod
def _add_function_prologue_pass(ir_data):
ir, data = ir_data
args = data['args']
encoder = data['encoder']
var_regs = data['var_regs']
var_sizes = data['var_sizes']
regs_to_restore = data['regs_to_restore']
ir_new_stack_frame = JITCompiler._new_stack_frame(encoder)
ir_calle_reg_save = JITCompiler._calle_reg_save(regs_to_restore, encoder)
ir_load_args = JITCompiler._load_args(args, var_regs, var_sizes, encoder)
return (ir_new_stack_frame + ir_calle_reg_save + ir_load_args + ir, data)
@staticmethod
def _add_function_prologue_pass_64(ir_data):
ir, data = ir_data
encoder = data['encoder']
ir_new_stack_frame = JITCompiler._new_stack_frame(encoder)
return (ir_new_stack_frame + ir, data)
@staticmethod
def _load_args(args, var_regs, var_sizes, encoder):
# offset from [ebp] to arguments (return address, old ebp)
# warning: different in 64bit code
args_offset = 8
ir_1 = []
total = args_offset
for arg in args:
if arg in var_regs:
binary = encoder.encode_instruction([Opcode.MOV_R_RM], reg=var_regs[arg], base=Reg.EBP, disp=total, size=var_sizes[arg])
ir_1.append((('mov',var_regs[arg],'=[',Reg.ESP,'+',total,']'), binary))
total += encoder.type2size(var_sizes[arg])
return ir_1
@staticmethod
def _new_stack_frame(encoder):
ir_1 = []
binary = encoder.enc_push(Reg.EBP)
ir_1.append((('push', Reg.EBP),binary))
binary = encoder.encode_instruction([Opcode.MOV_R_RM], reg=Reg.EBP,reg_mem=Reg.ESP, size='long')
ir_1.append((('mov',Reg.EBP,Reg.ESP), binary))
return ir_1
@staticmethod
def _calle_reg_save(regs_to_restore, encoder):
ir_1 = []
for reg in regs_to_restore:
binary = encoder.enc_push(reg)
ir_1.append((('push', reg),binary))
return ir_1
@staticmethod
def _replace_vars_pass(ir_data):
ir, data = ir_data
var_regs = data['var_regs']
var_sizes = data['var_sizes']
encoder = data['encoder']
ir_1 = []
for inst in ir:
if inst[0] in { 'cmp name', 'cmp value', 'set', 'inc', 'move', 'move indexed'}:
if inst[0] == 'cmp name':
assert encoder.type2size(var_sizes[inst[1]]) == encoder.type2size(var_sizes[inst[2]])
ir_1.append((inst[0], var_regs[inst[1]], var_regs[inst[2]], var_sizes[inst[1]]))
elif inst[0] == 'cmp value':
ir_1.append((inst[0], var_regs[inst[1]], inst[2], var_sizes[inst[1]]))
elif inst[0] == 'set':
ir_1.append((inst[0], var_regs[inst[1]], inst[2], var_sizes[inst[1]]))
elif inst[0] == 'inc':
ir_1.append((inst[0], var_regs[inst[1]], var_sizes[inst[1]]))
elif inst[0] == 'move':
assert var_sizes[inst[1]] == var_sizes[inst[2]]
ir_1.append((inst[0], var_regs[inst[1]], var_regs[inst[2]], var_sizes[inst[1]]))
elif inst[0] == 'move indexed':
assert encoder.type2size(var_sizes[inst[2]]) == encoder.type2size(var_sizes[inst[3]])
ir_1.append((inst[0], var_regs[inst[1]], var_regs[inst[2]], var_regs[inst[3]],
var_sizes[inst[1]], var_sizes[inst[2]]))
else:
ir_1.append(inst)
return (ir_1, data)
@staticmethod
def _replace_values_pass(ir_data):
ir, data = ir_data
ir_1 = []
for inst in ir:
if inst[0] == 'cmp value':
ir_1.append((inst[0], inst[1], ord(inst[2]), inst[3]))
elif inst[0] == 'set':
ir_1.append((inst[0], inst[1], inst[2], inst[3]))
elif inst[0] == 'ret':
ir_1.append((inst[0], 1 if inst[1] else 0))
else:
ir_1.append(inst)
return (ir_1, data)
@staticmethod
def _impl_cmp_pass(ir_data):
ir, data = ir_data
encoder = data['encoder']
ir_1 = []
for inst in ir:
if inst[0] == 'cmp value':
binary = encoder.encode_instruction([Opcode.CMP_RM_8_IMM_8], opex=Opcode.CMP_RM_8_IMM_8_EX, reg_mem=inst[1], imm=inst[2], size=inst[3])
ir_1.append((('cmp',inst[1],inst[2]), binary))
elif inst[0] == 'cmp name':
binary = encoder.encode_instruction([Opcode.CMP_RM_R], reg=inst[1], reg_mem=inst[2], size=inst[3])
ir_1.append((('cmp',inst[1],inst[2]), binary))
else:
ir_1.append(inst)
return (ir_1, data)
@staticmethod
def _impl_mov_pass(ir_data):
ir, data = ir_data
encoder = data['encoder']
ir_1 = []
for inst in ir:
if inst[0] == 'move indexed':
binary = encoder.encode_instruction([Opcode.MOV_R_RM_8], reg=inst[1],base=inst[2],index=inst[3],scale=Scale.MUL_1,size=inst[4], address_size=inst[5])
ir_1.append((('mov',inst[1],'=[',inst[2],'+',inst[3],']'), binary))
elif inst[0] == 'move':
binary = encoder.encode_instruction([Opcode.MOV_R_RM], reg=inst[1],reg_mem=inst[2],size=inst[3])
ir_1.append((inst, binary))
else:
ir_1.append(inst)
return (ir_1, data)
@staticmethod
def _impl_inc_pass(ir_data):
ir, data = ir_data
encoder = data['encoder']
ir_1 = []
for inst in ir:
if inst[0] == 'inc':
binary = encoder.enc_inc(inst[1], size=inst[2])
ir_1.append((inst, binary))
else:
ir_1.append(inst)
return (ir_1, data)
@staticmethod
def _impl_inc_pass_64(ir_data):
ir, data = ir_data
encoder = data['encoder']
ir_1 = []
for inst in ir:
if inst[0] == 'inc':
binary = encoder.enc_inc(inst[1], inst[2])
ir_1.append((inst, binary))
else:
ir_1.append(inst)
return (ir_1, data)
@staticmethod
def _impl_set_pass(ir_data):
ir, data = ir_data
encoder = data['encoder']
ir_1 = []
for inst in ir:
if inst[0] == 'set':
binary = encoder.encode_instruction([Opcode.MOV_R_IMM], opcode_reg=inst[1], imm=inst[2], size=inst[3])
ir_1.append((('mov',inst[1], inst[2]), binary))
else:
ir_1.append(inst)
return (ir_1, data)
@staticmethod
def _impl_ret_pass(ir_data):
ir, data = ir_data
regs_to_restore = data['regs_to_restore']
encoder = data['encoder']
ir_1 = []
for inst in ir:
if inst[0] == 'ret':
binary = encoder.encode_instruction([Opcode.MOV_R_IMM], opcode_reg=Reg.EAX, imm=1 if inst[1] else 0,size='int')
ir_1.append((('mov', Reg.EAX, inst[1]),binary))
ir_1.append(('jump','return'))
else:
ir_1.append(inst)
ir_1.append(('label', 'return'))
for reg in reversed(regs_to_restore):
binary = encoder.enc_pop(reg)
ir_1.append((('pop', reg),binary))
binary = encoder.enc_pop(Reg.EBP)
ir_1.append((('pop', Reg.EBP),binary))
binary = encoder.enc_ret()
ir_1.append((('ret',),binary))
return (ir_1, data)
@staticmethod
def _find_labels_pass(ir_data):
ir, data = ir_data
labels = dict()
for num,inst in enumerate(ir):
if inst[0] == 'label':
if inst[1] in labels:
raise CompilationError('label "{}" already defined'.format(inst[1]))
labels[inst[1]] = num
data['labels'] = labels
return ir_data
@staticmethod
def _impl_jmps_ins_placeholder_pass(ir_data):
ir, data = ir_data
labels = data['labels']
encoder = data['encoder']
labels_set = set(labels)
ir_1 = []
jmp_targets = set()
jmp_map = {'jump':'jmp', 'jump eq':'je', 'jump ne':'jne'}
for num,inst in enumerate(ir):
if inst[0] in {'jump', 'jump eq', 'jump ne'}:
if inst[1] not in labels_set:
raise CompilationError('label "{}" not found'.format(inst[1]))
jmp_targets.add(inst[1])
if inst[0] == 'jump':
binary = encoder.enc_jmp_near(0)
elif inst[0] == 'jump eq':
binary = encoder.enc_je_near(0)
elif inst[0] == 'jump ne':
binary = encoder.enc_jne_near(0)
ir_1.append(((jmp_map[inst[0]], inst[1]), binary))
else:
ir_1.append(inst)
data['jmp_targets'] = jmp_targets
return (ir_1, data)
@staticmethod
def _impl_jmps_pass(ir_data):
ir, data = ir_data
labels = data['labels']
ir_1 = []
for num,inst in enumerate(ir):
if inst[0][0] in {'jmp', 'je', 'jne'}:
# calculate jump offset
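            # Near jumps take a rel32 offset measured from the end of the
            # jump instruction. Forward jumps add up the encoded sizes of
            # the instructions in between; backward jumps also include the
            # jump itself (hence num+1) and negate the total. 'label'
            # entries carry no encoding, so they are filtered out.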
target_num = labels[inst[0][1]]
if target_num > num:
no_label = filter(lambda x: x[0]!='label', ir[num+1:target_num])
jump_length = functools.reduce(lambda acc,x: acc + len(x[1]), no_label, 0)
else:
no_label = filter(lambda x: x[0]!='label', ir[target_num:num+1])
jump_length = functools.reduce(lambda acc,x: acc - len(x[1]), no_label, 0)
new_bin = inst[1][:-4] + int32bin(jump_length)
ir_1.append((inst[0], new_bin))
else:
ir_1.append(inst)
return (ir_1, data)
@staticmethod
def _purge_labels_pass(ir_data):
ir, data = ir_data
return (list(filter(lambda x: x[0]!='label', ir)), data)
@staticmethod
def _merge_binary_instructions(ir):
return functools.reduce(lambda acc, x: acc+x, map(lambda x: x[1], ir))
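    # For reference, a tiny illustrative call (hypothetical IR):
    #     _merge_binary_instructions([(('mov',), b'\x89\xc8'), (('ret',), b'\xc3')])
    #     # -> b'\x89\xc8\xc3'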
| ziowk/rejit | rejit/jitcompiler.py | Python | gpl-2.0 | 17,668 |
from corehq.apps.adm.admin import BaseADMAdminInterface
from corehq.apps.adm.admin.forms import ADMReportForm
from corehq.apps.adm.models import ADMReport
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn
class ADMReportAdminInterface(BaseADMAdminInterface):
name = "Default ADM Reports"
description = "The report that shows up by default for each domain"
slug = "default_adm_reports"
crud_item_type = "ADM Report"
document_class = ADMReport
form_class = ADMReportForm
@property
def headers(self):
return DataTablesHeader(
DataTablesColumn("Reporting Section"),
DataTablesColumn("Slug"),
DataTablesColumn("Domain"),
DataTablesColumn("Report Name"),
DataTablesColumn("Description"),
DataTablesColumn("Columns"),
DataTablesColumn("Sort By Default Column"),
DataTablesColumn("Sort By Direction"),
DataTablesColumn("Key Type"),
DataTablesColumn("Edit"),
)
@property
def rows(self):
rows = []
key = ["defaults all slug"]
data = self.document_class.view('adm/all_default_reports',
reduce=False,
include_docs=True,
startkey=key,
endkey=key+[{}]
).all()
for item in data:
rows.append(item.admin_crud.row)
return rows
| gmimano/commcaretest | corehq/apps/adm/admin/reports.py | Python | bsd-3-clause | 1,425 |
# THIS FILE IS GENERATED FROM NUMPY SETUP.PY
short_version = '1.8.1'
version = '1.8.1'
full_version = '1.8.1'
git_revision = '62a7575fd82ddf028517780c01fecf7e0cca27aa'
release = True
if not release:
version = full_version
| AdaptiveApplications/carnegie | tarc_bus_locator_client/numpy-1.8.1/numpy/version.py | Python | mit | 228 |
"""
Tools for generating forms based on Peewee models
(cribbed from wtforms.ext.django)
"""
from collections import namedtuple
from wtforms import Form
from wtforms import fields as f
from wtforms import validators
from wtfpeewee.fields import ModelSelectField
from wtfpeewee.fields import SelectChoicesField
from wtfpeewee.fields import SelectQueryField
from wtfpeewee.fields import WPDateField
from wtfpeewee.fields import WPDateTimeField
from wtfpeewee.fields import WPTimeField
from wtfpeewee._compat import text_type
from peewee import BareField
from peewee import BigIntegerField
from peewee import BlobField
from peewee import BooleanField
from peewee import CharField
from peewee import DateField
from peewee import DateTimeField
from peewee import DecimalField
from peewee import DoubleField
from peewee import FloatField
from peewee import ForeignKeyField
from peewee import IntegerField
from peewee import PrimaryKeyField
from peewee import TextField
from peewee import TimeField
from peewee import TimestampField
__all__ = (
'FieldInfo',
'ModelConverter',
'model_fields',
'model_form')
def handle_null_filter(data):
if data == '':
return None
return data
FieldInfo = namedtuple('FieldInfo', ('name', 'field'))
class ModelConverter(object):
defaults = {
BareField: f.TextField,
BigIntegerField: f.IntegerField,
BlobField: f.TextAreaField,
BooleanField: f.BooleanField,
CharField: f.TextField,
DateField: WPDateField,
DateTimeField: WPDateTimeField,
DecimalField: f.DecimalField,
DoubleField: f.FloatField,
FloatField: f.FloatField,
IntegerField: f.IntegerField,
PrimaryKeyField: f.HiddenField,
TextField: f.TextAreaField,
TimeField: WPTimeField,
TimestampField: WPDateTimeField,
}
coerce_defaults = {
BigIntegerField: int,
CharField: text_type,
DoubleField: float,
FloatField: float,
IntegerField: int,
TextField: text_type,
}
required = (
CharField,
DateTimeField,
ForeignKeyField,
PrimaryKeyField,
TextField)
def __init__(self, additional=None, additional_coerce=None, overrides=None):
self.converters = {ForeignKeyField: self.handle_foreign_key}
if additional:
self.converters.update(additional)
self.coerce_settings = dict(self.coerce_defaults)
if additional_coerce:
self.coerce_settings.update(additional_coerce)
self.overrides = overrides or {}
def handle_foreign_key(self, model, field, **kwargs):
if field.null:
kwargs['allow_blank'] = True
if field.choices is not None:
field_obj = SelectQueryField(query=field.choices, **kwargs)
else:
field_obj = ModelSelectField(model=field.rel_model, **kwargs)
return FieldInfo(field.name, field_obj)
def convert(self, model, field, field_args):
kwargs = {
'label': field.verbose_name,
'validators': [],
'filters': [],
'default': field.default,
'description': field.help_text}
if field_args:
kwargs.update(field_args)
if kwargs['validators']:
# Create a copy of the list since we will be modifying it.
kwargs['validators'] = list(kwargs['validators'])
if field.null:
# Treat empty string as None when converting.
kwargs['filters'].append(handle_null_filter)
if (field.null or (field.default is not None)) and not field.choices:
# If the field can be empty, or has a default value, do not require
# it when submitting a form.
kwargs['validators'].append(validators.Optional())
else:
if isinstance(field, self.required):
kwargs['validators'].append(validators.Required())
if field.name in self.overrides:
return FieldInfo(field.name, self.overrides[field.name](**kwargs))
# Allow custom-defined Peewee field classes to define their own conversion,
# making it so that code which calls model_form() doesn't have to have special
# cases, especially when called for the same peewee.Model from multiple places, or
# when called in a generic context which the end-developer has less control over,
# such as via flask-admin.
if hasattr(field, 'wtf_field'):
return FieldInfo(field.name, field.wtf_field(model, **kwargs))
for converter in self.converters:
if isinstance(field, converter):
return self.converters[converter](model, field, **kwargs)
else:
for converter in self.defaults:
if not isinstance(field, converter):
# Early-continue because it simplifies reading the following code.
continue
if issubclass(self.defaults[converter], f.FormField):
# FormField fields (i.e. for nested forms) do not support
# filters.
kwargs.pop('filters')
if field.choices or 'choices' in kwargs:
choices = kwargs.pop('choices', field.choices)
if converter in self.coerce_settings or 'coerce' in kwargs:
coerce_fn = kwargs.pop('coerce',
self.coerce_settings[converter])
allow_blank = kwargs.pop('allow_blank', field.null)
kwargs.update({
'choices': choices,
'coerce': coerce_fn,
'allow_blank': allow_blank})
return FieldInfo(field.name, SelectChoicesField(**kwargs))
return FieldInfo(field.name, self.defaults[converter](**kwargs))
raise AttributeError("There is not possible conversion "
"for '%s'" % type(field))
def model_fields(model, allow_pk=False, only=None, exclude=None,
field_args=None, converter=None):
"""
Generate a dictionary of fields for a given Peewee model.
See `model_form` docstring for description of parameters.
"""
converter = converter or ModelConverter()
field_args = field_args or {}
model_fields = list(model._meta.sorted_fields)
if not allow_pk:
model_fields.pop(0)
if only:
model_fields = [x for x in model_fields if x.name in only]
elif exclude:
model_fields = [x for x in model_fields if x.name not in exclude]
field_dict = {}
for model_field in model_fields:
name, field = converter.convert(
model,
model_field,
field_args.get(model_field.name))
field_dict[name] = field
return field_dict
def model_form(model, base_class=Form, allow_pk=False, only=None, exclude=None,
field_args=None, converter=None):
"""
Create a wtforms Form for a given Peewee model class::
from wtfpeewee.orm import model_form
from myproject.myapp.models import User
UserForm = model_form(User)
:param model:
A Peewee model class
:param base_class:
Base form class to extend from. Must be a ``wtforms.Form`` subclass.
:param only:
An optional iterable with the property names that should be included in
the form. Only these properties will have fields.
:param exclude:
An optional iterable with the property names that should be excluded
from the form. All other properties will have fields.
:param field_args:
An optional dictionary of field names mapping to keyword arguments used
to construct each field object.
:param converter:
A converter to generate the fields based on the model properties. If
not set, ``ModelConverter`` is used.
"""
field_dict = model_fields(model, allow_pk, only, exclude, field_args, converter)
return type(model.__name__ + 'Form', (base_class, ), field_dict)
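# A slightly fuller sketch of typical usage (hypothetical model and field
# names, following the docstring above):
#
#     class Post(Model):
#         title = CharField()
#         body = TextField(null=True)
#
#     PostForm = model_form(Post, only=('title', 'body'),
#                           field_args={'title': {'label': 'Headline'}})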
| bryhoyt/wtf-peewee | wtfpeewee/orm.py | Python | mit | 8,207 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# These imports are assumed by the type annotation and the
# aiplatform.gapic references below.
from google.cloud import aiplatform
import google.cloud.aiplatform_v1beta1
def make_parent(parent: str) -> str:
# Sample function parameter parent in create_hyperparameter_tuning_job_using_python_package_sample
parent = parent
return parent
def make_hyperparameter_tuning_job(
display_name: str, executor_image_uri: str, package_uri: str, python_module: str,
) -> google.cloud.aiplatform_v1beta1.types.hyperparameter_tuning_job.HyperparameterTuningJob:
# study_spec
metric = {
"metric_id": "val_rmse",
"goal": aiplatform.gapic.StudySpec.MetricSpec.GoalType.MINIMIZE,
}
conditional_parameter_decay = {
"parameter_spec": {
"parameter_id": "decay",
"double_value_spec": {"min_value": 1e-07, "max_value": 1},
"scale_type": aiplatform.gapic.StudySpec.ParameterSpec.ScaleType.UNIT_LINEAR_SCALE,
},
"parent_discrete_values": {"values": [32, 64]},
}
conditional_parameter_learning_rate = {
"parameter_spec": {
"parameter_id": "learning_rate",
"double_value_spec": {"min_value": 1e-07, "max_value": 1},
"scale_type": aiplatform.gapic.StudySpec.ParameterSpec.ScaleType.UNIT_LINEAR_SCALE,
},
"parent_discrete_values": {"values": [4, 8, 16]},
}
parameter = {
"parameter_id": "batch_size",
"discrete_value_spec": {"values": [4, 8, 16, 32, 64, 128]},
"scale_type": aiplatform.gapic.StudySpec.ParameterSpec.ScaleType.UNIT_LINEAR_SCALE,
"conditional_parameter_specs": [
conditional_parameter_decay,
conditional_parameter_learning_rate,
],
}
# trial_job_spec
machine_spec = {
"machine_type": "n1-standard-4",
"accelerator_type": aiplatform.gapic.AcceleratorType.NVIDIA_TESLA_K80,
"accelerator_count": 1,
}
worker_pool_spec = {
"machine_spec": machine_spec,
"replica_count": 1,
"python_package_spec": {
"executor_image_uri": executor_image_uri,
"package_uris": [package_uri],
"python_module": python_module,
"args": [],
},
}
# hyperparameter_tuning_job
hyperparameter_tuning_job = {
"display_name": display_name,
"max_trial_count": 4,
"parallel_trial_count": 2,
"study_spec": {
"metrics": [metric],
"parameters": [parameter],
"algorithm": aiplatform.gapic.StudySpec.Algorithm.RANDOM_SEARCH,
},
"trial_job_spec": {"worker_pool_specs": [worker_pool_spec]},
}
return hyperparameter_tuning_job
| sasha-gitg/python-aiplatform | .sample_configs/param_handlers/create_hyperparameter_tuning_job_python_package_sample.py | Python | apache-2.0 | 3,158 |
from website.addons.base.serializer import OAuthAddonSerializer
from website.addons.googledrive.exceptions import ExpiredAuthError
class GoogleDriveSerializer(OAuthAddonSerializer):
@property
def addon_serialized_urls(self):
node = self.node_settings.owner
return {
'files': node.web_url_for('collect_file_trees'),
'config': node.api_url_for('googledrive_config_put'),
'deauthorize': node.api_url_for('googledrive_remove_user_auth'),
'importAuth': node.api_url_for('googledrive_import_user_auth'),
'folders': node.api_url_for('googledrive_folders'),
'accounts': node.api_url_for('list_googledrive_user_accounts')
}
@property
def serialized_node_settings(self):
result = super(GoogleDriveSerializer, self).serialized_node_settings
valid_credentials = True
if self.node_settings.external_account is not None:
try:
self.node_settings.fetch_access_token()
except ExpiredAuthError:
valid_credentials = False
result['validCredentials'] = valid_credentials
return {'result': result}
| haoyuchen1992/osf.io | website/addons/googledrive/serializer.py | Python | apache-2.0 | 1,189 |
# -*- coding: utf-8 -*-
#!/usr/bin/python
#
# This is derived from a cadquery script for generating PDIP models in X3D format
#
# from https://bitbucket.org/hyOzd/freecad-macros
# author hyOzd
# Dimensions are from Microchip's Packaging Specification document:
# DS00000049BY. The body drawing is the same as the QFP generator.
## requirements
## cadquery FreeCAD plugin
## https://github.com/jmwright/cadquery-freecad-module
## to run the script just do: freecad main_generator.py modelName
## e.g. c:\freecad\bin\freecad main_generator.py DIP8
## the script will generate STEP and VRML parametric models
## to be used with kicad StepUp script
#* These are a FreeCAD & cadquery tools *
#* to export generated models in STEP & VRML format. *
#* *
#* cadquery script for generating QFP/SOIC/SSOP/TSSOP models in STEP AP214 *
#* Copyright (c) 2015 *
#* Maurice https://launchpad.net/~easyw *
#* All trademarks within this guide belong to their legitimate owners. *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., *
#* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA *
#* *
#****************************************************************************
__title__ = "make Valve 3D models"
__author__ = "Stefan, based on Valve script"
__Comment__ = 'make varistor 3D models exported to STEP and VRML for Kicad StepUP script'
___ver___ = "1.3.3 14/08/2015"
# maui import cadquery as cq
# maui from Helpers import show
from collections import namedtuple
import math
import sys, os
import datetime
from datetime import datetime
sys.path.append("../_tools")
import exportPartToVRML as expVRML
import shaderColors
# maui start
import FreeCAD, Draft, FreeCADGui
import ImportGui
import FreeCADGui as Gui
#from Gui.Command import *
outdir=os.path.dirname(os.path.realpath(__file__) + os.sep + '..' + os.sep + '_3Dmodels')
scriptdir=os.path.dirname(os.path.realpath(__file__))
sys.path.append(outdir)
sys.path.append(scriptdir)
if FreeCAD.GuiUp:
from PySide import QtCore, QtGui
# Licence information of the generated models.
#################################################################################################
STR_licAuthor = "kicad StepUp"
STR_licEmail = "ksu"
STR_licOrgSys = "kicad StepUp"
STR_licPreProc = "OCC"
STR_licOrg = "FreeCAD"
#################################################################################################
import cq_belfuse # modules parameters
from cq_belfuse import *
import cq_keystone # modules parameters
from cq_keystone import *
import cq_bulgin # modules parameters
from cq_bulgin import *
import cq_schurter # modules parameters
from cq_schurter import *
import cq_tme # modules parameters
from cq_tme import *
import cq_littlefuse # modules parameters
from cq_littlefuse import *
different_models = [
cq_belfuse(),
cq_keystone(),
cq_bulgin(),
cq_schurter(),
cq_tme(),
cq_littlefuse(),
]
def make_3D_model(models_dir, model_class, modelID):
LIST_license = ["",]
CheckedmodelName = 'A_' + modelID.replace('.', '').replace('-', '_').replace('(', '').replace(')', '')
CheckedmodelName = CheckedmodelName
Newdoc = App.newDocument(CheckedmodelName)
App.setActiveDocument(CheckedmodelName)
Gui.ActiveDocument=Gui.getDocument(CheckedmodelName)
destination_dir = model_class.get_dest_3D_dir(modelID)
material_substitutions = model_class.make_3D_model(modelID)
modelName = model_class.get_model_name(modelID)
doc = FreeCAD.ActiveDocument
doc.Label = CheckedmodelName
objs=GetListOfObjects(FreeCAD, doc)
objs[0].Label = CheckedmodelName
restore_Main_Tools()
script_dir=os.path.dirname(os.path.realpath(__file__))
expVRML.say(models_dir)
out_dir=models_dir+os.sep+destination_dir
if not os.path.exists(out_dir):
os.makedirs(out_dir)
exportSTEP(doc, modelName, out_dir)
if LIST_license[0]=="":
LIST_license=Lic.LIST_int_license
LIST_license.append("")
Lic.addLicenseToStep(out_dir + os.sep, modelName+".step", LIST_license,\
STR_licAuthor, STR_licEmail, STR_licOrgSys, STR_licOrg, STR_licPreProc)
# scale and export Vrml model
scale=1/2.54
#exportVRML(doc,modelName,scale,out_dir)
del objs
objs=GetListOfObjects(FreeCAD, doc)
expVRML.say("######################################################################")
expVRML.say(objs)
expVRML.say("######################################################################")
export_objects, used_color_keys = expVRML.determineColors(Gui, objs, material_substitutions)
export_file_name=out_dir+os.sep+modelName+'.wrl'
colored_meshes = expVRML.getColoredMesh(Gui, export_objects , scale)
#expVRML.writeVRMLFile(colored_meshes, export_file_name, used_color_keys)# , LIST_license
expVRML.writeVRMLFile(colored_meshes, export_file_name, used_color_keys, LIST_license)
#scale=0.3937001
#exportVRML(doc,modelName,scale,out_dir)
# Save the doc in Native FC format
saveFCdoc(App, Gui, doc, modelName,out_dir)
#display BBox
Gui.activateWorkbench("PartWorkbench")
Gui.SendMsgToActiveView("ViewFit")
Gui.activeDocument().activeView().viewAxometric()
#FreeCADGui.ActiveDocument.activeObject.BoundingBox = True
def run():
## # get variant names from command line
return
#import step_license as L
import add_license as Lic
# when run from command line
if __name__ == "__main__" or __name__ == "main_generator":
FreeCAD.Console.PrintMessage('\r\nRunning...\r\n')
full_path=os.path.realpath(__file__)
expVRML.say(full_path)
scriptdir=os.path.dirname(os.path.realpath(__file__))
expVRML.say(scriptdir)
sub_path = full_path.split(scriptdir)
expVRML.say(sub_path)
sub_dir_name =full_path.split(os.sep)[-2]
expVRML.say(sub_dir_name)
sub_path = full_path.split(sub_dir_name)[0]
expVRML.say(sub_path)
models_dir=sub_path+"_3Dmodels"
model_to_build = ''
if len(sys.argv) < 3:
FreeCAD.Console.PrintMessage('No variant name is given, add a valid model name as an argument or the argument "all"\r\n')
sys.exit()
else:
model_to_build=sys.argv[2]
found_one = False
if len(model_to_build) > 0:
if model_to_build == 'all' or model_to_build == 'All' or model_to_build == 'ALL':
found_one = True
for n in different_models:
listall = n.get_list_all()
for i in listall:
make_3D_model(models_dir, n, i)
elif model_to_build == 'list':
found_one = True
FreeCAD.Console.PrintMessage('\r\n')
for n in different_models:
listall = n.get_list_all()
for i in listall:
FreeCAD.Console.PrintMessage(i + '\r\n')
else:
for n in different_models:
if n.model_exist(model_to_build):
found_one = True
make_3D_model(models_dir, n, model_to_build)
if not found_one:
print("Parameters for %s doesn't exist, skipping. " % model_to_build)
| easyw/kicad-3d-models-in-freecad | cadquery/FCAD_script_generator/Fuse/main_generator.py | Python | gpl-2.0 | 8,654 |
""" Some utility functions for handling the data. """
import base64, subprocess, numpy
from subprocess import Popen, PIPE, STDOUT
def hash_distance(v1, v2):
"""
Most likely the most inefficient code to calculate hamming distance ever.
"""
result = v1 ^ v2
return float(bin(result).count('1'))
def distance_matrix(list_of_hashes):
result = numpy.array(
[[ hash_distance(x[0],y[0]) for x in list_of_hashes ] for y in list_of_hashes ])
return result
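# Minimal sketch of how the two helpers compose (hypothetical hash values,
# shaped like the (simhash, ...) tuples produced by read_inputs below):
#
#     hash_distance(0b1010, 0b0110)                  # -> 2.0 (two bits differ)
#     distance_matrix([(0b1010, 'f_a'), (0b0110, 'f_b')])
#     # -> array([[0., 2.], [2., 0.]])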
def read_inputs(symbolfile, db_dump_file, file_id_and_address=False):
"""
Reads the input files and returns a list of tuples of the form
[ (hash, function_name, file_name) ] or alternatively
[ (hash, function_name, file_name, file_id, address) ].
"""
# Reads the symbols file for quick lookup of symbols for a given
# fileID:address combination.
syms = open(symbolfile, "r").readlines()
sym_mapping = {}
result = []
for line in syms:
tokens = line.split()
file_id = tokens[0]
file_name = tokens[1]
address = tokens[2]
symbol = tokens[3]
sym_mapping[(file_id, address)] = (symbol, file_name)
print("[!] Read symbol file.")
# Reads the database dump.
db_dump = open(db_dump_file, "r").readlines()
for line in db_dump[3:]:
tokens = line.split()
file_id = tokens[3]
address = tokens[4]
simhash = int(tokens[1]+tokens[2], 16)
lookup = sym_mapping.get((file_id, address))
if lookup:
function_name, file_name = lookup
decoded = SaneBase64Decode(function_name)
if file_id_and_address:
result.append((simhash, decoded, file_name,
file_id, address))
else:
result.append((simhash, decoded, file_name))
print("[!] Read DB dump.")
return result
def SaneBase64Decode(input_string):
""" Because Python3 attempts to win 'most idiotic language ever', not only
does encoding produce strange newlines added to the encoded version, but
decoding may or may not truncate the last character of the decoded string.
This code is an insane solution to obtain sane behavior: Call command line
base64 -d instead of dealing with Python. """
encoded_string = subprocess.run(["base64", "-d"], stdout=PIPE,
input=bytes(input_string, encoding="utf-8")).stdout.decode("utf-8")
return encoded_string
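# Illustrative round trip (relies on a POSIX `base64` binary being on PATH,
# just as the function itself does):
#
#     SaneBase64Decode(base64.b64encode(b"main").decode("ascii"))  # -> "main"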
| googleprojectzero/functionsimsearch | testdata/functionsimsearchutil.py | Python | apache-2.0 | 2,293 |
#!/usr/bin/env python3
import subprocess
from functions import *
from variables import *
# Re-create old folder
RecreateDir(DEPLOY_DIR)
# Copy files/dirs for deployment
dst = DEPLOY_DIR
for src in RELEASE_PATHS_TO_DEPLOY:
Copy(src, dst)
# Deploy
if Os() == 'mac':
args = ['macdeployqt',
'{}'.format(DEPLOY_PATHS[DEPLOY_NAMES.index(WINDOW_APP_FILE)][0]),
'-executable={}'.format(DEPLOY_PATHS[DEPLOY_NAMES.index(CONSOLE_APP_FILE)][0]),
'-executable={}'.format(DEPLOY_PATHS[DEPLOY_NAMES.index(TESTS_FILE)][0]),
'-verbose=1']
subprocess.run(args, stdout=subprocess.PIPE).stdout.decode('utf-8')
elif Os() == 'win':
for path in DEPLOY_PATHS:
args = ['windeployqt',
'{}'.format(pjoin(path)),
'--release',
'--verbose', '1']
subprocess.run(args, stdout=subprocess.PIPE).stdout.decode('utf-8')
elif Os() == 'lin':
args = ['linuxdeployqt',
'{}'.format(DEPLOY_PATHS[DEPLOY_NAMES.index(WINDOW_APP_FILE)][0]),
'-executable={}'.format(DEPLOY_PATHS[DEPLOY_NAMES.index(CONSOLE_APP_FILE)][0]),
'-executable={}'.format(DEPLOY_PATHS[DEPLOY_NAMES.index(TESTS_FILE)][0]),
'-bundle-non-qt-libs']
result = subprocess.run(args, stdout=subprocess.PIPE).stdout.decode('utf-8')
print(result)
#args = ['linuxdeployqt',
# '{}'.format(DEPLOY_PATHS[DEPLOY_NAMES.index(WINDOW_APP_FILE)][0]),
# '-executable={}'.format(DEPLOY_PATHS[DEPLOY_NAMES.index(CONSOLE_APP_FILE)][0]),
# '-executable={}'.format(DEPLOY_PATHS[DEPLOY_NAMES.index(TESTS_FILE)][0]),
# '-appimage']
#result = subprocess.run(args, stdout=subprocess.PIPE).stdout.decode('utf-8')
#print(result)
else:
print('ERROR: Unknown OS')
exit()
| AndrewSazonov/Davinci | Scripts/makeDeploy.py | Python | gpl-3.0 | 1,821 |
def overlap(start_1, end_1, start_2, end_2):
result = min([end_1, end_2]) - max([start_1, start_2]) + 1
if result >= 0:
return result
return 0
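# Illustrative usage (hypothetical inclusive coordinates):
#
#     overlap(1, 5, 3, 7)   # -> 3 (positions 3, 4 and 5 are shared)
#     overlap(1, 2, 5, 6)   # -> 0 (the ranges are disjoint)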
| konrad/kufpybio | kufpybio/helpers.py | Python | isc | 163 |
"""normalize constraint and key names
correct keys for pre 0.5.6 naming convention
Revision ID: 438c27ec1c9
Revises: 439766f6104d
Create Date: 2015-06-13 21:16:32.358778
"""
from __future__ import unicode_literals
from alembic import op
from alembic.context import get_context
from sqlalchemy.dialects.postgresql.base import PGDialect
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "438c27ec1c9"
down_revision = "439766f6104d"
# correct keys for pre 0.5.6 naming convention
def upgrade():
c = get_context()
insp = sa.inspect(c.connection.engine)
# existing migration
# pre naming convention keys
groups_permissions_pkey = "groups_permissions_pkey"
groups_pkey = "groups_pkey"
groups_resources_permissions_pkey = "groups_resources_permissions_pkey"
users_groups_pkey = "users_groups_pkey"
users_permissions_pkey = "users_permissions_pkey"
users_resources_permissions_pkey = "users_resources_permissions_pkey"
if isinstance(c.connection.engine.dialect, PGDialect):
op.execute(
"ALTER INDEX groups_unique_group_name_key RENAME to ix_groups_uq_group_name_key"
) # noqa
op.drop_constraint("groups_permissions_perm_name_check", "groups_permissions")
op.execute(
"""
ALTER TABLE groups_permissions
ADD CONSTRAINT ck_groups_permissions_perm_name CHECK (perm_name::text = lower(perm_name::text));
"""
) # noqa
op.drop_constraint(
"groups_resources_permissions_perm_name_check",
"groups_resources_permissions",
)
op.execute(
"""
ALTER TABLE groups_resources_permissions
ADD CONSTRAINT ck_groups_resources_permissions_perm_name CHECK (perm_name::text = lower(perm_name::text));
"""
) # noqa
op.drop_constraint("user_permissions_perm_name_check", "users_permissions")
op.execute(
"""
ALTER TABLE users_permissions
ADD CONSTRAINT ck_user_permissions_perm_name CHECK (perm_name::text = lower(perm_name::text));
"""
) # noqa
op.drop_constraint(
"users_resources_permissions_perm_name_check", "users_resources_permissions"
)
op.execute(
"""
ALTER TABLE users_resources_permissions
ADD CONSTRAINT ck_users_resources_permissions_perm_name CHECK (perm_name::text = lower(perm_name::text));
"""
) # noqa
op.execute("ALTER INDEX users_email_key2 RENAME to ix_users_uq_lower_email")
op.execute(
"ALTER INDEX users_username_uq2 RENAME to ix_users_ux_lower_username"
) # noqa
if (
groups_permissions_pkey
== insp.get_pk_constraint("groups_permissions")["name"]
):
op.execute(
"ALTER INDEX groups_permissions_pkey RENAME to pk_groups_permissions"
) # noqa
if groups_pkey == insp.get_pk_constraint("groups")["name"]:
op.execute("ALTER INDEX groups_pkey RENAME to pk_groups")
if (
groups_resources_permissions_pkey
== insp.get_pk_constraint("groups_resources_permissions")["name"]
):
op.execute(
"ALTER INDEX groups_resources_permissions_pkey RENAME to pk_groups_resources_permissions"
) # noqa
if users_groups_pkey == insp.get_pk_constraint("users_groups")["name"]:
op.execute("ALTER INDEX users_groups_pkey RENAME to pk_users_groups")
if (
users_permissions_pkey
== insp.get_pk_constraint("users_permissions")["name"]
):
op.execute(
"ALTER INDEX users_permissions_pkey RENAME to pk_users_permissions"
) # noqa
if (
users_resources_permissions_pkey
== insp.get_pk_constraint("users_resources_permissions")["name"]
):
op.execute(
"ALTER INDEX users_resources_permissions_pkey RENAME to pk_users_resources_permissions"
) # noqa
if (
"external_identities_pkey"
== insp.get_pk_constraint("external_identities")["name"]
):
op.execute(
"ALTER INDEX external_identities_pkey RENAME to pk_external_identities"
) # noqa
if "external_identities_local_user_name_fkey" in [
c["name"] for c in insp.get_foreign_keys("external_identities") # noqa
]:
op.drop_constraint(
"external_identities_local_user_name_fkey",
"external_identities",
type_="foreignkey",
)
op.create_foreign_key(
None,
"external_identities",
"users",
remote_cols=["user_name"],
local_cols=["local_user_name"],
onupdate="CASCADE",
ondelete="CASCADE",
)
if "groups_permissions_group_id_fkey" in [
c["name"] for c in insp.get_foreign_keys("groups_permissions")
]:
op.drop_constraint(
"groups_permissions_group_id_fkey",
"groups_permissions",
type_="foreignkey",
)
op.create_foreign_key(
None,
"groups_permissions",
"groups",
remote_cols=["id"],
local_cols=["group_id"],
onupdate="CASCADE",
ondelete="CASCADE",
)
if "groups_group_name_key" in [
c["name"] for c in insp.get_unique_constraints("groups")
]:
op.execute(
"ALTER INDEX groups_group_name_key RENAME to uq_groups_group_name"
) # noqa
if "groups_resources_permissions_group_id_fkey" in [
c["name"]
for c in insp.get_foreign_keys("groups_resources_permissions") # noqa
]:
op.drop_constraint(
"groups_resources_permissions_group_id_fkey",
"groups_resources_permissions",
type_="foreignkey",
)
op.create_foreign_key(
None,
"groups_resources_permissions",
"groups",
remote_cols=["id"],
local_cols=["group_id"],
onupdate="CASCADE",
ondelete="CASCADE",
)
if "groups_resources_permissions_resource_id_fkey" in [
c["name"]
for c in insp.get_foreign_keys("groups_resources_permissions") # noqa
]:
op.drop_constraint(
"groups_resources_permissions_resource_id_fkey",
"groups_resources_permissions",
type_="foreignkey",
)
op.create_foreign_key(
None,
"groups_resources_permissions",
"resources",
remote_cols=["resource_id"],
local_cols=["resource_id"],
onupdate="CASCADE",
ondelete="CASCADE",
)
if "resources_pkey" == insp.get_pk_constraint("resources")["name"]:
op.execute("ALTER INDEX resources_pkey RENAME to pk_resources")
if "resources_owner_group_id_fkey" in [
c["name"] for c in insp.get_foreign_keys("resources")
]:
op.drop_constraint(
"resources_owner_group_id_fkey", "resources", type_="foreignkey"
)
op.create_foreign_key(
None,
"resources",
"groups",
remote_cols=["id"],
local_cols=["owner_group_id"],
onupdate="CASCADE",
ondelete="SET NULL",
)
if "resources_owner_user_id_fkey" in [
c["name"] for c in insp.get_foreign_keys("resources")
]:
op.drop_constraint(
"resources_owner_user_id_fkey", "resources", type_="foreignkey"
)
op.create_foreign_key(
None,
"resources",
"users",
remote_cols=["id"],
local_cols=["owner_user_id"],
onupdate="CASCADE",
ondelete="SET NULL",
)
if "resources_parent_id_fkey" in [
c["name"] for c in insp.get_foreign_keys("resources")
]:
op.drop_constraint(
"resources_parent_id_fkey", "resources", type_="foreignkey"
)
op.create_foreign_key(
None,
"resources",
"resources",
remote_cols=["resource_id"],
local_cols=["parent_id"],
onupdate="CASCADE",
ondelete="SET NULL",
)
if "users_pkey" == insp.get_pk_constraint("users")["name"]:
op.execute("ALTER INDEX users_pkey RENAME to pk_users")
if "users_email_key" in [
c["name"] for c in insp.get_unique_constraints("users")
]:
op.execute("ALTER INDEX users_email_key RENAME to uq_users_email")
if "users_user_name_key" in [
c["name"] for c in insp.get_unique_constraints("users")
]:
op.execute("ALTER INDEX users_user_name_key RENAME to uq_users_user_name")
if "users_groups_group_id_fkey" in [
c["name"] for c in insp.get_foreign_keys("users_groups")
]:
op.drop_constraint(
"users_groups_group_id_fkey", "users_groups", type_="foreignkey"
)
op.create_foreign_key(
None,
"users_groups",
"groups",
remote_cols=["id"],
local_cols=["group_id"],
onupdate="CASCADE",
ondelete="CASCADE",
)
if "users_groups_user_id_fkey" in [
c["name"] for c in insp.get_foreign_keys("users_groups")
]:
op.drop_constraint(
"users_groups_user_id_fkey", "users_groups", type_="foreignkey"
)
op.create_foreign_key(
None,
"users_groups",
"users",
remote_cols=["id"],
local_cols=["user_id"],
onupdate="CASCADE",
ondelete="CASCADE",
)
if "users_permissions_user_id_fkey" in [
c["name"] for c in insp.get_foreign_keys("users_permissions")
]:
op.drop_constraint(
"users_permissions_user_id_fkey",
"users_permissions",
type_="foreignkey",
)
op.create_foreign_key(
None,
"users_permissions",
"users",
remote_cols=["id"],
local_cols=["user_id"],
onupdate="CASCADE",
ondelete="CASCADE",
)
if "users_resources_permissions_resource_id_fkey" in [
c["name"]
for c in insp.get_foreign_keys( # noqa # noqa
"users_resources_permissions"
)
]:
op.drop_constraint(
"users_resources_permissions_resource_id_fkey",
"users_resources_permissions",
type_="foreignkey",
)
op.create_foreign_key(
None,
"users_resources_permissions",
"resources",
remote_cols=["resource_id"],
local_cols=["resource_id"],
onupdate="CASCADE",
ondelete="CASCADE",
)
if "users_resources_permissions_user_id_fkey" in [
c["name"]
for c in insp.get_foreign_keys("users_resources_permissions") # noqa
]:
op.drop_constraint(
"users_resources_permissions_user_id_fkey",
"users_resources_permissions",
type_="foreignkey",
)
op.create_foreign_key(
None,
"users_resources_permissions",
"users",
remote_cols=["id"],
local_cols=["user_id"],
onupdate="CASCADE",
ondelete="CASCADE",
)
def downgrade():
pass
| ergo/ziggurat_foundations | ziggurat_foundations/migrations/versions/438c27ec1c9_normalize_constraint_and_key_names.py | Python | bsd-3-clause | 12,558 |
from setuptools import setup, find_packages
setup(name='pygame_toolbox',
version='0.1.2',
license='MIT',
description='Tools to help with game development using pygame\nFor' +
' complete details please reference the github page',
author='James Milam',
author_email='jmilam343@gmail.com',
url='https://github.com/jbm950/pygame_toolbox',
packages=find_packages(),
      classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Topic :: Games/Entertainment',
'Topic :: Software Development :: Libraries :: pygame'
],
keywords='pygame'
)
| jbm950/pygame_toolbox | setup.py | Python | mit | 780 |
from __future__ import unicode_literals
from rest_framework import viewsets
from rest_framework import permissions
from videos.api.serializers import video as video_serializers
from videos.models import Video
class VideoViewSet(viewsets.ModelViewSet):
permission_classes = [permissions.DjangoModelPermissionsOrAnonReadOnly]
def get_queryset(self):
queryset = Video.objects.all()
if self.request.method not in permissions.SAFE_METHODS:
if self.request.user.is_authenticated:
queryset = queryset.filter_owner(user=self.request.user)
else:
return queryset.none()
return queryset
def get_serializer_class(self):
if self.request.method in permissions.SAFE_METHODS:
return video_serializers.RetrieveSerializer
return video_serializers.DefaultSerializer
def get_serializer_context(self):
context = super(VideoViewSet, self).get_serializer_context()
if self.request.method not in permissions.SAFE_METHODS \
and not self.request.user.is_superuser:
context['exclude'] = ('sites', )
return context
def perform_create(self, serializer):
serializer.save(owner=self.request.user)
| sdeleeuw/contagement | videos/api/viewsets/video.py | Python | gpl-3.0 | 1,272 |
#-*- coding: utf-8 -*-
'''
Count code lines and comment lines for C/C++ and Python source files.
'''
# Counts code lines and comment lines for C/C++ and Python programs.
# The dictionaries below map each language to its file extension,
# single-line comment marker, and multi-line comment delimiters.
import pdb
# Support for other languages can be added here.
lan_postfix={'python':'.py','c':'.c'}
lan_comment={'python':'#','c':'//'}
# Handle multi-line comments, e.g. ''' in Python
multi_comment_start={'python':'\'\'\'','c':'/*'}
multi_comment_end={'python':'\'\'\'','c':'*/'}
def stat_lines(file_name,file_type):
import re
is_comment=False
stat_result={'comment':0,'content':0,'null_line':0}
with open(file_name,'rt',encoding='utf-8') as f:
if file_name.endswith(lan_postfix[file_type]):
for line in f.readlines():
# pdb.set_trace()
line_next=line.lstrip()
                # FIXME: known bug with multi-line comments: a comment that
                # opens and closes on the same line (e.g. C's /* ... */)
                # leaves is_comment stuck at True.
if not is_comment and line_next.startswith(multi_comment_start[file_type]):
is_comment= True
stat_result['comment']+=1
continue
if line_next.startswith(multi_comment_end[file_type]):
is_comment=False
stat_result['comment']+=1
# print(line)
# print(line_next)
if not line.split():
stat_result['null_line']+=1
elif line_next.startswith(lan_comment[file_type]) or is_comment:
stat_result['comment']+=1
else:
stat_result['content']+=1
return stat_result
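# Illustrative call (hypothetical file; the file_type must be a key of the
# dictionaries above):
#
#     stat_lines('example.py', 'python')
#     # -> {'comment': 3, 'content': 40, 'null_line': 5}   (counts vary)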
def main():
import os
import pdb
c_sum_stat={'comment':0,'content':0,'null_line':0}
python_sum_stat={'comment':0,'content':0,'null_line':0}
for file in os.listdir('.'):
if file.endswith('.c') or file.endswith('.cpp'):
            # c_stat is a dict with keys: comment, content and null_line
# pdb.set_trace()
c_stat=stat_lines(file,'c')
c_sum_stat['comment']+=c_stat['comment']
c_sum_stat['content']+=c_stat['content']
c_sum_stat['null_line']+=c_stat['null_line']
elif file.endswith('.py'):
python_stat=stat_lines(file,'python')
python_sum_stat['comment']+=python_stat['comment']
python_sum_stat['content']+=python_stat['content']
python_sum_stat['null_line']+=python_stat['null_line']
print('C code lines:{0}\nC comment lines:{1}\nC null_line lines:{2}'.format(c_sum_stat['content'],\
c_sum_stat['comment'],c_sum_stat['null_line']))
print('Python code lines:{0}\nPython comment lines:{1}\nPython null_line lines:{2}'.format(python_sum_stat['content'],\
python_sum_stat['comment'],python_sum_stat['null_line']))
if __name__=='__main__':
main() | Supersuuu/python | burness/0007/Program_lines_stat.py | Python | mit | 2,401 |
#!/usr/bin/env python3
# Copyright (C) 2013-2019 Florian Festi
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from boxes import *
import boxes
class DrillStand(Boxes):
"""Box for drills with each compartment of a different height"""
description = """Note: `sh` gives the hight of the rows front to back. It though should have the same number of entries as `sy`. These heights are the one on the left side and increase throughout the row. To have each compartement a bit higher than the previous one the steps in `sh` should be a bit bigger than `extra_height`.
Assembly: Start with putting the slots of the inner walls together. Then add the front and the back wall. Be especially careful with adding the bottom. It is always assymetrical and flush with the right/lower side while being a little short on the left/higher side to not protrude into the side wall. Add the very left and right walls last.
"""
ui_group = "Misc"
def __init__(self):
Boxes.__init__(self)
self.addSettingsArgs(edges.StackableSettings, height=1.0, width=3)
self.addSettingsArgs(edges.FingerJointSettings)
self.buildArgParser(sx="25*6", sy="10:20:30", sh="25:40:60")
self.argparser.add_argument(
"--extra_height", action="store", type=float, default=15.0,
help="height difference left to right")
def yWall(self, nr, move=None):
t = self.thickness
x, sx, y, sy, sh = self.x, self.sx, self.y, self.sy, self.sh
eh = self.extra_height * (sum(sx[:nr])+ nr*t - t)/x
tw, th = sum(sy) + t * len(sy) + t, max(sh) + eh
if self.move(tw, th, move, True):
return
self.moveTo(t)
self.polyline(y, 90)
self.edges["f"](sh[-1]+eh)
self.corner(90)
for i in range(len(sy)-1, 0, -1):
s1 = max(sh[i]-sh[i-1], 0) + 4*t
s2 = max(sh[i-1]-sh[i], 0) + 4*t
self.polyline(sy[i], 90, s1, -90, t, -90, s2, 90)
self.polyline(sy[0], 90)
self.edges["f"](sh[0] + eh)
self.corner(90)
self.move(tw, th, move)
def sideWall(self, extra_height=0.0, foot_height=0.0, edges="šFf", move=None):
t = self.thickness
x, sx, y, sy, sh = self.x, self.sx, self.y, self.sy, self.sh
eh = extra_height
fh = foot_height
edges = [self.edges.get(e, e) for e in edges]
tw = sum(sy) + t * len(sy) + t
th = max(sh) + eh + fh + edges[0].spacing()
if self.move(tw, th, move, True):
return
self.moveTo(edges[0].margin())
edges[0](y+2*t)
self.edgeCorner(edges[0], "e")
self.edge(fh)
self.step(edges[1].startwidth() - t)
edges[1](sh[-1]+eh)
self.edgeCorner(edges[1], "e")
for i in range(len(sy)-1, 0, -1):
self.edge(sy[i])
if sh[i] > sh[i-1]:
self.fingerHolesAt(0.5*t, self.burn, sh[i]+eh, 90)
self.polyline(t, 90, sh[i] - sh[i-1], -90)
else:
self.polyline(0, -90, sh[i-1] - sh[i], 90, t)
self.fingerHolesAt(-0.5*t, self.burn, sh[i-1]+eh)
self.polyline(sy[0])
self.edgeCorner("e", edges[2])
edges[2](sh[0]+eh)
self.step(t - edges[2].endwidth())
self.polyline(fh)
self.edgeCorner("e", edges[0])
self.move(tw, th, move)
def xWall(self, nr, move=None):
t = self.thickness
x, sx, y, sy, sh = self.x, self.sx, self.y, self.sy, self.sh
eh = self.extra_height
tw, th = x + 2*t, sh[nr] + eh + t
a = math.degrees(math.atan(eh / x))
fa = 1 / math.cos(math.radians(a))
if self.move(tw, th, move, True):
return
self.moveTo(t, eh+t, -a)
for i in range(len(sx)-1):
self.edges["f"](fa*sx[i])
h = min(sh[nr - 1], sh[nr])
s1 = h - 3.95*t + self.extra_height * (sum(sx[:i+1]) + i*t)/x
s2 = h - 3.95*t + self.extra_height * (sum(sx[:i+1]) + i*t + t)/x
self.polyline(0, 90+a, s1, -90, t, -90, s2, 90-a)
self.edges["f"](fa*sx[-1])
self.polyline(0, 90+a)
self.edges["f"](sh[nr]+eh)
self.polyline(0, 90, x, 90)
self.edges["f"](sh[nr])
self.polyline(0, 90+a)
self.move(tw, th, move)
def xOutsideWall(self, h, edges="fFeF", move=None):
t = self.thickness
x, sx, y, sy, sh = self.x, self.sx, self.y, self.sy, self.sh
edges = [self.edges.get(e, e) for e in edges]
eh = self.extra_height
tw = x + edges[1].spacing() + edges[3].spacing()
th = h + eh + edges[0].spacing() + edges[2].spacing()
a = math.degrees(math.atan(eh / x))
fa = 1 / math.cos(math.radians(a))
if self.move(tw, th, move, True):
return
self.moveTo(edges[3].spacing(), eh+edges[0].margin(), -a)
self.edge(t*math.tan(math.radians(a)))
if isinstance(edges[0], boxes.edges.FingerHoleEdge):
with self.saved_context():
self.moveTo(0, 0, a)
self.fingerHolesAt(
0, 1.5*t, x*fa - t*math.tan(math.radians(a)), -a)
self.edge(x*fa - t*math.tan(math.radians(a)))
elif isinstance(edges[0], boxes.edges.FingerJointEdge):
edges[0](x*fa - t*math.tan(math.radians(a)))
else:
raise ValueError("Only edges h and f supported: ")
self.corner(a)
self.edgeCorner(edges[0], "e", 90)
self.corner(-90)
self.edgeCorner("e", edges[1], 90)
edges[1](eh+h)
self.edgeCorner(edges[1], edges[2], 90)
edges[2](x)
self.edgeCorner(edges[2], edges[3], 90)
edges[3](h)
self.edgeCorner(edges[3], "e", 90)
self.corner(-90)
self.edgeCorner("e", edges[0], 90)
self.moveTo(0, self.burn+edges[0].startwidth(), 0)
for i in range(1, len(sx)):
posx = sum(sx[:i]) + i*t - 0.5 * t
length = h + self.extra_height * (sum(sx[:i]) + i*t - t)/x
self.fingerHolesAt(posx, h, length, -90)
self.move(tw, th, move)
def bottomCB(self):
t = self.thickness
x, sx, y, sy, sh = self.x, self.sx, self.y, self.sy, self.sh
eh = self.extra_height
a = math.degrees(math.atan(eh / x))
fa = 1 / math.cos(math.radians(a))
posy = -0.5 * t
for i in range(len(sy)-1):
posy += sy[i] + t
posx = -t * math.tan(math.radians(a)) # left side is clipped
for j in range(len(sx)):
self.fingerHolesAt(posx, posy, fa*sx[j], 0)
posx += fa*sx[j] + fa*t
def render(self):
t = self.thickness
sx, sy, sh = self.sx, self.sy, self.sh
self.x = x = sum(sx) + len(sx)*t - t
self.y = y = sum(sy) + len(sy)*t - t
bottom_angle = math.atan(self.extra_height / x) # radians
self.xOutsideWall(sh[0], "hFeF", move="up")
for i in range(1, len(sy)):
self.xWall(i, move="up")
self.xOutsideWall(sh[-1], "hfef", move="up")
self.rectangularWall(x/math.cos(bottom_angle)-t*math.tan(bottom_angle), y, "fefe", callback=[self.bottomCB], move="up")
self.sideWall(foot_height=self.extra_height+2*t, move="right")
for i in range(1, len(sx)):
self.yWall(i, move="right")
self.sideWall(self.extra_height, 2*t, move="right")
| florianfesti/boxes | boxes/generators/drillstand.py | Python | gpl-3.0 | 8,194 |
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Frappe Technologies and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestWebsiteSettings(unittest.TestCase):
pass
| adityahase/frappe | frappe/website/doctype/website_settings/test_website_settings.py | Python | mit | 227 |
# Django specific
from django.core.management.base import BaseCommand
from geodata.updaters import RegionUpdater
class Command(BaseCommand):
option_list = BaseCommand.option_list
def handle(self, *args, **options):
ru = RegionUpdater()
ru.update_unesco_regions() | tokatikato/OIPA | OIPA/geodata/management/commands/region_update_unesco_regions.py | Python | agpl-3.0 | 289 |
#!/usr/bin/python
from txzookeeper.client import ZookeeperClient
from txzookeeper.retry import RetryClient
import zookeeper
import sys
from Crypto.Hash import SHA256
from Crypto.PublicKey import RSA
import os
from twisted.python import log
from twisted.internet import reactor, defer
log.startLogging(sys.stdout)
class Config():
"""
The aim of this class is to centralize and manage most configurations
using Zookeeper
"""
def __init__(self, zkAddr):
"""Initialization of a configuration manager.
create a configuration tree into zookeeper
@param zkAddr: Zookeeper client instance.
"""
self.zk = zkAddr
self.init_nodes()
def init_nodes(self):
self.init_node("/producers")
self.init_node("/consumer")
self.init_node("/mongo")
self.init_node("/solr")
self.init_node("/ssl")
self.init_node("/ssl/ca")
self.init_node("/nodes")
def init_node(self,name):
d = self.zk.create(name)
d.addErrback(self._err)
print "node %s : OK" % name
def _err(self,error):
"""
error handler
@param error: error message
"""
log.msg('node seems to already exists : %s' % error)
def add_producer(self,producer):
"""
add a producer address in the configuration tree
@param producer: producer address
"""
d = self.zk.create("/producers/%s" % str(producer), flags = zookeeper.EPHEMERAL)
d.addErrback(self._err)
def get_mongod_all(self, callback = None):
"""
get all mongoDB addresses from configuration tree
@param callback: function to call after getting data
from the configuration tree
"""
self._get_conf_all("/mongo", callback)
def get_solr_all(self, callback = None):
"""
get all solr addresses from configuration tree
@param callback: call this function after getting data
from the configuration tree
"""
self._get_conf_all("/solr", callback)
def _get_conf_all(self, path, callback = None):
"""
get all configurations from the configuration tree, using a giving path
@param path: path of the configuration to get
@param callback: the function to call after getting the configuration
"""
def _get_value(m):
value = m
if callback:
callback(value)
def _call(m):
dat, m = self.zk.get_children_and_watch(path)
dat.addCallback(_get_value)
m.addCallback(_call)
m.addErrback(self._err)
if not callback:
data = self.zk.get(path)
data.addCallback(_get_value)
data.addErrback(self._err)
else:
data, diff = self.zk.get_children_and_watch(path)
data.addCallback(_get_value)
data.addErrback(self._err)
diff.addCallback(_call)
diff.addErrback(self._err)
def _get_conf_data(self, path, callback = None):
"""
get and watch data from the configuration tree, using a giving path
@param path: path to the configuration to get
@param callback: the function to call after getting the configuration
"""
def _get_value(m):
value = m
if callback:
callback(value)
def _call(m):
dat, m = self.zk.get_and_watch(path)
dat.addCallback(_get_value)
m.addCallback(_call)
m.addErrback(self._err)
if not callback:
data = self.zk.get(path)
data.addCallback(_get_value)
data.addErrback(self._err)
else:
data, diff = self.zk.get_and_watch(path)
data.addCallback(_get_value)
data.addErrback(self._err)
diff.addCallback(_call)
diff.addErrback(self._err)
def _get_data(self, path, callback = None, errback = None):
"""
get data from the configuration tree without watch.
@param path: path to the configuration to get
@param callback: function to call after getting data
@param errback: function to call if getting data fails
"""
def _err(error):
"""
error handler
@param error: error message
"""
log.msg('node seems to already exists : %s' % error)
data = self.zk.get(path)
data.addErrback(_err)
if callback:
data.addCallback(callback)
if errback:
data.addErrback(errback)
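# Minimal connection sketch (hypothetical address; mirrors the imports at
# the top of this module):
#
#     client = RetryClient(ZookeeperClient("127.0.0.1:2181"))
#     d = client.connect()
#     d.addCallback(lambda zk: Config(zk))
#     reactor.run()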
| Wallix-Resilience/LogMonitor | resilience/zookeeper/configure/config.py | Python | gpl-2.0 | 4,845 |
import bpy
import math
import sys
import os
import stat
import bmesh
import time
import random
##---------------------------RELOAD IMAGES------------------
class reloadImages (bpy.types.Operator):
bl_idname = "image.reload_images_osc"
bl_label = "Reload Images"
bl_options = {"REGISTER", "UNDO"}
def execute(self,context):
for imgs in bpy.data.images:
imgs.reload()
return {'FINISHED'}
##------------------------ SAVE INCREMENTAL ------------------------
class saveIncremental(bpy.types.Operator):
bl_idname = "file.save_incremental_osc"
bl_label = "Save Incremental File"
bl_options = {"REGISTER", "UNDO"}
def execute(self, context):
filepath = bpy.data.filepath
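        # e.g. "scene_v01.blend" is saved again as "scene_v02.blend"; a file
        # without a version suffix gets "_v01". Note the digit swap below
        # only preserves zero padding while the digit count is stable
        # (e.g. "v09" becomes "v010").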
if filepath.count("_v"):
strnum = filepath.rpartition("_v")[-1].rpartition(".blend")[0]
intnum = int(strnum)
modnum = strnum.replace(str(intnum),str(intnum+1))
output = filepath.replace(strnum,modnum)
basename = os.path.basename(filepath)
bpy.ops.wm.save_as_mainfile(filepath=os.path.join(os.path.dirname(filepath),"%s_v%s.blend" % (basename.rpartition("_v")[0],str(modnum))))
else:
output = filepath.rpartition(".blend")[0]+"_v01"
bpy.ops.wm.save_as_mainfile(filepath=output)
return {'FINISHED'}
##------------------------ REPLACE FILE PATHS ------------------------
bpy.types.Scene.oscSearchText = bpy.props.StringProperty(default="Search Text")
bpy.types.Scene.oscReplaceText = bpy.props.StringProperty(default="Replace Text")
class replaceFilePath(bpy.types.Operator):
bl_idname = "file.replace_file_path_osc"
bl_label = "Replace File Path"
bl_options = {"REGISTER", "UNDO"}
def execute(self, context):
TEXTSEARCH = bpy.context.scene.oscSearchText
TEXTREPLACE = bpy.context.scene.oscReplaceText
for image in bpy.data.images:
image.filepath = image.filepath.replace(TEXTSEARCH,TEXTREPLACE)
return {'FINISHED'}
##---------------------- SYNC MISSING GROUPS --------------------------
class reFreshMissingGroups(bpy.types.Operator):
bl_idname = "file.sync_missing_groups"
bl_label = "Sync Missing Groups"
bl_options = {"REGISTER", "UNDO"}
def execute(self, context):
for group in bpy.data.groups:
if group.library != None:
with bpy.data.libraries.load(group.library.filepath, link=True) as (linked,local):
local.groups = linked.groups
return {'FINISHED'} | Passtechsoft/TPEAlpGen | blender/release/scripts/addons_contrib/oscurart_tools/oscurart_files.py | Python | gpl-3.0 | 2,626 |
#!/usr/bin/env python
# Original Author: Michael Lelli <toadking@toadking.com>
import usb.core
import usb.util
import uinput
import sys
import getopt
if sys.version_info.major < 3:
iteritems = lambda x: x.iteritems()
else:
iteritems = lambda x: x.items()
controllers = [None, None, None, None]
controllers_state = [None, None, None, None]
DIGITAL_BUTTONS = {
uinput.BTN_DPAD_UP: 0x8000,
uinput.BTN_DPAD_DOWN: 0x4000,
uinput.BTN_DPAD_LEFT: 0x1000,
uinput.BTN_DPAD_RIGHT: 0x2000,
uinput.BTN_NORTH: 0x0800,
uinput.BTN_SOUTH: 0x0100,
uinput.BTN_EAST: 0x0400,
uinput.BTN_WEST: 0x0200,
uinput.BTN_START: 0x0001,
uinput.BTN_TL: 0x0008,
uinput.BTN_TR: 0x0004,
uinput.BTN_TR2: 0x0002
}
AXIS_BYTES = {
uinput.ABS_X: 3,
uinput.ABS_Y: 4,
uinput.ABS_RX: 5,
uinput.ABS_RY: 6,
uinput.ABS_Z: 7,
uinput.ABS_RZ: 8
}
STATE_NORMAL = 0x10
STATE_WAVEBIRD = 0x20
def controller_type(status):
return status & (STATE_NORMAL | STATE_WAVEBIRD)
def is_connected(status):
return status & (STATE_NORMAL | STATE_WAVEBIRD) in (STATE_NORMAL, STATE_WAVEBIRD)
def controller_type_string(controller_type):
if controller_type == STATE_NORMAL:
return "GameCube Controller"
elif controller_type == STATE_WAVEBIRD:
return "WaveBird Controller"
else:
return "Unknown Controller"
def create_device(index, status, raw):
if raw:
axis_cal = (0, 255, 0, 0)
cstick_cal = (0, 255, 0, 0)
trigger_cal = (0, 255, 0, 0)
else:
axis_cal = (20, 235, 0, 0)
cstick_cal = (30, 225, 0, 0)
trigger_cal = (25, 225, 0, 0)
events = (
uinput.BTN_NORTH,
uinput.BTN_SOUTH,
uinput.BTN_EAST,
uinput.BTN_WEST,
uinput.BTN_START,
uinput.BTN_DPAD_UP,
uinput.BTN_DPAD_DOWN,
uinput.BTN_DPAD_LEFT,
uinput.BTN_DPAD_RIGHT,
uinput.ABS_X + axis_cal,
uinput.ABS_Y + axis_cal,
uinput.ABS_RX + cstick_cal,
uinput.ABS_RY + cstick_cal,
uinput.BTN_TL,
uinput.ABS_Z + trigger_cal,
uinput.BTN_TR,
uinput.ABS_RZ + trigger_cal,
uinput.BTN_TR2
)
controllers[index] = uinput.Device(events, name="Wii U GameCube Adapter Port {}".format(index+1))
controllers_state[index] = (
controller_type(status),
0,
{
uinput.ABS_X: -1,
uinput.ABS_Y: -1,
uinput.ABS_RX: -1,
uinput.ABS_RY: -1,
uinput.ABS_Z: -1,
uinput.ABS_RZ: -1
}
)
print("{} connected on port {}".format(controller_type_string(controllers_state[index][0]), index+1))
def destroy_device(index):
print("disconnecting {} on port {}".format(controller_type_string(controllers_state[index][0]), index+1))
controllers[index] = None
def help():
print("usage: " + sys.argv[0] + " [-h/--help] [-r/--raw]\n\n"
" -h/--help: display this message\n"
" -r/--raw: do not do scaling on axis")
sys.exit(-1)
def main():
dev = usb.core.find(idVendor=0x057e, idProduct=0x0337)
raw_mode = False
try:
opts, args = getopt.getopt(sys.argv[1:], "hr", ["help", "raw"])
except getopt.GetoptError:
help()
for opt, arg in opts:
if opt in ("-h", "--help"):
help()
elif opt in ("-r", "--raw"):
print("raw mode")
raw_mode = True
if dev is None:
raise ValueError('GC adapter not found')
    reattach = False
    if dev.is_kernel_driver_active(0):
        reattach = True
        dev.detach_kernel_driver(0)
dev.set_configuration()
cfg = dev.get_active_configuration()
intf = cfg[(0,0)]
out_ep = usb.util.find_descriptor(
intf,
custom_match = \
lambda e: \
usb.util.endpoint_direction(e.bEndpointAddress) == \
usb.util.ENDPOINT_OUT)
in_ep = usb.util.find_descriptor(
intf,
custom_match = \
lambda e: \
usb.util.endpoint_direction(e.bEndpointAddress) == \
usb.util.ENDPOINT_IN)
# might not be necessary, but doesn't hurt
dev.ctrl_transfer(0x21, 11, 0x0001, 0, [])
out_ep.write([0x13])
try:
while 1:
try:
data = in_ep.read(37)
except (KeyboardInterrupt, SystemExit):
raise
except:
print("read error")
continue
if data[0] != 0x21:
print("unknown message {:02x}}".format(data[0]))
continue
payloads = [data[1:10], data[10:19], data[19:28], data[28:37]]
index = 0
for i, d in enumerate(payloads):
status = d[0]
# check for connected
if is_connected(status) and controllers[i] is None:
create_device(i, status, raw_mode)
elif not is_connected(status) and controllers[i] is not None:
destroy_device(i)
if controllers[i] is None:
continue
# if status & 0x04 != 0:
# do something about having both USB plugs connected
if controller_type(status) != controllers_state[i][0]:
print("controller on port {} changed from {} to {}???".format(i+1,
controller_type_string(controllers_state[i][0]), controller_type_string(controller_type(status))))
btns = d[1] << 8 | d[2]
newmask = 0
for btn, mask in iteritems(DIGITAL_BUTTONS):
pressed = btns & mask
newmask |= pressed
# state change
if controllers_state[i][1] & mask != pressed:
controllers[i].emit(btn, 1 if pressed != 0 else 0, syn=False)
newaxis = {}
for axis, offset in iteritems(AXIS_BYTES):
value = d[offset]
newaxis[axis] = value
if axis == uinput.ABS_Y or axis == uinput.ABS_RY:
# flip from 0 - 255 to 255 - 0
value ^= 0xFF
#elif axis == uinput.ABS_RZ or axis == uinput.ABS_Z:
# scale from 0 - 255 to 127 - 255
#value = (value >> 1) + 127
if controllers_state[i][2][axis] != value:
controllers[i].emit(axis, value, syn=False)
controllers[i].syn()
controllers_state[i] = (
controller_type(status),
newmask,
newaxis
)
except:
raise
if __name__ == "__main__":
main()
| ToadKing/wii-u-gc-adapter | old/wii-u-gc-adapter.py | Python | mit | 6,068 |
from direct.directnotify.DirectNotifyGlobal import directNotify
from toontown.coghq.BossbotCogHQLoader import BossbotCogHQLoader
from toontown.toonbase import ToontownGlobals
from toontown.hood.CogHood import CogHood
class BossbotHQ(CogHood):
notify = directNotify.newCategory('BossbotHQ')
ID = ToontownGlobals.BossbotHQ
LOADER_CLASS = BossbotCogHQLoader
def load(self):
CogHood.load(self)
self.sky.hide()
def enter(self, requestStatus):
CogHood.enter(self, requestStatus)
base.localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov)
base.camLens.setNearFar(ToontownGlobals.BossbotHQCameraNear, ToontownGlobals.BossbotHQCameraFar)
| ToontownUprising/src | toontown/hood/BossbotHQ.py | Python | mit | 635 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Admin views for managing access to actions."""
from flask import current_app
from flask_admin.contrib.sqla import ModelView
from werkzeug.local import LocalProxy
from wtforms import SelectField
from .models import ActionRoles, ActionSystemRoles, ActionUsers
from .proxies import current_access
_datastore = LocalProxy(lambda: current_app.extensions['security'].datastore)
def _(x):
"""Identity."""
return x
class ActionUsersView(ModelView):
"""View for managing access to actions by users."""
can_view_details = True
list_all = ('user_id', 'user.email', 'action', 'argument', 'exclude')
column_list = list_all
column_default_sort = ('user_id', True)
column_labels = {
'user.email': _("Email"),
'user_id': _("User ID"),
'exclude': _("Deny"),
}
column_filters = list_all
form_columns = ('user', 'action', 'argument', 'exclude')
form_args = dict(
action=dict(
choices=LocalProxy(lambda: [
(action, action) for action in current_access.actions.keys()
])
)
)
form_overrides = dict(
action=SelectField,
)
class ActionRolesView(ModelView):
"""View for managing access to actions by users with given role."""
can_view_details = True
list_all = ('role.name', 'action', 'argument', 'exclude')
column_list = list_all
    column_filters = \
        column_sortable_list = \
        column_searchable_list = \
        list_all
column_display_all_relations = True
column_labels = {
'role.name': _("Role Name"),
'exclude': _("Deny"),
}
form_columns = ('role', 'action', 'argument', 'exclude')
form_args = dict(
action=dict(
choices=LocalProxy(lambda: [
(action, action) for action in current_access.actions.keys()
])
)
)
form_overrides = dict(
action=SelectField,
)
class ActionSystemRolesView(ModelView):
"""View for managing access to actions by users with system roles."""
can_view_details = True
list_all = ('role_name', 'action', 'argument', 'exclude')
column_list = list_all
    column_filters = \
        column_sortable_list = \
        column_searchable_list = \
        list_all
column_display_all_relations = True
column_labels = {
'role_name': _("System Role"),
'exclude': _("Deny"),
}
form_args = dict(
action=dict(
choices=LocalProxy(lambda: [
(action, action) for action in current_access.actions.keys()
])
),
role_name=dict(
choices=LocalProxy(lambda: [
(action, action) for action
in current_access.system_roles.keys()
])
)
)
form_columns = ('role_name', 'action', 'argument', 'exclude')
form_overrides = dict(
action=SelectField,
role_name=SelectField,
)
action_roles_adminview = {
'model': ActionRoles,
'modelview': ActionRolesView,
'category': _('User Management'),
'name': _('Access: Roles')
}
action_users_adminview = {
'model': ActionUsers,
'modelview': ActionUsersView,
'category': _('User Management'),
'name': _('Access: Users')
}
action_system_roles_adminview = {
'model': ActionSystemRoles,
'modelview': ActionSystemRolesView,
'category': _('User Management'),
'name': _('Access: System Roles')
}
__all__ = ('action_users_adminview', 'action_roles_adminview',
'action_system_roles_adminview')
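# These dictionaries are typically discovered by invenio-admin through an
# entry point; a hypothetical registration in setup.py would look like:
#     entry_points={
#         'invenio_admin.views': [
#             'action_users = invenio_access.admin:action_users_adminview',
#         ],
#     }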
| tiborsimko/invenio-access | invenio_access/admin.py | Python | mit | 3,832 |
from flask import request
from flask_restful import Resource
from airy.units import client, report
from airy.resources import Api
from airy.resources.user import requires_auth
class Clients(Resource):
def get(self):
# Return list of clients
return {'clients': client.get_all()}
def post(self):
# Create new client
return {'client': client.save(request.get_json() or {})}
class Client(Resource):
def get(self, client_id):
# Get client details
return {'client': client.get(client_id)}
def put(self, client_id):
# Update client
return {'client': client.save(request.get_json() or {}, client_id)}
def delete(self, client_id):
# Delete client
client.delete(client_id)
class TimeSheet(Resource):
def get(self, client_id):
date_range = {
'beg': request.args.get('beg'),
'end': request.args.get('end'),
}
timesheet = report.TimeSheet(client_id, date_range)
return {'timesheet': timesheet.get()}
def post(self, client_id):
timesheet = report.TimeSheet(client_id, request.get_json() or {})
timesheet.send()
class Report(Resource):
def get(self, client_id):
# Get report
date_range = {
'beg': request.args.get('beg'),
'end': request.args.get('end'),
}
task_report = report.TaskReport(client_id, date_range)
return {'report': task_report.get()}
def post(self, client_id):
# Send report by email
task_report = report.TaskReport(client_id, request.get_json() or {})
task_report.send()
client_api = Api(decorators=[requires_auth])
client_api.add_resource(Clients, '/clients')
client_api.add_resource(Client, '/clients/<int:client_id>')
client_api.add_resource(TimeSheet, '/clients/<int:client_id>/timesheet')
client_api.add_resource(Report, '/clients/<int:client_id>/report')
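# Example requests once the blueprint is mounted (date values illustrative):
#     GET  /clients                                   -> {"clients": [...]}
#     POST /clients          (JSON body)              -> {"client": {...}}
#     GET  /clients/1/report?beg=2016-01-01&end=2016-01-31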
| xuhcc/airy | airy/resources/client.py | Python | mit | 1,957 |
import pygame
import sys
import os
import re
import itertools
import string
import time
from pygame.locals import *
from PacManMap import *
from MakeGraph import *
from Moving_pacman import *
from Ghosts import Ghost
import socket
import pickle
class DrawGraphic():
def __init__(self,class_pacman,class_ghost,class_ghost1,class_ghost2,class_ghost3):
self.ghost = class_ghost
self.ghost1 = class_ghost1
self.ghost2 = class_ghost2
self.ghost3 = class_ghost3
self.pacman = class_pacman
def draw_graphic(self, screen):
for i in range(len(Map)):
for j in range(len(Map[0])):
if Map[i][j] == 0:
pygame.draw.rect(screen, (0, 0, 255),
(j * MOVE, i * MOVE, 23, 23))
else:
pygame.draw.rect(screen, (0, 0, 0),
(j * MOVE, i * MOVE, 23, 23))
if Map[i][j] == 2:
screen.set_at((j * 23 + 12, i * 23 + 12),
(255, 255, 255))
if Map[i][j] == 3:
pygame.draw.rect(screen, (255, 0, 0),
(j * MOVE, i * MOVE, 23, 23))
def draw_nodes(self,screen):
l = PacMan.find_closest_nodes(self)
for i in l:
pygame.draw.rect(screen, (255, 0, 255),
(i[0] * MOVE, i[1] * MOVE, 23, 23))
def draw_pacman(self, screen, direction):
pacman = pygame.image.load(self.pacman.name_image)
pacmanL = pygame.transform.rotate(pacman, 180)
pacmanU = pygame.transform.rotate(pacman, 90)
pacmanD = pygame.transform.rotate(pacman, 270)
default_rotation = pacman
if direction == 'l':
default_rotation = pacmanL
self.pacman.move_left()
elif direction == 'r':
default_rotation = pacman
self.pacman.move_right()
elif direction == 'u':
default_rotation = pacmanU
self.pacman.move_up()
else:
default_rotation = pacmanD
self.pacman.move_down()
# print((self.pacman.cords['x'], self.pacman.cords['y']))
screen.blit(default_rotation,
(self.pacman.cords['x'], self.pacman.cords['y']))
def draw_c_pacman(self, screen,cords_x,cords_y,name_image,direction):
pacman = pygame.image.load(name_image)
pacmanL = pygame.transform.rotate(pacman, 180)
pacmanU = pygame.transform.rotate(pacman, 90)
pacmanD = pygame.transform.rotate(pacman, 270)
default_rotation = pacman
if direction == 'l':
default_rotation = pacmanL
elif direction == 'r':
default_rotation = pacman
elif direction == 'u':
default_rotation = pacmanU
else:
default_rotation = pacmanD
# print((self.cords['x'], self.cords['y']))
screen.blit(default_rotation,
(cords_x, cords_y))
def draw_ghost(self,screen,name,cords_x,cords_y):
ghost1 = pygame.image.load("Ghosts/"+name[0]+".png")
ghost2 = pygame.image.load("Ghosts/"+name[1]+".png")
ghost3 = pygame.image.load("Ghosts/"+name[2]+".png")
ghost4 = pygame.image.load("Ghosts/"+name[3]+".png")
# pygame.draw.rect(screen, (124, 124, 0),
# (p[1]* 23, p[0] * 23, 23, 23))
screen.blit(ghost1,(cords_x[0], cords_y[0]))
screen.blit(ghost2,(cords_x[1], cords_y[1]))
screen.blit(ghost3,(cords_x[2], cords_y[2]))
screen.blit(ghost4,(cords_x[3], cords_y[3]))
class PacManPlay(DrawGraphic):
def __init__(self,class_pacman,class_ghost,class_ghost1,class_ghost2,class_ghost3):
self.ghost = class_ghost
self.ghost1 = class_ghost1
self.ghost2 = class_ghost2
self.ghost3 = class_ghost3
self.pacman = class_pacman
def find_pacman_cords(self):
pacman_x = int(self.pacman.cords['y']/23)
pacman_y = int(self.pacman.cords['x']/23)
return (pacman_x,pacman_y)
    def find_closest_node_to_node(self, node):
        # Return a node adjacent to the given node (a stored path of length
        # two means the two nodes are directly connected).
        all_paths = self.ghost.paths_to_all_nodes
        for other_node in all_paths[node]:
            if(len(all_paths[node][other_node]) == 2):
                return other_node
        return node
    def split_pacman_ver(self):
        # Partition the closest nodes into quadrants around Pac-Man so that
        # each ghost approaches from a different direction.
        closest_nod = self.pacman.find_closest_nodes()
        pacman_cord = self.find_pacman_cords()
        coordinates = []
        for nodes in closest_nod:
            if nodes[0] <= pacman_cord[0] and nodes[1] >= pacman_cord[1]:
                coordinates.insert(0, nodes)
            if nodes[0] <= pacman_cord[0] and nodes[1] < pacman_cord[1]:
                coordinates.insert(1, nodes)
            if nodes[0] > pacman_cord[0] and nodes[1] >= pacman_cord[1]:
                coordinates.insert(2, nodes)
            if nodes[0] > pacman_cord[0] and nodes[1] < pacman_cord[1]:
                coordinates.insert(3, nodes)
        for i in range(2):
            if len(coordinates) < 4:
                coordinates.append(self.find_closest_node_to_node(closest_nod[0]))
        return coordinates
    def are_going_to_collide(self, screen, ghost_list):
temp_ghost_list = []
temp_ghost_list.extend(ghost_list)
count_moves = 0
for i in range(4):
for f_ghost in temp_ghost_list:
can_move = True
for s_ghost in ghost_list:
if f_ghost != s_ghost and f_ghost.next_hop() == s_ghost.find_ghost_cords():
can_move = False
break
if can_move == True:
count_moves += 1
# print("mestim ", f_ghost.index)
# print("next_hopove:_________")
# for i in ghost_list:
# print(i.index,"-",i.find_ghost_cords(),"-",i.next_hop())
f_ghost.ghost_make_move()
temp_ghost_list.remove(f_ghost)
# print(temp_ghost_list)
break
# if count_moves <3:
# for i in ghost_list:
# print(i.index,"-",i.find_ghost_cords(),"-",i.next_hop())
return count_moves
# if (f_ghost.next_hop() == s_ghost.find_ghost_cords() and
# s_ghost.next_hop() == f_ghost.find_ghost_cords()):
# temp = s_ghost.path
# s_ghost.path = f_ghost.path
# f_ghost.path = temp
def ghost_chase(self,screen):
self.ghost.get_pictures()
self.ghost1.get_pictures()
self.ghost2.get_pictures()
self.ghost3.get_pictures()
chasing_l = self.split_pacman_ver()
pacman_cord = self.find_pacman_cords()
if not self.ghost.path:
self.ghost.ghost_move(chasing_l[0],pacman_cord)
if not self.ghost1.path:
self.ghost1.ghost_move(chasing_l[1],pacman_cord)
if not self.ghost2.path:
self.ghost2.ghost_move(chasing_l[2],pacman_cord)
if not self.ghost3.path:
self.ghost3.ghost_move(chasing_l[3],pacman_cord)
gst = [self.ghost,self.ghost1,self.ghost2,self.ghost3]
        if self.are_going_to_collide(screen, gst) < 3:
# print(self.ghost.path)
for g in gst:
for g1 in gst:
if g.next_hop()==g1.find_ghost_cords() and g1.next_hop()==g.find_ghost_cords():
temp = g.path
g.path = g1.path
g1.path = temp
ghost.find_ghost_cords()
name = [self.ghost.name_image,self.ghost1.name_image,
self.ghost2.name_image, self.ghost3.name_image]
cords_x = [self.ghost.cords['x'],self.ghost1.cords['x'],self.ghost2.cords['x'],self.ghost3.cords['x']]
cords_y = [self.ghost.cords['y'],self.ghost1.cords['y'],self.ghost2.cords['y'],self.ghost3.cords['y']]
self.draw_ghost(screen,name,cords_x,cords_y)
    def is_game_over(self):
        # Returns True while play should continue; False once any ghost
        # occupies the same cell as Pac-Man.
        ghost_list = [self.ghost,self.ghost1,self.ghost2,self.ghost3]
for ghost in ghost_list:
if ghost.find_ghost_cords() == self.find_pacman_cords():
return False
return True
def send_to_other_player(self):
ghost_list = [self.ghost,self.ghost1,self.ghost2,self.ghost3]
cords_x = [self.ghost.cords['x'],self.ghost1.cords['x'],self.ghost2.cords['x'],self.ghost3.cords['x']]
cords_y = [self.ghost.cords['y'],self.ghost1.cords['y'],self.ghost2.cords['y'],self.ghost3.cords['y']]
all_cords = {'p':[self.pacman.cords,self.pacman.name_image,DIRECTION],
'g':[[self.ghost.name_image,self.ghost1.name_image,self.ghost2.name_image,self.ghost3.name_image],
cords_x,cords_y],
'm':Map}
b = pickle.dumps(all_cords)
data,clientaddr = sock.recvfrom(2048)
pacman_c_cords = pickle.loads(data)
sock.sendto(b,clientaddr)
return pacman_c_cords
# UDP socket used by send_to_other_player(); the port number here is an
# arbitrary assumption and must match whatever the client process uses.
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('0.0.0.0', 5005))
g = MakeGraph()
# print (g.find_nodes())
# print(g.make_all_paths())
# print(g.get_shortest_path()[(10,4)][(4,11)])
# print(g.is_p_vertex((100,100)))
# print(g.bfs((1,4)))
ALL_p = g.make_all_paths()
# print(ALL_p[(1,4)][(4,4)])
pygame.init()
screen = pygame.display.set_mode((WIDTH, HIGHT))
background = pygame.image.load("pacm.jpg")
fpsClock = pygame.time.Clock()
# d.drawLabyrinth(screen,(0,0,255))
# a.drawLabyrint(screen,(0,0,255),5)
node = g.find_nodes()
node1 = g.find_nodes()
node.pop(0)
DIRECTION = 'l'
pacm = PacMan(g)
ghost = Ghost(g,184,207)
ghost1 = Ghost(g,207,207)
ghost2 = Ghost(g,207,230)
ghost3 = Ghost(g,184,230)
a = DrawGraphic(pacm,ghost,ghost1,ghost2,ghost3)
testvam = PacManPlay(pacm,ghost,ghost1,ghost2,ghost3)
testvam.find_closest_node_to_node(pacm.find_closest_nodes()[0])
# print(testvam.split_pacman_ver())
while testvam.is_game_over():
all_cords = testvam.send_to_other_player()
a.draw_graphic(screen)
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
elif event.type == KEYDOWN:
if event.key == K_LEFT:
DIRECTION = 'l'
elif event.key == K_RIGHT:
DIRECTION = 'r'
elif event.key == K_DOWN:
DIRECTION = 'd'
else:
DIRECTION = 'u'
else:
pass
a.draw_pacman(screen, DIRECTION)
# pacm.draw_nodes(screen)
# a.draw_c_pacman(screen,all_cords['p'][0]['x'],all_cords['p'][0]['y'],all_cords['p'][1],all_cords['p'][2])
testvam.ghost_chase(screen)
# print(pacm.find_closest_nodes())
# a.draw_nodes(screen)
# print(pacm.find_nodes())
# g.draw_shortest_path(screen, node1[28], node1[13])
# if node:
# g.draw_shortest_path(screen, node1[0], node.pop(0))
# else:
# node1.pop(0)
# p = node1[0]
# node = g.find_nodes()
# node.remove(p)
# screen.blit(background,(0,0,480,640))
# a.drawLabyrint(screen,(0,0,255),5)
# p = pygame.mouse.get_pos()
# i = pygame.Surface((1000, 1000))
# a.drawLabyrinth(screen,(0,0,255))
# print(screen.get_at(p))
# pygame.draw.polygon(screen,(0,0,255),[(0, 0), (20, 0), (20, 80), (0, 80)],3)
pygame.display.flip()
pygame.display.update()
fpsClock.tick(6)
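# This module acts as the UDP host: send_to_other_player() waits for the
# remote player's pickled coordinates each frame, then replies with the full
# game state. A companion client process (not part of this file) is assumed.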
| Yordan92/Pac-man-multiplayer | drawGraphic.py | Python | gpl-3.0 | 9,683 |
"""
WSGI config for django_rest_omics project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_rest_omics.local_settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| naderm/django_rest_omics | django_rest_omics/wsgi.py | Python | bsd-2-clause | 415 |
# encoding: utf8
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import tempfile
import unittest
from bkr.server.testinfo import get_namespace_for_package, parse_string, \
TestInfo, StrictParser, ParserError, ParserWarning
class NamespaceTests(unittest.TestCase):
def test_package_not_found(self):
"Ensure that we get None for the namespace of an unrecognized package"
self.assertEquals(None, get_namespace_for_package('foobar'))
def test_simple_packages(self):
"Ensure that we get expected namespaces back for some simple packages"
self.assertEquals('desktop', get_namespace_for_package('evolution'))
self.assertEquals('tools', get_namespace_for_package('gcc'))
class NameFieldTests(unittest.TestCase):
def test_name(self):
"Ensure Name field is parsed correctly"
ti = parse_string(u"Name: /CoreOS/cups/foo/bar", raise_errors=False)
self.assertEquals(ti.test_name, u"/CoreOS/cups/foo/bar")
class PathFieldTests(unittest.TestCase):
def test_path_absolute(self):
"Ensure absolute Path field is parsed correctly"
ti = parse_string(u"Path: /mnt/tests/CoreOS/cups/foo/bar", raise_errors=False)
self.assertEquals(ti.test_path, u"/mnt/tests/CoreOS/cups/foo/bar")
def test_path_relative(self):
"Ensure relative Path field is parsed correctly"
ti = parse_string(u"Path: CoreOS/cups/foo/bar", raise_errors=False)
self.assertEquals(ti.test_path, u"/mnt/tests/CoreOS/cups/foo/bar")
class DescriptionFieldTests(unittest.TestCase):
def test_description(self):
"Ensure Description field is parsed correctly"
ti = parse_string(u"Description: Ensure that the thingummy frobnicates the doohickey", raise_errors=False)
self.assertEquals(ti.test_description, u"Ensure that the thingummy frobnicates the doohickey")
def test_description_with_colon(self):
"Ensure Description field containing a colon is parsed correctly"
ti = parse_string(u"Description: This test is from http://foo/bar", raise_errors=False)
self.assertEquals(ti.test_description, u"This test is from http://foo/bar")
class ReleasesFieldTests(unittest.TestCase):
def test_releases(self):
"Ensure Releases field is parsed correctly"
ti = parse_string(u"Releases: FC5 FC6", raise_errors=False)
self.assertEquals(ti.releases, [u'FC5', u'FC6'])
class ArchitecturesFieldTests(unittest.TestCase):
def test_architectures(self):
"Ensure Architectures field is parsed correctly"
ti = parse_string(u"Architectures: i386 x86_64", raise_errors=False)
self.assertEquals(ti.test_archs, [u"i386", u"x86_64"])
def test_architectures_after_releases(self):
"Ensure that an Architectures field following a Releases field is parsed correctly"
ti = parse_string(u"""
Releases: FC5 FC6
Architectures: i386 x86_64""", raise_errors=False)
self.assertEquals(ti.releases, [u'FC5', u'FC6'])
self.assertEquals(ti.test_archs, [u"i386", u"x86_64"])
class RhtsOptionsFieldTests(unittest.TestCase):
def test_rhtsoptions(self):
"Ensure RhtsOptions field is parsed correctly"
ti = parse_string(u"RhtsOptions: Compatible", raise_errors=False)
self.assertEquals(ti.options, [u"Compatible"])
def test_multi_options(self):
"Ensure RhtsOptions field is parsed correctly"
ti = parse_string(u"RhtsOptions: Compatible -CompatService -StrongerAVC", raise_errors=False)
self.assertEquals(ti.options, [u"Compatible", u"-CompatService", u"-StrongerAVC"])
def test_rhtsoptions_minus(self):
"Ensure RhtsOptions field parses options preceded with dash correctly"
ti = parse_string(u"RhtsOptions: -Compatible", raise_errors=False)
self.assertEquals(ti.options, [u"-Compatible"])
def test_rhtsoption_bad_value(self):
"Ensure RhtsOptions field captures bad input"
self.assertRaises(ParserError, parse_string, u"RhtsOptions: Compat", raise_errors=True)
def test_rhtsoption_duplicate(self):
"Ensure RhtsOptions field captures duplicate entries"
self.assertRaises(ParserError, parse_string, u"RhtsOptions: Compatible\nRhtsOptions: -Compatible", raise_errors=True)
class EnvironmentFieldTests(unittest.TestCase):
def test_environment(self):
"Ensure Environment field is parsed correctly"
ti = parse_string(u"Environment: VAR1=VAL1\nEnvironment: VAR2=Value with spaces - 2", raise_errors=False)
self.assertEquals(ti.environment["VAR1"], u"VAL1")
self.assertEquals(ti.environment["VAR2"], u"Value with spaces - 2")
def test_environment_duplicate_key(self):
"Ensure Environment field captures duplicate keys"
self.assertRaises(ParserError, parse_string, u"Environment: VAR1=VAL1\nEnvironment: VAR1=Value with spaces - 2", raise_errors=True)
def test_environment_bad_key(self):
"Ensure Environment field captures bad keys"
self.assertRaises(ParserError, parse_string, u"Environment: VAR =VAL1", raise_errors=True)
class NotifyFieldTests(unittest.TestCase):
def test_notify(self):
"Ensure Notify field is deprecated"
self.assertRaises(ParserWarning, parse_string, u"Notify: everyone in a 5-mile radius", raise_errors=True)
class OwnerFieldTests(unittest.TestCase):
def test_owner_example(self):
"Ensure that the example Owner field is parsed correctly"
ti = parse_string(u"Owner: John Doe <jdoe@redhat.com>", raise_errors=False)
self.assertEquals(ti.owner, u"John Doe <jdoe@redhat.com>")
def test_owner_example2(self):
"Ensure that other Owner fields are parsed correctly"
ti = parse_string(u"Owner: Jane Doe <jdoe@fedoraproject.org>", raise_errors=False)
self.assertEquals(ti.owner, u"Jane Doe <jdoe@fedoraproject.org>")
# https://bugzilla.redhat.com/show_bug.cgi?id=723159
def test_owner_with_hyphen(self):
parser = StrictParser(raise_errors=True)
parser.handle_owner('Owner', u'Endre Balint-Nagy <endre@redhat.com>')
self.assertEquals(parser.info.owner, u'Endre Balint-Nagy <endre@redhat.com>')
# https://bugzilla.redhat.com/show_bug.cgi?id=1491658
def test_non_ascii_owner(self):
parser = StrictParser(raise_errors=True)
parser.handle_owner('Owner', u'Gęśla Jaźń <gj@example.com>')
self.assertEquals(parser.info.owner, u'Gęśla Jaźń <gj@example.com>')
class PriorityFieldTests(unittest.TestCase):
def test_priority(self):
"Ensure Priority field is parsed correctly"
ti = parse_string(u"Priority: Manual", raise_errors=False)
self.assertEquals(ti.priority, u"Manual")
class BugFieldTests(unittest.TestCase):
def test_single_bug(self):
"Ensure a single Bug field works"
ti = parse_string(u"Bug: 123456", raise_errors=False)
self.assertEquals(ti.bugs, [123456])
def test_single_bugs(self):
"Ensure a single Bugs field works"
ti = parse_string(u"Bugs: 123456", raise_errors=False)
self.assertEquals(ti.bugs, [123456])
def test_multiple_bugs(self):
"Ensure that multiple values for a Bugs field work"
ti = parse_string(u"Bugs: 123456 456123", raise_errors=False)
self.assertEquals(ti.bugs, [123456, 456123])
def test_multiple_bug_lines(self):
"Ensure that multiple Bug and Bugs lines work"
ti = parse_string(u"""Bugs: 123456 456123
Bug: 987654 456789""", raise_errors=False)
self.assertEquals(ti.bugs, [123456, 456123, 987654, 456789])
def test_blank_bug(self):
"Ensure a blank Bug field is handled"
ti = parse_string(u"Bug: ", raise_errors=False)
self.assertEquals(ti.bugs, [])
class TestVersionFieldTests(unittest.TestCase):
def test_testversion(self):
"Ensure TestVersion field is parsed correctly"
ti = parse_string(u"TestVersion: 1.1", raise_errors=False)
self.assertEquals(ti.testversion, u"1.1")
class LicenseFieldTests(unittest.TestCase):
def test_license(self):
"Ensure License field is parsed correctly"
ti = parse_string(u"License: GPL", raise_errors=False)
self.assertEquals(ti.license, u"GPL")
class TestTimeFieldTests(unittest.TestCase):
def test_testtime_seconds(self):
"Ensure TestTime field can handle seconds"
ti = parse_string(u"TestTime: 5", raise_errors=False)
self.assertEquals(ti.avg_test_time, 5)
def test_testtime_minutes(self):
"Ensure TestTime field can handle minutes"
ti = parse_string(u"TestTime: 10m", raise_errors=False)
self.assertEquals(ti.avg_test_time, 600)
def test_testtime_hours(self):
"Ensure TestTime field can handle hours"
ti = parse_string(u"TestTime: 2h", raise_errors=False)
self.assertEquals(ti.avg_test_time, (2*60*60))
class RequiresFieldTests(unittest.TestCase):
def test_single_line_requires(self):
"Ensure Requires field is parsed correctly"
ti = parse_string(u"Requires: evolution dogtail", raise_errors=False)
self.assertEquals(ti.requires, [u'evolution', u'dogtail'])
def test_multiline_requires(self):
"Ensure we can handle multiple Requires lines"
ti = parse_string(u"""Requires: evolution dogtail
Requires: foo bar""", raise_errors=False)
self.assertEquals(ti.requires, [u'evolution', u'dogtail', u'foo', u'bar'])
def test_requires_with_case_differences(self):
"Ensure Requires field is parsed correctly"
ti = parse_string(u"Requires: opencryptoki openCryptoki", raise_errors=False)
self.assertEquals(ti.requires, [u'opencryptoki', u'openCryptoki'])
class RunForFieldTests(unittest.TestCase):
def test_single_line_runfor(self):
"Ensure RunFor field is parsed correctly"
ti = parse_string(u"RunFor: evolution dogtail", raise_errors=False)
self.assertEquals(ti.runfor, [u'evolution', u'dogtail'])
def test_multiline_runfor(self):
"Ensure we can handle multiple RunFor lines"
ti = parse_string(u"""RunFor: evolution dogtail
RunFor: foo bar""", raise_errors=False)
self.assertEquals(ti.runfor, [u'evolution', u'dogtail', u'foo', u'bar'])
class TypeFieldTests(unittest.TestCase):
def test_single_line_type(self):
"Ensure Type field is parsed correctly"
ti = parse_string(u"Type: Crasher Regression", raise_errors=False)
self.assertEquals(ti.types, [u'Crasher', u'Regression'])
def test_multiline_type(self):
"Ensure we can handle multiple Type lines"
ti = parse_string(u"""Type: Crasher Regression
Type: Performance Stress""", raise_errors=False)
self.assertEquals(ti.types, [u'Crasher', u'Regression', u'Performance', u'Stress'])
class NeedPropertyFieldTests(unittest.TestCase):
def test_single_line_needproperty(self):
"Ensure NeedProperty field is parsed correctly"
ti = parse_string(u"NeedProperty: PROCESSORS > 1", raise_errors=False)
self.assertEquals(ti.need_properties, [(u"PROCESSORS", u">", u"1")])
def test_multiline_needproperty(self):
"Ensure we can handle multiple NeedProperty lines"
ti = parse_string(u"""
NeedProperty: CAKE = CHOCOLATE
NeedProperty: SLICES > 3
""", raise_errors=False)
self.assertEquals(ti.need_properties, [(u"CAKE", u"=", u"CHOCOLATE"), (u"SLICES", u">", u"3")])
class DestructiveFieldTests(unittest.TestCase):
def test_destructive(self):
ti = parse_string(u"Destructive: yes", raise_errors=False)
self.assertEquals(ti.destructive, True)
class SiteConfigDeclarationTests(unittest.TestCase):
"""Unit tests for the SiteConfig declaration"""
def test_relative_siteconfig_without_name(self):
"Ensure that a relative SiteConfig declaration without a Name is handled with a sane error"
self.assertRaises(ParserError, parse_string, u"SiteConfig(server): Hostname of server", raise_errors=True)
def test_flat_relative_siteconfig(self):
"Ensure that relative SiteConfig declarations without nesting work"
ti = parse_string(u"""
Name: /desktop/evolution/mail/imap/authentication/ssl
SiteConfig(server): Hostname of server
SiteConfig(username): Username to use
SiteConfig(password): Password to use
""", raise_errors=False)
self.assertEquals(ti.siteconfig, [(u'/desktop/evolution/mail/imap/authentication/ssl/server', u"Hostname of server"),
(u'/desktop/evolution/mail/imap/authentication/ssl/username', u"Username to use"),
(u'/desktop/evolution/mail/imap/authentication/ssl/password', u"Password to use")
])
def test_nested_relative_siteconfig(self):
"Ensure that a relative SiteConfig declaration containing a path works"
ti = parse_string(u"""
Name: /desktop/evolution/mail/imap/authentication
SiteConfig(ssl/server): Hostname of server to try SSL auth against
SiteConfig(ssl/username): Username to use for SSL auth
SiteConfig(ssl/password): Password to use for SSL auth
SiteConfig(tls/server): Hostname of server to try TLS auth against
SiteConfig(tls/username): Username to use for TLS auth
SiteConfig(tls/password): Password to use for TLS auth
""", raise_errors=False)
self.assertEquals(ti.siteconfig, [(u'/desktop/evolution/mail/imap/authentication/ssl/server', u"Hostname of server to try SSL auth against"),
(u'/desktop/evolution/mail/imap/authentication/ssl/username', u"Username to use for SSL auth"),
(u'/desktop/evolution/mail/imap/authentication/ssl/password', u"Password to use for SSL auth"),
(u'/desktop/evolution/mail/imap/authentication/tls/server', u"Hostname of server to try TLS auth against"),
(u'/desktop/evolution/mail/imap/authentication/tls/username', u"Username to use for TLS auth"),
(u'/desktop/evolution/mail/imap/authentication/tls/password', u"Password to use for TLS auth")
])
def test_absolute_siteconfig(self):
"Ensure that an absolute SiteConfig declaration works"
ti = parse_string(u"""SiteConfig(/stable-servers/ldap/hostname): Location of stable LDAP server to use""", raise_errors=False)
self.assertEquals(ti.siteconfig, [(u'/stable-servers/ldap/hostname', u'Location of stable LDAP server to use')])
#def test_siteconfig_comment(self):
# "Ensure that comments are stripped as expected from descriptions"
# ti = parse_string("SiteConfig(/foo/bar): Some value # hello world", raise_errors=False)
# self.assertEquals(ti.siteconfig, [('/foo/bar', "Some value")])
def test_siteconfig_whitespace(self):
"Ensure that whitespace is stripped as expected from descriptions"
ti = parse_string(u"SiteConfig(/foo/bar): Some value ", raise_errors=False)
self.assertEquals(ti.siteconfig, [(u'/foo/bar', u"Some value")])
def test_output_relative_siteconfig(self):
"Ensure that the output methods collapse redundant paths in relative SiteConfig declarations"
ti = TestInfo()
ti.test_name = u'/foo/bar'
ti.siteconfig = [(u'/foo/bar/baz/fubar', u'Dummy value')]
self.assertEquals(ti.generate_siteconfig_lines(), u"SiteConfig(baz/fubar): Dummy value\n")
class IntegrationTests(unittest.TestCase):
def test_example_file(self):
"Ensure a full example file is parsed correctly"
ti = parse_string(u"""\
# Test comment
Owner: Jane Doe <jdoe@redhat.com>
Name: /examples/coreutils/example-simple-test
Path: /mnt/tests/examples/coreutils/example-simple-test
Description: This test ensures that cafés are generated and validated correctly
TestTime: 1m
TestVersion: 1.1
License: GPL
RunFor: coreutils
Requires: coreutils python
""", raise_errors=True)
self.assertEquals(ti.owner, u"Jane Doe <jdoe@redhat.com>")
self.assertEquals(ti.test_name, u"/examples/coreutils/example-simple-test")
self.assertEquals(ti.test_path, u"/mnt/tests/examples/coreutils/example-simple-test")
self.assertEquals(ti.test_description, u"This test ensures that cafés are generated and validated correctly")
self.assertEquals(ti.avg_test_time, 60)
self.assertEquals(ti.testversion, u"1.1")
self.assertEquals(ti.license, u"GPL")
self.assertEquals(ti.runfor, [u"coreutils"])
self.assertEquals(ti.requires, [u"coreutils", u"python"])
def test_output_testinfo(self):
"Output an example file, then ensure it is parsed succesfully"
ti1 = parse_string(u"""\
# Test comment
Owner: Jane Doe <jdoe@redhat.com>
Name: /examples/coreutils/example-simple-test
Path: /mnt/tests/examples/coreutils/example-simple-test
Description: This test ensures that cafés are generated and validated correctly
TestTime: 1m
TestVersion: 1.1
License: GPL
Destructive: yes
RunFor: coreutils
Requires: coreutils python
NeedProperty: CAKE = CHOCOLATE
NeedProperty: SLICES > 3
SiteConfig(server): Hostname of server
SiteConfig(username): Username to use
SiteConfig(password): Password to use
SiteConfig(ssl/server): Hostname of server to try SSL auth against
SiteConfig(ssl/username): Username to use for SSL auth
SiteConfig(ssl/password): Password to use for SSL auth
SiteConfig(tls/server): Hostname of server to try TLS auth against
SiteConfig(tls/username): Username to use for TLS auth
SiteConfig(tls/password): Password to use for TLS auth
SiteConfig(/stable-servers/ldap/hostname): Location of stable LDAP server to use
""", raise_errors=True)
file = tempfile.NamedTemporaryFile(mode='w')
ti1.output(file)
file.flush()
ti2 = parse_string(open(file.name).read().decode('utf8'))
self.assertEquals(ti2.owner, u"Jane Doe <jdoe@redhat.com>")
self.assertEquals(ti2.test_name, u"/examples/coreutils/example-simple-test")
self.assertEquals(ti2.test_path, u"/mnt/tests/examples/coreutils/example-simple-test")
self.assertEquals(ti2.test_description, u"This test ensures that cafés are generated and validated correctly")
self.assertEquals(ti2.avg_test_time, 60)
self.assertEquals(ti2.testversion, u"1.1")
self.assertEquals(ti2.license, u"GPL")
self.assertEquals(ti2.destructive, True)
self.assertEquals(ti2.runfor, [u"coreutils"])
self.assertEquals(ti2.requires, [u"coreutils", u"python"])
self.assertEquals(ti2.need_properties, [(u'CAKE', u'=', u'CHOCOLATE'), (u'SLICES', u'>', u'3')])
self.assertEquals(ti2.siteconfig, [(u'/examples/coreutils/example-simple-test/server', u'Hostname of server'),
(u'/examples/coreutils/example-simple-test/username', u'Username to use'),
(u'/examples/coreutils/example-simple-test/password', u'Password to use'),
(u'/examples/coreutils/example-simple-test/ssl/server', u'Hostname of server to try SSL auth against'),
(u'/examples/coreutils/example-simple-test/ssl/username', u'Username to use for SSL auth'),
(u'/examples/coreutils/example-simple-test/ssl/password', u'Password to use for SSL auth'),
(u'/examples/coreutils/example-simple-test/tls/server', u'Hostname of server to try TLS auth against'),
(u'/examples/coreutils/example-simple-test/tls/username', u'Username to use for TLS auth'),
(u'/examples/coreutils/example-simple-test/tls/password', u'Password to use for TLS auth'),
(u'/stable-servers/ldap/hostname', u'Location of stable LDAP server to use')])
| beaker-project/beaker | Server/bkr/server/tests/test_testinfo.py | Python | gpl-2.0 | 20,598 |
# Wrapper for coinroll.it's API
import requests
from urllib import parse
from decimal import Decimal
from collections import namedtuple
BASE_URI = 'https://coinroll.it'
ONE_SATOSHI = Decimal('0.00000001')
ONE_BTC = 100000000
# convert a number in satoshis to BTC
def btc(x):
return ONE_SATOSHI * x
# convert a number in BTC to satoshis if it's not already an integer
def sats(x):
if not isinstance(x, int):
x = int(x * ONE_BTC)
return x
# the request method throws this when the 'result' is 0 (error)
class RequestException(Exception):
pass
# a bunch of namedtuples used below
BetResult = namedtuple('BetResult', 'id lessthan amount lucky nonce win diff '
'balance date profit bets wins')
Game = namedtuple('Game', 'lessthan minbet maxbet multiplier odds houseedge')
Stats = namedtuple('Stats', 'balance profit bets wins')
Withdraw = namedtuple('Withdraw', 'deferred amount txid balance')
DepositStatus = namedtuple('DepositStatus', 'confirmed withdrawal amount')
BetQueryResult = namedtuple('BetQueryResult', 'id user lessthan amount lucky '
'multiplier nonce win delta '
'timestamp txid released secret '
'secretHash')
Bets = namedtuple('Bets', 'bets count')
Bet = namedtuple('Bet', 'id num lessthan amount lucky multiplier nonce win '
'diff display timestamp when date')
LiveBets = namedtuple('LiveBets', 'bets volume data')
LiveBet = namedtuple('LiveBet', 'user id amount lessthan lucky win multiplier '
'display timestamp diff')
Leader = namedtuple('Leader', 'user bets profit volume active')
# helper method
def request(path, **params):
res = requests.post(parse.urljoin(BASE_URI, path), data=params)
data = res.json()
if data['result']:
return data
else:
raise RequestException('error on %s: %s' % (path, data['result']))
class Coinroll(object):
def __init__(self, user, password):
self.user = user
self.password = password
# helper method
def request(self, path, **params):
return request(path, user=self.user, password=self.password, **params)
# Everything from here is just wrappers around the API.
# Read https://coinroll.it/api to see how to use them.
def deposit(self):
return self.request('deposit')
def bet(self, lessthan, amount):
r = self.request('bet', lessthan=lessthan, amount=sats(amount))
return BetResult(r['id'], r['lessthan'], btc(r['amount']), r['lucky'],
r['nonce'], r['win'], btc(r['diff']), btc(r['balance']),
r['date'], btc(r['profit']), r['bets'], r['wins'])
def stats(self):
r = self.request('getbalance')
return Stats(btc(r['balance']), btc(r['profit']), r['bets'], r['wins'])
def withdraw(self, address, amount):
r = self.request('withdraw', address=address, amount=sats(amount))
return Withdraw(r['deferred'], btc(r['amount']), r['txid'],
btc(r['balance']))
def depositstatus(self):
r = self.request('depositstatus')
return DepositStatus(r['confirmed'], r['deferred'], btc(r['amount']))
def bets(self, offset):
r = self.request('getbets', offset=offset)
bets = []
for b in r['bets']:
bets.append(Bet(b['id'], b['num'], b['lessthan'], btc(b['amount']),
b['lucky'], b['multiplier'], b['nonce'], b['win'],
btc(b['diff']), b['display'], b['timestamp'], b['when'],
b['date']))
return Bets(bets, r['count'])
# Since these methods don't require authentication, they are outside of the
# Coinroll class.
def querybet(id):
r = request('querybet', id=id)
return BetQueryResult(r['id'], r['user'], r['lessthan'], btc(r['amount']),
r['lucky'], r['multiplier'], r['nonce'], r['win'],
btc(r['delta']), r['timestamp'], r['txid'],
r['released'], r['secret'], r['secrethash'])
def gameinfo(lessthan):
r = request('getgameinfo', lessthan=lessthan)
return Game(r['lessthan'], btc(r['minbet']), btc(r['maxbet']),
r['multiplier'], r['odds'], r['houseedge'])
# these ones don't seem to be documented
def livebets():
r = request('livebets')
data = []
for b in r['data']:
diff = b['display'] - b['amount'] if b['win'] else b['display']
data.append(LiveBet(b['user'], b['id'], btc(b['amount']), b['lessthan'],
b['lucky'], b['win'], b['multiplier'],
btc(b['display']), b['timestamp'], btc(diff)))
return LiveBets(r['bets'].replace(',', ''),
Decimal(r['volume'].replace(',', '')), data)
def leaderboard():
r = request('leaderboard')
leaders = []
for user, bets, profit, volume, active in r['data']:
leaders.append(Leader(user, bets, btc(profit), btc(volume), active))
return leaders
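# Usage sketch (placeholder credentials; the parameter values below are
# illustrative and every call hits the live coinroll.it API):
#     c = Coinroll('user', 'password')
#     print(c.stats())
#     print(c.bet(lessthan=32768, amount=Decimal('0.001')))
#     print(gameinfo(32768))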
| andrew12/python-coinroll | coinroll.py | Python | unlicense | 5,007 |
#!/usr/bin/python
"""Test of menu accelerator label output."""
from macaroon.playback import *
import utils
sequence = MacroSequence()
sequence.append(KeyComboAction("<Control>f"))
sequence.append(TypeAction("Application class"))
sequence.append(KeyComboAction("Return"))
sequence.append(KeyComboAction("Return"))
sequence.append(PauseAction(3000))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("<Alt>p"))
sequence.append(utils.AssertPresentationAction(
"1. Initial menu and menu item",
["BRAILLE LINE: 'gtk3-demo-application application Application Class frame Preferences menu'",
" VISIBLE: 'Preferences menu', cursor=1",
"BRAILLE LINE: 'gtk3-demo-application application Application Class frame < > Prefer Dark Theme check menu item'",
" VISIBLE: '< > Prefer Dark Theme check menu', cursor=1",
"SPEECH OUTPUT: 'Preferences menu'",
"SPEECH OUTPUT: 'Prefer Dark Theme check menu item not checked'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"2. Next menu item",
["BRAILLE LINE: 'gtk3-demo-application application Application Class frame < > Hide Titlebar when maximized check menu item'",
" VISIBLE: '< > Hide Titlebar when maximized', cursor=1",
"SPEECH OUTPUT: 'Hide Titlebar when maximized check menu item not checked'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"3. Next menu item",
["BRAILLE LINE: 'gtk3-demo-application application Application Class frame Color menu'",
" VISIBLE: 'Color menu', cursor=1",
"SPEECH OUTPUT: 'Color menu'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_Enter"))
sequence.append(utils.AssertPresentationAction(
"4. Where Am I",
["BRAILLE LINE: 'gtk3-demo-application application Application Class frame Color menu'",
" VISIBLE: 'Color menu', cursor=1",
"SPEECH OUTPUT: 'Application Class frame'",
"SPEECH OUTPUT: 'Preferences menu'",
"SPEECH OUTPUT: 'Color menu 3 of 5.'",
"SPEECH OUTPUT: 'C'"]))
sequence.append(KeyComboAction("Escape"))
sequence.append(KeyComboAction("<Alt>F4"))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
| pvagner/orca | test/keystrokes/gtk3-demo/role_accel_label.py | Python | lgpl-2.1 | 2,381 |
'''
Author: Jennifer Clark
Description: This program demonstrates a simple reverse breadth-first search.
The graph (a forward-star representation of figure 7.21a, page 243) is
hard-coded below; the search computes distance labels from the sink node and
then augments flow along source-to-sink paths.
'''
from numpy import inf # Allows the use of infinity
# Forward star representation of graph 7.21a on page 243
# xij, uij
# i------------>j
# x = Flow, u = Capacity
matrix = [[1,2,0,3],[1,3,0,3],[1,4,0,2],[2,5,0,4],[3,4,0,1],[3,6,0,2],[4,2,0,1],[4,6,0,2],[5,4,0,1],[5,6,0,1]]
arcs = [[1,2],[1,3],[1,4],[2,5],[3,4],[3,2],[4,2],[4,6],[5,4],[5,6]]
list = [1]
marked = [1]
order = [1] # Order vector
d = [inf, inf, inf, inf, inf, 0] # The distance vector
pred = [0] # Precedence vector
path = [] # Path vector
m = 10 # Holds the number of rows
n = 6 # The number of nodes
s = 1 # Source node
t = 6 # Terminus node
# Begin the reverse breadth-first search algorithm
def reverseBFS():
order = [n] # Order vector
listTemp = [n]
marked = [n] # Used by the BFS algorithm to hold marked nodes.
dtemp = [0, 0, 0, 0, 0, 0] # holding array
pi = [0, 0, 0, 0, 0, 0] # Precedence vector
while marked: # While the marked array has elements, continue loop
j = marked[0]
for r in range(m-1, -1, -1): # Start search for admissible arcs
if int(matrix[r][1]) == int(j):
i = int(matrix[r][0])
if i not in listTemp and i not in marked: # Found an admissible arc
#print "here"
marked.append(i) # Add node to marked
listTemp.append(i)
if i not in order: # Create the order and pred vectors
order.append(j)
pi[i-1] = j
if pi[i-1] == t: # Connected directly to sink
dtemp[i-1] = 1
elif pi[i-1] == j:
dtemp[i-1] = 1 + d[j-1]
if dtemp[i-1] > d[i-1]:
dtemp[i-1] = d[i-1]
elif dtemp[i-1] < d[i-1]:
d[i-1] = dtemp[i-1]
marked.remove(j) # Remove node from the marked
d[0] += 1 # handles the source node
def getPred():
while marked: # While the marked array has elements, continue loop
i = marked[0]
for r in range(0, m): # Start search for admissible arcs
if int(arcs[r][0]) == i:
j = int(arcs[r][1])
if j not in list and j not in marked: # Found an admissible arc
marked.append(j) # Add node to marked
list.append(j)
if j not in order: # Create the order and pred vectors
order.append(j)
pred.append(i)
#print list
marked.remove(i) # Remove node from the marked
# Create a path from source to sink
def getPath():
del path[:]
j = arcs[0][1]
path.append(s)
path.append(j)
for x in range(0, n):
if pred[x] == j:
path.append(x + 1)
if path[len(path) - 1] != t:
path.append(t)
arcs.pop(0)
def getMinValue():
pathMin = inf
for i in range(0, len(path)-1):
for r in range(0, m):
if path[i] == matrix[r][0] and path[i + 1] == matrix[r][1]:
pathMin = min(pathMin, matrix[r][3]) # Find the minimum value
return pathMin
def relabel(pathMin):
for i in range(0, len(path)-1):
for r in range(0, m):
if path[i] == matrix[r][0] and path[i + 1] == matrix[r][1]:
if matrix[r][3] > pathMin:
matrix[r][3] -= pathMin
matrix.append([matrix[r][1], matrix[r][0], 0, pathMin]) # augment the graph
if matrix[r][0] == 1:
d[matrix[r][1] - 1] += d[0]
if matrix[r][3] < 0: # Zero out arc if negative value
matrix[r][3] = 0
# Begin the main program
def main():
reverseBFS() # get the distance vector
getPred() # get the precedence vector
counter = 0
while matrix[counter][0] == 1:
getPath()
print path
print d
delta = getMinValue()
relabel(delta)
counter += 1
#print pred
#print order
#print d
print matrix
if __name__=="__main__": main() | nerdgirl999/school-examples | MATH466/reverse_BFS.py | Python | mit | 3,654 |
from rps import *
from flask import Flask, render_template, request, redirect
app = Flask(__name__)
gamecount = 0
game = RPS(verbose=True)
cats = ['Random Cat', 'Logistic Cat', 'Naive Bayes Cat', 'Random Forest Cat', 'XGBoost Cat']
@app.route('/play', methods = ['POST'])
def play():
global gamecount, cats
gamecount += 1
userchoice = request.form['choice']
game.play(userchoice)
return redirect('/')
@app.route('/')
def main():
return render_template('index.html', gamecount=gamecount, scores=game.getscore(), cats=cats )
if __name__ == "__main__":
app.run(host='0.0.0.0')
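# Run with `python main.py`; Flask listens on port 5000 by default, so the
# game UI defined at '/' is reachable at http://localhost:5000/.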
| bartekpi/rps | main.py | Python | mit | 608 |
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.common.abstract_client import AbstractClient
from tencentcloud.tcm.v20210413 import models
class TcmClient(AbstractClient):
_apiVersion = '2021-04-13'
_endpoint = 'tcm.tencentcloudapi.com'
_service = 'tcm'
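    # Usage sketch (placeholder credentials; the region string is an
    # assumption):
    #     from tencentcloud.common import credential
    #     cred = credential.Credential("SECRET_ID", "SECRET_KEY")
    #     client = TcmClient(cred, "ap-guangzhou")
    #     mesh_list = client.DescribeMeshList(models.DescribeMeshListRequest())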
def DescribeMesh(self, request):
"""查询网格详情
:param request: Request instance for DescribeMesh.
:type request: :class:`tencentcloud.tcm.v20210413.models.DescribeMeshRequest`
:rtype: :class:`tencentcloud.tcm.v20210413.models.DescribeMeshResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeMesh", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeMeshResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))  # Exception has no .message on Python 3
def DescribeMeshList(self, request):
"""查询网格列表
:param request: Request instance for DescribeMeshList.
:type request: :class:`tencentcloud.tcm.v20210413.models.DescribeMeshListRequest`
:rtype: :class:`tencentcloud.tcm.v20210413.models.DescribeMeshListResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeMeshList", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeMeshListResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e)) | tzpBingo/github-trending/codespace/python/tencentcloud/tcm/v20210413/tcm_client.py | Python | mit | 3,255 |
def authorized_to_manage_request(_, request, current_user, pushmaster=False):
    """Return True if current_user may manage the push request: pushmasters,
    the request owner, and listed watchers are all authorized."""
    if pushmaster or \
request['user'] == current_user or \
(request['watchers'] and current_user in request['watchers'].split(',')):
return True
return False
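# Functions in this module are typically exposed to templates through
# tornado's ui_methods hook, e.g.:
#     tornado.web.Application(handlers, ui_methods=ui_methods_module)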
| bchess/pushmanager | ui_methods.py | Python | apache-2.0 | 263 |
from django.shortcuts import render
from django.http import HttpResponse
from django.contrib.sites.requests import RequestSite
from django.template import RequestContext
from .models import Overview, PersonalInfo, Education, Job, Accomplishment, Skillset, Skill
def index(request):
site_name = RequestSite(request).domain
personal_info = PersonalInfo.objects.all()[:1]
overview = Overview.objects.all()[:1]
education = Education.objects.all()
job_list = Job.objects.all()
skill_sets = Skillset.objects.all()
return render(request, 'resume/resume.html', {
'site_name': site_name,
'personal_info': personal_info,
'overview' : overview,
'job_list' : job_list,
'education' : education,
'skill_sets' : skill_sets,
}) | ckelly/django-resume | resume/views.py | Python | mit | 794 |
import numpy as np
def primes(n):
if n == 2:
return [2]
elif n < 2:
return []
s = np.arange(3, n+1, 2)
mroot = n ** 0.5
    half = (n + 1) // 2 - 1  # floor division keeps the bound integral on Python 3
i = 0
m = 3
while m <= mroot:
if s[i]:
            j = (m * m - 3) // 2  # floor division so j stays a valid integer index on Python 3
s[j] = 0
while j < half:
s[j] = 0
j += m
i = i + 1
m = 2 * i + 3
return [2] + [x for x in s if x]
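if __name__ == '__main__':
    # Smoke test: primes below 30.
    print(primes(30))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]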
| Van314159/Stokeslet-between-two-parallel-plate | python files/Primes.py | Python | mit | 463 |
import unittest
from helper import UsesQApplication
from PySide.QtCore import QTimer
from PySide.QtGui import QPainter, QFont, QFontInfo, QWidget, qApp
class MyWidget(QWidget):
def paintEvent(self, e):
p = QPainter(self)
self._info = p.fontInfo()
self._app.quit()
class TestQPainter(UsesQApplication):
def testFontInfo(self):
w = MyWidget()
w._app = self.app
w._info = None
QTimer.singleShot(300, w.show)
self.app.exec_()
self.assert_(w._info)
if __name__ == '__main__':
unittest.main()
| M4rtinK/pyside-android | tests/QtGui/bug_750.py | Python | lgpl-2.1 | 577 |
# -*- coding: utf-8 -*-
import requests
from crestify.archivers import ArchiveService, ArchiveException
class ArchiveOrgService(ArchiveService):
def get_service_name(self):
return "org.archive"
def submit(self, url):
url = 'http://web.archive.org/save/%s' % (url)
response = requests.get(url)
# Not every URL can be archived to archive.org
# URLs that cannot be archived due to robots.txt will 403
if response.status_code == 403:
raise ArchiveException()
else:
# Get the archive URL
url = response.headers['Content-Location']
return "http://web.archive.org%s" % (url)
| crestify/crestify | crestify/archivers/archiveorg.py | Python | bsd-3-clause | 684 |
""" generic mechanism for marking and selecting python functions. """
import inspect
class MarkerError(Exception):
"""Error in use of a pytest marker/attribute."""
def pytest_namespace():
return {'mark': MarkGenerator()}
def pytest_addoption(parser):
group = parser.getgroup("general")
group._addoption(
'-k',
action="store", dest="keyword", default='', metavar="EXPRESSION",
help="only run tests which match the given substring expression. "
"An expression is a python evaluatable expression "
"where all names are substring-matched against test names "
"and their parent classes. Example: -k 'test_method or test "
"other' matches all test functions and classes whose name "
"contains 'test_method' or 'test_other'. "
"Additionally keywords are matched to classes and functions "
"containing extra names in their 'extra_keyword_matches' set, "
"as well as functions which have names assigned directly to them."
)
group._addoption(
"-m",
action="store", dest="markexpr", default="", metavar="MARKEXPR",
help="only run tests matching given mark expression. "
"example: -m 'mark1 and not mark2'."
)
group.addoption(
"--markers", action="store_true",
help="show markers (builtin, plugin and per-project ones)."
)
parser.addini("markers", "markers for test functions", 'linelist')
def pytest_cmdline_main(config):
import _pytest.config
if config.option.markers:
config._do_configure()
tw = _pytest.config.create_terminal_writer(config)
for line in config.getini("markers"):
name, rest = line.split(":", 1)
tw.write("@pytest.mark.%s:" % name, bold=True)
tw.line(rest)
tw.line()
config._ensure_unconfigure()
return 0
pytest_cmdline_main.tryfirst = True
def pytest_collection_modifyitems(items, config):
keywordexpr = config.option.keyword.lstrip()
matchexpr = config.option.markexpr
if not keywordexpr and not matchexpr:
return
# pytest used to allow "-" for negating
# but today we just allow "-" at the beginning, use "not" instead
# we probably remove "-" alltogether soon
if keywordexpr.startswith("-"):
keywordexpr = "not " + keywordexpr[1:]
selectuntil = False
if keywordexpr[-1:] == ":":
selectuntil = True
keywordexpr = keywordexpr[:-1]
remaining = []
deselected = []
for colitem in items:
if keywordexpr and not matchkeyword(colitem, keywordexpr):
deselected.append(colitem)
else:
if selectuntil:
keywordexpr = None
if matchexpr:
if not matchmark(colitem, matchexpr):
deselected.append(colitem)
continue
remaining.append(colitem)
if deselected:
config.hook.pytest_deselected(items=deselected)
items[:] = remaining
class MarkMapping:
"""Provides a local mapping for markers where item access
resolves to True if the marker is present. """
def __init__(self, keywords):
mymarks = set()
for key, value in keywords.items():
if isinstance(value, MarkInfo) or isinstance(value, MarkDecorator):
mymarks.add(key)
self._mymarks = mymarks
def __getitem__(self, name):
return name in self._mymarks
class KeywordMapping:
"""Provides a local mapping for keywords.
Given a list of names, map any substring of one of these names to True.
"""
def __init__(self, names):
self._names = names
def __getitem__(self, subname):
for name in self._names:
if subname in name:
return True
return False
def matchmark(colitem, markexpr):
"""Tries to match on any marker names, attached to the given colitem."""
return eval(markexpr, {}, MarkMapping(colitem.keywords))
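# For example, "-m 'slow and not webtest'" keeps an item decorated with
# @pytest.mark.slow and no webtest marker: MarkMapping maps each marker name
# to True/False and the expression is evaluated against that mapping.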
def matchkeyword(colitem, keywordexpr):
"""Tries to match given keyword expression to given collector item.
Will match on the name of colitem, including the names of its parents.
Only matches names of items which are either a :class:`Class` or a
:class:`Function`.
Additionally, matches on names in the 'extra_keyword_matches' set of
any item, as well as names directly assigned to test functions.
"""
mapped_names = set()
# Add the names of the current item and any parent items
import pytest
for item in colitem.listchain():
if not isinstance(item, pytest.Instance):
mapped_names.add(item.name)
# Add the names added as extra keywords to current or parent items
for name in colitem.listextrakeywords():
mapped_names.add(name)
# Add the names attached to the current function through direct assignment
if hasattr(colitem, 'function'):
for name in colitem.function.__dict__:
mapped_names.add(name)
mapping = KeywordMapping(mapped_names)
if " " not in keywordexpr:
# special case to allow for simple "-k pass" and "-k 1.3"
return mapping[keywordexpr]
elif keywordexpr.startswith("not ") and " " not in keywordexpr[4:]:
return not mapping[keywordexpr[4:]]
return eval(keywordexpr, {}, mapping)
def pytest_configure(config):
import pytest
if config.option.strict:
pytest.mark._config = config
class MarkGenerator:
""" Factory for :class:`MarkDecorator` objects - exposed as
a ``pytest.mark`` singleton instance. Example::
import pytest
@pytest.mark.slowtest
def test_function():
pass
will set a 'slowtest' :class:`MarkInfo` object
on the ``test_function`` object. """
def __getattr__(self, name):
if name[0] == "_":
raise AttributeError("Marker name must NOT start with underscore")
if hasattr(self, '_config'):
self._check(name)
return MarkDecorator(name)
def _check(self, name):
try:
if name in self._markers:
return
except AttributeError:
pass
self._markers = l = set()
for line in self._config.getini("markers"):
beginning = line.split(":", 1)
x = beginning[0].split("(", 1)[0]
l.add(x)
if name not in self._markers:
raise AttributeError("%r not a registered marker" % (name,))
def istestfunc(func):
return hasattr(func, "__call__") and \
getattr(func, "__name__", "<lambda>") != "<lambda>"
class MarkDecorator:
""" A decorator for test functions and test classes. When applied
it will create :class:`MarkInfo` objects which may be
:ref:`retrieved by hooks as item keywords <excontrolskip>`.
MarkDecorator instances are often created like this::
mark1 = pytest.mark.NAME # simple MarkDecorator
mark2 = pytest.mark.NAME(name1=value) # parametrized MarkDecorator
and can then be applied as decorators to test functions::
@mark2
def test_function():
pass
When a MarkDecorator instance is called it does the following:
1. If called with a single class as its only positional argument and no
additional keyword arguments, it attaches itself to the class so it
gets applied automatically to all test cases found in that class.
2. If called with a single function as its only positional argument and
no additional keyword arguments, it attaches a MarkInfo object to the
function, containing all the arguments already stored internally in
the MarkDecorator.
3. When called in any other case, it performs a 'fake construction' call,
i.e. it returns a new MarkDecorator instance with the original
MarkDecorator's content updated with the arguments passed to this
call.
Note: The rules above prevent MarkDecorator objects from storing only a
single function or class reference as their positional argument with no
additional keyword or positional arguments.
"""
def __init__(self, name, args=None, kwargs=None):
self.name = name
self.args = args or ()
self.kwargs = kwargs or {}
@property
def markname(self):
return self.name # for backward-compat (2.4.1 had this attr)
def __repr__(self):
d = self.__dict__.copy()
name = d.pop('name')
return "<MarkDecorator %r %r>" % (name, d)
def __call__(self, *args, **kwargs):
""" if passed a single callable argument: decorate it with mark info.
otherwise add *args/**kwargs in-place to mark information. """
if args and not kwargs:
func = args[0]
is_class = inspect.isclass(func)
if len(args) == 1 and (istestfunc(func) or is_class):
if is_class:
if hasattr(func, 'pytestmark'):
mark_list = func.pytestmark
if not isinstance(mark_list, list):
mark_list = [mark_list]
# always work on a copy to avoid updating pytestmark
# from a superclass by accident
mark_list = mark_list + [self]
func.pytestmark = mark_list
else:
func.pytestmark = [self]
else:
holder = getattr(func, self.name, None)
if holder is None:
holder = MarkInfo(
self.name, self.args, self.kwargs
)
setattr(func, self.name, holder)
else:
holder.add(self.args, self.kwargs)
return func
kw = self.kwargs.copy()
kw.update(kwargs)
args = self.args + args
return self.__class__(self.name, args=args, kwargs=kw)
class MarkInfo:
""" Marking object created by :class:`MarkDecorator` instances. """
def __init__(self, name, args, kwargs):
#: name of attribute
self.name = name
#: positional argument list, empty if none specified
self.args = args
#: keyword argument dictionary, empty if nothing specified
self.kwargs = kwargs.copy()
self._arglist = [(args, kwargs.copy())]
def __repr__(self):
return "<MarkInfo %r args=%r kwargs=%r>" % (
self.name, self.args, self.kwargs
)
def add(self, args, kwargs):
""" add a MarkInfo with the given args and kwargs. """
self._arglist.append((args, kwargs))
self.args += args
self.kwargs.update(kwargs)
def __iter__(self):
""" yield MarkInfo objects each relating to a marking-call. """
for args, kwargs in self._arglist:
yield MarkInfo(self.name, args, kwargs)
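# Illustrative usage sketch (not part of the original module; names are
# examples only): calling a MarkDecorator with keyword arguments performs the
# 'fake construction' described above, and applying the result to a test
# function attaches a MarkInfo:
#
#   slow = MarkDecorator('slow')(timeout=30)  # returns an augmented decorator
#   @slow
#   def test_io():
#       pass
#   assert test_io.slow.kwargs == {'timeout': 30}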
| Yukarumya/Yukarum-Redfoxes | python/pytest/_pytest/mark.py | Python | mpl-2.0 | 11,102 |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module for the basic architectures supported by cr."""
import cr
DEFAULT = cr.Config.From(
CR_ENVSETUP_ARCH='{CR_ARCH}',
)
class Arch(cr.Plugin, cr.Plugin.Type):
"""Base class for implementing cr architecture targets."""
SELECTOR = 'CR_ARCH'
@classmethod
def AddArguments(cls, parser):
parser.add_argument(
'--architecture', dest=cls.SELECTOR,
choices=cls.Choices(),
default=None,
help='Sets the target architecture to use. Overrides ' + cls.SELECTOR
)
class IA32Arch(Arch):
ACTIVE = cr.Config.From(
CR_ENVSETUP_ARCH='ia32',
)
class Mips32Arch(Arch):
ACTIVE = cr.Config.From(
CR_ENVSETUP_ARCH='mipsel',
)
@property
def enabled(self):
return cr.AndroidPlatform.GetInstance().is_active
class X64Arch(Arch):
ACTIVE = cr.Config.From(
CR_ENVSETUP_ARCH='x64',
)
@property
def priority(self):
return super(X64Arch, self).priority + 1
class Arm32Arch(Arch):
ACTIVE = cr.Config.From(
CR_ENVSETUP_ARCH='arm',
)
@property
def priority(self):
return super(Arm32Arch, self).priority + 2
@property
def enabled(self):
return cr.AndroidPlatform.GetInstance().is_active
class Arm64Arch(Arch):
ACTIVE = cr.Config.From(
CR_ENVSETUP_ARCH='arm64',
)
@property
def enabled(self):
return cr.AndroidPlatform.GetInstance().is_active
| jaruba/chromium.src | tools/cr/cr/base/arch.py | Python | bsd-3-clause | 1,544 |
import copy
from types import GeneratorType
class MergeDict(object):
"""
A simple class for creating new "virtual" dictionaries that actually look
up values in more than one dictionary, passed in the constructor.
If a key appears in more than one of the given dictionaries, only the
first occurrence will be used.
"""
def __init__(self, *dicts):
self.dicts = dicts
def __getitem__(self, key):
for dict_ in self.dicts:
try:
return dict_[key]
except KeyError:
pass
raise KeyError
def __copy__(self):
return self.__class__(*self.dicts)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def getlist(self, key):
for dict_ in self.dicts:
if key in dict_.keys():
return dict_.getlist(key)
return []
def iteritems(self):
seen = set()
for dict_ in self.dicts:
for item in dict_.iteritems():
k, v = item
if k in seen:
continue
seen.add(k)
yield item
def iterkeys(self):
for k, v in self.iteritems():
yield k
def itervalues(self):
for k, v in self.iteritems():
yield v
def items(self):
return list(self.iteritems())
def keys(self):
return list(self.iterkeys())
def values(self):
return list(self.itervalues())
def has_key(self, key):
for dict_ in self.dicts:
if key in dict_:
return True
return False
__contains__ = has_key
__iter__ = iterkeys
def copy(self):
"""Returns a copy of this object."""
return self.__copy__()
def __str__(self):
'''
Returns something like
"{'key1': 'val1', 'key2': 'val2', 'key3': 'val3'}"
instead of the generic "<object meta-data>" inherited from object.
'''
return str(dict(self.items()))
def __repr__(self):
'''
Returns something like
MergeDict({'key1': 'val1', 'key2': 'val2'}, {'key3': 'val3'})
instead of generic "<object meta-data>" inherited from object.
'''
dictreprs = ', '.join(repr(d) for d in self.dicts)
return '%s(%s)' % (self.__class__.__name__, dictreprs)
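# Illustrative sketch (hypothetical values, not from the original source):
# lookups fall through the wrapped dicts in order, so the first dict that
# contains a key wins.
#
#   d = MergeDict({'a': 1}, {'a': 2, 'b': 3})
#   d['a']    # -> 1 (first occurrence wins)
#   d['b']    # -> 3
#   'c' in d  # -> False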
class SortedDict(dict):
"""
A dictionary that keeps its keys in the order in which they're inserted.
"""
def __new__(cls, *args, **kwargs):
instance = super(SortedDict, cls).__new__(cls, *args, **kwargs)
instance.keyOrder = []
return instance
def __init__(self, data=None):
if data is None:
data = {}
elif isinstance(data, GeneratorType):
# Unfortunately we need to be able to read a generator twice. Once
# to get the data into self with our super().__init__ call and a
# second time to setup keyOrder correctly
data = list(data)
super(SortedDict, self).__init__(data)
if isinstance(data, dict):
self.keyOrder = data.keys()
else:
self.keyOrder = []
seen = set()
for key, value in data:
if key not in seen:
self.keyOrder.append(key)
seen.add(key)
def __deepcopy__(self, memo):
return self.__class__([(key, copy.deepcopy(value, memo))
for key, value in self.iteritems()])
def __setitem__(self, key, value):
if key not in self:
self.keyOrder.append(key)
super(SortedDict, self).__setitem__(key, value)
def __delitem__(self, key):
super(SortedDict, self).__delitem__(key)
self.keyOrder.remove(key)
def __iter__(self):
return iter(self.keyOrder)
def pop(self, k, *args):
result = super(SortedDict, self).pop(k, *args)
try:
self.keyOrder.remove(k)
except ValueError:
# Key wasn't in the dictionary in the first place. No problem.
pass
return result
def popitem(self):
result = super(SortedDict, self).popitem()
self.keyOrder.remove(result[0])
return result
def items(self):
return zip(self.keyOrder, self.values())
def iteritems(self):
for key in self.keyOrder:
yield key, self[key]
def keys(self):
return self.keyOrder[:]
def iterkeys(self):
return iter(self.keyOrder)
def values(self):
return map(self.__getitem__, self.keyOrder)
def itervalues(self):
for key in self.keyOrder:
yield self[key]
def update(self, dict_):
for k, v in dict_.iteritems():
self[k] = v
def setdefault(self, key, default):
if key not in self:
self.keyOrder.append(key)
return super(SortedDict, self).setdefault(key, default)
def value_for_index(self, index):
"""Returns the value of the item at the given zero-based index."""
return self[self.keyOrder[index]]
def insert(self, index, key, value):
"""Inserts the key, value pair before the item with the given index."""
if key in self.keyOrder:
n = self.keyOrder.index(key)
del self.keyOrder[n]
if n < index:
index -= 1
self.keyOrder.insert(index, key)
super(SortedDict, self).__setitem__(key, value)
def copy(self):
"""Returns a copy of this object."""
# This way of initializing the copy means it works for subclasses, too.
obj = self.__class__(self)
obj.keyOrder = self.keyOrder[:]
return obj
def __repr__(self):
"""
Replaces the normal dict.__repr__ with a version that returns the keys
in their sorted order.
"""
return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in self.items()])
def clear(self):
super(SortedDict, self).clear()
self.keyOrder = []
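# Illustrative sketch (hypothetical values): SortedDict keeps insertion
# order, and insert() can reposition an existing key.
#
#   sd = SortedDict()
#   sd['b'] = 1
#   sd['a'] = 2
#   sd.keys()            # -> ['b', 'a']
#   sd.insert(0, 'a', 3)
#   sd.keys()            # -> ['a', 'b']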
class MultiValueDictKeyError(KeyError):
pass
class MultiValueDict(dict):
"""
A subclass of dictionary customized to handle multiple values for the
same key.
>>> d = MultiValueDict({'name': ['Adrian', 'Simon'], 'position': ['Developer']})
>>> d['name']
'Simon'
>>> d.getlist('name')
['Adrian', 'Simon']
>>> d.get('lastname', 'nonexistent')
'nonexistent'
>>> d.setlist('lastname', ['Holovaty', 'Willison'])
This class exists to solve the irritating problem raised by cgi.parse_qs,
which returns a list for every key, even though most Web forms submit
single name-value pairs.
"""
def __init__(self, key_to_list_mapping=()):
super(MultiValueDict, self).__init__(key_to_list_mapping)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__,
super(MultiValueDict, self).__repr__())
def __getitem__(self, key):
"""
Returns the last data value for this key, or [] if it's an empty list;
raises KeyError if not found.
"""
try:
list_ = super(MultiValueDict, self).__getitem__(key)
except KeyError:
raise MultiValueDictKeyError("Key %r not found in %r" % (key, self))
try:
return list_[-1]
except IndexError:
return []
def __setitem__(self, key, value):
super(MultiValueDict, self).__setitem__(key, [value])
def __copy__(self):
return self.__class__([
(k, v[:])
for k, v in self.lists()
])
def __deepcopy__(self, memo=None):
if memo is None:
memo = {}
result = self.__class__()
memo[id(self)] = result
for key, value in dict.items(self):
dict.__setitem__(result, copy.deepcopy(key, memo),
copy.deepcopy(value, memo))
return result
def __getstate__(self):
obj_dict = self.__dict__.copy()
obj_dict['_data'] = dict([(k, self.getlist(k)) for k in self])
return obj_dict
def __setstate__(self, obj_dict):
data = obj_dict.pop('_data', {})
for k, v in data.items():
self.setlist(k, v)
self.__dict__.update(obj_dict)
def get(self, key, default=None):
"""
Returns the last data value for the passed key. If key doesn't exist
or value is an empty list, then default is returned.
"""
try:
val = self[key]
except KeyError:
return default
if val == []:
return default
return val
def getlist(self, key):
"""
Returns the list of values for the passed key. If key doesn't exist,
then an empty list is returned.
"""
try:
return super(MultiValueDict, self).__getitem__(key)
except KeyError:
return []
def setlist(self, key, list_):
super(MultiValueDict, self).__setitem__(key, list_)
def setdefault(self, key, default=None):
if key not in self:
self[key] = default
return self[key]
def setlistdefault(self, key, default_list=()):
if key not in self:
self.setlist(key, default_list)
return self.getlist(key)
def appendlist(self, key, value):
"""Appends an item to the internal list associated with key."""
self.setlistdefault(key, [])
super(MultiValueDict, self).__setitem__(key, self.getlist(key) + [value])
def items(self):
"""
Returns a list of (key, value) pairs, where value is the last item in
the list associated with the key.
"""
return [(key, self[key]) for key in self.keys()]
def iteritems(self):
"""
Yields (key, value) pairs, where value is the last item in the list
associated with the key.
"""
for key in self.keys():
yield (key, self[key])
def lists(self):
"""Returns a list of (key, list) pairs."""
return super(MultiValueDict, self).items()
def iterlists(self):
"""Yields (key, list) pairs."""
return super(MultiValueDict, self).iteritems()
def values(self):
"""Returns a list of the last value on every key list."""
return [self[key] for key in self.keys()]
def itervalues(self):
"""Yield the last value on every key list."""
for key in self.iterkeys():
yield self[key]
def copy(self):
"""Returns a shallow copy of this object."""
return copy.copy(self)
def update(self, *args, **kwargs):
"""
update() extends rather than replaces existing key lists.
Also accepts keyword args.
"""
if len(args) > 1:
raise TypeError("update expected at most 1 arguments, got %d" % len(args))
if args:
other_dict = args[0]
if isinstance(other_dict, MultiValueDict):
for key, value_list in other_dict.lists():
self.setlistdefault(key, []).extend(value_list)
else:
try:
for key, value in other_dict.items():
self.setlistdefault(key, []).append(value)
except TypeError:
raise ValueError("MultiValueDict.update() takes either a MultiValueDict or dictionary")
for key, value in kwargs.iteritems():
self.setlistdefault(key, []).append(value)
class DotExpandedDict(dict):
"""
A special dictionary constructor that takes a dictionary in which the keys
may contain dots to specify inner dictionaries. It's confusing, but this
example should make sense.
>>> d = DotExpandedDict({'person.1.firstname': ['Simon'], \
'person.1.lastname': ['Willison'], \
'person.2.firstname': ['Adrian'], \
'person.2.lastname': ['Holovaty']})
>>> d
{'person': {'1': {'lastname': ['Willison'], 'firstname': ['Simon']}, '2': {'lastname': ['Holovaty'], 'firstname': ['Adrian']}}}
>>> d['person']
{'1': {'lastname': ['Willison'], 'firstname': ['Simon']}, '2': {'lastname': ['Holovaty'], 'firstname': ['Adrian']}}
>>> d['person']['1']
{'lastname': ['Willison'], 'firstname': ['Simon']}
# Gotcha: Results are unpredictable if the dots are "uneven":
>>> DotExpandedDict({'c.1': 2, 'c.2': 3, 'c': 1})
{'c': 1}
"""
def __init__(self, key_to_list_mapping):
for k, v in key_to_list_mapping.items():
current = self
bits = k.split('.')
for bit in bits[:-1]:
current = current.setdefault(bit, {})
# Now assign value to current position
try:
current[bits[-1]] = v
except TypeError: # Special-case if current isn't a dict.
current = {bits[-1]: v}
class ImmutableList(tuple):
"""
A tuple-like object that raises useful errors when it is asked to mutate.
Example::
>>> a = ImmutableList(range(5), warning="You cannot mutate this.")
>>> a[3] = '4'
Traceback (most recent call last):
...
AttributeError: You cannot mutate this.
"""
def __new__(cls, *args, **kwargs):
if 'warning' in kwargs:
warning = kwargs['warning']
del kwargs['warning']
else:
warning = 'ImmutableList object is immutable.'
self = tuple.__new__(cls, *args, **kwargs)
self.warning = warning
return self
    def complain(self, *args, **kwargs):
if isinstance(self.warning, Exception):
raise self.warning
else:
raise AttributeError(self.warning)
# All list mutation functions complain.
__delitem__ = complain
__delslice__ = complain
__iadd__ = complain
__imul__ = complain
__setitem__ = complain
__setslice__ = complain
append = complain
extend = complain
insert = complain
pop = complain
remove = complain
sort = complain
reverse = complain
class DictWrapper(dict):
"""
Wraps accesses to a dictionary so that certain values (those starting with
the specified prefix) are passed through a function before being returned.
The prefix is removed before looking up the real value.
Used by the SQL construction code to ensure that values are correctly
quoted before being used.
"""
def __init__(self, data, func, prefix):
super(DictWrapper, self).__init__(data)
self.func = func
self.prefix = prefix
def __getitem__(self, key):
"""
Retrieves the real value after stripping the prefix string (if
present). If the prefix is present, pass the value through self.func
before returning, otherwise return the raw value.
"""
if key.startswith(self.prefix):
use_func = True
key = key[len(self.prefix):]
else:
use_func = False
value = super(DictWrapper, self).__getitem__(key)
if use_func:
return self.func(value)
return value
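# Illustrative sketch (func and prefix are hypothetical): keys carrying the
# prefix are stripped and their values passed through func on the way out.
#
#   dw = DictWrapper({'name': 'x'}, lambda v: v.upper(), 'quoted_')
#   dw['name']         # -> 'x' (raw value)
#   dw['quoted_name']  # -> 'X' (value passed through func)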
| skevy/django | django/utils/datastructures.py | Python | bsd-3-clause | 15,444 |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module wraps Android's split-select tool."""
from devil.android.sdk import build_tools
from devil.utils import cmd_helper
from devil.utils import lazy
_split_select_path = lazy.WeakConstant(
lambda: build_tools.GetPath('split-select'))
def _RunSplitSelectCmd(args):
"""Runs a split-select command.
Args:
args: A list of arguments for split-select.
Returns:
The output of the command.
"""
cmd = [_split_select_path.read()] + args
status, output = cmd_helper.GetCmdStatusAndOutput(cmd)
if status != 0:
raise Exception('Failed running command "%s" with output "%s".' %
(' '.join(cmd), output))
return output
def _SplitConfig(device, allow_cached_props=False):
"""Returns a config specifying which APK splits are required by the device.
Args:
device: A DeviceUtils object.
allow_cached_props: Whether to use cached values for device properties.
"""
return ('%s-r%s-%s:%s' %
(device.GetLanguage(cache=allow_cached_props),
device.GetCountry(cache=allow_cached_props),
device.screen_density,
device.product_cpu_abi))
def SelectSplits(device, base_apk, split_apks, allow_cached_props=False):
"""Determines which APK splits the device requires.
Args:
device: A DeviceUtils object.
base_apk: The path of the base APK.
split_apks: A list of paths of APK splits.
allow_cached_props: Whether to use cached values for device properties.
Returns:
The list of APK splits that the device requires.
"""
config = _SplitConfig(device, allow_cached_props=allow_cached_props)
args = ['--target', config, '--base', base_apk]
for split in split_apks:
args.extend(['--split', split])
return _RunSplitSelectCmd(args).splitlines()
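# Illustrative note (example values assumed, derived from the _SplitConfig
# format string above): a generated target config looks like
# '<language>-r<COUNTRY>-<density>:<abi>', e.g. 'en-rUS-xhdpi:armeabi-v7a',
# which split-select matches against each split APK's manifest.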
| js0701/chromium-crosswalk | build/android/devil/android/sdk/split_select.py | Python | bsd-3-clause | 1,938 |
def oss():
import RPi.GPIO as GPIO
import cv2
import time
from datetime import datetime
from model.lampu import lampu_on, lampu_off
#from model.kirim import mail
#from model.camera import VideoCamera as camera
GPIO.setmode(GPIO.BCM)
pirPin = 18
GPIO.setup(pirPin, GPIO.IN)
try:
while True:
if GPIO.input(pirPin) == GPIO.HIGH:
print ("Gerakan terdeteksi!")
# mail("alifpamuji93@gmail.com", "subjek", "halo", "README.md")
lampu_on()
print ("Kamera mulai merekam...")
cap = cv2.VideoCapture(0)
fps = 20
filename = datetime.now().strftime("static/video/%Y-%m-%d_%H.%M.%S.mp4")
codec = cv2.VideoWriter_fourcc(*'H264')
out = cv2.VideoWriter(filename, codec, fps, (640, 480))
ret, frame = cap.read()
delay = 20*fps
while ret and delay > 0:
out.write(frame)
ret, frame = cap.read()
delay -= 1
else:
print ("No motion")
lampu_off()
except KeyboardInterrupt:
GPIO.cleanup()
| alifpamuji93/OSSS | app.py | Python | mit | 1,468 |
from django.conf.urls import patterns, include, url
from django.contrib import admin
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'djangosite.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
)
| fanscribed/django-react | example/djangosite/urls.py | Python | mit | 279 |
import numpy as np
from layers import *
class SeqNN(object):
def __init__(self, word_to_idx, wordvec_dim=128, hidden_dim=128,
cell_type='rnn', sentlen=7, storylen=70, qlen=5,
dtype=np.float32):
if cell_type not in {'rnn', 'lstm', 'gru'}:
raise ValueError('Invalid cell_type "%s"' % cell_type)
self.cell_type = cell_type
self.T3=qlen
self.T2=sentlen
self.T=storylen
self.dtype = dtype
self.word_to_idx = word_to_idx
self.idx_to_char = {i: w for w, i in word_to_idx.iteritems()}
self.params = {}
vocab_size = len(word_to_idx)
self._null = word_to_idx['<NULL>']
self._start = word_to_idx['<Start>']
self._query = word_to_idx['<Query>']
self._end = word_to_idx['<End>']
# Initialize word vectors
self.params['W_embed'] = np.random.randn(vocab_size, wordvec_dim)/100
# Initialize parameters for the RNN
dim_mul = {'lstm': 4, 'rnn': 1, 'gru': 3}[cell_type]
self.params['Wx1'] = np.random.randn(wordvec_dim, dim_mul * hidden_dim)
self.params['Wx1'] /= np.sqrt(wordvec_dim)
self.params['Wh1'] = np.random.randn(hidden_dim, dim_mul * hidden_dim)
self.params['Wh1'] /= np.sqrt(hidden_dim)
self.params['b1'] = np.zeros(dim_mul * hidden_dim)
self.params['Wx2'] = np.random.randn(wordvec_dim, dim_mul * hidden_dim)
self.params['Wx2'] /= np.sqrt(wordvec_dim)
self.params['Wh2'] = np.random.randn(hidden_dim, dim_mul * hidden_dim)
self.params['Wh2'] /= np.sqrt(hidden_dim)
self.params['b2'] = np.zeros(dim_mul * hidden_dim)
# Initialize output to vocab weights
self.params['W_vocab'] = np.random.randn(hidden_dim, vocab_size)
self.params['W_vocab'] /= np.sqrt(hidden_dim)
self.params['b_vocab'] = np.zeros(vocab_size)
# Cast parameters to correct dtype
for k, v in self.params.iteritems():
self.params[k] = v.astype(self.dtype)
def loss(self, stories, sample=False):
"""
        supports: (N, T)
            Each story has N support sentences (not fixed)
            Each supporting sentence has T words
        queries: (K, T)
            Each story has K questions (not fixed)
            Each query has T words
        answers: (K, 1)
            Each query has an answer as a word
"""
T, T2, T3 = self.T, self.T2, self.T3
loss, grads = 0.0, {}
scores = np.zeros(stories.shape[0], dtype=np.int32)
for p in self.params:
grads[p] = 0.0
for idx in xrange(stories.shape[0]):
X = stories[idx,:T]# (1, T) stories
X = np.asarray(np.split(X, 10))# (10, T) sentences, at most 10 sent per story
# Word embedding
W_embed = self.params['W_embed']
# Input-to-hidden, hidden-to-hidden, and biases for the RNN
Wx1, Wh1, b1 = self.params['Wx1'], self.params['Wh1'], self.params['b1']
Wx2, Wh2, b2 = self.params['Wx2'], self.params['Wh2'], self.params['b2']
# Weight and bias for the hidden-to-vocab transformation.
W_vocab, b_vocab = self.params['W_vocab'], self.params['b_vocab']
h0 = np.zeros((X.shape[0], Wh1.shape[0]))
X_in, cache_we = word_embedding_forward(X, W_embed)
func = rnn_forward
if self.cell_type=='lstm':
func = lstm_forward
h, cache_fwd = func(X_in, h0, Wx1, Wh1, b1)
h = h[:, -1, :] # get only the last output
X2 = stories[idx, T:T+T3].reshape(1,T3) #get the query
h0 = np.sum(h, axis=0).reshape(1,h.shape[1]) # sum all encodings of the support sentences
X_in2, cache_we2 = word_embedding_forward(X2, W_embed)
h, cache_fwd2 = func(X_in2, h0, Wx2, Wh2, b2)
h = h[:, -1, :] # get only the last output
_scores, cache_aff = affine_forward(h, W_vocab, b_vocab)
if sample==True:
scores[idx]=_scores.argmax()
continue
_loss, dscores = softmax_loss(_scores, stories[idx,-1])
loss += _loss
dscores, grads['W_vocab'], grads['b_vocab'] = affine_backward(dscores, cache_aff)
func = rnn_backward
if self.cell_type=='lstm':
func = lstm_backward
dX_in2, dh0, _Wx, _Wh, _b = func(dscores, cache_fwd2)
grads['Wx2'] += _Wx
grads['Wh2'] += _Wh
grads['b2'] += _b
grads['W_embed'] += word_embedding_backward(dX_in2, cache_we2)
#since h_0 = h_1+h_2+...+h_n, where h_i is the representation for ith support sentence
#dh_0/dh_i = 1
dh0 = np.tile(dh0.mean(axis=0), (X.shape[0], 1))
dX_in, dh0, _Wx, _Wh, _b = func(dh0, cache_fwd, T=X_in.shape[1])
grads['Wx1'] += _Wx
grads['Wh1'] += _Wh
grads['b1'] += _b
grads['W_embed'] += word_embedding_backward(dX_in, cache_we)
W_embed = self.params['W_embed']
if sample==True:
return scores
return loss, grads
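# Illustrative construction sketch (vocabulary is hypothetical; the shapes
# follow the loss() docstring above):
#
#   word_to_idx = {'<NULL>': 0, '<Start>': 1, '<Query>': 2, '<End>': 3, 'a': 4}
#   model = SeqNN(word_to_idx, wordvec_dim=128, hidden_dim=128,
#                 cell_type='lstm', storylen=70, qlen=5)
#   loss, grads = model.loss(stories)  # stories: (N, storylen + qlen + 1) ints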
| kadircet/CENG | 783/project/approach1.py | Python | gpl-3.0 | 4,760 |
from ircBase import *
import urllib2
import urllib
from bs4 import BeautifulSoup
@respondtoregex('.*')
def twss_response(message, **extra_args):
#Don't check for messages that arent room messages or pms
	#This also filters out things said by Rafi, so it only looks at other nicks' messages
if message.is_private_message or (message.is_room_message and message.sending_nick != IrcBot.shared_instance().nick):
try:
			#Request TWSS for a given message; quote_plus URL-encodes the
			#whole sentence rather than only swapping spaces for '+'
			twss = urllib.quote_plus(message.body)
url = "http://twss-classifier.heroku.com/?sentence=" + twss
responseBodyString = urllib2.urlopen(url).read()
#Find TWSS span
soup = BeautifulSoup(responseBodyString)
twssSpan = soup.find("span", {"id": "twss"})
#Check if that's what she said
if twssSpan.string == "That's what she said!":
return message.new_response_message("That's what she said!")
except:
return
| Mov1s/RafiBot | modules/twssModule.py | Python | bsd-2-clause | 894 |
from osrf_pycommon.process_utils import asyncio
from osrf_pycommon.process_utils.async_execute_process import async_execute_process
from osrf_pycommon.process_utils import get_loop
from .impl_aep_protocol import create_protocol
loop = get_loop()
@asyncio.coroutine
def run(cmd, **kwargs):
transport, protocol = yield from async_execute_process(
create_protocol(), cmd, **kwargs)
retcode = yield from protocol.complete
return protocol.stdout_buffer, protocol.stderr_buffer, retcode
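# Illustrative usage sketch (the command is an arbitrary example):
#
#   stdout, stderr, retcode = loop.run_until_complete(run(['echo', 'hello']))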
| ros2/ci | ros2_batch_job/vendor/osrf_pycommon/tests/unit/test_process_utils/impl_aep_asyncio.py | Python | apache-2.0 | 505 |
# Copyright (c) PyZMQ Developers.
# Distributed under the terms of the Modified BSD License.
import sys
import time
from threading import Thread
from unittest import TestCase
try:
from unittest import SkipTest
except ImportError:
from unittest2 import SkipTest
from pytest import mark
import zmq
from zmq.utils import jsonapi
try:
import gevent
from zmq import green as gzmq
have_gevent = True
except ImportError:
have_gevent = False
PYPY = 'PyPy' in sys.version
#-----------------------------------------------------------------------------
# skip decorators (directly from unittest)
#-----------------------------------------------------------------------------
_id = lambda x: x
skip_pypy = mark.skipif(PYPY, reason="Doesn't work on PyPy")
require_zmq_4 = mark.skipif(zmq.zmq_version_info() < (4,), reason="requires zmq >= 4")
#-----------------------------------------------------------------------------
# Base test class
#-----------------------------------------------------------------------------
class BaseZMQTestCase(TestCase):
green = False
@property
def Context(self):
if self.green:
return gzmq.Context
else:
return zmq.Context
def socket(self, socket_type):
s = self.context.socket(socket_type)
self.sockets.append(s)
return s
def setUp(self):
super(BaseZMQTestCase, self).setUp()
if self.green and not have_gevent:
raise SkipTest("requires gevent")
self.context = self.Context.instance()
self.sockets = []
def tearDown(self):
contexts = set([self.context])
while self.sockets:
sock = self.sockets.pop()
contexts.add(sock.context) # in case additional contexts are created
sock.close(0)
for ctx in contexts:
t = Thread(target=ctx.term)
t.daemon = True
t.start()
t.join(timeout=2)
if t.is_alive():
# reset Context.instance, so the failure to term doesn't corrupt subsequent tests
zmq.sugar.context.Context._instance = None
raise RuntimeError("context could not terminate, open sockets likely remain in test")
super(BaseZMQTestCase, self).tearDown()
def create_bound_pair(self, type1=zmq.PAIR, type2=zmq.PAIR, interface='tcp://127.0.0.1'):
"""Create a bound socket pair using a random port."""
s1 = self.context.socket(type1)
s1.setsockopt(zmq.LINGER, 0)
port = s1.bind_to_random_port(interface)
s2 = self.context.socket(type2)
s2.setsockopt(zmq.LINGER, 0)
s2.connect('%s:%s' % (interface, port))
self.sockets.extend([s1,s2])
return s1, s2
def ping_pong(self, s1, s2, msg):
s1.send(msg)
msg2 = s2.recv()
s2.send(msg2)
msg3 = s1.recv()
return msg3
def ping_pong_json(self, s1, s2, o):
if jsonapi.jsonmod is None:
raise SkipTest("No json library")
s1.send_json(o)
o2 = s2.recv_json()
s2.send_json(o2)
o3 = s1.recv_json()
return o3
def ping_pong_pyobj(self, s1, s2, o):
s1.send_pyobj(o)
o2 = s2.recv_pyobj()
s2.send_pyobj(o2)
o3 = s1.recv_pyobj()
return o3
def assertRaisesErrno(self, errno, func, *args, **kwargs):
try:
func(*args, **kwargs)
except zmq.ZMQError as e:
self.assertEqual(e.errno, errno, "wrong error raised, expected '%s' \
got '%s'" % (zmq.ZMQError(errno), zmq.ZMQError(e.errno)))
else:
self.fail("Function did not raise any error")
def _select_recv(self, multipart, socket, **kwargs):
"""call recv[_multipart] in a way that raises if there is nothing to receive"""
if zmq.zmq_version_info() >= (3,1,0):
# zmq 3.1 has a bug, where poll can return false positives,
# so we wait a little bit just in case
# See LIBZMQ-280 on JIRA
time.sleep(0.1)
r,w,x = zmq.select([socket], [], [], timeout=kwargs.pop('timeout', 5))
assert len(r) > 0, "Should have received a message"
kwargs['flags'] = zmq.DONTWAIT | kwargs.get('flags', 0)
recv = socket.recv_multipart if multipart else socket.recv
return recv(**kwargs)
def recv(self, socket, **kwargs):
"""call recv in a way that raises if there is nothing to receive"""
return self._select_recv(False, socket, **kwargs)
def recv_multipart(self, socket, **kwargs):
"""call recv_multipart in a way that raises if there is nothing to receive"""
return self._select_recv(True, socket, **kwargs)
class PollZMQTestCase(BaseZMQTestCase):
pass
class GreenTest:
"""Mixin for making green versions of test classes"""
green = True
def assertRaisesErrno(self, errno, func, *args, **kwargs):
if errno == zmq.EAGAIN:
raise SkipTest("Skipping because we're green.")
try:
func(*args, **kwargs)
except zmq.ZMQError:
e = sys.exc_info()[1]
self.assertEqual(e.errno, errno, "wrong error raised, expected '%s' \
got '%s'" % (zmq.ZMQError(errno), zmq.ZMQError(e.errno)))
else:
self.fail("Function did not raise any error")
def tearDown(self):
contexts = set([self.context])
while self.sockets:
sock = self.sockets.pop()
contexts.add(sock.context) # in case additional contexts are created
sock.close()
try:
gevent.joinall([gevent.spawn(ctx.term) for ctx in contexts], timeout=2, raise_error=True)
except gevent.Timeout:
raise RuntimeError("context could not terminate, open sockets likely remain in test")
def skip_green(self):
raise SkipTest("Skipping because we are green")
def skip_green(f):
def skipping_test(self, *args, **kwargs):
if self.green:
raise SkipTest("Skipping because we are green")
else:
return f(self, *args, **kwargs)
return skipping_test
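# Illustrative subclass sketch (not part of the original module): concrete
# test cases build socket pairs through the helpers above and use the
# raising recv wrappers instead of blocking receives.
#
#   class TestPair(BaseZMQTestCase):
#       def test_ping_pong(self):
#           s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
#           self.assertEqual(self.ping_pong(s1, s2, b'ping'), b'ping')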
| josephkirk/PipelineTools | packages/zmq/tests/__init__.py | Python | bsd-2-clause | 6,227 |
# Scrapy settings for daytonlocal project
#
# For simplicity, this file contains only the most important settings by
# default. All the other settings are documented here:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
#
BOT_NAME = 'codefordayton'
SPIDER_MODULES = ['dayton.spiders']
NEWSPIDER_MODULE = 'dayton.spiders'
# Crawl responsibly by identifying yourself
# (and your website) on the user-agent
USER_AGENT = 'dayton (+http://www.codefordayton.org)'
FEED_URI = 'feed.json'
FEED_FORMAT = 'json'
# ITEM_PIPELINES
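# A hypothetical pipeline registration (module path and priority are
# placeholders, not part of this project):
# ITEM_PIPELINES = {'dayton.pipelines.ExamplePipeline': 300}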
| codefordayton/scrapers | dayton/settings.py | Python | unlicense | 537 |
# -*- coding:utf-8 -*-
import mock
import pytest
import libpy
import libpy.shadow
class TestShadow(object):
def test(self):
proxy = libpy.shadow.Shadow(ShadowTarget())
assert isinstance(proxy, libpy.shadow.Shadow)
assert proxy.a == mock.sentinel.proxy_target_a
assert proxy.b == mock.sentinel.proxy_target_b
with pytest.raises(AttributeError):
_ = proxy.X
with pytest.raises(AttributeError):
_ = proxy._c
def test_keep_value(self):
target = ShadowTarget()
proxy = libpy.shadow.Shadow(target)
assert proxy.b == mock.sentinel.proxy_target_b
delattr(target, 'b')
assert not hasattr(target, 'b')
assert proxy.b == mock.sentinel.proxy_target_b
delattr(proxy, 'b')
with pytest.raises(AttributeError):
_ = proxy.b
def test_update(self):
target = ShadowTarget()
proxy = libpy.shadow.Shadow(target)
assert proxy.a == mock.sentinel.proxy_target_a
target.a = 'new_value_for_a'
assert proxy.a == mock.sentinel.proxy_target_a
delattr(proxy, 'a')
assert proxy.a == 'new_value_for_a'
def test_override(self):
proxy = libpy.shadow.Shadow(ShadowTarget(), a='override_a')
assert proxy.a == 'override_a'
assert proxy.b == mock.sentinel.proxy_target_b
proxy.b = 'dynamic_override_b'
assert proxy.b == 'dynamic_override_b'
class ShadowTarget(object):
a = mock.sentinel.proxy_target_a
def __init__(self):
self.b = mock.sentinel.proxy_target_b
self._c = mock.sentinel.proxy_target_c
| surabujin/libpy | libpy/test/test_shadow.py | Python | mit | 1,664 |
# Copyright (c) 2011, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""CPU module for Westmere-EX"""
import bits
from cpu_gen import mwait_hint_to_cstate
from cpu_nhm import register_tests, generate_mwait_menu
name = 'Westmere-EX'
def is_cpu():
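    # CPUID leaf 1 EAX with the stepping nibble masked off; 0x206f0 is the
    # family 6, model 0x2f signature used by Westmere-EX processors.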
return bits.cpuid(bits.bsp_apicid(),1).eax & ~0xf == 0x206f0
def init():
pass
| ii0/bits | python/cpu_wsm_ex.py | Python | bsd-3-clause | 1,836 |
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""Set of workflow tasks for MagPie API."""
from __future__ import absolute_import, division, print_function
import requests
from flask import current_app
from inspire_utils.record import get_value
from inspirehep.modules.workflows.utils import json_api_request
from ..utils import with_debug_logging
@with_debug_logging
def get_magpie_url():
"""Return the Magpie URL endpoint, if any."""
base_url = current_app.config.get("MAGPIE_API_URL")
if not base_url:
return
return "{base_url}/predict".format(
base_url=base_url
)
@with_debug_logging
def prepare_magpie_payload(record, corpus):
"""Prepare payload to send to Magpie API."""
payload = dict(text="", corpus=corpus)
titles = filter(None, get_value(record, "titles.title", []))
abstracts = filter(None, get_value(record, "abstracts.value", []))
payload["text"] = ". ".join(
[part.encode('utf-8') for part in titles + abstracts])
return payload
@with_debug_logging
def filter_magpie_response(labels, limit):
"""Filter response from Magpie API, keeping most relevant labels."""
filtered_labels = [
(word, score) for word, score in labels
if score >= limit
]
# In the event that there are no labels with a high enough score,
# we take only the top one
if labels and len(filtered_labels) == 0:
filtered_labels.append(labels[0])
return filtered_labels
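# Illustrative behaviour sketch (labels are hypothetical): with limit=0.5,
# filter_magpie_response([('hep', 0.8), ('astro', 0.3)], 0.5) keeps only
# ('hep', 0.8); if no label clears the limit, the top-scoring one is still
# kept so a non-empty response never filters down to nothing.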
@with_debug_logging
def guess_keywords(obj, eng):
"""Workflow task to ask Magpie API for a keywords assessment."""
magpie_url = get_magpie_url()
if not magpie_url:
# Skip task if no API URL set
return
payload = prepare_magpie_payload(obj.data, corpus="keywords")
try:
results = json_api_request(magpie_url, payload)
except requests.exceptions.RequestException:
results = {}
if results:
labels = results.get('labels', [])
keywords = labels[:10]
        keywords = [{'label': k[0], 'score': k[1], 'accept': k[1] >= 0.09}
                    for k in keywords]
obj.extra_data["keywords_prediction"] = dict(
keywords=keywords
)
current_app.logger.info("Keyword prediction (top 10): {0}".format(
obj.extra_data["keywords_prediction"]["keywords"]
))
@with_debug_logging
def guess_categories(obj, eng):
"""Workflow task to ask Magpie API for a subject area assessment."""
magpie_url = get_magpie_url()
if not magpie_url:
# Skip task if no API URL set
return
payload = prepare_magpie_payload(obj.data, corpus="categories")
results = json_api_request(magpie_url, payload)
if results:
labels = results.get('labels', [])
categories = filter_magpie_response(labels, limit=0.22)
categories = [{'label': c[0], 'score': c[1],
'accept': c[1] >= 0.25} for c in categories]
obj.extra_data["categories_prediction"] = dict(
categories=categories
)
current_app.logger.info("Category prediction: {0}".format(
obj.extra_data["categories_prediction"]["categories"]
))
@with_debug_logging
def guess_experiments(obj, eng):
"""Workflow task to ask Magpie API for a subject area assessment."""
magpie_url = get_magpie_url()
if not magpie_url:
# Skip task if no API URL set
return
payload = prepare_magpie_payload(obj.data, corpus="experiments")
results = json_api_request(magpie_url, payload)
if results:
all_predictions = results.get('labels', [])
selected_experiments = filter_magpie_response(
all_predictions,
limit=0.5,
)
selected_experiments = [
{'label': e[0], 'score': e[1]}
for e in selected_experiments
]
obj.extra_data["experiments_prediction"] = dict(
experiments=selected_experiments,
)
current_app.logger.info("Experiment prediction: {0}".format(
obj.extra_data["experiments_prediction"]["experiments"]
))
| zzacharo/inspire-next | inspirehep/modules/workflows/tasks/magpie.py | Python | gpl-3.0 | 5,019 |
import bisect
import re
from itertools import combinations
class NFA(object):
EPSILON = object()
ANY = object()
def __init__(self, start_state):
self.transitions = {}
self.final_states = set()
self._start_state = start_state
@property
def start_state(self):
return frozenset(self._expand({self._start_state}))
def add_transition(self, src, input, dest):
self.transitions.setdefault(src, {}).setdefault(input, set()).add(dest)
def add_final_state(self, state):
self.final_states.add(state)
def is_final(self, states):
return self.final_states.intersection(states)
def _expand(self, states):
frontier = set(states)
while frontier:
state = frontier.pop()
new_states = self.transitions.get(state, {}).get(NFA.EPSILON, set()).difference(states)
frontier.update(new_states)
states.update(new_states)
return states
def next_state(self, states, input):
dest_states = set()
for state in states:
state_transitions = self.transitions.get(state, {})
dest_states.update(state_transitions.get(input, []))
dest_states.update(state_transitions.get(NFA.ANY, []))
return frozenset(self._expand(dest_states))
def get_inputs(self, states):
inputs = set()
for state in states:
inputs.update(self.transitions.get(state, {}).keys())
return inputs
def to_dfa(self):
dfa = DFA(self.start_state)
frontier = [self.start_state]
seen = set()
while frontier:
current = frontier.pop()
inputs = self.get_inputs(current)
for input in inputs:
if input == NFA.EPSILON:
continue
new_state = self.next_state(current, input)
if new_state not in seen:
frontier.append(new_state)
seen.add(new_state)
if self.is_final(new_state):
dfa.add_final_state(new_state)
if input == NFA.ANY:
dfa.set_default_transition(current, new_state)
else:
dfa.add_transition(current, input, new_state)
return dfa
class DFA(object):
def __init__(self, start_state):
self.start_state = start_state
self.transitions = {}
self.defaults = {}
self.final_states = set()
def add_transition(self, src, inpt, dest):
self.transitions.setdefault(src, {})[inpt] = dest
def set_default_transition(self, src, dest):
self.defaults[src] = dest
def add_final_state(self, state):
self.final_states.add(state)
def is_final(self, state):
return state in self.final_states
def next_state(self, src, input):
state_transitions = self.transitions.get(src, {})
return state_transitions.get(input, self.defaults.get(src, None))
def next_valid_string(self, input):
state = self.start_state
stack = []
# Evaluate the DFA as far as possible
for i, x in enumerate(input):
stack.append((input[:i], state, x))
state = self.next_state(state, x)
if not state:
break
else:
stack.append((input[:i + 1], state, None))
if self.is_final(state):
# Input word is already valid
return input
# Perform a 'wall following' search for the lexicographically smallest
# accepting state.
while stack:
path, state, x = stack.pop()
x = self.find_next_edge(state, x)
if x:
path += x
state = self.next_state(state, x)
if self.is_final(state):
return path
stack.append((path, state, None))
return None
def find_next_edge(self, s, x):
if x is None:
x = u'\0'
else:
x = unichr(ord(x) + 1)
state_transitions = self.transitions.get(s, {})
if x in state_transitions or s in self.defaults:
return x
labels = sorted(state_transitions.keys())
pos = bisect.bisect_left(labels, x)
if pos < len(labels):
return labels[pos]
return None
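# Note on next_valid_string (describing the method above): when the DFA
# rejects the input, the method backtracks along the evaluated prefix and
# follows the smallest out-edge at or after the failed character, returning
# the lexicographically smallest accepted string >= the input, or None.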
def levenshtein_automata(term):
k = 2
nfa = NFA((0, 0))
for i, c in enumerate(term):
for e in range(k + 1):
# Correct character
nfa.add_transition((i, e), c, (i + 1, e))
if e < k:
# Deletion
nfa.add_transition((i, e), NFA.ANY, (i, e + 1))
# Insertion
nfa.add_transition((i, e), NFA.EPSILON, (i + 1, e + 1))
# Substitution
nfa.add_transition((i, e), NFA.ANY, (i + 1, e + 1))
for e in range(k + 1):
if e < k:
nfa.add_transition((len(term), e), NFA.ANY, (len(term), e + 1))
nfa.add_final_state((len(term), e))
return nfa
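# Illustrative usage sketch (the word list is hypothetical): compile the NFA
# into a DFA and walk it against a sorted list with Matcher and
# _find_all_matches (defined below) to enumerate every candidate within
# edit distance k=2 of the term.
#
#   lev = levenshtein_automata(u'nike').to_dfa()
#   words = Matcher(sorted([u'bike', u'nice', u'niche', u'zebra']))
#   list(_find_all_matches(lev, words, lambda w: True))
#   # -> [u'bike', u'nice', u'niche']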
class Word:
def __init__(self, stem, suffix):
self.stem = stem
self.suffix = suffix
class LatinStemmer:
__que_exceptions = {
"atque", "quoque", "neque", "itaque", "absque", "apsque", "abusque", "adaeque", "adusque",
"denique", "deque", "susque", "oblique", "peraeque", "plenisque", "quandoque", "quisque",
"quaeque", "cuiusque", "cuique", "quemque", "quamque", "quaque", "quique", "quorumque",
"quarumque", "quibusque", "quosque", "quasque", "quotusquisque", "quousque", "ubique",
"undique", "usque", "uterque", "utique", "utroque", "utribique", "torque", "coque",
"concoque", "contorque", "detorque", "decoque", "excoque", "extorque", "obtorque", "optorque",
"retorque", "recoque", "attorque", "incoque", "intorque", "praetorque"
}
__noun_suffixes = [
"ibus", "ius",
"ae", "am", "as", "em", "es", "ia", "is", "nt", "os", "ud", "um", "us",
"a", "e", "i", "o", "u"
]
def __init__(self):
pass
@staticmethod
def stemmize(word):
word = word.replace('j', 'i').replace('v', 'u')
if word.endswith('que'):
if word in LatinStemmer.__que_exceptions:
return Word(stem=word, suffix='')
word = word[:-3]
for noun_suffix in LatinStemmer.__noun_suffixes:
if word.endswith(noun_suffix):
if len(word) - len(noun_suffix) >= 2:
return Word(stem=word[:-len(noun_suffix)], suffix=noun_suffix)
else:
return Word(stem=word, suffix='')
return Word(stem=word, suffix='')
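# Illustrative behaviour sketch (inputs are hypothetical): stemmize() first
# folds j->i and v->u, strips a trailing 'que' unless the word is a listed
# exception, then removes the first matching noun suffix provided at least
# two characters of stem remain.
#
#   LatinStemmer.stemmize('silvaticus').stem  # -> 'siluatic' (suffix 'us')
#   LatinStemmer.stemmize('atque').stem       # -> 'atque' (que exception)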
class Matcher(object):
def __init__(self, l):
self.l = l
self.probes = 0
def __call__(self, w):
self.probes += 1
pos = bisect.bisect_left(self.l, w)
if pos < len(self.l):
return self.l[pos]
else:
return None
def _levenshtein(s1, s2):
if len(s1) < len(s2):
return _levenshtein(s2, s1)
if not s1:
return len(s2)
previous_row = xrange(len(s2) + 1)
for i, c1 in enumerate(s1):
current_row = [i + 1]
for j, c2 in enumerate(s2):
            # j+1 instead of j since previous_row and current_row are one
            # character longer than s2
            insertions = previous_row[j + 1] + 1
            deletions = current_row[j] + 1
substitutions = previous_row[j] + (c1 != c2)
current_row.append(min(insertions, deletions, substitutions))
previous_row = current_row
return previous_row[-1]
def _matching_threshold_helper(word_input, word_candidate, space_edits):
print '_matching_threshold_helper> word_input:', word_input, \
'| word_candidate:', word_candidate, \
'| space_edits:', space_edits
assert word_input.count(' ') == word_candidate.count(' ')
word_input_parts = word_input.split(' ')
word_candidate_parts = word_candidate.split(' ')
for idx in range(len(word_input_parts)):
word_input_part_len = len(word_input_parts[idx])
if word_input_part_len < 6:
allowed_edits = 0
elif word_input_part_len < 11:
allowed_edits = 1
else:
allowed_edits = 2
actual_edits = _levenshtein(word_input_parts[idx], word_candidate_parts[idx])
print '_matching_threshold_helper_iter>', word_input_parts[idx], '|', word_candidate_parts[idx], \
'||', allowed_edits, '-', actual_edits
if actual_edits + space_edits[idx] > allowed_edits:
return False
return True
def _matching_threshold(word_input, word_candidate):
word_input_spaces = word_input.count(' ')
word_candidate_spaces = word_candidate.count(' ')
if word_input_spaces == word_candidate_spaces:
space_edits = [0] * (word_input_spaces + 1)
return _matching_threshold_helper(word_input, word_candidate, space_edits)
else:
word_long, word_short, word_long_spaces, word_short_spaces = \
(word_input, word_candidate, word_input_spaces, word_candidate_spaces) \
if word_input_spaces > word_candidate_spaces \
else (word_candidate, word_input, word_candidate_spaces, word_input_spaces)
        # todo: when words are glued, the first one should not be stemmed
# i.e. all the words that are glued to the first one should be verbatim
# eury tellina ~> `eurytellina`, but not `eurytellin`
word_long_parts = word_long.split(' ')
for comb in combinations(range(word_long_spaces), word_short_spaces):
word_long_idx = 0
word_long_new = ''
space_edits = [-1] * (word_short_spaces + 1)
for i, c in enumerate(comb):
while word_long_idx <= c:
word_long_new += word_long_parts[word_long_idx]
space_edits[i] += 1
word_long_idx += 1
word_long_new += ' '
word_long_new += ''.join(word_long_parts[word_long_idx:])
space_edits[-1] += len(word_long_parts[word_long_idx:])
word_input_new, word_candidate_new = \
(word_long_new, word_short) \
if word_input_spaces > word_candidate_spaces \
else (word_short, word_long_new)
if _matching_threshold_helper(word_input_new, word_candidate_new, space_edits):
return True
return False
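# Illustrative thresholds (restating the helper above with hypothetical
# inputs): per space-separated part, fewer than 6 characters allow 0 edits,
# 6-10 allow 1, and 11 or more allow 2; merged or split parts consume part
# of that budget through space_edits.
#
#   _matching_threshold('abc', 'abd')        # -> False (0 edits allowed)
#   _matching_threshold('abcdef', 'abcdex')  # -> True (1 edit allowed)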
def _find_all_matches(lev, lookup_func, lookup_ds):
match = lev.next_valid_string(u'\0')
while match:
nxt = lookup_func(match)
if not nxt:
return
if match == nxt:
if lookup_ds(match):
yield match
nxt = nxt + u'\0'
match = lev.next_valid_string(nxt)
class MatcherByStem:
def __init__(self, words_to_datasources):
print("Constructing MatcherByStem")
self.word_stemmized_to_words = {}
self.words_to_datasources = words_to_datasources
for idx, (word, data_sources) in enumerate(words_to_datasources.iteritems()):
if idx > 0 and idx % 100000 == 0:
print(idx)
word_stemmized = self.transform(word)
if word_stemmized not in self.word_stemmized_to_words:
self.word_stemmized_to_words[word_stemmized] = set()
self.word_stemmized_to_words[word_stemmized].add(word)
        word_stems = list(self.word_stemmized_to_words.keys())
        word_stems.sort()
        self.bisect_by_word_stems = Matcher(word_stems)
pass
def match(self, word, data_sources):
word_stem = self.transform(word)
lev = levenshtein_automata(word_stem).to_dfa()
def lookup_ds(word_stem_candidate):
if not _matching_threshold(word_stem, word_stem_candidate):
return False
if len(data_sources) == 0:
return True
s = set(
ds
for w in self.word_stemmized_to_words[word_stem_candidate]
for ds in self.words_to_datasources[w]
)
intersect = s.intersection(data_sources)
return len(intersect) > 0
res = list(_find_all_matches(lev, self.bisect_by_word_stems, lookup_ds))
return res
@staticmethod
def __stemmize_word(word):
word_parts = word.split(' ')
if len(word_parts) < 2:
return word
else:
word_stemmized = \
word_parts[0] + ' ' + ' '.join(LatinStemmer.stemmize(word_part).stem for word_part in word_parts[1:])
return word_stemmized
@staticmethod
def transform(word):
word_stemmized = MatcherByStem.__stemmize_word(word.lower())
return word_stemmized
def lookup(self, word_transformed):
return self.word_stemmized_to_words.get(word_transformed, set())
class MatcherByVerbatim:
def __init__(self, words_to_datasources):
print("Constructing MatcherByVerbatim")
self.words_to_datasources = words_to_datasources
self.words_verbatims_to_words = {}
for idx, word in enumerate(words_to_datasources.keys()):
if idx > 0 and idx % 100000 == 0:
print(idx)
word_verbatim = self.transform(word)
if word_verbatim not in self.words_verbatims_to_words:
self.words_verbatims_to_words[word_verbatim] = set()
self.words_verbatims_to_words[word_verbatim].add(word)
word_verbatims = list(self.words_verbatims_to_words.keys())
word_verbatims.sort()
self.bisect_by_word_verbatims = Matcher(word_verbatims)
pass
def match(self, word, data_sources):
word_verbatim = self.transform(word)
lev = levenshtein_automata(word_verbatim).to_dfa()
def lookup_ds(word_verbatim_candidate):
if not _matching_threshold(word_verbatim, word_verbatim_candidate):
return False
if len(data_sources) == 0:
return True
s = set(ds
for w_orig in self.words_verbatims_to_words[word_verbatim_candidate]
for ds in self.words_to_datasources[w_orig])
intersect = s.intersection(data_sources)
return len(intersect) > 0
res = list(_find_all_matches(lev, self.bisect_by_word_verbatims, lookup_ds))
return res
@staticmethod
def transform(word):
word_verbatim = word.lower().replace('j', 'i').replace('v', 'u')
return word_verbatim
def lookup(self, word_transformed):
return self.words_verbatims_to_words.get(word_transformed, set())
class MatcherByGenusOnly:
def __init__(self, words_to_datasources):
print("Constructing MatcherByGenusOnly")
self.words_to_datasources = words_to_datasources
self.words_genus_only_to_words = {}
for idx, word in enumerate(words_to_datasources.keys()):
if idx > 0 and idx % 100000 == 0:
print(idx)
word_transformed = self.transform(word)
if ' ' in word_transformed:
continue
if word_transformed not in self.words_genus_only_to_words:
self.words_genus_only_to_words[word_transformed] = set()
self.words_genus_only_to_words[word_transformed].add(word)
pass
def match(self, word, data_sources):
word_transformed = self.transform(word)
res = self.words_genus_only_to_words.get(word_transformed, set())
if data_sources:
res = [r for r in res
if len(data_sources.intersection(self.words_to_datasources[r]))]
return res
@staticmethod
def transform(word):
word_verbatim = word.lower().replace('j', 'i').replace('v', 'u')
return word_verbatim
def lookup(self, word_transformed):
res = self.words_genus_only_to_words.get(word_transformed, set())
return res
@staticmethod
def verify(word_cleaned):
return ' ' not in word_cleaned
class MatcherByLetter:
def __init__(self, words_to_datasources):
print "Constructing MatcherByLetter"
self.words_to_datasources = words_to_datasources
self.letter_to_matching = {}
for idx, word in enumerate(words_to_datasources.keys()):
if idx > 0 and idx % 100000 == 0:
print(idx)
letter, word_rest = self.transform(word)
if letter not in self.letter_to_matching:
self.letter_to_matching[letter] = {'words_to_datasources': {}, 'words_rest_to_words_full': {}}
if word_rest is not None:
self.letter_to_matching[letter]['words_to_datasources'][word_rest] = words_to_datasources[word]
if word_rest not in self.letter_to_matching[letter]['words_rest_to_words_full']:
self.letter_to_matching[letter]['words_rest_to_words_full'][word_rest] = set()
self.letter_to_matching[letter]['words_rest_to_words_full'][word_rest].add(word)
for letter in self.letter_to_matching.keys():
print "Constructing MatcherByLetter | letter", letter
finder = Finder(self.letter_to_matching[letter]['words_to_datasources'],
matcher_by_letter_context=True)
self.letter_to_matching[letter]['finder'] = finder
def match(self, word, data_sources):
letter, word_rest = self.transform(word)
if letter not in self.letter_to_matching:
return []
matching = self.letter_to_matching[letter]
res = matching['finder'].find_all_matches(word_rest, data_sources)
res = [
word_full
for r in res
for word_full in matching['words_rest_to_words_full'][r]
]
if data_sources:
res = [r for r in res
if len(data_sources.intersection(self.words_to_datasources[r]))]
return res
@staticmethod
def transform(word):
word_parts = word.split(' ')
word_rest = ' '.join(word_parts[1:])
letter = word[0].lower()
word_rest = word_rest if len(word_rest) > 0 else None
return letter, word_rest
@staticmethod
def verify(word):
word_parts = word.split(' ')
return len(word_parts) > 0 and len(word_parts[0]) == 2 and word_parts[0].endswith('.')
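    # Illustrative note (input is hypothetical): verify('a. niger') returns
    # True; a two-character dotted first token marks an abbreviated genus,
    # which match() resolves through the per-letter sub-finders built above.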
class Finder:
def __init__(self, words_to_datasources, matcher_by_letter_context=False):
self.words_to_datasources = words_to_datasources
self.matcher_by_stem = MatcherByStem(words_to_datasources)
self.matcher_by_verbatim = MatcherByVerbatim(words_to_datasources)
self.matcher_by_letter_context = matcher_by_letter_context
if not self.matcher_by_letter_context:
self.matcher_by_genus_only = MatcherByGenusOnly(words_to_datasources)
self.matcher_by_letter = MatcherByLetter(words_to_datasources)
def __pipeline(self, word, data_sources=set()):
word_cleaned = re.sub('\s+', ' ', word.strip()).lower()
print 'request: ', word_cleaned, '|', data_sources
if not self.matcher_by_letter_context:
if MatcherByGenusOnly.verify(word_cleaned):
matches_genus_only = self.matcher_by_genus_only.match(word_cleaned, data_sources)
print 'single word match', matches_genus_only
res = [
w
for match_genus_only in matches_genus_only
for w in self.matcher_by_genus_only.lookup(match_genus_only)
]
print 'single word match (filtered)', res
return res
if MatcherByLetter.verify(word_cleaned):
self.matcher_by_letter.transform(word_cleaned)
matches_by_letter = self.matcher_by_letter.match(word_cleaned, data_sources)
print 'matches_by_letter', matches_by_letter
return matches_by_letter
matches_by_stem = self.matcher_by_stem.match(word_cleaned, data_sources)
print 'matches_by_stem', matches_by_stem
if matches_by_stem:
res = [
w
for match_by_stem in matches_by_stem
for w in self.matcher_by_stem.lookup(match_by_stem)
]
if data_sources:
res = [r for r in res
if len(data_sources.intersection(self.words_to_datasources[r]))]
print 'matches_by_stem (filtered)', res
else:
matches_by_verbatim = self.matcher_by_verbatim.match(word_cleaned, data_sources)
res = [
w
for match_by_verbatim in matches_by_verbatim
for w in self.matcher_by_verbatim.lookup(match_by_verbatim)
]
print 'matches_by_verbatim', matches_by_verbatim
if data_sources:
res = [r for r in res
if len(data_sources.intersection(self.words_to_datasources[r]))]
print 'matches_by_verbatim (filtered)', res
print 'res:', res
return res
def find_all_matches(self, word, data_sources=set()):
import traceback
try:
return self.__pipeline(word, data_sources)
except Exception as ex:
print ex
traceback.print_exc()
return []
| GlobalNamesArchitecture/gnmatcher | matcher/src/main/resources/levenshtein_py/automata.py | Python | mit | 21,685 |
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sms_proxy.settings import DB, TEST_DB, DEBUG_MODE
if DEBUG_MODE:
engine = create_engine('sqlite:////tmp/{}'.format(TEST_DB),
convert_unicode=True)
else:
engine = create_engine('sqlite:///{}'.format(DB), convert_unicode=True)
db_session = scoped_session(sessionmaker(autocommit=False,
autoflush=False,
bind=engine))
Base = declarative_base()
Base.query = db_session.query_property()
def init_db():
import models
Base.metadata.create_all(bind=engine)
def destroy_db():
import models
Base.metadata.drop_all(bind=engine)
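# Illustrative usage sketch (the model class is hypothetical; this follows
# the usual scoped_session pattern):
#
#   init_db()
#   db_session.add(SomeModel(...))
#   db_session.commit()
#   db_session.remove()  # e.g. from a Flask teardown handler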
| flowroute/sms-proxy | sms_proxy/database.py | Python | mit | 811 |
from readinglistlib import ReadingListReader
| anoved/ReadingListReader | readinglistlib/__init__.py | Python | mit | 45 |
# -*- coding: utf-8 -*-
'''
Task Coach - Your friendly task manager
Copyright (C) 2004-2010 Task Coach developers <developers@taskcoach.org>
Task Coach is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Task Coach is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re, os, stat, datetime, StringIO, wx
import xml.etree.ElementTree as ET
from taskcoachlib.domain import date, effort, task, category, note, attachment
from taskcoachlib.syncml.config import SyncMLConfigNode, createDefaultSyncConfig
from taskcoachlib.thirdparty.guid import generate
from taskcoachlib.i18n import translate
from taskcoachlib import meta
from .. import sessiontempfile # pylint: disable-msg=F0401
class PIParser(ET.XMLTreeBuilder):
"""See http://effbot.org/zone/element-pi.htm"""
def __init__(self):
ET.XMLTreeBuilder.__init__(self)
self._parser.ProcessingInstructionHandler = self.handle_pi
self.tskversion = meta.data.tskversion
def handle_pi(self, target, data):
if target == 'taskcoach':
matchObject = re.search('tskversion="(\d+)"', data)
self.tskversion = int(matchObject.group(1))
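# Illustrative note (PI format assumed from the handler above): a task file
# starts with a processing instruction such as
# <?taskcoach release="..." tskversion="24"?>; handle_pi() captures the
# tskversion so the reader can reject files written by a newer Task Coach.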
class XMLReaderTooNewException(Exception):
pass
class XMLReader(object):
defaultStartTime = (0, 0, 0, 0)
defaultEndTime = (23, 59, 59, 999999)
def __init__(self, fd):
self.__fd = fd
self.__defaultFontSize = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT).GetPointSize()
def read(self):
if self._hasBrokenLines():
self._fixBrokenLines()
parser = PIParser()
tree = ET.parse(self.__fd, parser)
root = tree.getroot()
self.__tskversion = parser.tskversion # pylint: disable-msg=W0201
if self.__tskversion > meta.data.tskversion:
raise XMLReaderTooNewException # Version number of task file is too high
tasks = self._parseTaskNodes(root)
self._resolvePrerequisitesAndDependencies(tasks)
categorizables = tasks[:]
for eachTask in tasks:
categorizables.extend(eachTask.children(recursive=True))
if self.__tskversion <= 15:
notes = []
else:
notes = self._parseNoteNodes(root)
categorizables.extend(notes)
for eachNote in notes:
categorizables.extend(eachNote.children(recursive=True))
categorizablesById = dict([(categorizable.id(), categorizable) for \
categorizable in categorizables])
if self.__tskversion <= 13:
categories = self._parseCategoryNodesFromTaskNodes(root,
categorizablesById)
else:
categories = self._parseCategoryNodes(root, categorizablesById)
guid = self.__parseGUIDNode(root.find('guid'))
syncMLConfig = self._parseSyncMLNode(root, guid)
return tasks, categories, notes, syncMLConfig, guid
def _hasBrokenLines(self):
''' tskversion 24 may contain newlines in element tags. '''
hasBrokenLines = '><spds><sources><TaskCoach-\n' in self.__fd.read()
self.__fd.seek(0)
return hasBrokenLines
def _fixBrokenLines(self):
''' Remove spurious newlines from element tags. '''
self.__origFd = self.__fd # pylint: disable-msg=W0201
self.__fd = StringIO.StringIO()
lines = self.__origFd.readlines()
for index in xrange(len(lines)):
if lines[index].endswith('<TaskCoach-\n') or lines[index].endswith('</TaskCoach-\n'):
lines[index] = lines[index][:-1] # Remove newline
lines[index+1] = lines[index+1][:-1] # Remove newline
self.__fd.write(''.join(lines))
self.__fd.seek(0)
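        # Worked sketch (hypothetical fragment): a tag split across lines as
        # '<TaskCoach-\n' followed by 'sync>\n' has the trailing newline stripped
        # from both lines, so the parser sees the element tag as one logical line.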
def _parseTaskNodes(self, node):
return [self._parseTaskNode(child) for child in node.findall('task')]
def _resolvePrerequisitesAndDependencies(self, tasks):
tasksById = dict()
def collectIds(tasks):
for task in tasks:
tasksById[task.id()] = task
collectIds(task.children())
def addPrerequisitesAndDependencies(tasks):
for task in tasks:
dummyPrerequisites = task.prerequisites()
prerequisites = set()
for dummyPrerequisite in dummyPrerequisites:
prerequisites.add(tasksById[dummyPrerequisite.id])
task.setPrerequisites(prerequisites)
for prerequisite in prerequisites:
prerequisite.addDependencies([task])
addPrerequisitesAndDependencies(task.children())
collectIds(tasks)
addPrerequisitesAndDependencies(tasks)
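        # Illustrative flow (hypothetical ids): a task parsed with
        # prerequisites="abc def" initially holds DummyPrerequisite('abc') and
        # DummyPrerequisite('def'); the pass above swaps those placeholders for
        # the real Task objects and registers the reverse dependency links.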
def _parseCategoryNodes(self, node, categorizablesById):
return [self._parseCategoryNode(child, categorizablesById) \
for child in node.findall('category')]
def _parseNoteNodes(self, node):
return [self._parseNoteNode(child) for child in node.findall('note')]
def _parseCategoryNode(self, categoryNode, categorizablesById):
kwargs = self._parseBaseCompositeAttributes(categoryNode,
self._parseCategoryNodes, categorizablesById)
kwargs.update(dict(\
notes=self._parseNoteNodes(categoryNode),
filtered=self._parseBoolean(categoryNode.attrib.get('filtered', 'False')),
exclusiveSubcategories=self._parseBoolean(categoryNode.attrib.get('exclusiveSubcategories', 'False'))))
if self.__tskversion < 19:
categorizableIds = categoryNode.attrib.get('tasks', '')
else:
categorizableIds = categoryNode.attrib.get('categorizables', '')
if categorizableIds:
# The category tasks attribute might contain id's that refer to tasks that
# have been deleted (a bug in release 0.61.5), be prepared:
categorizables = [categorizablesById[categorizableId] for categorizableId in \
categorizableIds.split(' ') \
if categorizableId in categorizablesById]
else:
categorizables = []
kwargs['categorizables'] = categorizables
if self.__tskversion > 20:
kwargs['attachments'] = self._parseAttachmentNodes(categoryNode)
return category.Category(**kwargs) # pylint: disable-msg=W0142
def _parseCategoryNodesFromTaskNodes(self, root, tasks):
''' In tskversion <=13 category nodes were subnodes of task nodes. '''
taskNodes = root.findall('.//task')
categoryMapping = self._parseCategoryNodesWithinTaskNodes(taskNodes)
subjectCategoryMapping = {}
for taskId, categories in categoryMapping.items():
for subject in categories:
if subject in subjectCategoryMapping:
cat = subjectCategoryMapping[subject]
else:
cat = category.Category(subject)
subjectCategoryMapping[subject] = cat
theTask = tasks[taskId]
cat.addCategorizable(theTask)
theTask.addCategory(cat)
return subjectCategoryMapping.values()
def _parseCategoryNodesWithinTaskNodes(self, taskNodes):
''' In tskversion <=13 category nodes were subnodes of task nodes. '''
categoryMapping = {}
for node in taskNodes:
taskId = node.attrib['id']
categories = [child.text for child in node.findall('category')]
categoryMapping.setdefault(taskId, []).extend(categories)
return categoryMapping
def _parseTaskNode(self, taskNode):
class DummyPrerequisite(object):
def __init__(self, id):
self.id = id
def __getattr__(self, attr):
''' Ignore all method calls. '''
return lambda *args, **kwargs: None
kwargs = self._parseBaseCompositeAttributes(taskNode, self._parseTaskNodes)
kwargs.update(dict(
startDateTime=date.parseDateTime(taskNode.attrib.get('startdate', ''),
*self.defaultStartTime),
dueDateTime=date.parseDateTime(taskNode.attrib.get('duedate', ''),
*self.defaultEndTime),
completionDateTime=date.parseDateTime(taskNode.attrib.get('completiondate', ''),
*self.defaultEndTime),
percentageComplete=int(taskNode.attrib.get('percentageComplete','0')),
budget=date.parseTimeDelta(taskNode.attrib.get('budget', '')),
priority=int(taskNode.attrib.get('priority', '0')),
hourlyFee=float(taskNode.attrib.get('hourlyFee', '0')),
fixedFee=float(taskNode.attrib.get('fixedFee', '0')),
reminder=self._parseDateTime(taskNode.attrib.get('reminder', '')),
# Here we just add the ids, they will be converted to object references later on:
prerequisites=[DummyPrerequisite(id) for id in taskNode.attrib.get('prerequisites', '').split(' ') if id],
shouldMarkCompletedWhenAllChildrenCompleted= \
self._parseBoolean(taskNode.attrib.get('shouldMarkCompletedWhenAllChildrenCompleted', '')),
efforts=self._parseEffortNodes(taskNode),
notes=self._parseNoteNodes(taskNode),
recurrence=self._parseRecurrence(taskNode)))
if self.__tskversion > 20:
kwargs['attachments'] = self._parseAttachmentNodes(taskNode)
return task.Task(**kwargs) # pylint: disable-msg=W0142
def _parseRecurrence(self, taskNode):
if self.__tskversion <= 19:
parseKwargs = self._parseRecurrenceAttributesFromTaskNode
else:
parseKwargs = self._parseRecurrenceNode
return date.Recurrence(**parseKwargs(taskNode))
def _parseRecurrenceNode(self, taskNode):
''' Since tskversion >= 20, recurrence information is stored in a
separate node. '''
kwargs = dict(unit='', amount=1, count=0, max=0, sameWeekday=False)
node = taskNode.find('recurrence')
if node is not None:
kwargs = dict(unit=node.attrib.get('unit', ''),
amount=int(node.attrib.get('amount', '1')),
count=int(node.attrib.get('count', '0')),
max=int(node.attrib.get('max', '0')),
sameWeekday=self._parseBoolean(node.attrib.get('sameWeekday', 'False')))
return kwargs
def _parseRecurrenceAttributesFromTaskNode(self, taskNode):
''' In tskversion <=19 recurrence information was stored as attributes
of task nodes. '''
return dict(unit=taskNode.attrib.get('recurrence', ''),
count=int(taskNode.attrib.get('recurrenceCount', '0')),
amount=int(taskNode.attrib.get('recurrenceFrequency', '1')),
max=int(taskNode.attrib.get('maxRecurrenceCount', '0')))
def _parseNoteNode(self, noteNode):
''' Parse the attributes and child notes from the noteNode. '''
kwargs = self._parseBaseCompositeAttributes(noteNode, self._parseNoteNodes)
if self.__tskversion > 20:
kwargs['attachments'] = self._parseAttachmentNodes(noteNode)
return note.Note(**kwargs) # pylint: disable-msg=W0142
def _parseBaseAttributes(self, node):
''' Parse the attributes all composite domain objects share, such as
id, subject, description, and return them as a
keyword arguments dictionary that can be passed to the domain
object constructor. '''
bgColorAttribute = 'color' if self.__tskversion <= 27 else 'bgColor'
attributes = dict(id=node.attrib.get('id', ''),
subject=node.attrib.get('subject', ''),
description=self._parseDescription(node),
fgColor=self._parseTuple(node.attrib.get('fgColor', ''), None),
bgColor=self._parseTuple(node.attrib.get(bgColorAttribute, ''), None),
font=self._parseFontDesc(node.attrib.get('font', '')),
icon=node.attrib.get('icon', ''),
selectedIcon=node.attrib.get('selectedIcon', ''))
if self.__tskversion <= 20:
attributes['attachments'] = self._parseAttachmentsBeforeVersion21(node)
if self.__tskversion >= 22:
attributes['status'] = int(node.attrib.get('status', '1'))
return attributes
def _parseBaseCompositeAttributes(self, node, parseChildren, *parseChildrenArgs):
"""Same as _parseBaseAttributes, but also parse children and expandedContexts."""
kwargs = self._parseBaseAttributes(node)
kwargs['children'] = parseChildren(node, *parseChildrenArgs)
kwargs['expandedContexts'] = self._parseTuple(node.attrib.get('expandedContexts', ''), [])
return kwargs
def _parseAttachmentsBeforeVersion21(self, parent):
path, name = os.path.split(os.path.abspath(self.__fd.name)) # pylint: disable-msg=E1103
name = os.path.splitext(name)[0]
attdir = os.path.normpath(os.path.join(path, name + '_attachments'))
attachments = []
for node in parent.findall('attachment'):
if self.__tskversion <= 16:
args = (node.text,)
kwargs = dict()
else:
args = (os.path.join(attdir, node.find('data').text), node.attrib['type'])
description = self._parseDescription(node)
kwargs = dict(subject=description,
description=description)
try:
attachments.append(attachment.AttachmentFactory(*args, **kwargs)) # pylint: disable-msg=W0142
except IOError:
# Mail attachment, file doesn't exist. Ignore this.
pass
return attachments
def _parseEffortNodes(self, parent):
return [self._parseEffortNode(node) for node in parent.findall('effort')]
def _parseEffortNode(self, effortNode):
kwargs = {}
if self.__tskversion >= 22:
kwargs['status'] = int(effortNode.attrib['status'])
if self.__tskversion >= 29:
kwargs['id'] = effortNode.attrib['id']
start = effortNode.attrib.get('start', '')
stop = effortNode.attrib.get('stop', '')
description = self._parseDescription(effortNode)
# pylint: disable-msg=W0142
return effort.Effort(task=None, start=date.parseDateTime(start),
stop=date.parseDateTime(stop), description=description, **kwargs)
def _parseSyncMLNode(self, nodes, guid):
syncML = createDefaultSyncConfig(guid)
nodeName = 'syncmlconfig'
if self.__tskversion < 25:
nodeName = 'syncml'
for node in nodes.findall(nodeName):
self._parseSyncMLNodes(node, syncML)
return syncML
def _parseSyncMLNodes(self, parent, cfgNode):
for node in parent:
if node.tag == 'property':
cfgNode.set(node.attrib['name'], self._parseText(node))
else:
for childCfgNode in cfgNode.children():
if childCfgNode.name == node.tag:
break
else:
tag = node.tag
childCfgNode = SyncMLConfigNode(tag)
cfgNode.addChild(childCfgNode)
self._parseSyncMLNodes(node, childCfgNode) # pylint: disable-msg=W0631
def __parseGUIDNode(self, node):
guid = self._parseText(node).strip()
return guid if guid else generate()
def _parseAttachmentNodes(self, parent):
result = []
for node in parent.findall('attachment'):
try:
result.append(self._parseAttachmentNode(node))
except IOError:
pass
return result
def _parseAttachmentNode(self, attachmentNode):
kwargs = self._parseBaseAttributes(attachmentNode)
kwargs['notes'] = self._parseNoteNodes(attachmentNode)
if self.__tskversion <= 22:
path, name = os.path.split(os.path.abspath(self.__fd.name)) # pylint: disable-msg=E1103
name, ext = os.path.splitext(name)
attdir = os.path.normpath(os.path.join(path, name + '_attachments'))
location = os.path.join(attdir, attachmentNode.attrib['location'])
else:
if attachmentNode.attrib.has_key('location'):
location = attachmentNode.attrib['location']
else:
dataNode = attachmentNode.find('data')
if dataNode is None:
                    raise ValueError, 'Neither location nor data is defined for this attachment.'
data = self._parseText(dataNode)
ext = dataNode.attrib['extension']
location = sessiontempfile.get_temp_file(suffix=ext)
file(location, 'wb').write(data.decode('base64'))
if os.name == 'nt':
os.chmod(location, stat.S_IREAD)
return attachment.AttachmentFactory(location, # pylint: disable-msg=W0142
attachmentNode.attrib['type'],
**kwargs)
def _parseDescription(self, node):
if self.__tskversion <= 6:
description = node.attrib.get('description', '')
else:
description = self._parseText(node.find('description'))
return description
def _parseText(self, textNode):
text = u'' if textNode is None else textNode.text or u''
if self.__tskversion >= 24:
# Strip newlines
if text.startswith('\n'):
text = text[1:]
if text.endswith('\n'):
text = text[:-1]
return text
def _parseDateTime(self, dateTimeText, *timeDefaults):
return self._parse(dateTimeText, date.parseDateTime, None, *timeDefaults)
def _parseFontDesc(self, fontDesc, defaultValue=None):
if fontDesc:
try:
font = wx.FontFromNativeInfoString(fontDesc)
except wx.PyAssertionError:
return defaultValue
if font.IsOk():
if font.GetPointSize() < 4:
font.SetPointSize(self.__defaultFontSize)
return font
return defaultValue
def _parseBoolean(self, booleanText, defaultValue=None):
def textToBoolean(text):
if text in ['True', 'False']:
return text == 'True'
else:
raise ValueError, "Expected 'True' or 'False', got '%s'"%booleanText
return self._parse(booleanText, textToBoolean, defaultValue)
def _parseTuple(self, tupleText, defaultValue=None):
if tupleText.startswith('(') and tupleText.endswith(')'):
return self._parse(tupleText, eval, defaultValue)
else:
return defaultValue
def _parse(self, text, parseFunction, defaultValue, *parseArgs):
try:
return parseFunction(text, *parseArgs) if parseArgs else parseFunction(text)
except ValueError:
return defaultValue
class TemplateXMLReader(XMLReader):
def __init__(self, *args, **kwargs):
super(TemplateXMLReader, self).__init__(*args, **kwargs)
self.__context = dict()
self.__context.update(date.__dict__)
self.__context.update(datetime.__dict__)
def read(self):
return super(TemplateXMLReader, self).read()[0][0]
def _parseTaskNode(self, taskNode):
for name in ['startdate', 'duedate', 'completiondate', 'reminder']:
if taskNode.attrib.has_key(name + 'tmpl'):
taskNode.attrib[name] = str(eval(taskNode.attrib[name + 'tmpl'], self.__context))
if taskNode.attrib.has_key('subject'):
taskNode.attrib['subject'] = translate(taskNode.attrib['subject'])
return super(TemplateXMLReader, self)._parseTaskNode(taskNode)
| wdmchaft/taskcoach | taskcoachlib/persistence/xml/reader.py | Python | gpl-3.0 | 20,854 |
# WARPnet Client<->Server Architecture
# WARPnet Parameter Definitions
#
# Author: Siddharth Gupta
#ETH_INTERFACE = 'en0'
#LOCAL_MAC_ADDRESS = [0x00,0x50,0xc2,0x63,0x3f,0xee]
# Ethernet Types (with or without header)
ETH_HEADER = 0x9090
ETH_NO_HEADER = 0x9292
ETH_RECEIVE = 0x9191
| shailcoolboy/Warp-Trinity | ResearchApps/Measurement/warpnet_framework/warpnet_server_params.py | Python | bsd-2-clause | 284 |
# Copyright 2019 ForgeFlow S.L. (http://www.forgeflow.com)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
"name": "Account Move Budget",
"summary": "Create Accounting Budgets",
"version": "14.0.1.0.0",
"category": "Accounting & Finance",
"website": "https://github.com/OCA/account-financial-tools",
"author": "ForgeFlow, " "Odoo Community Association (OCA)",
"license": "AGPL-3",
"application": False,
"installable": True,
"depends": ["account", "date_range"],
"data": [
"security/ir.model.access.csv",
"views/account_move_budget_line_views.xml",
"views/account_move_budget_views.xml",
],
}
| OCA/account-financial-tools | account_move_budget/__manifest__.py | Python | agpl-3.0 | 683 |
#----------------------------------------------------------------------
# $Id: mozilla_dbd.py,v 1.1 2004/06/12 03:13:27 lsaffre Exp $
# Copyright: (c) 2003-2004 Luc Saffre
# License: GPL
#----------------------------------------------------------------------
import types
import warnings
from lino.adamo import *  # NOTE: the original import list was truncated; a star import is assumed here
from datatypes import *
from rowattrs import Field, Pointer #, Detail
from connection import Connection
class MailboxSchema(Schema):
def defineTables(self):
self.addTable(Folders())
self.addTable(Addresses())
self.addTable(Messages())
self.addTable( babel.Languages(), "LANGS")
class MailboxConnection(Connection):
    pass  # body missing from this excerpt; `pass` assumed so the module parses
| MaxTyutyunnikov/lino | obsolete/src/lino/adamo/mozilla_dbd.py | Python | gpl-3.0 | 646 |
import time
import gevent
import operator
from collections import OrderedDict
from protocol import BaseProtocol
from p2p_protocol import P2PProtocol
from service import WiredService
import multiplexer
from muxsession import MultiplexedSession
from crypto import ECIESDecryptionError
import slogging
import gevent.socket
import rlpxcipher
log = slogging.get_logger('p2p.peer')
class UnknownCommandError(Exception):
"raised if we recive an unknown command for a known protocol"
pass
class Peer(gevent.Greenlet):
remote_client_version = ''
offset_based_dispatch = False
wait_read_timeout = 0.001
dumb_remote_timeout = 10.0
def __init__(self, peermanager, connection, remote_pubkey=None):
super(Peer, self).__init__()
self.is_stopped = False
self.hello_received = False
self.peermanager = peermanager
self.connection = connection
self.config = peermanager.config
self.protocols = OrderedDict()
log.debug('peer init', peer=self)
# create multiplexed encrypted session
privkey = self.config['node']['privkey_hex'].decode('hex')
hello_packet = P2PProtocol.get_hello_packet(self)
self.mux = MultiplexedSession(privkey, hello_packet,
token_by_pubkey=dict(), remote_pubkey=remote_pubkey)
self.remote_pubkey = remote_pubkey
# register p2p protocol
assert issubclass(self.peermanager.wire_protocol, P2PProtocol)
self.connect_service(self.peermanager)
# assure, we don't get messages while replies are not read
self.safe_to_read = gevent.event.Event()
self.safe_to_read.set()
# Stop peer if hello not received in self.dumb_remote_timeout seconds
gevent.spawn_later(self.dumb_remote_timeout, self.check_if_dumb_remote)
@property
def remote_pubkey(self):
"if peer is responder, then the remote_pubkey will not be available"
"before the first packet is received"
return self.mux.remote_pubkey
@remote_pubkey.setter
def remote_pubkey(self, value):
self.remote_pubkey_available = True if value else False
self.mux.remote_pubkey = value
def __repr__(self):
try:
pn = self.connection.getpeername()
except gevent.socket.error:
pn = ('not ready',)
try:
cv = '/'.join(self.remote_client_version.split('/')[:2])
except:
cv = self.remote_client_version
return '<Peer%r %s>' % (pn, cv)
# return '<Peer%r>' % repr(pn)
def report_error(self, reason):
try:
ip_port = self.ip_port
except:
ip_port = 'ip_port not available fixme'
self.peermanager.errors.add(ip_port, reason, self.remote_client_version)
@property
def ip_port(self):
try:
return self.connection.getpeername()
except Exception as e:
log.debug('ip_port failed', e=e)
raise e
def connect_service(self, service):
assert isinstance(service, WiredService)
protocol_class = service.wire_protocol
assert issubclass(protocol_class, BaseProtocol)
        # create protocol instance which connects peer with service
protocol = protocol_class(self, service)
# register protocol
assert protocol_class not in self.protocols
log.debug('registering protocol', protocol=protocol.name, peer=self)
self.protocols[protocol_class] = protocol
self.mux.add_protocol(protocol.protocol_id)
protocol.start()
def has_protocol(self, protocol):
assert issubclass(protocol, BaseProtocol)
return protocol in self.protocols
def receive_hello(self, proto, version, client_version_string, capabilities,
listen_port, remote_pubkey):
log.info('received hello', version=version,
client_version=client_version_string, capabilities=capabilities)
assert isinstance(remote_pubkey, bytes)
assert len(remote_pubkey) == 64
if self.remote_pubkey_available:
assert self.remote_pubkey == remote_pubkey
self.hello_received = True
# enable backwards compatibility for legacy peers
if version < 5:
self.offset_based_dispatch = True
max_window_size = 2**32 # disable chunked transfers
# call peermanager
agree = self.peermanager.on_hello_received(
proto, version, client_version_string, capabilities, listen_port, remote_pubkey)
if not agree:
return
self.remote_client_version = client_version_string
self.remote_pubkey = remote_pubkey
# register in common protocols
log.debug('connecting services', services=self.peermanager.wired_services)
remote_services = dict((name, version) for name, version in capabilities)
for service in sorted(self.peermanager.wired_services, key=operator.attrgetter('name')):
proto = service.wire_protocol
assert isinstance(service, WiredService)
if proto.name in remote_services:
if remote_services[proto.name] == proto.version:
                    if service != self.peermanager: # p2p protocol already registered
self.connect_service(service)
else:
log.debug('wrong version', service=proto.name, local_version=proto.version,
remote_version=remote_services[proto.name])
self.report_error('wrong version')
@property
def capabilities(self):
return [(s.wire_protocol.name, s.wire_protocol.version)
for s in self.peermanager.wired_services]
# sending p2p messages
def send_packet(self, packet):
for i, protocol in enumerate(self.protocols.values()):
if packet.protocol_id == protocol.protocol_id:
break
assert packet.protocol_id == protocol.protocol_id, 'no protocol found'
        log.debug('send packet', cmd=protocol.cmd_by_id[packet.cmd_id], protocol=protocol.name,
peer=self)
# rewrite cmd_id (backwards compatibility)
if self.offset_based_dispatch:
for i, protocol in enumerate(self.protocols.values()):
if packet.protocol_id > i:
packet.cmd_id += (0 if protocol.max_cmd_id == 0 else protocol.max_cmd_id + 1)
if packet.protocol_id == protocol.protocol_id:
break
packet.protocol_id = 0
self.mux.add_packet(packet)
# receiving p2p messages
def protocol_cmd_id_from_packet(self, packet):
# offset-based dispatch (backwards compatibility)
if self.offset_based_dispatch:
max_id = 0
for protocol in self.protocols.values():
if packet.cmd_id < max_id + protocol.max_cmd_id + 1:
return protocol, packet.cmd_id - (0 if max_id == 0 else max_id + 1)
max_id += protocol.max_cmd_id
raise UnknownCommandError('no protocol for id %s' % packet.cmd_id)
# new-style dispatch based on protocol_id
for i, protocol in enumerate(self.protocols.values()):
if packet.protocol_id == protocol.protocol_id:
return protocol, packet.cmd_id
raise UnknownCommandError('no protocol for protocol id %s' % packet.protocol_id)
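    # Worked example for the offset-based dispatch above (hypothetical numbers):
    # with protocols p2p (max_cmd_id 16) and eth (max_cmd_id 8) registered in
    # that order, a legacy packet with cmd_id 19 fails the first check
    # (19 < 0 + 16 + 1 is False), max_id becomes 16, passes the second check
    # (19 < 16 + 8 + 1), and resolves to (eth, 19 - (16 + 1)) == (eth, 2).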
def _handle_packet(self, packet):
assert isinstance(packet, multiplexer.Packet)
try:
protocol, cmd_id = self.protocol_cmd_id_from_packet(packet)
        except UnknownCommandError as e:
log.error('received unknown cmd', error=e, packet=packet)
return
log.debug('recv packet', cmd=protocol.cmd_by_id[
cmd_id], protocol=protocol.name, orig_cmd_id=packet.cmd_id)
packet.cmd_id = cmd_id # rewrite
protocol.receive_packet(packet)
def send(self, data):
if not data:
return
self.safe_to_read.clear() # make sure we don't accept any data until message is sent
try:
self.connection.sendall(data) # check if gevent chunkes and switches contexts
log.debug('wrote data', size=len(data), ts=time.time())
except gevent.socket.error as e:
log.debug('write error', errno=e.errno, reason=e.strerror)
self.report_error('write error %r' % e.strerror)
self.stop()
except gevent.socket.timeout:
log.debug('write timeout')
self.report_error('write timeout')
self.stop()
self.safe_to_read.set()
def _run_egress_message(self):
while not self.is_stopped:
self.send(self.mux.message_queue.get())
def _run_decoded_packets(self):
# handle decoded packets
while not self.is_stopped:
self._handle_packet(self.mux.packet_queue.get()) # get_packet blocks
def _run_ingress_message(self):
log.debug('peer starting main loop')
assert not self.connection.closed, "connection is closed"
gevent.spawn(self._run_decoded_packets)
gevent.spawn(self._run_egress_message)
while not self.is_stopped:
self.safe_to_read.wait()
try:
gevent.socket.wait_read(self.connection.fileno())
except gevent.socket.error as e:
log.debug('read error', errno=e.errno, reason=e.strerror, peer=self)
self.report_error('network error %s' % e.strerror)
                if e.errno in (9,):
# ('Bad file descriptor')
self.stop()
else:
raise e
break
try:
imsg = self.connection.recv(4096)
except gevent.socket.error as e:
log.debug('read error', errno=e.errno, reason=e.strerror, peer=self)
self.report_error('network error %s' % e.strerror)
                if e.errno in (50, 54, 60, 65, 104):
# (Network down, Connection reset by peer, timeout, no route to host,
# Connection reset by peer)
self.stop()
else:
raise e
break
if imsg:
log.debug('read data', ts=time.time(), size=len(imsg))
try:
self.mux.add_message(imsg)
except (rlpxcipher.RLPxSessionError, ECIESDecryptionError) as e:
log.debug('rlpx session error', peer=self, error=e)
self.report_error('rlpx session error')
self.stop()
except multiplexer.MultiplexerError as e:
log.debug('multiplexer error', peer=self, error=e)
self.report_error('multiplexer error')
self.stop()
_run = _run_ingress_message
def stop(self):
if not self.is_stopped:
self.is_stopped = True
log.debug('stopped', peer=self)
for p in self.protocols.values():
p.stop()
self.peermanager.peers.remove(self)
self.kill()
def check_if_dumb_remote(self):
"Stop peer if hello not received"
if not self.hello_received:
self.report_error('No hello in {} seconds'.format(self.dumb_remote_timeout))
self.stop()
| ms83/pydevp2p | devp2p/peer.py | Python | mit | 11,577 |
from __future__ import absolute_import
from sentry.api.bases.organization import OrganizationEndpoint, OrganizationIntegrationsPermission
from sentry.api.paginator import OffsetPaginator
from sentry.api.serializers import serialize
from sentry.models import ObjectStatus, OrganizationIntegration
class OrganizationIntegrationsEndpoint(OrganizationEndpoint):
permission_classes = (OrganizationIntegrationsPermission,)
def get(self, request, organization):
integrations = OrganizationIntegration.objects.filter(
organization=organization, status=ObjectStatus.VISIBLE
)
if "provider_key" in request.GET:
integrations = integrations.filter(integration__provider=request.GET["provider_key"])
# include the configurations by default if no param
include_config = True
if request.GET.get("includeConfig") == "0":
include_config = False
return self.paginate(
queryset=integrations,
request=request,
order_by="integration__name",
on_results=lambda x: serialize(x, request.user, include_config=include_config),
paginator_cls=OffsetPaginator,
)
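    # Example query (hypothetical values): GET ?provider_key=slack&includeConfig=0
    # returns only Slack-provider integrations, serialized without their configs.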
| beeftornado/sentry | src/sentry/api/endpoints/organization_integrations.py | Python | bsd-3-clause | 1,208 |
# The equalizer class and some audio eq functions are derived from
# 180093157554388993's work, with his permission
from pathlib import Path
from typing import Final
from redbot.core.i18n import Translator
_ = Translator("Audio", Path(__file__))
class Equalizer:
def __init__(self):
self.band_count: Final[int] = 15
self.bands = [0.0 for _loop_counter in range(self.band_count)]
def set_gain(self, band: int, gain: float):
if band < 0 or band >= self.band_count:
raise IndexError(f"Band {band} does not exist!")
gain = min(max(gain, -0.25), 1.0)
self.bands[band] = gain
def get_gain(self, band: int):
if band < 0 or band >= self.band_count:
raise IndexError(f"Band {band} does not exist!")
return self.bands[band]
def visualise(self):
block = ""
bands = [str(band + 1).zfill(2) for band in range(self.band_count)]
bottom = (" " * 8) + " ".join(bands)
gains = [1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0, -0.1, -0.2, -0.25]
for gain in gains:
prefix = ""
if gain > 0:
prefix = "+"
elif gain == 0:
prefix = " "
block += f"{prefix}{gain:.2f} | "
for value in self.bands:
if value >= gain:
block += "[] "
else:
block += " "
block += "\n"
block += bottom
return block
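# A minimal usage sketch (hypothetical; assumes redbot is installed so the
# Translator import above succeeds):
if __name__ == "__main__":
    eq = Equalizer()
    eq.set_gain(0, 0.5)    # boost the lowest band (gains clamp to [-0.25, 1.0])
    eq.set_gain(14, -0.1)  # cut the highest band
    print(eq.visualise())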
| palmtree5/Red-DiscordBot | redbot/cogs/audio/equalizer.py | Python | gpl-3.0 | 1,518 |
# DESCRIPTION
# Renders a PNG image like bacteria that mutate color as they spread. TRY IT. The output is awesome.
# DEPENDENCIES
# python 3 with numpy, queue, and pyimage modules installed (and others--see the import statements).
# USAGE
# Run this script through a Python interpreter without any parameters, and it will use a default set of parameters:
# python /path/to_this_script/color_growth.py
# To see available parameters, run this script with the --help switch:
# python /path/to_this_script/color_growth.py --help
# NOTES
# - GitHub user `scribblemaniac` sped up this script (with a submitted pull request) by orders of magnitude vs. an earlier version of the script. An image that took seven minutes to render took 5 seconds after the speedup.
# - Output file names are based on the date and time and random characters. Inspired by and drastically evolved from `color_fibers.py`, which was horked and adapted from:
#
# https://scipython.com/blog/computer-generated-contemporary-art
#
# KNOWN ISSUES
# See help for `--RANDOM_SEED`.
# CODE
# TO DO
# - figure out whether I broke RND continuity? It would seem the same presets are no longer producing the same results?
# - isolate what situation didn't create a new preset / anim folder when I expected it to, and fix that (or document in help).
# - make naming convention of variables consistent? I think I'm all over the place with this . . . :p
# - possibly things in the color_growth_v1.py's TO DO list.
# - determine whether any code in the fast fork (now this script) is leftover from color_growth_v1.py, and delete it?
# - make it properly use negative or > 8 growth-clip values again? Since the color_growth_fast.py fork it isn't.
# VERSION HISTORY
# v2.8.7:
# Edit speedup credit comment.
# START IMPORTS AND GLOBALS
ColorGrowthPyVersionString = 'v2.8.7'
import datetime
import random
import argparse
import ast
import os.path
import sys
import re
import queue
from more_itertools import unique_everseen
import platform
# I'm also using another pseudorandom number generator built into numpy as np:
import numpy as np
from PIL import Image
# Defaults which will be overridden if arguments of the same name are provided to the script:
WIDTH = 600
HEIGHT = 300
RSHIFT = 8
STOP_AT_PERCENT = 1
SAVE_EVERY_N = 0
RAMP_UP_SAVE_EVERY_N = False
START_COORDS_RANGE = (1,3)
GROWTH_CLIP = (0,5)
SAVE_PRESET = True
animationFrameCounter = 0
renderedFrameCounter = 0
saveNextFrameNumber = 0
imageFrameFileName = ''
padFileNameNumbersDigitsWidth = 0
# SOME BACKGROUND COLOR options;
# any of these (uncomment only one) are made into a list later by ast.literal_eval(BG_COLOR) :
# BG_COLOR = "[157,140,157]" # Medium purplish gray
BG_COLOR = "[252,251,201]" # Buttery light yellow
# BG_COLOR = "[255,63,52]" # Scarlet-scarlet-orange
RECLAIM_ORPHANS = True
BORDER_BLEND = True
TILEABLE = False
SCRIPT_ARGS_STR = ''
# END GLOBALS
# START OPTIONS (which affect globals)
# allows me to have a version string parser option that prints
# and exits; re: https://stackoverflow.com/a/41575802/1397555
class versionStringPrintAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
print('color_growth.py', ColorGrowthPyVersionString)
parser.exit()
PARSER = argparse.ArgumentParser(description=
'Renders a PNG image like bacteria that produce random color mutations \
as they grow over a surface. Output file names are named after the date \
and time. Inspired by and drastically evolved from colorFibers.py, which \
was horked and adapted from \
https://scipython.com/blog/computer-generated-contemporary-art/ \
NOTE: CLI options have had breaking changes over time. If reusing settings \
from a previous version, check those settings first if you get errors. \
NOTE: by default the --RAMP_UP_SAVE_EVERY_N switch has a False value, but \
you probably want it True if you save animation frames (--SAVE_EVERY_N).'
)
PARSER.register('action', 'versionStringPrint', versionStringPrintAction)
PARSER.add_argument('-v', '--VERSION', nargs=0, action='versionStringPrint', help='Print version number and exit.')
PARSER.add_argument('--WIDTH', type=int, help=
'WIDTH of output image(s). Default ' + str(WIDTH) + '.')
PARSER.add_argument('--HEIGHT', type=int, help=
'HEIGHT of output image(s). Default ' + str(HEIGHT) + '.')
PARSER.add_argument('-r', '--RSHIFT', type=int, help=
'Vary R, G and B channel values randomly in the range negative this \
value or positive this value. Note that this means the range is RSHIFT \
times two. Defaut ' + str(RSHIFT) + '.'
)
PARSER.add_argument('-b', '--BG_COLOR', type=str, help=
'Canvas color. Expressed as a python list or single number that will be \
assigned to every value in an RGB triplet. If a list, give the RGB \
values in the format \'[255,70,70]\' (if you add spaces after the \
commas, you must surround the parameter in single or double quotes). \
This example would produce a deep red, as Red = 255, Green = 70, Blue = \
70). A single number example like just 150 will result in a medium-light \
gray of [150,150,150] (Red = 150, Green = 150, Blue = 150). All values \
must be between 0 and 255. Default ' + str(BG_COLOR) + '.'
)
PARSER.add_argument('-c', '--COLOR_MUTATION_BASE', type=str, help=
'Base initialization color for pixels, which randomly mutates as \
painting proceeds. If omitted, defaults to whatever BG_COLOR is. If \
included, may differ from BG_COLOR. This option must be given in the \
same format as BG_COLOR. You may make the base initialization color of \
each origin random by specifying "--COLOR_MUTATION_BASE random".'
)
PARSER.add_argument('--BORDER_BLEND', type=str, help=
'If this is enabled, the hard edges between different colonies will be \
blended together. Enabled by default. To disable pass \
--BORDER_BLEND False or --BORDER_BLEND 0.'
)
PARSER.add_argument('--TILEABLE', type=str, help=
'Make the generated image seamlessly tile. Colonies will wrap around \
the edge when they encounter it. Disabled by default. Enable with \
--TILEABLE True or --TILEABLE 1.'
)
PARSER.add_argument('--STOP_AT_PERCENT', type=float, help=
'What percent canvas fill to stop painting at. To paint until the canvas \
is filled (which can take extremely long for higher resolutions), pass 1 \
(for 100 percent). If not 1, value should be a percent expressed as a \
decimal (float) between 0 and 1 (e.g 0.4 for 40 percent. Default ' + \
str(STOP_AT_PERCENT) + '. For high --failedMutationsThreshold or random \
walk (neither of which is implemented at this writing), 0.475 (around 48 \
percent) is recommended. Stop percent is adhered to approximately (it \
could be much less efficient to make it exact).'
)
PARSER.add_argument('-a', '--SAVE_EVERY_N', type=int, help=
'Every N successful coordinate and color mutations, save an animation \
frame into a subfolder named after the intended final art file. To save \
every frame, set this to 1, or to save every 3rd frame set it to 3, etc. \
Saves zero-padded numbered frames to a subfolder which may be strung \
together into an animation of the entire painting process (for example \
via ffmpegAnim.sh). May substantially slow down render, and can also \
create many, many gigabytes of data, depending. ' + str(SAVE_EVERY_N) + \
' by default. To disable, set it to 0 with: -a 0 OR: --SAVE_EVERY_N 0. \
NOTE: If this is nonzero and you do not set --RAMP_UP_SAVE_EVERY_N to \
either True or False (see), the default --RAMP_UP_SAVE_EVERY_N False \
will override to True, as it is strongly suggested you want that if \
you render an animation. If that is not what you want, manually set \
--RAMP_UP_SAVE_EVERY_N False.'
)
PARSER.add_argument('--RAMP_UP_SAVE_EVERY_N', type=str, help=
'Increase the value of --SAVE_EVERY_N over time. Without this, the \
animation may seem to slow toward the middle and end, because the \
interval --SAVE_EVERY_N is constant; the same number of new mutated \
coordinates is spread over a wider area every save frame. \
--RAMP_UP_SAVE_EVERY_N causes the value of --SAVE_EVERY_N to increase \
over time, like dragging the corner of a selection rectangle to increase \
rendered area over the whole canvas. The result is an apparently \
more visually linear growth (in all growth vectors) and a faster \
animation (and faster animation render, as less time is made saving \
fewer frames), but technically the growth rate (vs. saved animation frames) \
actually increases over time. Default ' + str(RAMP_UP_SAVE_EVERY_N) + '. \
NOTES: 1) Relies on --SAVE_EVERY_N being nonzero. Script will warn and exit \
if --RAMP_UP_SAVE_EVERY_N is True and --SAVE_EVERY_N is 0 (zero). \
2) Save frame intervals near start of animation may be similar to \
--SAVE_EVERY_N value, but as noted increase (and can increase a lot) \
over time. 3) To re-render animations created prior to v2.6.6 the same \
as at their creation --RAMP_UP_SAVE_EVERY_N must be False (as this feature \
was introduced in v2.6.6). 4) See related NOTE for --SAVE_EVERY_N.'
)
PARSER.add_argument('-s', '--RANDOM_SEED', type=int, help=
'Seed for random number generators (random and numpy.random are used). \
Default generated by random library itself and added to render file name \
for reference. Can be any integer in the range 0 to 4294967296 (2^32). \
If not provided, it will be randomly chosen from that range (meta!). If \
--SAVE_PRESET is used, the chosen seed will be saved with the preset \
.cgp file. KNOWN ISSUE: functional differences between random generators \
of different versions of Python and/or Python, maybe on different platforms, \
produce different output from the same random seed. ALSO, some versions of \
this script had code that accidentally altered the pseudorandom number \
sequence via something outside the intended color growth algorithm. The \
result was different output from the same --RANDOM_SEED. If you get \
different output than before from the same --RANDOM_SEED, search for and \
    examine the VESTIGIAL CODE comment(s!), and try uncommenting the line of code \
they detail.'
)
PARSER.add_argument('-q', '--START_COORDS_N', type=int, help=
'How many origin coordinates to begin coordinate and color mutation \
from. Default randomly chosen from range in --START_COORDS_RANGE (see). \
Random selection from that range is performed *after* random seeding by \
--RANDOM_SEED, so that the same random seed will always produce the same \
number of start coordinates. I haven\'t tested whether this will work if \
the number exceeds the number of coordinates possible in the image. \
Maybe it would just overlap itself until they\'re all used?'
)
PARSER.add_argument('--START_COORDS_RANGE', help=
'Random integer range to select a random number of --START_COORDS_N if \
--START_COORDS_N is not provided. Default (' + \
str(START_COORDS_RANGE[0]) + ',' + str(START_COORDS_RANGE[1]) + '). Must \
be provided in that form (a string surrounded by double quote marks (for \
Windows) which can be evaluated to a python tuple), and in the range 0 \
to 4294967296 (2^32), but I bet that sometimes nothing will render if \
you choose a max range number orders of magnitude higher than the number \
of pixels available in the image. I probably would never make the max \
    range higher than (number of pixels in image) / 62500 (which is 250 \
squared). Will not be used if [-q | START_COORDS_N] is provided.'
)
PARSER.add_argument('--CUSTOM_COORDS_AND_COLORS', type=str, help=
'Custom coordinate locations and colors list to initialized coordinate \
mutation queue with. In complex nested lists of tuples _and lists_ \
format (I know, it\'s crazy), surrounded by single or double quote marks, \
OR passed without any space characters in the parameter, like: \
\'[[(coordinate),[color]], [(coordinate),[color]], [(coordinate),[color]]]\', \
or more accurately like: \
[[(50,40),[255,0,255]],[(88,84),[0,255,255]]]. NOTES: \
1) Because this overrides --START_COORDS_N, --START_COORDS_RANGE, and \
--COLOR_MUTATION_BASE, if you want random numbers of coordinates and \
coordinate positions with this, contrive them via another custom script \
or program, and pass them to this. 2) Internally in code the coordinates \
are zero-index-based, which means 0 is 1, 1 is 2, 4 is 5, etc.; BUT \
that\'s not human-friendly, so use the actual values (1 is 1!) \
and the program will just subtract 1 for the zero-based indexing. 3) \
Although internally in code, coordinates are represented as (y,x) tuples \
(or (down,across), that confuses me and isn\'t standard or expected for \
humans, so in this parameter coordinate are represented as (x,y) (or \
(across,down), and the code swaps them before assignment to real, \
internal tuples. You\'re welcome.'
)
PARSER.add_argument('--GROWTH_CLIP', type=str, help=
'Affects seeming "thickness" (or viscosity) of growth. A Python tuple \
expressed as a string (must be surrounded by double quote marks for \
Windows). Default ' + str(GROWTH_CLIP) + '. In growth into adjacent \
coordinates, the maximum number of possible neighbor coordinates to grow \
into is 8 (which may only ever happen with a start coordinate: in \
practical terms, the most coordinates that may usually be expanded into \
is 7). The first number in the tuple is the minimum number of \
coordinates to randomly select, and the second number is the maximum. \
The second must be greater than the first. The first may be lower than 0 \
and will be clipped to 1, making selection of only 1 neighbor coordinate \
more common. The second number may be higher than 8 (or the number of \
available coordinates as the case may be), and will be clipped to the \
maximum number of available coordinates, making selection of all \
available coordinates more common. If the first number is a positive \
integer <= 7, at least that many coordinates will always be selected \
when possible. If the second number is a positive integer >= 1, at most \
that many coordinates will ever be selected. A negative first number or \
low first number clip will tend toward a more evenly spreading liquid \
appearance, and a lower second number clip will cause a more \
stringy/meandering/splatty path or form (as it spreads less uniformly). \
With an effectively more viscous clip like "(2,4)", smaller \
streamy/flood things may traverse a distance faster. Some tuples make \
--RECLAIM_ORPHANS quickly fail, some make it virtually never fail.'
)
PARSER.add_argument('--RECLAIM_ORPHANS', type=str, help=
'Coordinates can end up never mutating color, and remain the same color \
as --BG_COLOR (which may result in the appearance of pixels that seem \
like flecks or discontiguous color). This may be more likely with a \
    --GROWTH_CLIP range nearer zero (higher viscosity). This option corrals \
these orphan coordinates and revives them so that their color will \
mutate. Default ' + str(RECLAIM_ORPHANS) + '. To disable pass \
--RECLAIM_ORPHANS False or --RECLAIM_ORPHANS 0.'
)
PARSER.add_argument('--SAVE_PRESET', type=str, help=
'Save all parameters (which are passed to this script) to a .cgp (color \
growth preset) file. If provided, --SAVE_PRESET must be a string \
representing a boolean state (True or False or 1 or 0). Default '+ \
str(SAVE_PRESET) +'. The .cgp file can later be loaded with the \
--LOAD_PRESET switch to create either new or identical work from the \
same parameters (whether it is new or identical depends on the switches, \
--RANDOM_SEED being the most consequential). This with [-a | \
--SAVE_EVERY_N] can recreate gigabytes of exactly the same animation \
frames using just a preset. NOTES: 1) --START_COORDS_RANGE and its \
accompanying value are not saved to config files, and the resultantly \
generated [-q | --START_COORDS_N] is saved instead. 2) You may add \
arbitrary text (such as notes) to the second and subsequent lines of a \
saved preset, as only the first line is used.'
)
PARSER.add_argument('--LOAD_PRESET', type=str, help=
'A preset file (as first created by --SAVE_PRESET) to use. Empty (none \
used) by default. Not saved to any preset. At this writing only a single \
file name is handled, not a path, and it is assumed the file is in the \
current directory. A .cgp preset file is a plain text file on one line, \
which is a collection of SWITCHES to be passed to this script, written \
literally the way you would pass them to this script. NOTE: you may load \
a preset and override any switches in the preset by using the override \
after --LOAD_PRESET. For example, if a preset contains --RANDOM SEED \
98765 but you want to override it with 12345, pass --LOAD_PRESET \
<preset_filename.cgp> --RANDOM_SEED 12345 to this script.'
)
# START ARGUMENT PARSING
# DEVELOPER NOTE: Throughout the below argument checks, wherever a user does not specify an argument and I use a default (as defaults are defined near the start of working code in this script), add that default switch and switch value pair to argparse, for use by the --SAVE_PRESET feature (which saves everything except for the script path ([0]) to a preset). I take this approach because I can't check if a default value was supplied if I do that in the PARSER.add_argument function --
# http://python.6.x6.nabble.com/argparse-tell-if-arg-was-defaulted-td1528162.html
# -- so what I do is check for None (and then supply a default and add to argsparse if None is found). The check for None isn't literal: it's in the else clause after an if (value) check (if the if check fails, that means the value is None, and else: is used) :
print('')
print('Processing any arguments to script . . .')
# allows me to override parser arguments declared in this namespace:
class ARGUMENTS_NAMESPACE:
pass
argumentsNamespace = ARGUMENTS_NAMESPACE()
# Weirdly, for the behavior I want, I must call parse_args a few times:
# - first to get the --LOAD_PRESET CLI argument if there is any
# - then potentially many times to iterate over arguments got from the
# .cgp config file specified
# - then again to override any of those with options passed via CLI
# which I want to override those.
# DEPRECATED: call parse_args with default no parameters (except it is done before the above of necessity):
# ARGS = PARSER.parse_args()
# NOW, create a namespace that allows loaded .cgp file parameters to overwrite values in:
# re: https://docs.python.org/3/library/argparse.html#argparse.Namespace
# re: https://docs.python.org/3/library/argparse.html#argparse.ArgumentParser.parse_args
ARGS = PARSER.parse_args(args=sys.argv[1:], namespace=argumentsNamespace)
# Build dictionary from ARGS and use it to build global SCRIPT_ARGS_STR;
# clean it up later (we don't want elements in it with value "None":
argsDict = vars(ARGS)
# modify like this:
# argsDict['COLOR_MUTATION_BASE'] = '[0,0,0]'
# IF A PRESET file is given, load its contents and make its parameters override anything else that was just parsed through the argument parser:
if ARGS.LOAD_PRESET:
LOAD_PRESET = ARGS.LOAD_PRESET
with open(LOAD_PRESET) as f:
SWITCHES = f.readline()
# Remove spaces from parameters in tuples like (1, 13), because it
# mucks up this parsing:
SWITCHES = re.sub('(\([0-9]*),\s*([0-9]*\))', r'\1,\2', SWITCHES)
# removes any start and end whitespace that can throw off
# the following parsing:
SWITCHES = SWITCHES.strip()
SWITCHES = SWITCHES.split(' ')
for i in range(0, len(SWITCHES), 2):
ARGS = PARSER.parse_args(args=[SWITCHES[i], SWITCHES[i+1]], namespace=argumentsNamespace)
# Doing this again here so that anything in the command line overrides:
ARGS = PARSER.parse_args(args=sys.argv[1:], namespace=argumentsNamespace)
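# For example (hypothetical preset): if my_preset.cgp contains
# "--WIDTH 800 --RANDOM_SEED 98765" and the command line passes
# --LOAD_PRESET my_preset.cgp --RANDOM_SEED 12345, the final namespace holds
# WIDTH=800 from the preset and RANDOM_SEED=12345 from the CLI override.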
# If a user supplied an argument (so that WIDTH has a value (is not None), use that:
if ARGS.WIDTH:
# It is in argsparse already, so it will be used by --WIDTH:
WIDTH = ARGS.WIDTH
else:
# If not, leave the default as it was defined globally, and add to argsDict
# so it can be saved in a .cfg preset:
argsDict['WIDTH'] = WIDTH
if ARGS.HEIGHT:
HEIGHT = ARGS.HEIGHT
else:
argsDict['HEIGHT'] = HEIGHT
if ARGS.RSHIFT:
RSHIFT = ARGS.RSHIFT
else:
argsDict['RSHIFT'] = RSHIFT
if ARGS.BG_COLOR:
    # For preset saving, remove spaces and write back to argparse,
# OR ADD IT (if it was gotten through argparse), so a preset saved by
# --SAVE_PRESET won't cause errors:
BG_COLOR = ARGS.BG_COLOR
BG_COLOR = re.sub(' ', '', BG_COLOR)
argsDict['BG_COLOR'] = BG_COLOR
else:
argsDict['BG_COLOR'] = BG_COLOR
# Convert BG_COLOR (as set from ARGS.BG_COLOR or default) string to python list for use
# by this script, re: https://stackoverflow.com/a/1894296/1397555
BG_COLOR = ast.literal_eval(BG_COLOR)
# See comments in ARGS.BG_COLOR handling; handled the same:
if not ARGS.CUSTOM_COORDS_AND_COLORS:
if ARGS.COLOR_MUTATION_BASE:
COLOR_MUTATION_BASE = ARGS.COLOR_MUTATION_BASE
COLOR_MUTATION_BASE = re.sub(' ', '', COLOR_MUTATION_BASE)
argsDict['COLOR_MUTATION_BASE'] = COLOR_MUTATION_BASE
if ARGS.COLOR_MUTATION_BASE.lower() == 'random':
COLOR_MUTATION_BASE = 'random'
else:
COLOR_MUTATION_BASE = ast.literal_eval(COLOR_MUTATION_BASE)
else: # Write same string as BG_COLOR, after the same silly string manipulation as
# for COLOR_MUTATION_BASE, but more ridiculously now _back_ from that to
# a string again:
BG_COLOR_TMP_STR = str(BG_COLOR)
BG_COLOR_TMP_STR = re.sub(' ', '', BG_COLOR_TMP_STR)
argsDict['COLOR_MUTATION_BASE'] = BG_COLOR_TMP_STR
# In this case we're using a list as already assigned to BG_COLOR:
COLOR_MUTATION_BASE = list(BG_COLOR)
# If I hadn't used list(), COLOR_MUTATION_BASE would be a reference to BG_COLOR (which
# is default Python list handling behavior with the = operator), and when I changed either,
# "both" would change (but they would really just be different names for the same list).
# I want them to be different.
# purple = [255, 0, 255] # Purple. In prior commits of this script, this has been defined
# and unused, just like in real life. Now, it is commented out or not even defined, just
# like it is in real life.
if ARGS.RECLAIM_ORPHANS:
RECLAIM_ORPHANS = ast.literal_eval(ARGS.RECLAIM_ORPHANS)
else:
argsDict['RECLAIM_ORPHANS'] = RECLAIM_ORPHANS
if ARGS.BORDER_BLEND:
BORDER_BLEND = ast.literal_eval(ARGS.BORDER_BLEND)
else:
argsDict['BORDER_BLEND'] = BORDER_BLEND
if ARGS.TILEABLE:
TILEABLE = ast.literal_eval(ARGS.TILEABLE)
else:
argsDict['TILEABLE'] = TILEABLE
if ARGS.STOP_AT_PERCENT:
STOP_AT_PERCENT = ARGS.STOP_AT_PERCENT
else:
argsDict['STOP_AT_PERCENT'] = STOP_AT_PERCENT
if ARGS.SAVE_EVERY_N:
SAVE_EVERY_N = ARGS.SAVE_EVERY_N
else:
argsDict['SAVE_EVERY_N'] = SAVE_EVERY_N
# Conditional override:
if ARGS.SAVE_EVERY_N and not ARGS.RAMP_UP_SAVE_EVERY_N:
RAMP_UP_SAVE_EVERY_N = True
argsDict['RAMP_UP_SAVE_EVERY_N'] = 'True'
if ARGS.RAMP_UP_SAVE_EVERY_N:
RAMP_UP_SAVE_EVERY_N = ast.literal_eval(ARGS.RAMP_UP_SAVE_EVERY_N)
if SAVE_EVERY_N == 0 and RAMP_UP_SAVE_EVERY_N == True:
print('--RAMP_UP_SAVE_EVERY_N is True, but --SAVE_EVERY_N is 0. --SAVE_EVERY_N must be nonzero if --RAMP_UP_SAVE_EVERY_N is True. Either set --SAVE_EVERY_N to something other than 0, or set RAMP_UP_SAVE_EVERY_N to False. Exiting script.')
sys.exit(2)
else:
argsDict['RAMP_UP_SAVE_EVERY_N'] = RAMP_UP_SAVE_EVERY_N
if ARGS.RANDOM_SEED:
RANDOM_SEED = ARGS.RANDOM_SEED
else:
RANDOM_SEED = random.randint(0, 4294967296)
argsDict['RANDOM_SEED'] = RANDOM_SEED
# Use that seed straightway:
random.seed(RANDOM_SEED)
np.random.seed(RANDOM_SEED)
# BEGIN STATE MACHINE "Megergeberg 5,000."
# DOCUMENTATION.
# Possible combinations of these variables to handle; "coords" means START_COORDS_N, RNDcoords means START_COORDS_RANGE:
# --
# ('coords', 'RNDcoords') : use coords, delete any RNDcoords
# ('coords', 'noRNDcoords') : use coords, no need to delete any RNDcoords. These two
# cases are handled by the "if coords" branch (with an inner "if RNDcoords" check).
# ('noCoords', 'RNDcoords') : assign user-provided RNDcoords for use (overwrite defaults).
# ('noCoords', 'noRNDcoords') : continue with RNDcoords defaults (don't overwrite defaults).
# These two cases are handled by the "else" branch (with an inner "if RNDcoords / else"). Also, these two cases generate coords independent of (outside) that last if/else, by using whatever RNDcoords ends up being (user-provided
# or default).
# --
# I COULD just have four different, independent "if" checks explicitly for those four pairs and work from that, but this is more compact logic (fewer checks).
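# For example (hypothetical values): passing only --START_COORDS_RANGE "(5,10)" is the
# ('noCoords', 'RNDcoords') case: the default range is overwritten with (5,10), and
# START_COORDS_N is then randomly drawn from that range below.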
# If --START_COORDS_N is provided by the user, use it, unless there is overriding CUSTOM_COORDS_AND_COLORS:
if not ARGS.CUSTOM_COORDS_AND_COLORS:
if ARGS.START_COORDS_N:
START_COORDS_N = ARGS.START_COORDS_N
print('Will use the provided --START_COORDS_N, ', START_COORDS_N)
if ARGS.START_COORDS_RANGE:
# .. and delete any --START_COORDS_RANGE and its value from argsparse (as it will not be used and would best not be stored in the .cgp config file via --SAVE_PRESET:
argsDict.pop('START_COORDS_RANGE', None)
print(
'** NOTE: ** You provided both [-q | --START_COORDS_N] and --START_COORDS_RANGE, \
but the former overrides the latter (the latter will not be used). This program \
disregards the latter from the parameters list.'
)
else: # If --START_COORDS_N is _not_ provided by the user..
if ARGS.START_COORDS_RANGE:
# .. but if --START_COORDS_RANGE _is_ provided, assign from that:
START_COORDS_RANGE = ast.literal_eval(ARGS.START_COORDS_RANGE)
STR_PART = 'from user-supplied range ' + str(START_COORDS_RANGE)
else: # .. otherwise use the default START_COORDS_RANGE:
STR_PART = 'from default range ' + str(START_COORDS_RANGE)
START_COORDS_N = random.randint(START_COORDS_RANGE[0], START_COORDS_RANGE[1])
argsDict['START_COORDS_N'] = START_COORDS_N
print('Using', START_COORDS_N, 'start coordinates, by random selection ' + STR_PART)
# END STATE MACHINE "Megergeberg 5,000."
if ARGS.CUSTOM_COORDS_AND_COLORS:
CUSTOM_COORDS_AND_COLORS = ARGS.CUSTOM_COORDS_AND_COLORS
CUSTOM_COORDS_AND_COLORS = re.sub(' ', '', CUSTOM_COORDS_AND_COLORS)
argsDict['CUSTOM_COORDS_AND_COLORS'] = CUSTOM_COORDS_AND_COLORS
CUSTOM_COORDS_AND_COLORS = ast.literal_eval(ARGS.CUSTOM_COORDS_AND_COLORS)
if ARGS.GROWTH_CLIP: # See comments in ARGS.BG_COLOR handling. Handled the same.
GROWTH_CLIP = ARGS.GROWTH_CLIP
GROWTH_CLIP = re.sub(' ', '', GROWTH_CLIP)
argsDict['GROWTH_CLIP'] = GROWTH_CLIP
GROWTH_CLIP = ast.literal_eval(GROWTH_CLIP)
    # NOTE: VESTIGIAL CODE HERE that will alter pseudorandom determinism if commented vs. not commented out; if render from a preset doesn't produce the same result as it once did, try uncommenting the next line! :
# zax_blor = ('%03x' % random.randrange(16**6))
else:
temp_str = str(GROWTH_CLIP)
temp_str = re.sub(' ', '', temp_str)
argsDict['GROWTH_CLIP'] = GROWTH_CLIP
if ARGS.SAVE_PRESET:
SAVE_PRESET = ast.literal_eval(ARGS.SAVE_PRESET)
else:
argsDict['SAVE_PRESET'] = SAVE_PRESET
# END ARGUMENT PARSING
# Remove arguments from argsDict whose values are 'None' from that (they cause problems when doing things with the arguments list via CLI, as intended) :
for key in argsDict:
# if the key value is 'None', don't bother saving it; otherwise save it:
if argsDict[key] != None:
keyValStr = '--' + key + ' ' + str(argsDict[key])
SCRIPT_ARGS_STR += keyValStr + ' '
# removes whitespace from start and end that would mess up parse code earlier in the script (if I didn't do this there also) :
SCRIPT_ARGS_STR = SCRIPT_ARGS_STR.strip()
# ADDITIONAL GLOBALS defined here:
allPixelsN = WIDTH * HEIGHT
stopRenderAtPixelsN = int(allPixelsN * STOP_AT_PERCENT)
# If RAMP_UP_SAVE_EVERY_N is True, create list saveFramesAtCoordsPaintedArray with increasing values for when to save N evolved coordinates to animation frames:
saveFramesAtCoordsPaintedArray = []
if SAVE_EVERY_N != 0 and RAMP_UP_SAVE_EVERY_N == True:
allPixelsNdividedBy_SAVE_EVERY_N = allPixelsN / SAVE_EVERY_N
divisor = 1 / allPixelsNdividedBy_SAVE_EVERY_N
saveFramesAtCoordsPaintedMultipliers = [x * divisor for x in range(0, int(allPixelsNdividedBy_SAVE_EVERY_N)+1)]
for multiplier in saveFramesAtCoordsPaintedMultipliers:
mod_w = WIDTH * multiplier
mod_h = HEIGHT * multiplier
mod_area = mod_w * mod_h
saveFramesAtCoordsPaintedArray.append(int(mod_area))
# Deduplicate elements in the list but maintain order:
saveFramesAtCoordsPaintedArray = list(unique_everseen(saveFramesAtCoordsPaintedArray))
# Because that resulting list doesn't include the ending number, add it:
saveFramesAtCoordsPaintedArray.append(stopRenderAtPixelsN)
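# Worked example (hypothetical numbers): for a 10x10 canvas with --SAVE_EVERY_N 25,
# allPixelsNdividedBy_SAVE_EVERY_N is 4, the multipliers are 0, 0.25, 0.5, 0.75 and
# 1.0, and the resulting save thresholds grow quadratically with the multiplier:
# 0, 6, 25, 56, 100 coordinates painted.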
# If RAMP_UP_SAVE_EVERY_N is False, create list saveFramesAtCoordsPaintedArray with values at constant intervals for when to save animation frames:
if SAVE_EVERY_N != 0 and RAMP_UP_SAVE_EVERY_N == False:
saveFramesAtCoordsPaintedArray = [x * SAVE_EVERY_N for x in range(0, int(stopRenderAtPixelsN/SAVE_EVERY_N)+1 )]
    # Because that range may not include the ending number, add it:
    saveFramesAtCoordsPaintedArray.append(stopRenderAtPixelsN)
# Values of these used elsewhere:
saveFramesAtCoordsPaintedArrayIDX = 0
saveFramesAtCoordsPaintedArrayMaxIDX = (len(saveFramesAtCoordsPaintedArray) - 1)
def is_coord_in_bounds(y, x):
return y >= 0 and y < HEIGHT and x >= 0 and x < WIDTH
def is_color_valid(y, x, canvas):
return canvas[y][x][0] >= 0 # Negative number used for invalid color
def get_rnd_unallocd_neighbors(y, x, canvas):
"""Returns both a set() of randomly selected empty neighbor coordinates to use
immediately, and a set() of neighbors to use later."""
# init an empty set we'll populate with neighbors (int tuples) and return:
rnd_neighbors_to_ret = []
unallocd_neighbors = set()
for i in range(-1, 2):
for j in range(-1, 2):
if TILEABLE:
if not (i == 0 and j == 0) and not is_color_valid((y+i) % HEIGHT, (x+j) % WIDTH, canvas):
unallocd_neighbors.add(((y+i) % HEIGHT, (x+j) % WIDTH))
else:
if not (i == 0 and j == 0) and is_coord_in_bounds(y+i, x+j) and not is_color_valid(y+i, x+j, canvas):
unallocd_neighbors.add((y+i, x+j))
if unallocd_neighbors: # If there is anything left in unallocd_neighbors:
# START GROWTH_CLIP (VISCOSITY) CONTROL.
# Decide how many to pick:
n_neighbors_to_ret = np.clip(np.random.randint(GROWTH_CLIP[0], GROWTH_CLIP[1] + 1), 0, len(unallocd_neighbors))
# END GROWTH_CLIP (VISCOSITY) CONTROL.
rnd_neighbors_to_ret = random.sample(unallocd_neighbors, n_neighbors_to_ret)
for neighbor in rnd_neighbors_to_ret:
unallocd_neighbors.remove(neighbor)
return rnd_neighbors_to_ret, unallocd_neighbors
def find_adjacent_color(y, x, canvas):
allocd_neighbors = []
for i in range(-1, 2):
for j in range(-1, 2):
if TILEABLE:
if not (i == 0 and j == 0) and is_color_valid((y+i) % HEIGHT, (x+j) % WIDTH, canvas):
allocd_neighbors.append(((y+i) % HEIGHT, (x+j) % WIDTH))
else:
if not (i == 0 and j == 0) and is_coord_in_bounds(y+i, x+j) and is_color_valid(y+i, x+j, canvas):
allocd_neighbors.append((y+i, x+j))
if not allocd_neighbors:
return None
else:
y, x = random.choice(allocd_neighbors)
return canvas[y][x]
def coords_set_to_image(canvas, render_target_file_name):
"""Creates and saves image from dict of Coordinate objects, HEIGHT and WIDTH definitions,
and a filename string."""
tmp_array = [[BG_COLOR if x[0] < 0 else x for x in row] for row in canvas]
tmp_array = np.asarray(tmp_array)
image_to_save = Image.fromarray(tmp_array.astype(np.uint8)).convert('RGB')
image_to_save.save(render_target_file_name)
def print_progress(newly_painted_coords):
"""Prints coordinate plotting statistics (progress report)."""
print('newly painted : total painted : target : canvas size : reclaimed orphans')
print(newly_painted_coords, ':', painted_coordinates, ':', \
stopRenderAtPixelsN, ':', allPixelsN, ':', orphans_to_reclaim_n)
def set_img_frame_file_name():
global padFileNameNumbersDigitsWidth
global renderedFrameCounter
global imageFrameFileName
renderedFrameCounter += 1
frameNumberStr = str(renderedFrameCounter)
imageFrameFileName = anim_frames_folder_name + '/' + frameNumberStr.zfill(padFileNameNumbersDigitsWidth) + '.png'
def save_animation_frame():
# Tells the function we are using global variables:
global animationFrameCounter
global saveNextFrameNumber
global saveFramesAtCoordsPaintedArrayIDX
global saveFramesAtCoordsPaintedArrayMaxIDX
# print('animationFrameCounter', animationFrameCounter, 'saveNextFrameNumber', saveNextFrameNumber)
if SAVE_EVERY_N != 0:
if (animationFrameCounter == saveNextFrameNumber):
# only increment the ~IDX if it will be in array bounds:
if (saveFramesAtCoordsPaintedArrayIDX + 1) < saveFramesAtCoordsPaintedArrayMaxIDX:
saveFramesAtCoordsPaintedArrayIDX += 1
saveNextFrameNumber = saveFramesAtCoordsPaintedArray[saveFramesAtCoordsPaintedArrayIDX]
set_img_frame_file_name()
# Only write frame if it does not already exist (allows resume of suspended / crashed renders) :
if os.path.exists(imageFrameFileName) == False:
# print("Animation render frame file does not exist; writing frame.")
coords_set_to_image(canvas, imageFrameFileName)
animationFrameCounter += 1
# END GLOBAL FUNCTIONS
# END OPTIONS AND GLOBALS
"""START MAIN FUNCTIONALITY."""
print('Initializing render script..')
# A nested list of RGB values which is used with tracking sets to fill a "canvas":
canvas = []
# A set of coordinates (tuples, not Coordinate objects) which are free for the taking:
unallocd_coords = set()
# A set of coordinates (again tuples) which are set aside (allocated) for use:
allocd_coords = set()
# A set of coordinates (again tuples) which have been color mutated and may no longer
# coordinate mutate:
filled_coords = set()
coord_queue = []
# Initialize the canvas and the unallocd_coords set (canvas being a list of rows,
# each a list of [R, G, B] values, with negative values marking unpainted pixels):
for y in range(0, HEIGHT):    # over the rows (y):
canvas.append([])
for x in range(0, WIDTH): # over the columns, prep and add:
unallocd_coords.add((y, x))
canvas[y].append([-1,-1,-1])
# If ARGS.CUSTOM_COORDS_AND_COLORS was not passed to script, initialize allocd_coords set by random selection from unallocd_coords (and remove from unallocd_coords); structure of coords is (y,x)
if not ARGS.CUSTOM_COORDS_AND_COLORS:
print('no --CUSTOM_COORDS_AND_COLORS argument passed to script, so initializing coordinate locations randomly . . .')
RNDcoord = random.sample(unallocd_coords, START_COORDS_N)
for coord in RNDcoord:
coord_queue.append(coord)
if COLOR_MUTATION_BASE == "random":
canvas[coord[0]][coord[1]] = np.random.randint(0, 255, 3)
else:
canvas[coord[0]][coord[1]] = COLOR_MUTATION_BASE
# If ARGS.CUSTOM_COORDS_AND_COLORS was passed to script, init coords and their colors from it:
else:
print('--CUSTOM_COORDS_AND_COLORS argument passed to script, so initializing coords and colors from that. NOTE that this overrides --START_COORDS_N, --START_COORDS_RANGE, and --COLOR_MUTATION_BASE if those were provided.')
print('\n')
for element in CUSTOM_COORDS_AND_COLORS:
        # SWAPPING those (on the CLI they are x,y; here it wants y,x);
        # ALSO, this program kindly allows hoomans to not bother with zero-based indexing, which means 1 for hoomans is 0 for program, so subtracting 1 from both values:
coord = (element[0][1], element[0][0])
# print('without mod:', coord)
coord = (element[0][1]-1, element[0][0]-1)
# print('with mod:', coord)
coord_queue.append(coord)
color_values = np.asarray(element[1]) # np.asarray() gets it into same object type as elsewhere done and expected.
        # print('adding color to canvas:', color_values)
        # MINDING the x,y swap AND the hooman 1-based index modification here, too!:
canvas[ element[0][1]-1 ][ element[0][0]-1 ] = color_values # LORF!
report_stats_every_n = 5000
report_stats_nth_counter = 0
# Render target file name generation; differs in different scenarios:
# If a preset was loaded, base the render target file name on it.
if ARGS.LOAD_PRESET:
    # take trailing .cgp off it (str.rstrip() strips a character set, not a suffix, so slice it off instead):
    render_target_file_base_name = LOAD_PRESET[:-4] if LOAD_PRESET.endswith('.cgp') else LOAD_PRESET
else:
# Otherwise, create render target file name based on time painting began.
now = datetime.datetime.now()
time_stamp = now.strftime('%Y_%m_%d__%H_%M_%S__')
    # VESTIGIAL CODE; most versions of this script altered the pseudorandom sequence of --RANDOM_SEED with the following line of code (which makes an rndStr); this had been commented out around v2.3.6 - v2.5.5 (maybe?), which broke pseudorandom continuity as originally developed in the script. For continuity (and because output seemed randomly better _with_ this code), it is left here.
    # ALSO NOTE:
    # while trying to track down this issue, some versions of the script had the following line of code before the above "if ARGS.LOAD_PRESET"; but now I think it _would_ have been here (also, git history isn't complete on versions, I think, so I'm speculating); if you can't duplicate the rnd state of a render, you may want to try copying it up there.
rndStr = ('%03x' % random.randrange(16**6))
render_target_file_base_name = time_stamp + '__' + rndStr + '_colorGrowthPy'
# Check if render target file with same name (but .png) extension exists. This logic is very slightly risky: if render_target_file_base_name does not exist, I will assume that state image file name and anim frames folder names also do not exist; if I am wrong, those may get overwritten (by other logic in this script).
target_render_file_exists = os.path.exists(render_target_file_base_name + '.png')
# If it does not exist, set the render target file name to that ( + '.png'). In that case, the following "while" block will never execute. BUT if it does exist, the "while" block _will_ execute, and do this: rename the render target file base name by appending a zero-padded variant counter (e.g. '__variant_0002'), and keep checking and incrementing until there's no target name conflict:
cgp_rename_count = 1
while target_render_file_exists == True:
    # Bump the variant counter (zero-padded below) for the next candidate name:
    cgp_rename_count += 1
    variantNameStr = str(cgp_rename_count)
variantNameStr = variantNameStr.zfill(4)
tst_str = render_target_file_base_name + '__variant_' + variantNameStr
target_render_file_exists = os.path.exists(tst_str + '.png')
if cgp_rename_count > 10000:
        print(
            "Encountered 10,000 naming collisions making new render target file "
            "names. Please make a copy of and rename the source .cgp file before "
            "continuing, Sparkles McSparkly. Exiting."
        )
sys.exit(1)
if target_render_file_exists == False:
render_target_file_base_name = tst_str
render_target_file_name = render_target_file_base_name + '.png'
anim_frames_folder_name = render_target_file_base_name + '_frames'
print('\nrender_target_file_name: ', render_target_file_name)
print('anim_frames_folder_name: ', anim_frames_folder_name)
# If SAVE_EVERY_N has a value greater than zero, create a subfolder to write frames to; also, initialize a variable holding how many digits to zero-pad animation frame file numbers to, based on how many frames will be rendered:
if SAVE_EVERY_N > 0:
padFileNameNumbersDigitsWidth = len(str(stopRenderAtPixelsN))
# Only create the anim frames folder if it does not exist:
if os.path.exists(anim_frames_folder_name) == False:
os.mkdir(anim_frames_folder_name)
# If bool set saying so, save arguments to this script to a .cgp file with the target render base file name:
if SAVE_PRESET:
# strip the --LOAD_PRESET parameter and value from SCRIPT_ARGS_STR before writing it to preset file (and save it in a new variable), as it would be redundant (and, if the parameters are based on loading another preset and overriding some parameters, it would moreover be wrong) :
SCRIPT_ARGS_WRITE_STR = re.sub('--LOAD_PRESET [^ ]*', r'', SCRIPT_ARGS_STR)
file = open(render_target_file_base_name + '.cgp', "w")
file.write(SCRIPT_ARGS_WRITE_STR + '\n\n')
if ARGS.LOAD_PRESET:
file.write('# Derived of preset: ' + LOAD_PRESET + '\n')
file.write('# Created with color_growth.py ' + ColorGrowthPyVersionString + '\n')
file.write('# Python version: ' + sys.version + '\n')
file.write('# Platform: ' + platform.platform() + '\n')
file.close()
# ----
# START IMAGE MAPPING
painted_coordinates = 0
# With higher VISCOSITY some coordinates can be painted around (by other coordinates on all sides) while coordinate mutation never actually moves into them. The result is that some coordinates may never be "born." This set and associated code revives such orphan coordinates:
potential_orphan_coords_two = set()
# These are used to reclaim orphan coordinates every N iterations through the
# `while coord_queue` loop, ramping the reclamation rate up as the render proceeds:
base_orphan_reclaim_multiplier = 0.015
orphans_to_reclaim_n = 0
coords_painted_since_reclaim = 0
print('Generating image . . . ')
newly_painted_coords = 0 # This is reset at every call of print_progress()
continue_painting = True
while coord_queue:
if continue_painting == False:
break
while coord_queue:
index = np.random.randint(0, len(coord_queue))
y, x = coord_queue[index]
if index == len(coord_queue) - 1:
coord_queue.pop()
else:
coord_queue[index] = coord_queue.pop()
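        # (The pop-and-swap above removes a random entry from coord_queue in
        # O(1), avoiding the O(n) cost of list.pop(index) at arbitrary positions.)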
        # Mutate color--! and assign it to the color list at canvas[y][x]:
canvas[y][x] = canvas[y][x] + np.random.randint(-RSHIFT, RSHIFT + 1, size=3) / 2
# print('Colored coordinate (y, x)', coord)
new_allocd_coords_color = canvas[y][x] = np.clip(canvas[y][x], 0, 255)
painted_coordinates += 1
newly_painted_coords += 1
coords_painted_since_reclaim += 1
# The first returned set is used straightway, the second optionally shuffles into the first after the first is depleted:
rnd_new_coords_set, potential_orphan_coords_one = get_rnd_unallocd_neighbors(y, x, canvas)
for new_y, new_x in rnd_new_coords_set:
coord_queue.append((new_y, new_x))
if BORDER_BLEND and is_coord_in_bounds(2*new_y-y, 2*new_x-x) and is_color_valid(2*new_y-y, 2*new_x-x, canvas):
canvas[new_y][new_x] = (np.array(new_allocd_coords_color) + np.array(canvas[2*new_y-y][2*new_x-x])) / 2
else:
canvas[new_y][new_x] = new_allocd_coords_color
# Save an animation frame (function only does if SAVE_EVERY_N True):
save_animation_frame()
# Print progress:
if report_stats_nth_counter == 0 or report_stats_nth_counter == report_stats_every_n:
print_progress(newly_painted_coords)
newly_painted_coords = 0
report_stats_nth_counter = 0
report_stats_nth_counter += 1
        # Terminate all coordinate and color mutation at an arbitrary number of mutations:
if painted_coordinates > stopRenderAtPixelsN:
print('Painted coordinate termination count', painted_coordinates, 'exceeded. Ending paint algorithm.')
continue_painting = False
break
if RECLAIM_ORPHANS:
for y in range(0, HEIGHT):
for x in range(0, WIDTH):
if not is_color_valid(y, x, canvas):
adj_color = find_adjacent_color(y, x, canvas)
if adj_color is not None:
coord_queue.append((y, x))
canvas[y][x] = adj_color + np.random.randint(-RSHIFT, RSHIFT + 1, size=3) / 2
canvas[y][x] = np.clip(canvas[y][x], 0, 255)
orphans_to_reclaim_n += 1
# END IMAGE MAPPING
# ----
# Works around the problem that this setup can (always does?) save everything _except_ a final frame with every coordinate painted, when painted_coordinates >= stopRenderAtPixelsN and STOP_AT_PERCENT == 1. Is there a better-engineered way to fix this? But this works:
if SAVE_EVERY_N != 0:
set_img_frame_file_name()
coords_set_to_image(canvas, imageFrameFileName)
# Save final image file:
print('Saving image ', render_target_file_name, ' . . .')
coords_set_to_image(canvas, render_target_file_name)
print('Render complete and image saved.')
# END MAIN FUNCTIONALITY. | r-alex-hall/fontDevTools | scripts/imgAndVideo/color_growth.py | Python | gpl-3.0 | 45,584 |
#!/usr/bin/env python
#
# This tool is copyright (c) 2006, Sean Estabrooks.
# It is released under the Gnu Public License, version 2.
#
# Import Perforce branches into Git repositories.
# Checking out the files is done by calling the standard p4
# client which you must have properly configured yourself
#
import marshal
import os
import sys
import time
import getopt
from signal import signal, \
SIGPIPE, SIGINT, SIG_DFL, \
default_int_handler
signal(SIGPIPE, SIG_DFL)
s = signal(SIGINT, SIG_DFL)
if s != default_int_handler:
signal(SIGINT, s)
def die(msg, *args):
for a in args:
msg = "%s %s" % (msg, a)
print "git-p4import fatal error:", msg
sys.exit(1)
def usage():
print "USAGE: git-p4import [-q|-v] [--authors=<file>] [-t <timezone>] [//p4repo/path <branch>]"
sys.exit(1)
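# Example invocation (illustrative; the depot path and branch name are made up):
#   git-p4import -v //depot/project/main p4import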
verbosity = 1
logfile = "/dev/null"
ignore_warnings = False
stitch = 0
tagall = True
def report(level, msg, *args):
global verbosity
global logfile
for a in args:
msg = "%s %s" % (msg, a)
fd = open(logfile, "a")
fd.writelines(msg)
fd.close()
if level <= verbosity:
print msg
class p4_command:
def __init__(self, _repopath):
try:
global logfile
self.userlist = {}
if _repopath[-1] == '/':
self.repopath = _repopath[:-1]
else:
self.repopath = _repopath
if self.repopath[-4:] != "/...":
self.repopath= "%s/..." % self.repopath
f=os.popen('p4 -V 2>>%s'%logfile, 'rb')
a = f.readlines()
if f.close():
raise
except:
die("Could not find the \"p4\" command")
def p4(self, cmd, *args):
global logfile
cmd = "%s %s" % (cmd, ' '.join(args))
report(2, "P4:", cmd)
f=os.popen('p4 -G %s 2>>%s' % (cmd,logfile), 'rb')
list = []
while 1:
try:
list.append(marshal.load(f))
except EOFError:
break
self.ret = f.close()
return list
def sync(self, id, force=False, trick=False, test=False):
if force:
ret = self.p4("sync -f %s@%s"%(self.repopath, id))[0]
elif trick:
ret = self.p4("sync -k %s@%s"%(self.repopath, id))[0]
elif test:
ret = self.p4("sync -n %s@%s"%(self.repopath, id))[0]
else:
ret = self.p4("sync %s@%s"%(self.repopath, id))[0]
if ret['code'] == "error":
data = ret['data'].upper()
if data.find('VIEW') > 0:
die("Perforce reports %s is not in client view"% self.repopath)
elif data.find('UP-TO-DATE') < 0:
die("Could not sync files from perforce", self.repopath)
def changes(self, since=0):
try:
list = []
for rec in self.p4("changes %s@%s,#head" % (self.repopath, since+1)):
list.append(rec['change'])
list.reverse()
return list
except:
return []
def authors(self, filename):
f=open(filename)
for l in f.readlines():
self.userlist[l[:l.find('=')].rstrip()] = \
(l[l.find('=')+1:l.find('<')].rstrip(),l[l.find('<')+1:l.find('>')])
f.close()
for f,e in self.userlist.items():
report(2, f, ":", e[0], " <", e[1], ">")
def _get_user(self, id):
if not self.userlist.has_key(id):
try:
user = self.p4("users", id)[0]
self.userlist[id] = (user['FullName'], user['Email'])
except:
self.userlist[id] = (id, "")
return self.userlist[id]
def _format_date(self, ticks):
symbol='+'
name = time.tzname[0]
offset = time.timezone
if ticks[8]:
name = time.tzname[1]
offset = time.altzone
if offset < 0:
offset *= -1
symbol = '-'
localo = "%s%02d%02d %s" % (symbol, offset / 3600, offset % 3600, name)
tickso = time.strftime("%a %b %d %H:%M:%S %Y", ticks)
return "%s %s" % (tickso, localo)
def where(self):
try:
return self.p4("where %s" % self.repopath)[-1]['path']
except:
return ""
def describe(self, num):
desc = self.p4("describe -s", num)[0]
self.msg = desc['desc']
self.author, self.email = self._get_user(desc['user'])
self.date = self._format_date(time.localtime(long(desc['time'])))
return self
class git_command:
def __init__(self):
try:
self.version = self.git("--version")[0][12:].rstrip()
except:
die("Could not find the \"git\" command")
try:
self.gitdir = self.get_single("rev-parse --git-dir")
report(2, "gdir:", self.gitdir)
except:
die("Not a git repository... did you forget to \"git init\" ?")
try:
self.cdup = self.get_single("rev-parse --show-cdup")
if self.cdup != "":
os.chdir(self.cdup)
self.topdir = os.getcwd()
report(2, "topdir:", self.topdir)
except:
die("Could not find top git directory")
def git(self, cmd):
global logfile
report(2, "GIT:", cmd)
f=os.popen('git %s 2>>%s' % (cmd,logfile), 'rb')
r=f.readlines()
self.ret = f.close()
return r
def get_single(self, cmd):
return self.git(cmd)[0].rstrip()
def current_branch(self):
try:
testit = self.git("rev-parse --verify HEAD")[0]
return self.git("symbolic-ref HEAD")[0][11:].rstrip()
except:
return None
def get_config(self, variable):
try:
return self.git("config --get %s" % variable)[0].rstrip()
except:
return None
def set_config(self, variable, value):
try:
self.git("config %s %s"%(variable, value) )
except:
die("Could not set %s to " % variable, value)
def make_tag(self, name, head):
self.git("tag -f %s %s"%(name,head))
def top_change(self, branch):
try:
a=self.get_single("name-rev --tags refs/heads/%s" % branch)
loc = a.find(' tags/') + 6
if a[loc:loc+3] != "p4/":
raise
return int(a[loc+3:][:-2])
except:
return 0
def update_index(self):
self.git("ls-files -m -d -o -z | git update-index --add --remove -z --stdin")
def checkout(self, branch):
self.git("checkout %s" % branch)
def repoint_head(self, branch):
self.git("symbolic-ref HEAD refs/heads/%s" % branch)
def remove_files(self):
self.git("ls-files | xargs rm")
def clean_directories(self):
self.git("clean -d")
def fresh_branch(self, branch):
report(1, "Creating new branch", branch)
self.git("ls-files | xargs rm")
os.remove(".git/index")
self.repoint_head(branch)
self.git("clean -d")
def basedir(self):
return self.topdir
def commit(self, author, email, date, msg, id):
self.update_index()
fd=open(".msg", "w")
fd.writelines(msg)
fd.close()
try:
current = self.get_single("rev-parse --verify HEAD")
head = "-p HEAD"
except:
current = ""
head = ""
tree = self.get_single("write-tree")
for r,l in [('DATE',date),('NAME',author),('EMAIL',email)]:
os.environ['GIT_AUTHOR_%s'%r] = l
os.environ['GIT_COMMITTER_%s'%r] = l
commit = self.get_single("commit-tree %s %s < .msg" % (tree,head))
os.remove(".msg")
self.make_tag("p4/%s"%id, commit)
self.git("update-ref HEAD %s %s" % (commit, current) )
try:
opts, args = getopt.getopt(sys.argv[1:], "qhvt:",
["authors=","help","stitch=","timezone=","log=","ignore","notags"])
except getopt.GetoptError:
usage()
for o, a in opts:
if o == "-q":
verbosity = 0
if o == "-v":
verbosity += 1
if o in ("--log"):
logfile = a
if o in ("--notags"):
tagall = False
if o in ("-h", "--help"):
usage()
if o in ("--ignore"):
ignore_warnings = True
git = git_command()
branch=git.current_branch()
for o, a in opts:
if o in ("-t", "--timezone"):
git.set_config("perforce.timezone", a)
if o in ("--stitch"):
git.set_config("perforce.%s.path" % branch, a)
stitch = 1
if len(args) == 2:
branch = args[1]
git.checkout(branch)
if branch == git.current_branch():
die("Branch %s already exists!" % branch)
report(1, "Setting perforce to ", args[0])
git.set_config("perforce.%s.path" % branch, args[0])
elif len(args) != 0:
die("You must specify the perforce //depot/path and git branch")
p4path = git.get_config("perforce.%s.path" % branch)
if p4path == None:
die("Do not know Perforce //depot/path for git branch", branch)
p4 = p4_command(p4path)
for o, a in opts:
if o in ("-a", "--authors"):
p4.authors(a)
localdir = git.basedir()
if p4.where()[:len(localdir)] != localdir:
report(1, "**WARNING** Appears p4 client is misconfigured")
report(1, " for sync from %s to %s" % (p4.repopath, localdir))
if ignore_warnings != True:
die("Reconfigure or use \"--ignore\" on command line")
if stitch == 0:
top = git.top_change(branch)
else:
top = 0
changes = p4.changes(top)
count = len(changes)
if count == 0:
report(1, "Already up to date...")
sys.exit(0)
ptz = git.get_config("perforce.timezone")
if ptz:
report(1, "Setting timezone to", ptz)
os.environ['TZ'] = ptz
time.tzset()
if stitch == 1:
git.remove_files()
git.clean_directories()
p4.sync(changes[0], force=True)
elif top == 0 and branch != git.current_branch():
p4.sync(changes[0], test=True)
report(1, "Creating new initial commit");
git.fresh_branch(branch)
p4.sync(changes[0], force=True)
else:
p4.sync(changes[0], trick=True)
report(1, "processing %s changes from p4 (%s) to git (%s)" % (count, p4.repopath, branch))
for id in changes:
report(1, "Importing changeset", id)
change = p4.describe(id)
p4.sync(id)
if tagall :
git.commit(change.author, change.email, change.date, change.msg, id)
else:
git.commit(change.author, change.email, change.date, change.msg, "import")
if stitch == 1:
git.clean_directories()
stitch = 0
| 2ndy/RaspIM | usr/share/doc/git/contrib/p4import/git-p4import.py | Python | gpl-2.0 | 10,722 |
#!/usr/bin/env python
import os
import sys
PROJECT_DIR = os.path.abspath(os.path.dirname(__file__))
sys.path.append(PROJECT_DIR)
sys.path.append(os.path.abspath(PROJECT_DIR + '/../'))
sys.path.append(os.path.abspath(PROJECT_DIR + '/../realestate/'))
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "testproject.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| wm3ndez/realestate | testproject/manage.py | Python | bsd-2-clause | 464 |
'''
Button
======
.. image:: images/button.jpg
:align: right
The :class:`Button` is a :class:`~kivy.uix.label.Label` with associated actions
that are triggered when the button is pressed (or released after a
click/touch). To configure the button, the same properties (padding,
font_size, etc) and
:ref:`sizing system <kivy-uix-label-sizing-and-text-content>`
are used as for the :class:`~kivy.uix.label.Label` class::
button = Button(text='Hello world', font_size=14)
To attach a callback when the button is pressed (clicked/touched), use
:class:`~kivy.uix.widget.Widget.bind`::
def callback(instance):
print('The button <%s> is being pressed' % instance.text)
btn1 = Button(text='Hello world 1')
btn1.bind(on_press=callback)
btn2 = Button(text='Hello world 2')
btn2.bind(on_press=callback)
If you want to be notified every time the button state changes, you can bind
to the :attr:`Button.state` property::
def callback(instance, value):
print('My button <%s> state is <%s>' % (instance, value))
btn1 = Button(text='Hello world 1')
btn1.bind(state=callback)
Kv Example::
Button:
text: 'press me'
on_press: print("ouch! More gently please")
on_release: print("ahhh")
on_state:
print("my current state is {}".format(self.state))
'''
__all__ = ('Button', )
from kivy.uix.label import Label
from kivy.properties import StringProperty, ListProperty, ColorProperty
from kivy.uix.behaviors import ButtonBehavior
class Button(ButtonBehavior, Label):
'''Button class, see module documentation for more information.
.. versionchanged:: 1.8.0
The behavior / logic of the button has been moved to
:class:`~kivy.uix.behaviors.ButtonBehaviors`.
'''
background_color = ColorProperty([1, 1, 1, 1])
'''Background color, in the format (r, g, b, a).
This acts as a *multiplier* to the texture colour. The default
texture is grey, so just setting the background color will give
a darker result. To set a plain color, set the
:attr:`background_normal` to ``''``.
.. versionadded:: 1.0.8
The :attr:`background_color` is a
:class:`~kivy.properties.ColorProperty` and defaults to [1, 1, 1, 1].
.. versionchanged:: 2.0.0
Changed from :class:`~kivy.properties.ListProperty` to
:class:`~kivy.properties.ColorProperty`.
'''
background_normal = StringProperty(
'atlas://data/images/defaulttheme/button')
'''Background image of the button used for the default graphical
representation when the button is not pressed.
.. versionadded:: 1.0.4
:attr:`background_normal` is a :class:`~kivy.properties.StringProperty`
and defaults to 'atlas://data/images/defaulttheme/button'.
'''
background_down = StringProperty(
'atlas://data/images/defaulttheme/button_pressed')
'''Background image of the button used for the default graphical
representation when the button is pressed.
.. versionadded:: 1.0.4
:attr:`background_down` is a :class:`~kivy.properties.StringProperty` and
defaults to 'atlas://data/images/defaulttheme/button_pressed'.
'''
background_disabled_normal = StringProperty(
'atlas://data/images/defaulttheme/button_disabled')
'''Background image of the button used for the default graphical
representation when the button is disabled and not pressed.
.. versionadded:: 1.8.0
:attr:`background_disabled_normal` is a
:class:`~kivy.properties.StringProperty` and defaults to
'atlas://data/images/defaulttheme/button_disabled'.
'''
background_disabled_down = StringProperty(
'atlas://data/images/defaulttheme/button_disabled_pressed')
'''Background image of the button used for the default graphical
representation when the button is disabled and pressed.
.. versionadded:: 1.8.0
:attr:`background_disabled_down` is a
:class:`~kivy.properties.StringProperty` and defaults to
'atlas://data/images/defaulttheme/button_disabled_pressed'.
'''
border = ListProperty([16, 16, 16, 16])
'''Border used for :class:`~kivy.graphics.vertex_instructions.BorderImage`
graphics instruction. Used with :attr:`background_normal` and
:attr:`background_down`. Can be used for custom backgrounds.
It must be a list of four values: (bottom, right, top, left). Read the
BorderImage instruction for more information about how to use it.
:attr:`border` is a :class:`~kivy.properties.ListProperty` and defaults to
(16, 16, 16, 16)
'''
| rnixx/kivy | kivy/uix/button.py | Python | mit | 4,598 |
import unicodecsv as csv
import tempfile
import os
import errno
from datetime import date
class Recommendations:
STORED_RECOMMENDATIONS_FILENAME = os.path.join(tempfile.gettempdir(), 'seuraaja_recommendations_last.csv')
CSV_FIELD_NAMES = ['name', 'recommendation', 'potential', 'timestamp']
@staticmethod
def company_summary_to_recommendation(company_summary):
return {
'name': company_summary['name'],
'recommendation': company_summary['recommendation'],
'potential': company_summary['potential'],
'timestamp': date.today()
}
@staticmethod
def get_current_recommendations(company_summaries):
return map(Recommendations.company_summary_to_recommendation, company_summaries)
@staticmethod
def read_stored_recommendations():
try:
with open(Recommendations.STORED_RECOMMENDATIONS_FILENAME, 'r') as csvfile:
reader = csv.DictReader(csvfile)
return list(reader)
except IOError as e:
if e.errno == errno.ENOENT:
return None
raise e
@staticmethod
def get_changed_recommendations(current_recommendations, last_recommendations):
last_company_names = map(lambda r: r['name'], last_recommendations)
last_recommendations_by_name = dict(zip(last_company_names, last_recommendations))
return filter(lambda r: r['name'] in last_recommendations_by_name and
last_recommendations_by_name[r['name']]['recommendation'] != r['recommendation'],
current_recommendations)
@staticmethod
def get_new_recommendations(current_recommendations, last_recommendations):
current_company_names = map(lambda r: r['name'], current_recommendations)
last_company_names = map(lambda r: r['name'], last_recommendations)
new_company_names = set(current_company_names) - set(last_company_names)
return filter(lambda r: r['name'] in new_company_names, current_recommendations)
@staticmethod
def persist(recommendations):
with open(Recommendations.STORED_RECOMMENDATIONS_FILENAME, 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=Recommendations.CSV_FIELD_NAMES)
writer.writeheader()
for recommendation in recommendations:
writer.writerow(recommendation)
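# Hedged usage sketch (not part of the original module); assumes company
# summaries shaped like the dicts consumed above.
if __name__ == '__main__':
    summaries = [{'name': 'Acme', 'recommendation': 'buy', 'potential': '12%'}]
    current = list(Recommendations.get_current_recommendations(summaries))
    last = Recommendations.read_stored_recommendations() or []
    print(list(Recommendations.get_new_recommendations(current, last)))
    print(list(Recommendations.get_changed_recommendations(current, last)))
    Recommendations.persist(current)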
| 2mv/seuraaja | recommendations.py | Python | isc | 2,250 |
import sh
class GitCommitMessage(object):
""" Class representing a git commit message. A commit message consists of the following:
- original: The actual commit message as returned by `git log`
- full: original, but stripped of any comments
- title: the first line of full
- body: all lines following the title
"""
def __init__(self, original=None, full=None, title=None, body=None):
self.original = original
self.full = full
self.title = title
self.body = body
def __str__(self):
return self.full # pragma: no cover
def __repr__(self):
return self.__str__() # pragma: no cover
class GitContext(object):
def __init__(self):
self.commit_msg = None
self.changed_files = []
def set_commit_msg(self, commit_msg_str):
""" Sets the commit message by parsing a given string into the different parts of a commit message """
lines = [line for line in commit_msg_str.split("\n") if not line.startswith("#")]
full = "\n".join(lines)
title = lines[0]
body = lines[1:] if len(lines) > 1 else []
self.commit_msg = GitCommitMessage(original=commit_msg_str, full=full, title=title, body=body)
@staticmethod
def from_environment():
commit_info = GitContext()
commit_info.set_commit_msg(sh.git.log("-1", "--pretty=%B", _tty_out=False))
# changed files in last commit
changed_files_str = sh.git("diff-tree", "--no-commit-id", "--name-only", "-r", "HEAD", _tty_out=False)
commit_info.changed_files = [changed_file for changed_file in changed_files_str.strip().split("\n")]
return commit_info
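# Minimal usage sketch (assumed, not part of the original module): parsing a
# commit message string directly, without invoking git.
if __name__ == "__main__":
    ctx = GitContext()
    ctx.set_commit_msg("Fix parser\n\nLonger description here\n# a comment line\n")
    print(ctx.commit_msg.title)  # "Fix parser"
    print(ctx.commit_msg.body)   # remaining non-comment lines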
| tobyoxborrow/gitlint | gitlint/git.py | Python | mit | 1,705 |
# -*- coding: utf-8 -*-
'''
Beacon to monitor disk usage.
.. versionadded:: 2015.5.0
:depends: python-psutil
'''
# Import Python libs
from __future__ import absolute_import
import logging
import re
# Import Third Party Libs
try:
import psutil
HAS_PSUTIL = True
except ImportError:
HAS_PSUTIL = False
log = logging.getLogger(__name__)
__virtualname__ = 'diskusage'
def __virtual__():
if HAS_PSUTIL is False:
return False
else:
return __virtualname__
def validate(config):
'''
Validate the beacon configuration
'''
# Configuration for diskusage beacon should be a list of dicts
if not isinstance(config, dict):
return False, ('Configuration for diskusage beacon '
'must be a dictionary.')
return True, 'Valid beacon configuration'
def beacon(config):
'''
Monitor the disk usage of the minion
Specify thresholds for each disk and only emit a beacon if any of them are
exceeded.
.. code-block:: yaml
beacons:
diskusage:
- /: 63%
- /mnt/nfs: 50%
Windows drives must be quoted to avoid yaml syntax errors
.. code-block:: yaml
beacons:
diskusage:
- interval: 120
            - 'c:\\': 90%
            - 'd:\\': 50%
'''
ret = []
for mounts in config:
        mount = next(iter(mounts))
try:
_current_usage = psutil.disk_usage(mount)
except OSError:
# Ensure a valid mount point
log.error('{0} is not a valid mount point, skipping.'.format(mount))
continue
current_usage = _current_usage.percent
monitor_usage = mounts[mount]
if '%' in monitor_usage:
monitor_usage = re.sub('%', '', monitor_usage)
monitor_usage = float(monitor_usage)
if current_usage >= monitor_usage:
ret.append({'diskusage': current_usage, 'mount': mount})
return ret
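# Illustrative example (an assumption, not part of Salt's API surface): calling
# beacon([{'/': '90%'}]) directly returns a list like
# [{'diskusage': 93.1, 'mount': '/'}] once usage on '/' crosses the threshold,
# and an empty list otherwise.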
| stephane-martin/salt-debian-packaging | salt-2016.3.3/salt/beacons/diskusage.py | Python | apache-2.0 | 1,982 |
#!/usr/bin/python
import network
import threading
import blockchain
import code
import logging
import socket
import sys
#logging.basicConfig(format='%(name)s - %(message)s', level=logging.INFO)
chain = blockchain.BlockChain()
server = network.BitcoinServer(hosts=["127.0.0.1"], chain=chain)
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
def console():
d = {"chain":chain, "server":server}
console = code.InteractiveConsole(d)
console.interact()
console_thread = threading.Thread(target=console)
console_thread.daemon = True
console_thread.start()
console_thread.join()
| kokjo/pycoin | pycoin.py | Python | unlicense | 665 |
import inspect
import os
import sys
import textwrap
import types
import warnings
from .reference import Reference
from .shim import ModuleShim
class XTracebackFrame(object):
FILTER = ("__builtins__", "__all__", "__doc__", "__file__", "__name__",
"__package__", "__path__", "__loader__", "__cached__",
"__initializing__")
FUNCTION_EXCLUDE = ("GeneratorContextManager.__exit__",)
GLOBALS_PREFIX = "g:"
def __init__(self, xtb, frame, frame_info, tb_index):
self.xtb = xtb
self.frame = frame
self.frame_info = frame_info
self.tb_index = tb_index
(self.filename, self.lineno, self.function,
self.code_context, self.index) = self.frame_info
self.args, self.varargs, self.varkw = inspect.getargs(frame.f_code)
# keep track of what we've formatted in this frame
self.formatted_vars = {}
# we use a filtered copy of locals and globals
self.locals = self._filter(frame.f_locals)
self.globals = self._filter(frame.f_globals)
# filter globals
if self.xtb.options.globals_module_include is not None:
for key, value in self.globals.items():
assert key not in self.FILTER
if isinstance(value, types.ModuleType):
module = value.__name__
elif isinstance(value, types.InstanceType):
module = value.__class__.__module__
else:
module = getattr(value, "__module__", None)
if (module is not None \
and not module.startswith(
self.xtb.options.globals_module_include
)):
del self.globals[key]
# if path is a real path then try to shorten it
if os.path.exists(self.filename):
self.filename = self.xtb._format_filename(
os.path.abspath(self.filename)
)
# qualify method name with class name
if self.xtb.options.qualify_methods and self.args:
try:
cls = frame.f_locals[self.args[0]]
except KeyError: # pragma: no cover - defensive
# we're assuming that the first argument is in f_locals but
# it may not be in some cases so this is a defence, see
# https://github.com/ischium/xtraceback/issues/3 with further
# detail at http://www.sqlalchemy.org/trac/ticket/2317 and
# https://dev.entrouvert.org/issues/765
pass
except TypeError: # pragma: no cover - defensive
# if self.args[0] is a list it is not hashable - inspect.getargs
# may return nested lists for args
pass
else:
if not isinstance(cls, type):
cls = type(cls)
if hasattr(cls, self.function):
for base in inspect.getmro(cls):
if self.function in base.__dict__:
self.function = base.__name__ + "." + self.function
break
self._formatted = None
@property
def exclude(self):
return self.locals.get("__xtraceback_skip_frame__", False) \
or self.function in self.FUNCTION_EXCLUDE
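    # (Note: a frame can opt out of formatting by defining
    # __xtraceback_skip_frame__ = True in its local scope; see the
    # `exclude` property above.)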
def _filter(self, fdict):
try:
fdict = fdict.copy()
except NotImplementedError:
# user data types inheriting dict may not have implemented copy
pass
else:
to_remove = []
for key, value in fdict.items():
try:
if key in self.FILTER:
to_remove.append(key)
continue
except:
exc_info = sys.exc_info()
# the comparison failed for an unknown reason likely a
# custom __cmp__ that makes bad assumptions - swallow
try:
warnings.warn("Could not filter %r: %r" % (key, exc_info[1]))
except:
warnings.warn("Could not filter and can't say why: %s" % exc_info[1])
continue
else:
# replace some values with shim types
if isinstance(value, types.ModuleType):
value = ModuleShim.get_instance(value, self.xtb)
# replace objects from further up the stack with a Marker
oid = id(value)
stack_ref = self.xtb.seen.get(oid)
if stack_ref is not None:
marker = stack_ref.marker(self.xtb, self.tb_index, key)
if marker.tb_offset != 0:
value = marker
else:
self.xtb.seen[oid] = Reference(self.tb_index, key, value)
if isinstance(value, dict):
value = self._filter(value)
fdict[key] = value
for key in to_remove:
del fdict[key]
return fdict
def _format_variable(self, lines, key, value, indent=4, prefix=""):
if value is not self.formatted_vars.get(key):
self.formatted_vars[key] = value
if self.globals.get(key) is value:
prefix = self.GLOBALS_PREFIX + prefix
lines.append(self.xtb._format_variable(key, value, indent, prefix))
def _format_dict(self, odict, indent=4):
lines = []
for key in sorted(odict.keys()):
self._format_variable(lines, key, odict[key], indent)
return lines
def _format_frame(self):
lines = [' File "%s", line %d, in %s' % (self.filename, self.lineno,
self.function)]
# push frame args
if self.xtb.options.show_args:
for arg in self.args:
if isinstance(arg, list):
# TODO: inspect.getargs arg list may contain nested lists;
# skip it for now
continue
self._format_variable(lines, arg, self.locals.get(arg))
if self.varargs:
self._format_variable(lines, self.varargs,
self.locals.get(self.varargs), prefix="*")
if self.varkw:
self._format_variable(lines, self.varkw,
self.locals.get(self.varkw), prefix="**")
# push globals
if self.xtb.options.show_globals:
lines.extend(self._format_dict(self.globals))
# push context lines
if self.code_context is not None:
lineno = self.lineno - self.index
dedented = textwrap.dedent("".join(self.code_context))
for line in dedented.splitlines():
numbered_line = " %s" % "%*s %s" % (self.xtb.number_padding,
lineno,
line)
if lineno == self.lineno:
if self.xtb.options.context > 1:
# push the numbered line with a marker
dedented_line = numbered_line.lstrip()
marker_padding = len(numbered_line) \
- len(dedented_line) - 2
lines.append("%s> %s" % ("-" * marker_padding,
dedented_line))
else:
# push the line only
lines.append(" " + line)
# push locals below lined up with the start of code
if self.xtb.options.show_locals:
indent = self.xtb.number_padding + len(line) \
- len(line.lstrip()) + 5
lines.extend(self._format_dict(self.locals, indent))
else:
# push the numbered line
lines.append(numbered_line)
lineno += 1
elif self.xtb.options.show_locals:
# no context so we are execing
lines.extend(self._format_dict(self.locals))
return "\n".join(lines)
def __str__(self):
if self._formatted is None:
self._formatted = self._format_frame()
return self._formatted
| g2p/xtraceback | xtraceback/xtracebackframe.py | Python | mit | 8,655 |
"""
This is an Astropy affiliated package.
"""
# Affiliated packages may add whatever they like to this file, but
# should keep this content at the top.
# ----------------------------------------------------------------------------
from ._astropy_init import *
# ----------------------------------------------------------------------------
# For egg_info test builds to pass, put package imports here.
if not _ASTROPY_SETUP_:
pass
'''Convert from photon energy in keV to wavelength in mm'''
energy2wave = 1.2398419292004202e-06
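# Illustrative conversion (not part of the package): wavelength [mm] follows
# from energy2wave / energy [keV], e.g. a 1 keV photon has a wavelength of
# about 1.24e-6 mm (1.24 nm).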
| hamogu/marxs | marxs/__init__.py | Python | gpl-3.0 | 536 |
short_name = "godot"
name = "Godot Engine"
major = 2
minor = 1
patch = 4
status = "beta"
| pixelpicosean/my-godot-2.1 | version.py | Python | mit | 89 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# pynag - Python Nagios plug-in and configuration environment
# Copyright (C) 2013 Pall Sigurdsson
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# Various tests that must pass in order for the build process to complete
| pynag/pynag | tests/build-test.py | Python | gpl-2.0 | 924 |
# -*- coding: utf-8 -*-
# Zeobuilder is an extensible GUI-toolkit for molecular model construction.
# Copyright (C) 2007 - 2012 Toon Verstraelen <Toon.Verstraelen@UGent.be>, Center
# for Molecular Modeling (CMM), Ghent University, Ghent, Belgium; all rights
# reserved unless otherwise stated.
#
# This file is part of Zeobuilder.
#
# Zeobuilder is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# In addition to the regulations of the GNU General Public License,
# publications and communications based in parts on this program or on
# parts of this program are required to cite the following article:
#
# "ZEOBUILDER: a GUI toolkit for the construction of complex molecules on the
# nanoscale with building blocks", Toon Verstraelen, Veronique Van Speybroeck
# and Michel Waroquier, Journal of Chemical Information and Modeling, Vol. 48
# (7), 1530-1541, 2008
# DOI:10.1021/ci8000748
#
# Zeobuilder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
#--
from common import *
from zeobuilder import context
from zeobuilder.actions.composed import Parameters
from zeobuilder.expressions import Expression
import zeobuilder.actions.primitive as primitive
from molmod import angstrom, Translation
from molmod.bonds import BOND_SINGLE
import numpy
def test_add_atom():
def fn():
FileNew = context.application.plugins.get_action("FileNew")
FileNew()
context.application.main.select_nodes([context.application.model.universe])
AddAtom = context.application.plugins.get_action("AddAtom")
assert AddAtom.analyze_selection()
AddAtom()
run_application(fn)
def test_connect_single_bond():
def fn():
FileNew = context.application.plugins.get_action("FileNew")
FileNew()
context.application.main.select_nodes([context.application.model.universe])
AddAtom = context.application.plugins.get_action("AddAtom")
assert AddAtom.analyze_selection()
AddAtom()
context.application.main.select_nodes([context.application.model.universe])
AddAtom = context.application.plugins.get_action("AddAtom")
assert AddAtom.analyze_selection()
AddAtom()
context.application.main.select_nodes(context.application.model.universe.children)
ConnectSingleBond = context.application.plugins.get_action("ConnectSingleBond")
assert ConnectSingleBond.analyze_selection()
ConnectSingleBond()
run_application(fn)
def test_connect_double_bond():
def fn():
FileNew = context.application.plugins.get_action("FileNew")
FileNew()
context.application.main.select_nodes([context.application.model.universe])
AddAtom = context.application.plugins.get_action("AddAtom")
assert AddAtom.analyze_selection()
AddAtom()
context.application.main.select_nodes([context.application.model.universe])
AddAtom = context.application.plugins.get_action("AddAtom")
assert AddAtom.analyze_selection()
AddAtom()
context.application.main.select_nodes(context.application.model.universe.children)
ConnectDoubleBond = context.application.plugins.get_action("ConnectDoubleBond")
assert ConnectDoubleBond.analyze_selection()
ConnectDoubleBond()
run_application(fn)
def test_connect_triple_bond():
def fn():
FileNew = context.application.plugins.get_action("FileNew")
FileNew()
context.application.main.select_nodes([context.application.model.universe])
AddAtom = context.application.plugins.get_action("AddAtom")
assert AddAtom.analyze_selection()
AddAtom()
context.application.main.select_nodes([context.application.model.universe])
AddAtom = context.application.plugins.get_action("AddAtom")
assert AddAtom.analyze_selection()
AddAtom()
context.application.main.select_nodes(context.application.model.universe.children)
ConnectTripleBond = context.application.plugins.get_action("ConnectTripleBond")
assert ConnectTripleBond.analyze_selection()
ConnectTripleBond()
run_application(fn)
def test_auto_connect_physical_tpa():
def fn():
context.application.model.file_open("test/input/tpa.zml")
context.application.main.select_nodes([context.application.model.universe])
AutoConnectPhysical = context.application.plugins.get_action("AutoConnectPhysical")
assert AutoConnectPhysical.analyze_selection()
AutoConnectPhysical()
run_application(fn)
def test_auto_connect_physical_tpa_framed():
def fn():
context.application.model.file_open("test/input/tpa.xyz")
# put it in a frame
context.application.main.select_nodes(context.application.model.universe.children)
Frame = context.application.plugins.get_action("Frame")
assert Frame.analyze_selection()
Frame()
# add bonds
context.application.main.select_nodes([context.application.model.universe])
AutoConnectPhysical = context.application.plugins.get_action("AutoConnectPhysical")
assert AutoConnectPhysical.analyze_selection()
AutoConnectPhysical()
# count the number of bonds that are direct children of the universe
# object, should be zero
Bond = context.application.plugins.get_node("Bond")
for child in context.application.model.universe.children:
assert not isinstance(child, Bond)
run_application(fn)
def test_auto_connect_physical_lau():
def fn():
context.application.model.file_open("test/input/lau.zml")
context.application.main.select_nodes([context.application.model.universe])
AutoConnectPhysical = context.application.plugins.get_action("AutoConnectPhysical")
assert AutoConnectPhysical.analyze_selection()
AutoConnectPhysical()
run_application(fn)
def test_auto_connect_parameters_tpa():
def fn():
context.application.model.file_open("test/input/tpa.zml")
universe = context.application.model.universe
context.application.main.select_nodes([universe])
parameters = Parameters()
parameters.number1 = 6
parameters.number2 = 6
parameters.distance = 3.0
parameters.bond_type = BOND_SINGLE
AutoConnectParameters = context.application.plugins.get_action("AutoConnectParameters")
assert AutoConnectParameters.analyze_selection(parameters)
AutoConnectParameters(parameters)
Bond = context.application.plugins.get_node("Bond")
num_bonds = sum(isinstance(node,Bond) for node in universe.children)
assert num_bonds == 2*4
run_application(fn)
def test_auto_connect_parameters_tpa2():
def fn():
context.application.model.file_open("test/input/tpa.zml")
universe = context.application.model.universe
context.application.main.select_nodes([universe] + universe.children)
parameters = Parameters()
parameters.number1 = 6
parameters.number2 = 6
parameters.distance = 3.0
parameters.bond_type = BOND_SINGLE
AutoConnectParameters = context.application.plugins.get_action("AutoConnectParameters")
assert AutoConnectParameters.analyze_selection(parameters)
AutoConnectParameters(parameters)
Bond = context.application.plugins.get_node("Bond")
num_bonds = sum(isinstance(node,Bond) for node in universe.children)
assert num_bonds == 2*4
run_application(fn)
def test_auto_connect_parameters_lau():
def fn():
context.application.model.file_open("test/input/lau.zml")
context.application.main.select_nodes([context.application.model.universe])
parameters = Parameters()
parameters.number1 = 14
parameters.number2 = 14
parameters.distance = 6.0
parameters.bond_type = BOND_SINGLE
AutoConnectParameters = context.application.plugins.get_action("AutoConnectParameters")
assert AutoConnectParameters.analyze_selection(parameters)
AutoConnectParameters(parameters)
run_application(fn)
def test_merge_overlapping_atoms_lau():
def fn():
context.application.model.file_open("test/input/lau_double.zml")
context.application.main.select_nodes([context.application.model.universe])
MergeOverlappingAtoms = context.application.plugins.get_action("MergeOverlappingAtoms")
assert MergeOverlappingAtoms.analyze_selection()
MergeOverlappingAtoms()
run_application(fn)
def test_center_of_mass():
def fn():
context.application.model.file_open("test/input/tpa.zml")
context.application.main.select_nodes([context.application.model.universe])
CenterOfMass = context.application.plugins.get_action("CenterOfMass")
assert CenterOfMass.analyze_selection()
CenterOfMass()
run_application(fn)
def test_center_of_mass_and_principal_axes():
def fn():
context.application.model.file_open("test/input/tpa.zml")
context.application.main.select_nodes([context.application.model.universe])
CenterOfMassAndPrincipalAxes = context.application.plugins.get_action("CenterOfMassAndPrincipalAxes")
assert CenterOfMassAndPrincipalAxes.analyze_selection()
CenterOfMassAndPrincipalAxes()
run_application(fn)
def test_rearrange_atoms():
def fn():
context.application.model.file_open("test/input/tpa.zml")
context.application.main.select_nodes([context.application.model.universe])
RearrangeAtoms = context.application.plugins.get_action("RearrangeAtoms")
assert RearrangeAtoms.analyze_selection()
RearrangeAtoms()
run_application(fn)
def test_saturate_with_hydrogens_tpa():
def fn():
context.application.model.file_open("test/input/tpa.zml")
Atom = context.application.plugins.get_node("Atom")
for child in context.application.model.universe.children[::-1]:
if isinstance(child, Atom) and child.number == 1:
context.application.model.universe.children.remove(child)
context.application.main.select_nodes([context.application.model.universe])
AutoConnectPhysical = context.application.plugins.get_action("AutoConnectPhysical")
assert AutoConnectPhysical.analyze_selection()
AutoConnectPhysical()
context.application.main.select_nodes([context.application.model.universe])
SaturateWithHydrogens = context.application.plugins.get_action("SaturateWithHydrogens")
assert SaturateWithHydrogens.analyze_selection()
SaturateWithHydrogens()
run_application(fn)
def test_saturate_with_hydrogens_manual_tpa():
def fn():
context.application.model.file_open("test/input/tpa.xyz")
Atom = context.application.plugins.get_node("Atom")
for child in context.application.model.universe.children[::-1]:
if isinstance(child, Atom) and child.number == 1:
context.application.model.universe.children.remove(child)
context.application.main.select_nodes([context.application.model.universe])
AutoConnectPhysical = context.application.plugins.get_action("AutoConnectPhysical")
assert AutoConnectPhysical.analyze_selection()
AutoConnectPhysical()
parameters = Parameters()
parameters.num_hydrogens = 2
parameters.valence_angle = 1.9093
context.application.main.select_nodes([context.application.model.universe.children[1]])
SaturateHydrogensManual = context.application.plugins.get_action("SaturateHydrogensManual")
assert SaturateHydrogensManual.analyze_selection(parameters)
SaturateHydrogensManual(parameters)
run_application(fn)
def test_distribution_bond_lengths_precursor():
def fn():
context.application.model.file_open("test/input/precursor.zml")
context.application.main.select_nodes([context.application.model.universe])
parameters = Parameters()
parameters.filter_atom1 = Expression()
parameters.filter_bond12 = Expression()
parameters.filter_atom2 = Expression()
DistributionBondLengths = context.application.plugins.get_action("DistributionBondLengths")
assert DistributionBondLengths.analyze_selection(parameters)
DistributionBondLengths(parameters)
run_application(fn)
def test_distribution_bending_angles_precursor():
def fn():
context.application.model.file_open("test/input/precursor.zml")
context.application.main.select_nodes([context.application.model.universe])
parameters = Parameters()
parameters.filter_atom1 = Expression()
parameters.filter_bond12 = Expression()
parameters.filter_atom2 = Expression("atom.number==8")
parameters.filter_bond23 = Expression()
parameters.filter_atom3 = Expression()
DistributionBendingAngles = context.application.plugins.get_action("DistributionBendingAngles")
assert DistributionBendingAngles.analyze_selection(parameters)
DistributionBendingAngles(parameters)
run_application(fn)
def test_distribution_dihedral_angles_precursor():
def fn():
context.application.model.file_open("test/input/precursor.zml")
context.application.main.select_nodes([context.application.model.universe])
parameters = Parameters()
parameters.filter_atom1 = Expression()
parameters.filter_bond12 = Expression()
parameters.filter_atom2 = Expression()
parameters.filter_bond23 = Expression()
parameters.filter_atom3 = Expression()
parameters.filter_bond34 = Expression()
parameters.filter_atom4 = Expression()
DistributionDihedralAngles = context.application.plugins.get_action("DistributionDihedralAngles")
assert DistributionDihedralAngles.analyze_selection(parameters)
DistributionDihedralAngles(parameters)
run_application(fn)
def test_molden_labels():
def fn():
context.application.model.file_open("test/input/precursor.zml")
context.application.main.select_nodes(context.application.model.universe.children)
MoldenLabels = context.application.plugins.get_action("MoldenLabels")
assert MoldenLabels.analyze_selection()
MoldenLabels()
run_application(fn)
def test_clone_order():
def fn():
context.application.model.file_open("test/input/springs.zml")
context.application.main.select_nodes(context.application.model.universe.children[:2])
CloneOrder = context.application.plugins.get_action("CloneOrder")
assert CloneOrder.analyze_selection()
CloneOrder()
run_application(fn)
def test_clone_order2():
def fn():
context.application.model.file_open("test/input/azaallyl_thf_mm.zml")
context.application.main.select_nodes(context.application.model.universe.children[2:])
CloneOrder = context.application.plugins.get_action("CloneOrder")
assert CloneOrder.analyze_selection()
CloneOrder()
run_application(fn)
def test_strong_ring_distribution():
def fn():
context.application.model.file_open("test/input/springs.zml")
context.application.main.select_nodes([context.application.model.universe.children[1]])
StrongRingDistribution = context.application.plugins.get_action("StrongRingDistribution")
assert StrongRingDistribution.analyze_selection()
StrongRingDistribution()
run_application(fn)
def test_frame_molecules():
def fn():
from molmod import UnitCell
context.application.model.file_open("test/input/methane_box22_125.xyz")
universe = context.application.model.universe
context.application.action_manager.record_primitives = False
unit_cell = UnitCell(numpy.identity(3, float)*22*angstrom, numpy.ones(3, bool))
primitive.SetProperty(universe, "cell", unit_cell)
context.application.main.select_nodes([universe])
AutoConnectPhysical = context.application.plugins.get_action("AutoConnectPhysical")
assert AutoConnectPhysical.analyze_selection()
AutoConnectPhysical()
context.application.main.select_nodes([universe])
FrameMolecules = context.application.plugins.get_action("FrameMolecules")
assert FrameMolecules.analyze_selection()
FrameMolecules()
Bond = context.application.plugins.get_node("Bond")
for frame in universe.children:
for bond in frame.children:
if isinstance(bond, Bond):
bond.calc_vector_dimensions()
assert bond.length < 2*angstrom
run_application(fn)
def test_frame_molecules2():
def fn():
context.application.model.file_open("test/input/ethane-ethane-pos.xyz")
universe = context.application.model.universe
context.application.action_manager.record_primitives = False
context.application.main.select_nodes(universe.children)
Frame = context.application.plugins.get_action("Frame")
assert Frame.analyze_selection()
Frame()
context.application.main.select_nodes(universe.children)
AutoConnectPhysical = context.application.plugins.get_action("AutoConnectPhysical")
assert AutoConnectPhysical.analyze_selection()
AutoConnectPhysical()
context.application.main.select_nodes(universe.children)
FrameMolecules = context.application.plugins.get_action("FrameMolecules")
assert FrameMolecules.analyze_selection()
FrameMolecules()
run_application(fn)
def test_frame_molecules_periodic():
def fn():
context.application.model.file_open("test/input/DOH_2xAl_Cu.zml")
universe = context.application.model.universe
context.application.main.select_nodes([universe])
FrameMolecules = context.application.plugins.get_action("FrameMolecules")
assert FrameMolecules.analyze_selection()
FrameMolecules()
        # Nothing should have happened: the periodic bonds must keep the
        # molecules from being merged into a single frame.
assert len(universe.children) > 1
run_application(fn)
def test_frame_molecules_periodic2():
def fn():
context.application.model.file_open("test/input/sdock_testing.zml")
universe = context.application.model.universe
context.application.main.select_nodes([universe])
FrameMolecules = context.application.plugins.get_action("FrameMolecules")
assert FrameMolecules.analyze_selection()
FrameMolecules()
        # The molecules must not be merged into just two frames.
assert len(universe.children) > 2
run_application(fn)
def test_select_bonded_neighbors():
def fn():
context.application.model.file_open("test/input/springs.zml")
context.application.main.select_nodes(context.application.model.universe.children[0].children[0:1])
SelectBondedNeighbors = context.application.plugins.get_action("SelectBondedNeighbors")
assert SelectBondedNeighbors.analyze_selection()
SelectBondedNeighbors()
run_application(fn)
def test_add_zeolite_tetraeders():
def fn():
context.application.model.file_open("test/input/precursor.zml")
context.application.main.select_nodes([context.application.model.universe])
AddZeoliteTetraeders = context.application.plugins.get_action("AddZeoliteTetraeders")
assert AddZeoliteTetraeders.analyze_selection()
AddZeoliteTetraeders()
run_application(fn)
def test_com_praxes_diatomic():
def fn():
FileNew = context.application.plugins.get_action("FileNew")
FileNew()
Atom = context.application.plugins.get_node("Atom")
Frame = context.application.plugins.get_node("Frame")
frame = Frame()
context.application.model.universe.add(frame)
atom1 = Atom()
atom2 = Atom(transformation=Translation([1.1,0.1,0.03]))
frame.add(atom1)
frame.add(atom2)
CenterOfMassAndPrincipalAxes = context.application.plugins.get_action("CenterOfMassAndPrincipalAxes")
context.application.main.select_nodes([context.application.model.universe.children[0]])
assert CenterOfMassAndPrincipalAxes.analyze_selection()
CenterOfMassAndPrincipalAxes()
run_application(fn)
| molmod/zeobuilder | test/test_molecular.py | Python | gpl-3.0 | 20,897 |
#!/usr/bin/env python
# coding: utf-8
#
# Copyright (c) 2015, PAL Team.
# All rights reserved. See LICENSE for details.
from flask import request
from flask_restful import Resource
from flask_restful_swagger import swagger
from pal.nlp.standard_nlp import StandardNLP
PRESENT_TAGS = ['VB', 'VBG', 'VBP', 'VBZ']
PAST_TAGS = ['VBD', 'VBN']
def get_tense(pos):
present_count = len([True for x in pos if x[1] in PRESENT_TAGS])
past_count = len([True for x in pos if x[1] in PAST_TAGS])
return 'past' if past_count > present_count else 'present'
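# Worked example (hypothetical POS input, Penn Treebank tags): for
# pos = [('I', 'PRP'), ('walked', 'VBD'), ('home', 'NN')] we get
# past_count=1 and present_count=0, so get_tense(pos) returns 'past'.
# Ties, including empty input, fall through to 'present'.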
class TenseClassifier(Resource):
"""Swagger resource for TenseClassifier"""
@swagger.operation(
notes='Classifies Tenses',
nickname='tense',
parameters=[
{
'name': 'query',
'description': 'The sentence to classify.',
'required': True,
'allowMultiple': False,
'dataType': 'string',
'paramType': 'form'
}
])
def post(self):
params = {x: request.form[x] for x in request.form}
StandardNLP.process(params)
return get_tense(params['features']['pos'])
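# Hypothetical client call -- the URL rule is registered elsewhere in the
# app, so the /tense path below is only illustrative:
#
#   curl -X POST -d 'query=She walked home' http://localhost:5000/tense
#
# StandardNLP.process is expected to populate params['features']['pos']
# with the tagged tokens that get_tense consumes.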
| Machyne/pal | pal/nlp/tense_classifier.py | Python | bsd-3-clause | 1,198 |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
def ou(n):
s = 0.0
if n < 2:
return 0
else:
for j in range(n, 0, -2):
s += (1.0 / j)
print j
return s
def ji(n):
s = 0.0
if n < 1:
return 0
else:
for i in range(n, 0, -2):
s += (1.0 / i)
print i
return s
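# ou(n) returns 1/n + 1/(n-2) + ... + 1/2 for even n, and ji(n) returns
# 1/n + 1/(n-2) + ... + 1/1 for odd n; both print each denominator as they
# go. For example, ou(6) = 1/6 + 1/4 + 1/2 = 0.9166... and
# ji(5) = 1/5 + 1/3 + 1/1 = 1.5333...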
if __name__ == '__main__':
n = int(raw_input('input a number:'))
if n % 2 == 0:
print ou(n)
else:
print ji(n)
| tomhaoye/LetsPython | practice/practice76.py | Python | mit | 510 |
from bucket_filter.bucket import Bucket
from bucket_filter.condition import ConditionType, BooleanCondition
from bucket_filter.resolver import parse, evaluate
buckets = {}
def get_bucket(expression):
key = BooleanCondition.build_key(expression)
return buckets.get(key, None)
def register_bucket(expression, elements=None, condition_type=ConditionType.BOOLEAN):
if condition_type == ConditionType.BOOLEAN:
key = BooleanCondition.build_key(expression)
if key not in buckets:
condition = BooleanCondition(expression)
bucket = Bucket(condition, elements)
buckets[condition.key] = bucket
def solve(expression):
expressions, inner = parse(expression)
evaluated = {}
if inner:
for i in inner:
evaluate(expressions["{}".format(i)], evaluated, buckets)
bucket_final = evaluate(expressions["FINAL"], evaluated, buckets)
return bucket_final.elements.values()
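# Hypothetical usage sketch -- the expression grammar is whatever
# BooleanCondition and parse() accept in the sibling modules, so the
# operator syntax below is illustrative only:
#
#   register_bucket("a", elements={1: "x", 2: "y"})
#   register_bucket("b", elements={2: "y", 3: "z"})
#   matched = solve("a AND b")  # values of the bucket that the FINAL
#                               # expression evaluates to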
| conlini/bucket_filters | bucket_filter/__init__.py | Python | apache-2.0 | 958 |
# -*- coding: utf-8 -*-
import os
import urllib2
import urllib
from mechanize import Browser
from bs4 import BeautifulSoup
import re
from PIL import Image
import pyimgur
favi = "/home/ozgur/mount/media/Series/Conan/ConanTheBarbarian/Conan.the.Barbarian.1982.iNTERNAL.DVDRiP.XViD.CD1-HLS.avi"
fmkv = "/home/ozgur/mount/media/TorrentTemp/All.About.Steve.2009.720p.BluRay.DUAL.x264-CBGB(HDA).mkv"
from hachoir_core.error import HachoirError
from hachoir_core.cmd_line import unicodeFilename
from hachoir_parser import createParser
from hachoir_core.tools import makePrintable
from hachoir_metadata import extractMetadata
from hachoir_core.i18n import getTerminalCharset
from sys import argv, stderr, exit
__author__ = 'ozgur'
__creation_date__ = '11.08.2014' '23:15'
CLIENT_ID = "48fa40a51f1c795"
HDR = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding': 'none',
'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive'}
class MovieInfoFetcher():
def __init__(self):
pass
def getunicode(self, soup):
body = ''
if isinstance(soup, unicode):
            # Decode HTML entities that commonly survive scraping.
            soup = soup.replace('&#39;', "'")
            soup = soup.replace('&quot;', '"')
            soup = soup.replace('&nbsp;', ' ')
body += soup
else:
if not soup.contents:
return ''
con_list = soup.contents
for con in con_list:
body = body + self.getunicode(con)
return body
@staticmethod
def parse_movie_divxplanet(link):
directors = ""
authors = ""
actors = ""
genre = ""
req = urllib2.Request(link, headers=HDR)
page = urllib2.urlopen(req)
soup = BeautifulSoup(page.read())
temp_list = soup.find_all('div', itemprop='director')
for item in temp_list:
directors += item.a.span.text + ", "
temp_list = soup.find_all('div', itemprop='author')
for item in temp_list:
authors += item.a.span.text + ", "
try:
title = soup.find('span', itemprop='alternativeHeadline').text
        except AttributeError:
title = ""
temp_list = soup.find_all('div', itemprop='actor')
for item in temp_list:
actors += item.a.span.text + ", "
temp_list = soup.find_all('span', itemprop='genre')
for item in temp_list:
genre += item.text + ", "
try:
description = soup.find('span', itemprop='description').text
        except AttributeError:
description = ""
retval = {
'directors': directors,
'authors': authors,
'title': title,
'actors': actors,
'genre': genre,
'description': description,
}
return retval
def parse_movie_imdb(self, link):
br = Browser()
br.open(link)
link = br.find_link(url_regex=re.compile(r'/title/tt.*'))
res = br.follow_link(link)
soup = BeautifulSoup(res.read())
movie_title = self.getunicode(soup.find('title'))
rate = soup.find('span', itemprop='ratingValue')
rating = self.getunicode(rate)
actors = []
actors_soup = soup.findAll('a', itemprop='actors')
for i in range(len(actors_soup)):
actors.append(self.getunicode(actors_soup[i]))
des = soup.find('meta', {'name': 'description'})['content']
genre = []
infobar = soup.find('div', {'class': 'infobar'})
        r = infobar.find(attrs={'title': True})['title']
genrelist = infobar.findAll('a', {'href': True})
for i in range(len(genrelist) - 1):
genre.append(self.getunicode(genrelist[i]))
release_date = self.getunicode(genrelist[-1])
print movie_title, rating + '/10.0'
print 'Relase Date:', release_date
print 'Rated', r
print ''
print 'Genre:',
print ', '.join(genre)
print '\nActors:',
print ', '.join(actors)
print '\nDescription:'
print des
class ImageProcessor():
def __init__(self):
pass
    def _download_image(self, link, path):
        print(link + " >> " + path)
        # urllib.URLopener() takes a proxies mapping, not headers, as its
        # first argument; attach the request headers via addheaders instead.
        testfile = urllib.URLopener()
        testfile.addheaders = HDR.items()
        testfile.retrieve(link, path)
        testfile.close()
def _resize_image(self, path):
size = 500, 500
im = Image.open(path)
im.thumbnail(size, Image.ANTIALIAS)
im.save(path)
return True
def _upload_image(self, path):
im = pyimgur.Imgur(CLIENT_ID)
uploaded_image = im.upload_image(path, title="HDDiyari")
return uploaded_image.link
def prepare_image(self, link, name):
retval = ""
link = str(link)
if not os.path.exists("temp"):
os.makedirs("temp")
if link != "":
path = os.path.join("temp", name) + ".jpg"
self._download_image(link, path)
self._resize_image(path)
retval = self._upload_image(path)
return retval
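# Hypothetical usage, assuming a reachable poster URL and a valid Imgur
# client id in CLIENT_ID:
#
#   link = ImageProcessor().prepare_image("http://example.com/poster.jpg",
#                                         "MyMovie")
#
# downloads the poster into temp/, shrinks it to fit within 500x500 and
# returns the Imgur link ("" when the input link is empty).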
class MovieMetadata():
def __init__(self):
pass
def get_movie_metadata(self, filename):
        filename, realname = unicodeFilename(filename), filename
        parser = createParser(filename, realname)
if not parser:
print >> stderr, "Unable to parse file"
exit(1)
try:
metadata = extractMetadata(parser)
except HachoirError, err:
print "Metadata extraction error: %s" % unicode(err)
metadata = None
if not metadata:
print "Unable to extract metadata"
exit(1)
text = metadata.exportPlaintext()
charset = getTerminalCharset()
retval = ""
for line in text:
retval += makePrintable(line, charset) + u"\n"
return retval
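# Example: the module-level sample paths can be passed straight in, e.g.
#
#   print MovieMetadata().get_movie_metadata(favi)
#
# which returns hachoir's plaintext metadata dump, one line per extracted
# attribute.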
| obayhan/hddiyari_presentation | engine.py | Python | gpl-2.0 | 6,149 |
# ~*~ coding: utf-8 ~*~
from .callback import *
from .inventory import *
from .runner import *
from .exceptions import *
| eli261/jumpserver | apps/ops/ansible/__init__.py | Python | gpl-2.0 | 122 |