from StringIO import StringIO
import urllib2
import logging
import csv_parser
import metadata
import json_generator
import metadata_extractor
__author__ = 'sebastian'
logging.basicConfig()
logger = logging.getLogger(__name__)
class CSVW:
def __init__(self, url=None, path=None, handle=None, metadata_url=None, metadata_path=None, metadata_handle=None, date_parsing=False):
# http://www.w3.org/TR/2015/WD-tabular-data-model-20150416/#processing-tables
        # Check for conflicting arguments first; in the original ordering the
        # "path and url" branch was unreachable after "elif url" / "elif path".
        if url and path:
            raise ValueError("only one argument of url and path allowed")
        elif handle:
            logger.warning('"handle" is used only for testing purposes')
            name = None
        elif url:
            url_resp = urllib2.urlopen(url)
            handle = StringIO(url_resp.read())
            name = url
        elif path:
            handle = open(path, 'rb')
            name = path
        else:
            raise ValueError("url or path argument required")
# metadata_handle = None
if metadata_path and metadata_url:
raise ValueError("only one argument of metadata_url and metadata_path allowed")
elif metadata_handle:
logger.warning('"metadata_handle" is used only for testing purposes')
elif metadata_url:
meta_resp = urllib2.urlopen(metadata_url)
metadata_handle = StringIO(meta_resp.read())
elif metadata_path:
metadata_handle = open(metadata_path, 'rb')
# Retrieve the tabular data file.
self.table, embedded_metadata = csv_parser.parse(handle, url)
# TODO create settings using arguments or provided metadata
sources = metadata_extractor.metadata_extraction(url, metadata_handle, embedded_metadata=embedded_metadata)
self.metadata = metadata.merge(sources)
def to_rdf(self):
pass
def to_json(self):
# TODO group of tables?
json_generator.minimal_mode(self.table, self.metadata.json()['tables'][0])
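# Usage sketch (hypothetical URLs; the '-metadata.json' name follows the CSVW
# convention of locating metadata next to the CSV):
#
#     csvw = CSVW(url='http://example.org/data.csv',
#                 metadata_url='http://example.org/data.csv-metadata.json')
#     csvw.to_json()
#
# Exactly one of url/path/handle is expected for the table, and at most one
# of metadata_url/metadata_path/metadata_handle for the metadata.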
# ---- repo: sebneu/csvw-parser | path: pycsvw/main.py | license: mit ----
# P2P vendor specific extension tests
# Copyright (c) 2014, Qualcomm Atheros, Inc.
import logging
logger = logging.getLogger()
def test_p2p_ext_discovery(dev):
"""P2P device discovery with vendor specific extensions"""
addr0 = dev[0].p2p_dev_addr()
addr1 = dev[1].p2p_dev_addr()
try:
if "OK" not in dev[0].request("VENDOR_ELEM_ADD 1 dd050011223344"):
raise Exception("VENDOR_ELEM_ADD failed")
res = dev[0].request("VENDOR_ELEM_GET 1")
if res != "dd050011223344":
raise Exception("Unexpected VENDOR_ELEM_GET result: " + res)
if "OK" not in dev[0].request("VENDOR_ELEM_ADD 1 dd06001122335566"):
raise Exception("VENDOR_ELEM_ADD failed")
res = dev[0].request("VENDOR_ELEM_GET 1")
if res != "dd050011223344dd06001122335566":
raise Exception("Unexpected VENDOR_ELEM_GET result(2): " + res)
res = dev[0].request("VENDOR_ELEM_GET 2")
if res != "":
raise Exception("Unexpected VENDOR_ELEM_GET result(3): " + res)
if "OK" not in dev[0].request("VENDOR_ELEM_REMOVE 1 dd050011223344"):
raise Exception("VENDOR_ELEM_REMOVE failed")
res = dev[0].request("VENDOR_ELEM_GET 1")
if res != "dd06001122335566":
raise Exception("Unexpected VENDOR_ELEM_GET result(4): " + res)
if "OK" not in dev[0].request("VENDOR_ELEM_REMOVE 1 dd06001122335566"):
raise Exception("VENDOR_ELEM_REMOVE failed")
res = dev[0].request("VENDOR_ELEM_GET 1")
if res != "":
raise Exception("Unexpected VENDOR_ELEM_GET result(5): " + res)
if "OK" not in dev[0].request("VENDOR_ELEM_ADD 1 dd050011223344dd06001122335566"):
raise Exception("VENDOR_ELEM_ADD failed(2)")
if "FAIL" not in dev[0].request("VENDOR_ELEM_REMOVE 1 dd051122334455"):
raise Exception("Unexpected VENDOR_ELEM_REMOVE success")
if "FAIL" not in dev[0].request("VENDOR_ELEM_REMOVE 1 dd"):
raise Exception("Unexpected VENDOR_ELEM_REMOVE success(2)")
if "FAIL" not in dev[0].request("VENDOR_ELEM_ADD 1 ddff"):
raise Exception("Unexpected VENDOR_ELEM_ADD success(3)")
dev[0].p2p_listen()
if not dev[1].discover_peer(addr0):
raise Exception("Device discovery timed out")
if not dev[0].discover_peer(addr1):
raise Exception("Device discovery timed out")
peer = dev[1].get_peer(addr0)
if peer['vendor_elems'] != "dd050011223344dd06001122335566":
raise Exception("Vendor elements not reported correctly")
res = dev[0].request("VENDOR_ELEM_GET 1")
if res != "dd050011223344dd06001122335566":
raise Exception("Unexpected VENDOR_ELEM_GET result(6): " + res)
if "OK" not in dev[0].request("VENDOR_ELEM_REMOVE 1 dd06001122335566"):
raise Exception("VENDOR_ELEM_REMOVE failed")
res = dev[0].request("VENDOR_ELEM_GET 1")
if res != "dd050011223344":
raise Exception("Unexpected VENDOR_ELEM_GET result(7): " + res)
finally:
dev[0].request("VENDOR_ELEM_REMOVE 1 *")
def test_p2p_ext_discovery_go(dev):
"""P2P device discovery with vendor specific extensions for GO"""
addr0 = dev[0].p2p_dev_addr()
addr1 = dev[1].p2p_dev_addr()
try:
if "OK" not in dev[0].request("VENDOR_ELEM_ADD 2 dd050011223344dd06001122335566"):
raise Exception("VENDOR_ELEM_ADD failed")
if "OK" not in dev[0].request("VENDOR_ELEM_ADD 3 dd050011223344dd06001122335566"):
raise Exception("VENDOR_ELEM_ADD failed")
if "OK" not in dev[0].request("VENDOR_ELEM_ADD 12 dd050011223344dd06001122335566"):
raise Exception("VENDOR_ELEM_ADD failed")
dev[0].p2p_start_go(freq="2412")
if not dev[1].discover_peer(addr0):
raise Exception("Device discovery timed out")
peer = dev[1].get_peer(addr0)
if peer['vendor_elems'] != "dd050011223344dd06001122335566":
print peer['vendor_elems']
raise Exception("Vendor elements not reported correctly")
finally:
dev[0].request("VENDOR_ELEM_REMOVE 2 *")
dev[0].request("VENDOR_ELEM_REMOVE 3 *")
dev[0].request("VENDOR_ELEM_REMOVE 12 *")
# ---- repo: aelarabawy/hostap | path: tests/hwsim/test_p2p_ext.py | license: gpl-2.0 ----
import logging
import os
import tornado.web
import tornado.wsgi
from tornado.web import url
from google.appengine.ext import db
from google.appengine.ext.webapp.util import run_wsgi_app
import forms
import models
# Constants
IS_DEV = os.environ['SERVER_SOFTWARE'].startswith('Dev') # Development server
class Application(tornado.wsgi.WSGIApplication):
def __init__(self):
handlers = [
(r'/', IndexHandler),
# TODO Put your handlers here
]
settings = dict(
static_path=os.path.join(os.path.dirname(__file__), "static"),
template_path=os.path.join(os.path.dirname(__file__), 'templates'),
xsrf_cookies=True,
# TODO Change this cookie secret
cookie_secret="asjidoh91239jasdasdasdasdasdkja8izxc21312sjdhsa/Vo=",
)
tornado.wsgi.WSGIApplication.__init__(self, handlers, **settings)
class BaseHandler(tornado.web.RequestHandler):
pass
class IndexHandler(BaseHandler):
def get(self):
self.write('Hello from tornado on app engine')
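# Sketch of adding another handler (hypothetical names); register it in
# Application.handlers above, next to IndexHandler:
#
#     class AboutHandler(BaseHandler):
#         def get(self):
#             self.render('about.html')
#
#     # handlers = [(r'/', IndexHandler), (r'/about', AboutHandler)]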
def main():
run_wsgi_app(Application())
if __name__ == '__main__':
main()
# ---- repo: haldun/gae-tornado-template | path: app.py | license: mit ----
'''
modifier: 02
eqtime: 30
'''
def main():
info("Air Pipette x1-4 bone only")
gosub('felix:WaitForMiniboneAccess')
gosub('felix:PrepareForAirShot')
open(name='N')
open(name='Q')
open(name='D')
open(name='B')
gosub('common:EvacPipette2')
gosub('common:FillPipette2')
gosub('felix:PrepareForAirShotExpansion')
gosub('common:ExpandPipette2')
close(name='B')
close(name='Q')
close(name='E')
close(name='D')
close(name='N')
close(description='Outer Pipette 2')
sleep(3)
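# Note: this script is executed by pychron's PyScript runtime, which provides
# info/gosub/open/close/sleep in the script namespace; it is not a
# stand-alone Python module.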
# ---- repo: USGSDenverPychron/pychron | path: docs/user_guide/operation/scripts/examples/helix/extraction/felix_air_x1_boneonly.py | license: apache-2.0 ----
# -*- coding: utf-8 -*-
import logging
from functools import wraps
from urllib.parse import urljoin, urlencode
import requests
from tvdbrest import VERSION
from tvdbrest.objects import *
import datetime
import time
logger = logging.getLogger(__name__)
class Unauthorized(Exception):
pass
class NotFound(Exception):
pass
class APIError(Exception):
pass
def login_required(f):
@wraps(f)
def wrapper(obj, *args, **kwargs):
if not obj.logged_in:
logger.debug("not logged in")
obj.login()
try:
return f(obj, *args, **kwargs)
except Unauthorized:
logger.info("Unauthorized API error - login again")
obj.login()
return f(obj, *args, **kwargs)
return wrapper
def single_response(response_class):
def _inner(func):
@wraps(func)
def wrapper(obj, *args, **kwargs):
result = func(obj, *args, **kwargs)
return response_class(result["data"], obj)
return wrapper
return _inner
def multi_response(response_class):
def _inner(func):
@wraps(func)
def wrapper(obj, *args, **kwargs):
result = func(obj, *args, **kwargs)
return [response_class(d, obj) for d in result["data"]]
return wrapper
return _inner
def paged_response(response_class, page_size=100):
def _inner(func):
@wraps(func)
def wrapper(obj, *args, **kwargs):
result = func(obj, *args, **kwargs)
return PaginatedAPIObjectList(result['links'],
[response_class(d, obj) for d in result['data']],
multi_response(response_class)(func), tuple([obj] + list(args)), kwargs,
page_size=page_size)
return wrapper
return _inner
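# The decorators above unwrap the raw API payload: single_response builds one
# object from result["data"], multi_response builds a list, and paged_response
# wraps the list in a PaginatedAPIObjectList that fetches further pages via a
# multi_response-wrapped copy of the same call. login_required sits closest to
# each method, so a 401 triggers a re-login and a single retry.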
class TVDB(object):
def __init__(self, username, userkey, apikey, language=None):
self.username = username
self.userkey = userkey
self.apikey = apikey
self.accept_language = language or "en"
assert self.username and self.userkey and self.apikey
self.jwttoken = None
self.useragent = "tvdb-rest %s" % VERSION
self._series_search_params = None
def login(self):
self.jwttoken = None
response = self._api_request('post', '/login', json={
'username': self.username,
'userkey': self.userkey,
'apikey': self.apikey,
})
self.jwttoken = response['token']
def logout(self):
self.jwttoken = None
@property
def logged_in(self):
return self.jwttoken is not None
@property
@login_required
def series_search_params(self):
if self._series_search_params is None:
self._series_search_params = self._api_request('get', '/search/series/params')['data']['params']
return self._series_search_params
@multi_response(Language)
@login_required
def languages(self):
return self._api_request('get', '/languages')
@login_required
def language(self, language_id):
return Language(self._api_request('get', '/languages/%s' % language_id), self)
@single_response(Series)
@login_required
def series(self, series_id, keys=None):
u = '/series/%s' % series_id
if keys:
u += "/filter?%s" % urlencode({
'keys': ','.join(keys)
})
return self._api_request('get', u)
@login_required
def series_key_params(self, series_id):
return self._api_request('get', '/series/%s/filter/params' % series_id)['data']['params']
@multi_response(Series)
@login_required
def search(self, **kwargs):
if not kwargs:
return {
"data": []
}
u = "/search/series?%s" % urlencode(kwargs)
return self._api_request('get', u)
@multi_response(Actor)
@login_required
def actors_by_series(self, series_id):
return self._api_request('get', '/series/%s/actors' % series_id)
@paged_response(Episode)
@login_required
def episodes_by_series(self, series_id, *args, **kwargs):
u = '/series/%s/episodes' % series_id
if kwargs:
if not (len(kwargs) == 1 and 'page' in kwargs):
u += '/query'
u += "?%s" % urlencode(kwargs)
return self._api_request('get', u)
@login_required
def episode_query_params(self, series_id):
return self._api_request('get', '/series/%s/episodes/query/params' % series_id)['data']
@single_response(Episode)
@login_required
def episode_details(self, episode_id):
return self._api_request('get', '/episodes/%s' % episode_id)
@single_response(ImageCount)
@login_required
def image_count(self, series_id):
return self._api_request('get', '/series/%s/images' % series_id)
@multi_response(Image)
@login_required
def images(self, series_id, **kwargs):
u = '/series/%s/images/query' % series_id
if kwargs:
u += "?%s" % urlencode(kwargs)
return self._api_request('get', u)
@multi_response(Update)
@login_required
def updates(self, from_time, to_time=None):
u = '/updated/query?'
def _dt_to_epoch(o):
return int(time.mktime(o.timetuple())) - time.timezone if isinstance(o, datetime.datetime) else o
kwargs = {
'fromTime': _dt_to_epoch(from_time)
}
if to_time:
kwargs['toTime'] = _dt_to_epoch(to_time)
u += urlencode(kwargs)
return self._api_request('get', u)
def _api_request(self, method, relative_url, data_attribute="data", **kwargs):
url = urljoin('https://api.thetvdb.com/', relative_url)
headers = kwargs.pop('headers', {})
headers['User-Agent'] = self.useragent
if self.jwttoken:
headers['Authorization'] = 'Bearer %s' % self.jwttoken
if self.accept_language:
headers['Accept-Language'] = self.accept_language
response = requests.request(method, url, headers=headers, **kwargs)
if response.status_code == 401:
raise Unauthorized(response.json()["Error"])
elif response.status_code == 404:
raise NotFound(response.json()["Error"])
elif response.status_code >= 400:
raise APIError()
logger.info("Response: %s", response)
return response.json()
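# Minimal usage sketch (credentials are placeholders):
#
#     tvdb = TVDB('user', 'userkey', 'apikey', language='en')
#     for series in tvdb.search(name='The Expanse'):
#         print(series)
#
# search() forwards its keyword arguments to /search/series as query
# parameters; series_search_params lists the keys the API accepts.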
# ---- repo: ercpe/tvdb-rest | path: tvdbrest/client.py | license: gpl-3.0 ----
import unittest
from mudlib.character import *
class CharacterTestCase(unittest.TestCase):
    # unittest only calls the camelCase setUp/tearDown hooks; the original
    # lowercase setup/teardown were never invoked.
    def setUp(self):
        pass
    def tearDown(self):
        pass
class ParserTestCase(unittest.TestCase):
    def setUp(self):
        pass
    def tearDown(self):
        pass
def test_parser_look(self):
self.assertEqual(Parser.parse("l"), (Action.LOOK, []), "")
self.assertEqual(Parser.parse("L"), (Action.LOOK, []), "")
self.assertEqual(Parser.parse("look"), (Action.LOOK, []), "")
self.assertEqual(Parser.parse("look n s X"), (Action.LOOK, ["n", "s", "x"]), "")
self.assertEqual(Parser.parse("L N eASt s"), (Action.LOOK, ["n", "east", "s"]), "")
def test_parser_move(self):
self.assertEqual(Parser.parse("m"), (Action.MOVE, []), "")
self.assertEqual(Parser.parse("Move"), (Action.MOVE, []), "")
self.assertEqual(Parser.parse("m n east SOUTH"), (Action.MOVE, ["n", "east", "south"]), "")
self.assertEqual(Parser.parse("n"), (Action.MOVE, ["n"]), "")
self.assertEqual(Parser.parse("ne"), (Action.MOVE, ["ne"]), "")
# ---- repo: jon-stewart/mud | path: mudlib/test_character.py | license: mit ----
"""First 100 days of the US House of Representatives 1995"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """Used with express permission from the original author,
who retains all rights."""
TITLE = __doc__
SOURCE = """
Jeff Gill's `Generalized Linear Models: A Unified Approach`
http://jgill.wustl.edu/research/books.html
"""
DESCRSHORT = """Number of bill assignments in the 104th House in 1995"""
DESCRLONG = """The example in Gill seeks to explain the number of bill
assignments in the first 100 days of the US' 104th House of Representatives.
The response variable is the number of bill assignments in the first 100 days
over 20 Committees. The explanatory variables in the example are the number of
assignments in the first 100 days of the 103rd House, the number of members on
the committee, the number of subcommittees, the log of the number of staff
assigned to the committee, a dummy variable indicating whether
the committee is a high prestige committee, and an interaction term between
the number of subcommittees and the log of the staff size.
The data returned by load are not cleaned to represent the above example.
"""
NOTE = """Number of Observations - 20
Number of Variables - 6
Variable name definitions::
BILLS104 - Number of bill assignments in the first 100 days of the 104th
House of Representatives.
SIZE - Number of members on the committee.
SUBS - Number of subcommittees.
STAFF - Number of staff members assigned to the committee.
PRESTIGE - PRESTIGE == 1 is a high prestige committee.
BILLS103 - Number of bill assignments in the first 100 days of the 103rd
House of Representatives.
Committee names are included as a variable in the data file though not
returned by load.
"""
from numpy import recfromtxt, column_stack, array
import scikits.statsmodels.tools.datautils as du
from os.path import dirname, abspath
def load():
"""Load the committee data and returns a data class.
Returns
--------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray(data, endog_idx=0, dtype=float)
def load_pandas():
data = _get_data()
return du.process_recarray_pandas(data, endog_idx=0, dtype=float)
def _get_data():
filepath = dirname(abspath(__file__))
data = recfromtxt(open(filepath + '/committee.csv', 'rb'), delimiter=",",
names=True, dtype=float, usecols=(1,2,3,4,5,6))
return data
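# Usage sketch:
#
#     >>> data = load()
#     >>> data.endog   # BILLS104
#     >>> data.exog    # SIZE, SUBS, STAFF, PRESTIGE, BILLS103
#
# endog_idx=0 above makes the first column (BILLS104) the endogenous variable
# and the remaining numeric columns the exogenous design.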
# ---- repo: wesm/statsmodels | path: scikits/statsmodels/datasets/committee/data.py | license: bsd-3-clause ----
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 13 19:59:46 2014
@author: pedro
"""
import os
from libphys import *
dname = '/home/pedro/LAB/DATA/2014/Aug/PF/14_08_14/pf03/'
files = []
for file in os.listdir(dname):
if file.endswith(".bmp"):
files = np.append(files,file)
files.sort()
print 'Found %d files' %len(files)
pump_V = np.array([6,5.5,5,4.5,4,3.8,3.5,3.4,3.3,3.2,3.1,3,2.9,2.8,2.7,2.6,2.5,2.4,2.3,2.2,2.1,2,1.9,1.8,1.7,1.6,1.5])
j = 0
for i in range(20, 290, 10):
    # The original "len(files[i]) >= 0" guard was always true and is dropped.
    if files[i][0:3] == 'sig':
        # rename the next block of 10 frames, inserting the pump voltage
        # into the file name after character 21
        for k in range(0, 10):
            os.rename(dname + files[i + k],
                      dname + files[i + k][:21] + '-' + str(pump_V[j]) + files[i + k][21:])
        print 'success'
        j += 1
print j
# ---- repo: atreyv/atom-phys | path: change_filenames_template.py | license: gpl-3.0 ----
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ai.h2o.sparkling.ml.models.H2OTargetEncoderModel import H2OTargetEncoderModel
from ai.h2o.sparkling.ml.models.H2OTargetEncoderMOJOModel import H2OTargetEncoderMOJOModel
from ai.h2o.sparkling.ml.models.H2OMOJOSettings import H2OMOJOSettings
from ai.h2o.sparkling.ml.models.H2OXGBoostMOJOModel import H2OXGBoostMOJOModel
from ai.h2o.sparkling.ml.models.H2OGBMMOJOModel import H2OGBMMOJOModel
from ai.h2o.sparkling.ml.models.H2ODRFMOJOModel import H2ODRFMOJOModel
from ai.h2o.sparkling.ml.models.H2OGLMMOJOModel import H2OGLMMOJOModel
from ai.h2o.sparkling.ml.models.H2OGAMMOJOModel import H2OGAMMOJOModel
from ai.h2o.sparkling.ml.models.H2OPCAMOJOModel import H2OPCAMOJOModel
from ai.h2o.sparkling.ml.models.H2OGLRMMOJOModel import H2OGLRMMOJOModel
from ai.h2o.sparkling.ml.models.H2ODeepLearningMOJOModel import H2ODeepLearningMOJOModel
from ai.h2o.sparkling.ml.models.H2OAutoEncoderMOJOModel import H2OAutoEncoderMOJOModel
from ai.h2o.sparkling.ml.models.H2OWord2VecMOJOModel import H2OWord2VecMOJOModel
from ai.h2o.sparkling.ml.models.H2OKMeansMOJOModel import H2OKMeansMOJOModel
from ai.h2o.sparkling.ml.models.H2OIsolationForestMOJOModel import H2OIsolationForestMOJOModel
from ai.h2o.sparkling.ml.models.H2OMOJOPipelineModel import H2OMOJOPipelineModel
from ai.h2o.sparkling.ml.models.H2OBinaryModel import H2OBinaryModel
from ai.h2o.sparkling.ml.models.H2OMOJOModel import H2OTreeBasedSupervisedMOJOModel
from ai.h2o.sparkling.ml.models.H2OMOJOModel import H2OTreeBasedUnsupervisedMOJOModel
from ai.h2o.sparkling.ml.models.H2OMOJOModel import H2OSupervisedMOJOModel
from ai.h2o.sparkling.ml.models.H2OMOJOModel import H2OUnsupervisedMOJOModel
from ai.h2o.sparkling.ml.models.H2OMOJOModel import H2OMOJOModel
from ai.h2o.sparkling.ml.models.H2OMOJOModel import H2OAlgorithmMOJOModel
from ai.h2o.sparkling.ml.models.H2OMOJOModel import H2OFeatureMOJOModel
from ai.h2o.sparkling.ml.models.H2OCoxPHMOJOModel import H2OCoxPHMOJOModel
from ai.h2o.sparkling.ml.models.H2ORuleFitMOJOModel import H2ORuleFitMOJOModel
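# This package-level module simply re-exports the H2O MOJO model wrappers so
# they can be imported as, e.g.,
# `from ai.h2o.sparkling.ml.models import H2OMOJOModel`.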
# ---- repo: h2oai/sparkling-water | path: py-scoring/src/ai/h2o/sparkling/ml/models/__init__.py | license: apache-2.0 ----
import os, sys
from django.template import Template, Context
from django.conf import settings
from django.utils.crypto import get_random_string
MY_PATH = os.path.abspath(os.path.dirname(__file__))
def main():
target_path = os.path.abspath(os.path.join(MY_PATH, '..', 'trackersite', 'settings.py'))
if os.path.exists(target_path):
        # "target" is not defined until later in this function; the intended
        # name here is target_path.
        print 'Don\'t want to overwrite %s.\nIf you\'re sure, delete it and try again.' % target_path
sys.exit(1)
# make a template instance
settings.configure(TEMPLATE_DEBUG=False)
template_file = open(os.path.join(MY_PATH, 'settings.py.template'))
template = Template(template_file.read())
template_file.close()
# set up the options we want
options = {}
chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
options['secret_key'] = get_random_string(50, chars)
context = Context(options)
target = open(target_path, 'wb')
target.write(template.render(context))
target.close()
if __name__ == '__main__':
main()
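# Renders support/settings.py.template into trackersite/settings.py with a
# freshly generated SECRET_KEY; paths are resolved relative to MY_PATH above.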
# ---- repo: urbanecm/teh-tracker | path: support/makesettings.py | license: gpl-2.0 ----
'''
Created on Oct 29, 2017
@author: bkehoe
'''
from __future__ import absolute_import
import uuid
import itertools
import json
from . import components, states
def is_heaviside_execution(context):
return Executor.CONTEXT_EXECUTION_ID_KEY in context
class Executor(object):
@classmethod
def create(cls, definition,
definition_store,
execution_store,
logger_factory,
task_dispatcher):
execution_id = uuid.uuid4().hex
if not isinstance(definition, states.StateMachine):
definition = states.StateMachine.from_json(definition)
execution = execution_store.execution_factory(execution_id, definition_store)
execution.initialize(definition)
return cls(
execution_id,
execution,
definition_store,
execution_store,
logger_factory,
task_dispatcher)
@classmethod
def hydrate(cls, context,
definition_store,
execution_store,
logger_factory,
task_dispatcher):
execution_id = context[cls.CONTEXT_EXECUTION_ID_KEY]
definition_store.hydrate(context)
execution = execution_store.execution_factory(execution_id, definition_store)
execution.hydrate(context)
return cls(
execution_id,
execution,
definition_store,
execution_store,
logger_factory,
task_dispatcher)
def __init__(self,
execution_id,
execution,
definition_store,
execution_store,
logger_factory,
task_dispatcher):
self.execution_id = execution_id
self.execution = execution
self.definition = execution.get_definition()
self.executor_id = uuid.uuid4().hex
print 'created executor {}'.format(self.executor_id[-4:])
self.definition_store=definition_store
self.execution_store=execution_store
self.logger=logger_factory.logger_factory(self.execution_id, self.executor_id)
self.task_dispatcher=task_dispatcher
CONTEXT_EXECUTION_ID_KEY = 'x-heaviside-sm-eid'
def get_context(self):
context = {
self.CONTEXT_EXECUTION_ID_KEY: self.execution_id,
}
context.update(self.definition_store.get_context())
context.update(self.execution.get_context())
context.update(self.logger.get_context())
return context
def log_state(self):
state, result = self.execution.get_current_state_and_result()
s = []
s.append('\nvvv vvv')
# s.append('execution: {}'.format(self.execution_id[-4:]))
s.append('executor: {}'.format(self.executor_id[-4:]))
if state:
s.append('state:' + json.dumps(state.to_json(), indent=2))
if result:
s.extend(['result:', json.dumps(result.to_json())])
s.append('^^^ ^^^\n')
print '\n'.join(s)
def dispatch(self, input):
"""Run the state machine up to the next Task state, which will be async invoked."""
print '[dispatch] {} input: {}'.format(self.executor_id[-4:], input)
current_state, result = self.execution.get_current_state_and_result()
self.log_state()
if current_state is None:
if result:
print 'has result'
return result.to_json()
else:
print 'initializing', self.definition.start_at
self.execution.change_state(self.definition.start_at)
self.log_state()
for i in itertools.count():
current_state, result = self.execution.get_current_state_and_result()
print 'loop', self.executor_id[-4:], i, current_state.name
state_def = self.definition.states[current_state.name]
print 'state def', json.dumps(state_def.to_json())
if isinstance(state_def, states.SucceedState):
print 'succeed'
self.execution.set_result(components.Result(components.Result.STATUS_SUCCEEDED))
self.log_state()
break
elif isinstance(state_def, states.FailState):
print 'fail'
self.execution.set_result(components.Result(components.Result.STATUS_FAILED))
self.log_state()
break
elif isinstance(state_def, states.TaskState):
print 'task'
self.task_dispatcher.dispatch(state_def.resource, input, self.get_context())
break
else:
raise TypeError("No matching type for {}".format(state_def))
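    # run_task (below) resumes the machine from inside an invoked task: it
    # executes the task body, applies any Catch rules on failure, and then
    # feeds the output back into dispatch() to advance to the next state.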
def run_task(self, task_function, exception_handler):
"""Process the current task and dispatch.
Assumes the current state is a Task state."""
print '[run task] {}'.format(self.executor_id[-4:])
current_state, result = self.execution.get_current_state_and_result()
self.log_state()
state_def = self.definition.states[current_state.name]
print 'state def', state_def.to_json()
try:
output = task_function()
except Exception as e:
exception = exception_handler(e)
for catcher in state_def.catch or []:
if catcher.matches(exception):
self.execution.change_state(catcher.next)
output = {}
break
else:
# self.result = {
# "type": "Fail",
# "name": current_state.name,
# "Error": "States.TaskFailed",
# "Cause": "No matching catcher",
# }
self.execution.set_result(components.Result(components.Result.STATUS_FAILED))
self.log_state()
return
else:
if state_def.is_end():
print 'task is end state'
self.execution.set_result(components.Result(components.Result.STATUS_SUCCEEDED, output))
else:
self.execution.change_state(state_def.next)
        # Feed the task's output into the next state; the original code passed
        # "result" (the stale value fetched at the top of this method).
        self.dispatch(output)
        return output
# ---- repo: benkehoe/heaviside | path: src/heaviside/executor.py | license: apache-2.0 ----
import unittest
from test import test_support
import subprocess
import sys
import signal
import os
import errno
import tempfile
import time
import re
import sysconfig
try:
import resource
except ImportError:
resource = None
try:
import threading
except ImportError:
threading = None
mswindows = (sys.platform == "win32")
#
# Depends on the following external programs: Python
#
if mswindows:
SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), '
'os.O_BINARY);')
else:
SETBINARY = ''
class BaseTestCase(unittest.TestCase):
def setUp(self):
# Try to minimize the number of children we have so this test
# doesn't crash on some buildbots (Alphas in particular).
test_support.reap_children()
def tearDown(self):
for inst in subprocess._active:
inst.wait()
subprocess._cleanup()
self.assertFalse(subprocess._active, "subprocess._active not empty")
def assertStderrEqual(self, stderr, expected, msg=None):
# In a debug build, stuff like "[6580 refs]" is printed to stderr at
# shutdown time. That frustrates tests trying to check stderr produced
# from a spawned Python process.
actual = re.sub(r"\[\d+ refs\]\r?\n?$", "", stderr)
self.assertEqual(actual, expected, msg)
class PopenTestException(Exception):
pass
class PopenExecuteChildRaises(subprocess.Popen):
"""Popen subclass for testing cleanup of subprocess.PIPE filehandles when
_execute_child fails.
"""
def _execute_child(self, *args, **kwargs):
raise PopenTestException("Forced Exception for Test")
class ProcessTestCase(BaseTestCase):
def test_call_seq(self):
# call() function with sequence argument
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(rc, 47)
def test_check_call_zero(self):
# check_call() function with zero return code
rc = subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(0)"])
self.assertEqual(rc, 0)
def test_check_call_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(c.exception.returncode, 47)
def test_check_output(self):
# check_output() function with zero return code
output = subprocess.check_output(
[sys.executable, "-c", "print 'BDFL'"])
self.assertIn('BDFL', output)
def test_check_output_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_output(
[sys.executable, "-c", "import sys; sys.exit(5)"])
self.assertEqual(c.exception.returncode, 5)
def test_check_output_stderr(self):
# check_output() function stderr redirected to stdout
output = subprocess.check_output(
[sys.executable, "-c", "import sys; sys.stderr.write('BDFL')"],
stderr=subprocess.STDOUT)
self.assertIn('BDFL', output)
def test_check_output_stdout_arg(self):
# check_output() function stderr redirected to stdout
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print 'will not be run'"],
stdout=sys.stdout)
self.fail("Expected ValueError when stdout arg supplied.")
self.assertIn('stdout', c.exception.args[0])
def test_call_kwargs(self):
# call() function with keyword args
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
rc = subprocess.call([sys.executable, "-c",
'import sys, os;'
'sys.exit(os.getenv("FRUIT")=="banana")'],
env=newenv)
self.assertEqual(rc, 1)
def test_invalid_args(self):
# Popen() called with invalid arguments should raise TypeError
# but Popen.__del__ should not complain (issue #12085)
with test_support.captured_stderr() as s:
self.assertRaises(TypeError, subprocess.Popen, invalid_arg_name=1)
argcount = subprocess.Popen.__init__.__code__.co_argcount
too_many_args = [0] * (argcount + 1)
self.assertRaises(TypeError, subprocess.Popen, *too_many_args)
self.assertEqual(s.getvalue(), '')
def test_stdin_none(self):
# .stdin is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print "banana"'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
p.wait()
self.assertEqual(p.stdin, None)
def test_stdout_none(self):
# .stdout is None when not redirected, and the child's stdout will
# be inherited from the parent. In order to test this we run a
# subprocess in a subprocess:
# this_test
# \-- subprocess created by this test (parent)
# \-- subprocess created by the parent subprocess (child)
# The parent doesn't specify stdout, so the child will use the
# parent's stdout. This test checks that the message printed by the
# child goes to the parent stdout. The parent also checks that the
# child's stdout is None. See #11963.
code = ('import sys; from subprocess import Popen, PIPE;'
'p = Popen([sys.executable, "-c", "print \'test_stdout_none\'"],'
' stdin=PIPE, stderr=PIPE);'
'p.wait(); assert p.stdout is None;')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), 'test_stdout_none')
def test_stderr_none(self):
# .stderr is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print "banana"'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stdin.close)
p.wait()
self.assertEqual(p.stderr, None)
def test_executable_with_cwd(self):
python_dir = os.path.dirname(os.path.realpath(sys.executable))
p = subprocess.Popen(["somethingyoudonthave", "-c",
"import sys; sys.exit(47)"],
executable=sys.executable, cwd=python_dir)
p.wait()
self.assertEqual(p.returncode, 47)
@unittest.skipIf(sysconfig.is_python_build(),
"need an installed Python. See #7774")
def test_executable_without_cwd(self):
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
p = subprocess.Popen(["somethingyoudonthave", "-c",
"import sys; sys.exit(47)"],
executable=sys.executable)
p.wait()
self.assertEqual(p.returncode, 47)
def test_stdin_pipe(self):
# stdin redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.stdin.write("pear")
p.stdin.close()
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_filedes(self):
# stdin is set to open file descriptor
tf = tempfile.TemporaryFile()
d = tf.fileno()
os.write(d, "pear")
os.lseek(d, 0, 0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=d)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_fileobj(self):
# stdin is set to open file object
tf = tempfile.TemporaryFile()
tf.write("pear")
tf.seek(0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=tf)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdout_pipe(self):
# stdout redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read(), "orange")
def test_stdout_filedes(self):
# stdout is set to open file descriptor
tf = tempfile.TemporaryFile()
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=d)
p.wait()
os.lseek(d, 0, 0)
self.assertEqual(os.read(d, 1024), "orange")
def test_stdout_fileobj(self):
# stdout is set to open file object
tf = tempfile.TemporaryFile()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), "orange")
def test_stderr_pipe(self):
# stderr redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=subprocess.PIPE)
self.addCleanup(p.stderr.close)
self.assertStderrEqual(p.stderr.read(), "strawberry")
def test_stderr_filedes(self):
# stderr is set to open file descriptor
tf = tempfile.TemporaryFile()
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=d)
p.wait()
os.lseek(d, 0, 0)
self.assertStderrEqual(os.read(d, 1024), "strawberry")
def test_stderr_fileobj(self):
# stderr is set to open file object
tf = tempfile.TemporaryFile()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=tf)
p.wait()
tf.seek(0)
self.assertStderrEqual(tf.read(), "strawberry")
def test_stderr_redirect_with_no_stdout_redirect(self):
# test stderr=STDOUT while stdout=None (not set)
# - grandchild prints to stderr
# - child redirects grandchild's stderr to its stdout
# - the parent should get grandchild's stderr in child's stdout
p = subprocess.Popen([sys.executable, "-c",
'import sys, subprocess;'
'rc = subprocess.call([sys.executable, "-c",'
' "import sys;"'
' "sys.stderr.write(\'42\')"],'
' stderr=subprocess.STDOUT);'
'sys.exit(rc)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
#NOTE: stdout should get stderr from grandchild
self.assertStderrEqual(stdout, b'42')
self.assertStderrEqual(stderr, b'') # should be empty
self.assertEqual(p.returncode, 0)
def test_stdout_stderr_pipe(self):
# capture stdout and stderr to the same pipe
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
self.addCleanup(p.stdout.close)
self.assertStderrEqual(p.stdout.read(), "appleorange")
def test_stdout_stderr_file(self):
# capture stdout and stderr to the same open file
tf = tempfile.TemporaryFile()
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=tf,
stderr=tf)
p.wait()
tf.seek(0)
self.assertStderrEqual(tf.read(), "appleorange")
def test_stdout_filedes_of_stdout(self):
# stdout is set to 1 (#1531862).
# To avoid printing the text on stdout, we do something similar to
# test_stdout_none (see above). The parent subprocess calls the child
# subprocess passing stdout=1, and this test uses stdout=PIPE in
# order to capture and check the output of the parent. See #11963.
code = ('import sys, subprocess; '
'rc = subprocess.call([sys.executable, "-c", '
' "import os, sys; sys.exit(os.write(sys.stdout.fileno(), '
'\'test with stdout=1\'))"], stdout=1); '
'assert rc == 18')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), 'test with stdout=1')
def test_cwd(self):
tmpdir = tempfile.gettempdir()
# We cannot use os.path.realpath to canonicalize the path,
# since it doesn't expand Tru64 {memb} strings. See bug 1063571.
cwd = os.getcwd()
os.chdir(tmpdir)
tmpdir = os.getcwd()
os.chdir(cwd)
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getcwd())'],
stdout=subprocess.PIPE,
cwd=tmpdir)
self.addCleanup(p.stdout.close)
normcase = os.path.normcase
self.assertEqual(normcase(p.stdout.read()), normcase(tmpdir))
def test_env(self):
newenv = os.environ.copy()
newenv["FRUIT"] = "orange"
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
env=newenv)
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read(), "orange")
def test_communicate_stdin(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.communicate("pear")
self.assertEqual(p.returncode, 1)
def test_communicate_stdout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("pineapple")'],
stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, "pineapple")
self.assertEqual(stderr, None)
def test_communicate_stderr(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("pineapple")'],
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertStderrEqual(stderr, "pineapple")
def test_communicate(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stderr.write("pineapple");'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
(stdout, stderr) = p.communicate("banana")
self.assertEqual(stdout, "banana")
self.assertStderrEqual(stderr, "pineapple")
# This test is Linux specific for simplicity to at least have
# some coverage. It is not a platform specific bug.
@unittest.skipUnless(os.path.isdir('/proc/%d/fd' % os.getpid()),
"Linux specific")
# Test for the fd leak reported in http://bugs.python.org/issue2791.
def test_communicate_pipe_fd_leak(self):
fd_directory = '/proc/%d/fd' % os.getpid()
num_fds_before_popen = len(os.listdir(fd_directory))
p = subprocess.Popen([sys.executable, "-c", "print('')"],
stdout=subprocess.PIPE)
p.communicate()
num_fds_after_communicate = len(os.listdir(fd_directory))
del p
num_fds_after_destruction = len(os.listdir(fd_directory))
self.assertEqual(num_fds_before_popen, num_fds_after_destruction)
self.assertEqual(num_fds_before_popen, num_fds_after_communicate)
def test_communicate_returns(self):
# communicate() should return None if no redirection is active
p = subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(47)"])
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertEqual(stderr, None)
def test_communicate_pipe_buf(self):
# communicate() with writes larger than pipe_buf
# This test will probably deadlock rather than fail, if
# communicate() does not work properly.
x, y = os.pipe()
if mswindows:
pipe_buf = 512
else:
pipe_buf = os.fpathconf(x, "PC_PIPE_BUF")
os.close(x)
os.close(y)
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read(47));'
'sys.stderr.write("xyz"*%d);'
'sys.stdout.write(sys.stdin.read())' % pipe_buf],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
string_to_write = "abc"*pipe_buf
(stdout, stderr) = p.communicate(string_to_write)
self.assertEqual(stdout, string_to_write)
def test_writes_before_communicate(self):
# stdin.write before communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.stdin.write("banana")
(stdout, stderr) = p.communicate("split")
self.assertEqual(stdout, "bananasplit")
self.assertStderrEqual(stderr, "")
def test_universal_newlines(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY +
'sys.stdout.write("line1\\n");'
'sys.stdout.flush();'
'sys.stdout.write("line2\\r");'
'sys.stdout.flush();'
'sys.stdout.write("line3\\r\\n");'
'sys.stdout.flush();'
'sys.stdout.write("line4\\r");'
'sys.stdout.flush();'
'sys.stdout.write("\\nline5");'
'sys.stdout.flush();'
'sys.stdout.write("\\nline6");'],
stdout=subprocess.PIPE,
universal_newlines=1)
self.addCleanup(p.stdout.close)
stdout = p.stdout.read()
if hasattr(file, 'newlines'):
# Interpreter with universal newline support
self.assertEqual(stdout,
"line1\nline2\nline3\nline4\nline5\nline6")
else:
# Interpreter without universal newline support
self.assertEqual(stdout,
"line1\nline2\rline3\r\nline4\r\nline5\nline6")
def test_universal_newlines_communicate(self):
# universal newlines through communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY +
'sys.stdout.write("line1\\n");'
'sys.stdout.flush();'
'sys.stdout.write("line2\\r");'
'sys.stdout.flush();'
'sys.stdout.write("line3\\r\\n");'
'sys.stdout.flush();'
'sys.stdout.write("line4\\r");'
'sys.stdout.flush();'
'sys.stdout.write("\\nline5");'
'sys.stdout.flush();'
'sys.stdout.write("\\nline6");'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=1)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
(stdout, stderr) = p.communicate()
if hasattr(file, 'newlines'):
# Interpreter with universal newline support
self.assertEqual(stdout,
"line1\nline2\nline3\nline4\nline5\nline6")
else:
# Interpreter without universal newline support
self.assertEqual(stdout,
"line1\nline2\rline3\r\nline4\r\nline5\nline6")
def test_no_leaking(self):
# Make sure we leak no resources
if not mswindows:
max_handles = 1026 # too much for most UNIX systems
else:
max_handles = 2050 # too much for (at least some) Windows setups
handles = []
try:
for i in range(max_handles):
try:
handles.append(os.open(test_support.TESTFN,
os.O_WRONLY | os.O_CREAT))
except OSError as e:
if e.errno != errno.EMFILE:
raise
break
else:
self.skipTest("failed to reach the file descriptor limit "
"(tried %d)" % max_handles)
# Close a couple of them (should be enough for a subprocess)
for i in range(10):
os.close(handles.pop())
# Loop creating some subprocesses. If one of them leaks some fds,
# the next loop iteration will fail by reaching the max fd limit.
for i in range(15):
p = subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write(sys.stdin.read())"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
data = p.communicate(b"lime")[0]
self.assertEqual(data, b"lime")
finally:
for h in handles:
os.close(h)
test_support.unlink(test_support.TESTFN)
def test_list2cmdline(self):
self.assertEqual(subprocess.list2cmdline(['a b c', 'd', 'e']),
'"a b c" d e')
self.assertEqual(subprocess.list2cmdline(['ab"c', '\\', 'd']),
'ab\\"c \\ d')
self.assertEqual(subprocess.list2cmdline(['ab"c', ' \\', 'd']),
'ab\\"c " \\\\" d')
self.assertEqual(subprocess.list2cmdline(['a\\\\\\b', 'de fg', 'h']),
'a\\\\\\b "de fg" h')
self.assertEqual(subprocess.list2cmdline(['a\\"b', 'c', 'd']),
'a\\\\\\"b c d')
self.assertEqual(subprocess.list2cmdline(['a\\\\b c', 'd', 'e']),
'"a\\\\b c" d e')
self.assertEqual(subprocess.list2cmdline(['a\\\\b\\ c', 'd', 'e']),
'"a\\\\b\\ c" d e')
self.assertEqual(subprocess.list2cmdline(['ab', '']),
'ab ""')
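    # list2cmdline follows the MS C runtime argument-quoting rules: arguments
    # are space-joined, any argument containing whitespace is double-quoted,
    # and backslashes are literal unless they precede a double quote, in
    # which case both must be escaped, exactly as the cases above assert.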
def test_poll(self):
p = subprocess.Popen([sys.executable,
"-c", "import time; time.sleep(1)"])
count = 0
while p.poll() is None:
time.sleep(0.1)
count += 1
# We expect that the poll loop probably went around about 10 times,
# but, based on system scheduling we can't control, it's possible
# poll() never returned None. It "should be" very rare that it
# didn't go around at least twice.
self.assertGreaterEqual(count, 2)
# Subsequent invocations should just return the returncode
self.assertEqual(p.poll(), 0)
def test_wait(self):
p = subprocess.Popen([sys.executable,
"-c", "import time; time.sleep(2)"])
self.assertEqual(p.wait(), 0)
# Subsequent invocations should just return the returncode
self.assertEqual(p.wait(), 0)
def test_invalid_bufsize(self):
# an invalid type of the bufsize argument should raise
# TypeError.
with self.assertRaises(TypeError):
subprocess.Popen([sys.executable, "-c", "pass"], "orange")
def test_leaking_fds_on_error(self):
# see bug #5179: Popen leaks file descriptors to PIPEs if
# the child fails to execute; this will eventually exhaust
# the maximum number of open fds. 1024 seems a very common
# value for that limit, but Windows has 2048, so we loop
# 1024 times (each call leaked two fds).
for i in range(1024):
# Windows raises IOError. Others raise OSError.
with self.assertRaises(EnvironmentError) as c:
subprocess.Popen(['nonexisting_i_hope'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# ignore errors that indicate the command was not found
if c.exception.errno not in (errno.ENOENT, errno.EACCES):
raise c.exception
@unittest.skipIf(threading is None, "threading required")
def test_double_close_on_error(self):
# Issue #18851
fds = []
def open_fds():
for i in range(20):
fds.extend(os.pipe())
time.sleep(0.001)
t = threading.Thread(target=open_fds)
t.start()
try:
with self.assertRaises(EnvironmentError):
subprocess.Popen(['nonexisting_i_hope'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
finally:
t.join()
exc = None
for fd in fds:
# If a double close occurred, some of those fds will
# already have been closed by mistake, and os.close()
# here will raise.
try:
os.close(fd)
except OSError as e:
exc = e
if exc is not None:
raise exc
def test_handles_closed_on_exception(self):
# If CreateProcess exits with an error, ensure the
# duplicate output handles are released
ifhandle, ifname = tempfile.mkstemp()
ofhandle, ofname = tempfile.mkstemp()
efhandle, efname = tempfile.mkstemp()
try:
subprocess.Popen (["*"], stdin=ifhandle, stdout=ofhandle,
stderr=efhandle)
except OSError:
os.close(ifhandle)
os.remove(ifname)
os.close(ofhandle)
os.remove(ofname)
os.close(efhandle)
os.remove(efname)
self.assertFalse(os.path.exists(ifname))
self.assertFalse(os.path.exists(ofname))
self.assertFalse(os.path.exists(efname))
def test_communicate_epipe(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen([sys.executable, "-c", 'pass'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.communicate("x" * 2**20)
def test_communicate_epipe_only_stdin(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen([sys.executable, "-c", 'pass'],
stdin=subprocess.PIPE)
self.addCleanup(p.stdin.close)
time.sleep(2)
p.communicate("x" * 2**20)
# This test is Linux-ish specific for simplicity to at least have
# some coverage. It is not a platform specific bug.
@unittest.skipUnless(os.path.isdir('/proc/%d/fd' % os.getpid()),
"Linux specific")
def test_failed_child_execute_fd_leak(self):
"""Test for the fork() failure fd leak reported in issue16327."""
fd_directory = '/proc/%d/fd' % os.getpid()
fds_before_popen = os.listdir(fd_directory)
with self.assertRaises(PopenTestException):
PopenExecuteChildRaises(
[sys.executable, '-c', 'pass'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# NOTE: This test doesn't verify that the real _execute_child
# does not close the file descriptors itself on the way out
# during an exception. Code inspection has confirmed that.
fds_after_exception = os.listdir(fd_directory)
self.assertEqual(fds_before_popen, fds_after_exception)
# context manager
class _SuppressCoreFiles(object):
"""Try to prevent core files from being created."""
old_limit = None
def __enter__(self):
"""Try to save previous ulimit, then set it to (0, 0)."""
if resource is not None:
try:
self.old_limit = resource.getrlimit(resource.RLIMIT_CORE)
resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
except (ValueError, resource.error):
pass
if sys.platform == 'darwin':
# Check if the 'Crash Reporter' on OSX was configured
# in 'Developer' mode and warn that it will get triggered
# when it is.
#
# This assumes that this context manager is used in tests
# that might trigger the next manager.
value = subprocess.Popen(['/usr/bin/defaults', 'read',
'com.apple.CrashReporter', 'DialogType'],
stdout=subprocess.PIPE).communicate()[0]
if value.strip() == b'developer':
print "this tests triggers the Crash Reporter, that is intentional"
sys.stdout.flush()
def __exit__(self, *args):
"""Return core file behavior to default."""
if self.old_limit is None:
return
if resource is not None:
try:
resource.setrlimit(resource.RLIMIT_CORE, self.old_limit)
except (ValueError, resource.error):
pass
@unittest.skipUnless(hasattr(signal, 'SIGALRM'),
"Requires signal.SIGALRM")
def test_communicate_eintr(self):
# Issue #12493: communicate() should handle EINTR
def handler(signum, frame):
pass
old_handler = signal.signal(signal.SIGALRM, handler)
self.addCleanup(signal.signal, signal.SIGALRM, old_handler)
# the process is running for 2 seconds
args = [sys.executable, "-c", 'import time; time.sleep(2)']
for stream in ('stdout', 'stderr'):
kw = {stream: subprocess.PIPE}
with subprocess.Popen(args, **kw) as process:
signal.alarm(1)
# communicate() will be interrupted by SIGALRM
process.communicate()
@unittest.skipIf(mswindows, "POSIX specific tests")
class POSIXProcessTestCase(BaseTestCase):
def test_exceptions(self):
# caught & re-raised exceptions
with self.assertRaises(OSError) as c:
p = subprocess.Popen([sys.executable, "-c", ""],
cwd="/this/path/does/not/exist")
# The attribute child_traceback should contain "os.chdir" somewhere.
self.assertIn("os.chdir", c.exception.child_traceback)
def test_run_abort(self):
# returncode handles signal termination
with _SuppressCoreFiles():
p = subprocess.Popen([sys.executable, "-c",
"import os; os.abort()"])
p.wait()
self.assertEqual(-p.returncode, signal.SIGABRT)
def test_preexec(self):
# preexec function
p = subprocess.Popen([sys.executable, "-c",
"import sys, os;"
"sys.stdout.write(os.getenv('FRUIT'))"],
stdout=subprocess.PIPE,
preexec_fn=lambda: os.putenv("FRUIT", "apple"))
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read(), "apple")
class _TestExecuteChildPopen(subprocess.Popen):
"""Used to test behavior at the end of _execute_child."""
def __init__(self, testcase, *args, **kwargs):
self._testcase = testcase
subprocess.Popen.__init__(self, *args, **kwargs)
def _execute_child(
self, args, executable, preexec_fn, close_fds, cwd, env,
universal_newlines, startupinfo, creationflags, shell, to_close,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite):
try:
subprocess.Popen._execute_child(
self, args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
startupinfo, creationflags, shell, to_close,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
finally:
# Open a bunch of file descriptors and verify that
# none of them are the same as the ones the Popen
# instance is using for stdin/stdout/stderr.
devzero_fds = [os.open("/dev/zero", os.O_RDONLY)
for _ in range(8)]
try:
for fd in devzero_fds:
self._testcase.assertNotIn(
fd, (p2cwrite, c2pread, errread))
finally:
for fd in devzero_fds:
os.close(fd)
@unittest.skipIf(not os.path.exists("/dev/zero"), "/dev/zero required.")
def test_preexec_errpipe_does_not_double_close_pipes(self):
"""Issue16140: Don't double close pipes on preexec error."""
def raise_it():
raise RuntimeError("force the _execute_child() errpipe_data path.")
with self.assertRaises(RuntimeError):
self._TestExecuteChildPopen(
self, [sys.executable, "-c", "pass"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, preexec_fn=raise_it)
def test_args_string(self):
# args is a string
f, fname = tempfile.mkstemp()
os.write(f, "#!/bin/sh\n")
os.write(f, "exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.close(f)
os.chmod(fname, 0o700)
p = subprocess.Popen(fname)
p.wait()
os.remove(fname)
self.assertEqual(p.returncode, 47)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
startupinfo=47)
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
creationflags=47)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen(["echo $FRUIT"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read().strip(), "apple")
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen("echo $FRUIT", shell=1,
stdout=subprocess.PIPE,
env=newenv)
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read().strip(), "apple")
def test_call_string(self):
# call() function with string argument on UNIX
f, fname = tempfile.mkstemp()
os.write(f, "#!/bin/sh\n")
os.write(f, "exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.close(f)
os.chmod(fname, 0700)
rc = subprocess.call(fname)
os.remove(fname)
self.assertEqual(rc, 47)
def test_specific_shell(self):
# Issue #9265: Incorrect name passed as arg[0].
shells = []
for prefix in ['/bin', '/usr/bin/', '/usr/local/bin']:
for name in ['bash', 'ksh']:
sh = os.path.join(prefix, name)
if os.path.isfile(sh):
shells.append(sh)
if not shells: # Will probably work for any shell but csh.
self.skipTest("bash or ksh required for this test")
sh = '/bin/sh'
if os.path.isfile(sh) and not os.path.islink(sh):
# Test will fail if /bin/sh is a symlink to csh.
shells.append(sh)
for sh in shells:
p = subprocess.Popen("echo $0", executable=sh, shell=True,
stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read().strip(), sh)
def _kill_process(self, method, *args):
# Do not inherit file handles from the parent.
# It should fix failures on some platforms.
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
time.sleep(30)
"""],
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
getattr(p, method)(*args)
return p
@unittest.skipIf(sys.platform.startswith(('netbsd', 'openbsd')),
"Due to known OS bug (issue #16762)")
def _kill_dead_process(self, method, *args):
# Do not inherit file handles from the parent.
# It should fix failures on some platforms.
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
"""],
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
# The process should end after this
time.sleep(1)
# This shouldn't raise even though the child is now dead
getattr(p, method)(*args)
p.communicate()
def test_send_signal(self):
p = self._kill_process('send_signal', signal.SIGINT)
_, stderr = p.communicate()
self.assertIn('KeyboardInterrupt', stderr)
self.assertNotEqual(p.wait(), 0)
def test_kill(self):
p = self._kill_process('kill')
_, stderr = p.communicate()
self.assertStderrEqual(stderr, '')
self.assertEqual(p.wait(), -signal.SIGKILL)
def test_terminate(self):
p = self._kill_process('terminate')
_, stderr = p.communicate()
self.assertStderrEqual(stderr, '')
self.assertEqual(p.wait(), -signal.SIGTERM)
def test_send_signal_dead(self):
# Sending a signal to a dead process
self._kill_dead_process('send_signal', signal.SIGINT)
def test_kill_dead(self):
# Killing a dead process
self._kill_dead_process('kill')
def test_terminate_dead(self):
# Terminating a dead process
self._kill_dead_process('terminate')
def check_close_std_fds(self, fds):
# Issue #9905: test that subprocess pipes still work properly with
# some standard fds closed
stdin = 0
newfds = []
for a in fds:
b = os.dup(a)
newfds.append(b)
if a == 0:
stdin = b
try:
for fd in fds:
os.close(fd)
out, err = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
err = test_support.strip_python_stderr(err)
self.assertEqual((out, err), (b'apple', b'orange'))
finally:
for b, a in zip(newfds, fds):
os.dup2(b, a)
for b in newfds:
os.close(b)
def test_close_fd_0(self):
self.check_close_std_fds([0])
def test_close_fd_1(self):
self.check_close_std_fds([1])
def test_close_fd_2(self):
self.check_close_std_fds([2])
def test_close_fds_0_1(self):
self.check_close_std_fds([0, 1])
def test_close_fds_0_2(self):
self.check_close_std_fds([0, 2])
def test_close_fds_1_2(self):
self.check_close_std_fds([1, 2])
def test_close_fds_0_1_2(self):
# Issue #10806: test that subprocess pipes still work properly with
# all standard fds closed.
self.check_close_std_fds([0, 1, 2])
def check_swap_fds(self, stdin_no, stdout_no, stderr_no):
# open up some temporary files
temps = [tempfile.mkstemp() for i in range(3)]
temp_fds = [fd for fd, fname in temps]
try:
# unlink the files -- we won't need to reopen them
for fd, fname in temps:
os.unlink(fname)
# save a copy of the standard file descriptors
saved_fds = [os.dup(fd) for fd in range(3)]
try:
# duplicate the temp files over the standard fd's 0, 1, 2
for fd, temp_fd in enumerate(temp_fds):
os.dup2(temp_fd, fd)
# write some data to what will become stdin, and rewind
os.write(stdin_no, b"STDIN")
os.lseek(stdin_no, 0, 0)
# now use those files in the given order, so that subprocess
# has to rearrange them in the child
p = subprocess.Popen([sys.executable, "-c",
'import sys; got = sys.stdin.read();'
'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
stdin=stdin_no,
stdout=stdout_no,
stderr=stderr_no)
p.wait()
for fd in temp_fds:
os.lseek(fd, 0, 0)
out = os.read(stdout_no, 1024)
err = test_support.strip_python_stderr(os.read(stderr_no, 1024))
finally:
for std, saved in enumerate(saved_fds):
os.dup2(saved, std)
os.close(saved)
self.assertEqual(out, b"got STDIN")
self.assertEqual(err, b"err")
finally:
for fd in temp_fds:
os.close(fd)
# When duping fds, if there arises a situation where one of the fds is
# either 0, 1 or 2, it is possible that it is overwritten (#12607).
# This tests all combinations of this.
def test_swap_fds(self):
self.check_swap_fds(0, 1, 2)
self.check_swap_fds(0, 2, 1)
self.check_swap_fds(1, 0, 2)
self.check_swap_fds(1, 2, 0)
self.check_swap_fds(2, 0, 1)
self.check_swap_fds(2, 1, 0)
def test_wait_when_sigchild_ignored(self):
# NOTE: sigchild_ignore.py may not be an effective test on all OSes.
sigchild_ignore = test_support.findfile("sigchild_ignore.py",
subdir="subprocessdata")
p = subprocess.Popen([sys.executable, sigchild_ignore],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
self.assertEqual(0, p.returncode, "sigchild_ignore.py exited"
" non-zero with this error:\n%s" % stderr)
def test_zombie_fast_process_del(self):
# Issue #12650: on Unix, if Popen.__del__() was called before the
# process exited, it wouldn't be added to subprocess._active, and would
# remain a zombie.
# spawn a Popen, and delete its reference before it exits
p = subprocess.Popen([sys.executable, "-c",
'import sys, time;'
'time.sleep(0.2)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
ident = id(p)
pid = p.pid
del p
# check that p is in the active processes list
self.assertIn(ident, [id(o) for o in subprocess._active])
def test_leak_fast_process_del_killed(self):
# Issue #12650: on Unix, if Popen.__del__() was called before the
# process exited, and the process got killed by a signal, it would never
# be removed from subprocess._active, which triggered a FD and memory
# leak.
# spawn a Popen, delete its reference and kill it
p = subprocess.Popen([sys.executable, "-c",
'import time;'
'time.sleep(3)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
ident = id(p)
pid = p.pid
del p
os.kill(pid, signal.SIGKILL)
# check that p is in the active processes list
self.assertIn(ident, [id(o) for o in subprocess._active])
# let some time for the process to exit, and create a new Popen: this
# should trigger the wait() of p
time.sleep(0.2)
with self.assertRaises(EnvironmentError) as c:
with subprocess.Popen(['nonexisting_i_hope'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
pass
# p should have been wait()ed on, and removed from the _active list
self.assertRaises(OSError, os.waitpid, pid, 0)
self.assertNotIn(ident, [id(o) for o in subprocess._active])
def test_pipe_cloexec(self):
# Issue 12786: check that the communication pipes' FDs are set CLOEXEC,
# and are not inherited by another child process.
p1 = subprocess.Popen([sys.executable, "-c",
'import os;'
'os.read(0, 1)'
],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
p2 = subprocess.Popen([sys.executable, "-c", """if True:
import os, errno, sys
for fd in %r:
try:
os.close(fd)
except OSError as e:
if e.errno != errno.EBADF:
raise
else:
sys.exit(1)
sys.exit(0)
""" % [f.fileno() for f in (p1.stdin, p1.stdout,
p1.stderr)]
],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=False)
p1.communicate('foo')
_, stderr = p2.communicate()
self.assertEqual(p2.returncode, 0, "Unexpected error: " + repr(stderr))
@unittest.skipUnless(mswindows, "Windows specific tests")
class Win32ProcessTestCase(BaseTestCase):
def test_startupinfo(self):
# startupinfo argument
        # We use hardcoded constants because we do not want to
        # depend on win32all.
STARTF_USESHOWWINDOW = 1
SW_MAXIMIZE = 3
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags = STARTF_USESHOWWINDOW
startupinfo.wShowWindow = SW_MAXIMIZE
# Since Python is a console process, it won't be affected
# by wShowWindow, but the argument should be silently
# ignored
subprocess.call([sys.executable, "-c", "import sys; sys.exit(0)"],
startupinfo=startupinfo)
def test_creationflags(self):
# creationflags argument
CREATE_NEW_CONSOLE = 16
sys.stderr.write(" a DOS box should flash briefly ...\n")
subprocess.call(sys.executable +
' -c "import time; time.sleep(0.25)"',
creationflags=CREATE_NEW_CONSOLE)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
preexec_fn=lambda: 1)
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
stdout=subprocess.PIPE,
close_fds=True)
def test_close_fds(self):
# close file descriptors
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"],
close_fds=True)
self.assertEqual(rc, 47)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen(["set"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
self.addCleanup(p.stdout.close)
self.assertIn("physalis", p.stdout.read())
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen("set", shell=1,
stdout=subprocess.PIPE,
env=newenv)
self.addCleanup(p.stdout.close)
self.assertIn("physalis", p.stdout.read())
def test_call_string(self):
# call() function with string argument on Windows
rc = subprocess.call(sys.executable +
' -c "import sys; sys.exit(47)"')
self.assertEqual(rc, 47)
def _kill_process(self, method, *args):
        # Some win32 buildbots raise EOFError if stdin is inherited
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
time.sleep(30)
"""],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
getattr(p, method)(*args)
_, stderr = p.communicate()
self.assertStderrEqual(stderr, '')
returncode = p.wait()
self.assertNotEqual(returncode, 0)
def _kill_dead_process(self, method, *args):
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
sys.exit(42)
"""],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
# The process should end after this
time.sleep(1)
# This shouldn't raise even though the child is now dead
getattr(p, method)(*args)
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
rc = p.wait()
self.assertEqual(rc, 42)
def test_send_signal(self):
self._kill_process('send_signal', signal.SIGTERM)
def test_kill(self):
self._kill_process('kill')
def test_terminate(self):
self._kill_process('terminate')
def test_send_signal_dead(self):
self._kill_dead_process('send_signal', signal.SIGTERM)
def test_kill_dead(self):
self._kill_dead_process('kill')
def test_terminate_dead(self):
self._kill_dead_process('terminate')
@unittest.skipUnless(getattr(subprocess, '_has_poll', False),
"poll system call not supported")
class ProcessTestCaseNoPoll(ProcessTestCase):
def setUp(self):
subprocess._has_poll = False
ProcessTestCase.setUp(self)
def tearDown(self):
subprocess._has_poll = True
ProcessTestCase.tearDown(self)
class HelperFunctionTests(unittest.TestCase):
@unittest.skipIf(mswindows, "errno and EINTR make no sense on windows")
def test_eintr_retry_call(self):
record_calls = []
def fake_os_func(*args):
record_calls.append(args)
if len(record_calls) == 2:
raise OSError(errno.EINTR, "fake interrupted system call")
return tuple(reversed(args))
self.assertEqual((999, 256),
subprocess._eintr_retry_call(fake_os_func, 256, 999))
self.assertEqual([(256, 999)], record_calls)
# This time there will be an EINTR so it will loop once.
self.assertEqual((666,),
subprocess._eintr_retry_call(fake_os_func, 666))
self.assertEqual([(256, 999), (666,), (666,)], record_calls)
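# For context, the helper exercised above is essentially the following
# retry loop (a sketch of the documented behaviour, not the stdlib source
# verbatim): retry the call while it fails with EINTR, propagate anything
# else.
#
#     def _eintr_retry_call(func, *args):
#         while True:
#             try:
#                 return func(*args)
#             except (OSError, IOError) as e:
#                 if e.errno == errno.EINTR:
#                     continue
#                 raise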
@unittest.skipUnless(mswindows, "mswindows only")
class CommandsWithSpaces(BaseTestCase):
def setUp(self):
super(CommandsWithSpaces, self).setUp()
f, fname = tempfile.mkstemp(".py", "te st")
        self.fname = fname.lower()
        os.write(f, b"import sys;"
                    b"sys.stdout.write('%d %s' % (len(sys.argv), [a.lower() for a in sys.argv]))"
                )
os.close(f)
def tearDown(self):
os.remove(self.fname)
super(CommandsWithSpaces, self).tearDown()
def with_spaces(self, *args, **kwargs):
kwargs['stdout'] = subprocess.PIPE
p = subprocess.Popen(*args, **kwargs)
self.addCleanup(p.stdout.close)
self.assertEqual(
            p.stdout.read().decode("mbcs"),
"2 [%r, 'ab cd']" % self.fname
)
def test_shell_string_with_spaces(self):
# call() function with string argument with spaces on Windows
self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
"ab cd"), shell=1)
def test_shell_sequence_with_spaces(self):
# call() function with sequence argument with spaces on Windows
self.with_spaces([sys.executable, self.fname, "ab cd"], shell=1)
def test_noshell_string_with_spaces(self):
# call() function with string argument with spaces on Windows
self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
"ab cd"))
def test_noshell_sequence_with_spaces(self):
# call() function with sequence argument with spaces on Windows
self.with_spaces([sys.executable, self.fname, "ab cd"])
def test_main():
unit_tests = (ProcessTestCase,
POSIXProcessTestCase,
Win32ProcessTestCase,
ProcessTestCaseNoPoll,
HelperFunctionTests,
CommandsWithSpaces)
test_support.run_unittest(*unit_tests)
test_support.reap_children()
if __name__ == "__main__":
test_main()
| sometallgit/AutoUploader | Python27/Lib/test/test_subprocess.py | Python | mit | 59,584 |
# coding=utf-8
"""
DCRM - Darwin Cydia Repository Manager
Copyright (C) 2017 WU Zheng <i.82@me.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from django import template
from django.template import Context
register = template.Library()
@register.inclusion_tag("admin/version/package_title_style.html", takes_context=True)
def package_title_style(context):
ctx = Context(context)
if 'c_package__exact' in context.request.GET:
ctx.update({
"filtered_by_package": context.request.GET['c_package__exact']
})
return ctx
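# Illustrative usage sketch (not part of the original module): the tag is
# loaded under its module name and takes no arguments; it reads the optional
# ``c_package__exact`` filter value from the request's query string.
#
#     {% load package_title_style %}
#     {% package_title_style %}
#
# The inclusion template "admin/version/package_title_style.html" then
# receives ``filtered_by_package`` whenever the changelist is filtered by
# package.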
| 82Flex/DCRM | WEIPDCRM/templatetags/package_title_style.py | Python | agpl-3.0 | 1,157 |
from __future__ import annotations
from ..core.bitmap import bitmap
from ..core.index_update import IndexUpdate
from ..table.dshape import DataShape, dshape_from_dict
import numpy as np
from typing import Any, List, Optional, Dict, Tuple, TYPE_CHECKING
if TYPE_CHECKING:
from progressivis.core.changemanager_dict import DictChangeManager
class PsDict(Dict[str, Any]):
"progressive dictionary"
def __init__(self, other: Optional[Dict[str, Any]] = None, **kwargs: Any) -> None:
if other is not None:
            # useful when keys are not valid identifiers;
            # "other" and kwargs may be used together as long
            # as their keys do not overlap
assert isinstance(other, dict)
else:
other = {}
super().__init__(other, **kwargs)
self._index: Optional[Dict[str, int]] = None
self._deleted: Dict[str, int] = {}
self._inverse: Optional[Dict[int, str]] = None
self._inverse_del: Optional[Dict[int, str]] = None
self.changes: Optional[DictChangeManager] = None
def compute_updates(
self, start: int, now: float, mid: Optional[str], cleanup: bool = True
) -> Optional[IndexUpdate]:
        assert False, "compute_updates should not be called on PsDict"
        # NOTE: everything below this assert is unreachable; it is kept only
        # as a reference sketch of the update computation.
if self.changes:
updates = self.changes.compute_updates(start, now, mid, cleanup=cleanup)
if updates is None:
updates = IndexUpdate(created=bitmap(self.ids))
return updates
return None
def fill(self, val: Any) -> None:
for k in self.keys():
self[k] = val
@property
def array(self) -> np.ndarray[Any, Any]:
return np.array(list(self.values()))
@property
def as_row(self) -> Dict[str, List[Any]]:
return {k: [v] for (k, v) in self.items()}
@property
def dshape(self) -> DataShape:
return dshape_from_dict(self.as_row)
# def last(self, *args, **kwargs):
# print("LAST")
# import pdb;pdb.set_trace()
def key_of(self, id: int) -> Tuple[str, str]:
"""
returns (key, status)
key: the key associated to id
status: {active|deleted}
"""
if self._index is None:
return list(self.keys())[id], "active"
if self._inverse is None:
self._inverse = {i: k for (k, i) in self._index.items()}
if id in self._inverse:
return self._inverse[id], "active"
if self._inverse_del is None:
self._inverse_del = {i: k for (k, i) in self._deleted.items()}
if id in self._inverse_del:
return self._inverse_del[id], "deleted"
raise KeyError(f"Key not found for id: {id}")
def k_(self, id: int) -> str:
k, _ = self.key_of(id)
return k
def fix_indices(self) -> None: # TODO find a better name ...
if self._index is None:
return
self._inverse = None
self._inverse_del = None
next_id = max(self.ids) + 1
for k in self.keys():
if k not in self._index:
self._index[k] = next_id
next_id += 1
if k in self._deleted: # a previously deleted key was added later
del self._deleted[k]
def created_indices(self, prev: PsDict) -> bitmap:
if self._index is None:
return bitmap(range(len(prev), len(self)))
new_keys = set(self.keys()) - set(prev.keys())
return bitmap((i for (k, i) in self._index.items() if k in new_keys))
def updated_indices(self, prev: PsDict) -> bitmap:
if self._index is None:
return bitmap(
(
i
for (i, x, y) in zip(range(len(prev)), prev.values(), self.values())
if x is not y
)
)
common_keys = set(self.keys()) & set(prev.keys())
return bitmap(
(
i
for (k, i) in self._index.items()
if k in common_keys and self[k] is not prev[k]
)
)
def deleted_indices(self, prev: PsDict) -> bitmap:
if self._index is None:
return bitmap()
del_keys = set(prev.keys()) - set(self.keys())
return bitmap((i for (k, i) in self._deleted.items() if k in del_keys))
def __delitem__(self, key: str) -> None:
if key not in self:
raise KeyError(f"Key {key} does not exist")
if self._index is None: # first deletion
self._index = dict(zip(self.keys(), range(len(self))))
self._deleted[key] = self._index[key]
del self._index[key]
super().__delitem__(key)
def clear(self) -> None:
for key in list(self.keys()):
del self[key]
def set_nth(self, i: int, val: Any) -> None:
self[list(self)[i]] = val
def get_nth(self, i: int) -> Any:
return self[list(self)[i]]
@property
def ids(self) -> bitmap:
if self._index is None:
return bitmap(range(len(self)))
return bitmap(self._index.values())
EMPTY_PSDICT = PsDict()
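# Minimal usage sketch (illustrative, relying only on the behaviour defined
# above): ids are stable across deletions, and a key that is deleted and
# later re-added receives a fresh id once fix_indices() runs.
#
#     d = PsDict({"a": 1, "b": 2})
#     assert list(d.ids) == [0, 1]
#     del d["a"]              # first deletion materializes _index
#     assert list(d.ids) == [1]
#     d["a"] = 3              # plain dict insertion, index not updated yet
#     d.fix_indices()         # "a" is re-registered under id 2, not 0
#     assert list(d.ids) == [1, 2]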
| jdfekete/progressivis | progressivis/utils/psdict.py | Python | bsd-2-clause | 5,156 |
from ._llops import dense_forward
from brainforge.util.typing import zX
class DenseOp:
@staticmethod
def forward(X, W, b=None):
if b is None:
b = zX(W.shape[-1])
return dense_forward(X, W, b)
@staticmethod
def backward(X, E, W):
raise NotImplementedError("No backwards for DenseOp!")
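# Usage sketch (illustrative; the array shapes are assumptions, since
# dense_forward is implemented in the compiled _llops module and its exact
# layout is not shown here):
#
#     import numpy as np
#     X = np.zeros((8, 4))           # batch of 8 samples, 4 features
#     W = np.zeros((4, 3))           # weights mapping 4 inputs to 3 outputs
#     out = DenseOp.forward(X, W)    # bias defaults to zX(3), i.e. zeros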
| csxeba/brainforge | brainforge/llatomic/llcore_op.py | Python | gpl-3.0 | 340 |
from __future__ import division
import numpy as np
from numpy.testing import assert_almost_equal
import pytest
from acoustics.power import lw_iso3746
@pytest.mark.parametrize("background_noise, expected", [
(79, 91.153934187),
(83, 90.187405234),
(88, 88.153934187),
])
def test_lw_iso3746(background_noise, expected):
LpAi = np.array([90, 90, 90, 90])
LpAiB = background_noise * np.ones(4)
S = 10
alpha = np.array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
surfaces = np.array([10, 10, 10, 10, 10, 10])
calculated = lw_iso3746(LpAi, LpAiB, S, alpha, surfaces)
assert_almost_equal(calculated, expected)
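# For reference, the quantities above follow the ISO 3746 survey method
# (a sketch that reproduces the expected values; the exact averaging in
# lw_iso3746 is assumed to be the energy mean over microphone positions):
#
#     dL = mean(LpAi) - mean(LpAiB)                 # signal-to-background gap
#     K1 = 0 if dL > 10 else 3 if dL <= 3 \
#          else -10 * log10(1 - 10 ** (-0.1 * dL))  # background correction
#     A  = sum(alpha * surfaces)                    # room absorption area
#     K2 = 10 * log10(1 + 4 * S / A)                # environmental correction
#     Lw = mean(LpAi) - K1 - K2 + 10 * log10(S / 1.0)   # S0 = 1 m**2
#
# e.g. dL = 7 gives K1 ~ 0.967 and K2 ~ 8.846, hence Lw ~ 90.1874 (the
# second parametrized case above).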
| FRidh/python-acoustics | tests/test_power.py | Python | bsd-3-clause | 637 |
from info import __doc__
from arpack import *
| lesserwhirls/scipy-cwt | scipy/sparse/linalg/eigen/arpack/__init__.py | Python | bsd-3-clause | 46 |
"""Unit tests for the copy module."""
import copy
import copyreg
import weakref
import abc
from operator import le, lt, ge, gt, eq, ne
import unittest
from test import support
order_comparisons = le, lt, ge, gt
equality_comparisons = eq, ne
comparisons = order_comparisons + equality_comparisons
class TestCopy(unittest.TestCase):
# Attempt full line coverage of copy.py from top to bottom
def test_exceptions(self):
self.assertIs(copy.Error, copy.error)
self.assertTrue(issubclass(copy.Error, Exception))
# The copy() method
def test_copy_basic(self):
x = 42
y = copy.copy(x)
self.assertEqual(x, y)
def test_copy_copy(self):
class C(object):
def __init__(self, foo):
self.foo = foo
def __copy__(self):
return C(self.foo)
x = C(42)
y = copy.copy(x)
self.assertEqual(y.__class__, x.__class__)
self.assertEqual(y.foo, x.foo)
def test_copy_registry(self):
class C(object):
def __new__(cls, foo):
obj = object.__new__(cls)
obj.foo = foo
return obj
def pickle_C(obj):
return (C, (obj.foo,))
x = C(42)
self.assertRaises(TypeError, copy.copy, x)
copyreg.pickle(C, pickle_C, C)
y = copy.copy(x)
def test_copy_reduce_ex(self):
class C(object):
def __reduce_ex__(self, proto):
c.append(1)
return ""
def __reduce__(self):
self.fail("shouldn't call this")
c = []
x = C()
y = copy.copy(x)
self.assertIs(y, x)
self.assertEqual(c, [1])
def test_copy_reduce(self):
class C(object):
def __reduce__(self):
c.append(1)
return ""
c = []
x = C()
y = copy.copy(x)
self.assertIs(y, x)
self.assertEqual(c, [1])
def test_copy_cant(self):
class C(object):
def __getattribute__(self, name):
if name.startswith("__reduce"):
raise AttributeError(name)
return object.__getattribute__(self, name)
x = C()
self.assertRaises(copy.Error, copy.copy, x)
# Type-specific _copy_xxx() methods
def test_copy_atomic(self):
class Classic:
pass
class NewStyle(object):
pass
def f():
pass
class WithMetaclass(metaclass=abc.ABCMeta):
pass
tests = [None, 42, 2**100, 3.14, True, False, 1j,
"hello", "hello\u1234", f.__code__,
b"world", bytes(range(256)),
NewStyle, range(10), Classic, max, WithMetaclass]
for x in tests:
self.assertIs(copy.copy(x), x)
def test_copy_list(self):
x = [1, 2, 3]
self.assertEqual(copy.copy(x), x)
def test_copy_tuple(self):
x = (1, 2, 3)
self.assertEqual(copy.copy(x), x)
def test_copy_dict(self):
x = {"foo": 1, "bar": 2}
self.assertEqual(copy.copy(x), x)
def test_copy_inst_vanilla(self):
class C:
def __init__(self, foo):
self.foo = foo
def __eq__(self, other):
return self.foo == other.foo
x = C(42)
self.assertEqual(copy.copy(x), x)
def test_copy_inst_copy(self):
class C:
def __init__(self, foo):
self.foo = foo
def __copy__(self):
return C(self.foo)
def __eq__(self, other):
return self.foo == other.foo
x = C(42)
self.assertEqual(copy.copy(x), x)
def test_copy_inst_getinitargs(self):
class C:
def __init__(self, foo):
self.foo = foo
def __getinitargs__(self):
return (self.foo,)
def __eq__(self, other):
return self.foo == other.foo
x = C(42)
self.assertEqual(copy.copy(x), x)
def test_copy_inst_getstate(self):
class C:
def __init__(self, foo):
self.foo = foo
def __getstate__(self):
return {"foo": self.foo}
def __eq__(self, other):
return self.foo == other.foo
x = C(42)
self.assertEqual(copy.copy(x), x)
def test_copy_inst_setstate(self):
class C:
def __init__(self, foo):
self.foo = foo
def __setstate__(self, state):
self.foo = state["foo"]
def __eq__(self, other):
return self.foo == other.foo
x = C(42)
self.assertEqual(copy.copy(x), x)
def test_copy_inst_getstate_setstate(self):
class C:
def __init__(self, foo):
self.foo = foo
def __getstate__(self):
return self.foo
def __setstate__(self, state):
self.foo = state
def __eq__(self, other):
return self.foo == other.foo
x = C(42)
self.assertEqual(copy.copy(x), x)
# The deepcopy() method
def test_deepcopy_basic(self):
x = 42
y = copy.deepcopy(x)
self.assertEqual(y, x)
def test_deepcopy_memo(self):
# Tests of reflexive objects are under type-specific sections below.
# This tests only repetitions of objects.
x = []
x = [x, x]
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assertIsNot(y, x)
self.assertIsNot(y[0], x[0])
self.assertIs(y[0], y[1])
def test_deepcopy_issubclass(self):
# XXX Note: there's no way to test the TypeError coming out of
# issubclass() -- this can only happen when an extension
# module defines a "type" that doesn't formally inherit from
# type.
class Meta(type):
pass
class C(metaclass=Meta):
pass
self.assertEqual(copy.deepcopy(C), C)
def test_deepcopy_deepcopy(self):
class C(object):
def __init__(self, foo):
self.foo = foo
def __deepcopy__(self, memo=None):
return C(self.foo)
x = C(42)
y = copy.deepcopy(x)
self.assertEqual(y.__class__, x.__class__)
self.assertEqual(y.foo, x.foo)
def test_deepcopy_registry(self):
class C(object):
def __new__(cls, foo):
obj = object.__new__(cls)
obj.foo = foo
return obj
def pickle_C(obj):
return (C, (obj.foo,))
x = C(42)
self.assertRaises(TypeError, copy.deepcopy, x)
copyreg.pickle(C, pickle_C, C)
y = copy.deepcopy(x)
def test_deepcopy_reduce_ex(self):
class C(object):
def __reduce_ex__(self, proto):
c.append(1)
return ""
def __reduce__(self):
self.fail("shouldn't call this")
c = []
x = C()
y = copy.deepcopy(x)
self.assertIs(y, x)
self.assertEqual(c, [1])
def test_deepcopy_reduce(self):
class C(object):
def __reduce__(self):
c.append(1)
return ""
c = []
x = C()
y = copy.deepcopy(x)
self.assertIs(y, x)
self.assertEqual(c, [1])
def test_deepcopy_cant(self):
class C(object):
def __getattribute__(self, name):
if name.startswith("__reduce"):
raise AttributeError(name)
return object.__getattribute__(self, name)
x = C()
self.assertRaises(copy.Error, copy.deepcopy, x)
# Type-specific _deepcopy_xxx() methods
def test_deepcopy_atomic(self):
class Classic:
pass
class NewStyle(object):
pass
def f():
pass
tests = [None, 42, 2**100, 3.14, True, False, 1j,
"hello", "hello\u1234", f.__code__,
NewStyle, range(10), Classic, max]
for x in tests:
self.assertIs(copy.deepcopy(x), x)
def test_deepcopy_list(self):
x = [[1, 2], 3]
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assertIsNot(x, y)
self.assertIsNot(x[0], y[0])
def test_deepcopy_reflexive_list(self):
x = []
x.append(x)
y = copy.deepcopy(x)
for op in comparisons:
self.assertRaises(RuntimeError, op, y, x)
self.assertIsNot(y, x)
self.assertIs(y[0], y)
self.assertEqual(len(y), 1)
def test_deepcopy_empty_tuple(self):
x = ()
y = copy.deepcopy(x)
self.assertIs(x, y)
def test_deepcopy_tuple(self):
x = ([1, 2], 3)
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assertIsNot(x, y)
self.assertIsNot(x[0], y[0])
def test_deepcopy_tuple_of_immutables(self):
x = ((1, 2), 3)
y = copy.deepcopy(x)
self.assertIs(x, y)
def test_deepcopy_reflexive_tuple(self):
x = ([],)
x[0].append(x)
y = copy.deepcopy(x)
for op in comparisons:
self.assertRaises(RuntimeError, op, y, x)
self.assertIsNot(y, x)
self.assertIsNot(y[0], x[0])
self.assertIs(y[0][0], y)
def test_deepcopy_dict(self):
x = {"foo": [1, 2], "bar": 3}
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assertIsNot(x, y)
self.assertIsNot(x["foo"], y["foo"])
def test_deepcopy_reflexive_dict(self):
x = {}
x['foo'] = x
y = copy.deepcopy(x)
for op in order_comparisons:
self.assertRaises(TypeError, op, y, x)
for op in equality_comparisons:
self.assertRaises(RuntimeError, op, y, x)
self.assertIsNot(y, x)
self.assertIs(y['foo'], y)
self.assertEqual(len(y), 1)
def test_deepcopy_keepalive(self):
memo = {}
x = []
y = copy.deepcopy(x, memo)
self.assertIs(memo[id(memo)][0], x)
def test_deepcopy_dont_memo_immutable(self):
memo = {}
x = [1, 2, 3, 4]
y = copy.deepcopy(x, memo)
self.assertEqual(y, x)
# There's the entry for the new list, and the keep alive.
self.assertEqual(len(memo), 2)
memo = {}
x = [(1, 2)]
y = copy.deepcopy(x, memo)
self.assertEqual(y, x)
# Tuples with immutable contents are immutable for deepcopy.
self.assertEqual(len(memo), 2)
def test_deepcopy_inst_vanilla(self):
class C:
def __init__(self, foo):
self.foo = foo
def __eq__(self, other):
return self.foo == other.foo
x = C([42])
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assertIsNot(y.foo, x.foo)
def test_deepcopy_inst_deepcopy(self):
class C:
def __init__(self, foo):
self.foo = foo
def __deepcopy__(self, memo):
return C(copy.deepcopy(self.foo, memo))
def __eq__(self, other):
return self.foo == other.foo
x = C([42])
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assertIsNot(y, x)
self.assertIsNot(y.foo, x.foo)
def test_deepcopy_inst_getinitargs(self):
class C:
def __init__(self, foo):
self.foo = foo
def __getinitargs__(self):
return (self.foo,)
def __eq__(self, other):
return self.foo == other.foo
x = C([42])
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assertIsNot(y, x)
self.assertIsNot(y.foo, x.foo)
def test_deepcopy_inst_getstate(self):
class C:
def __init__(self, foo):
self.foo = foo
def __getstate__(self):
return {"foo": self.foo}
def __eq__(self, other):
return self.foo == other.foo
x = C([42])
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assertIsNot(y, x)
self.assertIsNot(y.foo, x.foo)
def test_deepcopy_inst_setstate(self):
class C:
def __init__(self, foo):
self.foo = foo
def __setstate__(self, state):
self.foo = state["foo"]
def __eq__(self, other):
return self.foo == other.foo
x = C([42])
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assertIsNot(y, x)
self.assertIsNot(y.foo, x.foo)
def test_deepcopy_inst_getstate_setstate(self):
class C:
def __init__(self, foo):
self.foo = foo
def __getstate__(self):
return self.foo
def __setstate__(self, state):
self.foo = state
def __eq__(self, other):
return self.foo == other.foo
x = C([42])
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assertIsNot(y, x)
self.assertIsNot(y.foo, x.foo)
def test_deepcopy_reflexive_inst(self):
class C:
pass
x = C()
x.foo = x
y = copy.deepcopy(x)
self.assertIsNot(y, x)
self.assertIs(y.foo, y)
# _reconstruct()
def test_reconstruct_string(self):
class C(object):
def __reduce__(self):
return ""
x = C()
y = copy.copy(x)
self.assertIs(y, x)
y = copy.deepcopy(x)
self.assertIs(y, x)
def test_reconstruct_nostate(self):
class C(object):
def __reduce__(self):
return (C, ())
x = C()
x.foo = 42
y = copy.copy(x)
self.assertIs(y.__class__, x.__class__)
y = copy.deepcopy(x)
self.assertIs(y.__class__, x.__class__)
def test_reconstruct_state(self):
class C(object):
def __reduce__(self):
return (C, (), self.__dict__)
def __eq__(self, other):
return self.__dict__ == other.__dict__
x = C()
x.foo = [42]
y = copy.copy(x)
self.assertEqual(y, x)
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assertIsNot(y.foo, x.foo)
def test_reconstruct_state_setstate(self):
class C(object):
def __reduce__(self):
return (C, (), self.__dict__)
def __setstate__(self, state):
self.__dict__.update(state)
def __eq__(self, other):
return self.__dict__ == other.__dict__
x = C()
x.foo = [42]
y = copy.copy(x)
self.assertEqual(y, x)
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assertIsNot(y.foo, x.foo)
def test_reconstruct_reflexive(self):
class C(object):
pass
x = C()
x.foo = x
y = copy.deepcopy(x)
self.assertIsNot(y, x)
self.assertIs(y.foo, y)
# Additions for Python 2.3 and pickle protocol 2
def test_reduce_4tuple(self):
class C(list):
def __reduce__(self):
return (C, (), self.__dict__, iter(self))
def __eq__(self, other):
return (list(self) == list(other) and
self.__dict__ == other.__dict__)
x = C([[1, 2], 3])
y = copy.copy(x)
self.assertEqual(x, y)
self.assertIsNot(x, y)
self.assertIs(x[0], y[0])
y = copy.deepcopy(x)
self.assertEqual(x, y)
self.assertIsNot(x, y)
self.assertIsNot(x[0], y[0])
def test_reduce_5tuple(self):
class C(dict):
def __reduce__(self):
return (C, (), self.__dict__, None, self.items())
def __eq__(self, other):
return (dict(self) == dict(other) and
self.__dict__ == other.__dict__)
x = C([("foo", [1, 2]), ("bar", 3)])
y = copy.copy(x)
self.assertEqual(x, y)
self.assertIsNot(x, y)
self.assertIs(x["foo"], y["foo"])
y = copy.deepcopy(x)
self.assertEqual(x, y)
self.assertIsNot(x, y)
self.assertIsNot(x["foo"], y["foo"])
def test_copy_slots(self):
class C(object):
__slots__ = ["foo"]
x = C()
x.foo = [42]
y = copy.copy(x)
self.assertIs(x.foo, y.foo)
def test_deepcopy_slots(self):
class C(object):
__slots__ = ["foo"]
x = C()
x.foo = [42]
y = copy.deepcopy(x)
self.assertEqual(x.foo, y.foo)
self.assertIsNot(x.foo, y.foo)
def test_deepcopy_dict_subclass(self):
class C(dict):
def __init__(self, d=None):
if not d:
d = {}
self._keys = list(d.keys())
super().__init__(d)
def __setitem__(self, key, item):
super().__setitem__(key, item)
if key not in self._keys:
self._keys.append(key)
x = C(d={'foo':0})
y = copy.deepcopy(x)
self.assertEqual(x, y)
self.assertEqual(x._keys, y._keys)
self.assertIsNot(x, y)
x['bar'] = 1
self.assertNotEqual(x, y)
self.assertNotEqual(x._keys, y._keys)
def test_copy_list_subclass(self):
class C(list):
pass
x = C([[1, 2], 3])
x.foo = [4, 5]
y = copy.copy(x)
self.assertEqual(list(x), list(y))
self.assertEqual(x.foo, y.foo)
self.assertIs(x[0], y[0])
self.assertIs(x.foo, y.foo)
def test_deepcopy_list_subclass(self):
class C(list):
pass
x = C([[1, 2], 3])
x.foo = [4, 5]
y = copy.deepcopy(x)
self.assertEqual(list(x), list(y))
self.assertEqual(x.foo, y.foo)
self.assertIsNot(x[0], y[0])
self.assertIsNot(x.foo, y.foo)
def test_copy_tuple_subclass(self):
class C(tuple):
pass
x = C([1, 2, 3])
self.assertEqual(tuple(x), (1, 2, 3))
y = copy.copy(x)
self.assertEqual(tuple(y), (1, 2, 3))
def test_deepcopy_tuple_subclass(self):
class C(tuple):
pass
x = C([[1, 2], 3])
self.assertEqual(tuple(x), ([1, 2], 3))
y = copy.deepcopy(x)
self.assertEqual(tuple(y), ([1, 2], 3))
self.assertIsNot(x, y)
self.assertIsNot(x[0], y[0])
def test_getstate_exc(self):
class EvilState(object):
def __getstate__(self):
raise ValueError("ain't got no stickin' state")
self.assertRaises(ValueError, copy.copy, EvilState())
def test_copy_function(self):
self.assertEqual(copy.copy(global_foo), global_foo)
def foo(x, y): return x+y
self.assertEqual(copy.copy(foo), foo)
bar = lambda: None
self.assertEqual(copy.copy(bar), bar)
def test_deepcopy_function(self):
self.assertEqual(copy.deepcopy(global_foo), global_foo)
def foo(x, y): return x+y
self.assertEqual(copy.deepcopy(foo), foo)
bar = lambda: None
self.assertEqual(copy.deepcopy(bar), bar)
def _check_weakref(self, _copy):
class C(object):
pass
obj = C()
x = weakref.ref(obj)
y = _copy(x)
self.assertIs(y, x)
del obj
y = _copy(x)
self.assertIs(y, x)
def test_copy_weakref(self):
self._check_weakref(copy.copy)
def test_deepcopy_weakref(self):
self._check_weakref(copy.deepcopy)
def _check_copy_weakdict(self, _dicttype):
class C(object):
pass
a, b, c, d = [C() for i in range(4)]
u = _dicttype()
u[a] = b
u[c] = d
v = copy.copy(u)
self.assertIsNot(v, u)
self.assertEqual(v, u)
self.assertEqual(v[a], b)
self.assertEqual(v[c], d)
self.assertEqual(len(v), 2)
del c, d
self.assertEqual(len(v), 1)
x, y = C(), C()
# The underlying containers are decoupled
v[x] = y
self.assertNotIn(x, u)
def test_copy_weakkeydict(self):
self._check_copy_weakdict(weakref.WeakKeyDictionary)
def test_copy_weakvaluedict(self):
self._check_copy_weakdict(weakref.WeakValueDictionary)
def test_deepcopy_weakkeydict(self):
class C(object):
def __init__(self, i):
self.i = i
a, b, c, d = [C(i) for i in range(4)]
u = weakref.WeakKeyDictionary()
u[a] = b
u[c] = d
# Keys aren't copied, values are
v = copy.deepcopy(u)
self.assertNotEqual(v, u)
self.assertEqual(len(v), 2)
self.assertIsNot(v[a], b)
self.assertIsNot(v[c], d)
self.assertEqual(v[a].i, b.i)
self.assertEqual(v[c].i, d.i)
del c
self.assertEqual(len(v), 1)
def test_deepcopy_weakvaluedict(self):
class C(object):
def __init__(self, i):
self.i = i
a, b, c, d = [C(i) for i in range(4)]
u = weakref.WeakValueDictionary()
u[a] = b
u[c] = d
# Keys are copied, values aren't
v = copy.deepcopy(u)
self.assertNotEqual(v, u)
self.assertEqual(len(v), 2)
(x, y), (z, t) = sorted(v.items(), key=lambda pair: pair[0].i)
self.assertIsNot(x, a)
self.assertEqual(x.i, a.i)
self.assertIs(y, b)
self.assertIsNot(z, c)
self.assertEqual(z.i, c.i)
self.assertIs(t, d)
del x, y, z, t
del d
self.assertEqual(len(v), 1)
def test_deepcopy_bound_method(self):
class Foo(object):
def m(self):
pass
f = Foo()
f.b = f.m
g = copy.deepcopy(f)
self.assertEqual(g.m, g.b)
self.assertIs(g.b.__self__, g)
g.b()
def global_foo(x, y): return x+y
def test_main():
support.run_unittest(TestCopy)
if __name__ == "__main__":
test_main()
=======
"""Unit tests for the copy module."""
import copy
import copyreg
import weakref
import abc
from operator import le, lt, ge, gt, eq, ne
import unittest
from test import support
order_comparisons = le, lt, ge, gt
equality_comparisons = eq, ne
comparisons = order_comparisons + equality_comparisons
class TestCopy(unittest.TestCase):
# Attempt full line coverage of copy.py from top to bottom
def test_exceptions(self):
self.assertIs(copy.Error, copy.error)
self.assertTrue(issubclass(copy.Error, Exception))
# The copy() method
def test_copy_basic(self):
x = 42
y = copy.copy(x)
self.assertEqual(x, y)
def test_copy_copy(self):
class C(object):
def __init__(self, foo):
self.foo = foo
def __copy__(self):
return C(self.foo)
x = C(42)
y = copy.copy(x)
self.assertEqual(y.__class__, x.__class__)
self.assertEqual(y.foo, x.foo)
def test_copy_registry(self):
class C(object):
def __new__(cls, foo):
obj = object.__new__(cls)
obj.foo = foo
return obj
def pickle_C(obj):
return (C, (obj.foo,))
x = C(42)
self.assertRaises(TypeError, copy.copy, x)
copyreg.pickle(C, pickle_C, C)
y = copy.copy(x)
def test_copy_reduce_ex(self):
class C(object):
def __reduce_ex__(self, proto):
c.append(1)
return ""
def __reduce__(self):
self.fail("shouldn't call this")
c = []
x = C()
y = copy.copy(x)
self.assertIs(y, x)
self.assertEqual(c, [1])
def test_copy_reduce(self):
class C(object):
def __reduce__(self):
c.append(1)
return ""
c = []
x = C()
y = copy.copy(x)
self.assertIs(y, x)
self.assertEqual(c, [1])
def test_copy_cant(self):
class C(object):
def __getattribute__(self, name):
if name.startswith("__reduce"):
raise AttributeError(name)
return object.__getattribute__(self, name)
x = C()
self.assertRaises(copy.Error, copy.copy, x)
# Type-specific _copy_xxx() methods
def test_copy_atomic(self):
class Classic:
pass
class NewStyle(object):
pass
def f():
pass
class WithMetaclass(metaclass=abc.ABCMeta):
pass
tests = [None, 42, 2**100, 3.14, True, False, 1j,
"hello", "hello\u1234", f.__code__,
b"world", bytes(range(256)),
NewStyle, range(10), Classic, max, WithMetaclass]
for x in tests:
self.assertIs(copy.copy(x), x)
def test_copy_list(self):
x = [1, 2, 3]
self.assertEqual(copy.copy(x), x)
def test_copy_tuple(self):
x = (1, 2, 3)
self.assertEqual(copy.copy(x), x)
def test_copy_dict(self):
x = {"foo": 1, "bar": 2}
self.assertEqual(copy.copy(x), x)
def test_copy_inst_vanilla(self):
class C:
def __init__(self, foo):
self.foo = foo
def __eq__(self, other):
return self.foo == other.foo
x = C(42)
self.assertEqual(copy.copy(x), x)
def test_copy_inst_copy(self):
class C:
def __init__(self, foo):
self.foo = foo
def __copy__(self):
return C(self.foo)
def __eq__(self, other):
return self.foo == other.foo
x = C(42)
self.assertEqual(copy.copy(x), x)
def test_copy_inst_getinitargs(self):
class C:
def __init__(self, foo):
self.foo = foo
def __getinitargs__(self):
return (self.foo,)
def __eq__(self, other):
return self.foo == other.foo
x = C(42)
self.assertEqual(copy.copy(x), x)
def test_copy_inst_getstate(self):
class C:
def __init__(self, foo):
self.foo = foo
def __getstate__(self):
return {"foo": self.foo}
def __eq__(self, other):
return self.foo == other.foo
x = C(42)
self.assertEqual(copy.copy(x), x)
def test_copy_inst_setstate(self):
class C:
def __init__(self, foo):
self.foo = foo
def __setstate__(self, state):
self.foo = state["foo"]
def __eq__(self, other):
return self.foo == other.foo
x = C(42)
self.assertEqual(copy.copy(x), x)
def test_copy_inst_getstate_setstate(self):
class C:
def __init__(self, foo):
self.foo = foo
def __getstate__(self):
return self.foo
def __setstate__(self, state):
self.foo = state
def __eq__(self, other):
return self.foo == other.foo
x = C(42)
self.assertEqual(copy.copy(x), x)
# The deepcopy() method
def test_deepcopy_basic(self):
x = 42
y = copy.deepcopy(x)
self.assertEqual(y, x)
def test_deepcopy_memo(self):
# Tests of reflexive objects are under type-specific sections below.
# This tests only repetitions of objects.
x = []
x = [x, x]
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assertIsNot(y, x)
self.assertIsNot(y[0], x[0])
self.assertIs(y[0], y[1])
def test_deepcopy_issubclass(self):
# XXX Note: there's no way to test the TypeError coming out of
# issubclass() -- this can only happen when an extension
# module defines a "type" that doesn't formally inherit from
# type.
class Meta(type):
pass
class C(metaclass=Meta):
pass
self.assertEqual(copy.deepcopy(C), C)
def test_deepcopy_deepcopy(self):
class C(object):
def __init__(self, foo):
self.foo = foo
def __deepcopy__(self, memo=None):
return C(self.foo)
x = C(42)
y = copy.deepcopy(x)
self.assertEqual(y.__class__, x.__class__)
self.assertEqual(y.foo, x.foo)
def test_deepcopy_registry(self):
class C(object):
def __new__(cls, foo):
obj = object.__new__(cls)
obj.foo = foo
return obj
def pickle_C(obj):
return (C, (obj.foo,))
x = C(42)
self.assertRaises(TypeError, copy.deepcopy, x)
copyreg.pickle(C, pickle_C, C)
y = copy.deepcopy(x)
def test_deepcopy_reduce_ex(self):
class C(object):
def __reduce_ex__(self, proto):
c.append(1)
return ""
def __reduce__(self):
self.fail("shouldn't call this")
c = []
x = C()
y = copy.deepcopy(x)
self.assertIs(y, x)
self.assertEqual(c, [1])
def test_deepcopy_reduce(self):
class C(object):
def __reduce__(self):
c.append(1)
return ""
c = []
x = C()
y = copy.deepcopy(x)
self.assertIs(y, x)
self.assertEqual(c, [1])
def test_deepcopy_cant(self):
class C(object):
def __getattribute__(self, name):
if name.startswith("__reduce"):
raise AttributeError(name)
return object.__getattribute__(self, name)
x = C()
self.assertRaises(copy.Error, copy.deepcopy, x)
# Type-specific _deepcopy_xxx() methods
def test_deepcopy_atomic(self):
class Classic:
pass
class NewStyle(object):
pass
def f():
pass
tests = [None, 42, 2**100, 3.14, True, False, 1j,
"hello", "hello\u1234", f.__code__,
NewStyle, range(10), Classic, max]
for x in tests:
self.assertIs(copy.deepcopy(x), x)
def test_deepcopy_list(self):
x = [[1, 2], 3]
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assertIsNot(x, y)
self.assertIsNot(x[0], y[0])
def test_deepcopy_reflexive_list(self):
x = []
x.append(x)
y = copy.deepcopy(x)
for op in comparisons:
self.assertRaises(RuntimeError, op, y, x)
self.assertIsNot(y, x)
self.assertIs(y[0], y)
self.assertEqual(len(y), 1)
def test_deepcopy_empty_tuple(self):
x = ()
y = copy.deepcopy(x)
self.assertIs(x, y)
def test_deepcopy_tuple(self):
x = ([1, 2], 3)
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assertIsNot(x, y)
self.assertIsNot(x[0], y[0])
def test_deepcopy_tuple_of_immutables(self):
x = ((1, 2), 3)
y = copy.deepcopy(x)
self.assertIs(x, y)
def test_deepcopy_reflexive_tuple(self):
x = ([],)
x[0].append(x)
y = copy.deepcopy(x)
for op in comparisons:
self.assertRaises(RuntimeError, op, y, x)
self.assertIsNot(y, x)
self.assertIsNot(y[0], x[0])
self.assertIs(y[0][0], y)
def test_deepcopy_dict(self):
x = {"foo": [1, 2], "bar": 3}
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assertIsNot(x, y)
self.assertIsNot(x["foo"], y["foo"])
def test_deepcopy_reflexive_dict(self):
x = {}
x['foo'] = x
y = copy.deepcopy(x)
for op in order_comparisons:
self.assertRaises(TypeError, op, y, x)
for op in equality_comparisons:
self.assertRaises(RuntimeError, op, y, x)
self.assertIsNot(y, x)
self.assertIs(y['foo'], y)
self.assertEqual(len(y), 1)
def test_deepcopy_keepalive(self):
memo = {}
x = []
y = copy.deepcopy(x, memo)
self.assertIs(memo[id(memo)][0], x)
def test_deepcopy_dont_memo_immutable(self):
memo = {}
x = [1, 2, 3, 4]
y = copy.deepcopy(x, memo)
self.assertEqual(y, x)
# There's the entry for the new list, and the keep alive.
self.assertEqual(len(memo), 2)
memo = {}
x = [(1, 2)]
y = copy.deepcopy(x, memo)
self.assertEqual(y, x)
# Tuples with immutable contents are immutable for deepcopy.
self.assertEqual(len(memo), 2)
def test_deepcopy_inst_vanilla(self):
class C:
def __init__(self, foo):
self.foo = foo
def __eq__(self, other):
return self.foo == other.foo
x = C([42])
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assertIsNot(y.foo, x.foo)
def test_deepcopy_inst_deepcopy(self):
class C:
def __init__(self, foo):
self.foo = foo
def __deepcopy__(self, memo):
return C(copy.deepcopy(self.foo, memo))
def __eq__(self, other):
return self.foo == other.foo
x = C([42])
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assertIsNot(y, x)
self.assertIsNot(y.foo, x.foo)
def test_deepcopy_inst_getinitargs(self):
class C:
def __init__(self, foo):
self.foo = foo
def __getinitargs__(self):
return (self.foo,)
def __eq__(self, other):
return self.foo == other.foo
x = C([42])
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assertIsNot(y, x)
self.assertIsNot(y.foo, x.foo)
def test_deepcopy_inst_getstate(self):
class C:
def __init__(self, foo):
self.foo = foo
def __getstate__(self):
return {"foo": self.foo}
def __eq__(self, other):
return self.foo == other.foo
x = C([42])
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assertIsNot(y, x)
self.assertIsNot(y.foo, x.foo)
def test_deepcopy_inst_setstate(self):
class C:
def __init__(self, foo):
self.foo = foo
def __setstate__(self, state):
self.foo = state["foo"]
def __eq__(self, other):
return self.foo == other.foo
x = C([42])
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assertIsNot(y, x)
self.assertIsNot(y.foo, x.foo)
def test_deepcopy_inst_getstate_setstate(self):
class C:
def __init__(self, foo):
self.foo = foo
def __getstate__(self):
return self.foo
def __setstate__(self, state):
self.foo = state
def __eq__(self, other):
return self.foo == other.foo
x = C([42])
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assertIsNot(y, x)
self.assertIsNot(y.foo, x.foo)
def test_deepcopy_reflexive_inst(self):
class C:
pass
x = C()
x.foo = x
y = copy.deepcopy(x)
self.assertIsNot(y, x)
self.assertIs(y.foo, y)
# _reconstruct()
def test_reconstruct_string(self):
class C(object):
def __reduce__(self):
return ""
x = C()
y = copy.copy(x)
self.assertIs(y, x)
y = copy.deepcopy(x)
self.assertIs(y, x)
def test_reconstruct_nostate(self):
class C(object):
def __reduce__(self):
return (C, ())
x = C()
x.foo = 42
y = copy.copy(x)
self.assertIs(y.__class__, x.__class__)
y = copy.deepcopy(x)
self.assertIs(y.__class__, x.__class__)
def test_reconstruct_state(self):
class C(object):
def __reduce__(self):
return (C, (), self.__dict__)
def __eq__(self, other):
return self.__dict__ == other.__dict__
x = C()
x.foo = [42]
y = copy.copy(x)
self.assertEqual(y, x)
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assertIsNot(y.foo, x.foo)
def test_reconstruct_state_setstate(self):
class C(object):
def __reduce__(self):
return (C, (), self.__dict__)
def __setstate__(self, state):
self.__dict__.update(state)
def __eq__(self, other):
return self.__dict__ == other.__dict__
x = C()
x.foo = [42]
y = copy.copy(x)
self.assertEqual(y, x)
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assertIsNot(y.foo, x.foo)
def test_reconstruct_reflexive(self):
class C(object):
pass
x = C()
x.foo = x
y = copy.deepcopy(x)
self.assertIsNot(y, x)
self.assertIs(y.foo, y)
# Additions for Python 2.3 and pickle protocol 2
def test_reduce_4tuple(self):
class C(list):
def __reduce__(self):
return (C, (), self.__dict__, iter(self))
def __eq__(self, other):
return (list(self) == list(other) and
self.__dict__ == other.__dict__)
x = C([[1, 2], 3])
y = copy.copy(x)
self.assertEqual(x, y)
self.assertIsNot(x, y)
self.assertIs(x[0], y[0])
y = copy.deepcopy(x)
self.assertEqual(x, y)
self.assertIsNot(x, y)
self.assertIsNot(x[0], y[0])
def test_reduce_5tuple(self):
class C(dict):
def __reduce__(self):
return (C, (), self.__dict__, None, self.items())
def __eq__(self, other):
return (dict(self) == dict(other) and
self.__dict__ == other.__dict__)
x = C([("foo", [1, 2]), ("bar", 3)])
y = copy.copy(x)
self.assertEqual(x, y)
self.assertIsNot(x, y)
self.assertIs(x["foo"], y["foo"])
y = copy.deepcopy(x)
self.assertEqual(x, y)
self.assertIsNot(x, y)
self.assertIsNot(x["foo"], y["foo"])
def test_copy_slots(self):
class C(object):
__slots__ = ["foo"]
x = C()
x.foo = [42]
y = copy.copy(x)
self.assertIs(x.foo, y.foo)
def test_deepcopy_slots(self):
class C(object):
__slots__ = ["foo"]
x = C()
x.foo = [42]
y = copy.deepcopy(x)
self.assertEqual(x.foo, y.foo)
self.assertIsNot(x.foo, y.foo)
def test_deepcopy_dict_subclass(self):
class C(dict):
def __init__(self, d=None):
if not d:
d = {}
self._keys = list(d.keys())
super().__init__(d)
def __setitem__(self, key, item):
super().__setitem__(key, item)
if key not in self._keys:
self._keys.append(key)
x = C(d={'foo':0})
y = copy.deepcopy(x)
self.assertEqual(x, y)
self.assertEqual(x._keys, y._keys)
self.assertIsNot(x, y)
x['bar'] = 1
self.assertNotEqual(x, y)
self.assertNotEqual(x._keys, y._keys)
def test_copy_list_subclass(self):
class C(list):
pass
x = C([[1, 2], 3])
x.foo = [4, 5]
y = copy.copy(x)
self.assertEqual(list(x), list(y))
self.assertEqual(x.foo, y.foo)
self.assertIs(x[0], y[0])
self.assertIs(x.foo, y.foo)
def test_deepcopy_list_subclass(self):
class C(list):
pass
x = C([[1, 2], 3])
x.foo = [4, 5]
y = copy.deepcopy(x)
self.assertEqual(list(x), list(y))
self.assertEqual(x.foo, y.foo)
self.assertIsNot(x[0], y[0])
self.assertIsNot(x.foo, y.foo)
def test_copy_tuple_subclass(self):
class C(tuple):
pass
x = C([1, 2, 3])
self.assertEqual(tuple(x), (1, 2, 3))
y = copy.copy(x)
self.assertEqual(tuple(y), (1, 2, 3))
def test_deepcopy_tuple_subclass(self):
class C(tuple):
pass
x = C([[1, 2], 3])
self.assertEqual(tuple(x), ([1, 2], 3))
y = copy.deepcopy(x)
self.assertEqual(tuple(y), ([1, 2], 3))
self.assertIsNot(x, y)
self.assertIsNot(x[0], y[0])
def test_getstate_exc(self):
class EvilState(object):
def __getstate__(self):
raise ValueError("ain't got no stickin' state")
self.assertRaises(ValueError, copy.copy, EvilState())
def test_copy_function(self):
self.assertEqual(copy.copy(global_foo), global_foo)
def foo(x, y): return x+y
self.assertEqual(copy.copy(foo), foo)
bar = lambda: None
self.assertEqual(copy.copy(bar), bar)
def test_deepcopy_function(self):
self.assertEqual(copy.deepcopy(global_foo), global_foo)
def foo(x, y): return x+y
self.assertEqual(copy.deepcopy(foo), foo)
bar = lambda: None
self.assertEqual(copy.deepcopy(bar), bar)
def _check_weakref(self, _copy):
class C(object):
pass
obj = C()
x = weakref.ref(obj)
y = _copy(x)
self.assertIs(y, x)
del obj
y = _copy(x)
self.assertIs(y, x)
def test_copy_weakref(self):
self._check_weakref(copy.copy)
def test_deepcopy_weakref(self):
self._check_weakref(copy.deepcopy)
def _check_copy_weakdict(self, _dicttype):
class C(object):
pass
a, b, c, d = [C() for i in range(4)]
u = _dicttype()
u[a] = b
u[c] = d
v = copy.copy(u)
self.assertIsNot(v, u)
self.assertEqual(v, u)
self.assertEqual(v[a], b)
self.assertEqual(v[c], d)
self.assertEqual(len(v), 2)
del c, d
self.assertEqual(len(v), 1)
x, y = C(), C()
# The underlying containers are decoupled
v[x] = y
self.assertNotIn(x, u)
def test_copy_weakkeydict(self):
self._check_copy_weakdict(weakref.WeakKeyDictionary)
def test_copy_weakvaluedict(self):
self._check_copy_weakdict(weakref.WeakValueDictionary)
def test_deepcopy_weakkeydict(self):
class C(object):
def __init__(self, i):
self.i = i
a, b, c, d = [C(i) for i in range(4)]
u = weakref.WeakKeyDictionary()
u[a] = b
u[c] = d
# Keys aren't copied, values are
v = copy.deepcopy(u)
self.assertNotEqual(v, u)
self.assertEqual(len(v), 2)
self.assertIsNot(v[a], b)
self.assertIsNot(v[c], d)
self.assertEqual(v[a].i, b.i)
self.assertEqual(v[c].i, d.i)
del c
self.assertEqual(len(v), 1)
def test_deepcopy_weakvaluedict(self):
class C(object):
def __init__(self, i):
self.i = i
a, b, c, d = [C(i) for i in range(4)]
u = weakref.WeakValueDictionary()
u[a] = b
u[c] = d
# Keys are copied, values aren't
v = copy.deepcopy(u)
self.assertNotEqual(v, u)
self.assertEqual(len(v), 2)
(x, y), (z, t) = sorted(v.items(), key=lambda pair: pair[0].i)
self.assertIsNot(x, a)
self.assertEqual(x.i, a.i)
self.assertIs(y, b)
self.assertIsNot(z, c)
self.assertEqual(z.i, c.i)
self.assertIs(t, d)
del x, y, z, t
del d
self.assertEqual(len(v), 1)
def test_deepcopy_bound_method(self):
class Foo(object):
def m(self):
pass
f = Foo()
f.b = f.m
g = copy.deepcopy(f)
self.assertEqual(g.m, g.b)
self.assertIs(g.b.__self__, g)
g.b()
def global_foo(x, y): return x+y
def test_main():
support.run_unittest(TestCopy)
if __name__ == "__main__":
test_main()
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
=======
"""Unit tests for the copy module."""
import copy
import copyreg
import weakref
import abc
from operator import le, lt, ge, gt, eq, ne
import unittest
from test import support
order_comparisons = le, lt, ge, gt
equality_comparisons = eq, ne
comparisons = order_comparisons + equality_comparisons
class TestCopy(unittest.TestCase):
# Attempt full line coverage of copy.py from top to bottom
def test_exceptions(self):
self.assertIs(copy.Error, copy.error)
self.assertTrue(issubclass(copy.Error, Exception))
# The copy() method
def test_copy_basic(self):
x = 42
y = copy.copy(x)
self.assertEqual(x, y)
def test_copy_copy(self):
class C(object):
def __init__(self, foo):
self.foo = foo
def __copy__(self):
return C(self.foo)
x = C(42)
y = copy.copy(x)
self.assertEqual(y.__class__, x.__class__)
self.assertEqual(y.foo, x.foo)
def test_copy_registry(self):
class C(object):
def __new__(cls, foo):
obj = object.__new__(cls)
obj.foo = foo
return obj
def pickle_C(obj):
return (C, (obj.foo,))
x = C(42)
self.assertRaises(TypeError, copy.copy, x)
copyreg.pickle(C, pickle_C, C)
y = copy.copy(x)
def test_copy_reduce_ex(self):
class C(object):
def __reduce_ex__(self, proto):
c.append(1)
return ""
def __reduce__(self):
self.fail("shouldn't call this")
c = []
x = C()
y = copy.copy(x)
self.assertIs(y, x)
self.assertEqual(c, [1])
def test_copy_reduce(self):
class C(object):
def __reduce__(self):
c.append(1)
return ""
c = []
x = C()
y = copy.copy(x)
self.assertIs(y, x)
self.assertEqual(c, [1])
def test_copy_cant(self):
class C(object):
def __getattribute__(self, name):
if name.startswith("__reduce"):
raise AttributeError(name)
return object.__getattribute__(self, name)
x = C()
self.assertRaises(copy.Error, copy.copy, x)
# Type-specific _copy_xxx() methods
def test_copy_atomic(self):
class Classic:
pass
class NewStyle(object):
pass
def f():
pass
class WithMetaclass(metaclass=abc.ABCMeta):
pass
tests = [None, 42, 2**100, 3.14, True, False, 1j,
"hello", "hello\u1234", f.__code__,
b"world", bytes(range(256)),
NewStyle, range(10), Classic, max, WithMetaclass]
for x in tests:
self.assertIs(copy.copy(x), x)
def test_copy_list(self):
x = [1, 2, 3]
self.assertEqual(copy.copy(x), x)
def test_copy_tuple(self):
x = (1, 2, 3)
self.assertEqual(copy.copy(x), x)
def test_copy_dict(self):
x = {"foo": 1, "bar": 2}
self.assertEqual(copy.copy(x), x)
def test_copy_inst_vanilla(self):
class C:
def __init__(self, foo):
self.foo = foo
def __eq__(self, other):
return self.foo == other.foo
x = C(42)
self.assertEqual(copy.copy(x), x)
def test_copy_inst_copy(self):
class C:
def __init__(self, foo):
self.foo = foo
def __copy__(self):
return C(self.foo)
def __eq__(self, other):
return self.foo == other.foo
x = C(42)
self.assertEqual(copy.copy(x), x)
def test_copy_inst_getinitargs(self):
class C:
def __init__(self, foo):
self.foo = foo
def __getinitargs__(self):
return (self.foo,)
def __eq__(self, other):
return self.foo == other.foo
x = C(42)
self.assertEqual(copy.copy(x), x)
def test_copy_inst_getstate(self):
class C:
def __init__(self, foo):
self.foo = foo
def __getstate__(self):
return {"foo": self.foo}
def __eq__(self, other):
return self.foo == other.foo
x = C(42)
self.assertEqual(copy.copy(x), x)
def test_copy_inst_setstate(self):
class C:
def __init__(self, foo):
self.foo = foo
def __setstate__(self, state):
self.foo = state["foo"]
def __eq__(self, other):
return self.foo == other.foo
x = C(42)
self.assertEqual(copy.copy(x), x)
def test_copy_inst_getstate_setstate(self):
class C:
def __init__(self, foo):
self.foo = foo
def __getstate__(self):
return self.foo
def __setstate__(self, state):
self.foo = state
def __eq__(self, other):
return self.foo == other.foo
x = C(42)
self.assertEqual(copy.copy(x), x)
# The deepcopy() method
def test_deepcopy_basic(self):
x = 42
y = copy.deepcopy(x)
self.assertEqual(y, x)
def test_deepcopy_memo(self):
# Tests of reflexive objects are under type-specific sections below.
# This tests only repetitions of objects.
x = []
x = [x, x]
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assertIsNot(y, x)
self.assertIsNot(y[0], x[0])
self.assertIs(y[0], y[1])
def test_deepcopy_issubclass(self):
# XXX Note: there's no way to test the TypeError coming out of
# issubclass() -- this can only happen when an extension
# module defines a "type" that doesn't formally inherit from
# type.
class Meta(type):
pass
class C(metaclass=Meta):
pass
self.assertEqual(copy.deepcopy(C), C)
def test_deepcopy_deepcopy(self):
class C(object):
def __init__(self, foo):
self.foo = foo
def __deepcopy__(self, memo=None):
return C(self.foo)
x = C(42)
y = copy.deepcopy(x)
self.assertEqual(y.__class__, x.__class__)
self.assertEqual(y.foo, x.foo)
def test_deepcopy_registry(self):
class C(object):
def __new__(cls, foo):
obj = object.__new__(cls)
obj.foo = foo
return obj
def pickle_C(obj):
return (C, (obj.foo,))
x = C(42)
self.assertRaises(TypeError, copy.deepcopy, x)
copyreg.pickle(C, pickle_C, C)
y = copy.deepcopy(x)
def test_deepcopy_reduce_ex(self):
class C(object):
def __reduce_ex__(self, proto):
c.append(1)
return ""
def __reduce__(self):
self.fail("shouldn't call this")
c = []
x = C()
y = copy.deepcopy(x)
self.assertIs(y, x)
self.assertEqual(c, [1])
def test_deepcopy_reduce(self):
class C(object):
def __reduce__(self):
c.append(1)
return ""
c = []
x = C()
y = copy.deepcopy(x)
self.assertIs(y, x)
self.assertEqual(c, [1])
def test_deepcopy_cant(self):
class C(object):
def __getattribute__(self, name):
if name.startswith("__reduce"):
raise AttributeError(name)
return object.__getattribute__(self, name)
x = C()
self.assertRaises(copy.Error, copy.deepcopy, x)
# Type-specific _deepcopy_xxx() methods
def test_deepcopy_atomic(self):
class Classic:
pass
class NewStyle(object):
pass
def f():
pass
tests = [None, 42, 2**100, 3.14, True, False, 1j,
"hello", "hello\u1234", f.__code__,
NewStyle, range(10), Classic, max]
for x in tests:
self.assertIs(copy.deepcopy(x), x)
def test_deepcopy_list(self):
x = [[1, 2], 3]
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assertIsNot(x, y)
self.assertIsNot(x[0], y[0])
def test_deepcopy_reflexive_list(self):
x = []
x.append(x)
y = copy.deepcopy(x)
for op in comparisons:
self.assertRaises(RuntimeError, op, y, x)
self.assertIsNot(y, x)
self.assertIs(y[0], y)
self.assertEqual(len(y), 1)
def test_deepcopy_empty_tuple(self):
x = ()
y = copy.deepcopy(x)
self.assertIs(x, y)
def test_deepcopy_tuple(self):
x = ([1, 2], 3)
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assertIsNot(x, y)
self.assertIsNot(x[0], y[0])
def test_deepcopy_tuple_of_immutables(self):
x = ((1, 2), 3)
y = copy.deepcopy(x)
self.assertIs(x, y)
def test_deepcopy_reflexive_tuple(self):
x = ([],)
x[0].append(x)
y = copy.deepcopy(x)
for op in comparisons:
self.assertRaises(RuntimeError, op, y, x)
self.assertIsNot(y, x)
self.assertIsNot(y[0], x[0])
self.assertIs(y[0][0], y)
def test_deepcopy_dict(self):
x = {"foo": [1, 2], "bar": 3}
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assertIsNot(x, y)
self.assertIsNot(x["foo"], y["foo"])
def test_deepcopy_reflexive_dict(self):
x = {}
x['foo'] = x
y = copy.deepcopy(x)
for op in order_comparisons:
self.assertRaises(TypeError, op, y, x)
for op in equality_comparisons:
self.assertRaises(RuntimeError, op, y, x)
self.assertIsNot(y, x)
self.assertIs(y['foo'], y)
self.assertEqual(len(y), 1)
def test_deepcopy_keepalive(self):
memo = {}
x = []
y = copy.deepcopy(x, memo)
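        # deepcopy keeps the originals it copied alive in memo[id(memo)] so
        # their ids cannot be recycled while the copy is in progress.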
self.assertIs(memo[id(memo)][0], x)
def test_deepcopy_dont_memo_immutable(self):
memo = {}
x = [1, 2, 3, 4]
y = copy.deepcopy(x, memo)
self.assertEqual(y, x)
        # There's one entry for the new list, plus the keep-alive entry.
self.assertEqual(len(memo), 2)
memo = {}
x = [(1, 2)]
y = copy.deepcopy(x, memo)
self.assertEqual(y, x)
# Tuples with immutable contents are immutable for deepcopy.
self.assertEqual(len(memo), 2)
def test_deepcopy_inst_vanilla(self):
class C:
def __init__(self, foo):
self.foo = foo
def __eq__(self, other):
return self.foo == other.foo
x = C([42])
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assertIsNot(y.foo, x.foo)
def test_deepcopy_inst_deepcopy(self):
class C:
def __init__(self, foo):
self.foo = foo
def __deepcopy__(self, memo):
return C(copy.deepcopy(self.foo, memo))
def __eq__(self, other):
return self.foo == other.foo
x = C([42])
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assertIsNot(y, x)
self.assertIsNot(y.foo, x.foo)
def test_deepcopy_inst_getinitargs(self):
class C:
def __init__(self, foo):
self.foo = foo
def __getinitargs__(self):
return (self.foo,)
def __eq__(self, other):
return self.foo == other.foo
x = C([42])
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assertIsNot(y, x)
self.assertIsNot(y.foo, x.foo)
def test_deepcopy_inst_getstate(self):
class C:
def __init__(self, foo):
self.foo = foo
def __getstate__(self):
return {"foo": self.foo}
def __eq__(self, other):
return self.foo == other.foo
x = C([42])
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assertIsNot(y, x)
self.assertIsNot(y.foo, x.foo)
def test_deepcopy_inst_setstate(self):
class C:
def __init__(self, foo):
self.foo = foo
def __setstate__(self, state):
self.foo = state["foo"]
def __eq__(self, other):
return self.foo == other.foo
x = C([42])
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assertIsNot(y, x)
self.assertIsNot(y.foo, x.foo)
def test_deepcopy_inst_getstate_setstate(self):
class C:
def __init__(self, foo):
self.foo = foo
def __getstate__(self):
return self.foo
def __setstate__(self, state):
self.foo = state
def __eq__(self, other):
return self.foo == other.foo
x = C([42])
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assertIsNot(y, x)
self.assertIsNot(y.foo, x.foo)
def test_deepcopy_reflexive_inst(self):
class C:
pass
x = C()
x.foo = x
y = copy.deepcopy(x)
self.assertIsNot(y, x)
self.assertIs(y.foo, y)
# _reconstruct()
def test_reconstruct_string(self):
class C(object):
def __reduce__(self):
return ""
x = C()
y = copy.copy(x)
self.assertIs(y, x)
y = copy.deepcopy(x)
self.assertIs(y, x)
def test_reconstruct_nostate(self):
class C(object):
def __reduce__(self):
return (C, ())
x = C()
x.foo = 42
y = copy.copy(x)
self.assertIs(y.__class__, x.__class__)
y = copy.deepcopy(x)
self.assertIs(y.__class__, x.__class__)
def test_reconstruct_state(self):
class C(object):
def __reduce__(self):
return (C, (), self.__dict__)
def __eq__(self, other):
return self.__dict__ == other.__dict__
x = C()
x.foo = [42]
y = copy.copy(x)
self.assertEqual(y, x)
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assertIsNot(y.foo, x.foo)
def test_reconstruct_state_setstate(self):
class C(object):
def __reduce__(self):
return (C, (), self.__dict__)
def __setstate__(self, state):
self.__dict__.update(state)
def __eq__(self, other):
return self.__dict__ == other.__dict__
x = C()
x.foo = [42]
y = copy.copy(x)
self.assertEqual(y, x)
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assertIsNot(y.foo, x.foo)
def test_reconstruct_reflexive(self):
class C(object):
pass
x = C()
x.foo = x
y = copy.deepcopy(x)
self.assertIsNot(y, x)
self.assertIs(y.foo, y)
# Additions for Python 2.3 and pickle protocol 2
def test_reduce_4tuple(self):
class C(list):
def __reduce__(self):
return (C, (), self.__dict__, iter(self))
def __eq__(self, other):
return (list(self) == list(other) and
self.__dict__ == other.__dict__)
x = C([[1, 2], 3])
y = copy.copy(x)
self.assertEqual(x, y)
self.assertIsNot(x, y)
self.assertIs(x[0], y[0])
y = copy.deepcopy(x)
self.assertEqual(x, y)
self.assertIsNot(x, y)
self.assertIsNot(x[0], y[0])
def test_reduce_5tuple(self):
class C(dict):
def __reduce__(self):
return (C, (), self.__dict__, None, self.items())
def __eq__(self, other):
return (dict(self) == dict(other) and
self.__dict__ == other.__dict__)
x = C([("foo", [1, 2]), ("bar", 3)])
y = copy.copy(x)
self.assertEqual(x, y)
self.assertIsNot(x, y)
self.assertIs(x["foo"], y["foo"])
y = copy.deepcopy(x)
self.assertEqual(x, y)
self.assertIsNot(x, y)
self.assertIsNot(x["foo"], y["foo"])
def test_copy_slots(self):
class C(object):
__slots__ = ["foo"]
x = C()
x.foo = [42]
y = copy.copy(x)
self.assertIs(x.foo, y.foo)
def test_deepcopy_slots(self):
class C(object):
__slots__ = ["foo"]
x = C()
x.foo = [42]
y = copy.deepcopy(x)
self.assertEqual(x.foo, y.foo)
self.assertIsNot(x.foo, y.foo)
def test_deepcopy_dict_subclass(self):
class C(dict):
def __init__(self, d=None):
if not d:
d = {}
self._keys = list(d.keys())
super().__init__(d)
def __setitem__(self, key, item):
super().__setitem__(key, item)
if key not in self._keys:
self._keys.append(key)
x = C(d={'foo':0})
y = copy.deepcopy(x)
self.assertEqual(x, y)
self.assertEqual(x._keys, y._keys)
self.assertIsNot(x, y)
x['bar'] = 1
self.assertNotEqual(x, y)
self.assertNotEqual(x._keys, y._keys)
def test_copy_list_subclass(self):
class C(list):
pass
x = C([[1, 2], 3])
x.foo = [4, 5]
y = copy.copy(x)
self.assertEqual(list(x), list(y))
self.assertEqual(x.foo, y.foo)
self.assertIs(x[0], y[0])
self.assertIs(x.foo, y.foo)
def test_deepcopy_list_subclass(self):
class C(list):
pass
x = C([[1, 2], 3])
x.foo = [4, 5]
y = copy.deepcopy(x)
self.assertEqual(list(x), list(y))
self.assertEqual(x.foo, y.foo)
self.assertIsNot(x[0], y[0])
self.assertIsNot(x.foo, y.foo)
def test_copy_tuple_subclass(self):
class C(tuple):
pass
x = C([1, 2, 3])
self.assertEqual(tuple(x), (1, 2, 3))
y = copy.copy(x)
self.assertEqual(tuple(y), (1, 2, 3))
def test_deepcopy_tuple_subclass(self):
class C(tuple):
pass
x = C([[1, 2], 3])
self.assertEqual(tuple(x), ([1, 2], 3))
y = copy.deepcopy(x)
self.assertEqual(tuple(y), ([1, 2], 3))
self.assertIsNot(x, y)
self.assertIsNot(x[0], y[0])
def test_getstate_exc(self):
class EvilState(object):
def __getstate__(self):
raise ValueError("ain't got no stickin' state")
self.assertRaises(ValueError, copy.copy, EvilState())
def test_copy_function(self):
self.assertEqual(copy.copy(global_foo), global_foo)
def foo(x, y): return x+y
self.assertEqual(copy.copy(foo), foo)
bar = lambda: None
self.assertEqual(copy.copy(bar), bar)
def test_deepcopy_function(self):
self.assertEqual(copy.deepcopy(global_foo), global_foo)
def foo(x, y): return x+y
self.assertEqual(copy.deepcopy(foo), foo)
bar = lambda: None
self.assertEqual(copy.deepcopy(bar), bar)
def _check_weakref(self, _copy):
class C(object):
pass
obj = C()
x = weakref.ref(obj)
y = _copy(x)
self.assertIs(y, x)
del obj
y = _copy(x)
self.assertIs(y, x)
def test_copy_weakref(self):
self._check_weakref(copy.copy)
def test_deepcopy_weakref(self):
self._check_weakref(copy.deepcopy)
def _check_copy_weakdict(self, _dicttype):
class C(object):
pass
a, b, c, d = [C() for i in range(4)]
u = _dicttype()
u[a] = b
u[c] = d
v = copy.copy(u)
self.assertIsNot(v, u)
self.assertEqual(v, u)
self.assertEqual(v[a], b)
self.assertEqual(v[c], d)
self.assertEqual(len(v), 2)
del c, d
self.assertEqual(len(v), 1)
x, y = C(), C()
# The underlying containers are decoupled
v[x] = y
self.assertNotIn(x, u)
def test_copy_weakkeydict(self):
self._check_copy_weakdict(weakref.WeakKeyDictionary)
def test_copy_weakvaluedict(self):
self._check_copy_weakdict(weakref.WeakValueDictionary)
def test_deepcopy_weakkeydict(self):
class C(object):
def __init__(self, i):
self.i = i
a, b, c, d = [C(i) for i in range(4)]
u = weakref.WeakKeyDictionary()
u[a] = b
u[c] = d
# Keys aren't copied, values are
v = copy.deepcopy(u)
self.assertNotEqual(v, u)
self.assertEqual(len(v), 2)
self.assertIsNot(v[a], b)
self.assertIsNot(v[c], d)
self.assertEqual(v[a].i, b.i)
self.assertEqual(v[c].i, d.i)
del c
self.assertEqual(len(v), 1)
def test_deepcopy_weakvaluedict(self):
class C(object):
def __init__(self, i):
self.i = i
a, b, c, d = [C(i) for i in range(4)]
u = weakref.WeakValueDictionary()
u[a] = b
u[c] = d
# Keys are copied, values aren't
v = copy.deepcopy(u)
self.assertNotEqual(v, u)
self.assertEqual(len(v), 2)
(x, y), (z, t) = sorted(v.items(), key=lambda pair: pair[0].i)
self.assertIsNot(x, a)
self.assertEqual(x.i, a.i)
self.assertIs(y, b)
self.assertIsNot(z, c)
self.assertEqual(z.i, c.i)
self.assertIs(t, d)
del x, y, z, t
del d
self.assertEqual(len(v), 1)
def test_deepcopy_bound_method(self):
class Foo(object):
def m(self):
pass
f = Foo()
f.b = f.m
g = copy.deepcopy(f)
self.assertEqual(g.m, g.b)
self.assertIs(g.b.__self__, g)
g.b()
def global_foo(x, y): return x+y
def test_main():
support.run_unittest(TestCopy)
if __name__ == "__main__":
test_main()
| ArcherSys/ArcherSys | Lib/test/test_copy.py | Python | mit | 67,430 |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.words.protocols.jabber.xmlstream}.
"""
from twisted.trial import unittest
from zope.interface.verify import verifyObject
from twisted.internet import defer, task
from twisted.internet.error import ConnectionLost
from twisted.internet.interfaces import IProtocolFactory
from twisted.python import failure
from twisted.test import proto_helpers
from twisted.words.test.test_xmlstream import GenericXmlStreamFactoryTestsMixin
from twisted.words.xish import domish
from twisted.words.protocols.jabber import error, ijabber, jid, xmlstream
NS_XMPP_TLS = 'urn:ietf:params:xml:ns:xmpp-tls'
class HashPasswordTest(unittest.TestCase):
"""
Tests for L{xmlstream.hashPassword}.
"""
def test_basic(self):
"""
The sid and secret are concatenated to calculate sha1 hex digest.
"""
hash = xmlstream.hashPassword(u"12345", u"secret")
self.assertEqual('99567ee91b2c7cabf607f10cb9f4a3634fa820e0', hash)
def test_sidNotUnicode(self):
"""
The session identifier must be a unicode object.
"""
self.assertRaises(TypeError, xmlstream.hashPassword, "\xc2\xb92345",
u"secret")
def test_passwordNotUnicode(self):
"""
The password must be a unicode object.
"""
self.assertRaises(TypeError, xmlstream.hashPassword, u"12345",
"secr\xc3\xa9t")
def test_unicodeSecret(self):
"""
The concatenated sid and password must be encoded to UTF-8 before hashing.
"""
hash = xmlstream.hashPassword(u"12345", u"secr\u00e9t")
self.assertEqual('659bf88d8f8e179081f7f3b4a8e7d224652d2853', hash)
class IQTest(unittest.TestCase):
"""
Tests both IQ and the associated IIQResponseTracker callback.
"""
def setUp(self):
authenticator = xmlstream.ConnectAuthenticator('otherhost')
authenticator.namespace = 'testns'
self.xmlstream = xmlstream.XmlStream(authenticator)
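        # Substitute a deterministic clock for callLater so iq timeouts can
        # be advanced manually in these tests.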
self.clock = task.Clock()
self.xmlstream._callLater = self.clock.callLater
self.xmlstream.makeConnection(proto_helpers.StringTransport())
self.xmlstream.dataReceived(
"<stream:stream xmlns:stream='http://etherx.jabber.org/streams' "
"xmlns='testns' from='otherhost' version='1.0'>")
self.iq = xmlstream.IQ(self.xmlstream, 'get')
def testBasic(self):
self.assertEqual(self.iq['type'], 'get')
self.assertTrue(self.iq['id'])
def testSend(self):
self.xmlstream.transport.clear()
self.iq.send()
self.assertIn(self.xmlstream.transport.value(), [
"<iq type='get' id='%s'/>" % self.iq['id'],
"<iq id='%s' type='get'/>" % self.iq['id'],
])
def testResultResponse(self):
def cb(result):
self.assertEqual(result['type'], 'result')
d = self.iq.send()
d.addCallback(cb)
xs = self.xmlstream
xs.dataReceived("<iq type='result' id='%s'/>" % self.iq['id'])
return d
def testErrorResponse(self):
d = self.iq.send()
self.assertFailure(d, error.StanzaError)
xs = self.xmlstream
xs.dataReceived("<iq type='error' id='%s'/>" % self.iq['id'])
return d
def testNonTrackedResponse(self):
"""
Test that untracked iq responses don't trigger any action.
Untracked means that the id of the incoming response iq is not
in the stream's C{iqDeferreds} dictionary.
"""
xs = self.xmlstream
xmlstream.upgradeWithIQResponseTracker(xs)
# Make sure we aren't tracking any iq's.
self.assertFalse(xs.iqDeferreds)
# Set up a fallback handler that checks the stanza's handled attribute.
# If that is set to True, the iq tracker claims to have handled the
# response.
def cb(iq):
self.assertFalse(getattr(iq, 'handled', False))
xs.addObserver("/iq", cb, -1)
# Receive an untracked iq response
xs.dataReceived("<iq type='result' id='test'/>")
def testCleanup(self):
"""
Test if the deferred associated with an iq request is removed
from the list kept in the L{XmlStream} object after it has
been fired.
"""
d = self.iq.send()
xs = self.xmlstream
xs.dataReceived("<iq type='result' id='%s'/>" % self.iq['id'])
self.assertNotIn(self.iq['id'], xs.iqDeferreds)
return d
def testDisconnectCleanup(self):
"""
Test if deferreds for iq's that haven't yet received a response
have their errback called on stream disconnect.
"""
d = self.iq.send()
xs = self.xmlstream
xs.connectionLost("Closed by peer")
self.assertFailure(d, ConnectionLost)
return d
def testNoModifyingDict(self):
"""
Test to make sure the errbacks cannot cause the iteration of the
iqDeferreds to blow up in our face.
"""
def eb(failure):
d = xmlstream.IQ(self.xmlstream).send()
d.addErrback(eb)
d = self.iq.send()
d.addErrback(eb)
self.xmlstream.connectionLost("Closed by peer")
return d
def testRequestTimingOut(self):
"""
Test that an iq request with a defined timeout times out.
"""
self.iq.timeout = 60
d = self.iq.send()
self.assertFailure(d, xmlstream.TimeoutError)
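        # Advance the deterministic clock past the 60 second timeout.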
self.clock.pump([1, 60])
self.assertFalse(self.clock.calls)
self.assertFalse(self.xmlstream.iqDeferreds)
return d
def testRequestNotTimingOut(self):
"""
Test that an iq request with a defined timeout does not time out
when a response was received before the timeout period elapsed.
"""
self.iq.timeout = 60
d = self.iq.send()
self.clock.callLater(1, self.xmlstream.dataReceived,
"<iq type='result' id='%s'/>" % self.iq['id'])
self.clock.pump([1, 1])
self.assertFalse(self.clock.calls)
return d
def testDisconnectTimeoutCancellation(self):
"""
Test if timeouts for iq's that haven't yet received a response
are cancelled on stream disconnect.
"""
self.iq.timeout = 60
d = self.iq.send()
xs = self.xmlstream
xs.connectionLost("Closed by peer")
self.assertFailure(d, ConnectionLost)
self.assertFalse(self.clock.calls)
return d
class XmlStreamTest(unittest.TestCase):
def onStreamStart(self, obj):
self.gotStreamStart = True
def onStreamEnd(self, obj):
self.gotStreamEnd = True
def onStreamError(self, obj):
self.gotStreamError = True
def setUp(self):
"""
Set up XmlStream and several observers.
"""
self.gotStreamStart = False
self.gotStreamEnd = False
self.gotStreamError = False
xs = xmlstream.XmlStream(xmlstream.Authenticator())
xs.addObserver('//event/stream/start', self.onStreamStart)
xs.addObserver('//event/stream/end', self.onStreamEnd)
xs.addObserver('//event/stream/error', self.onStreamError)
xs.makeConnection(proto_helpers.StringTransportWithDisconnection())
xs.transport.protocol = xs
xs.namespace = 'testns'
xs.version = (1, 0)
self.xmlstream = xs
def test_sendHeaderBasic(self):
"""
Basic test on the header sent by sendHeader.
"""
xs = self.xmlstream
xs.sendHeader()
splitHeader = self.xmlstream.transport.value()[0:-1].split(' ')
self.assertIn("<stream:stream", splitHeader)
self.assertIn("xmlns:stream='http://etherx.jabber.org/streams'",
splitHeader)
self.assertIn("xmlns='testns'", splitHeader)
self.assertIn("version='1.0'", splitHeader)
self.assertTrue(xs._headerSent)
def test_sendHeaderAdditionalNamespaces(self):
"""
Test for additional namespace declarations.
"""
xs = self.xmlstream
xs.prefixes['jabber:server:dialback'] = 'db'
xs.sendHeader()
splitHeader = self.xmlstream.transport.value()[0:-1].split(' ')
self.assertIn("<stream:stream", splitHeader)
self.assertIn("xmlns:stream='http://etherx.jabber.org/streams'",
splitHeader)
self.assertIn("xmlns:db='jabber:server:dialback'", splitHeader)
self.assertIn("xmlns='testns'", splitHeader)
self.assertIn("version='1.0'", splitHeader)
self.assertTrue(xs._headerSent)
def test_sendHeaderInitiating(self):
"""
Test addressing when initiating a stream.
"""
xs = self.xmlstream
xs.thisEntity = jid.JID('thisHost')
xs.otherEntity = jid.JID('otherHost')
xs.initiating = True
xs.sendHeader()
splitHeader = xs.transport.value()[0:-1].split(' ')
self.assertIn("to='otherhost'", splitHeader)
self.assertIn("from='thishost'", splitHeader)
def test_sendHeaderReceiving(self):
"""
Test addressing when receiving a stream.
"""
xs = self.xmlstream
xs.thisEntity = jid.JID('thisHost')
xs.otherEntity = jid.JID('otherHost')
xs.initiating = False
xs.sid = 'session01'
xs.sendHeader()
splitHeader = xs.transport.value()[0:-1].split(' ')
self.assertIn("to='otherhost'", splitHeader)
self.assertIn("from='thishost'", splitHeader)
self.assertIn("id='session01'", splitHeader)
def test_receiveStreamError(self):
"""
Test events when a stream error is received.
"""
xs = self.xmlstream
xs.dataReceived("<stream:stream xmlns='jabber:client' "
"xmlns:stream='http://etherx.jabber.org/streams' "
"from='example.com' id='12345' version='1.0'>")
xs.dataReceived("<stream:error/>")
self.assertTrue(self.gotStreamError)
self.assertTrue(self.gotStreamEnd)
def test_sendStreamErrorInitiating(self):
"""
Test sendStreamError on an initiating xmlstream with a header sent.
An error should be sent out and the connection lost.
"""
xs = self.xmlstream
xs.initiating = True
xs.sendHeader()
xs.transport.clear()
xs.sendStreamError(error.StreamError('version-unsupported'))
self.assertNotEqual('', xs.transport.value())
self.assertTrue(self.gotStreamEnd)
def test_sendStreamErrorInitiatingNoHeader(self):
"""
Test sendStreamError on an initiating xmlstream without having sent a
header.
        In this case, no header should be generated and the error should not
        be sent out on the stream; the connection is simply closed.
"""
xs = self.xmlstream
xs.initiating = True
xs.transport.clear()
xs.sendStreamError(error.StreamError('version-unsupported'))
self.assertNot(xs._headerSent)
self.assertEqual('', xs.transport.value())
self.assertTrue(self.gotStreamEnd)
def test_sendStreamErrorReceiving(self):
"""
Test sendStreamError on a receiving xmlstream with a header sent.
An error should be sent out and the connection lost.
"""
xs = self.xmlstream
xs.initiating = False
xs.sendHeader()
xs.transport.clear()
xs.sendStreamError(error.StreamError('version-unsupported'))
self.assertNotEqual('', xs.transport.value())
self.assertTrue(self.gotStreamEnd)
def test_sendStreamErrorReceivingNoHeader(self):
"""
Test sendStreamError on a receiving xmlstream without having sent a
header.
In this case, a header should be generated. Then, the error should
be sent out on the stream followed by closing the connection.
"""
xs = self.xmlstream
xs.initiating = False
xs.transport.clear()
xs.sendStreamError(error.StreamError('version-unsupported'))
self.assertTrue(xs._headerSent)
self.assertNotEqual('', xs.transport.value())
self.assertTrue(self.gotStreamEnd)
def test_reset(self):
"""
Test resetting the XML stream to start a new layer.
"""
xs = self.xmlstream
xs.sendHeader()
stream = xs.stream
xs.reset()
self.assertNotEqual(stream, xs.stream)
self.assertNot(xs._headerSent)
def test_send(self):
"""
Test send with various types of objects.
"""
xs = self.xmlstream
xs.send('<presence/>')
self.assertEqual(xs.transport.value(), '<presence/>')
xs.transport.clear()
el = domish.Element(('testns', 'presence'))
xs.send(el)
self.assertEqual(xs.transport.value(), '<presence/>')
xs.transport.clear()
el = domish.Element(('http://etherx.jabber.org/streams', 'features'))
xs.send(el)
self.assertEqual(xs.transport.value(), '<stream:features/>')
def test_authenticator(self):
"""
Test that the associated authenticator is correctly called.
"""
connectionMadeCalls = []
streamStartedCalls = []
associateWithStreamCalls = []
class TestAuthenticator:
def connectionMade(self):
connectionMadeCalls.append(None)
def streamStarted(self, rootElement):
streamStartedCalls.append(rootElement)
def associateWithStream(self, xs):
associateWithStreamCalls.append(xs)
a = TestAuthenticator()
xs = xmlstream.XmlStream(a)
self.assertEqual([xs], associateWithStreamCalls)
xs.connectionMade()
self.assertEqual([None], connectionMadeCalls)
xs.dataReceived("<stream:stream xmlns='jabber:client' "
"xmlns:stream='http://etherx.jabber.org/streams' "
"from='example.com' id='12345'>")
self.assertEqual(1, len(streamStartedCalls))
xs.reset()
self.assertEqual([None], connectionMadeCalls)
class TestError(Exception):
pass
class AuthenticatorTest(unittest.TestCase):
def setUp(self):
self.authenticator = xmlstream.Authenticator()
self.xmlstream = xmlstream.XmlStream(self.authenticator)
def test_streamStart(self):
"""
Test streamStart to fill the appropriate attributes from the
stream header.
"""
xs = self.xmlstream
xs.makeConnection(proto_helpers.StringTransport())
xs.dataReceived("<stream:stream xmlns='jabber:client' "
"xmlns:stream='http://etherx.jabber.org/streams' "
"from='example.org' to='example.com' id='12345' "
"version='1.0'>")
self.assertEqual((1, 0), xs.version)
self.assertIdentical(None, xs.sid)
self.assertEqual('invalid', xs.namespace)
self.assertIdentical(None, xs.otherEntity)
self.assertEqual(None, xs.thisEntity)
def test_streamStartLegacy(self):
"""
Test streamStart to fill the appropriate attributes from the
stream header for a pre-XMPP-1.0 header.
"""
xs = self.xmlstream
xs.makeConnection(proto_helpers.StringTransport())
xs.dataReceived("<stream:stream xmlns='jabber:client' "
"xmlns:stream='http://etherx.jabber.org/streams' "
"from='example.com' id='12345'>")
self.assertEqual((0, 0), xs.version)
def test_streamBadVersionOneDigit(self):
"""
Test streamStart to fill the appropriate attributes from the
stream header for a version with only one digit.
"""
xs = self.xmlstream
xs.makeConnection(proto_helpers.StringTransport())
xs.dataReceived("<stream:stream xmlns='jabber:client' "
"xmlns:stream='http://etherx.jabber.org/streams' "
"from='example.com' id='12345' version='1'>")
self.assertEqual((0, 0), xs.version)
def test_streamBadVersionNoNumber(self):
"""
Test streamStart to fill the appropriate attributes from the
stream header for a malformed version.
"""
xs = self.xmlstream
xs.makeConnection(proto_helpers.StringTransport())
xs.dataReceived("<stream:stream xmlns='jabber:client' "
"xmlns:stream='http://etherx.jabber.org/streams' "
"from='example.com' id='12345' version='blah'>")
self.assertEqual((0, 0), xs.version)
class ConnectAuthenticatorTest(unittest.TestCase):
def setUp(self):
self.gotAuthenticated = False
self.initFailure = None
self.authenticator = xmlstream.ConnectAuthenticator('otherHost')
self.xmlstream = xmlstream.XmlStream(self.authenticator)
self.xmlstream.addObserver('//event/stream/authd', self.onAuthenticated)
self.xmlstream.addObserver('//event/xmpp/initfailed', self.onInitFailed)
def onAuthenticated(self, obj):
self.gotAuthenticated = True
def onInitFailed(self, failure):
self.initFailure = failure
    def testSuccess(self):
"""
Test successful completion of an initialization step.
"""
class Initializer:
def initialize(self):
pass
init = Initializer()
self.xmlstream.initializers = [init]
self.authenticator.initializeStream()
self.assertEqual([], self.xmlstream.initializers)
self.assertTrue(self.gotAuthenticated)
def testFailure(self):
"""
Test failure of an initialization step.
"""
class Initializer:
def initialize(self):
raise TestError
init = Initializer()
self.xmlstream.initializers = [init]
self.authenticator.initializeStream()
self.assertEqual([init], self.xmlstream.initializers)
self.assertFalse(self.gotAuthenticated)
self.assertNotIdentical(None, self.initFailure)
self.assertTrue(self.initFailure.check(TestError))
def test_streamStart(self):
"""
Test streamStart to fill the appropriate attributes from the
stream header.
"""
self.authenticator.namespace = 'testns'
xs = self.xmlstream
xs.makeConnection(proto_helpers.StringTransport())
xs.dataReceived("<stream:stream xmlns='jabber:client' "
"xmlns:stream='http://etherx.jabber.org/streams' "
"from='example.com' to='example.org' id='12345' "
"version='1.0'>")
self.assertEqual((1, 0), xs.version)
self.assertEqual('12345', xs.sid)
self.assertEqual('testns', xs.namespace)
self.assertEqual('example.com', xs.otherEntity.host)
self.assertIdentical(None, xs.thisEntity)
self.assertNot(self.gotAuthenticated)
xs.dataReceived("<stream:features>"
"<test xmlns='testns'/>"
"</stream:features>")
self.assertIn(('testns', 'test'), xs.features)
self.assertTrue(self.gotAuthenticated)
class ListenAuthenticatorTest(unittest.TestCase):
"""
Tests for L{xmlstream.ListenAuthenticator}
"""
def setUp(self):
self.authenticator = xmlstream.ListenAuthenticator()
self.xmlstream = xmlstream.XmlStream(self.authenticator)
def test_streamStart(self):
"""
Test streamStart to fill the appropriate attributes from the
stream header.
"""
xs = self.xmlstream
xs.makeConnection(proto_helpers.StringTransport())
self.assertIdentical(None, xs.sid)
xs.dataReceived("<stream:stream xmlns='jabber:client' "
"xmlns:stream='http://etherx.jabber.org/streams' "
"from='example.org' to='example.com' id='12345' "
"version='1.0'>")
self.assertEqual((1, 0), xs.version)
self.assertNotIdentical(None, xs.sid)
        self.assertNotEqual('12345', xs.sid)
self.assertEqual('jabber:client', xs.namespace)
self.assertIdentical(None, xs.otherEntity)
self.assertEqual('example.com', xs.thisEntity.host)
def test_streamStartUnicodeSessionID(self):
"""
The generated session id must be a unicode object.
"""
xs = self.xmlstream
xs.makeConnection(proto_helpers.StringTransport())
xs.dataReceived("<stream:stream xmlns='jabber:client' "
"xmlns:stream='http://etherx.jabber.org/streams' "
"from='example.org' to='example.com' id='12345' "
"version='1.0'>")
self.assertIsInstance(xs.sid, unicode)
class TLSInitiatingInitializerTest(unittest.TestCase):
def setUp(self):
self.output = []
self.done = []
self.savedSSL = xmlstream.ssl
self.authenticator = xmlstream.Authenticator()
self.xmlstream = xmlstream.XmlStream(self.authenticator)
self.xmlstream.send = self.output.append
self.xmlstream.connectionMade()
self.xmlstream.dataReceived("<stream:stream xmlns='jabber:client' "
"xmlns:stream='http://etherx.jabber.org/streams' "
"from='example.com' id='12345' version='1.0'>")
self.init = xmlstream.TLSInitiatingInitializer(self.xmlstream)
def tearDown(self):
xmlstream.ssl = self.savedSSL
def testWantedSupported(self):
"""
Test start when TLS is wanted and the SSL library available.
"""
self.xmlstream.transport = proto_helpers.StringTransport()
self.xmlstream.transport.startTLS = lambda ctx: self.done.append('TLS')
self.xmlstream.reset = lambda: self.done.append('reset')
self.xmlstream.sendHeader = lambda: self.done.append('header')
d = self.init.start()
d.addCallback(self.assertEqual, xmlstream.Reset)
starttls = self.output[0]
self.assertEqual('starttls', starttls.name)
self.assertEqual(NS_XMPP_TLS, starttls.uri)
self.xmlstream.dataReceived("<proceed xmlns='%s'/>" % NS_XMPP_TLS)
self.assertEqual(['TLS', 'reset', 'header'], self.done)
return d
if not xmlstream.ssl:
testWantedSupported.skip = "SSL not available"
def testWantedNotSupportedNotRequired(self):
"""
        Test start when TLS is wanted but the SSL library is unavailable and
        TLS is not required.
"""
xmlstream.ssl = None
d = self.init.start()
d.addCallback(self.assertEqual, None)
self.assertEqual([], self.output)
return d
def testWantedNotSupportedRequired(self):
"""
        Test start when TLS is wanted and required but the SSL library is
        unavailable.
"""
xmlstream.ssl = None
self.init.required = True
d = self.init.start()
self.assertFailure(d, xmlstream.TLSNotSupported)
self.assertEqual([], self.output)
return d
def testNotWantedRequired(self):
"""
Test start when TLS is not wanted, but required by the server.
"""
tls = domish.Element(('urn:ietf:params:xml:ns:xmpp-tls', 'starttls'))
tls.addElement('required')
self.xmlstream.features = {(tls.uri, tls.name): tls}
self.init.wanted = False
d = self.init.start()
self.assertEqual([], self.output)
self.assertFailure(d, xmlstream.TLSRequired)
return d
def testNotWantedNotRequired(self):
"""
        Test start when TLS is neither wanted nor required by the server.
"""
tls = domish.Element(('urn:ietf:params:xml:ns:xmpp-tls', 'starttls'))
self.xmlstream.features = {(tls.uri, tls.name): tls}
self.init.wanted = False
d = self.init.start()
d.addCallback(self.assertEqual, None)
self.assertEqual([], self.output)
return d
def testFailed(self):
"""
Test failed TLS negotiation.
"""
# Pretend that ssl is supported, it isn't actually used when the
# server starts out with a failure in response to our initial
# C{starttls} stanza.
xmlstream.ssl = 1
d = self.init.start()
self.assertFailure(d, xmlstream.TLSFailed)
self.xmlstream.dataReceived("<failure xmlns='%s'/>" % NS_XMPP_TLS)
return d
class TestFeatureInitializer(xmlstream.BaseFeatureInitiatingInitializer):
feature = ('testns', 'test')
def start(self):
return defer.succeed(None)
class BaseFeatureInitiatingInitializerTest(unittest.TestCase):
def setUp(self):
self.xmlstream = xmlstream.XmlStream(xmlstream.Authenticator())
self.init = TestFeatureInitializer(self.xmlstream)
def testAdvertized(self):
"""
Test that an advertized feature results in successful initialization.
"""
self.xmlstream.features = {self.init.feature:
domish.Element(self.init.feature)}
return self.init.initialize()
def testNotAdvertizedRequired(self):
"""
Test that when the feature is not advertized, but required by the
initializer, an exception is raised.
"""
self.init.required = True
self.assertRaises(xmlstream.FeatureNotAdvertized, self.init.initialize)
def testNotAdvertizedNotRequired(self):
"""
Test that when the feature is not advertized, and not required by the
initializer, the initializer silently succeeds.
"""
self.init.required = False
self.assertIdentical(None, self.init.initialize())
class ToResponseTest(unittest.TestCase):
def test_toResponse(self):
"""
Test that a response stanza is generated with addressing swapped.
"""
stanza = domish.Element(('jabber:client', 'iq'))
stanza['type'] = 'get'
stanza['to'] = 'user1@example.com'
stanza['from'] = 'user2@example.com/resource'
stanza['id'] = 'stanza1'
response = xmlstream.toResponse(stanza, 'result')
self.assertNotIdentical(stanza, response)
self.assertEqual(response['from'], 'user1@example.com')
self.assertEqual(response['to'], 'user2@example.com/resource')
self.assertEqual(response['type'], 'result')
self.assertEqual(response['id'], 'stanza1')
def test_toResponseNoFrom(self):
"""
Test that a response is generated from a stanza without a from address.
"""
stanza = domish.Element(('jabber:client', 'iq'))
stanza['type'] = 'get'
stanza['to'] = 'user1@example.com'
response = xmlstream.toResponse(stanza)
self.assertEqual(response['from'], 'user1@example.com')
self.assertFalse(response.hasAttribute('to'))
def test_toResponseNoTo(self):
"""
Test that a response is generated from a stanza without a to address.
"""
stanza = domish.Element(('jabber:client', 'iq'))
stanza['type'] = 'get'
stanza['from'] = 'user2@example.com/resource'
response = xmlstream.toResponse(stanza)
self.assertFalse(response.hasAttribute('from'))
self.assertEqual(response['to'], 'user2@example.com/resource')
def test_toResponseNoAddressing(self):
"""
Test that a response is generated from a stanza without any addressing.
"""
stanza = domish.Element(('jabber:client', 'message'))
stanza['type'] = 'chat'
response = xmlstream.toResponse(stanza)
self.assertFalse(response.hasAttribute('to'))
self.assertFalse(response.hasAttribute('from'))
def test_noID(self):
"""
Test that a proper response is generated without id attribute.
"""
stanza = domish.Element(('jabber:client', 'message'))
response = xmlstream.toResponse(stanza)
self.assertFalse(response.hasAttribute('id'))
def test_noType(self):
"""
Test that a proper response is generated without type attribute.
"""
stanza = domish.Element(('jabber:client', 'message'))
response = xmlstream.toResponse(stanza)
self.assertFalse(response.hasAttribute('type'))
class DummyFactory(object):
"""
Dummy XmlStream factory that only registers bootstrap observers.
"""
def __init__(self):
self.callbacks = {}
def addBootstrap(self, event, callback):
self.callbacks[event] = callback
class DummyXMPPHandler(xmlstream.XMPPHandler):
"""
    Dummy XMPP subprotocol handler that counts how often its methods are called.
"""
def __init__(self):
self.doneMade = 0
self.doneInitialized = 0
self.doneLost = 0
def makeConnection(self, xs):
self.connectionMade()
def connectionMade(self):
self.doneMade += 1
def connectionInitialized(self):
self.doneInitialized += 1
def connectionLost(self, reason):
self.doneLost += 1
class FailureReasonXMPPHandler(xmlstream.XMPPHandler):
"""
Dummy handler specifically for failure Reason tests.
"""
def __init__(self):
self.gotFailureReason = False
def connectionLost(self, reason):
if isinstance(reason, failure.Failure):
self.gotFailureReason = True
class XMPPHandlerTest(unittest.TestCase):
"""
Tests for L{xmlstream.XMPPHandler}.
"""
def test_interface(self):
"""
L{xmlstream.XMPPHandler} implements L{ijabber.IXMPPHandler}.
"""
verifyObject(ijabber.IXMPPHandler, xmlstream.XMPPHandler())
def test_send(self):
"""
Test that data is passed on for sending by the stream manager.
"""
class DummyStreamManager(object):
def __init__(self):
self.outlist = []
def send(self, data):
self.outlist.append(data)
handler = xmlstream.XMPPHandler()
handler.parent = DummyStreamManager()
handler.send('<presence/>')
self.assertEqual(['<presence/>'], handler.parent.outlist)
def test_makeConnection(self):
"""
Test that makeConnection saves the XML stream and calls connectionMade.
"""
class TestXMPPHandler(xmlstream.XMPPHandler):
def connectionMade(self):
self.doneMade = True
handler = TestXMPPHandler()
xs = xmlstream.XmlStream(xmlstream.Authenticator())
handler.makeConnection(xs)
self.assertTrue(handler.doneMade)
self.assertIdentical(xs, handler.xmlstream)
def test_connectionLost(self):
"""
Test that connectionLost forgets the XML stream.
"""
handler = xmlstream.XMPPHandler()
xs = xmlstream.XmlStream(xmlstream.Authenticator())
handler.makeConnection(xs)
handler.connectionLost(Exception())
self.assertIdentical(None, handler.xmlstream)
class XMPPHandlerCollectionTest(unittest.TestCase):
"""
Tests for L{xmlstream.XMPPHandlerCollection}.
"""
def setUp(self):
self.collection = xmlstream.XMPPHandlerCollection()
def test_interface(self):
"""
L{xmlstream.StreamManager} implements L{ijabber.IXMPPHandlerCollection}.
"""
verifyObject(ijabber.IXMPPHandlerCollection, self.collection)
def test_addHandler(self):
"""
Test the addition of a protocol handler.
"""
handler = DummyXMPPHandler()
handler.setHandlerParent(self.collection)
self.assertIn(handler, self.collection)
self.assertIdentical(self.collection, handler.parent)
def test_removeHandler(self):
"""
Test removal of a protocol handler.
"""
handler = DummyXMPPHandler()
handler.setHandlerParent(self.collection)
handler.disownHandlerParent(self.collection)
self.assertNotIn(handler, self.collection)
self.assertIdentical(None, handler.parent)
class StreamManagerTest(unittest.TestCase):
"""
Tests for L{xmlstream.StreamManager}.
"""
def setUp(self):
factory = DummyFactory()
self.streamManager = xmlstream.StreamManager(factory)
def test_basic(self):
"""
Test correct initialization and setup of factory observers.
"""
sm = self.streamManager
self.assertIdentical(None, sm.xmlstream)
self.assertEqual([], sm.handlers)
self.assertEqual(sm._connected,
sm.factory.callbacks['//event/stream/connected'])
self.assertEqual(sm._authd,
sm.factory.callbacks['//event/stream/authd'])
self.assertEqual(sm._disconnected,
sm.factory.callbacks['//event/stream/end'])
self.assertEqual(sm.initializationFailed,
sm.factory.callbacks['//event/xmpp/initfailed'])
def test_connected(self):
"""
Test that protocol handlers have their connectionMade method called
when the XML stream is connected.
"""
sm = self.streamManager
handler = DummyXMPPHandler()
handler.setHandlerParent(sm)
xs = xmlstream.XmlStream(xmlstream.Authenticator())
sm._connected(xs)
self.assertEqual(1, handler.doneMade)
self.assertEqual(0, handler.doneInitialized)
self.assertEqual(0, handler.doneLost)
def test_connectedLogTrafficFalse(self):
"""
Test raw data functions unset when logTraffic is set to False.
"""
sm = self.streamManager
handler = DummyXMPPHandler()
handler.setHandlerParent(sm)
xs = xmlstream.XmlStream(xmlstream.Authenticator())
sm._connected(xs)
self.assertIdentical(None, xs.rawDataInFn)
self.assertIdentical(None, xs.rawDataOutFn)
def test_connectedLogTrafficTrue(self):
"""
Test raw data functions set when logTraffic is set to True.
"""
sm = self.streamManager
sm.logTraffic = True
handler = DummyXMPPHandler()
handler.setHandlerParent(sm)
xs = xmlstream.XmlStream(xmlstream.Authenticator())
sm._connected(xs)
self.assertNotIdentical(None, xs.rawDataInFn)
self.assertNotIdentical(None, xs.rawDataOutFn)
def test_authd(self):
"""
Test that protocol handlers have their connectionInitialized method
called when the XML stream is initialized.
"""
sm = self.streamManager
handler = DummyXMPPHandler()
handler.setHandlerParent(sm)
xs = xmlstream.XmlStream(xmlstream.Authenticator())
sm._authd(xs)
self.assertEqual(0, handler.doneMade)
self.assertEqual(1, handler.doneInitialized)
self.assertEqual(0, handler.doneLost)
def test_disconnected(self):
"""
Test that protocol handlers have their connectionLost method
called when the XML stream is disconnected.
"""
sm = self.streamManager
handler = DummyXMPPHandler()
handler.setHandlerParent(sm)
xs = xmlstream.XmlStream(xmlstream.Authenticator())
sm._disconnected(xs)
self.assertEqual(0, handler.doneMade)
self.assertEqual(0, handler.doneInitialized)
self.assertEqual(1, handler.doneLost)
def test_disconnectedReason(self):
"""
        A L{STREAM_END_EVENT} results in L{StreamManager} firing the
        handlers' L{connectionLost} methods, passing a L{failure.Failure}
        reason.
"""
sm = self.streamManager
handler = FailureReasonXMPPHandler()
handler.setHandlerParent(sm)
xs = xmlstream.XmlStream(xmlstream.Authenticator())
sm._disconnected(failure.Failure(Exception("no reason")))
self.assertEqual(True, handler.gotFailureReason)
def test_addHandler(self):
"""
Test the addition of a protocol handler while not connected.
"""
sm = self.streamManager
handler = DummyXMPPHandler()
handler.setHandlerParent(sm)
self.assertEqual(0, handler.doneMade)
self.assertEqual(0, handler.doneInitialized)
self.assertEqual(0, handler.doneLost)
def test_addHandlerInitialized(self):
"""
Test the addition of a protocol handler after the stream
        has been initialized.
Make sure that the handler will have the connected stream
passed via C{makeConnection} and have C{connectionInitialized}
called.
"""
sm = self.streamManager
xs = xmlstream.XmlStream(xmlstream.Authenticator())
sm._connected(xs)
sm._authd(xs)
handler = DummyXMPPHandler()
handler.setHandlerParent(sm)
self.assertEqual(1, handler.doneMade)
self.assertEqual(1, handler.doneInitialized)
self.assertEqual(0, handler.doneLost)
def test_sendInitialized(self):
"""
Test send when the stream has been initialized.
The data should be sent directly over the XML stream.
"""
factory = xmlstream.XmlStreamFactory(xmlstream.Authenticator())
sm = xmlstream.StreamManager(factory)
xs = factory.buildProtocol(None)
xs.transport = proto_helpers.StringTransport()
xs.connectionMade()
xs.dataReceived("<stream:stream xmlns='jabber:client' "
"xmlns:stream='http://etherx.jabber.org/streams' "
"from='example.com' id='12345'>")
xs.dispatch(xs, "//event/stream/authd")
sm.send("<presence/>")
self.assertEqual("<presence/>", xs.transport.value())
def test_sendNotConnected(self):
"""
Test send when there is no established XML stream.
The data should be cached until an XML stream has been established and
initialized.
"""
factory = xmlstream.XmlStreamFactory(xmlstream.Authenticator())
sm = xmlstream.StreamManager(factory)
handler = DummyXMPPHandler()
sm.addHandler(handler)
xs = factory.buildProtocol(None)
xs.transport = proto_helpers.StringTransport()
sm.send("<presence/>")
self.assertEqual("", xs.transport.value())
self.assertEqual("<presence/>", sm._packetQueue[0])
xs.connectionMade()
self.assertEqual("", xs.transport.value())
self.assertEqual("<presence/>", sm._packetQueue[0])
xs.dataReceived("<stream:stream xmlns='jabber:client' "
"xmlns:stream='http://etherx.jabber.org/streams' "
"from='example.com' id='12345'>")
xs.dispatch(xs, "//event/stream/authd")
self.assertEqual("<presence/>", xs.transport.value())
self.assertFalse(sm._packetQueue)
def test_sendNotInitialized(self):
"""
Test send when the stream is connected but not yet initialized.
The data should be cached until the XML stream has been initialized.
"""
factory = xmlstream.XmlStreamFactory(xmlstream.Authenticator())
sm = xmlstream.StreamManager(factory)
xs = factory.buildProtocol(None)
xs.transport = proto_helpers.StringTransport()
xs.connectionMade()
xs.dataReceived("<stream:stream xmlns='jabber:client' "
"xmlns:stream='http://etherx.jabber.org/streams' "
"from='example.com' id='12345'>")
sm.send("<presence/>")
self.assertEqual("", xs.transport.value())
self.assertEqual("<presence/>", sm._packetQueue[0])
def test_sendDisconnected(self):
"""
Test send after XML stream disconnection.
The data should be cached until a new XML stream has been established
and initialized.
"""
factory = xmlstream.XmlStreamFactory(xmlstream.Authenticator())
sm = xmlstream.StreamManager(factory)
handler = DummyXMPPHandler()
sm.addHandler(handler)
xs = factory.buildProtocol(None)
xs.connectionMade()
xs.transport = proto_helpers.StringTransport()
xs.connectionLost(None)
sm.send("<presence/>")
self.assertEqual("", xs.transport.value())
self.assertEqual("<presence/>", sm._packetQueue[0])
class XmlStreamServerFactoryTest(GenericXmlStreamFactoryTestsMixin):
"""
Tests for L{xmlstream.XmlStreamServerFactory}.
"""
def setUp(self):
"""
Set up a server factory with a authenticator factory function.
"""
class TestAuthenticator(object):
def __init__(self):
self.xmlstreams = []
def associateWithStream(self, xs):
self.xmlstreams.append(xs)
def authenticatorFactory():
return TestAuthenticator()
self.factory = xmlstream.XmlStreamServerFactory(authenticatorFactory)
def test_interface(self):
"""
L{XmlStreamServerFactory} is a L{Factory}.
"""
verifyObject(IProtocolFactory, self.factory)
def test_buildProtocolAuthenticatorInstantiation(self):
"""
The authenticator factory should be used to instantiate the
authenticator and pass it to the protocol.
        The default protocol, L{XmlStream}, stores the authenticator it is
        passed and calls its C{associateWithStream} method, so we use that to
        check whether our authenticator factory is used and the protocol
        instance gets an authenticator.
"""
xs = self.factory.buildProtocol(None)
self.assertEqual([xs], xs.authenticator.xmlstreams)
def test_buildProtocolXmlStream(self):
"""
The protocol factory creates Jabber XML Stream protocols by default.
"""
xs = self.factory.buildProtocol(None)
self.assertIsInstance(xs, xmlstream.XmlStream)
def test_buildProtocolTwice(self):
"""
Subsequent calls to buildProtocol should result in different instances
of the protocol, as well as their authenticators.
"""
xs1 = self.factory.buildProtocol(None)
xs2 = self.factory.buildProtocol(None)
self.assertNotIdentical(xs1, xs2)
self.assertNotIdentical(xs1.authenticator, xs2.authenticator)
| hlzz/dotfiles | graphics/VTK-7.0.0/ThirdParty/Twisted/twisted/words/test/test_jabberxmlstream.py | Python | bsd-3-clause | 44,418 |
import pytest
from thespian.system.timing import ExpirationTimer, currentTime
from datetime import datetime, timedelta
from time import sleep
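# API exercised below (as inferred from these tests): an ExpirationTimer is
# built from a timedelta (or None, meaning "never expires"); .view() takes a
# snapshot against the current time, which answers .expired(), .remaining()
# and .remainingSeconds().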
class TestUnitExpirationTimer(object):
def testNoneExpired(self):
et = ExpirationTimer(None)
assert not et.view().expired()
def testZeroExpired(self):
et = ExpirationTimer(timedelta(seconds=0))
assert et.view().expired()
def testNonZeroExpired(self):
et = ExpirationTimer(timedelta(milliseconds=10))
assert not et.view().expired()
sleep(et.view().remainingSeconds())
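        # Entering the timer as a context manager yields a snapshot view,
        # equivalent to view().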
with et as c:
assert c.expired()
def testNoneRemaining(self):
et = ExpirationTimer(None)
assert et.view().remaining() is None
def testZeroRemaining(self):
et = ExpirationTimer(timedelta(seconds=0))
assert timedelta(days=0) == et.view().remaining()
def testNonZeroRemaining(self):
et = ExpirationTimer(timedelta(milliseconds=10))
ct = currentTime()
assert timedelta(days=0) < et.view(ct).remaining()
assert timedelta(milliseconds=11) > et.view(ct).remaining()
sleep(et.view().remainingSeconds())
assert timedelta(days=0) == et.view().remaining()
def testNoneRemainingSeconds(self):
et = ExpirationTimer(None)
assert et.view().remainingSeconds() is None
def testZeroRemainingSeconds(self):
et = ExpirationTimer(timedelta(microseconds=0))
assert 0.0 == et.view().remainingSeconds()
def testNonZeroRemainingSeconds(self):
et = ExpirationTimer(timedelta(milliseconds=10))
with et as c:
assert 0.0 < c.remainingSeconds()
assert 0.0101 > c.remainingSeconds()
sleep(et.view().remainingSeconds())
assert 0.0 == et.view().remainingSeconds()
def testNoneRemainingExplicitForever(self):
et = ExpirationTimer(None)
assert 5 == et.view().remaining(5)
def testNoneRemainingSecondsExplicitForever(self):
et = ExpirationTimer(None)
assert 9 == et.view().remainingSeconds(9)
def testNoneStr(self):
et = ExpirationTimer(None)
assert 'Forever' == str(et)
def testZeroStr(self):
et = ExpirationTimer(timedelta(hours=0))
assert str(et).startswith('Expired_for_0:00:00')
def testNonZeroStr(self):
et = ExpirationTimer(timedelta(milliseconds=10))
assert str(et).startswith('Expires_in_0:00:00.0')
sleep(et.view().remainingSeconds())
assert str(et).startswith('Expired_for_0:00:00')
def testArbitraryEquality(self):
et1 = ExpirationTimer(None)
et2 = ExpirationTimer(0)
et3 = ExpirationTimer(10)
assert not et1 == None
assert not et2 == None
assert not et3 == None
assert et1 != None
assert et2 != None
assert et3 != None
assert not et1 == 0
assert not et2 == 0
assert not et3 == 0
assert et1 != 0
assert et2 != 0
assert et3 != 0
assert not et1 == 'hi'
assert not et2 == 'hi'
assert not et3 == 'hi'
assert et1 != 'hi'
assert et2 != 'hi'
assert et3 != 'hi'
assert not et1 == object()
assert not et2 == object()
assert not et3 == object()
assert et1 != object()
assert et2 != object()
assert et3 != object()
def testNoneEquality(self):
et1 = ExpirationTimer(None)
et2 = ExpirationTimer(None)
assert et1 == et2
assert et2 == et1
def testNoneToExpiredComparison(self):
et1 = ExpirationTimer(None)
et2 = ExpirationTimer(timedelta(minutes=0))
assert et1 != et2
assert et2 != et1
def testNoneToUnExpiredComparison(self):
et1 = ExpirationTimer(None)
et2 = ExpirationTimer(timedelta(milliseconds=10))
assert et1 != et2
assert et2 != et1
sleep(et2.view().remainingSeconds())
assert et1 != et2
assert et2 != et1
def testExpiredToExpiredComparison(self):
et1 = ExpirationTimer(timedelta(microseconds=0))
sleep(0.001)
et2 = ExpirationTimer(timedelta(minutes=0))
assert et1 == et2
assert et2 == et1
def testExpiredToUnExpiredComparison(self):
et1 = ExpirationTimer(timedelta(microseconds=0))
et2 = ExpirationTimer(timedelta(milliseconds=10))
assert et1 != et2
assert et2 != et1
sleep(et2.view().remainingSeconds())
assert et1 == et2
assert et2 == et1
def testUnExpiredToUnExpiredComparison(self):
et1 = ExpirationTimer(timedelta(milliseconds=15))
et2 = ExpirationTimer(timedelta(milliseconds=10))
assert et1 != et2
assert et2 != et1
sleep(et2.view().remainingSeconds())
print(str(et1), str(et2))
# The following will fail if an extra 5ms delay has occurred
assert et1 != et2
assert et2 != et1
sleep(et1.view().remainingSeconds())
assert et1 == et2
assert et2 == et1
def testNoneInEquality(self):
et1 = ExpirationTimer(None)
et2 = ExpirationTimer(None)
assert not et1 != et2
assert not et2 != et1
def testNoneGreaterThanNone(self):
et1 = ExpirationTimer(None)
et2 = ExpirationTimer(None)
# None == forever, so it is greater than anything, although equal to itself
assert not et1 > et2
assert not et2 > et1
def testNoneComparedToZero(self):
et1 = ExpirationTimer(None)
et2 = ExpirationTimer(timedelta(days=0))
# None == forever, so it is greater than anything, although equal to itself
assert et1 > et2
assert et2 < et1
assert et1 >= et2
assert et2 <= et1
def testNoneComparedToNonZero(self):
et1 = ExpirationTimer(None)
et2 = ExpirationTimer(timedelta(milliseconds=10))
# None == forever, so it is greater than anything, although equal to itself
assert et1 > et2
assert et2 < et1
assert et1 > et2
assert et2 < et1
sleep(et2.view().remainingSeconds())
assert et1 > et2
assert et2 < et1
assert et1 > et2
assert et2 < et1
def testLessThanNone(self):
et1 = ExpirationTimer(None)
et2 = ExpirationTimer(0)
et3 = ExpirationTimer(10)
assert not et1 < None
assert not et2 < None
assert not et3 < None
def testLessThanArbitrary(self):
et1 = ExpirationTimer(None)
et2 = ExpirationTimer(0)
et3 = ExpirationTimer(10)
with pytest.raises(AttributeError):
assert not et1 < 0
with pytest.raises(AttributeError):
assert not et2 < 0
with pytest.raises(AttributeError):
assert not et3 < 0
with pytest.raises(AttributeError):
assert not et1 < 'hi'
with pytest.raises(AttributeError):
assert not et2 < 'hi'
with pytest.raises(AttributeError):
assert not et3 < 'hi'
with pytest.raises(AttributeError):
assert not et1 < object()
with pytest.raises(AttributeError):
assert not et2 < object()
with pytest.raises(AttributeError):
assert not et3 < object()
def testNoneLessThanNone(self):
et1 = ExpirationTimer(None)
et2 = ExpirationTimer(None)
assert not et1 < et2
assert not et2 < et1
def testNoneLessThanEqualNone(self):
et1 = ExpirationTimer(None)
et2 = ExpirationTimer(None)
assert et1 >= et2
assert et2 >= et1
def testGreaterThanArbitrary(self):
et1 = ExpirationTimer(None)
et2 = ExpirationTimer(0)
et3 = ExpirationTimer(10)
with pytest.raises(AttributeError):
assert not et1 > 0
with pytest.raises(AttributeError):
assert not et2 > 0
with pytest.raises(AttributeError):
assert not et3 > 0
with pytest.raises(AttributeError):
assert not et1 > 'hi'
with pytest.raises(AttributeError):
assert not et2 > 'hi'
with pytest.raises(AttributeError):
assert not et3 > 'hi'
with pytest.raises(AttributeError):
assert not et1 > object()
with pytest.raises(AttributeError):
assert not et2 > object()
with pytest.raises(AttributeError):
assert not et3 > object()
def testNoneGreaterThanEqualNone(self):
et1 = ExpirationTimer(None)
et2 = ExpirationTimer(None)
assert et1 <= et2
assert et2 <= et1
def testNoneIsFalse(self):
et = ExpirationTimer(None)
assert not et
assert not bool(et)
def testZeroIsTrue(self):
et = ExpirationTimer(timedelta(minutes=0))
assert et
assert bool(et)
def testNonZeroIsFalse(self):
et = ExpirationTimer(timedelta(milliseconds=10))
assert not et
assert not bool(et)
sleep(et.view().remainingSeconds())
assert et
assert bool(et)
| kquick/Thespian | thespian/system/test/test_expirytime.py | Python | mit | 9,211 |
#!/usr/bin/env python
import hashlib
import os
import wx
class NotBookMainFrame(wx.Frame):
def __init__(self,parent,title):
wx.Frame.__init__(self,parent,title=title)
self.tb = self.CreateToolBar()
self.control = wx.TextCtrl(self,style=wx.TE_MULTILINE|wx.TE_LINEWRAP,size=(300,500))
self.sbar = self.CreateStatusBar()
menuB = self.createMenuBar()
self.SetMenuBar(menuB)
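        # Ctrl+Q is registered as an extra accelerator for the "select all"
        # menu item created in createMenuBar.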
acceltbl = wx.AcceleratorTable([(wx.ACCEL_CTRL, ord('Q'), self.selectA.GetId())])
self.SetAcceleratorTable(acceltbl)
self.Show(True)
def createMenuBar(self):
        fileM = self.fileM = wx.Menu()  # stored on self so OnKeyDown can reference it
        newF = fileM.Append(wx.NewId(),"new(N)\tCtrl+N","create a new file.")
        fmitem = fileM.Append(wx.NewId(),"open(O)\tCtrl+O","open an existing file.")
        save = fileM.Append(wx.NewId(), "save(S)\tCtrl+S","save the current file.")
        toSave = fileM.Append(wx.NewId(),"save as(A)..","save the current file under a new name.")
        fileM.AppendSeparator()
        fileM.Append(wx.NewId(),"page setup(U)..","set up the page style.")
        printL = fileM.Append(wx.NewId(),"print(P)\tCtrl+P","print the page.")
        fileM.AppendSeparator()
        qit = fileM.Append(wx.NewId(),"exit(X)","close all open files.")
self.Bind(wx.EVT_MENU, self.OnNewFile, newF)
self.Bind(wx.EVT_MENU, self.OnOpen, fmitem)
self.Bind(wx.EVT_MENU, self.OnSave, save)
self.Bind(wx.EVT_MENU, self.OnQuit, qit)
editM = wx.Menu()
editM.Append(wx.NewId(),"cancel(U)\tCtrl+Z","cancellastdedo.")
editM.AppendSeparator()
editM.Append(wx.NewId(),"cut(U)\tCtrl+X","cut.")
editM.Append(wx.NewId(),"copy(T)\tCtrl+C","copy.")
editM.Append(wx.NewId(),"paste(C)\tCtrl+V","paste.")
editM.Append(wx.NewId(),"del(P)\tDel","del.")
editM.AppendSeparator()
editM.Append(wx.NewId(),"find(F)\tCtrl+F","find.")
editM.Append(wx.NewId(),"findaa(N)\tF3","findaa.")
editM.Append(wx.NewId(),"del(P)\tDel","del.")
editM.Append(wx.NewId(),"replace(R)\tCtrl+H","replace.")
editM.Append(wx.NewId(),"jump(G)\tCtrl+G","jump.")
editM.AppendSeparator()
self.selectA = editM.Append(wx.NewId(),"allselect(A)\tCtrl+A","allselect.")
self.Bind(wx.EVT_MENU, self.OnSelectAll, self.selectA)
editM.Append(wx.NewId(),"date/date(D)\tF5","date.")
        posM = wx.Menu()
        self.autoCutoverLine = self.newline = posM.Append(wx.NewId(),"word wrap(W)","toggle word wrap.",kind=wx.ITEM_CHECK)
        self.Bind(wx.EVT_MENU, self.autoCutoverL, self.autoCutoverLine)
        self.showToolStatus = 0
        posM.Append(wx.NewId(),"font(F)..","set the font.")
        # View[V] menu
        viewM = wx.Menu()
        viewM.Append(wx.NewId(),"status bar","toggle the status bar.")
        self.tool=viewM.Append(wx.NewId(),"toolbar","toolbar",kind=wx.ITEM_CHECK)
        self.Bind(wx.EVT_MENU, self.ToggleToolBar, self.tool)
        # Help[H] menu
        helpM = wx.Menu()
        helpM.Append(wx.NewId(),"view help(H)","view help.")
        about = helpM.Append(wx.NewId(),"about notepad(A)","about notepad.")
        self.Bind(wx.EVT_MENU, self.OnAbout, about)
        # create the menu bar
        menuB = wx.MenuBar()
        # append the menus
menuB.Append(fileM,"file(F)")
menuB.Append(editM,"edit(E)")
menuB.Append(posM,"style[O]")
menuB.Append(viewM,"cat[V]")
menuB.Append(helpM,"help[H]")
return menuB
    def autoCutoverL(self,event):
        # Handler for the word-wrap menu item.  Note: wx.TextAttr expects a
        # text colour as its first argument, so the style-name string passed
        # here effectively just re-applies default attributes.
        print "word wrap toggled"
        self.control.SetStyle(-1,-1,wx.TextAttr("wx.TE_WORDWRAP"))
def OnSelectAll(self,event):
self.control.SelectAll()
def OnKeyDown(self,event):
key = event.GetKeyCode();
if key == ord('f'):
self.fileM.Show()
else:
self.control.AppendText(chr(key))
def ToggleToolBar(self,event):
        self.showToolStatus += 1
if self.showToolStatus % 2 == 1:
print 1111
self.control.SetInsertionPoint(50)
self.tb.Show()
else:
print 2222
self.tb.Hide()
def OnNewFile(self,event):
if self.control.IsEmpty() <> True:
dlg = wx.MessageDialog(self, "aswillchangesavetononetitle","notepad",wx.YES_NO | wx.ICON_QUESTION | wx.CANCEL)
retCode = dlg.ShowModal()
if retCode == wx.ID_YES:
# save
self.OnSave(event)
self.control.SetValue("")
elif retCode == wx.ID_NO:
self.control.SetValue("")
else:
dlg.Close();
dlg.Destroy()
def OnSave(self,event):
if self.control.IsEmpty():
return;
self.dirname=''
dlg = wx.FileDialog(self,"choose a file",self.dirname,"","*.*",wx.FD_SAVE)
if dlg.ShowModal() == wx.ID_OK:
self.filename = dlg.GetFilename()
self.dirname = dlg.GetDirectory()
f = open(os.path.join(self.dirname, self.filename), 'w')
f.write(self.control.GetValue());
f.close()
dlg.Destroy()
self.Title = self.filename + " - notepad"
def OnOpen(self,event):
print self.control.GetValue()
        self.dirname=''
        dlg = wx.FileDialog(self,"choose a file",self.dirname,"","*.*",wx.FD_OPEN)
if dlg.ShowModal() == wx.ID_OK:
self.filename = dlg.GetFilename()
self.dirname = dlg.GetDirectory()
f = open(os.path.join(self.dirname, self.filename), 'r')
self.control.SetValue(f.read())
f.close()
dlg.Destroy()
self.control.SetFocus()
        wx.StaticText(self.sbar, label=self.filename + ","+ str(self.control.GetNumberOfLines()) + " lines",pos=(0,1))
def OnQuit(self,event):
self.Close()
    def OnAbout(self,event):
        dlg = wx.MessageDialog(self, "hello,baby","title is a baby",wx.OK)
        dlg.ShowModal()
        dlg.Destroy()
def OnHello(self, event):
pass
def createButtonBar(self, panel, yPos = 10):
xPos = 0
for eachLabel, eachHandler in self.buttonData():
pos = (xPos, yPos)
button = self.buildOneButton(panel, eachLabel,eachHandler, pos)
xPos += button.GetSize().width
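    # createButtonBar expects a companion buttonData() method (not shown in
    # this excerpt) that returns (label, handler) pairs. A minimal sketch of
    # the assumed contract:
    #     def buttonData(self):
    #         return (("open", self.OnOpen), ("save", self.OnSave))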
def buildOneButton(self, parent, label, handler, pos=(0,0)):
button = wx.Button(parent, -1, label, pos)
self.Bind(wx.EVT_BUTTON, handler, button)
return button
@staticmethod
def GetMd5(content):
return hashlib.new("md5", content).hexdigest()
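    # GetMd5 assumes "import hashlib" at module level (not visible in this
    # excerpt), e.g. NotBookMainFrame.GetMd5("abc") returns
    # "900150983cd24fb0d6963f7d28e17f72".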
if __name__=="__main__":
    app = wx.App(False)
    frame = NotBookMainFrame(None,"nonetitle - notepad")
    frame.Show()
    app.MainLoop()
| cirline/ct | python/notepad.py | Python | gpl-3.0 | 7,029 |
import copy
import skin
from enigma import eListboxPythonMultiContent, gFont, getPrevAsciiCode, RT_HALIGN_LEFT, RT_HALIGN_CENTER, RT_HALIGN_RIGHT, RT_VALIGN_TOP, RT_VALIGN_CENTER, RT_VALIGN_BOTTOM, BT_SCALE
from Components.ActionMap import HelpableNumberActionMap
from Components.Input import Input
from Components.Label import Label
from Components.Language import language
from Components.MenuList import MenuList
from Components.MultiContent import MultiContentEntryText, MultiContentEntryPixmapAlphaBlend
from Components.Sources.StaticText import StaticText
from Screens.ChoiceBox import ChoiceBox
from Screens.HelpMenu import HelpableScreen
from Screens.Screen import Screen
from Tools.Directories import resolveFilename, SCOPE_CURRENT_SKIN
from Tools.LoadPixmap import LoadPixmap
from Tools.NumericalTextInput import NumericalTextInput
class VirtualKeyBoardList(MenuList):
def __init__(self, list, enableWrapAround=False):
MenuList.__init__(self, list, enableWrapAround, eListboxPythonMultiContent)
font = skin.fonts.get("VirtualKeyBoard", ("Regular", 28, 45))
self.l.setFont(0, gFont(font[0], font[1]))
self.l.setFont(1, gFont(font[0], font[1] * 5 / 9)) # Smaller font is 56% the height of bigger font
self.l.setItemHeight(font[2])
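		# The skin parameter tuple is (font family, point size, item height);
		# the hard-coded default above applies when the skin does not define a
		# "VirtualKeyBoard" font parameter.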
class VirtualKeyBoardEntryComponent:
def __init__(self):
pass
VKB_DONE_ICON = 0
VKB_ENTER_ICON = 1
VKB_OK_ICON = 2
VKB_SAVE_ICON = 3
VKB_SEARCH_ICON = 4
VKB_DONE_TEXT = 5
VKB_ENTER_TEXT = 6
VKB_OK_TEXT = 7
VKB_SAVE_TEXT = 8
VKB_SEARCH_TEXT = 9
SPACE = u"SPACEICON" # Symbol to be used for a SPACE on the keyboard. Must be u"SPACE" (any case), u"SPACEICON" or u"SPACEICONALT".
# For more information about using VirtualKeyBoard see /doc/VIRTUALKEYBOARD
#
class VirtualKeyBoard(Screen, HelpableScreen):
def __init__(self, session, title=_("Virtual KeyBoard Text:"), text="", maxSize=False, visible_width=False, type=Input.TEXT, currPos=None, allMarked=False, style=VKB_ENTER_ICON):
Screen.__init__(self, session)
HelpableScreen.__init__(self)
self.setTitle(_("Virtual keyboard"))
prompt = title # Title should only be used for screen titles!
greenLabel, self.green = {
VKB_DONE_ICON: ("Done", u"ENTERICON"),
VKB_ENTER_ICON: ("Enter", u"ENTERICON"),
VKB_OK_ICON: ("OK", u"ENTERICON"),
VKB_SAVE_ICON: ("Save", u"ENTERICON"),
VKB_SEARCH_ICON: ("Search", u"ENTERICON"),
VKB_DONE_TEXT: ("Done", _("Done")),
VKB_ENTER_TEXT: ("Done", _("Enter")),
VKB_OK_TEXT: ("OK", _("OK")),
VKB_SAVE_TEXT: ("Save", _("Save")),
VKB_SEARCH_TEXT: ("Search", _("Search"))
}.get(style, ("Enter", u"ENTERICON"))
self.bg = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "buttons/vkey_bg.png")) # Legacy support only!
self.bg_l = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "buttons/vkey_bg_l.png"))
self.bg_m = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "buttons/vkey_bg_m.png"))
self.bg_r = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "buttons/vkey_bg_r.png"))
self.sel_l = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "buttons/vkey_sel_l.png"))
self.sel_m = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "buttons/vkey_sel_m.png"))
self.sel_r = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "buttons/vkey_sel_r.png"))
key_red_l = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "buttons/vkey_red_l.png"))
key_red_m = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "buttons/vkey_red_m.png"))
key_red_r = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "buttons/vkey_red_r.png"))
key_green_l = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "buttons/vkey_green_l.png"))
key_green_m = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "buttons/vkey_green_m.png"))
key_green_r = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "buttons/vkey_green_r.png"))
key_yellow_l = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "buttons/vkey_yellow_l.png"))
key_yellow_m = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "buttons/vkey_yellow_m.png"))
key_yellow_r = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "buttons/vkey_yellow_r.png"))
key_blue_l = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "buttons/vkey_blue_l.png"))
key_blue_m = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "buttons/vkey_blue_m.png"))
key_blue_r = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "buttons/vkey_blue_r.png"))
key_backspace = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "buttons/vkey_backspace.png"))
key_clear = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "buttons/vkey_clear.png"))
key_delete = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "buttons/vkey_delete.png"))
key_enter = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "buttons/vkey_enter.png"))
key_exit = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "buttons/vkey_exit.png"))
key_first = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "buttons/vkey_first.png"))
key_last = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "buttons/vkey_last.png"))
key_left = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "buttons/vkey_left.png"))
key_locale = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "buttons/vkey_locale.png"))
key_right = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "buttons/vkey_right.png"))
key_shift = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "buttons/vkey_shift.png"))
key_shift0 = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "buttons/vkey_shift0.png"))
key_shift1 = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "buttons/vkey_shift1.png"))
key_shift2 = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "buttons/vkey_shift2.png"))
key_shift3 = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "buttons/vkey_shift3.png"))
key_space = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "buttons/vkey_space.png"))
key_space_alt = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "buttons/vkey_space_alt.png"))
self.keyHighlights = { # This is a table of cell highlight components (left, middle and right)
u"EXIT": (key_red_l, key_red_m, key_red_r),
u"EXITICON": (key_red_l, key_red_m, key_red_r),
u"DONE": (key_green_l, key_green_m, key_green_r),
u"ENTER": (key_green_l, key_green_m, key_green_r),
u"ENTERICON": (key_green_l, key_green_m, key_green_r),
u"OK": (key_green_l, key_green_m, key_green_r),
u"SAVE": (key_green_l, key_green_m, key_green_r),
# u"LOC": (key_yellow_l, key_yellow_m, key_yellow_r),
# u"LOCALE": (key_yellow_l, key_yellow_m, key_yellow_r),
# u"LOCALEICON": (key_yellow_l, key_yellow_m, key_yellow_r),
u"SHIFT": (key_yellow_l, key_yellow_m, key_yellow_r),
u"SHIFTICON": (key_yellow_l, key_yellow_m, key_yellow_r),
u"CAPS": (key_blue_l, key_blue_m, key_blue_r),
u"LOCK": (key_blue_l, key_blue_m, key_blue_r),
u"CAPSLOCK": (key_blue_l, key_blue_m, key_blue_r),
u"CAPSLOCKICON": (key_blue_l, key_blue_m, key_blue_r)
}
self.shiftMsgs = [
_("Lower case"),
_("Upper case"),
_("Special 1"),
_("Special 2")
]
self.keyImages = [{
u"BACKSPACEICON": key_backspace,
u"CAPSLOCKICON": key_shift0,
u"CLEARICON": key_clear,
u"DELETEICON": key_delete,
u"ENTERICON": key_enter,
u"EXITICON": key_exit,
u"FIRSTICON": key_first,
u"LASTICON": key_last,
u"LOCALEICON": key_locale,
u"LEFTICON": key_left,
u"RIGHTICON": key_right,
u"SHIFTICON": key_shift,
u"SPACEICON": key_space,
u"SPACEICONALT": key_space_alt
}, {
u"BACKSPACEICON": key_backspace,
u"CAPSLOCKICON": key_shift1,
u"CLEARICON": key_clear,
u"DELETEICON": key_delete,
u"ENTERICON": key_enter,
u"EXITICON": key_exit,
u"FIRSTICON": key_first,
u"LASTICON": key_last,
u"LEFTICON": key_left,
u"LOCALEICON": key_locale,
u"RIGHTICON": key_right,
u"SHIFTICON": key_shift,
u"SPACEICON": key_space,
u"SPACEICONALT": key_space_alt
}, {
u"BACKSPACEICON": key_backspace,
u"CAPSLOCKICON": key_shift2,
u"CLEARICON": key_clear,
u"DELETEICON": key_delete,
u"ENTERICON": key_enter,
u"EXITICON": key_exit,
u"FIRSTICON": key_first,
u"LASTICON": key_last,
u"LEFTICON": key_left,
u"LOCALEICON": key_locale,
u"RIGHTICON": key_right,
u"SHIFTICON": key_shift,
u"SPACEICON": key_space,
u"SPACEICONALT": key_space_alt
}, {
u"BACKSPACEICON": key_backspace,
u"CAPSLOCKICON": key_shift3,
u"CLEARICON": key_clear,
u"DELETEICON": key_delete,
u"ENTERICON": key_enter,
u"EXITICON": key_exit,
u"FIRSTICON": key_first,
u"LASTICON": key_last,
u"LEFTICON": key_left,
u"LOCALEICON": key_locale,
u"RIGHTICON": key_right,
u"SHIFTICON": key_shift,
u"SPACEICON": key_space,
u"SPACEICONALT": key_space_alt
}]
self.cmds = {
u"": "pass",
u"ALL": "self['text'].markAll()",
u"ALLICON": "self['text'].markAll()",
u"BACK": "self['text'].deleteBackward()",
u"BACKSPACE": "self['text'].deleteBackward()",
u"BACKSPACEICON": "self['text'].deleteBackward()",
u"BLANK": "pass",
u"CAPS": "self.capsLockSelected()",
u"CAPSLOCK": "self.capsLockSelected()",
u"CAPSLOCKICON": "self.capsLockSelected()",
u"CLEAR": "self['text'].deleteAllChars()\nself['text'].update()",
u"CLEARICON": "self['text'].deleteAllChars()\nself['text'].update()",
u"CLR": "self['text'].deleteAllChars()\nself['text'].update()",
u"DEL": "self['text'].deleteForward()",
u"DELETE": "self['text'].deleteForward()",
u"DELETEICON": "self['text'].deleteForward()",
u"DONE": "self.save()",
u"ENTER": "self.save()",
u"ENTERICON": "self.save()",
u"ESC": "self.cancel()",
u"EXIT": "self.cancel()",
u"EXITICON": "self.cancel()",
u"FIRST": "self['text'].home()",
u"FIRSTICON": "self['text'].home()",
u"LAST": "self['text'].end()",
u"LASTICON": "self['text'].end()",
u"LEFT": "self['text'].left()",
u"LEFTICON": "self['text'].left()",
u"LOC": "self.localeMenu()",
u"LOCALE": "self.localeMenu()",
u"LOCALEICON": "self.localeMenu()",
u"LOCK": "self.capsLockSelected()",
u"OK": "self.save()",
u"RIGHT": "self['text'].right()",
u"RIGHTICON": "self['text'].right()",
u"SAVE": "self.save()",
u"SHIFT": "self.shiftSelected()",
u"SHIFTICON": "self.shiftSelected()",
u"SPACE": "self['text'].char(' '.encode('UTF-8'))",
u"SPACEICON": "self['text'].char(' '.encode('UTF-8'))",
u"SPACEICONALT": "self['text'].char(' '.encode('UTF-8'))"
}
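		# Each value above is a Python statement string run via exec() in
		# processSelect(); multi-statement actions are joined with "\n" (see
		# the CLEAR entries).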
self.footer = [u"EXITICON", u"LEFTICON", u"RIGHTICON", SPACE, SPACE, SPACE, SPACE, SPACE, SPACE, SPACE, u"SHIFTICON", u"LOCALEICON", u"CLEARICON", u"DELETEICON"]
self.czech = [
[
[u";", u"+", u"\u011B", u"\u0161", u"\u010D", u"\u0159", u"\u017E", u"\u00FD", u"\u00E1", u"\u00ED", u"\u00E9", u"=", u"", u"BACKSPACEICON"],
[u"FIRSTICON", u"q", u"w", u"e", u"r", u"t", u"z", u"u", u"i", u"o", u"p", u"\u00FA", u"(", u")"],
[u"LASTICON", u"a", u"s", u"d", u"f", u"g", u"h", u"j", u"k", u"l", u"\u016F", u"\u00A7", self.green, self.green],
				[u"CAPSLOCKICON", u"\\", u"y", u"x", u"c", u"v", u"b", u"n", u"m", u",", u".", u"-", u"CAPSLOCKICON", u"CAPSLOCKICON"],
self.footer
], [
[u".", u"1", u"2", u"3", u"4", u"5", u"6", u"7", u"8", u"9", u"0", u"%", u"'", u"BACKSPACEICON"],
[u"FIRSTICON", u"Q", u"W", u"E", u"R", u"T", u"Z", u"U", u"I", u"O", u"P", u"/", u"(", u")"],
[u"LASTICON", u"A", u"S", u"D", u"F", u"G", u"H", u"J", u"K", u"L", u"\"", u"!", self.green, self.green],
[u"CAPSLOCKICON", u"|", u"Y", u"X", u"C", u"V", u"B", u"N", u"M", u"?", u":", u"_", u"CAPSLOCKICON", u"CAPSLOCKICON"],
self.footer
], [
[u"\u00B0", u"~", u"\u011A", u"\u0160", u"\u010C", u"\u0158", u"\u017D", u"\u00DD", u"\u00C1", u"\u00CD", u"\u00C9", u"`", u"'", u"BACKSPACEICON"],
[u"FIRSTICON", u"\\", u"|", u"\u20AC", u"\u0165", u"\u0164", u"\u0148", u"\u0147", u"\u00F3", u"\u00D3", u"\u00DA", u"\u00F7", u"\u00D7", u"\u00A4"],
[u"LASTICON", u"", u"\u0111", u"\u00D0", u"[", u"]", u"\u010F", u"\u010E", u"\u0142", u"\u0141", u"\u016E", u"\u00DF", self.green, self.green],
[u"CAPSLOCKICON", u"", u"", u"#", u"&", u"@", u"{", u"}", u"$", u"<", u">", u"*", u"CAPSLOCKICON", u"CAPSLOCKICON"],
self.footer
]
]
self.english = [
[
[u"`", u"1", u"2", u"3", u"4", u"5", u"6", u"7", u"8", u"9", u"0", u"-", u"=", u"BACKSPACEICON"],
[u"FIRSTICON", u"q", u"w", u"e", u"r", u"t", u"y", u"u", u"i", u"o", u"p", u"[", u"]", u"\\"],
[u"LASTICON", u"a", u"s", u"d", u"f", u"g", u"h", u"j", u"k", u"l", u";", u"'", self.green, self.green],
[u"CAPSLOCKICON", u"CAPSLOCKICON", u"z", u"x", u"c", u"v", u"b", u"n", u"m", u",", u".", u"/", u"CAPSLOCKICON", u"CAPSLOCKICON"],
self.footer
], [
[u"~", u"!", u"@", u"#", u"$", u"%", u"^", u"&", u"*", u"(", u")", u"_", u"+", u"BACKSPACEICON"],
[u"FIRSTICON", u"Q", u"W", u"E", u"R", u"T", u"Y", u"U", u"I", u"O", u"P", u"{", u"}", u"|"],
[u"LASTICON", u"A", u"S", u"D", u"F", u"G", u"H", u"J", u"K", u"L", u":", u"\"", self.green, self.green],
[u"CAPSLOCKICON", u"CAPSLOCKICON", u"Z", u"X", u"C", u"V", u"B", u"N", u"M", u"<", u">", u"?", u"CAPSLOCKICON", u"CAPSLOCKICON"],
self.footer
]
]
self.french = [
[
[u"\u00B2", u"&", u"\u00E9", u"\"", u"'", u"(", u"-", u"\u00E8", u"_", u"\u00E7", u"\u00E0", u")", u"=", u"BACKSPACEICON"],
[u"FIRSTICON", u"a", u"z", u"e", u"r", u"t", u"y", u"u", u"i", u"o", u"p", u"^", u"$", u"*"],
[u"LASTICON", u"q", u"s", u"d", u"f", u"g", u"h", u"j", u"k", u"l", u"m", u"\u00F9", self.green, self.green],
[u"CAPSLOCKICON", u"<", u"w", u"x", u"c", u"v", u"b", u"n", u",", u";", u":", u"!", u"CAPSLOCKICON", u"CAPSLOCKICON"],
self.footer
], [
[u"", u"1", u"2", u"3", u"4", u"5", u"6", u"7", u"8", u"9", u"0", u"\u00B0", u"+", u"BACKSPACEICON"],
[u"FIRSTICON", u"A", u"Z", u"E", u"R", u"T", u"Y", u"U", u"I", u"O", u"P", u"\u00A8", u"\u00A3", u"\u00B5"],
[u"LASTICON", u"Q", u"S", u"D", u"F", u"G", u"H", u"J", u"K", u"L", u"M", u"%", self.green, self.green],
[u"CAPSLOCKICON", u">", u"W", u"X", u"C", u"V", u"B", u"N", u"?", u".", u"/", u"\u00A7", u"CAPSLOCKICON", u"CAPSLOCKICON"],
self.footer
], [
[u"", u"", u"~", u"#", u"{", u"[", u"|", u"`", u"\\", u"^", u"@", u"]", u"}", u"BACKSPACEICON"],
[u"FIRSTICON", u"", u"", u"\u20AC", u"", u"", u"", u"", u"", u"", u"", u"", u"\u00A4", u""],
[u"LASTICON", u"", u"", u"", u"", u"", u"", u"", u"", u"", u"", u"", self.green, self.green],
[u"CAPSLOCKICON", u"", u"", u"", u"", u"", u"", u"", u"", u"", u"", u"", u"CAPSLOCKICON", u"CAPSLOCKICON"],
self.footer
], [
[u"", u"", u"\u00E2", u"\u00EA", u"\u00EE", u"\u00F4", u"\u00FB", u"\u00E4", u"\u00EB", u"\u00EF", u"\u00F6", u"\u00FC", u"", u"BACKSPACEICON"],
[u"FIRSTICON", u"", u"\u00E0", u"\u00E8", u"\u00EC", u"\u00F2", u"\u00F9", u"\u00E1", u"\u00E9", u"\u00ED", u"\u00F3", u"\u00FA", u"", u""],
[u"LASTICON", u"", u"\u00C2", u"\u00CA", u"\u00CE", u"\u00D4", u"\u00DB", u"\u00C4", u"\u00CB", u"\u00CF", u"\u00D6", u"\u00DC", self.green, self.green],
[u"CAPSLOCKICON", u"", u"\u00C0", u"\u00C8", u"\u00CC", u"\u00D2", u"\u00D9", u"\u00C1", u"\u00C9", u"\u00CD", u"\u00D3", u"\u00DA", u"CAPSLOCKICON", u"CAPSLOCKICON"],
self.footer
]
]
self.german = [
[
[u"^", u"1", u"2", u"3", u"4", u"5", u"6", u"7", u"8", u"9", u"0", u"\u00DF", u"'", u"BACKSPACEICON"],
[u"FIRSTICON", u"q", u"w", u"e", u"r", u"t", u"z", u"u", u"i", u"o", u"p", u"\u00FC", u"+", u"#"],
[u"LASTICON", u"a", u"s", u"d", u"f", u"g", u"h", u"j", u"k", u"l", u"\u00F6", u"\u00E4", self.green, self.green],
				[u"CAPSLOCKICON", u"<", u"y", u"x", u"c", u"v", u"b", u"n", u"m", u",", u".", u"-", u"CAPSLOCKICON", u"CAPSLOCKICON"],
self.footer
], [
[u"\u00B0", u"!", u"\"", u"\u00A7", u"$", u"%", u"&", u"/", u"(", u")", u"=", u"?", u"`", u"BACKSPACEICON"],
[u"FIRSTICON", u"Q", u"W", u"E", u"R", u"T", u"Z", u"U", u"I", u"O", u"P", u"\u00DC", u"*", u"'"],
[u"LASTICON", u"A", u"S", u"D", u"F", u"G", u"H", u"J", u"K", u"L", u"\u00D6", u"\u00C4", self.green, self.green],
				[u"CAPSLOCKICON", u">", u"Y", u"X", u"C", u"V", u"B", u"N", u"M", u";", u":", u"_", u"CAPSLOCKICON", u"CAPSLOCKICON"],
self.footer
], [
[u"", u"", u"\u00B2", u"\u00B3", u"", u"", u"", u"{", u"[", u"]", u"}", u"\\", u"\u1E9E", u"BACKSPACEICON"],
[u"FIRSTICON", u"@", u"", u"\u20AC", u"", u"", u"", u"", u"", u"", u"", u"", u"~", u""],
[u"LASTICON", u"", u"", u"", u"", u"", u"", u"", u"", u"", u"", u"", self.green, self.green],
[u"CAPSLOCKICON", u"|", u"", u"", u"", u"", u"", u"", u"\u00B5", u"", u"", u"", u"CAPSLOCKICON", u"CAPSLOCKICON"],
self.footer
]
]
self.greek = [
[
[u"`", u"1", u"2", u"3", u"4", u"5", u"6", u"7", u"8", u"9", u"0", u"-", u"=", u"BACKSPACEICON"],
[u"FIRSTICON", u";", u"\u03C2", u"\u03B5", u"\u03C1", u"\u03C4", u"\u03C5", u"\u03B8", u"\u03B9", u"\u03BF", u"\u03C0", u"[", u"]", u"\\"],
[u"LASTICON", u"\u03B1", u"\u03C3", u"\u03B4", u"\u03C6", u"\u03B3", u"\u03B7", u"\u03BE", u"\u03BA", u"\u03BB", u"\u0384", u"'", self.green, self.green],
				[u"CAPSLOCKICON", u"<", u"\u03B6", u"\u03C7", u"\u03C8", u"\u03C9", u"\u03B2", u"\u03BD", u"\u03BC", u",", u".", u"/", u"CAPSLOCKICON", u"CAPSLOCKICON"],
self.footer
], [
[u"~", u"!", u"@", u"#", u"$", u"%", u"^", u"&", u"*", u"(", u")", u"_", u"+", u"BACKSPACEICON"],
[u"FIRSTICON", u":", u"\u0385", u"\u0395", u"\u03A1", u"\u03A4", u"\u03A5", u"\u0398", u"\u0399", u"\u039F", u"\u03A0", u"{", u"}", u"|"],
[u"LASTICON", u"\u0391", u"\u03A3", u"\u0394", u"\u03A6", u"\u0393", u"\u0397", u"\u039E", u"\u039A", u"\u039B", u"\u00A8", u"\"", self.green, self.green],
[u"CAPSLOCKICON", u">", u"\u0396", u"\u03A7", u"\u03A8", u"\u03A9", u"\u0392", u"\u039D", u"\u039C", u"<", u">", u"?", u"CAPSLOCKICON", u"CAPSLOCKICON"],
self.footer
], [
[u"", u"", u"\u00B2", u"\u00B3", u"\u00A3", u"\u00A7", u"\u00B6", u"", u"\u00A4", u"\u00A6", u"\u00B0", u"\u00B1", u"\u00BD", u"BACKSPACEICON"],
[u"FIRSTICON", u"", u"\u03AC", u"\u03AD", u"\u03AE", u"\u03AF", u"\u03CC", u"\u03CD", u"\u03CE", u"\u03CA", u"\u03CB", u"\u00AB", u"\u00BB", u"\u00AC"],
[u"LASTICON", u"", u"\u0386", u"\u0388", u"\u0389", u"\u038A", u"\u038C", u"\u038E", u"\u038F", u"\u03AA", u"\u03AB", u"\u0385", self.green, self.green],
[u"CAPSLOCKICON", u"CAPSLOCKICON", u"", u"", u"", u"\u00A9", u"\u00AE", u"\u20AC", u"\u00A5", u"\u0390", u"\u03B0", u"\u0387", u"CAPSLOCKICON", u"CAPSLOCKICON"],
self.footer
]
]
self.latvian = [
[
[u"", u"1", u"2", u"3", u"4", u"5", u"6", u"7", u"8", u"9", u"0", u"-", u"f", u"BACKSPACEICON"],
[u"FIRSTICON", u"\u016B", u"g", u"j", u"r", u"m", u"v", u"n", u"z", u"\u0113", u"\u010D", u"\u017E", u"h", u"\u0137"],
[u"LASTICON", u"\u0161", u"u", u"s", u"i", u"l", u"d", u"a", u"t", u"e", u"c", u"\u00B4", self.green, self.green],
[u"CAPSLOCKICON", u"\u0123", u"\u0146", u"b", u"\u012B", u"k", u"p", u"o", u"\u0101", u",", u".", u"\u013C", u"CAPSLOCKICON", u"CAPSLOCKICON"],
self.footer
], [
[u"?", u"!", u"\u00AB", u"\u00BB", u"$", u"%", u"/", u"&", u"\u00D7", u"(", u")", u"_", u"F", u"BACKSPACEICON"],
[u"FIRSTICON", u"\u016A", u"G", u"J", u"R", u"M", u"V", u"N", u"Z", u"\u0112", u"\u010C", u"\u017D", u"H", u"\u0136"],
[u"LASTICON", u"\u0160", u"U", u"S", u"I", u"L", u"D", u"A", u"T", u"E", u"C", u"\u00B0", self.green, self.green],
[u"CAPSLOCKICON", u"\u0122", u"\u0145", u"B", u"\u012A", u"K", u"P", u"O", u"\u0100", u";", u":", u"\u013B", u"CAPSLOCKICON", u"CAPSLOCKICON"],
self.footer
], [
[u"", u"\u00AB", u"", u"", u"\u20AC", u"\"", u"'", u"", u":", u"", u"", u"\u2013", u"=", u"BACKSPACEICON"],
[u"FIRSTICON", u"q", u"\u0123", u"", u"\u0157", u"w", u"y", u"", u"", u"", u"", u"[", u"]", u""],
[u"LASTICON", u"", u"", u"", u"", u"", u"", u"", u"", u"\u20AC", u"", u"\u00B4", self.green, self.green],
[u"CAPSLOCKICON", u"\\", u"", u"x", u"", u"\u0137", u"", u"\u00F5", u"", u"<", u">", u"", u"CAPSLOCKICON", u"CAPSLOCKICON"],
self.footer
], [
[u"", u"", u"@", u"#", u"$", u"~", u"^", u"\u00B1", u"", u"", u"", u"\u2014", u";", u"BACKSPACEICON"],
[u"FIRSTICON", u"Q", u"\u0122", u"", u"\u0156", u"W", u"Y", u"", u"", u"", u"", u"{", u"}", u""],
[u"LASTICON", u"", u"", u"", u"", u"", u"", u"", u"", u"", u"", u"\u00A8", self.green, self.green],
[u"CAPSLOCKICON", u"|", u"", u"X", u"", u"\u0136", u"", u"\u00D5", u"", u"", u"", u"", u"CAPSLOCKICON", u"CAPSLOCKICON"],
self.footer
]
]
self.russian = [
[
[u"\u0451", u"1", u"2", u"3", u"4", u"5", u"6", u"7", u"8", u"9", u"0", u"-", u"=", u"BACKSPACEICON"],
[u"FIRSTICON", u"\u0439", u"\u0446", u"\u0443", u"\u043A", u"\u0435", u"\u043D", u"\u0433", u"\u0448", u"\u0449", u"\u0437", u"\u0445", u"\u044A", u"\\"],
[u"LASTICON", u"\u0444", u"\u044B", u"\u0432", u"\u0430", u"\u043F", u"\u0440", u"\u043E", u"\u043B", u"\u0434", u"\u0436", u"\u044D", self.green, self.green],
[u"CAPSLOCKICON", u"\\", u"\u044F", u"\u0447", u"\u0441", u"\u043C", u"\u0438", u"\u0442", u"\u044C", u"\u0431", u"\u044E", u".", u"CAPSLOCKICON", u"CAPSLOCKICON"],
self.footer
], [
[u"\u0401", u"!", u"\"", u"\u2116", u";", u"%", u":", u"?", u"*", u"(", u")", u"_", u"+", u"BACKSPACEICON"],
[u"FIRSTICON", u"\u0419", u"\u0426", u"\u0423", u"\u041A", u"\u0415", u"\u041D", u"\u0413", u"\u0428", u"\u0429", u"\u0417", u"\u0425", u"\u042A", u"/"],
[u"LASTICON", u"\u0424", u"\u042B", u"\u0412", u"\u0410", u"\u041F", u"\u0420", u"\u041E", u"\u041B", u"\u0414", u"\u0416", u"\u042D", self.green, self.green],
[u"CAPSLOCKICON", u"/", u"\u042F", u"\u0427", u"\u0421", u"\u041C", u"\u0418", u"\u0422", u"\u042C", u"\u0411", u"\u042E", u",", u"CAPSLOCKICON", u"CAPSLOCKICON"],
self.footer
], [
[u"", u"", u"", u"", u"", u"", u"", u"", u"", u"", u"", u"", u"", u"BACKSPACEICON"],
[u"FIRSTICON", u"", u"\u00A7", u"@", u"#", u"&", u"$", u"\u20BD", u"\u20AC", u"", u"", u"", u"", u""],
[u"LASTICON", u"", u"<", u">", u"[", u"]", u"{", u"}", u"", u"", u"", u"", self.green, self.green],
[u"CAPSLOCKICON", u"", u"", u"", u"", u"", u"", u"", u"", u"", u"", u"", u"CAPSLOCKICON", u"CAPSLOCKICON"],
self.footer
]
]
self.scandinavian = [
[
[u"\u00A7", u"1", u"2", u"3", u"4", u"5", u"6", u"7", u"8", u"9", u"0", u"+", u"\u00B4", u"BACKSPACEICON"],
[u"FIRSTICON", u"q", u"w", u"e", u"r", u"t", u"y", u"u", u"i", u"o", u"p", u"\u00E5", u"\u00A8", u"'"],
[u"LASTICON", u"a", u"s", u"d", u"f", u"g", u"h", u"j", u"k", u"l", u"\u00F6", u"\u00E4", self.green, self.green],
				[u"CAPSLOCKICON", u"<", u"z", u"x", u"c", u"v", u"b", u"n", u"m", u",", u".", u"-", u"CAPSLOCKICON", u"CAPSLOCKICON"],
self.footer
], [
[u"\u00BD", u"!", u"\"", u"#", u"\u00A4", u"%", u"&", u"/", u"(", u")", u"=", u"?", u"`", u"BACKSPACEICON"],
[u"FIRSTICON", u"Q", u"W", u"E", u"R", u"T", u"Y", u"U", u"I", u"O", u"P", u"\u00C5", u"^", u"*"],
[u"LASTICON", u"A", u"S", u"D", u"F", u"G", u"H", u"J", u"K", u"L", u"\u00D6", u"\u00C4", self.green, self.green],
[u"CAPSLOCKICON", u">", u"Z", u"X", u"C", u"V", u"B", u"N", u"M", u";", u":", u"_", u"CAPSLOCKICON", u"CAPSLOCKICON"],
self.footer
], [
[u"", u"", u"@", u"\u00A3", u"$", u"\u20AC", u"", u"{", u"[", u"]", u"}", u"\\", u"", u"BACKSPACEICON"],
[u"FIRSTICON", u"", u"", u"\u20AC", u"", u"", u"", u"", u"", u"", u"", u"", u"~", u""],
[u"LASTICON", u"", u"", u"", u"", u"", u"", u"", u"", u"", u"", u"", self.green, self.green],
[u"CAPSLOCKICON", u"|", u"", u"", u"", u"", u"", u"", u"\u00B5", u"", u"", u"", u"CAPSLOCKICON", u"CAPSLOCKICON"],
self.footer
], [
[u"", u"\u00E2", u"\u00EA", u"\u00EE", u"\u00F4", u"\u00FB", u"\u00E4", u"\u00EB", u"\u00EF", u"\u00F6", u"\u00FC", u"\u00E3", u"", u"BACKSPACEICON"],
[u"FIRSTICON", u"\u00E0", u"\u00E8", u"\u00EC", u"\u00F2", u"\u00F9", u"\u00E1", u"\u00E9", u"\u00ED", u"\u00F3", u"\u00FA", u"\u00F5", u"", u""],
[u"LASTICON", u"\u00C2", u"\u00CA", u"\u00CE", u"\u00D4", u"\u00DB", u"\u00C4", u"\u00CB", u"\u00CF", u"\u00D6", u"\u00DC", u"\u00C3", self.green, self.green],
[u"CAPSLOCKICON", u"\u00C0", u"\u00C8", u"\u00CC", u"\u00D2", u"\u00D9", u"\u00C1", u"\u00C9", u"\u00CD", u"\u00D3", u"\u00DA", u"\u00D5", u"CAPSLOCKICON", u"CAPSLOCKICON"],
self.footer
]
]
self.spanish = [
[
[u"\u00BA", u"1", u"2", u"3", u"4", u"5", u"6", u"7", u"8", u"9", u"0", u"'", u"\u00A1", u"BACKSPACEICON"],
[u"FIRSTICON", u"q", u"w", u"e", u"r", u"t", u"y", u"u", u"i", u"o", u"p", u"`", u"+", u"\u00E7"],
[u"LASTICON", u"a", u"s", u"d", u"f", u"g", u"h", u"j", u"k", u"l", u"\u00F1", u"\u00B4", self.green, self.green], # [, ]
				[u"CAPSLOCKICON", u"<", u"z", u"x", u"c", u"v", u"b", u"n", u"m", u",", u".", u"-", u"CAPSLOCKICON", u"CAPSLOCKICON"],
self.footer
], [
[u"\u00AA", u"!", u"\"", u"\u00B7", u"$", u"%", u"&", u"/", u"(", u")", u"=", u"?", u"\u00BF", u"BACKSPACEICON"],
[u"FIRSTICON", u"Q", u"W", u"E", u"R", u"T", u"Y", u"U", u"I", u"O", u"P", u"^", u"*", u"\u00C7"],
[u"LASTICON", u"A", u"S", u"D", u"F", u"G", u"H", u"J", u"K", u"L", u"\u00D1", u"\u00A8", self.green, self.green], # {, }
[u"CAPSLOCKICON", u">", u"Z", u"X", u"C", u"V", u"B", u"N", u"M", u";", u":", u"_", u"CAPSLOCKICON", u"CAPSLOCKICON"],
self.footer
], [
[u"\\", u"|", u"@", u"#", u"~", u"\u20AC", u"\u00AC", u"", u"", u"", u"", u"", u"", u"BACKSPACEICON"],
[u"FIRSTICON", u"", u"\u00E1", u"\u00E9", u"\u00ED", u"\u00F3", u"\u00FA", u"\u00FC", u"", u"", u"[", u"]", u"", u""],
[u"LASTICON", u"", u"\u00C1", u"\u00C9", u"\u00CD", u"\u00D3", u"\u00DA", u"\u00DC", u"", u"", u"{", u"}", self.green, self.green],
[u"CAPSLOCKICON", u"", u"", u"", u"", u"", u"", u"", u"", u"", u"", u"", u"CAPSLOCKICON", u"CAPSLOCKICON"],
self.footer
]
]
self.thai = [
[
[u"", u"", u"\u0E45", u"\u0E20", u"\u0E16", u"\u0E38", u"\u0E36", u"\u0E04", u"\u0E15", u"\u0E08", u"\u0E02", u"\u0E0A", u"", u"BACKSPACEICON"],
[u"FIRSTICON", u"\u0E46", u"\u0E44", u"\u0E33", u"\u0E1E", u"\u0E30", u"\u0E31", u"\u0E35", u"\u0E23", u"\u0E19", u"\u0E22", u"\u0E1A", u"\u0E25", u""],
[u"LASTICON", u"\u0E1F", u"\u0E2B", u"\u0E01", u"\u0E14", u"\u0E40", u"\u0E49", u"\u0E48", u"\u0E32", u"\u0E2A", u"\u0E27", u"\u0E07", u"\u0E03", self.green],
[u"CAPSLOCKICON", u"CAPSLOCKICON", u"\u0E1C", u"\u0E1B", u"\u0E41", u"\u0E2D", u"\u0E34", u"\u0E37", u"\u0E17", u"\u0E21", u"\u0E43", u"\u0E1D", u"CAPSLOCKICON", u"CAPSLOCKICON"],
self.footer
], [
[u"", u"", u"\u0E51", u"\u0E52", u"\u0E53", u"\u0E54", u"\u0E39", u"\u0E55", u"\u0E56", u"\u0E57", u"\u0E58", u"\u0E59", u"", u"BACKSPACEICON"],
[u"FIRSTICON", u"\u0E50", u"", u"\u0E0E", u"\u0E11", u"\u0E18", u"\u0E4D", u"\u0E4A", u"\u0E13", u"\u0E2F", u"\u0E0D", u"\u0E10", u"\u0E05", u""],
[u"LASTICON", u"\u0E24", u"\u0E06", u"\u0E0F", u"\u0E42", u"\u0E0C", u"\u0E47", u"\u0E4B", u"\u0E29", u"\u0E28", u"\u0E0B", u"", u"\u0E3F", self.green],
[u"CAPSLOCKICON", u"CAPSLOCKICON", u"", u"\u0E09", u"\u0E2E", u"\u0E3A", u"\u0E4C", u"", u"\u0E12", u"\u0E2C", u"\u0E26", u"", u"CAPSLOCKICON", u"CAPSLOCKICON"],
self.footer
]
]
self.locales = {
"ar_BH": [_("Arabic"), _("Bahrain"), self.arabic(self.english)],
"ar_EG": [_("Arabic"), _("Egypt"), self.arabic(self.english)],
"ar_JO": [_("Arabic"), _("Jordan"), self.arabic(self.english)],
"ar_KW": [_("Arabic"), _("Kuwait"), self.arabic(self.english)],
"ar_LB": [_("Arabic"), _("Lebanon"), self.arabic(self.english)],
"ar_OM": [_("Arabic"), _("Oman"), self.arabic(self.english)],
"ar_QA": [_("Arabic"), _("Qatar"), self.arabic(self.english)],
"ar_SA": [_("Arabic"), _("Saudi Arabia"), self.arabic(self.english)],
"ar_SY": [_("Arabic"), _("Syrian Arab Republic"), self.arabic(self.english)],
"ar_AE": [_("Arabic"), _("United Arab Emirates"), self.arabic(self.english)],
"ar_YE": [_("Arabic"), _("Yemen"), self.arabic(self.english)],
"cs_CZ": [_("Czech"), _("Czechia"), self.czech],
"nl_NL": [_("Dutch"), _("Netherlands"), self.dutch(self.english)],
"en_AU": [_("English"), _("Australian"), self.english],
"en_GB": [_("English"), _("United Kingdom"), self.unitedKingdom(self.english)],
"en_US": [_("English"), _("United States"), self.english],
"en_EN": [_("English"), _("Various"), self.english],
"et_EE": [_("Estonian"), _("Estonia"), self.estonian(self.scandinavian)],
"fi_FI": [_("Finnish"), _("Finland"), self.scandinavian],
"fr_BE": [_("French"), _("Belgian"), self.belgian(self.french)],
"fr_FR": [_("French"), _("France"), self.french],
"fr_CH": [_("French"), _("Switzerland"), self.frenchSwiss(self.german)],
"de_DE": [_("German"), _("Germany"), self.german],
"de_CH": [_("German"), _("Switzerland"), self.germanSwiss(self.german)],
"el_GR": [_("Greek"), _("Greece"), self.greek],
"hu_HU": [_("Hungarian"), _("Hungary"), self.hungarian(self.german)],
"lv_01": [_("Latvian"), _("Alternative 1"), self.latvianStandard(self.english)],
"lv_02": [_("Latvian"), _("Alternative 2"), self.latvian],
"lv_LV": [_("Latvian"), _("Latvia"), self.latvianQWERTY(self.english)],
"lt_LT": [_("Lithuanian"), _("Lithuania"), self.lithuanian(self.english)],
"nb_NO": [_("Norwegian"), _("Norway"), self.norwegian(self.scandinavian)],
"fa_IR": [_("Persian"), _("Iran, Islamic Republic"), self.persian(self.english)],
"pl_01": [_("Polish"), _("Alternative"), self.polish(self.german)],
"pl_PL": [_("Polish"), _("Poland"), self.polishProgrammers(self.english)],
"ru_RU": [_("Russian"), _("Russian Federation"), self.russian],
"sk_SK": [_("Slovak"), _("Slovakia"), self.slovak(self.german)],
"es_ES": [_("Spanish"), _("Spain"), self.spanish],
"sv_SE": [_("Swedish"), _("Sweden"), self.scandinavian],
"th_TH": [_("Thai"), _("Thailand"), self.thai],
"uk_01": [_("Ukrainian"), _("Russian"), self.ukranian(self.russian)],
"uk_UA": [_("Ukrainian"), _("Ukraine"), self.ukranianEnhanced(self.russian)]
}
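		# setLocale() falls back to "en_EN" (English - Various) whenever the
		# current language code is not a key in this dictionary.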
self["actions"] = HelpableNumberActionMap(self, "VirtualKeyBoardActions", {
"cancel": (self.cancel, _("Cancel any text changes and exit")),
"save": (self.save, _("Save / Enter text and exit")),
"shift": (self.shiftSelected, _("Select the virtual keyboard shifted character set for the next character only")),
"capsLock": (self.capsLockSelected, _("Select the virtual keyboard shifted character set")),
"select": (self.processSelect, _("Select the character or action under the virtual keyboard cursor")),
"locale": (self.localeMenu, _("Select the virtual keyboard locale from a menu")),
"up": (self.up, _("Move the virtual keyboard cursor up")),
"left": (self.left, _("Move the virtual keyboard cursor left")),
"right": (self.right, _("Move the virtual keyboard cursor right")),
"down": (self.down, _("Move the virtual keyboard cursor down")),
"first": (self.cursorFirst, _("Move the text buffer cursor to the first character")),
"prev": (self.cursorLeft, _("Move the text buffer cursor left")),
"next": (self.cursorRight, _("Move the text buffer cursor right")),
"last": (self.cursorLast, _("Move the text buffer cursor to the last character")),
"backspace": (self.backSelected, _("Delete the character to the left of text buffer cursor")),
"delete": (self.forwardSelected, _("Delete the character under the text buffer cursor")),
"toggleOverwrite": (self.keyToggleOW, _("Toggle new text inserts before or overwrites existing text")),
"1": (self.keyNumberGlobal, _("Number or SMS style data entry")),
"2": (self.keyNumberGlobal, _("Number or SMS style data entry")),
"3": (self.keyNumberGlobal, _("Number or SMS style data entry")),
"4": (self.keyNumberGlobal, _("Number or SMS style data entry")),
"5": (self.keyNumberGlobal, _("Number or SMS style data entry")),
"6": (self.keyNumberGlobal, _("Number or SMS style data entry")),
"7": (self.keyNumberGlobal, _("Number or SMS style data entry")),
"8": (self.keyNumberGlobal, _("Number or SMS style data entry")),
"9": (self.keyNumberGlobal, _("Number or SMS style data entry")),
"0": (self.keyNumberGlobal, _("Number or SMS style data entry")),
"gotAsciiCode": (self.keyGotAscii, _("Keyboard data entry"))
}, -2, description=_("Virtual KeyBoard Functions"))
self.lang = language.getLanguage()
self["prompt"] = Label(prompt)
self["text"] = Input(text=text, maxSize=maxSize, visible_width=visible_width, type=type, currPos=len(text.decode("utf-8", "ignore")) if currPos is None else currPos, allMarked=allMarked)
self["list"] = VirtualKeyBoardList([])
self["mode"] = Label(_("INS"))
self["locale"] = Label(_("Locale") + ": " + self.lang)
self["language"] = Label(_("Language") + ": " + self.lang)
self["key_info"] = StaticText(_("INFO"))
self["key_red"] = StaticText(_("Exit"))
self["key_green"] = StaticText(_(greenLabel))
self["key_yellow"] = StaticText(_("Shift"))
self["key_blue"] = StaticText(self.shiftMsgs[1])
self["key_help"] = StaticText(_("HELP"))
width, height = skin.parameters.get("VirtualKeyBoard", (45, 45))
if self.bg_l is None or self.bg_m is None or self.bg_r is None:
self.width = width
self.height = height
else:
self.width = self.bg_l.size().width() + self.bg_m.size().width() + self.bg_r.size().width()
self.height = self.bg_m.size().height()
# Alignment -> (Horizontal, Vertical):
# Horizontal alignment: 0=Auto, 1=Left, 2=Center, 3=Right (Auto=Left on left, Center on middle, Right on right).
# Vertical alignment: 0=Auto, 1=Top, 2=Center, 3=Bottom (Auto=Center).
self.alignment = skin.parameters.get("VirtualKeyBoardAlignment", (0, 0))
# Padding -> (Left/Right, Top/Botton) in pixels
self.padding = skin.parameters.get("VirtualKeyBoardPadding", (4, 4))
# Text color for each shift level. (Ensure there is a color for each shift level!)
self.shiftColors = skin.parameters.get("VirtualKeyBoardShiftColors", (0x00ffffff, 0x00ffffff, 0x0000ffff, 0x00ff00ff))
self.language = None
self.location = None
self.keyList = []
self.shiftLevels = 0
self.shiftLevel = 0
self.shiftHold = -1
self.keyboardWidth = 0
self.keyboardHeight = 0
self.maxKey = 0
self.overwrite = False
self.selectedKey = None
self.sms = NumericalTextInput(self.smsGotChar)
self.smsChar = None
self.setLocale()
self.onExecBegin.append(self.setKeyboardModeAscii)
self.onLayoutFinish.append(self.buildVirtualKeyBoard)
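	# A typical invocation from another screen (sketch; "myCallback" is a
	# hypothetical handler that receives the entered text, or None if the
	# user cancels):
	#     self.session.openWithCallback(myCallback, VirtualKeyBoard,
	#             title=_("Enter a name:"), text=oldName)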
def arabic(self, base):
keyList = copy.deepcopy(base)
keyList[1][0][8] = u"\u066D"
keyList.extend([[
[u"\u0630", u"\u0661", u"\u0662", u"\u0663", u"\u0664", u"\u0665", u"\u0666", u"\u0667", u"\u0668", u"\u0669", u"\u0660", u"-", u"=", u"BACKSPACEICON"],
[u"FIRSTICON", u"\u0636", u"\u0635", u"\u062B", u"\u0642", u"\u0641", u"\u063A", u"\u0639", u"\u0647", u"\u062E", u"\u062D", u"\u062C", u"\u062F", u"\\"],
[u"LASTICON", u"\u0634", u"\u0633", u"\u064A", u"\u0628", u"\u0644", u"\u0627", u"\u062A", u"\u0646", u"\u0645", u"\u0643", u"\u0637", self.green, self.green],
[u"CAPSLOCKICON", u"CAPSLOCKICON", u"\u0626", u"\u0621", u"\u0624", u"\u0631", u"\uFEFB", u"\u0649", u"\u0629", u"\u0648", u"\u0632", u"\u0638", u"CAPSLOCKICON", u"CAPSLOCKICON"],
self.footer
], [
[u"\u0651", u"!", u"@", u"#", u"$", u"%", u"^", u"&", u"\u066D", u"(", u")", u"_", u"+", u"BACKSPACEICON"],
[u"FIRSTICON", u"\u0636", u"\u0635", u"\u062B", u"\u0642", u"\u0641", u"\u063A", u"\u0639", u"\u00F7", u"\u00D7", u"\u061B", u">", u"<", u"|"],
[u"LASTICON", u"\u0634", u"\u0633", u"\u064A", u"\u0628", u"\u0644", u"\u0623", u"\u0640", u"\u060C", u"/", u":", u"\"", self.green, self.green],
[u"CAPSLOCKICON", u"CAPSLOCKICON", u"\u0626", u"\u0621", u"\u0624", u"\u0631", u"\uFEF5", u"\u0622", u"\u0629", u",", u".", u"\u061F", u"CAPSLOCKICON", u"CAPSLOCKICON"],
self.footer
]])
return keyList
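	# Locale methods such as arabic() above and those below all follow the
	# same pattern: deep-copy a base layout, patch individual cells via
	# keyList[shiftLevel][row][column], optionally append or delete whole
	# shift levels, then return the result.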
def belgian(self, base):
keyList = copy.deepcopy(base)
keyList[0][0][6] = u"\u00A7"
keyList[0][0][8] = u"!"
keyList[0][0][12] = u"-"
keyList[0][1][13] = u"\u00B5"
keyList[0][3][11] = u"="
keyList[1][0][0] = u"\u00B3"
keyList[1][0][12] = u"_"
keyList[1][1][11] = u"\u00A8"
keyList[1][1][12] = u"*"
keyList[1][1][13] = u"\u00A3"
keyList[1][3][11] = u"+"
keyList[2][0] = [u"", u"|", u"@", u"#", u"{", u"[", u"^", u"", u"", u"{", u"}", u"", u"", u"BACKSPACEICON"]
keyList[2][1][11] = u"["
keyList[2][1][12] = u"]"
keyList[2][1][13] = u"`"
keyList[2][2][11] = u"\u00B4"
keyList[2][3][1] = u"\\"
keyList[2][3][11] = u"~"
return keyList
def dutch(self, base):
keyList = copy.deepcopy(base)
keyList[0][0][0] = u"@"
keyList[0][0][11] = u"/"
keyList[0][0][12] = u"\u00B0"
keyList[0][1][11] = u"\u00A8"
keyList[0][1][12] = u"*"
keyList[0][1][13] = u"<"
keyList[0][2][10] = u"+"
keyList[0][2][11] = u"\u00B4"
keyList[0][3] = [u"CAPSLOCKICON", u"]", u"z", u"x", u"c", u"v", u"b", u"n", u"m", u",", u".", u"-", u"CAPSLOCKICON", u"CAPSLOCKICON"]
keyList[1][0] = [u"\u00A7", u"!", u"\"", u"#", u"$", u"%", u"&", u"_", u"(", u")", u"'", u"?", u"~", u"BACKSPACEICON"]
keyList[1][1][11] = u"^"
keyList[1][1][12] = u"|"
keyList[1][1][13] = u">"
keyList[1][2][10] = u"\u00B1"
keyList[1][2][11] = u"`"
keyList[1][3] = [u"CAPSLOCKICON", u"[", u"Z", u"X", u"C", u"V", u"B", u"N", u"M", u";", u":", u"=", u"CAPSLOCKICON", u"CAPSLOCKICON"]
keyList.append([
[u"\u00AC", u"\u00B9", u"\u00B2", u"\u00B3", u"\u00BC", u"\u00BD", u"\u00BE", u"\u00A3", u"{", u"}", u"", u"\\", u"\u00B8", u"BACKSPACEICON"],
[u"FIRSTICON", u"", u"", u"\u20AC", u"\u00B6", u"", u"\u00E1", u"\u00E9", u"\u00ED", u"\u00F3", u"\u00FA", u"", u"", u""],
[u"LASTICON", u"", u"\u00DF", u"", u"", u"", u"\u00C1", u"\u00C9", u"\u00CD", u"\u00D3", u"\u00DA", u"", self.green, self.green],
[u"CAPSLOCKICON", u"\u00A6", u"\u00AB", u"\u00BB", u"\u00A2", u"", u"", u"", u"\u00B5", u"", u"\u00B7", u"", u"CAPSLOCKICON", u"CAPSLOCKICON"],
self.footer
])
return keyList
def estonian(self, base):
keyList = copy.deepcopy(base)
keyList[0][0][0] = u"\u02C7"
keyList[0][1][11] = u"\u00FC"
keyList[0][1][12] = u"\u00F5"
keyList[1][0][0] = u"~"
keyList[1][1][11] = u"\u00DC"
keyList[1][1][12] = u"\u00D5"
keyList[2][1][12] = u"\u00A7"
keyList[2][1][13] = u"\u00BD"
keyList[2][2][2] = u"\u0161"
keyList[2][2][3] = u"\u0160"
keyList[2][2][11] = u"^"
keyList[2][3][2] = u"\u017E"
keyList[2][3][3] = u"\u017D"
keyList[2][3][8] = u""
del keyList[3]
return keyList
def frenchSwiss(self, base):
keyList = self.germanSwiss(base)
keyList[0][0][11] = u"'"
keyList[0][0][12] = u"^"
keyList[0][1][11] = u"\u00E8"
keyList[0][2][10] = u"\u00E9"
keyList[0][2][11] = u"\u00E0"
keyList[1][1][11] = u"\u00FC"
keyList[1][2][10] = u"\u00F6"
keyList[1][2][11] = u"\u00E4"
return keyList
def germanSwiss(self, base):
keyList = copy.deepcopy(base)
keyList[0][0][0] = u"\u00A7"
keyList[0][0][11] = u"'"
keyList[0][0][12] = u"^"
keyList[0][1][12] = u"\u00A8"
keyList[0][1][13] = u"$"
keyList[1][0][1] = u"+"
keyList[1][0][3] = u"*"
keyList[1][0][4] = u"\u00E7"
keyList[1][0][11] = u"?"
keyList[1][0][12] = u"`"
keyList[1][1][11] = u"\u00E8"
keyList[1][1][12] = u"!"
keyList[1][1][13] = u"\u00A3"
keyList[1][2][10] = u"\u00E9"
keyList[1][2][11] = u"\u00E0"
keyList[2][0] = [u"", u"\u00A6", u"@", u"#", u"\u00B0", u"\u00A7", u"\u00AC", u"|", u"\u00A2", u"", u"", u"\u00B4", u"~", u"BACKSPACEICON"]
keyList[2][1][1] = u""
keyList[2][1][9] = u"\u00DC"
keyList[2][1][10] = u"\u00C8"
keyList[2][1][11] = u"["
keyList[2][1][12] = u"]"
keyList[2][2][6] = u"\u00D6"
keyList[2][2][7] = u"\u00C9"
keyList[2][2][8] = u"\u00C4"
keyList[2][2][9] = u"\u00C0"
keyList[2][2][10] = u"{"
keyList[2][2][11] = u"}"
keyList[2][3][1] = u"\\"
keyList[2][3][8] = u""
return keyList
def hungarian(self, base):
keyList = copy.deepcopy(base)
keyList[0][0][0] = u"0"
keyList[0][0][10] = u"\u00F6"
keyList[0][0][11] = u"\u00FC"
keyList[0][0][12] = u"\u00F3"
keyList[0][1][11] = u"\u0151"
keyList[0][1][12] = u"\u00FA"
keyList[0][1][13] = u"\u0171"
keyList[0][2][10] = u"\u00E9"
keyList[0][2][11] = u"\u00E1"
keyList[0][3][1] = u"\u00ED"
		keyList[1][0] = [u"\u00A7", u"'", u"\"", u"+", u"!", u"%", u"/", u"=", u"(", u")", u"\u00D6", u"\u00DC", u"\u00D3", u"BACKSPACEICON"]
keyList[1][1][11] = u"\u0150"
keyList[1][1][12] = u"\u00DA"
keyList[1][1][13] = u"\u0170"
keyList[1][2][10] = u"\u00C9"
keyList[1][2][11] = u"\u00C1"
keyList[1][3][1] = u"\u00CD"
keyList[1][3][9] = u"?"
del keyList[2]
keyList.append([
[u"", u"~", u"\u02C7", u"^", u"\u02D8", u"\u00B0", u"\u02DB", u"`", u"\u02D9", u"\u00B4", u"\u02DD", u"\u00A8", u"\u00B8", u"BACKSPACEICON"],
[u"FIRSTICON", u"\\", u"|", u"\u00C4", u"", u"", u"", u"\u20AC", u"\u00CD", u"", u"", u"\u00F7", u"\u00D7", u"\u00A4"],
[u"LASTICON", u"\u00E4", u"\u0111", u"\u0110", u"[", u"]", u"", u"\u00ED", u"\u0142", u"\u0141", u"$", u"\u00DF", self.green, self.green],
[u"CAPSLOCKICON", u"<", u">", u"#", u"&", u"@", u"{", u"}", u"<", u";", u">", u"*", u"CAPSLOCKICON", u"CAPSLOCKICON"],
self.footer
])
return keyList
def latvianQWERTY(self, base):
keyList = self.latvianStandard(base)
keyList[0][1][13] = u"\u00B0"
keyList[2][1][9] = u"\u00F5"
keyList[3][1][9] = u"\u00D5"
return keyList
def latvianStandard(self, base):
keyList = copy.deepcopy(base)
keyList[0][3][1] = u"\\"
keyList[1][3][1] = u"|"
keyList.append([
[u"", u"", u"\u00AB", u"\u00BB", u"\u20AC", u"", u"\u2019", u"", u"", u"", u"", u"\u2013", u"", u"BACKSPACEICON"],
[u"FIRSTICON", u"", u"", u"\u0113", u"\u0157", u"", u"", u"\u016B", u"\u012B", u"\u014D", u"", u"", u"", u""],
[u"LASTICON", u"\u0101", u"\u0161", u"", u"", u"\u0123", u"", u"", u"\u0137", u"\u013C", u"", u"\u00B4", self.green, self.green],
[u"CAPSLOCKICON", u"", u"\u017E", u"", u"\u010D", u"", u"", u"\u0146", u"", u"", u"", u"", u"CAPSLOCKICON", u"CAPSLOCKICON"],
self.footer
])
keyList.append([
[u"", u"", u"", u"", u"\u00A7", u"\u00B0", u"", u"\u00B1", u"\u00D7", u"", u"", u"\u2014", u"", u"BACKSPACEICON"],
[u"FIRSTICON", u"", u"", u"\u0112", u"\u0156", u"", u"", u"\u016A", u"\u012A", u"\u014C", u"", u"", u"", u""],
[u"LASTICON", u"\u0100", u"\u0160", u"", u"", u"\u0122", u"", u"", u"\u0136", u"\u013B", u"", u"\u00A8", self.green, self.green],
[u"CAPSLOCKICON", u"", u"\u017D", u"", u"\u010C", u"", u"", u"\u0145", u"", u"", u"", u"", u"CAPSLOCKICON", u"CAPSLOCKICON"],
self.footer
])
return keyList
def lithuanian(self, base):
keyList = copy.deepcopy(base)
keyList[0][0] = [u"`", u"\u0105", u"\u010D", u"\u0119", u"\u0117", u"\u012F", u"\u0161", u"\u0173", u"\u016B", u"9", u"0", u"-", u"\u017E", u"BACKSPACEICON"]
keyList[0][3][1] = u"\\"
keyList[1][0] = [u"~", u"\u0104", u"\u010C", u"\u0118", u"\u0116", u"\u012E", u"\u0160", u"\u0172", u"\u016A", u"(", u")", u"_", u"\u017D", u"BACKSPACEICON"]
keyList[1][3][1] = u"|"
keyList.append([
[u"", u"1", u"2", u"3", u"4", u"5", u"6", u"7", u"8", u"9", u"0", u"", u"=", u"BACKSPACEICON"],
[u"FIRSTICON", u"!", u"@", u"#", u"$", u"%", u"^", u"&", u"*", u"", u"", u"", u"+", u""],
[u"LASTICON", u"", u"", u"\u20AC", u"", u"", u"", u"", u"", u"", u"", u"", self.green, self.green],
[u"CAPSLOCKICON", u"", u"", u"", u"", u"", u"", u"", u"", u"", u"", u"", u"CAPSLOCKICON", u"CAPSLOCKICON"],
self.footer
])
return keyList
def norwegian(self, base):
keyList = copy.deepcopy(base)
keyList[0][0][0] = u"|"
keyList[0][0][12] = u"\\"
keyList[0][2][10] = u"\u00F8"
keyList[0][2][11] = u"\u00E6"
keyList[1][0][0] = u"\u00A7"
keyList[1][2][10] = u"\u00D8"
keyList[1][2][11] = u"\u00C6"
keyList[2][0][11] = u""
keyList[2][0][12] = u"\u00B4"
keyList[2][3][1] = u""
return keyList
def persian(self, base):
keyList = copy.deepcopy(base)
keyList.append([
[u"\u00F7", u"\u06F1", u"\u06F2", u"\u06F3", u"\u06F4", u"\u06F5", u"\u06F6", u"\u06F7", u"\u06F8", u"\u06F9", u"\u06F0", u"-", u"=", u"BACKSPACEICON"],
[u"FIRSTICON", u"\u0636", u"\u0635", u"\u062B", u"\u0642", u"\u0641", u"\u063A", u"\u0639", u"\u0647", u"\u062E", u"\u062D", u"\u062C", u"\u0686", u"\u067E"],
[u"LASTICON", u"\u0634", u"\u0633", u"\u06CC", u"\u0628", u"\u0644", u"\u0627", u"\u062A", u"\u0646", u"\u0645", u"\u06A9", u"\u06AF", self.green, self.green],
[u"CAPSLOCKICON", u"\u0649", u"\u0638", u"\u0637", u"\u0632", u"\u0631", u"\u0630", u"\u062F", u"\u0626", u"\u0648", u".", u"/", u"CAPSLOCKICON", u"CAPSLOCKICON"],
self.footer
])
keyList.append([
[u"\u00D7", u"!", u"@", u"#", u"$", u"%", u"^", u"&", u"*", u")", u"(", u"_", u"+", u"BACKSPACEICON"],
[u"FIRSTICON", u"\u064B", u"\u064C", u"\u064D", u"\u0631", u"\u060C", u"\u061B", u",", u"]", u"[", u"\\", u"}", u"{", u"|"],
[u"LASTICON", u"\u064E", u"\u064F", u"\u0650", u"\u0651", u"\u06C0", u"\u0622", u"\u0640", u"\u00AB", u"\u00BB", u":", u"\"", self.green, self.green],
[u"CAPSLOCKICON", u"|", u"\u0629", u"\u064A", u"\u0698", u"\u0624", u"\u0625", u"\u0623", u"\u0621", u"<", u">", u"\u061F", u"CAPSLOCKICON", u"CAPSLOCKICON"],
self.footer
])
return keyList
def polish(self, base):
keyList = copy.deepcopy(base)
keyList[0][0][0] = u"\u02DB"
keyList[0][0][11] = u"+"
keyList[0][1][11] = u"\u017C"
keyList[0][1][12] = u"\u015B"
keyList[0][1][13] = u"\u00F3"
keyList[0][2][10] = u"\u0142"
keyList[0][2][11] = u"\u0105"
keyList[1][0][0] = u"\u00B7"
keyList[1][0][3] = u"#"
keyList[1][0][4] = u"\u00A4"
keyList[1][0][12] = u"*"
keyList[1][1][11] = u"\u0144"
keyList[1][1][12] = u"\u0107"
keyList[1][1][13] = u"\u017A"
keyList[1][2][10] = u"\u0141"
keyList[1][2][11] = u"\u0119"
del keyList[2]
keyList.append([
[u"", u"~", u"\u02C7", u"^", u"\u02D8", u"\u00B0", u"\u02DB", u"`", u"\u00B7", u"\u00B4", u"\u02DD", u"\u00A8", u"\u00B8", u"BACKSPACEICON"],
[u"FIRSTICON", u"\\", u"\u00A6", u"", u"\u017B", u"\u015A", u"\u00D3", u"\u20AC", u"\u0143", u"\u0106", u"\u0179", u"\u00F7", u"\u00D7", u""],
[u"LASTICON", u"", u"\u0111", u"\u0110", u"", u"", u"", u"", u"\u0104", u"\u0118", u"$", u"\u00DF", self.green, self.green],
[u"CAPSLOCKICON", u"", u"", u"", u"", u"@", u"{", u"}", u"\u00A7", u"<", u">", u"", u"CAPSLOCKICON", u"CAPSLOCKICON"],
self.footer
])
return keyList
def polishProgrammers(self, base):
keyList = copy.deepcopy(base)
keyList[0][3][1] = u"\\"
keyList[1][3][1] = u"|"
keyList.append([
[u"", u"", u"", u"", u"", u"", u"", u"", u"", u"", u"", u"", u"", u"BACKSPACEICON"],
[u"FIRSTICON", u"", u"", u"\u0119", u"\u0118", u"", u"", u"\u20AC", u"", u"\u00F3", u"\u00D3", u"", u"", u""],
[u"LASTICON", u"\u0105", u"\u0104", u"\u015B", u"\u015A", u"", u"", u"", u"", u"\u0142", u"\u0141", u"", self.green, self.green],
[u"CAPSLOCKICON", u"\u017C", u"\u017B", u"\u017A", u"\u0179", u"\u0107", u"\u0106", u"\u0144", u"\u0143", u"", u"", u"", u"CAPSLOCKICON", u"CAPSLOCKICON"],
self.footer
])
return keyList
def slovak(self, base):
keyList = copy.deepcopy(base)
keyList[0][0] = [u";", u"+", u"\u013E", u"\u0161", u"\u010D", u"\u0165", u"\u017E", u"\u00FD", u"\u00E1", u"\u00ED", u"\u00E9", u"=", u"\u00B4", u"BACKSPACEICON"]
keyList[0][1][11] = u"\u00FA"
keyList[0][1][12] = u"\u00E4"
keyList[0][1][13] = u"\u0148"
keyList[0][2][10] = u"\u00F4"
keyList[0][2][11] = u"\u00A7"
keyList[0][3][1] = u"&"
keyList[1][0] = [u"\u00B0", u"1", u"2", u"3", u"4", u"5", u"6", u"7", u"8", u"9", u"0", u"%", u"\u02C7", u"BACKSPACEICON"]
keyList[1][1][11] = u"/"
keyList[1][1][12] = u"("
keyList[1][1][13] = u")"
keyList[1][2][10] = u"\""
keyList[1][2][11] = u"!"
keyList[1][3][1] = u"*"
keyList[1][3][9] = u"?"
del keyList[2]
keyList.append([
[u"", u"~", u"\u02C7", u"^", u"\u02D8", u"\u00B0", u"\u02DB", u"`", u"\u02D9", u"\u00B4", u"\u02DD", u"\u00A8", u"\u00B8", u"BACKSPACEICON"],
[u"FIRSTICON", u"\\", u"|", u"\u20AC", u"", u"", u"", u"", u"", u"", u"'", u"\u00F7", u"\u00D7", u"\u00A4"],
[u"LASTICON", u"", u"\u0111", u"\u0110", u"[", u"]", u"", u"", u"\u0142", u"\u0141", u"$", u"\u00DF", self.green, self.green],
[u"CAPSLOCKICON", u"<", u">", u"#", u"&", u"@", u"{", u"}", u"", u"<", u">", u"*", u"CAPSLOCKICON", u"CAPSLOCKICON"],
self.footer
])
return keyList
def ukranian(self, base):
keyList = copy.deepcopy(base)
keyList[0][1][12] = u"\u0457"
keyList[0][1][13] = u"\\"
keyList[0][2][11] = u"\u0454"
keyList[0][2][2] = u"\u0456"
keyList[0][3][1] = u"\u0491"
keyList[1][1][12] = u"\u0407"
keyList[1][1][13] = u"/"
keyList[1][2][11] = u"\u0404"
keyList[1][2][2] = u"\u0406"
keyList[1][3][1] = u"\u0490"
return keyList
def ukranianEnhanced(self, base):
keyList = self.ukranian(base)
keyList[0][0][0] = u"\u0027"
keyList[1][0][0] = u"\u20B4"
return keyList
def unitedKingdom(self, base):
keyList = copy.deepcopy(base)
keyList[0][1][13] = u"#"
keyList[0][3] = [u"CAPSLOCKICON", u"\\", u"z", u"x", u"c", u"v", u"b", u"n", u"m", u",", u".", u"/", u"CAPSLOCKICON", u"CAPSLOCKICON"]
keyList[0][4] = copy.copy(self.footer)
keyList[0][4][10] = u"\u00A6"
keyList[1][0][0] = u"\u00AC"
keyList[1][0][2] = u"\""
keyList[1][0][3] = u"\u00A3"
keyList[1][1][13] = u"~"
keyList[1][2][11] = u"@"
keyList[1][3] = [u"CAPSLOCKICON", u"|", u"Z", u"X", u"C", u"V", u"B", u"N", u"M", u"<", u">", u"?", u"CAPSLOCKICON", u"CAPSLOCKICON"]
keyList[1][4] = copy.copy(self.footer)
keyList[1][4][10] = u"\u20AC"
return keyList
def smsGotChar(self):
if self.smsChar and self.selectAsciiKey(self.smsChar):
self.processSelect()
def setLocale(self):
self.language, self.location, self.keyList = self.locales.get(self.lang, [None, None, None])
if self.language is None or self.location is None or self.keyList is None:
self.lang = "en_EN"
self.language = _("English")
self.location = _("Various")
self.keyList = self.english
self.shiftLevel = 0
self["locale"].setText(_("Locale") + ": " + self.lang + " (" + self.language + " - " + self.location + ")")
def buildVirtualKeyBoard(self):
self.shiftLevels = len(self.keyList) # Check the current shift level is available / valid in this layout.
if self.shiftLevel >= self.shiftLevels:
self.shiftLevel = 0
self.keyboardWidth = len(self.keyList[self.shiftLevel][0]) # Determine current keymap size.
self.keyboardHeight = len(self.keyList[self.shiftLevel])
self.maxKey = self.keyboardWidth * (self.keyboardHeight - 1) + len(self.keyList[self.shiftLevel][-1]) - 1
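		# maxKey is the zero-based index of the last key: all keys in the full
		# rows plus those in the (possibly shorter) last row.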
# print "[VirtualKeyBoard] DEBUG: Width=%d, Height=%d, Keys=%d, maxKey=%d, shiftLevels=%d" % (self.keyboardWidth, self.keyboardHeight, self.maxKey + 1, self.maxKey, self.shiftLevels)
self.index = 0
self.list = []
for keys in self.keyList[self.shiftLevel]: # Process all the buttons in this shift level.
self.list.append(self.virtualKeyBoardEntryComponent(keys))
self.previousSelectedKey = None
if self.selectedKey is None: # Start on the first character of the second row (EXIT button).
self.selectedKey = self.keyboardWidth
self.markSelectedKey()
def virtualKeyBoardEntryComponent(self, keys):
res = [keys]
text = []
offset = 14 - self.keyboardWidth # 14 represents the maximum buttons per row as defined here and in the skin (14 x self.width).
x = self.width * offset / 2
if offset % 2:
x += self.width / 2
xHighlight = x
prevKey = None
for key in keys:
if key != prevKey:
xData = x + self.padding[0]
start, width = self.findStartAndWidth(self.index)
if self.bg_l is None or self.bg_m is None or self.bg_r is None: # If available display the cell background.
x += self.width * width
else:
w = self.bg_l.size().width()
res.append(MultiContentEntryPixmapAlphaBlend(pos=(x, 0), size=(w, self.height), png=self.bg_l))
x += w
w = self.bg_m.size().width() + (self.width * (width - 1))
					res.append(MultiContentEntryPixmapAlphaBlend(pos=(x, 0), size=(w, self.height), png=self.bg_m, flags=BT_SCALE))
x += w
w = self.bg_r.size().width()
res.append(MultiContentEntryPixmapAlphaBlend(pos=(x, 0), size=(w, self.height), png=self.bg_r))
x += w
highlight = self.keyHighlights.get(key.upper(), (None, None, None)) # Check if the cell needs to be highlighted.
if highlight[0] is None or highlight[1] is None or highlight[2] is None: # If available display the cell highlight.
xHighlight += self.width * width
else:
w = highlight[0].size().width()
res.append(MultiContentEntryPixmapAlphaBlend(pos=(xHighlight, 0), size=(w, self.height), png=highlight[0]))
xHighlight += w
w = highlight[1].size().width() + (self.width * (width - 1))
					res.append(MultiContentEntryPixmapAlphaBlend(pos=(xHighlight, 0), size=(w, self.height), png=highlight[1], flags=BT_SCALE))
xHighlight += w
w = highlight[2].size().width()
res.append(MultiContentEntryPixmapAlphaBlend(pos=(xHighlight, 0), size=(w, self.height), png=highlight[2]))
xHighlight += w
if self.alignment[0] == 1: # Determine the cell alignment.
alignH = RT_HALIGN_LEFT
elif self.alignment[0] == 2:
alignH = RT_HALIGN_CENTER
elif self.alignment[0] == 3:
alignH = RT_HALIGN_RIGHT
else:
if start == 0 and width > 1:
alignH = RT_HALIGN_LEFT
elif start + width == self.keyboardWidth and width > 1:
alignH = RT_HALIGN_RIGHT
else:
alignH = RT_HALIGN_CENTER
if self.alignment[1] == 1:
alignV = RT_VALIGN_TOP
elif self.alignment[1] == 3:
alignV = RT_VALIGN_BOTTOM
else:
alignV = RT_VALIGN_CENTER
w = (width * self.width) - (self.padding[0] * 2) # Determine the cell data area.
h = self.height - (self.padding[1] * 2)
image = self.keyImages[self.shiftLevel].get(key, None) # Check if the cell contains an image.
if image: # Display the cell image.
left = xData
wImage = image.size().width()
if alignH == RT_HALIGN_CENTER:
left += (w - wImage) / 2
elif alignH == RT_HALIGN_RIGHT:
left += w - wImage
top = self.padding[1]
hImage = image.size().height()
if alignV == RT_VALIGN_CENTER:
top += (h - hImage) / 2
elif alignV == RT_VALIGN_BOTTOM:
top += h - hImage
res.append(MultiContentEntryPixmapAlphaBlend(pos=(left, top), size=(wImage, hImage), png=image))
# print "[VirtualKeyBoard] DEBUG: Left=%d, Top=%d, Width=%d, Height=%d, Image Width=%d, Image Height=%d" % (left, top, w, h, wImage, hImage)
else: # Display the cell text.
if len(key) > 1: # NOTE: UTF8 / Unicode glyphs only count as one character here.
text.append(MultiContentEntryText(pos=(xData, self.padding[1]), size=(w, h), font=1, flags=alignH | alignV, text=key.encode("utf-8"), color=self.shiftColors[self.shiftLevel]))
else:
text.append(MultiContentEntryText(pos=(xData, self.padding[1]), size=(w, h), font=0, flags=alignH | alignV, text=key.encode("utf-8"), color=self.shiftColors[self.shiftLevel]))
prevKey = key
self.index += 1
return res + text
def markSelectedKey(self):
if self.sel_l is None or self.sel_m is None or self.sel_r is None:
return
if self.previousSelectedKey is not None:
del self.list[self.previousSelectedKey / self.keyboardWidth][-3:]
if self.selectedKey > self.maxKey:
self.selectedKey = self.maxKey
start, width = self.findStartAndWidth(self.selectedKey)
x = start * self.width
w = self.sel_l.size().width()
self.list[self.selectedKey / self.keyboardWidth].append(MultiContentEntryPixmapAlphaBlend(pos=(x, 0), size=(w, self.height), png=self.sel_l))
x += w
w = self.sel_m.size().width() + (self.width * (width - 1))
		self.list[self.selectedKey / self.keyboardWidth].append(MultiContentEntryPixmapAlphaBlend(pos=(x, 0), size=(w, self.height), png=self.sel_m, flags=BT_SCALE))
x += w
w = self.sel_r.size().width()
self.list[self.selectedKey / self.keyboardWidth].append(MultiContentEntryPixmapAlphaBlend(pos=(x, 0), size=(w, self.height), png=self.sel_r))
self.previousSelectedKey = self.selectedKey
self["list"].setList(self.list)
def findStartAndWidth(self, key):
if key > self.maxKey:
key = self.maxKey
row = key / self.keyboardWidth
key = key % self.keyboardWidth
start = key
while start:
if self.keyList[self.shiftLevel][row][start - 1] != self.keyList[self.shiftLevel][row][key]:
break
start -= 1
max = len(self.keyList[self.shiftLevel][row])
width = 1
while width <= max:
if start + width >= max or self.keyList[self.shiftLevel][row][start + width] != self.keyList[self.shiftLevel][row][key]:
break
width += 1
# print "[VirtualKeyBoard] DEBUG: Key='%s', Position=%d, Start=%d, Width=%d" % (self.keyList[self.shiftLevel][row][key], key, start, width)
return (start, width)
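	# findStartAndWidth() collapses a run of identical entries into one wide
	# key: e.g. the two adjacent self.green cells in a row give width == 2, so
	# backgrounds, highlights and the selection marker span both columns.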
def processSelect(self):
self.smsChar = None
text = self.keyList[self.shiftLevel][self.selectedKey / self.keyboardWidth][self.selectedKey % self.keyboardWidth].encode("UTF-8")
cmd = self.cmds.get(text.upper(), None)
if cmd is None:
			self['text'].char(text)  # text was already encoded to UTF-8 above; encoding it again would break non-ASCII keys
else:
exec(cmd)
if text not in (u"SHIFT", u"SHIFTICON") and self.shiftHold != -1:
self.shiftRestore()
def cancel(self):
self.close(None)
def save(self):
self.close(self["text"].getText())
def localeMenu(self):
languages = []
for locale, data in self.locales.iteritems():
languages.append((data[0] + " - " + data[1] + " (" + locale + ")", locale))
languages = sorted(languages)
index = 0
default = 0
for item in languages:
if item[1] == self.lang:
default = index
break
index += 1
self.session.openWithCallback(self.localeMenuCallback, ChoiceBox, _("Available locales are:"), list=languages, selection=default, keys=[])
def localeMenuCallback(self, choice):
if choice:
self.lang = choice[1]
self.setLocale()
self.buildVirtualKeyBoard()
def shiftSelected(self):
if self.shiftHold == -1:
self.shiftHold = self.shiftLevel
self.capsLockSelected()
def capsLockSelected(self):
self.shiftLevel = (self.shiftLevel + 1) % self.shiftLevels
self.shiftCommon()
def shiftCommon(self):
self.smsChar = None
nextLevel = (self.shiftLevel + 1) % self.shiftLevels
self["key_blue"].setText(self.shiftMsgs[nextLevel])
self.buildVirtualKeyBoard()
def shiftRestore(self):
self.shiftLevel = self.shiftHold
self.shiftHold = -1
self.shiftCommon()
def keyToggleOW(self):
self["text"].toggleOverwrite()
self.overwrite = not self.overwrite
if self.overwrite:
self["mode"].setText(_("OVR"))
else:
self["mode"].setText(_("INS"))
def backSelected(self):
self["text"].deleteBackward()
def forwardSelected(self):
self["text"].deleteForward()
def cursorFirst(self):
self["text"].home()
def cursorLeft(self):
self["text"].left()
def cursorRight(self):
self["text"].right()
def cursorLast(self):
self["text"].end()
def up(self):
self.smsChar = None
self.selectedKey -= self.keyboardWidth
if self.selectedKey < 0:
self.selectedKey = self.maxKey / self.keyboardWidth * self.keyboardWidth + self.selectedKey % self.keyboardWidth
if self.selectedKey > self.maxKey:
self.selectedKey -= self.keyboardWidth
self.markSelectedKey()
def left(self):
self.smsChar = None
start, width = self.findStartAndWidth(self.selectedKey)
if width > 1:
width = self.selectedKey % self.keyboardWidth - start + 1
self.selectedKey = self.selectedKey / self.keyboardWidth * self.keyboardWidth + (self.selectedKey + self.keyboardWidth - width) % self.keyboardWidth
if self.selectedKey > self.maxKey:
self.selectedKey = self.maxKey
self.markSelectedKey()
def right(self):
self.smsChar = None
start, width = self.findStartAndWidth(self.selectedKey)
if width > 1:
width = start + width - self.selectedKey % self.keyboardWidth
self.selectedKey = self.selectedKey / self.keyboardWidth * self.keyboardWidth + (self.selectedKey + width) % self.keyboardWidth
if self.selectedKey > self.maxKey:
self.selectedKey = self.selectedKey / self.keyboardWidth * self.keyboardWidth
self.markSelectedKey()
def down(self):
self.smsChar = None
self.selectedKey += self.keyboardWidth
if self.selectedKey > self.maxKey:
self.selectedKey %= self.keyboardWidth
self.markSelectedKey()
def keyNumberGlobal(self, number):
self.smsChar = self.sms.getKey(number)
self.selectAsciiKey(self.smsChar)
def keyGotAscii(self):
self.smsChar = None
		if self.selectAsciiKey(unichr(getPrevAsciiCode()).encode("utf-8")):
self.processSelect()
def selectAsciiKey(self, char):
if char == u" ":
char = SPACE
self.shiftLevel = -1
		for keyList in self.keyList:
self.shiftLevel = (self.shiftLevel + 1) % self.shiftLevels
self.buildVirtualKeyBoard()
selkey = 0
for keys in keyList:
for key in keys:
if key == char:
self.selectedKey = selkey
self.markSelectedKey()
return True
selkey += 1
return False
| IanSav/enigma2 | lib/python/Screens/VirtualKeyBoard.py | Python | gpl-2.0 | 61,006 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'AsyncTask'
db.create_table('async_task', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('type', self.gf('django.db.models.fields.CharField')(max_length=25)),
('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
('last_message', self.gf('django.db.models.fields.CharField')(max_length=255)),
('status', self.gf('django.db.models.fields.CharField')(max_length=25, db_index=True)),
('create_user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('create_time', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('update_time', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
))
db.send_create_signal('async_task', ['AsyncTask'])
def backwards(self, orm):
# Deleting model 'AsyncTask'
db.delete_table('async_task')
models = {
'async_task.asynctask': {
'Meta': {'object_name': 'AsyncTask', 'db_table': "'async_task'"},
'create_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'create_user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_message': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '25', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'update_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['async_task'] | EduPepperPDTesting/pepper2013-testing | common/djangoapps/async_task/migrations/0001_initial.py | Python | agpl-3.0 | 5,286 |
from datetime import datetime
from django.forms.widgets import (DateInput, DateTimeInput, Select,
SelectMultiple, TimeInput)
from django.utils.safestring import mark_safe
class StyledSelect(Select):
def render(self, name, value, attrs=None, renderer=None):
try:
select_render = super(StyledSelect, self).render(
name, value, attrs, renderer)
except TypeError: # versions older than Django 1.11
select_render = super(StyledSelect, self).render(
name, value, attrs)
return mark_safe('<div class="styled-select">{}</div>'.format(
select_render))
class Selectize(Select):
def __init__(self, attrs, choices):
attrs['js-selectize'] = True
super(Selectize, self).__init__(attrs, choices)
class SelectizeMultiple(SelectMultiple):
def __init__(self, attrs, choices):
attrs['js-selectize-multiple'] = True
super(SelectizeMultiple, self).__init__(attrs, choices)
class SelectizeAutoComplete(Select):
def __init__(self, attrs, choices, url):
attrs['js-selectize-autocomplete'] = True
attrs['data-url'] = url
super(SelectizeAutoComplete, self).__init__(attrs, choices)
class DateTimePickerInput(DateTimeInput):
def __init__(self, attrs, format):
attrs['js-datetimepicker'] = True
super(DateTimePickerInput, self).__init__(attrs, format)
def get_context(self, name, value, attrs):
context = super(DateTimePickerInput, self).get_context(name, value,
attrs)
if not value:
value = datetime.now()
context['widget']['attrs']['data-datetime'] =\
value.strftime('%m/%d/%Y %I:%M %p')
return context
class DatePickerInput(DateInput):
def __init__(self, attrs, format):
attrs['js-datepicker'] = True
super(DatePickerInput, self).__init__(attrs, format)
def get_context(self, name, value, attrs):
context = super(DatePickerInput, self).get_context(name, value, attrs)
if not value:
value = datetime.now()
context['widget']['attrs']['data-date'] =\
value.strftime('%m/%d/%Y')
return context
class TimePickerInput(TimeInput):
def __init__(self, attrs, format):
attrs['js-timepicker'] = True
super(TimePickerInput, self).__init__(attrs, format)
def get_context(self, name, value, attrs):
context = super(TimePickerInput, self).get_context(name, value, attrs)
if not value:
value = datetime.now()
context['widget']['attrs']['data-time'] =\
value.strftime('%I:%M %p')
return context
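# A hedged usage sketch (not part of this module): wiring these widgets into a
# Django form. The form and field names below are illustrative assumptions.
#
#     from django import forms
#
#     class EventForm(forms.Form):
#         starts_on = forms.DateField(
#             widget=DatePickerInput(attrs={}, format='%m/%d/%Y'))
#         starts_at = forms.TimeField(
#             widget=TimePickerInput(attrs={}, format='%I:%M %p'))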
| dgbc/django-arctic | arctic/widgets.py | Python | mit | 2,774 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from unittest import skipIf
from django.core.exceptions import ValidationError
from django.db import connection, models
from django.test import SimpleTestCase, TestCase
from django.utils.functional import lazy
from .models import Post
class TestCharField(TestCase):
def test_max_length_passed_to_formfield(self):
"""
CharField passes its max_length attribute to form fields created using
the formfield() method.
"""
cf1 = models.CharField()
cf2 = models.CharField(max_length=1234)
self.assertIsNone(cf1.formfield().max_length)
self.assertEqual(1234, cf2.formfield().max_length)
def test_lookup_integer_in_charfield(self):
self.assertEqual(Post.objects.filter(title=9).count(), 0)
@skipIf(connection.vendor == 'mysql', 'See https://code.djangoproject.com/ticket/18392')
def test_emoji(self):
p = Post.objects.create(title='Smile 😀', body='Whatever.')
p.refresh_from_db()
self.assertEqual(p.title, 'Smile 😀')
class ValidationTests(SimpleTestCase):
def test_charfield_raises_error_on_empty_string(self):
f = models.CharField()
with self.assertRaises(ValidationError):
f.clean('', None)
def test_charfield_cleans_empty_string_when_blank_true(self):
f = models.CharField(blank=True)
self.assertEqual('', f.clean('', None))
def test_charfield_with_choices_cleans_valid_choice(self):
f = models.CharField(max_length=1, choices=[('a', 'A'), ('b', 'B')])
self.assertEqual('a', f.clean('a', None))
def test_charfield_with_choices_raises_error_on_invalid_choice(self):
f = models.CharField(choices=[('a', 'A'), ('b', 'B')])
with self.assertRaises(ValidationError):
f.clean('not a', None)
def test_charfield_get_choices_with_blank_defined(self):
f = models.CharField(choices=[('', '<><>'), ('a', 'A')])
self.assertEqual(f.get_choices(True), [('', '<><>'), ('a', 'A')])
def test_charfield_get_choices_doesnt_evaluate_lazy_strings(self):
# Regression test for #23098
# Will raise ZeroDivisionError if lazy is evaluated
lazy_func = lazy(lambda x: 0 / 0, int)
f = models.CharField(choices=[(lazy_func('group'), (('a', 'A'), ('b', 'B')))])
self.assertEqual(f.get_choices(True)[0], ('', '---------'))
def test_charfield_raises_error_on_empty_input(self):
f = models.CharField(null=False)
with self.assertRaises(ValidationError):
f.clean(None, None)
| nemesisdesign/django | tests/model_fields/test_charfield.py | Python | bsd-3-clause | 2,630 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Audio.encoding'
db.delete_column(u'multimedia_audio', 'encoding')
# Deleting field 'Audio.encoded'
db.delete_column(u'multimedia_audio', 'encoded')
# Deleting field 'Video.encoding'
db.delete_column(u'multimedia_video', 'encoding')
# Deleting field 'Video.encoded'
db.delete_column(u'multimedia_video', 'encoded')
def backwards(self, orm):
# Adding field 'Audio.encoding'
db.add_column(u'multimedia_audio', 'encoding',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'Audio.encoded'
db.add_column(u'multimedia_audio', 'encoded',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'Video.encoding'
db.add_column(u'multimedia_video', 'encoding',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'Video.encoded'
db.add_column(u'multimedia_video', 'encoded',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'multimedia.audio': {
'Meta': {'object_name': 'Audio'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'profiles': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['multimedia.EncodeProfile']", 'symmetrical': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'multimedia.encodeprofile': {
'Meta': {'object_name': 'EncodeProfile'},
'command': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'container': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'multimedia.remotestorage': {
'Meta': {'object_name': 'RemoteStorage'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'media_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'profile': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['multimedia.EncodeProfile']"})
},
u'multimedia.video': {
'Meta': {'object_name': 'Video'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'profiles': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['multimedia.EncodeProfile']", 'symmetrical': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['multimedia'] | teury/django-multimedia | multimedia/south_migrations/0020_auto__del_field_audio_encoding__del_field_audio_encoded__del_field_vid.py | Python | bsd-3-clause | 7,755 |
"""
Saucebrush data sources convert data in some format into python dicts.
All sources must implement the iterable interface and return python
dictionaries.
"""
from __future__ import unicode_literals
import string
from saucebrush import utils
class CSVSource(object):
""" Saucebrush source for reading from CSV files.
Takes an open csvfile, an optional set of fieldnames and optional number
of rows to skip.
CSVSource(open('test.csv')) will read a csvfile, using the first row as
the field names.
CSVSource(open('test.csv'), ('name', 'phone', 'address'), 1) will read
in a CSV file and treat the three columns as name, phone, and address,
ignoring the first row (presumed to be column names).
"""
def __init__(self, csvfile, fieldnames=None, skiprows=0, **kwargs):
import csv
self._dictreader = csv.DictReader(csvfile, fieldnames, **kwargs)
for _ in range(skiprows):
next(self._dictreader)
def __iter__(self):
return self._dictreader
class FixedWidthFileSource(object):
""" Saucebrush source for reading from fixed width field files.
FixedWidthFileSource expects an open fixed width file and a tuple
of fields with their lengths. There is also an optional fillchars
command that is the filler characters to strip from the end of each
field. (defaults to whitespace)
FixedWidthFileSource(open('testfile'), (('name',30), ('phone',12)))
will read in a fixed width file where the first 30 characters of each
line are part of a name and the characters 31-42 are a phone number.
"""
def __init__(self, fwfile, fields, fillchars=string.whitespace):
self._fwfile = fwfile
self._fields_dict = {}
self._fillchars = fillchars
from_offset = 0
to_offset = 0
for field, size in fields:
to_offset += size
self._fields_dict[field] = (from_offset, to_offset)
from_offset += size
def __iter__(self):
return self
def __next__(self):
line = next(self._fwfile)
record = {}
for name, range_ in self._fields_dict.items():
record[name] = line[range_[0]:range_[1]].rstrip(self._fillchars)
return record
def next(self):
""" Keep Python 2 next() method that defers to __next__().
"""
return self.__next__()
class HtmlTableSource(object):
""" Saucebrush source for reading data from an HTML table.
HtmlTableSource expects an open html file, the id of the table or a
number indicating which table on the page to use, an optional fieldnames
tuple, and an optional number of rows to skip.
HtmlTableSource(open('test.html'), 0) opens the first HTML table and
uses the first row as the names of the columns.
HtmlTableSource(open('test.html'), 'people', ('name','phone'), 1) opens
the HTML table with an id of 'people' and names the two columns
name and phone, skipping the first row where alternate names are
stored.
"""
def __init__(self, htmlfile, id_or_num, fieldnames=None, skiprows=0):
# extract the table
from lxml.html import parse
doc = parse(htmlfile).getroot()
if isinstance(id_or_num, int):
table = doc.cssselect('table')[id_or_num]
else:
table = doc.cssselect('table#%s' % id_or_num)
table = table[0] # get the first table
        # skip the requested number of rows; the first remaining row may
        # hold the column names
self._rows = table.cssselect('tr')[skiprows:]
# determine the fieldnames
if not fieldnames:
self._fieldnames = [td.text_content()
for td in self._rows[0].cssselect('td, th')]
skiprows += 1
else:
self._fieldnames = fieldnames
        # re-select the rows, also skipping the header row if it was consumed
self._rows = table.cssselect('tr')[skiprows:]
def process_tr(self):
for row in self._rows:
strings = [td.text_content() for td in row.cssselect('td')]
yield dict(zip(self._fieldnames, strings))
def __iter__(self):
return self.process_tr()
class DjangoModelSource(object):
""" Saucebrush source for reading data from django models.
DjangoModelSource expects a django settings file, app label, and model
name. The resulting records contain all columns in the table for the
specified model.
DjangoModelSource('settings.py', 'phonebook', 'friend') would read all
friends from the friend model in the phonebook app described in
settings.py.
"""
def __init__(self, dj_settings, app_label, model_name):
dbmodel = utils.get_django_model(dj_settings, app_label, model_name)
# only get values defined in model (no extra fields from custom manager)
self._data = dbmodel.objects.values(*[f.name
for f in dbmodel._meta.fields])
def __iter__(self):
return iter(self._data)
class MongoDBSource(object):
""" Source for reading from a MongoDB database.
The record dict is populated with records matching the spec
from the specified database and collection.
"""
def __init__(self, database, collection, spec=None, host='localhost', port=27017, conn=None):
if not conn:
from pymongo.connection import Connection
conn = Connection(host, port)
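            # NOTE: pymongo.connection.Connection is the legacy (pymongo < 3)
            # API; with modern pymongo, pass a pymongo.MongoClient via conn.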
self.collection = conn[database][collection]
self.spec = spec
def __iter__(self):
return self._find_spec()
def _find_spec(self):
for doc in self.collection.find(self.spec):
yield dict(doc)
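# A hedged usage sketch (assumes a reachable mongod with existing data; the
# database, collection, and spec below are illustrative):
#
#     for record in MongoDBSource('mydb', 'people', spec={'active': True}):
#         print(record)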
# dict_factory for sqlite source
def dict_factory(cursor, row):
    d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
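# e.g. for "SELECT name, phone FROM people", dict_factory turns the row
# ('Alice', '555-1234') into {'name': 'Alice', 'phone': '555-1234'}.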
class SqliteSource(object):
""" Source that reads from a sqlite database.
The record dict is populated with the results from the
query argument. If given, args will be passed to the query
when executed.
"""
def __init__(self, dbpath, query, args=None, conn_params=None):
import sqlite3
self._dbpath = dbpath
self._query = query
self._args = args or []
self._conn_params = conn_params or []
# setup connection
self._conn = sqlite3.connect(self._dbpath)
self._conn.row_factory = dict_factory
if self._conn_params:
for param, value in self._conn_params.items():
setattr(self._conn, param, value)
def _process_query(self):
cursor = self._conn.cursor()
for row in cursor.execute(self._query, self._args):
yield row
cursor.close()
def __iter__(self):
return self._process_query()
def done(self):
self._conn.close()
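# A hedged usage sketch (assumes an existing SQLite file with a `people`
# table; the path and query are illustrative):
#
#     source = SqliteSource('people.db',
#                           'SELECT * FROM people WHERE age > ?', [18])
#     for record in source:
#         print(record)
#     source.done()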
class FileSource(object):
""" Base class for sources which read from one or more files.
Takes as input a file-like, a file path, a list of file-likes,
or a list of file paths.
"""
def __init__(self, input):
self._input = input
def __iter__(self):
# This method would be a lot cleaner with the proposed
# 'yield from' expression (PEP 380)
if hasattr(self._input, '__read__') or hasattr(self._input, 'read'):
for record in self._process_file(self._input):
yield record
elif isinstance(self._input, str):
with open(self._input) as f:
for record in self._process_file(f):
yield record
elif hasattr(self._input, '__iter__'):
for el in self._input:
if isinstance(el, str):
with open(el) as f:
for record in self._process_file(f):
yield record
elif hasattr(el, '__read__') or hasattr(el, 'read'):
                    for record in self._process_file(el):  # 'el' is already an open file-like object
                        yield record
def _process_file(self, file):
raise NotImplementedError('Descendants of FileSource should implement'
' a custom _process_file method.')
class JSONSource(FileSource):
""" Source for reading from JSON files.
When processing JSON files, if the top-level object is a list, will
yield each member separately. Otherwise, yields the top-level
object.
"""
def _process_file(self, f):
import json
obj = json.load(f)
# If the top-level JSON object in the file is a list
# then yield each element separately; otherwise, yield
# the top-level object.
if isinstance(obj, list):
for record in obj:
yield record
else:
yield obj
class XMLSource(FileSource):
""" Source for reading from XML files. Use with the same kind of caution
that you use to approach anything written in XML.
When processing XML files, if the top-level object is a list, will
yield each member separately, unless the dotted path to a list is
included. you can also do this with a SubrecordFilter, but XML is
almost never going to be useful at the top level.
"""
def __init__(self, input, node_path=None, attr_prefix='ATTR_',
postprocessor=None):
super(XMLSource, self).__init__(input)
        self.node_list = node_path.split('.') if node_path else None
self.attr_prefix = attr_prefix
self.postprocessor = postprocessor
    def _process_file(self, f):
"""xmltodict can either return attributes of nodes as prefixed fields
        (prefixed to avoid key collisions), or ignore them altogether.
        Set attr_prefix to whatever you want; setting it to False ignores
attributes.
"""
import xmltodict
if self.postprocessor:
obj = xmltodict.parse(f, attr_prefix=self.attr_prefix,
postprocessor=self.postprocessor)
else:
obj = xmltodict.parse(f, attr_prefix=self.attr_prefix)
# If node list was given, walk down the tree
if self.node_list:
for node in self.node_list:
obj = obj[node]
# If the top-level XML object in the file is a list
# then yield each element separately; otherwise, yield
# the top-level object.
if isinstance(obj, list):
for record in obj:
yield record
else:
yield obj
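if __name__ == '__main__':  # pragma: no cover
    # Hedged demo, not part of the original module: exercise a few sources
    # against small in-memory files. Assumes Python 3 (io.StringIO); the
    # field names and sample data are illustrative only.
    from io import StringIO
    csv_data = StringIO('name,phone\nAlice,555-1234\nBob,555-9876\n')
    for record in CSVSource(csv_data):
        print(record)  # e.g. {'name': 'Alice', 'phone': '555-1234'}
    fw_data = StringIO('Alice     555-1234\nBob       555-9876\n')
    for record in FixedWidthFileSource(fw_data, (('name', 10), ('phone', 8))):
        print(record)  # fixed columns: name is chars 0-9, phone is 10-17
    json_data = StringIO('[{"name": "Alice"}, {"name": "Bob"}]')
    for record in JSONSource(json_data):
        print(record)  # top-level JSON list: each element yielded separately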
| sunlightlabs/saucebrush | saucebrush/sources.py | Python | bsd-3-clause | 10,715 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division, unicode_literals
##
## This file is part of MoaT, the Master of all Things.
##
## MoaT is Copyright © 2007-2016 by Matthias Urlichs <matthias@urlichs.de>,
## it is licensed under the GPLv3. See the file `README.rst` for details,
## including optimistic statements by the author.
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License (included; see the file LICENSE)
## for more details.
##
## This header is auto-generated and may self-destruct at any time,
## courtesy of "make update". The original is in ‘scripts/_boilerplate.py’.
## Thus, do not remove the next line, or insert any blank lines above.
##BP
"""\
This module holds a few random utility functions.
"""
from rainman.models import Log, Site,Controller,Valve,EnvGroup
import sys, os, traceback
_me = None
def log(dev, text):
v = c = s = None
if isinstance(dev,Valve):
v = dev
elif isinstance(dev,Controller):
c = dev
elif isinstance(dev,Site):
s = dev
elif isinstance(dev,EnvGroup):
s = dev.site
text = "EnvGroup "+dev.name+": "+text
else:
raise RuntimeError("Cannot log: %s" % (repr(dev),))
if v: c = v.controller
if c: s = c.site
print("%s: %s" % (dev,text), file=sys.stderr)
global _me
if _me is None:
_me = os.path.splitext(os.path.basename(sys.argv[0]))[0]
Log(site=s,controller=c,valve=v, text=text, logger=_me).save()
def log_error(dev):
log(dev,traceback.format_exc())
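# A hedged usage sketch (assumes a saved rainman Valve instance named `valve`):
#
#     log(valve, "valve opened for 300 s")  # controller and site are derived
#     try:
#         risky_operation()
#     except Exception:
#         log_error(valve)  # records the full traceback against the device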
| smurfix/MoaT | irrigation/rainman/logging.py | Python | gpl-3.0 | 1,920 |
import datetime
import re
from typing import Any, Dict, List, Mapping, Union
from unittest import mock
import orjson
from django.conf import settings
from django.utils.timezone import now as timezone_now
from confirmation.models import Confirmation, create_confirmation_link
from zerver.lib.actions import (
do_add_deactivated_redirect,
do_change_plan_type,
do_change_realm_subdomain,
do_create_realm,
do_deactivate_realm,
do_deactivate_stream,
do_scrub_realm,
do_send_realm_reactivation_email,
do_set_realm_property,
)
from zerver.lib.realm_description import get_realm_rendered_description, get_realm_text_description
from zerver.lib.send_email import send_future_email
from zerver.lib.streams import create_stream_if_needed
from zerver.lib.test_classes import ZulipTestCase
from zerver.models import (
Attachment,
CustomProfileField,
Message,
Realm,
RealmAuditLog,
ScheduledEmail,
Stream,
UserMessage,
UserProfile,
get_realm,
get_stream,
get_user_profile_by_id,
)
class RealmTest(ZulipTestCase):
def assert_user_profile_cache_gets_new_name(
self, user_profile: UserProfile, new_realm_name: str
) -> None:
self.assertEqual(user_profile.realm.name, new_realm_name)
def test_realm_creation_ensures_internal_realms(self) -> None:
with mock.patch("zerver.lib.actions.server_initialized", return_value=False):
with mock.patch(
"zerver.lib.actions.create_internal_realm"
) as mock_create_internal, self.assertLogs(level="INFO") as info_logs:
do_create_realm("testrealm", "Test Realm")
mock_create_internal.assert_called_once()
self.assertEqual(
info_logs.output,
["INFO:root:Server not yet initialized. Creating the internal realm first."],
)
def test_do_set_realm_name_caching(self) -> None:
"""The main complicated thing about setting realm names is fighting the
cache, and we start by populating the cache for Hamlet, and we end
by checking the cache to ensure that the new value is there."""
realm = get_realm("zulip")
new_name = "Zed You Elle Eye Pea"
do_set_realm_property(realm, "name", new_name, acting_user=None)
self.assertEqual(get_realm(realm.string_id).name, new_name)
self.assert_user_profile_cache_gets_new_name(self.example_user("hamlet"), new_name)
def test_update_realm_name_events(self) -> None:
realm = get_realm("zulip")
new_name = "Puliz"
events: List[Mapping[str, Any]] = []
with self.tornado_redirected_to_list(events, expected_num_events=1):
do_set_realm_property(realm, "name", new_name, acting_user=None)
event = events[0]["event"]
self.assertEqual(
event,
dict(
type="realm",
op="update",
property="name",
value=new_name,
),
)
def test_update_realm_description_events(self) -> None:
realm = get_realm("zulip")
new_description = "zulip dev group"
events: List[Mapping[str, Any]] = []
with self.tornado_redirected_to_list(events, expected_num_events=1):
do_set_realm_property(realm, "description", new_description, acting_user=None)
event = events[0]["event"]
self.assertEqual(
event,
dict(
type="realm",
op="update",
property="description",
value=new_description,
),
)
def test_update_realm_description(self) -> None:
self.login("iago")
new_description = "zulip dev group"
data = dict(description=new_description)
events: List[Mapping[str, Any]] = []
with self.tornado_redirected_to_list(events, expected_num_events=1):
result = self.client_patch("/json/realm", data)
self.assert_json_success(result)
realm = get_realm("zulip")
self.assertEqual(realm.description, new_description)
event = events[0]["event"]
self.assertEqual(
event,
dict(
type="realm",
op="update",
property="description",
value=new_description,
),
)
def test_realm_description_length(self) -> None:
new_description = "A" * 1001
data = dict(description=new_description)
# create an admin user
self.login("iago")
result = self.client_patch("/json/realm", data)
self.assert_json_error(result, "description is too long (limit: 1000 characters)")
realm = get_realm("zulip")
self.assertNotEqual(realm.description, new_description)
def test_realm_name_length(self) -> None:
new_name = "A" * (Realm.MAX_REALM_NAME_LENGTH + 1)
data = dict(name=new_name)
# create an admin user
self.login("iago")
result = self.client_patch("/json/realm", data)
self.assert_json_error(result, "name is too long (limit: 40 characters)")
realm = get_realm("zulip")
self.assertNotEqual(realm.name, new_name)
def test_admin_restrictions_for_changing_realm_name(self) -> None:
new_name = "Mice will play while the cat is away"
self.login("othello")
req = dict(name=new_name)
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Must be an organization administrator")
def test_unauthorized_name_change(self) -> None:
data = {"full_name": "Sir Hamlet"}
user_profile = self.example_user("hamlet")
self.login_user(user_profile)
do_set_realm_property(user_profile.realm, "name_changes_disabled", True, acting_user=None)
url = "/json/settings"
result = self.client_patch(url, data)
self.assertEqual(result.status_code, 200)
# Since the setting fails silently, no message is returned
self.assert_in_response("", result)
# Realm admins can change their name even setting is disabled.
data = {"full_name": "New Iago"}
self.login("iago")
url = "/json/settings"
result = self.client_patch(url, data)
self.assert_json_success(result)
def test_do_deactivate_realm_clears_user_realm_cache(self) -> None:
"""The main complicated thing about deactivating realm names is
updating the cache, and we start by populating the cache for
Hamlet, and we end by checking the cache to ensure that his
realm appears to be deactivated. You can make this test fail
by disabling cache.flush_realm()."""
hamlet_id = self.example_user("hamlet").id
get_user_profile_by_id(hamlet_id)
realm = get_realm("zulip")
do_deactivate_realm(realm, acting_user=None)
user = get_user_profile_by_id(hamlet_id)
self.assertTrue(user.realm.deactivated)
def test_do_change_realm_subdomain_clears_user_realm_cache(self) -> None:
"""The main complicated thing about changing realm subdomains is
updating the cache, and we start by populating the cache for
Hamlet, and we end by checking the cache to ensure that his
realm appears to be deactivated. You can make this test fail
by disabling cache.flush_realm()."""
hamlet_id = self.example_user("hamlet").id
user = get_user_profile_by_id(hamlet_id)
realm = get_realm("zulip")
iago = self.example_user("iago")
do_change_realm_subdomain(realm, "newzulip", acting_user=iago)
user = get_user_profile_by_id(hamlet_id)
self.assertEqual(user.realm.string_id, "newzulip")
placeholder_realm = get_realm("zulip")
self.assertTrue(placeholder_realm.deactivated)
self.assertEqual(placeholder_realm.deactivated_redirect, user.realm.uri)
realm_audit_log = RealmAuditLog.objects.filter(
event_type=RealmAuditLog.REALM_SUBDOMAIN_CHANGED, acting_user=iago
).last()
assert realm_audit_log is not None
expected_extra_data = {"old_subdomain": "zulip", "new_subdomain": "newzulip"}
self.assertEqual(realm_audit_log.extra_data, str(expected_extra_data))
self.assertEqual(realm_audit_log.acting_user, iago)
def test_do_deactivate_realm_clears_scheduled_jobs(self) -> None:
user = self.example_user("hamlet")
send_future_email(
"zerver/emails/followup_day1",
user.realm,
to_user_ids=[user.id],
delay=datetime.timedelta(hours=1),
)
self.assertEqual(ScheduledEmail.objects.count(), 1)
do_deactivate_realm(user.realm, acting_user=None)
self.assertEqual(ScheduledEmail.objects.count(), 0)
def test_do_change_realm_description_clears_cached_descriptions(self) -> None:
realm = get_realm("zulip")
rendered_description = get_realm_rendered_description(realm)
text_description = get_realm_text_description(realm)
realm.description = "New description"
realm.save(update_fields=["description"])
new_rendered_description = get_realm_rendered_description(realm)
self.assertNotEqual(rendered_description, new_rendered_description)
self.assertIn(realm.description, new_rendered_description)
new_text_description = get_realm_text_description(realm)
self.assertNotEqual(text_description, new_text_description)
self.assertEqual(realm.description, new_text_description)
def test_do_deactivate_realm_on_deactivated_realm(self) -> None:
"""Ensure early exit is working in realm deactivation"""
realm = get_realm("zulip")
self.assertFalse(realm.deactivated)
do_deactivate_realm(realm, acting_user=None)
self.assertTrue(realm.deactivated)
do_deactivate_realm(realm, acting_user=None)
self.assertTrue(realm.deactivated)
def test_do_set_deactivated_redirect_on_deactivated_realm(self) -> None:
"""Ensure that the redirect url is working when deactivating realm"""
realm = get_realm("zulip")
redirect_url = "new_server.zulip.com"
do_deactivate_realm(realm, acting_user=None)
self.assertTrue(realm.deactivated)
do_add_deactivated_redirect(realm, redirect_url)
self.assertEqual(realm.deactivated_redirect, redirect_url)
new_redirect_url = "test.zulip.com"
do_add_deactivated_redirect(realm, new_redirect_url)
self.assertEqual(realm.deactivated_redirect, new_redirect_url)
self.assertNotEqual(realm.deactivated_redirect, redirect_url)
def test_realm_reactivation_link(self) -> None:
realm = get_realm("zulip")
do_deactivate_realm(realm, acting_user=None)
self.assertTrue(realm.deactivated)
confirmation_url = create_confirmation_link(realm, Confirmation.REALM_REACTIVATION)
response = self.client_get(confirmation_url)
self.assert_in_success_response(
["Your organization has been successfully reactivated"], response
)
realm = get_realm("zulip")
self.assertFalse(realm.deactivated)
def test_realm_reactivation_confirmation_object(self) -> None:
realm = get_realm("zulip")
do_deactivate_realm(realm, acting_user=None)
self.assertTrue(realm.deactivated)
create_confirmation_link(realm, Confirmation.REALM_REACTIVATION)
confirmation = Confirmation.objects.last()
assert confirmation is not None
self.assertEqual(confirmation.content_object, realm)
self.assertEqual(confirmation.realm, realm)
def test_do_send_realm_reactivation_email(self) -> None:
realm = get_realm("zulip")
iago = self.example_user("iago")
do_send_realm_reactivation_email(realm, acting_user=iago)
from django.core.mail import outbox
self.assert_length(outbox, 1)
self.assertEqual(self.email_envelope_from(outbox[0]), settings.NOREPLY_EMAIL_ADDRESS)
self.assertRegex(
self.email_display_from(outbox[0]),
fr"^Zulip Account Security <{self.TOKENIZED_NOREPLY_REGEX}>\Z",
)
self.assertIn("Reactivate your Zulip organization", outbox[0].subject)
self.assertIn("Dear former administrators", outbox[0].body)
admins = realm.get_human_admin_users()
confirmation_url = self.get_confirmation_url_from_outbox(admins[0].delivery_email)
response = self.client_get(confirmation_url)
self.assert_in_success_response(
["Your organization has been successfully reactivated"], response
)
realm = get_realm("zulip")
self.assertFalse(realm.deactivated)
self.assertEqual(
RealmAuditLog.objects.filter(
event_type=RealmAuditLog.REALM_REACTIVATION_EMAIL_SENT, acting_user=iago
).count(),
1,
)
def test_realm_reactivation_with_random_link(self) -> None:
random_link = "/reactivate/5e89081eb13984e0f3b130bf7a4121d153f1614b"
response = self.client_get(random_link)
self.assert_in_success_response(
["The organization reactivation link has expired or is not valid."], response
)
def test_change_notifications_stream(self) -> None:
# We need an admin user.
self.login("iago")
disabled_notif_stream_id = -1
req = dict(notifications_stream_id=orjson.dumps(disabled_notif_stream_id).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
realm = get_realm("zulip")
self.assertEqual(realm.notifications_stream, None)
new_notif_stream_id = Stream.objects.get(name="Denmark").id
req = dict(notifications_stream_id=orjson.dumps(new_notif_stream_id).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
realm = get_realm("zulip")
assert realm.notifications_stream is not None
self.assertEqual(realm.notifications_stream.id, new_notif_stream_id)
invalid_notif_stream_id = 1234
req = dict(notifications_stream_id=orjson.dumps(invalid_notif_stream_id).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Invalid stream id")
realm = get_realm("zulip")
assert realm.notifications_stream is not None
self.assertNotEqual(realm.notifications_stream.id, invalid_notif_stream_id)
def test_get_default_notifications_stream(self) -> None:
realm = get_realm("zulip")
verona = get_stream("verona", realm)
notifications_stream = realm.get_notifications_stream()
assert notifications_stream is not None
self.assertEqual(notifications_stream.id, verona.id)
do_deactivate_stream(notifications_stream, acting_user=None)
self.assertIsNone(realm.get_notifications_stream())
def test_change_signup_notifications_stream(self) -> None:
# We need an admin user.
self.login("iago")
disabled_signup_notifications_stream_id = -1
req = dict(
signup_notifications_stream_id=orjson.dumps(
disabled_signup_notifications_stream_id
).decode()
)
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
realm = get_realm("zulip")
self.assertEqual(realm.signup_notifications_stream, None)
new_signup_notifications_stream_id = Stream.objects.get(name="Denmark").id
req = dict(
signup_notifications_stream_id=orjson.dumps(new_signup_notifications_stream_id).decode()
)
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
realm = get_realm("zulip")
assert realm.signup_notifications_stream is not None
self.assertEqual(realm.signup_notifications_stream.id, new_signup_notifications_stream_id)
invalid_signup_notifications_stream_id = 1234
req = dict(
signup_notifications_stream_id=orjson.dumps(
invalid_signup_notifications_stream_id
).decode()
)
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Invalid stream id")
realm = get_realm("zulip")
assert realm.signup_notifications_stream is not None
self.assertNotEqual(
realm.signup_notifications_stream.id, invalid_signup_notifications_stream_id
)
def test_get_default_signup_notifications_stream(self) -> None:
realm = get_realm("zulip")
verona = get_stream("verona", realm)
realm.signup_notifications_stream = verona
realm.save(update_fields=["signup_notifications_stream"])
signup_notifications_stream = realm.get_signup_notifications_stream()
assert signup_notifications_stream is not None
self.assertEqual(signup_notifications_stream, verona)
do_deactivate_stream(signup_notifications_stream, acting_user=None)
self.assertIsNone(realm.get_signup_notifications_stream())
def test_change_realm_default_language(self) -> None:
# we need an admin user.
self.login("iago")
# Test to make sure that when invalid languages are passed
# as the default realm language, correct validation error is
# raised and the invalid language is not saved in db
invalid_lang = "invalid_lang"
req = dict(default_language=invalid_lang)
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, f"Invalid language '{invalid_lang}'")
realm = get_realm("zulip")
self.assertNotEqual(realm.default_language, invalid_lang)
def test_deactivate_realm_by_owner(self) -> None:
self.login("desdemona")
realm = get_realm("zulip")
self.assertFalse(realm.deactivated)
result = self.client_post("/json/realm/deactivate")
self.assert_json_success(result)
realm = get_realm("zulip")
self.assertTrue(realm.deactivated)
def test_deactivate_realm_by_non_owner(self) -> None:
self.login("iago")
realm = get_realm("zulip")
self.assertFalse(realm.deactivated)
result = self.client_post("/json/realm/deactivate")
self.assert_json_error(result, "Must be an organization owner")
realm = get_realm("zulip")
self.assertFalse(realm.deactivated)
def test_invalid_integer_attribute_values(self) -> None:
integer_values = [key for key, value in Realm.property_types.items() if value is int]
invalid_values = dict(
bot_creation_policy=10,
create_stream_policy=10,
invite_to_stream_policy=10,
email_address_visibility=10,
message_retention_days=10,
video_chat_provider=10,
giphy_rating=10,
waiting_period_threshold=-10,
digest_weekday=10,
user_group_edit_policy=10,
private_message_policy=10,
message_content_delete_limit_seconds=-10,
wildcard_mention_policy=10,
invite_to_realm_policy=10,
move_messages_between_streams_policy=10,
add_custom_emoji_policy=10,
)
# We need an admin user.
self.login("iago")
for name in integer_values:
invalid_value = invalid_values.get(name)
if invalid_value is None:
raise AssertionError(f"No test created for {name}")
self.do_test_invalid_integer_attribute_value(name, invalid_value)
def do_test_invalid_integer_attribute_value(self, val_name: str, invalid_val: int) -> None:
possible_messages = {
f"Invalid {val_name}",
f"Bad value for '{val_name}'",
f"Bad value for '{val_name}': {invalid_val}",
f"Invalid {val_name} {invalid_val}",
}
req = {val_name: invalid_val}
result = self.client_patch("/json/realm", req)
msg = self.get_json_error(result)
        self.assertIn(msg, possible_messages)
def test_change_video_chat_provider(self) -> None:
self.assertEqual(
get_realm("zulip").video_chat_provider, Realm.VIDEO_CHAT_PROVIDERS["jitsi_meet"]["id"]
)
self.login("iago")
invalid_video_chat_provider_value = 10
req = {"video_chat_provider": orjson.dumps(invalid_video_chat_provider_value).decode()}
result = self.client_patch("/json/realm", req)
        self.assert_json_error(
            result, f"Invalid video_chat_provider {invalid_video_chat_provider_value}"
        )
req = {
"video_chat_provider": orjson.dumps(
Realm.VIDEO_CHAT_PROVIDERS["disabled"]["id"]
).decode()
}
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
self.assertEqual(
get_realm("zulip").video_chat_provider, Realm.VIDEO_CHAT_PROVIDERS["disabled"]["id"]
)
req = {
"video_chat_provider": orjson.dumps(
Realm.VIDEO_CHAT_PROVIDERS["jitsi_meet"]["id"]
).decode()
}
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
self.assertEqual(
get_realm("zulip").video_chat_provider, Realm.VIDEO_CHAT_PROVIDERS["jitsi_meet"]["id"]
)
req = {
"video_chat_provider": orjson.dumps(
Realm.VIDEO_CHAT_PROVIDERS["big_blue_button"]["id"]
).decode()
}
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
self.assertEqual(
get_realm("zulip").video_chat_provider,
Realm.VIDEO_CHAT_PROVIDERS["big_blue_button"]["id"],
)
req = {
"video_chat_provider": orjson.dumps(Realm.VIDEO_CHAT_PROVIDERS["zoom"]["id"]).decode()
}
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
def test_initial_plan_type(self) -> None:
with self.settings(BILLING_ENABLED=True):
self.assertEqual(do_create_realm("hosted", "hosted").plan_type, Realm.LIMITED)
self.assertEqual(
get_realm("hosted").max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX
)
self.assertEqual(
get_realm("hosted").message_visibility_limit, Realm.MESSAGE_VISIBILITY_LIMITED
)
self.assertEqual(get_realm("hosted").upload_quota_gb, Realm.UPLOAD_QUOTA_LIMITED)
with self.settings(BILLING_ENABLED=False):
self.assertEqual(do_create_realm("onpremise", "onpremise").plan_type, Realm.SELF_HOSTED)
self.assertEqual(
get_realm("onpremise").max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX
)
self.assertEqual(get_realm("onpremise").message_visibility_limit, None)
self.assertEqual(get_realm("onpremise").upload_quota_gb, None)
def test_change_plan_type(self) -> None:
realm = get_realm("zulip")
iago = self.example_user("iago")
self.assertEqual(realm.plan_type, Realm.SELF_HOSTED)
self.assertEqual(realm.max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX)
self.assertEqual(realm.message_visibility_limit, None)
self.assertEqual(realm.upload_quota_gb, None)
do_change_plan_type(realm, Realm.STANDARD, acting_user=iago)
realm = get_realm("zulip")
realm_audit_log = RealmAuditLog.objects.filter(
event_type=RealmAuditLog.REALM_PLAN_TYPE_CHANGED
).last()
assert realm_audit_log is not None
expected_extra_data = {"old_value": Realm.SELF_HOSTED, "new_value": Realm.STANDARD}
self.assertEqual(realm_audit_log.extra_data, str(expected_extra_data))
self.assertEqual(realm_audit_log.acting_user, iago)
self.assertEqual(realm.plan_type, Realm.STANDARD)
self.assertEqual(realm.max_invites, Realm.INVITES_STANDARD_REALM_DAILY_MAX)
self.assertEqual(realm.message_visibility_limit, None)
self.assertEqual(realm.upload_quota_gb, Realm.UPLOAD_QUOTA_STANDARD)
do_change_plan_type(realm, Realm.LIMITED, acting_user=iago)
realm = get_realm("zulip")
self.assertEqual(realm.plan_type, Realm.LIMITED)
self.assertEqual(realm.max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX)
self.assertEqual(realm.message_visibility_limit, Realm.MESSAGE_VISIBILITY_LIMITED)
self.assertEqual(realm.upload_quota_gb, Realm.UPLOAD_QUOTA_LIMITED)
do_change_plan_type(realm, Realm.STANDARD_FREE, acting_user=iago)
realm = get_realm("zulip")
self.assertEqual(realm.plan_type, Realm.STANDARD_FREE)
self.assertEqual(realm.max_invites, Realm.INVITES_STANDARD_REALM_DAILY_MAX)
self.assertEqual(realm.message_visibility_limit, None)
self.assertEqual(realm.upload_quota_gb, Realm.UPLOAD_QUOTA_STANDARD)
do_change_plan_type(realm, Realm.LIMITED, acting_user=iago)
do_change_plan_type(realm, Realm.SELF_HOSTED, acting_user=iago)
self.assertEqual(realm.plan_type, Realm.SELF_HOSTED)
self.assertEqual(realm.max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX)
self.assertEqual(realm.message_visibility_limit, None)
self.assertEqual(realm.upload_quota_gb, None)
def test_message_retention_days(self) -> None:
self.login("iago")
realm = get_realm("zulip")
self.assertEqual(realm.plan_type, Realm.SELF_HOSTED)
req = dict(message_retention_days=orjson.dumps(10).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Must be an organization owner")
self.login("desdemona")
req = dict(message_retention_days=orjson.dumps(0).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Bad value for 'message_retention_days': 0")
req = dict(message_retention_days=orjson.dumps(-10).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Bad value for 'message_retention_days': -10")
req = dict(message_retention_days=orjson.dumps("invalid").decode())
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Bad value for 'message_retention_days': invalid")
req = dict(message_retention_days=orjson.dumps(-1).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Bad value for 'message_retention_days': -1")
req = dict(message_retention_days=orjson.dumps("forever").decode())
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
req = dict(message_retention_days=orjson.dumps(10).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
do_change_plan_type(realm, Realm.LIMITED, acting_user=None)
req = dict(message_retention_days=orjson.dumps(10).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Available on Zulip Standard. Upgrade to access.")
do_change_plan_type(realm, Realm.STANDARD, acting_user=None)
req = dict(message_retention_days=orjson.dumps(10).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
def test_do_create_realm(self) -> None:
realm = do_create_realm("realm_string_id", "realm name")
self.assertEqual(realm.string_id, "realm_string_id")
self.assertEqual(realm.name, "realm name")
self.assertFalse(realm.emails_restricted_to_domains)
self.assertEqual(realm.email_address_visibility, Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE)
self.assertEqual(realm.description, "")
self.assertTrue(realm.invite_required)
self.assertEqual(realm.plan_type, Realm.LIMITED)
self.assertEqual(realm.org_type, Realm.ORG_TYPES["unspecified"]["id"])
self.assertEqual(type(realm.date_created), datetime.datetime)
self.assertTrue(
RealmAuditLog.objects.filter(
realm=realm, event_type=RealmAuditLog.REALM_CREATED, event_time=realm.date_created
).exists()
)
assert realm.notifications_stream is not None
self.assertEqual(realm.notifications_stream.name, "general")
self.assertEqual(realm.notifications_stream.realm, realm)
assert realm.signup_notifications_stream is not None
self.assertEqual(realm.signup_notifications_stream.name, "core team")
self.assertEqual(realm.signup_notifications_stream.realm, realm)
self.assertEqual(realm.plan_type, Realm.LIMITED)
def test_do_create_realm_with_keyword_arguments(self) -> None:
date_created = timezone_now() - datetime.timedelta(days=100)
realm = do_create_realm(
"realm_string_id",
"realm name",
emails_restricted_to_domains=True,
date_created=date_created,
email_address_visibility=Realm.EMAIL_ADDRESS_VISIBILITY_MEMBERS,
description="realm description",
invite_required=False,
plan_type=Realm.STANDARD_FREE,
org_type=Realm.ORG_TYPES["community"]["id"],
)
self.assertEqual(realm.string_id, "realm_string_id")
self.assertEqual(realm.name, "realm name")
self.assertTrue(realm.emails_restricted_to_domains)
self.assertEqual(realm.email_address_visibility, Realm.EMAIL_ADDRESS_VISIBILITY_MEMBERS)
self.assertEqual(realm.description, "realm description")
self.assertFalse(realm.invite_required)
self.assertEqual(realm.plan_type, Realm.STANDARD_FREE)
self.assertEqual(realm.org_type, Realm.ORG_TYPES["community"]["id"])
self.assertEqual(realm.date_created, date_created)
self.assertTrue(
RealmAuditLog.objects.filter(
realm=realm, event_type=RealmAuditLog.REALM_CREATED, event_time=realm.date_created
).exists()
)
assert realm.notifications_stream is not None
self.assertEqual(realm.notifications_stream.name, "general")
self.assertEqual(realm.notifications_stream.realm, realm)
assert realm.signup_notifications_stream is not None
self.assertEqual(realm.signup_notifications_stream.name, "core team")
self.assertEqual(realm.signup_notifications_stream.realm, realm)
class RealmAPITest(ZulipTestCase):
def setUp(self) -> None:
super().setUp()
self.login("desdemona")
def set_up_db(self, attr: str, value: Any) -> None:
realm = get_realm("zulip")
setattr(realm, attr, value)
realm.save(update_fields=[attr])
def update_with_api(self, name: str, value: Union[int, str]) -> Realm:
if not isinstance(value, str):
value = orjson.dumps(value).decode()
result = self.client_patch("/json/realm", {name: value})
self.assert_json_success(result)
return get_realm("zulip") # refresh data
def update_with_api_multiple_value(self, data_dict: Dict[str, Any]) -> Realm:
result = self.client_patch("/json/realm", data_dict)
self.assert_json_success(result)
return get_realm("zulip")
def do_test_realm_update_api(self, name: str) -> None:
"""Test updating realm properties.
If new realm properties have been added to the Realm model but the
test_values dict below has not been updated, this will raise an
assertion error.
"""
bool_tests: List[bool] = [False, True]
test_values: Dict[str, Any] = dict(
default_language=["de", "en"],
default_code_block_language=["javascript", ""],
description=["Realm description", "New description"],
digest_weekday=[0, 1, 2],
message_retention_days=[10, 20],
name=["Zulip", "New Name"],
waiting_period_threshold=[10, 20],
create_stream_policy=Realm.COMMON_POLICY_TYPES,
user_group_edit_policy=Realm.COMMON_POLICY_TYPES,
private_message_policy=Realm.PRIVATE_MESSAGE_POLICY_TYPES,
invite_to_stream_policy=Realm.COMMON_POLICY_TYPES,
wildcard_mention_policy=Realm.WILDCARD_MENTION_POLICY_TYPES,
bot_creation_policy=Realm.BOT_CREATION_POLICY_TYPES,
email_address_visibility=Realm.EMAIL_ADDRESS_VISIBILITY_TYPES,
video_chat_provider=[
dict(
video_chat_provider=orjson.dumps(
Realm.VIDEO_CHAT_PROVIDERS["jitsi_meet"]["id"]
).decode(),
),
],
giphy_rating=[
Realm.GIPHY_RATING_OPTIONS["y"]["id"],
Realm.GIPHY_RATING_OPTIONS["r"]["id"],
],
message_content_delete_limit_seconds=[1000, 1100, 1200],
invite_to_realm_policy=Realm.INVITE_TO_REALM_POLICY_TYPES,
move_messages_between_streams_policy=Realm.COMMON_POLICY_TYPES,
add_custom_emoji_policy=Realm.COMMON_POLICY_TYPES,
)
vals = test_values.get(name)
if Realm.property_types[name] is bool:
vals = bool_tests
if vals is None:
raise AssertionError(f"No test created for {name}")
if name == "video_chat_provider":
self.set_up_db(name, vals[0][name])
realm = self.update_with_api_multiple_value(vals[0])
self.assertEqual(getattr(realm, name), orjson.loads(vals[0][name]))
return
self.set_up_db(name, vals[0])
for val in vals[1:]:
realm = self.update_with_api(name, val)
self.assertEqual(getattr(realm, name), val)
realm = self.update_with_api(name, vals[0])
self.assertEqual(getattr(realm, name), vals[0])
def test_update_realm_properties(self) -> None:
for prop in Realm.property_types:
with self.subTest(property=prop):
self.do_test_realm_update_api(prop)
def test_update_realm_allow_message_editing(self) -> None:
"""Tests updating the realm property 'allow_message_editing'."""
self.set_up_db("allow_message_editing", False)
self.set_up_db("message_content_edit_limit_seconds", 0)
self.set_up_db("edit_topic_policy", Realm.POLICY_ADMINS_ONLY)
realm = self.update_with_api("allow_message_editing", True)
realm = self.update_with_api("message_content_edit_limit_seconds", 100)
realm = self.update_with_api("edit_topic_policy", Realm.POLICY_EVERYONE)
self.assertEqual(realm.allow_message_editing, True)
self.assertEqual(realm.message_content_edit_limit_seconds, 100)
self.assertEqual(realm.edit_topic_policy, Realm.POLICY_EVERYONE)
realm = self.update_with_api("allow_message_editing", False)
self.assertEqual(realm.allow_message_editing, False)
self.assertEqual(realm.message_content_edit_limit_seconds, 100)
self.assertEqual(realm.edit_topic_policy, Realm.POLICY_EVERYONE)
realm = self.update_with_api("message_content_edit_limit_seconds", 200)
self.assertEqual(realm.allow_message_editing, False)
self.assertEqual(realm.message_content_edit_limit_seconds, 200)
self.assertEqual(realm.edit_topic_policy, Realm.POLICY_EVERYONE)
realm = self.update_with_api("edit_topic_policy", Realm.POLICY_ADMINS_ONLY)
self.assertEqual(realm.allow_message_editing, False)
self.assertEqual(realm.message_content_edit_limit_seconds, 200)
self.assertEqual(realm.edit_topic_policy, Realm.POLICY_ADMINS_ONLY)
realm = self.update_with_api("edit_topic_policy", Realm.POLICY_MODERATORS_ONLY)
self.assertEqual(realm.allow_message_editing, False)
self.assertEqual(realm.message_content_edit_limit_seconds, 200)
self.assertEqual(realm.edit_topic_policy, Realm.POLICY_MODERATORS_ONLY)
realm = self.update_with_api("edit_topic_policy", Realm.POLICY_FULL_MEMBERS_ONLY)
self.assertEqual(realm.allow_message_editing, False)
self.assertEqual(realm.message_content_edit_limit_seconds, 200)
self.assertEqual(realm.edit_topic_policy, Realm.POLICY_FULL_MEMBERS_ONLY)
realm = self.update_with_api("edit_topic_policy", Realm.POLICY_MEMBERS_ONLY)
self.assertEqual(realm.allow_message_editing, False)
self.assertEqual(realm.message_content_edit_limit_seconds, 200)
self.assertEqual(realm.edit_topic_policy, Realm.POLICY_MEMBERS_ONLY)
# Test an invalid value for edit_topic_policy
invalid_edit_topic_policy_value = 10
req = {"edit_topic_policy": orjson.dumps(invalid_edit_topic_policy_value).decode()}
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Invalid edit_topic_policy")
def test_update_realm_allow_message_deleting(self) -> None:
"""Tests updating the realm property 'allow_message_deleting'."""
self.set_up_db("allow_message_deleting", True)
self.set_up_db("message_content_delete_limit_seconds", 0)
realm = self.update_with_api("allow_message_deleting", False)
self.assertEqual(realm.allow_message_deleting, False)
self.assertEqual(realm.message_content_delete_limit_seconds, 0)
realm = self.update_with_api("allow_message_deleting", True)
realm = self.update_with_api("message_content_delete_limit_seconds", 100)
self.assertEqual(realm.allow_message_deleting, True)
self.assertEqual(realm.message_content_delete_limit_seconds, 100)
realm = self.update_with_api("message_content_delete_limit_seconds", 600)
self.assertEqual(realm.allow_message_deleting, True)
self.assertEqual(realm.message_content_delete_limit_seconds, 600)
def test_change_invite_to_realm_policy_by_owners_only(self) -> None:
self.login("iago")
req = {"invite_to_realm_policy": Realm.POLICY_ADMINS_ONLY}
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Must be an organization owner")
self.login("desdemona")
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
realm = get_realm("zulip")
self.assertEqual(realm.invite_to_realm_policy, Realm.POLICY_ADMINS_ONLY)

class ScrubRealmTest(ZulipTestCase):
def test_scrub_realm(self) -> None:
zulip = get_realm("zulip")
lear = get_realm("lear")
iago = self.example_user("iago")
othello = self.example_user("othello")
cordelia = self.lear_user("cordelia")
king = self.lear_user("king")
create_stream_if_needed(lear, "Shakespeare")
self.subscribe(cordelia, "Shakespeare")
self.subscribe(king, "Shakespeare")
Message.objects.all().delete()
UserMessage.objects.all().delete()
for i in range(5):
self.send_stream_message(iago, "Scotland")
self.send_stream_message(othello, "Scotland")
self.send_stream_message(cordelia, "Shakespeare")
self.send_stream_message(king, "Shakespeare")
Attachment.objects.filter(realm=zulip).delete()
Attachment.objects.create(realm=zulip, owner=iago, path_id="a/b/temp1.txt", size=512)
Attachment.objects.create(realm=zulip, owner=othello, path_id="a/b/temp2.txt", size=512)
Attachment.objects.filter(realm=lear).delete()
Attachment.objects.create(realm=lear, owner=cordelia, path_id="c/d/temp1.txt", size=512)
Attachment.objects.create(realm=lear, owner=king, path_id="c/d/temp2.txt", size=512)
CustomProfileField.objects.create(realm=lear)
self.assertEqual(Message.objects.filter(sender__in=[iago, othello]).count(), 10)
self.assertEqual(Message.objects.filter(sender__in=[cordelia, king]).count(), 10)
self.assertEqual(UserMessage.objects.filter(user_profile__in=[iago, othello]).count(), 20)
self.assertEqual(UserMessage.objects.filter(user_profile__in=[cordelia, king]).count(), 20)
self.assertNotEqual(CustomProfileField.objects.filter(realm=zulip).count(), 0)
with self.assertLogs(level="WARNING"):
do_scrub_realm(zulip, acting_user=None)
self.assertEqual(Message.objects.filter(sender__in=[iago, othello]).count(), 0)
self.assertEqual(Message.objects.filter(sender__in=[cordelia, king]).count(), 10)
self.assertEqual(UserMessage.objects.filter(user_profile__in=[iago, othello]).count(), 0)
self.assertEqual(UserMessage.objects.filter(user_profile__in=[cordelia, king]).count(), 20)
self.assertEqual(Attachment.objects.filter(realm=zulip).count(), 0)
self.assertEqual(Attachment.objects.filter(realm=lear).count(), 2)
self.assertEqual(CustomProfileField.objects.filter(realm=zulip).count(), 0)
self.assertNotEqual(CustomProfileField.objects.filter(realm=lear).count(), 0)
zulip_users = UserProfile.objects.filter(realm=zulip)
for user in zulip_users:
self.assertTrue(re.search("Scrubbed [a-z0-9]{15}", user.full_name))
self.assertTrue(re.search("scrubbed-[a-z0-9]{15}@" + zulip.host, user.email))
self.assertTrue(re.search("scrubbed-[a-z0-9]{15}@" + zulip.host, user.delivery_email))
lear_users = UserProfile.objects.filter(realm=lear)
for user in lear_users:
self.assertIsNone(re.search("Scrubbed [a-z0-9]{15}", user.full_name))
self.assertIsNone(re.search("scrubbed-[a-z0-9]{15}@" + zulip.host, user.email))
self.assertIsNone(re.search("scrubbed-[a-z0-9]{15}@" + zulip.host, user.delivery_email))
| hackerkid/zulip | zerver/tests/test_realm.py | Python | apache-2.0 | 42,353 |
#!/usr/bin/env python
import os
import pandas as pd
import numpy as np
from sklearn.externals import joblib
from math import (
log,
exp
)
from matplotlib import pyplot as plt
import time
import util
import math
import matplotlib
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
def plot_log_prob_of_all_trials(
gradient_traj_by_time,
list_of_log_prob_mat,
log_prob_owner,
state_no,
figure_save_path
):
trial_amount = len(list_of_log_prob_mat)
hidden_state_amount = list_of_log_prob_mat[0].shape[1]
subplot_per_row = 3
subplot_amount = trial_amount*3
row_amount = int(math.ceil(float(subplot_amount)/subplot_per_row))
fig, ax_mat = plt.subplots(nrows=row_amount, ncols=subplot_per_row)
if row_amount == 1:
ax_mat = ax_mat.reshape(1, -1)
ax_list = []
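    # Each trial occupies three consecutive panels: the growing-viterbi-path
    # image (column 0), the per-hidden-state emission log probabilities
    # (column 1), and the gradient of the log-likelihood (column 2).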
for i in range(trial_amount):
j = 3*i
row_no = j/subplot_per_row
col_no = j%subplot_per_row
ax_list.append(ax_mat[row_no, col_no])
j = 3*i+1
row_no = j/subplot_per_row
col_no = j%subplot_per_row
ax_list.append(ax_mat[row_no, col_no])
j = 3*i+2
row_no = j/subplot_per_row
col_no = j%subplot_per_row
ax_list.append(ax_mat[row_no, col_no])
from matplotlib.pyplot import cm
import numpy as np
colors_for_hstate = cm.rainbow(np.linspace(0, 1, hidden_state_amount))
for trial_no in range(trial_amount):
log_prob_mat = list_of_log_prob_mat[trial_no][:, :].transpose()
plot_idx = 3*trial_no+1
for hstate_no in range(hidden_state_amount):
ax_list[plot_idx].plot(log_prob_mat[hstate_no].tolist(), linestyle="solid", color=colors_for_hstate[hstate_no])
trial_name = log_prob_owner[trial_no]
ax_list[plot_idx].set_title('emission probabilities of %s hidden states'%hidden_state_amount)
ax_list[plot_idx].set_xlabel('time step')
ax_list[plot_idx].set_ylabel('log probability')
ymax = np.max(log_prob_mat)
ax_list[plot_idx].set_ylim(ymin=0, ymax=ymax)
vp_triangle_img = open(
os.path.join(
figure_save_path,
'check_if_viterbi_path_grow_incrementally',
"state_%s"%state_no,
"%s.png"%trial_name,
),
'rb',
)
import matplotlib.image as mpimg
        img = mpimg.imread(vp_triangle_img)
plot_idx = 3*trial_no
ax_list[plot_idx].imshow(img)
ax_list[plot_idx].set_title('growing viterbi paths')
ax_list[plot_idx].set_ylabel('time step')
ax_list[plot_idx].set_xlabel('length of viterbi path')
plot_idx = 3*trial_no+2
ax_list[plot_idx].plot(gradient_traj_by_time[trial_no].tolist()[0], linestyle="solid", color='black')
ax_list[plot_idx].set_title('gradient of log-likelihood')
ax_list[plot_idx].set_xlabel('time step')
ax_list[plot_idx].set_ylabel('log probability')
ymax = np.max(log_prob_mat)
fig.set_size_inches(4*subplot_per_row, 4*row_amount)
if not os.path.isdir(figure_save_path+'/emission_log_prob_plot'):
os.makedirs(figure_save_path+'/emission_log_prob_plot')
title = 'state %s emission log_prob plot'%(state_no,)
fig.tight_layout()
fig.savefig(os.path.join(figure_save_path, 'emission_log_prob_plot', title+".eps"), format="eps")
fig.savefig(os.path.join(figure_save_path, 'emission_log_prob_plot', title+".png"), format="png")
    plt.close(fig)
def run(model_save_path,
figure_save_path,
threshold_c_value,
trials_group_by_folder_name):
trials_group_by_folder_name = util.make_trials_of_each_state_the_same_length(trials_group_by_folder_name)
one_trial_data_group_by_state = trials_group_by_folder_name.itervalues().next()
state_amount = len(one_trial_data_group_by_state)
threshold_constant = 10
threshold_offset = 10
model_group_by_state = {}
for state_no in range(1, state_amount+1):
try:
model_group_by_state[state_no] = joblib.load(model_save_path+"/model_s%s.pkl"%(state_no,))
except IOError:
print 'model of state %s not found'%(state_no,)
continue
expected_log = []
std_of_log = []
deri_threshold = []
for state_no in model_group_by_state:
all_log_curves_of_this_state = []
list_of_log_prob_mat = []
log_prob_owner = []
for trial_name in trials_group_by_folder_name:
log_prob_owner.append(trial_name)
emission_log_prob_mat = util.get_emission_log_prob_matrix(
trials_group_by_folder_name[trial_name][state_no],
model_group_by_state[state_no]
)
list_of_log_prob_mat.append(emission_log_prob_mat)
one_log_curve_of_this_state = util.fast_log_curve_calculation(
trials_group_by_folder_name[trial_name][state_no],
model_group_by_state[state_no]
)
all_log_curves_of_this_state.append(one_log_curve_of_this_state)
# use np matrix to facilitate the computation of mean curve and std
np_matrix_traj_by_time = np.matrix(all_log_curves_of_this_state)
gradient_traj_by_time = np_matrix_traj_by_time[:, 1:]-np_matrix_traj_by_time[:, :-1]
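        # The first difference along the time axis approximates the per-step
        # gradient of each trial's log-likelihood curve.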
plot_log_prob_of_all_trials(
gradient_traj_by_time,
list_of_log_prob_mat,
log_prob_owner,
state_no,
figure_save_path)
| birlrobotics/HMM | hmm_for_baxter_using_only_success_trials/emission_log_prob_plot.py | Python | bsd-3-clause | 5,609 |
#!/usr/bin/env python
'''
Fit weighted average of multiple predictions
'''
import sys, argparse
from common import *
from scipy.optimize import minimize
def fit_weights(Y, labels):
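    # Minimize the prediction score over convex combinations of the predictors:
    # start from uniform weights, constrain them to sum to one, and bound each
    # weight to [0, 1].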
w0 = np.ones(Y.shape[1]) / Y.shape[1]
cons = ({'type': 'eq', 'fun': lambda x: x.sum()-1})
b = [(0,1) for i in xrange(Y.shape[1])]
f = lambda w: score_predictions(labels, (w*Y).sum(axis=1))
    res = minimize(f, w0, bounds=b, constraints=cons)
return res.x
def opts():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('output',
help='Averaged predictions + weights (npz)')
parser.add_argument('--pred', type=load_npz, action='append',
required=True, help='Predicted labels (npz)')
parser.add_argument('--labels', type=load_npz,
help='Reference labels')
parser.add_argument('--weights', type=load_npz,
help='Reuse previously-fit weights')
parser.add_argument('--unweighted', action='store_true',
help='Unweighted average')
return parser
if __name__ == "__main__":
args = opts().parse_args()
print "Loading test data predictions"
Y_i = np.array([p['labels'] for p in args.pred])
nlabels = Y_i.shape[2]
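    # Y_i has shape (n_predictions, n_samples, n_labels); Y_i[k] holds one
    # model's predictions for every sample and label.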
if args.unweighted:
Y_avg = Y_i.mean(axis=0)
weights = np.ones(Y_i.shape[0])
elif args.weights:
weights = args.weights['weights']
Y_avg = np.zeros_like(Y_i[0])
for i in xrange(Y_i.shape[0]):
Y_avg += weights[:,i] * Y_i[i]
else:
print "Loading labels"
labels = args.labels['labels']
print "Averaging predictions"
Y_avg = []
weights = []
for i in xrange(nlabels):
Y = np.vstack([y[:,i] for y in Y_i]).T
s = [score_predictions(labels[:,i], y[:,i]) for y in Y_i]
print "Score for each prediction: %s" % s
w = fit_weights(Y, labels[:,i])
weights.append(w)
print "Weights for label %d: %s" % (i, w)
y_avg = (w*Y).sum(axis=1)
s = score_predictions(labels[:,i], y_avg)
print "Score for averaged predictions: %s" % s
Y_avg.append(y_avg)
weights = np.array(weights)
Y_avg = np.vstack(Y_avg).T
print "Writing output to %s" % args.output
ids = args.pred[0]['ids']
header = args.pred[0]['header']
save_npz(args.output, weights=weights, ids=ids,
header=header, labels=Y_avg)
| timpalpant/KaggleTSTextClassification | scripts/average.py | Python | gpl-3.0 | 2,440 |
#!/usr/bin/env python3
from . import cab
def dump_cabfolder(folder, outfile):
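    # Stream the folder's decompressed contents to outfile in 4 KiB chunks
    # until the pseudofile is exhausted.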
while True:
data = folder.pseudofile.read(4096)
if data == b"":
break
outfile.write(data)
def print_cabfile(cabfile):
print("\x1b[1mHeader\x1b[m")
print(cabfile.header)
for number, folder in enumerate(cabfile.folders):
print("\x1b[1mFolder " + str(number) + "\x1b[m")
print(folder)
for number, file_ in enumerate(cabfile.files):
print("\x1b[1mFile " + str(number) + "\x1b[m")
print(file_)
if __name__ == "__main__":
import sys
cabfile = cab.CABFile(sys.argv[1])
cabfile.readfiledata()
#dump_cabfolder(cabfile.folders[0], open('folder0.lzx', 'wb'))
#print('folder0.lzx: uncompressed size: ' + str(cabfile.folders[0].uncompressed_size))
#print('folder0.lzx: window size: ' + str(cabfile.folders[0].comp_window_size))
import code
import rlcompleter
import readline
readline.parse_and_bind("tab: complete")
c = code.InteractiveConsole(globals())
c.interact()
| LukasEgger3/Lukas | py/openage/convert/cabextract/__main__.py | Python | gpl-3.0 | 1,073 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations

class Migration(migrations.Migration):
dependencies = [
('deals', '0002_advertiser_logo'),
]
operations = [
migrations.RemoveField(
model_name='advertiser',
name='logo',
),
]
| andela/troupon | troupon/deals/migrations/0003_remove_advertiser_logo.py | Python | mit | 349 |
#!/usr/bin/env python
# encoding: utf-8
"""
game.py
Created by Scott on 2013-12-28.
Copyright (c) 2013 Scott Rice. All rights reserved.
"""
import sys
import os
import shutil

class Game(object):
@staticmethod
def valid_custom_image_extensions():
return ['.png', '.jpg', '.jpeg', '.tga']
def __init__(self, appid):
self._appid = appid
def __eq__(self, other):
"""
Steam considers two games equal if they have the same App ID
"""
return (
isinstance(other,self.__class__) and
self.appid() == other.appid()
)
def __hash__(self):
return "__GAME{0}__".format(self.appid()).__hash__()
def _custom_image_path(self, user, extension):
filename = '%s%s' % (self.appid(), extension)
return os.path.join(user.grid_directory(), filename)
def appid(self):
return str(self._appid)
def custom_image(self, user):
"""Returns the path to the custom image set for this game, or None if
no image is set"""
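        # Steam names grid images "<appid>.<extension>" inside the user's grid
        # directory, so probe each supported extension in turn.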
for ext in self.valid_custom_image_extensions():
image_location = self._custom_image_path(user, ext)
if os.path.isfile(image_location):
return image_location
return None
def set_image(self, user, image_path):
"""Sets a custom image for the game. `image_path` should refer to
an image file on disk"""
_, ext = os.path.splitext(image_path)
shutil.copy(image_path, self._custom_image_path(user, ext))
| rwesterlund/pysteam | pysteam/game.py | Python | mit | 1,546 |
from qmeq.indexing import *
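
# The indexing schemes differ in how many-body states are ordered: 'Lin' keeps
# plain binary order, 'charge' groups states by particle number, 'sz' further
# splits each charge sector by spin projection, and 'ssq' additionally resolves
# total spin, which is why make_quantum_numbers returns tuples of increasing
# length for these schemes.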
def test_binarylist_to_integer():
assert binarylist_to_integer([1, 0, 1, 1, 0, 0]) == 44
def test_integer_to_binarylist():
assert integer_to_binarylist(33) == [1, 0, 0, 0, 0, 1]
assert integer_to_binarylist(33, strq=True) == '100001'
assert integer_to_binarylist(33, binlen=8) == [0, 0, 1, 0, 0, 0, 0, 1]
assert integer_to_binarylist(33, binlen=8, strq=True) == '00100001'
def test_construct_chargelst():
assert construct_chargelst(4) == [[0],
[1, 2, 4, 8],
[3, 5, 6, 9, 10, 12],
[7, 11, 13, 14],
[15]]
def test_sz_to_ind():
assert sz_to_ind(-2, 4, 6) == 0
assert sz_to_ind( 0, 4, 6) == 1
assert sz_to_ind(+2, 4, 6) == 2
def test_szrange():
assert szrange(2, 6) == [-2, 0, 2]
assert szrange(3, 6) == [-3, -1, 1, 3]
assert szrange(4, 6) == [-2, 0, 2]
def test_empty_szlst():
assert empty_szlst(4) == [[[]],
[[], []],
[[], [], []],
[[], []],
[[]]]
assert empty_szlst(4, noneq=True) == [[None],
[None, None],
[None, None, None],
[None, None],
[None]]
def test_construct_szlst():
assert construct_szlst(4) == [[[0]],
[[1, 2], [4, 8]],
[[3], [5, 6, 9, 10], [12]],
[[7, 11], [13, 14]],
[[15]]]
def test_ssq_to_ind():
assert ssq_to_ind(2, -2) == 0
assert ssq_to_ind(2, 0) == 1
assert ssq_to_ind(2, +2) == 0
def test_ssqrange():
assert ssqrange(3, 1, 6) == [1, 3]
assert ssqrange(4, 0, 6) == [0, 2]
def test_empty_ssqlst():
assert empty_ssqlst(4) == [[[[]]],
[[[]], [[]]],
[[[]], [[], []], [[]]],
[[[]], [[]]],
[[[]]]]
assert empty_ssqlst(4, noneq=True) == [[[None]],
[[None], [None]],
[[None], [None, None], [None]],
[[None], [None]],
[[None]]]
def test_construct_ssqlst():
szlst = construct_szlst(4)
assert construct_ssqlst(szlst, 4) == [[[[0]]],
[[[1, 2]], [[3, 4]]],
[[[5]], [[6, 7, 8], [9]], [[10]]],
[[[11, 12]], [[13, 14]]],
[[[15]]]]
def test_flatten():
szlst = construct_szlst(4)
ssqlst = construct_ssqlst(szlst, 4)
f1 = flatten(ssqlst)
f2 = flatten(f1)
f3 = flatten(f2)
assert f1 == [[[0]], [[1, 2]], [[3, 4]], [[5]], [[6, 7, 8], [9]], [[10]], [[11, 12]], [[13, 14]], [[15]]]
assert f2 == [[0], [1, 2], [3, 4], [5], [6, 7, 8], [9], [10], [11, 12], [13, 14], [15]]
assert f3 == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
def test_enum_chargelst():
chargelst_lin = construct_chargelst(4)
assert enum_chargelst(chargelst_lin) == [[0], [1, 2, 3, 4], [5, 6, 7, 8, 9, 10], [11, 12, 13, 14], [15]]
def test_enum_szlst():
szlst_lin = construct_szlst(4)
assert enum_szlst(szlst_lin) == [[[0]], [[1, 2], [3, 4]], [[5], [6, 7, 8, 9], [10]], [[11, 12], [13, 14]], [[15]]]
def test_make_inverse_map():
chargelst_lin = construct_chargelst(4)
i = flatten(chargelst_lin)
assert make_inverse_map(i) == [0, 1, 2, 5, 3, 6, 7, 11, 4, 8, 9, 12, 10, 13, 14, 15]
def test_make_quantum_numbers():
si = StateIndexing(4, indexing='Lin')
qn_ind, ind_qn = make_quantum_numbers(si)
assert qn_ind == {(1, 2): 4, (2, 5): 12, (0, 0): 0, (3, 3): 14, (3, 0): 7, (3, 1): 11, (3, 2): 13, (2, 1): 5, (2, 4): 10, (2, 0): 3, (1, 3): 8, (2, 3): 9, (2, 2): 6, (1, 0): 1, (1, 1): 2, (4, 0): 15}
assert ind_qn == {0: (0, 0), 1: (1, 0), 2: (1, 1), 3: (2, 0), 4: (1, 2), 5: (2, 1), 6: (2, 2), 7: (3, 0), 8: (1, 3), 9: (2, 3), 10: (2, 4), 11: (3, 1), 12: (2, 5), 13: (3, 2), 14: (3, 3), 15: (4, 0)}
#
si = StateIndexing(4, indexing='charge')
qn_ind, ind_qn = make_quantum_numbers(si)
assert qn_ind == {(1, 2): 3, (2, 5): 10, (0, 0): 0, (3, 3): 14, (3, 0): 11, (3, 1): 12, (3, 2): 13, (2, 1): 6, (2, 4): 9, (2, 0): 5, (1, 3): 4, (2, 3): 8, (2, 2): 7, (1, 0): 1, (1, 1): 2, (4, 0): 15}
assert ind_qn == {0: (0, 0), 1: (1, 0), 2: (1, 1), 3: (1, 2), 4: (1, 3), 5: (2, 0), 6: (2, 1), 7: (2, 2), 8: (2, 3), 9: (2, 4), 10: (2, 5), 11: (3, 0), 12: (3, 1), 13: (3, 2), 14: (3, 3), 15: (4, 0)}
#
si = StateIndexing(4, indexing='sz')
qn_ind, ind_qn = make_quantum_numbers(si)
assert qn_ind == {(3, -1, 1): 12, (1, 1, 0): 3, (2, -2, 0): 5, (2, 0, 3): 9, (4, 0, 0): 15, (2, 0, 2): 8, (1, -1, 0): 1, (2, 2, 0): 10, (3, 1, 0): 13, (0, 0, 0): 0, (1, -1, 1): 2, (2, 0, 1): 7, (3, 1, 1): 14, (3, -1, 0): 11, (1, 1, 1): 4, (2, 0, 0): 6}
assert ind_qn == {0: (0, 0, 0), 1: (1, -1, 0), 2: (1, -1, 1), 3: (1, 1, 0), 4: (1, 1, 1), 5: (2, -2, 0), 6: (2, 0, 0), 7: (2, 0, 1), 8: (2, 0, 2), 9: (2, 0, 3), 10: (2, 2, 0), 11: (3, -1, 0), 12: (3, -1, 1), 13: (3, 1, 0), 14: (3, 1, 1), 15: (4, 0, 0)}
#
si = StateIndexing(4, indexing='ssq')
qn_ind, ind_qn = make_quantum_numbers(si)
assert qn_ind == {(0, 0, 0, 0): 0,
(1, -1, 1, 0): 1,
(1, -1, 1, 1): 2,
(1, 1, 1, 0): 3,
(1, 1, 1, 1): 4,
(2, -2, 2, 0): 5,
(2, 0, 0, 0): 6,
(2, 0, 0, 1): 7,
(2, 0, 0, 2): 8,
(2, 0, 2, 0): 9,
(2, 2, 2, 0): 10,
(3, -1, 1, 0): 11,
(3, -1, 1, 1): 12,
(3, 1, 1, 0): 13,
(3, 1, 1, 1): 14,
(4, 0, 0, 0): 15}
assert ind_qn == {0: (0, 0, 0, 0),
1: (1, -1, 1, 0),
2: (1, -1, 1, 1),
3: (1, 1, 1, 0),
4: (1, 1, 1, 1),
5: (2, -2, 2, 0),
6: (2, 0, 0, 0),
7: (2, 0, 0, 1),
8: (2, 0, 0, 2),
9: (2, 0, 2, 0),
10: (2, 2, 2, 0),
11: (3, -1, 1, 0),
12: (3, -1, 1, 1),
13: (3, 1, 1, 0),
14: (3, 1, 1, 1),
15: (4, 0, 0, 0)}
def test_StateIndexing():
si = StateIndexing(4)
assert si.nsingle == 4
assert si.indexing == 'Lin'
assert si.ncharge == 5
assert si.nmany == 16
assert si.nleads == 0
#
for indexing in ['Lin', None]:
si = StateIndexing(4, indexing=indexing)
assert si.chargelst == [[0],[1, 2, 4, 8],[3, 5, 6, 9, 10, 12],[7, 11, 13, 14],[15]]
assert si.i == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
assert si.j == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
#
si = StateIndexing(4, indexing='charge')
assert si.chargelst_lin == [[0], [1, 2, 4, 8], [3, 5, 6, 9, 10, 12], [7, 11, 13, 14], [15]]
assert si.chargelst == [[0], [1, 2, 3, 4], [5, 6, 7, 8, 9, 10], [11, 12, 13, 14], [15]]
assert si.i == [0, 1, 2, 4, 8, 3, 5, 6, 9, 10, 12, 7, 11, 13, 14, 15]
assert si.j == [0, 1, 2, 5, 3, 6, 7, 11, 4, 8, 9, 12, 10, 13, 14, 15]
#
for indexing in ['sz', 'ssq']:
si = StateIndexing(4, indexing=indexing)
assert si.chargelst_lin == [[0], [1, 2, 4, 8], [3, 5, 6, 9, 10, 12], [7, 11, 13, 14], [15]]
assert si.chargelst == [[0], [1, 2, 3, 4], [5, 6, 7, 8, 9, 10], [11, 12, 13, 14], [15]]
assert si.szlst_lin == [[[0]], [[1, 2], [4, 8]], [[3], [5, 6, 9, 10], [12]], [[7, 11], [13, 14]], [[15]]]
assert si.szlst == [[[0]], [[1, 2], [3, 4]], [[5], [6, 7, 8, 9], [10]], [[11, 12], [13, 14]], [[15]]]
assert si.i == [0, 1, 2, 4, 8, 3, 5, 6, 9, 10, 12, 7, 11, 13, 14, 15]
assert si.j == [0, 1, 2, 5, 3, 6, 7, 11, 4, 8, 9, 12, 10, 13, 14, 15]
#
si = StateIndexing(4, indexing='ssq')
assert si.ssqlst == [[[[0]]], [[[1, 2]], [[3, 4]]], [[[5]], [[6, 7, 8], [9]], [[10]]], [[[11, 12]], [[13, 14]]], [[[15]]]]
assert si.get_state(6) == [0, 1, 0, 1]
assert si.get_state(6, linq=True) == [0, 1, 1, 0]
assert si.get_state(6, strq=True) == '0101'
assert si.get_state(6, linq=True, strq=True) == '0110'
assert si.get_ind([0, 1, 0, 1]) == 6
assert si.get_ind([0, 1, 1, 0], linq=True) == 6
assert si.get_lst(charge=2) == [5, 6, 7, 8, 9, 10]
assert si.get_lst(charge=2, sz=0) == [6, 7, 8, 9]
assert si.get_lst(charge=2, sz=0, ssq=0) == [6, 7, 8]
def test_StateIndexingPauli_charge():
si = StateIndexingPauli(4, indexing='charge')
assert si.npauli_ == 16
assert si.npauli == 16
assert list(si.shiftlst0) == [0, 1, 5, 11, 15, 16]
assert list(si.dictdm) == [0, 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 0]
assert si.statesdm == [[0], [1, 2, 3, 4], [5, 6, 7, 8, 9, 10], [11, 12, 13, 14], [15], []]
assert list(si.mapdm0) == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
assert list(si.booldm0) == [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
assert si.get_ind_dm0(8, 8, 2, maptype=0) == 8
assert si.get_ind_dm0(8, 8, 2, maptype=1) == 8
assert si.get_ind_dm0(8, 8, 2, maptype=2) == 1
#
si.set_statesdm([[0], [], [5, 6, 7, 8, 9, 10], [11, 12, 13, 14], []])
assert si.npauli_ == 11
assert si.npauli == 11
assert list(si.shiftlst0) == [0, 1, 1, 7, 11, 11]
assert list(si.dictdm) == [0, 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 0]
assert si.statesdm == [[0], [], [5, 6, 7, 8, 9, 10], [11, 12, 13, 14], [], []]
assert list(si.mapdm0) == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
assert list(si.booldm0) == [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
assert si.get_ind_dm0(8, 8, 2, maptype=0) == 4
assert si.get_ind_dm0(8, 8, 2, maptype=1) == 4
assert si.get_ind_dm0(8, 8, 2, maptype=2) == 1
def test_StateIndexingPauli_ssq():
si = StateIndexingPauli(4, indexing='ssq')
assert si.npauli_ == 16
assert si.npauli == 10
assert list(si.shiftlst0) == [0, 1, 5, 11, 15, 16]
assert list(si.dictdm) == [0, 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 0]
assert si.statesdm == [[0], [1, 2, 3, 4], [5, 6, 7, 8, 9, 10], [11, 12, 13, 14], [15], []]
assert list(si.mapdm0) == [0, 1, 2, 1, 2, 3, 4, 5, 6, 3, 3, 7, 8, 7, 8, 9]
assert list(si.booldm0) == [1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1]
assert si.get_ind_dm0(8, 8, 2, maptype=0) == 8
assert si.get_ind_dm0(8, 8, 2, maptype=1) == 6
assert si.get_ind_dm0(8, 8, 2, maptype=2) == 1
#
si.set_statesdm([[0], [], [5, 6, 7, 8, 9, 10], [11, 12, 13, 14], []])
assert si.npauli_ == 11
assert si.npauli == 7
assert list(si.shiftlst0) == [0, 1, 1, 7, 11, 11]
assert list(si.dictdm) == [0, 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 0]
assert si.statesdm == [[0], [], [5, 6, 7, 8, 9, 10], [11, 12, 13, 14], [], []]
assert list(si.mapdm0) == [0, 1, 2, 3, 4, 1, 1, 5, 6, 5, 6]
assert list(si.booldm0) == [1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0]
assert si.get_ind_dm0(8, 8, 2, maptype=0) == 4
assert si.get_ind_dm0(8, 8, 2, maptype=1) == 4
assert si.get_ind_dm0(8, 8, 2, maptype=2) == 1
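
# The StateIndexingDM / StateIndexingDMc variants also enumerate density-matrix
# elements: ndm0 counts zeroth-order (equal-charge) elements, reduced by
# symmetry where applicable, while ndm1 counts first-order elements coupling
# adjacent charge sectors.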
def test_StateIndexingDM_charge():
si = StateIndexingDM(4, indexing='charge')
assert si.ndm0_tot == 70
assert si.ndm0_ == 70
assert si.ndm0 == 43
assert si.ndm0r == 70
assert si.npauli_ == 16
assert si.npauli == 16
assert si.ndm1_tot == 56
assert si.ndm1_ == 56
assert si.ndm1 == 56
assert list(si.shiftlst0) == [0, 1, 17, 53, 69, 70]
assert list(si.shiftlst1) == [0, 4, 28, 52, 56]
assert list(si.lenlst) == [1, 4, 6, 4, 1]
assert list(si.dictdm) == [0, 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 0]
assert si.statesdm == [[0], [1, 2, 3, 4], [5, 6, 7, 8, 9, 10], [11, 12, 13, 14], [15], []]
assert list(si.mapdm0) == [0, 1, 16, 17, 18, 16, 2, 19, 20, 17, 19, 3, 21, 18, 20, 21, 4, 5, 22, 23, 24, 25, 26, 22, 6, 27, 28, 29, 30, 23, 27, 7, 31, 32, 33, 24, 28, 31, 8, 34, 35, 25, 29, 32, 34, 9, 36, 26, 30, 33, 35, 36, 10, 11, 37, 38, 39, 37, 12, 40, 41, 38, 40, 13, 42, 39, 41, 42, 14, 15]
assert si.inddm0 == {0: (0, 0), 1: (1, 1), 2: (2, 2), 3: (3, 3), 4: (4, 4), 5: (5, 5), 6: (6, 6), 7: (7, 7), 8: (8, 8), 9: (9, 9), 10: (10, 10), 11: (11, 11), 12: (12, 12), 13: (13, 13), 14: (14, 14), 15: (15, 15), 16: (1, 2), 17: (1, 3), 18: (1, 4), 19: (2, 3), 20: (2, 4), 21: (3, 4), 22: (5, 6), 23: (5, 7), 24: (5, 8), 25: (5, 9), 26: (5, 10), 27: (6, 7), 28: (6, 8), 29: (6, 9), 30: (6, 10), 31: (7, 8), 32: (7, 9), 33: (7, 10), 34: (8, 9), 35: (8, 10), 36: (9, 10), 37: (11, 12), 38: (11, 13), 39: (11, 14), 40: (12, 13), 41: (12, 14), 42: (13, 14)}
assert list(si.booldm0) == [1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1]
assert list(si.conjdm0) == [1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1]
assert si.get_ind_dm0(7, 8, 2, maptype=0) == 32
assert si.get_ind_dm0(7, 8, 2, maptype=1) == 31
assert si.get_ind_dm0(7, 8, 2, maptype=2) == 1
assert si.get_ind_dm0(7, 8, 2, maptype=3) == 1
assert si.get_ind_dm0(8, 7, 2, maptype=2) == 0
assert si.get_ind_dm0(8, 7, 2, maptype=3) == 0
assert si.get_ind_dm0(5, 8, 2, maptype=1) == 24
assert si.get_ind_dm1(5, 4, 1) == 7
#
si.set_statesdm([[0], [], [5, 6, 7, 8, 9, 10], [11, 12, 13, 14], []])
assert si.ndm0_ == 53
assert si.ndm0 == 32
assert si.ndm0r == 53
assert si.npauli_ == 11
assert si.npauli == 11
assert si.ndm1_ == 24
assert si.ndm1 == 24
assert list(si.shiftlst0) == [0, 1, 1, 37, 53, 53]
assert list(si.shiftlst1) == [0, 0, 0, 24, 24]
assert list(si.lenlst) == [1, 0, 6, 4, 0]
assert list(si.dictdm) == [0, 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 0]
assert si.statesdm == [[0], [], [5, 6, 7, 8, 9, 10], [11, 12, 13, 14], [], []]
assert list(si.mapdm0) == [0, 1, 11, 12, 13, 14, 15, 11, 2, 16, 17, 18, 19, 12, 16, 3, 20, 21, 22, 13, 17, 20, 4, 23, 24, 14, 18, 21, 23, 5, 25, 15, 19, 22, 24, 25, 6, 7, 26, 27, 28, 26, 8, 29, 30, 27, 29, 9, 31, 28, 30, 31, 10]
assert si.inddm0 == {0: (0, 0), 1: (5, 5), 2: (6, 6), 3: (7, 7), 4: (8, 8), 5: (9, 9), 6: (10, 10), 7: (11, 11), 8: (12, 12), 9: (13, 13), 10: (14, 14), 11: (5, 6), 12: (5, 7), 13: (5, 8), 14: (5, 9), 15: (5, 10), 16: (6, 7), 17: (6, 8), 18: (6, 9), 19: (6, 10), 20: (7, 8), 21: (7, 9), 22: (7, 10), 23: (8, 9), 24: (8, 10), 25: (9, 10), 26: (11, 12), 27: (11, 13), 28: (11, 14), 29: (12, 13), 30: (12, 14), 31: (13, 14)}
assert list(si.booldm0) == [1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1]
assert list(si.conjdm0) == [1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1]
assert si.get_ind_dm0(7, 8, 2, maptype=0) == 16
assert si.get_ind_dm0(7, 8, 2, maptype=1) == 20
assert si.get_ind_dm0(7, 8, 2, maptype=2) == 1
assert si.get_ind_dm0(7, 8, 2, maptype=3) == 1
assert si.get_ind_dm0(8, 7, 2, maptype=2) == 0
assert si.get_ind_dm0(8, 7, 2, maptype=3) == 0
assert si.get_ind_dm0(5, 8, 2, maptype=1) == 13
assert si.get_ind_dm1(5, 4, 1) == 3
def test_StateIndexingDM_ssq():
si = StateIndexingDM(4, indexing='ssq')
assert si.ndm0_tot == 70
assert si.ndm0_ == 70
assert si.ndm0 == 15
assert si.ndm0r == 20
assert si.npauli_ == 16
assert si.npauli == 10
assert si.ndm1_tot == 56
assert si.ndm1_ == 56
assert si.ndm1 == 56
assert list(si.shiftlst0) == [0, 1, 17, 53, 69, 70]
assert list(si.shiftlst1) == [0, 4, 28, 52, 56]
assert list(si.lenlst) == [1, 4, 6, 4, 1]
assert list(si.dictdm) == [0, 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 0]
assert si.statesdm == [[0], [1, 2, 3, 4], [5, 6, 7, 8, 9, 10], [11, 12, 13, 14], [15], []]
assert list(si.mapdm0) == [0, 1, 10, -1, -1, 10, 2, -1, -1, -1, -1, 1, 10, -1, -1, 10, 2, 3, -1, -1, -1, -1, -1, -1, 4, 11, 12, -1, -1, -1, 11, 5, 13, -1, -1, -1, 12, 13, 6, -1, -1, -1, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, 3, 7, 14, -1, -1, 14, 8, -1, -1, -1, -1, 7, 14, -1, -1, 14, 8, 9]
assert si.inddm0 == {0: (0, 0), 1: (1, 1), 2: (2, 2), 3: (5, 5), 4: (6, 6), 5: (7, 7), 6: (8, 8), 7: (11, 11), 8: (12, 12), 9: (15, 15), 10: (1, 2), 11: (6, 7), 12: (6, 8), 13: (7, 8), 14: (11, 12)}
assert list(si.booldm0) == [1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
assert list(si.conjdm0) == [1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1]
assert si.get_ind_dm0(7, 8, 2, maptype=0) == 32
assert si.get_ind_dm0(7, 8, 2, maptype=1) == 13
assert si.get_ind_dm0(7, 8, 2, maptype=2) == 1
assert si.get_ind_dm0(7, 8, 2, maptype=3) == 1
assert si.get_ind_dm0(8, 7, 2, maptype=2) == 0
assert si.get_ind_dm0(8, 7, 2, maptype=3) == 0
assert si.get_ind_dm0(5, 8, 2, maptype=1) == -1
assert si.get_ind_dm1(5, 4, 1) == 7
#
si.set_statesdm([[0], [], [5, 6, 7, 8, 9, 10], [11, 12, 13, 14], []])
assert si.ndm0_ == 53
assert si.ndm0 == 11
assert si.ndm0r == 15
assert si.npauli_ == 11
assert si.npauli == 7
assert si.ndm1_ == 24
assert si.ndm1 == 24
assert list(si.shiftlst0) == [0, 1, 1, 37, 53, 53]
assert list(si.shiftlst1) == [0, 0, 0, 24, 24]
assert list(si.lenlst) == [1, 0, 6, 4, 0]
assert list(si.dictdm) == [0, 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 0]
assert si.statesdm == [[0], [], [5, 6, 7, 8, 9, 10], [11, 12, 13, 14], [], []]
assert list(si.mapdm0) == [0, 1, -1, -1, -1, -1, -1, -1, 2, 7, 8, -1, -1, -1, 7, 3, 9, -1, -1, -1, 8, 9, 4, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, 1, 5, 10, -1, -1, 10, 6, -1, -1, -1, -1, 5, 10, -1, -1, 10, 6]
assert si.inddm0 == {0: (0, 0), 1: (5, 5), 2: (6, 6), 3: (7, 7), 4: (8, 8), 5: (11, 11), 6: (12, 12), 7: (6, 7), 8: (6, 8), 9: (7, 8), 10: (11, 12)}
assert list(si.booldm0) == [1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
assert list(si.conjdm0) == [1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1]
assert si.get_ind_dm0(7, 8, 2, maptype=0) == 16
assert si.get_ind_dm0(7, 8, 2, maptype=1) == 9
assert si.get_ind_dm0(7, 8, 2, maptype=2) == 1
assert si.get_ind_dm0(7, 8, 2, maptype=3) == 1
assert si.get_ind_dm0(8, 7, 2, maptype=2) == 0
assert si.get_ind_dm0(8, 7, 2, maptype=3) == 0
assert si.get_ind_dm0(5, 8, 2, maptype=1) == -1
assert si.get_ind_dm1(5, 4, 1) == 3
def test_StateIndexingDMc_charge():
si = StateIndexingDMc(4, indexing='charge')
assert si.ndm0_tot == 70
assert si.ndm0_ == 70
assert si.ndm0 == 70
assert si.ndm0r == 124
assert si.npauli_ == 16
assert si.npauli == 16
assert si.ndm1_tot == 56
assert si.ndm1_ == 56
assert si.ndm1 == 56
assert list(si.shiftlst0) == [0, 1, 17, 53, 69, 70]
assert list(si.shiftlst1) == [0, 4, 28, 52, 56]
assert list(si.lenlst) == [1, 4, 6, 4, 1]
assert list(si.dictdm) == [0, 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 0]
assert si.statesdm == [[0], [1, 2, 3, 4], [5, 6, 7, 8, 9, 10], [11, 12, 13, 14], [15], []]
assert list(si.mapdm0) == [0, 1, 16, 17, 18, 19, 2, 20, 21, 22, 23, 3, 24, 25, 26, 27, 4, 5, 28, 29, 30, 31, 32, 33, 6, 34, 35, 36, 37, 38, 39, 7, 40, 41, 42, 43, 44, 45, 8, 46, 47, 48, 49, 50, 51, 9, 52, 53, 54, 55, 56, 57, 10, 11, 58, 59, 60, 61, 12, 62, 63, 64, 65, 13, 66, 67, 68, 69, 14, 15]
assert si.inddm0 == {0: (0, 0), 1: (1, 1), 2: (2, 2), 3: (3, 3), 4: (4, 4), 5: (5, 5), 6: (6, 6), 7: (7, 7), 8: (8, 8), 9: (9, 9), 10: (10, 10), 11: (11, 11), 12: (12, 12), 13: (13, 13), 14: (14, 14), 15: (15, 15), 16: (1, 2), 17: (1, 3), 18: (1, 4), 19: (2, 1), 20: (2, 3), 21: (2, 4), 22: (3, 1), 23: (3, 2), 24: (3, 4), 25: (4, 1), 26: (4, 2), 27: (4, 3), 28: (5, 6), 29: (5, 7), 30: (5, 8), 31: (5, 9), 32: (5, 10), 33: (6, 5), 34: (6, 7), 35: (6, 8), 36: (6, 9), 37: (6, 10), 38: (7, 5), 39: (7, 6), 40: (7, 8), 41: (7, 9), 42: (7, 10), 43: (8, 5), 44: (8, 6), 45: (8, 7), 46: (8, 9), 47: (8, 10), 48: (9, 5), 49: (9, 6), 50: (9, 7), 51: (9, 8), 52: (9, 10), 53: (10, 5), 54: (10, 6), 55: (10, 7), 56: (10, 8), 57: (10, 9), 58: (11, 12), 59: (11, 13), 60: (11, 14), 61: (12, 11), 62: (12, 13), 63: (12, 14), 64: (13, 11), 65: (13, 12), 66: (13, 14), 67: (14, 11), 68: (14, 12), 69: (14, 13)}
assert list(si.booldm0) == [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
assert si.get_ind_dm0(7, 8, 2, maptype=0) == 32
assert si.get_ind_dm0(7, 8, 2, maptype=1) == 40
assert si.get_ind_dm0(7, 8, 2, maptype=2) == True
assert si.get_ind_dm0(8, 7, 2, maptype=2) == True
assert si.get_ind_dm0(5, 8, 2, maptype=1) == 30
assert si.get_ind_dm1(5, 4, 1) == 7
#
si.set_statesdm([[0], [], [5, 6, 7, 8, 9, 10], [11, 12, 13, 14], []])
assert si.ndm0_ == 53
assert si.ndm0 == 53
assert si.ndm0r == 95
assert si.npauli_ == 11
assert si.npauli == 11
assert si.ndm1_ == 24
assert si.ndm1 == 24
assert list(si.shiftlst0) == [0, 1, 1, 37, 53, 53]
assert list(si.shiftlst1) == [0, 0, 0, 24, 24]
assert list(si.lenlst) == [1, 0, 6, 4, 0]
assert list(si.dictdm) == [0, 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 0]
assert si.statesdm == [[0], [], [5, 6, 7, 8, 9, 10], [11, 12, 13, 14], [], []]
assert list(si.mapdm0) == [0, 1, 11, 12, 13, 14, 15, 16, 2, 17, 18, 19, 20, 21, 22, 3, 23, 24, 25, 26, 27, 28, 4, 29, 30, 31, 32, 33, 34, 5, 35, 36, 37, 38, 39, 40, 6, 7, 41, 42, 43, 44, 8, 45, 46, 47, 48, 9, 49, 50, 51, 52, 10]
assert si.inddm0 == {0: (0, 0), 1: (5, 5), 2: (6, 6), 3: (7, 7), 4: (8, 8), 5: (9, 9), 6: (10, 10), 7: (11, 11), 8: (12, 12), 9: (13, 13), 10: (14, 14), 11: (5, 6), 12: (5, 7), 13: (5, 8), 14: (5, 9), 15: (5, 10), 16: (6, 5), 17: (6, 7), 18: (6, 8), 19: (6, 9), 20: (6, 10), 21: (7, 5), 22: (7, 6), 23: (7, 8), 24: (7, 9), 25: (7, 10), 26: (8, 5), 27: (8, 6), 28: (8, 7), 29: (8, 9), 30: (8, 10), 31: (9, 5), 32: (9, 6), 33: (9, 7), 34: (9, 8), 35: (9, 10), 36: (10, 5), 37: (10, 6), 38: (10, 7), 39: (10, 8), 40: (10, 9), 41: (11, 12), 42: (11, 13), 43: (11, 14), 44: (12, 11), 45: (12, 13), 46: (12, 14), 47: (13, 11), 48: (13, 12), 49: (13, 14), 50: (14, 11), 51: (14, 12), 52: (14, 13)}
assert list(si.booldm0) == [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
assert si.get_ind_dm0(7, 8, 2, maptype=0) == 16
assert si.get_ind_dm0(7, 8, 2, maptype=1) == 23
assert si.get_ind_dm0(7, 8, 2, maptype=2) == 1
assert si.get_ind_dm0(8, 7, 2, maptype=2) == 1
assert si.get_ind_dm0(5, 8, 2, maptype=1) == 13
assert si.get_ind_dm1(5, 4, 1) == 3
def test_StateIndexingDMc_ssq():
si = StateIndexingDMc(4, indexing='ssq')
assert si.ndm0_tot == 70
assert si.ndm0_ == 70
assert si.ndm0 == 20
assert si.ndm0r == 30
assert si.npauli_ == 16
assert si.npauli == 10
assert si.ndm1_tot == 56
assert si.ndm1_ == 56
assert si.ndm1 == 56
assert list(si.shiftlst0) == [0, 1, 17, 53, 69, 70]
assert list(si.shiftlst1) == [0, 4, 28, 52, 56]
assert list(si.lenlst) == [1, 4, 6, 4, 1]
assert list(si.dictdm) == [0, 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 0]
assert si.statesdm == [[0], [1, 2, 3, 4], [5, 6, 7, 8, 9, 10], [11, 12, 13, 14], [15], []]
assert list(si.mapdm0) == [0, 1, 10, -1, -1, 11, 2, -1, -1, -1, -1, 1, 10, -1, -1, 11, 2, 3, -1, -1, -1, -1, -1, -1, 4, 12, 13, -1, -1, -1, 14, 5, 15, -1, -1, -1, 16, 17, 6, -1, -1, -1, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, 3, 7, 18, -1, -1, 19, 8, -1, -1, -1, -1, 7, 18, -1, -1, 19, 8, 9]
assert si.inddm0 == {0: (0, 0), 1: (1, 1), 2: (2, 2), 3: (5, 5), 4: (6, 6), 5: (7, 7), 6: (8, 8), 7: (11, 11), 8: (12, 12), 9: (15, 15), 10: (1, 2), 11: (2, 1), 12: (6, 7), 13: (6, 8), 14: (7, 6), 15: (7, 8), 16: (8, 6), 17: (8, 7), 18: (11, 12), 19: (12, 11)}
assert list(si.booldm0) == [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
assert si.get_ind_dm0(7, 8, 2, maptype=0) == 32
assert si.get_ind_dm0(7, 8, 2, maptype=1) == 15
assert si.get_ind_dm0(7, 8, 2, maptype=2) == 1
assert si.get_ind_dm0(8, 7, 2, maptype=2) == 1
assert si.get_ind_dm0(5, 8, 2, maptype=1) == -1
assert si.get_ind_dm1(5, 4, 1) == 7
#
si.set_statesdm([[0], [], [5, 6, 7, 8, 9, 10], [11, 12, 13, 14], []])
assert si.ndm0_ == 53
assert si.ndm0 == 15
assert si.ndm0r == 23
assert si.npauli_ == 11
assert si.npauli == 7
assert si.ndm1_ == 24
assert si.ndm1 == 24
assert list(si.shiftlst0) == [0, 1, 1, 37, 53, 53]
assert list(si.shiftlst1) == [0, 0, 0, 24, 24]
assert list(si.lenlst) == [1, 0, 6, 4, 0]
assert list(si.dictdm) == [0, 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 0]
assert si.statesdm == [[0], [], [5, 6, 7, 8, 9, 10], [11, 12, 13, 14], [], []]
assert list(si.mapdm0) == [0, 1, -1, -1, -1, -1, -1, -1, 2, 7, 8, -1, -1, -1, 9, 3, 10, -1, -1, -1, 11, 12, 4, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, 1, 5, 13, -1, -1, 14, 6, -1, -1, -1, -1, 5, 13, -1, -1, 14, 6]
assert si.inddm0 == {0: (0, 0), 1: (5, 5), 2: (6, 6), 3: (7, 7), 4: (8, 8), 5: (11, 11), 6: (12, 12), 7: (6, 7), 8: (6, 8), 9: (7, 6), 10: (7, 8), 11: (8, 6), 12: (8, 7), 13: (11, 12), 14: (12, 11)}
assert list(si.booldm0) == [1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
assert si.get_ind_dm0(7, 8, 2, maptype=0) == 16
assert si.get_ind_dm0(7, 8, 2, maptype=1) == 10
assert si.get_ind_dm0(7, 8, 2, maptype=2) == 1
assert si.get_ind_dm0(8, 7, 2, maptype=2) == 1
assert si.get_ind_dm0(5, 8, 2, maptype=1) == -1
assert si.get_ind_dm1(5, 4, 1) == 3
| gedaskir/qmeq | qmeq/tests/test_indexing.py | Python | bsd-2-clause | 27,609 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]

class HarvestDataOperations(object):
"""HarvestDataOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.agrifood.farming.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_farmer_id(
self,
farmer_id, # type: str
min_total_yield=None, # type: Optional[float]
max_total_yield=None, # type: Optional[float]
min_avg_yield=None, # type: Optional[float]
max_avg_yield=None, # type: Optional[float]
min_total_wet_mass=None, # type: Optional[float]
max_total_wet_mass=None, # type: Optional[float]
min_avg_wet_mass=None, # type: Optional[float]
max_avg_wet_mass=None, # type: Optional[float]
min_avg_moisture=None, # type: Optional[float]
max_avg_moisture=None, # type: Optional[float]
min_avg_speed=None, # type: Optional[float]
max_avg_speed=None, # type: Optional[float]
sources=None, # type: Optional[List[str]]
associated_boundary_ids=None, # type: Optional[List[str]]
operation_boundary_ids=None, # type: Optional[List[str]]
min_operation_start_date_time=None, # type: Optional[datetime.datetime]
max_operation_start_date_time=None, # type: Optional[datetime.datetime]
min_operation_end_date_time=None, # type: Optional[datetime.datetime]
max_operation_end_date_time=None, # type: Optional[datetime.datetime]
min_operation_modified_date_time=None, # type: Optional[datetime.datetime]
max_operation_modified_date_time=None, # type: Optional[datetime.datetime]
min_area=None, # type: Optional[float]
max_area=None, # type: Optional[float]
ids=None, # type: Optional[List[str]]
names=None, # type: Optional[List[str]]
property_filters=None, # type: Optional[List[str]]
statuses=None, # type: Optional[List[str]]
min_created_date_time=None, # type: Optional[datetime.datetime]
max_created_date_time=None, # type: Optional[datetime.datetime]
min_last_modified_date_time=None, # type: Optional[datetime.datetime]
max_last_modified_date_time=None, # type: Optional[datetime.datetime]
max_page_size=50, # type: Optional[int]
skip_token=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.HarvestDataListResponse"]
"""Returns a paginated list of harvest data resources under a particular farm.
:param farmer_id: ID of the associated farmer.
:type farmer_id: str
        :param min_total_yield: Minimum Yield value (inclusive).
        :type min_total_yield: float
        :param max_total_yield: Maximum Yield value (inclusive).
        :type max_total_yield: float
        :param min_avg_yield: Minimum AvgYield value (inclusive).
        :type min_avg_yield: float
        :param max_avg_yield: Maximum AvgYield value (inclusive).
        :type max_avg_yield: float
        :param min_total_wet_mass: Minimum Total WetMass value (inclusive).
        :type min_total_wet_mass: float
        :param max_total_wet_mass: Maximum Total WetMass value (inclusive).
        :type max_total_wet_mass: float
        :param min_avg_wet_mass: Minimum AvgWetMass value (inclusive).
        :type min_avg_wet_mass: float
        :param max_avg_wet_mass: Maximum AvgWetMass value (inclusive).
        :type max_avg_wet_mass: float
        :param min_avg_moisture: Minimum AvgMoisture value (inclusive).
        :type min_avg_moisture: float
        :param max_avg_moisture: Maximum AvgMoisture value (inclusive).
        :type max_avg_moisture: float
        :param min_avg_speed: Minimum AvgSpeed value (inclusive).
        :type min_avg_speed: float
:param max_avg_speed: Maximum AvgSpeed value (inclusive).
:type max_avg_speed: float
:param sources: Sources of the operation data.
:type sources: list[str]
:param associated_boundary_ids: Boundary IDs associated with operation data.
:type associated_boundary_ids: list[str]
:param operation_boundary_ids: Operation boundary IDs associated with operation data.
:type operation_boundary_ids: list[str]
:param min_operation_start_date_time: Minimum start date-time of the operation data, sample
format: yyyy-MM-ddTHH:mm:ssZ (inclusive).
:type min_operation_start_date_time: ~datetime.datetime
:param max_operation_start_date_time: Maximum start date-time of the operation data, sample
format: yyyy-MM-ddTHH:mm:ssZ (inclusive).
:type max_operation_start_date_time: ~datetime.datetime
:param min_operation_end_date_time: Minimum end date-time of the operation data, sample format:
yyyy-MM-ddTHH:mm:ssZ (inclusive).
:type min_operation_end_date_time: ~datetime.datetime
:param max_operation_end_date_time: Maximum end date-time of the operation data, sample format:
yyyy-MM-ddTHH:mm:ssZ (inclusive).
:type max_operation_end_date_time: ~datetime.datetime
:param min_operation_modified_date_time: Minimum modified date-time of the operation data,
sample format: yyyy-MM-ddTHH:mm:ssZ (inclusive).
:type min_operation_modified_date_time: ~datetime.datetime
:param max_operation_modified_date_time: Maximum modified date-time of the operation data,
sample format: yyyy-MM-ddTHH:mm:ssZ (inclusive).
:type max_operation_modified_date_time: ~datetime.datetime
:param min_area: Minimum area for which operation was applied (inclusive).
:type min_area: float
:param max_area: Maximum area for which operation was applied (inclusive).
:type max_area: float
:param ids: Ids of the resource.
:type ids: list[str]
:param names: Names of the resource.
:type names: list[str]
:param property_filters: Filters on key-value pairs within the Properties object.
eg. "{testKey} eq {testValue}".
:type property_filters: list[str]
:param statuses: Statuses of the resource.
:type statuses: list[str]
:param min_created_date_time: Minimum creation date of resource (inclusive).
:type min_created_date_time: ~datetime.datetime
:param max_created_date_time: Maximum creation date of resource (inclusive).
:type max_created_date_time: ~datetime.datetime
:param min_last_modified_date_time: Minimum last modified date of resource (inclusive).
:type min_last_modified_date_time: ~datetime.datetime
:param max_last_modified_date_time: Maximum last modified date of resource (inclusive).
:type max_last_modified_date_time: ~datetime.datetime
:param max_page_size: Maximum number of items needed (inclusive).
Minimum = 10, Maximum = 1000, Default value = 50.
:type max_page_size: int
:param skip_token: Skip token for getting next set of results.
:type skip_token: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either HarvestDataListResponse or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.agrifood.farming.models.HarvestDataListResponse]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.HarvestDataListResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-31-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_farmer_id.metadata['url'] # type: ignore
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'farmerId': self._serialize.url("farmer_id", farmer_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if min_total_yield is not None:
query_parameters['minTotalYield'] = self._serialize.query("min_total_yield", min_total_yield, 'float')
if max_total_yield is not None:
query_parameters['maxTotalYield'] = self._serialize.query("max_total_yield", max_total_yield, 'float')
if min_avg_yield is not None:
query_parameters['minAvgYield'] = self._serialize.query("min_avg_yield", min_avg_yield, 'float')
if max_avg_yield is not None:
query_parameters['maxAvgYield'] = self._serialize.query("max_avg_yield", max_avg_yield, 'float')
if min_total_wet_mass is not None:
query_parameters['minTotalWetMass'] = self._serialize.query("min_total_wet_mass", min_total_wet_mass, 'float')
if max_total_wet_mass is not None:
query_parameters['maxTotalWetMass'] = self._serialize.query("max_total_wet_mass", max_total_wet_mass, 'float')
if min_avg_wet_mass is not None:
query_parameters['minAvgWetMass'] = self._serialize.query("min_avg_wet_mass", min_avg_wet_mass, 'float')
if max_avg_wet_mass is not None:
query_parameters['maxAvgWetMass'] = self._serialize.query("max_avg_wet_mass", max_avg_wet_mass, 'float')
if min_avg_moisture is not None:
query_parameters['minAvgMoisture'] = self._serialize.query("min_avg_moisture", min_avg_moisture, 'float')
if max_avg_moisture is not None:
query_parameters['maxAvgMoisture'] = self._serialize.query("max_avg_moisture", max_avg_moisture, 'float')
if min_avg_speed is not None:
query_parameters['minAvgSpeed'] = self._serialize.query("min_avg_speed", min_avg_speed, 'float')
if max_avg_speed is not None:
query_parameters['maxAvgSpeed'] = self._serialize.query("max_avg_speed", max_avg_speed, 'float')
if sources is not None:
query_parameters['sources'] = [self._serialize.query("sources", q, 'str') if q is not None else '' for q in sources]
if associated_boundary_ids is not None:
query_parameters['associatedBoundaryIds'] = [self._serialize.query("associated_boundary_ids", q, 'str') if q is not None else '' for q in associated_boundary_ids]
if operation_boundary_ids is not None:
query_parameters['operationBoundaryIds'] = [self._serialize.query("operation_boundary_ids", q, 'str') if q is not None else '' for q in operation_boundary_ids]
if min_operation_start_date_time is not None:
query_parameters['minOperationStartDateTime'] = self._serialize.query("min_operation_start_date_time", min_operation_start_date_time, 'iso-8601')
if max_operation_start_date_time is not None:
query_parameters['maxOperationStartDateTime'] = self._serialize.query("max_operation_start_date_time", max_operation_start_date_time, 'iso-8601')
if min_operation_end_date_time is not None:
query_parameters['minOperationEndDateTime'] = self._serialize.query("min_operation_end_date_time", min_operation_end_date_time, 'iso-8601')
if max_operation_end_date_time is not None:
query_parameters['maxOperationEndDateTime'] = self._serialize.query("max_operation_end_date_time", max_operation_end_date_time, 'iso-8601')
if min_operation_modified_date_time is not None:
query_parameters['minOperationModifiedDateTime'] = self._serialize.query("min_operation_modified_date_time", min_operation_modified_date_time, 'iso-8601')
if max_operation_modified_date_time is not None:
query_parameters['maxOperationModifiedDateTime'] = self._serialize.query("max_operation_modified_date_time", max_operation_modified_date_time, 'iso-8601')
if min_area is not None:
query_parameters['minArea'] = self._serialize.query("min_area", min_area, 'float')
if max_area is not None:
query_parameters['maxArea'] = self._serialize.query("max_area", max_area, 'float')
if ids is not None:
query_parameters['ids'] = [self._serialize.query("ids", q, 'str') if q is not None else '' for q in ids]
if names is not None:
query_parameters['names'] = [self._serialize.query("names", q, 'str') if q is not None else '' for q in names]
if property_filters is not None:
query_parameters['propertyFilters'] = [self._serialize.query("property_filters", q, 'str') if q is not None else '' for q in property_filters]
if statuses is not None:
query_parameters['statuses'] = [self._serialize.query("statuses", q, 'str') if q is not None else '' for q in statuses]
if min_created_date_time is not None:
query_parameters['minCreatedDateTime'] = self._serialize.query("min_created_date_time", min_created_date_time, 'iso-8601')
if max_created_date_time is not None:
query_parameters['maxCreatedDateTime'] = self._serialize.query("max_created_date_time", max_created_date_time, 'iso-8601')
if min_last_modified_date_time is not None:
query_parameters['minLastModifiedDateTime'] = self._serialize.query("min_last_modified_date_time", min_last_modified_date_time, 'iso-8601')
if max_last_modified_date_time is not None:
query_parameters['maxLastModifiedDateTime'] = self._serialize.query("max_last_modified_date_time", max_last_modified_date_time, 'iso-8601')
if max_page_size is not None:
query_parameters['$maxPageSize'] = self._serialize.query("max_page_size", max_page_size, 'int', maximum=1000, minimum=10)
if skip_token is not None:
query_parameters['$skipToken'] = self._serialize.query("skip_token", skip_token, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'farmerId': self._serialize.url("farmer_id", farmer_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('HarvestDataListResponse', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
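            # The first page is built from the filter arguments above, while
            # continuation pages reuse the service-provided next_link.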
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_farmer_id.metadata = {'url': '/farmers/{farmerId}/harvest-data'} # type: ignore
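    # Usage sketch (the client attribute name is an assumption, not taken from
    # this file): iterating the returned ItemPaged lazily issues further GET
    # requests as pages are exhausted, e.g.
    #   for item in client.harvest_data.list_by_farmer_id(farmer_id="farmer1"):
    #       print(item)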
def list(
self,
min_total_yield=None, # type: Optional[float]
max_total_yield=None, # type: Optional[float]
min_avg_yield=None, # type: Optional[float]
max_avg_yield=None, # type: Optional[float]
min_total_wet_mass=None, # type: Optional[float]
max_total_wet_mass=None, # type: Optional[float]
min_avg_wet_mass=None, # type: Optional[float]
max_avg_wet_mass=None, # type: Optional[float]
min_avg_moisture=None, # type: Optional[float]
max_avg_moisture=None, # type: Optional[float]
min_avg_speed=None, # type: Optional[float]
max_avg_speed=None, # type: Optional[float]
sources=None, # type: Optional[List[str]]
associated_boundary_ids=None, # type: Optional[List[str]]
operation_boundary_ids=None, # type: Optional[List[str]]
min_operation_start_date_time=None, # type: Optional[datetime.datetime]
max_operation_start_date_time=None, # type: Optional[datetime.datetime]
min_operation_end_date_time=None, # type: Optional[datetime.datetime]
max_operation_end_date_time=None, # type: Optional[datetime.datetime]
min_operation_modified_date_time=None, # type: Optional[datetime.datetime]
max_operation_modified_date_time=None, # type: Optional[datetime.datetime]
min_area=None, # type: Optional[float]
max_area=None, # type: Optional[float]
ids=None, # type: Optional[List[str]]
names=None, # type: Optional[List[str]]
property_filters=None, # type: Optional[List[str]]
statuses=None, # type: Optional[List[str]]
min_created_date_time=None, # type: Optional[datetime.datetime]
max_created_date_time=None, # type: Optional[datetime.datetime]
min_last_modified_date_time=None, # type: Optional[datetime.datetime]
max_last_modified_date_time=None, # type: Optional[datetime.datetime]
max_page_size=50, # type: Optional[int]
skip_token=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.HarvestDataListResponse"]
"""Returns a paginated list of harvest data resources across all farmers.
        :param min_total_yield: Minimum Yield value (inclusive).
:type min_total_yield: float
:param max_total_yield: Maximum Yield value (inclusive).
:type max_total_yield: float
        :param min_avg_yield: Minimum AvgYield value (inclusive).
:type min_avg_yield: float
:param max_avg_yield: Maximum AvgYield value (inclusive).
:type max_avg_yield: float
        :param min_total_wet_mass: Minimum Total WetMass value (inclusive).
:type min_total_wet_mass: float
:param max_total_wet_mass: Maximum Total WetMass value (inclusive).
:type max_total_wet_mass: float
        :param min_avg_wet_mass: Minimum AvgWetMass value (inclusive).
:type min_avg_wet_mass: float
:param max_avg_wet_mass: Maximum AvgWetMass value (inclusive).
:type max_avg_wet_mass: float
        :param min_avg_moisture: Minimum AvgMoisture value (inclusive).
:type min_avg_moisture: float
:param max_avg_moisture: Maximum AvgMoisture value (inclusive).
:type max_avg_moisture: float
        :param min_avg_speed: Minimum AvgSpeed value (inclusive).
:type min_avg_speed: float
:param max_avg_speed: Maximum AvgSpeed value (inclusive).
:type max_avg_speed: float
:param sources: Sources of the operation data.
:type sources: list[str]
:param associated_boundary_ids: Boundary IDs associated with operation data.
:type associated_boundary_ids: list[str]
:param operation_boundary_ids: Operation boundary IDs associated with operation data.
:type operation_boundary_ids: list[str]
:param min_operation_start_date_time: Minimum start date-time of the operation data, sample
format: yyyy-MM-ddTHH:mm:ssZ (inclusive).
:type min_operation_start_date_time: ~datetime.datetime
:param max_operation_start_date_time: Maximum start date-time of the operation data, sample
format: yyyy-MM-ddTHH:mm:ssZ (inclusive).
:type max_operation_start_date_time: ~datetime.datetime
:param min_operation_end_date_time: Minimum end date-time of the operation data, sample format:
yyyy-MM-ddTHH:mm:ssZ (inclusive).
:type min_operation_end_date_time: ~datetime.datetime
:param max_operation_end_date_time: Maximum end date-time of the operation data, sample format:
yyyy-MM-ddTHH:mm:ssZ (inclusive).
:type max_operation_end_date_time: ~datetime.datetime
:param min_operation_modified_date_time: Minimum modified date-time of the operation data,
sample format: yyyy-MM-ddTHH:mm:ssZ (inclusive).
:type min_operation_modified_date_time: ~datetime.datetime
:param max_operation_modified_date_time: Maximum modified date-time of the operation data,
sample format: yyyy-MM-ddTHH:mm:ssZ (inclusive).
:type max_operation_modified_date_time: ~datetime.datetime
:param min_area: Minimum area for which operation was applied (inclusive).
:type min_area: float
:param max_area: Maximum area for which operation was applied (inclusive).
:type max_area: float
        :param ids: IDs of the resource.
:type ids: list[str]
:param names: Names of the resource.
:type names: list[str]
:param property_filters: Filters on key-value pairs within the Properties object.
         e.g. "{testKey} eq {testValue}".
:type property_filters: list[str]
:param statuses: Statuses of the resource.
:type statuses: list[str]
:param min_created_date_time: Minimum creation date of resource (inclusive).
:type min_created_date_time: ~datetime.datetime
:param max_created_date_time: Maximum creation date of resource (inclusive).
:type max_created_date_time: ~datetime.datetime
:param min_last_modified_date_time: Minimum last modified date of resource (inclusive).
:type min_last_modified_date_time: ~datetime.datetime
:param max_last_modified_date_time: Maximum last modified date of resource (inclusive).
:type max_last_modified_date_time: ~datetime.datetime
:param max_page_size: Maximum number of items needed (inclusive).
Minimum = 10, Maximum = 1000, Default value = 50.
:type max_page_size: int
:param skip_token: Skip token for getting next set of results.
:type skip_token: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either HarvestDataListResponse or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.agrifood.farming.models.HarvestDataListResponse]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.HarvestDataListResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-31-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if min_total_yield is not None:
query_parameters['minTotalYield'] = self._serialize.query("min_total_yield", min_total_yield, 'float')
if max_total_yield is not None:
query_parameters['maxTotalYield'] = self._serialize.query("max_total_yield", max_total_yield, 'float')
if min_avg_yield is not None:
query_parameters['minAvgYield'] = self._serialize.query("min_avg_yield", min_avg_yield, 'float')
if max_avg_yield is not None:
query_parameters['maxAvgYield'] = self._serialize.query("max_avg_yield", max_avg_yield, 'float')
if min_total_wet_mass is not None:
query_parameters['minTotalWetMass'] = self._serialize.query("min_total_wet_mass", min_total_wet_mass, 'float')
if max_total_wet_mass is not None:
query_parameters['maxTotalWetMass'] = self._serialize.query("max_total_wet_mass", max_total_wet_mass, 'float')
if min_avg_wet_mass is not None:
query_parameters['minAvgWetMass'] = self._serialize.query("min_avg_wet_mass", min_avg_wet_mass, 'float')
if max_avg_wet_mass is not None:
query_parameters['maxAvgWetMass'] = self._serialize.query("max_avg_wet_mass", max_avg_wet_mass, 'float')
if min_avg_moisture is not None:
query_parameters['minAvgMoisture'] = self._serialize.query("min_avg_moisture", min_avg_moisture, 'float')
if max_avg_moisture is not None:
query_parameters['maxAvgMoisture'] = self._serialize.query("max_avg_moisture", max_avg_moisture, 'float')
if min_avg_speed is not None:
query_parameters['minAvgSpeed'] = self._serialize.query("min_avg_speed", min_avg_speed, 'float')
if max_avg_speed is not None:
query_parameters['maxAvgSpeed'] = self._serialize.query("max_avg_speed", max_avg_speed, 'float')
if sources is not None:
query_parameters['sources'] = [self._serialize.query("sources", q, 'str') if q is not None else '' for q in sources]
if associated_boundary_ids is not None:
query_parameters['associatedBoundaryIds'] = [self._serialize.query("associated_boundary_ids", q, 'str') if q is not None else '' for q in associated_boundary_ids]
if operation_boundary_ids is not None:
query_parameters['operationBoundaryIds'] = [self._serialize.query("operation_boundary_ids", q, 'str') if q is not None else '' for q in operation_boundary_ids]
if min_operation_start_date_time is not None:
query_parameters['minOperationStartDateTime'] = self._serialize.query("min_operation_start_date_time", min_operation_start_date_time, 'iso-8601')
if max_operation_start_date_time is not None:
query_parameters['maxOperationStartDateTime'] = self._serialize.query("max_operation_start_date_time", max_operation_start_date_time, 'iso-8601')
if min_operation_end_date_time is not None:
query_parameters['minOperationEndDateTime'] = self._serialize.query("min_operation_end_date_time", min_operation_end_date_time, 'iso-8601')
if max_operation_end_date_time is not None:
query_parameters['maxOperationEndDateTime'] = self._serialize.query("max_operation_end_date_time", max_operation_end_date_time, 'iso-8601')
if min_operation_modified_date_time is not None:
query_parameters['minOperationModifiedDateTime'] = self._serialize.query("min_operation_modified_date_time", min_operation_modified_date_time, 'iso-8601')
if max_operation_modified_date_time is not None:
query_parameters['maxOperationModifiedDateTime'] = self._serialize.query("max_operation_modified_date_time", max_operation_modified_date_time, 'iso-8601')
if min_area is not None:
query_parameters['minArea'] = self._serialize.query("min_area", min_area, 'float')
if max_area is not None:
query_parameters['maxArea'] = self._serialize.query("max_area", max_area, 'float')
if ids is not None:
query_parameters['ids'] = [self._serialize.query("ids", q, 'str') if q is not None else '' for q in ids]
if names is not None:
query_parameters['names'] = [self._serialize.query("names", q, 'str') if q is not None else '' for q in names]
if property_filters is not None:
query_parameters['propertyFilters'] = [self._serialize.query("property_filters", q, 'str') if q is not None else '' for q in property_filters]
if statuses is not None:
query_parameters['statuses'] = [self._serialize.query("statuses", q, 'str') if q is not None else '' for q in statuses]
if min_created_date_time is not None:
query_parameters['minCreatedDateTime'] = self._serialize.query("min_created_date_time", min_created_date_time, 'iso-8601')
if max_created_date_time is not None:
query_parameters['maxCreatedDateTime'] = self._serialize.query("max_created_date_time", max_created_date_time, 'iso-8601')
if min_last_modified_date_time is not None:
query_parameters['minLastModifiedDateTime'] = self._serialize.query("min_last_modified_date_time", min_last_modified_date_time, 'iso-8601')
if max_last_modified_date_time is not None:
query_parameters['maxLastModifiedDateTime'] = self._serialize.query("max_last_modified_date_time", max_last_modified_date_time, 'iso-8601')
if max_page_size is not None:
query_parameters['$maxPageSize'] = self._serialize.query("max_page_size", max_page_size, 'int', maximum=1000, minimum=10)
if skip_token is not None:
query_parameters['$skipToken'] = self._serialize.query("skip_token", skip_token, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('HarvestDataListResponse', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/harvest-data'} # type: ignore
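    # --- Editorial sketch (hedged; not part of the generated client) ---
    # Shows how the paging operation above is typically consumed from an
    # operations instance; ItemPaged follows the $skipToken continuation
    # transparently. The ``id`` attribute on the returned items is an
    # assumption based on the usual Resource pattern, not verified here.
    def _example_list_usage(self):
        for item in self.list(min_total_yield=100.0, max_page_size=50):
            print(item.id)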
def get(
self,
farmer_id, # type: str
harvest_data_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.HarvestData"
"""Get a specified harvest data resource under a particular farmer.
:param farmer_id: ID of the associated farmer resource.
:type farmer_id: str
:param harvest_data_id: ID of the harvest data resource.
:type harvest_data_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: HarvestData, or the result of cls(response)
:rtype: ~azure.agrifood.farming.models.HarvestData
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.HarvestData"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-31-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'farmerId': self._serialize.url("farmer_id", farmer_id, 'str'),
'harvestDataId': self._serialize.url("harvest_data_id", harvest_data_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('HarvestData', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/farmers/{farmerId}/harvest-data/{harvestDataId}'} # type: ignore
def create_or_update(
self,
farmer_id, # type: str
harvest_data_id, # type: str
harvest_data=None, # type: Optional["_models.HarvestData"]
**kwargs # type: Any
):
# type: (...) -> "_models.HarvestData"
"""Creates or updates harvest data resource under a particular farmer.
:param farmer_id: ID of the farmer.
:type farmer_id: str
:param harvest_data_id: ID of the harvest data resource.
:type harvest_data_id: str
:param harvest_data: Harvest data resource payload to create or update.
:type harvest_data: ~azure.agrifood.farming.models.HarvestData
:keyword callable cls: A custom type or function that will be passed the direct response
:return: HarvestData, or the result of cls(response)
:rtype: ~azure.agrifood.farming.models.HarvestData
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.HarvestData"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-31-preview"
content_type = kwargs.pop("content_type", "application/merge-patch+json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'farmerId': self._serialize.url("farmer_id", farmer_id, 'str'),
'harvestDataId': self._serialize.url("harvest_data_id", harvest_data_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if harvest_data is not None:
body_content = self._serialize.body(harvest_data, 'HarvestData')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
if response.status_code == 200:
deserialized = self._deserialize('HarvestData', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('HarvestData', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/farmers/{farmerId}/harvest-data/{harvestDataId}'} # type: ignore
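    # --- Editorial sketch (hedged; not part of the generated client) ---
    # A merge-patch upsert against the operation above. The ``source``
    # keyword on the HarvestData constructor is an illustrative assumption
    # about the model, and the IDs are placeholders.
    def _example_upsert_usage(self):
        payload = _models.HarvestData(source="manual")
        created = self.create_or_update(
            farmer_id="contoso-farmer",
            harvest_data_id="harvest-2021-01",
            harvest_data=payload,
        )
        print(created.id)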
def delete(
self,
farmer_id, # type: str
harvest_data_id, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Deletes a specified harvest data resource under a particular farmer.
:param farmer_id: ID of the associated farmer resource.
:type farmer_id: str
:param harvest_data_id: ID of the harvest data.
:type harvest_data_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-31-preview"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'farmerId': self._serialize.url("farmer_id", farmer_id, 'str'),
'harvestDataId': self._serialize.url("harvest_data_id", harvest_data_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/farmers/{farmerId}/harvest-data/{harvestDataId}'} # type: ignore
| Azure/azure-sdk-for-python | sdk/agrifood/azure-agrifood-farming/azure/agrifood/farming/operations/_harvest_data_operations.py | Python | mit | 43,380 |
class brokerClient(object):
"""
    Broker server classes are called by the broker's server application (e.g. IB Gateway).
    We inherit from this for specific brokers and override the methods in the base class to ensure a consistent API.
"""
def __init__(self):
pass
def speakingClock(self):
        print("Method needs to be overridden to do anything interesting")
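# Editorial sketch (hedged): how a concrete broker is expected to specialise
# this base class; ibClient is a hypothetical name, not part of this file.
class ibClient(brokerClient):
    def speakingClock(self):
        # The override replaces the base class placeholder behaviour.
        print("Interactive Brokers client reporting for duty")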
| cmorgan/pysystemtrade | sysbrokers/baseClient.py | Python | gpl-3.0 | 392 |
# -*- coding: utf-8 -*-
# Natural Language Toolkit: A Chart Parser
#
# Copyright (C) 2001-2010 NLTK Project
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# Steven Bird <sb@csse.unimelb.edu.au>
# Jean Mark Gawron <gawron@mail.sdsu.edu>
# Peter Ljunglöf <peter.ljunglof@heatherleaf.se>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
#
# $Id: chart.py 8621 2010-08-06 16:23:49Z peter.ljunglof@heatherleaf.se $
"""
Data classes and parser implementations for \"chart parsers\", which
use dynamic programming to efficiently parse a text. A X{chart
parser} derives parse trees for a text by iteratively adding \"edges\"
to a \"chart.\" Each X{edge} represents a hypothesis about the tree
structure for a subsequence of the text. The X{chart} is a
\"blackboard\" for composing and combining these hypotheses.
When a chart parser begins parsing a text, it creates a new (empty)
chart, spanning the text. It then incrementally adds new edges to the
chart. A set of X{chart rules} specifies the conditions under which
new edges should be added to the chart. Once the chart reaches a
stage where none of the chart rules adds any new edges, parsing is
complete.
Charts are encoded with the L{Chart} class, and edges are encoded with
the L{TreeEdge} and L{LeafEdge} classes. The chart parser module
defines three chart parsers:
- C{ChartParser} is a simple and flexible chart parser. Given a
set of chart rules, it will apply those rules to the chart until
no more edges are added.
- C{SteppingChartParser} is a subclass of C{ChartParser} that can
be used to step through the parsing process.
"""
from nltk.tree import Tree
from nltk.grammar import WeightedGrammar, is_nonterminal, is_terminal
from nltk.compat import defaultdict
from nltk.internals import Deprecated
from api import *
import re
import warnings
########################################################################
## Edges
########################################################################
class EdgeI(object):
"""
A hypothesis about the structure of part of a sentence.
Each edge records the fact that a structure is (partially)
consistent with the sentence. An edge contains:
- A X{span}, indicating what part of the sentence is
consistent with the hypothesized structure.
- A X{left-hand side}, specifying what kind of structure is
hypothesized.
- A X{right-hand side}, specifying the contents of the
hypothesized structure.
- A X{dot position}, indicating how much of the hypothesized
structure is consistent with the sentence.
Every edge is either X{complete} or X{incomplete}:
- An edge is X{complete} if its structure is fully consistent
with the sentence.
- An edge is X{incomplete} if its structure is partially
consistent with the sentence. For every incomplete edge, the
span specifies a possible prefix for the edge's structure.
There are two kinds of edge:
- C{TreeEdges<TreeEdge>} record which trees have been found to
be (partially) consistent with the text.
      - C{LeafEdges<LeafEdge>} record which tokens occur in the text.
The C{EdgeI} interface provides a common interface to both types
of edge, allowing chart parsers to treat them in a uniform manner.
"""
def __init__(self):
if self.__class__ == EdgeI:
raise TypeError('Edge is an abstract interface')
#////////////////////////////////////////////////////////////
# Span
#////////////////////////////////////////////////////////////
def span(self):
"""
@return: A tuple C{(s,e)}, where C{subtokens[s:e]} is the
portion of the sentence that is consistent with this
edge's structure.
@rtype: C{(int, int)}
"""
raise AssertionError('EdgeI is an abstract interface')
def start(self):
"""
@return: The start index of this edge's span.
@rtype: C{int}
"""
raise AssertionError('EdgeI is an abstract interface')
def end(self):
"""
@return: The end index of this edge's span.
@rtype: C{int}
"""
raise AssertionError('EdgeI is an abstract interface')
def length(self):
"""
@return: The length of this edge's span.
@rtype: C{int}
"""
raise AssertionError('EdgeI is an abstract interface')
#////////////////////////////////////////////////////////////
# Left Hand Side
#////////////////////////////////////////////////////////////
def lhs(self):
"""
@return: This edge's left-hand side, which specifies what kind
of structure is hypothesized by this edge.
@see: L{TreeEdge} and L{LeafEdge} for a description of
the left-hand side values for each edge type.
"""
raise AssertionError('EdgeI is an abstract interface')
#////////////////////////////////////////////////////////////
# Right Hand Side
#////////////////////////////////////////////////////////////
def rhs(self):
"""
@return: This edge's right-hand side, which specifies
the content of the structure hypothesized by this
edge.
@see: L{TreeEdge} and L{LeafEdge} for a description of
the right-hand side values for each edge type.
"""
raise AssertionError('EdgeI is an abstract interface')
def dot(self):
"""
@return: This edge's dot position, which indicates how much of
the hypothesized structure is consistent with the
sentence. In particular, C{self.rhs[:dot]} is consistent
with C{subtoks[self.start():self.end()]}.
@rtype: C{int}
"""
raise AssertionError('EdgeI is an abstract interface')
def next(self):
"""
@return: The element of this edge's right-hand side that
immediately follows its dot.
@rtype: C{Nonterminal} or X{terminal} or C{None}
"""
raise AssertionError('EdgeI is an abstract interface')
def is_complete(self):
"""
@return: True if this edge's structure is fully consistent
with the text.
@rtype: C{boolean}
"""
raise AssertionError('EdgeI is an abstract interface')
def is_incomplete(self):
"""
@return: True if this edge's structure is partially consistent
with the text.
@rtype: C{boolean}
"""
raise AssertionError('EdgeI is an abstract interface')
#////////////////////////////////////////////////////////////
# Comparisons
#////////////////////////////////////////////////////////////
def __cmp__(self, other):
raise AssertionError('EdgeI is an abstract interface')
    def __hash__(self):
raise AssertionError('EdgeI is an abstract interface')
class TreeEdge(EdgeI):
"""
An edge that records the fact that a tree is (partially)
consistent with the sentence. A tree edge consists of:
- A X{span}, indicating what part of the sentence is
consistent with the hypothesized tree.
- A X{left-hand side}, specifying the hypothesized tree's node
value.
- A X{right-hand side}, specifying the hypothesized tree's
children. Each element of the right-hand side is either a
terminal, specifying a token with that terminal as its leaf
value; or a nonterminal, specifying a subtree with that
nonterminal's symbol as its node value.
- A X{dot position}, indicating which children are consistent
with part of the sentence. In particular, if C{dot} is the
      dot position, C{rhs} is the right-hand side, C{(start,end)}
is the span, and C{sentence} is the list of subtokens in the
sentence, then C{subtokens[start:end]} can be spanned by the
children specified by C{rhs[:dot]}.
For more information about edges, see the L{EdgeI} interface.
"""
def __init__(self, span, lhs, rhs, dot=0):
"""
Construct a new C{TreeEdge}.
@type span: C{(int, int)}
@param span: A tuple C{(s,e)}, where C{subtokens[s:e]} is the
portion of the sentence that is consistent with the new
edge's structure.
@type lhs: L{Nonterminal}
@param lhs: The new edge's left-hand side, specifying the
hypothesized tree's node value.
@type rhs: C{list} of (L{Nonterminal} and C{string})
@param rhs: The new edge's right-hand side, specifying the
hypothesized tree's children.
@type dot: C{int}
@param dot: The position of the new edge's dot. This position
specifies what prefix of the production's right hand side
is consistent with the text. In particular, if
C{sentence} is the list of subtokens in the sentence, then
C{subtokens[span[0]:span[1]]} can be spanned by the
children specified by C{rhs[:dot]}.
"""
self._lhs = lhs
self._rhs = tuple(rhs)
self._span = span
self._dot = dot
# [staticmethod]
def from_production(production, index):
"""
@return: A new C{TreeEdge} formed from the given production.
The new edge's left-hand side and right-hand side will
be taken from C{production}; its span will be
C{(index,index)}; and its dot position will be C{0}.
@rtype: L{TreeEdge}
"""
return TreeEdge(span=(index, index), lhs=production.lhs(),
rhs=production.rhs(), dot=0)
from_production = staticmethod(from_production)
def move_dot_forward(self, new_end):
"""
@return: A new C{TreeEdge} formed from this edge.
The new edge's dot position is increased by C{1},
and its end index will be replaced by C{new_end}.
@rtype: L{TreeEdge}
@param new_end: The new end index.
@type new_end: C{int}
"""
return TreeEdge(span=(self._span[0], new_end),
lhs=self._lhs, rhs=self._rhs,
dot=self._dot+1)
# Accessors
def lhs(self): return self._lhs
def span(self): return self._span
def start(self): return self._span[0]
def end(self): return self._span[1]
def length(self): return self._span[1] - self._span[0]
def rhs(self): return self._rhs
def dot(self): return self._dot
def is_complete(self): return self._dot == len(self._rhs)
def is_incomplete(self): return self._dot != len(self._rhs)
def next(self):
if self._dot >= len(self._rhs): return None
else: return self._rhs[self._dot]
# Comparisons & hashing
def __cmp__(self, other):
if self.__class__ != other.__class__: return -1
return cmp((self._span, self.lhs(), self.rhs(), self._dot),
(other._span, other.lhs(), other.rhs(), other._dot))
def __hash__(self):
return hash((self.lhs(), self.rhs(), self._span, self._dot))
# String representation
def __str__(self):
str = '[%s:%s] ' % (self._span[0], self._span[1])
str += '%-2r ->' % (self._lhs,)
for i in range(len(self._rhs)):
if i == self._dot: str += ' *'
str += ' %r' % (self._rhs[i],)
if len(self._rhs) == self._dot: str += ' *'
return str
def __repr__(self):
return '[Edge: %s]' % self
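# Editorial sketch (hedged): dotted-edge mechanics for the TreeEdge class
# above, on a toy production. Nonterminal and Production live in
# nltk.grammar in this NLTK version.
def _demo_tree_edge():
    from nltk.grammar import Nonterminal, Production
    NP, Det, N = Nonterminal('NP'), Nonterminal('Det'), Nonterminal('N')
    edge = TreeEdge.from_production(Production(NP, [Det, N]), 0)
    print edge          # [0:0] NP -> * Det N
    print edge.next()   # Det
    edge = edge.move_dot_forward(1)
    print edge          # [0:1] NP -> Det * N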
class LeafEdge(EdgeI):
"""
An edge that records the fact that a leaf value is consistent with
a word in the sentence. A leaf edge consists of:
- An X{index}, indicating the position of the word.
- A X{leaf}, specifying the word's content.
A leaf edge's left-hand side is its leaf value, and its right hand
side is C{()}. Its span is C{[index, index+1]}, and its dot
position is C{0}.
"""
def __init__(self, leaf, index):
"""
Construct a new C{LeafEdge}.
@param leaf: The new edge's leaf value, specifying the word
that is recorded by this edge.
@param index: The new edge's index, specifying the position of
the word that is recorded by this edge.
"""
self._leaf = leaf
self._index = index
# Accessors
def lhs(self): return self._leaf
def span(self): return (self._index, self._index+1)
def start(self): return self._index
def end(self): return self._index+1
def length(self): return 1
def rhs(self): return ()
def dot(self): return 0
def is_complete(self): return True
def is_incomplete(self): return False
def next(self): return None
# Comparisons & hashing
def __cmp__(self, other):
if not isinstance(other, LeafEdge): return -1
return cmp((self._index, self._leaf), (other._index, other._leaf))
def __hash__(self):
return hash((self._index, self._leaf))
# String representations
def __str__(self):
return '[%s:%s] %r' % (self._index, self._index+1, self._leaf)
def __repr__(self):
return '[Edge: %s]' % (self)
########################################################################
## Chart
########################################################################
class Chart(object):
"""
A blackboard for hypotheses about the syntactic constituents of a
sentence. A chart contains a set of edges, and each edge encodes
a single hypothesis about the structure of some portion of the
sentence.
The L{select} method can be used to select a specific collection
of edges. For example C{chart.select(is_complete=True, start=0)}
yields all complete edges whose start indices are 0. To ensure
the efficiency of these selection operations, C{Chart} dynamically
creates and maintains an index for each set of attributes that
have been selected on.
In order to reconstruct the trees that are represented by an edge,
the chart associates each edge with a set of child pointer lists.
A X{child pointer list} is a list of the edges that license an
edge's right-hand side.
@ivar _tokens: The sentence that the chart covers.
@ivar _num_leaves: The number of tokens.
@ivar _edges: A list of the edges in the chart
@ivar _edge_to_cpls: A dictionary mapping each edge to a set
of child pointer lists that are associated with that edge.
@ivar _indexes: A dictionary mapping tuples of edge attributes
to indices, where each index maps the corresponding edge
attribute values to lists of edges.
"""
def __init__(self, tokens):
"""
Construct a new chart. The chart is initialized with the
leaf edges corresponding to the terminal leaves.
@type tokens: L{list}
@param tokens: The sentence that this chart will be used to parse.
"""
# Record the sentence token and the sentence length.
self._tokens = tuple(tokens)
self._num_leaves = len(self._tokens)
# Initialise the chart.
self.initialize()
def initialize(self):
"""
Clear the chart.
"""
# A list of edges contained in this chart.
self._edges = []
# The set of child pointer lists associated with each edge.
self._edge_to_cpls = {}
# Indexes mapping attribute values to lists of edges
# (used by select()).
self._indexes = {}
#////////////////////////////////////////////////////////////
# Sentence Access
#////////////////////////////////////////////////////////////
def num_leaves(self):
"""
@return: The number of words in this chart's sentence.
@rtype: C{int}
"""
return self._num_leaves
def leaf(self, index):
"""
@return: The leaf value of the word at the given index.
@rtype: C{string}
"""
return self._tokens[index]
def leaves(self):
"""
@return: A list of the leaf values of each word in the
chart's sentence.
@rtype: C{list} of C{string}
"""
return self._tokens
#////////////////////////////////////////////////////////////
# Edge access
#////////////////////////////////////////////////////////////
def edges(self):
"""
@return: A list of all edges in this chart. New edges
that are added to the chart after the call to edges()
will I{not} be contained in this list.
@rtype: C{list} of L{EdgeI}
@see: L{iteredges}, L{select}
"""
return self._edges[:]
def iteredges(self):
"""
@return: An iterator over the edges in this chart. It is
I{not} guaranteed that new edges which are added to the
chart before the iterator is exhausted will also be
generated.
@rtype: C{iter} of L{EdgeI}
@see: L{edges}, L{select}
"""
return iter(self._edges)
# Iterating over the chart yields its edges.
__iter__ = iteredges
def num_edges(self):
"""
@return: The number of edges contained in this chart.
@rtype: C{int}
"""
return len(self._edge_to_cpls)
def select(self, **restrictions):
"""
@return: An iterator over the edges in this chart. Any
new edges that are added to the chart before the iterator
            is exhausted will also be generated.  C{restrictions}
can be used to restrict the set of edges that will be
generated.
@rtype: C{iter} of L{EdgeI}
@kwarg span: Only generate edges C{e} where C{e.span()==span}
@kwarg start: Only generate edges C{e} where C{e.start()==start}
@kwarg end: Only generate edges C{e} where C{e.end()==end}
@kwarg length: Only generate edges C{e} where C{e.length()==length}
@kwarg lhs: Only generate edges C{e} where C{e.lhs()==lhs}
@kwarg rhs: Only generate edges C{e} where C{e.rhs()==rhs}
@kwarg next: Only generate edges C{e} where C{e.next()==next}
@kwarg dot: Only generate edges C{e} where C{e.dot()==dot}
@kwarg is_complete: Only generate edges C{e} where
C{e.is_complete()==is_complete}
@kwarg is_incomplete: Only generate edges C{e} where
C{e.is_incomplete()==is_incomplete}
"""
# If there are no restrictions, then return all edges.
if restrictions=={}: return iter(self._edges)
# Find the index corresponding to the given restrictions.
restr_keys = restrictions.keys()
restr_keys.sort()
restr_keys = tuple(restr_keys)
# If it doesn't exist, then create it.
if restr_keys not in self._indexes:
self._add_index(restr_keys)
vals = tuple(restrictions[key] for key in restr_keys)
return iter(self._indexes[restr_keys].get(vals, []))
def _add_index(self, restr_keys):
"""
A helper function for L{select}, which creates a new index for
a given set of attributes (aka restriction keys).
"""
# Make sure it's a valid index.
for key in restr_keys:
if not hasattr(EdgeI, key):
raise ValueError, 'Bad restriction: %s' % key
# Create the index.
index = self._indexes[restr_keys] = {}
# Add all existing edges to the index.
for edge in self._edges:
vals = tuple(getattr(edge, key)() for key in restr_keys)
index.setdefault(vals, []).append(edge)
def _register_with_indexes(self, edge):
"""
A helper function for L{insert}, which registers the new
edge with all existing indexes.
"""
for (restr_keys, index) in self._indexes.items():
vals = tuple(getattr(edge, key)() for key in restr_keys)
index.setdefault(vals, []).append(edge)
#////////////////////////////////////////////////////////////
# Edge Insertion
#////////////////////////////////////////////////////////////
def insert_with_backpointer(self, new_edge, previous_edge, child_edge):
"""
Add a new edge to the chart, using a pointer to the previous edge.
"""
cpls = self.child_pointer_lists(previous_edge)
new_cpls = [cpl+(child_edge,) for cpl in cpls]
return self.insert(new_edge, *new_cpls)
def insert(self, edge, *child_pointer_lists):
"""
Add a new edge to the chart.
@type edge: L{EdgeI}
@param edge: The new edge
        @type child_pointer_lists: C{sequence} of C{tuple} of L{EdgeI}
@param child_pointer_lists: A sequence of lists of the edges that
were used to form this edge. This list is used to reconstruct
the trees (or partial trees) that are associated with C{edge}.
@rtype: C{bool}
@return: True if this operation modified the chart. In
particular, return true iff the chart did not already
contain C{edge}, or if it did not already associate
C{child_pointer_lists} with C{edge}.
"""
# Is it a new edge?
if edge not in self._edge_to_cpls:
# Add it to the list of edges.
self._append_edge(edge)
# Register with indexes.
self._register_with_indexes(edge)
# Get the set of child pointer lists for this edge.
cpls = self._edge_to_cpls.setdefault(edge,{})
chart_was_modified = False
for child_pointer_list in child_pointer_lists:
child_pointer_list = tuple(child_pointer_list)
if child_pointer_list not in cpls:
# It's a new CPL; register it, and return true.
cpls[child_pointer_list] = True
chart_was_modified = True
return chart_was_modified
def _append_edge(self, edge):
self._edges.append(edge)
#////////////////////////////////////////////////////////////
# Tree extraction & child pointer lists
#////////////////////////////////////////////////////////////
def parses(self, root, tree_class=Tree):
"""
@return: A list of the complete tree structures that span
the entire chart, and whose root node is C{root}.
"""
trees = []
for edge in self.select(start=0, end=self._num_leaves, lhs=root):
trees += self.trees(edge, tree_class=tree_class, complete=True)
return trees
def trees(self, edge, tree_class=Tree, complete=False):
"""
@return: A list of the tree structures that are associated
with C{edge}.
If C{edge} is incomplete, then the unexpanded children will be
encoded as childless subtrees, whose node value is the
corresponding terminal or nonterminal.
@rtype: C{list} of L{Tree}
@note: If two trees share a common subtree, then the same
C{Tree} may be used to encode that subtree in
both trees. If you need to eliminate this subtree
sharing, then create a deep copy of each tree.
"""
return self._trees(edge, complete, memo={}, tree_class=tree_class)
def _trees(self, edge, complete, memo, tree_class):
"""
A helper function for L{trees}.
@param memo: A dictionary used to record the trees that we've
generated for each edge, so that when we see an edge more
than once, we can reuse the same trees.
"""
# If we've seen this edge before, then reuse our old answer.
if edge in memo:
return memo[edge]
trees = []
# when we're reading trees off the chart, don't use incomplete edges
if complete and edge.is_incomplete():
return trees
# Until we're done computing the trees for edge, set
# memo[edge] to be empty. This has the effect of filtering
# out any cyclic trees (i.e., trees that contain themselves as
# descendants), because if we reach this edge via a cycle,
# then it will appear that the edge doesn't generate any
# trees.
memo[edge] = []
# Leaf edges.
if isinstance(edge, LeafEdge):
leaf = self._tokens[edge.start()]
memo[edge] = leaf
return [leaf]
# Each child pointer list can be used to form trees.
for cpl in self.child_pointer_lists(edge):
# Get the set of child choices for each child pointer.
# child_choices[i] is the set of choices for the tree's
# ith child.
child_choices = [self._trees(cp, complete, memo, tree_class)
for cp in cpl]
# For each combination of children, add a tree.
for children in self._choose_children(child_choices):
lhs = edge.lhs().symbol()
trees.append(tree_class(lhs, children))
# If the edge is incomplete, then extend it with "partial trees":
if edge.is_incomplete():
unexpanded = [tree_class(elt,[])
for elt in edge.rhs()[edge.dot():]]
for tree in trees:
tree.extend(unexpanded)
# Update the memoization dictionary.
memo[edge] = trees
# Return the list of trees.
return trees
def _choose_children(self, child_choices):
"""
A helper function for L{_trees} that finds the possible sets
of subtrees for a new tree.
@param child_choices: A list that specifies the options for
each child. In particular, C{child_choices[i]} is a list of
tokens and subtrees that can be used as the C{i}th child.
"""
children_lists = [[]]
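        # Editorial note (hedged): for non-string iterable choices the loop
        # below computes the same cross product as itertools.product.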
for child_choice in child_choices:
if hasattr(child_choice, '__iter__') and \
not isinstance(child_choice, basestring):
# Only iterate over the child trees
# if child_choice is iterable and NOT a string
children_lists = [child_list+[child]
for child in child_choice
for child_list in children_lists]
else:
# If child_choice is a string (or non-iterable)
# then it is a leaf
children_lists = [child_list+[child_choice]
for child_list in children_lists]
return children_lists
def child_pointer_lists(self, edge):
"""
@rtype: C{list} of C{list} of C{EdgeI}
@return: The set of child pointer lists for the given edge.
Each child pointer list is a list of edges that have
been used to form this edge.
"""
# Make a copy, in case they modify it.
return self._edge_to_cpls.get(edge, {}).keys()
#////////////////////////////////////////////////////////////
# Display
#////////////////////////////////////////////////////////////
def pp_edge(self, edge, width=None):
"""
@return: A pretty-printed string representation of a given edge
in this chart.
@rtype: C{string}
@param width: The number of characters allotted to each
index in the sentence.
"""
if width is None: width = 50/(self.num_leaves()+1)
(start, end) = (edge.start(), edge.end())
str = '|' + ('.'+' '*(width-1))*start
# Zero-width edges are "#" if complete, ">" if incomplete
if start == end:
if edge.is_complete(): str += '#'
else: str += '>'
# Spanning complete edges are "[===]"; Other edges are
# "[---]" if complete, "[--->" if incomplete
elif edge.is_complete() and edge.span() == (0,self._num_leaves):
str += '['+('='*width)*(end-start-1) + '='*(width-1)+']'
elif edge.is_complete():
str += '['+('-'*width)*(end-start-1) + '-'*(width-1)+']'
else:
str += '['+('-'*width)*(end-start-1) + '-'*(width-1)+'>'
str += (' '*(width-1)+'.')*(self._num_leaves-end)
return str + '| %s' % edge
def pp_leaves(self, width=None):
"""
@return: A pretty-printed string representation of this
chart's leaves. This string can be used as a header
for calls to L{pp_edge}.
"""
if width is None: width = 50/(self.num_leaves()+1)
if self._tokens is not None and width>1:
header = '|.'
for tok in self._tokens:
header += tok[:width-1].center(width-1)+'.'
header += '|'
else:
header = ''
return header
def pp(self, width=None):
"""
@return: A pretty-printed string representation of this chart.
@rtype: C{string}
@param width: The number of characters allotted to each
index in the sentence.
"""
if width is None: width = 50/(self.num_leaves()+1)
# sort edges: primary key=length, secondary key=start index.
# (and filter out the token edges)
edges = [(e.length(), e.start(), e) for e in self]
edges.sort()
edges = [e for (_,_,e) in edges]
return (self.pp_leaves(width) + '\n' +
'\n'.join(self.pp_edge(edge, width) for edge in edges))
#////////////////////////////////////////////////////////////
# Display: Dot (AT&T Graphviz)
#////////////////////////////////////////////////////////////
def dot_digraph(self):
# Header
s = 'digraph nltk_chart {\n'
#s += ' size="5,5";\n'
s += ' rankdir=LR;\n'
s += ' node [height=0.1,width=0.1];\n'
s += ' node [style=filled, color="lightgray"];\n'
# Set up the nodes
for y in range(self.num_edges(), -1, -1):
if y == 0:
s += ' node [style=filled, color="black"];\n'
for x in range(self.num_leaves()+1):
if y == 0 or (x <= self._edges[y-1].start() or
x >= self._edges[y-1].end()):
s += ' %04d.%04d [label=""];\n' % (x,y)
# Add a spacer
s += ' x [style=invis]; x->0000.0000 [style=invis];\n'
# Declare ranks.
for x in range(self.num_leaves()+1):
s += ' {rank=same;'
for y in range(self.num_edges()+1):
if y == 0 or (x <= self._edges[y-1].start() or
x >= self._edges[y-1].end()):
s += ' %04d.%04d' % (x,y)
s += '}\n'
# Add the leaves
s += ' edge [style=invis, weight=100];\n'
s += ' node [shape=plaintext]\n'
s += ' 0000.0000'
for x in range(self.num_leaves()):
s += '->%s->%04d.0000' % (self.leaf(x), x+1)
s += ';\n\n'
# Add the edges
s += ' edge [style=solid, weight=1];\n'
for y, edge in enumerate(self):
for x in range(edge.start()):
s += (' %04d.%04d -> %04d.%04d [style="invis"];\n' %
(x, y+1, x+1, y+1))
s += (' %04d.%04d -> %04d.%04d [label="%s"];\n' %
(edge.start(), y+1, edge.end(), y+1, edge))
for x in range(edge.end(), self.num_leaves()):
s += (' %04d.%04d -> %04d.%04d [style="invis"];\n' %
(x, y+1, x+1, y+1))
s += '}\n'
return s
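# Editorial sketch (hedged): the select() API described above. Leaf edges
# are inserted by hand here; parsers normally add them via LeafInitRule.
def _demo_chart_select():
    chart = Chart(['the', 'dog', 'barks'])
    for index in range(chart.num_leaves()):
        chart.insert(LeafEdge(chart.leaf(index), index), ())
    for edge in chart.select(start=1):
        print edge      # [1:2] 'dog'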
########################################################################
## Chart Rules
########################################################################
class ChartRuleI(object):
"""
A rule that specifies what new edges are licensed by any given set
of existing edges. Each chart rule expects a fixed number of
edges, as indicated by the class variable L{NUM_EDGES}. In
particular:
- A chart rule with C{NUM_EDGES=0} specifies what new edges are
licensed, regardless of existing edges.
- A chart rule with C{NUM_EDGES=1} specifies what new edges are
licensed by a single existing edge.
- A chart rule with C{NUM_EDGES=2} specifies what new edges are
licensed by a pair of existing edges.
@type NUM_EDGES: C{int}
@cvar NUM_EDGES: The number of existing edges that this rule uses
to license new edges. Typically, this number ranges from zero
to two.
"""
def apply(self, chart, grammar, *edges):
"""
Add the edges licensed by this rule and the given edges to the
chart.
@type edges: C{list} of L{EdgeI}
@param edges: A set of existing edges. The number of edges
that should be passed to C{apply} is specified by the
L{NUM_EDGES} class variable.
@rtype: C{list} of L{EdgeI}
@return: A list of the edges that were added.
"""
raise AssertionError, 'ChartRuleI is an abstract interface'
def apply_iter(self, chart, grammar, *edges):
"""
@return: A generator that will add edges licensed by this rule
and the given edges to the chart, one at a time. Each
time the generator is resumed, it will either add a new
edge and yield that edge; or return.
@rtype: C{iter} of L{EdgeI}
@type edges: C{list} of L{EdgeI}
@param edges: A set of existing edges. The number of edges
that should be passed to C{apply} is specified by the
L{NUM_EDGES} class variable.
"""
raise AssertionError, 'ChartRuleI is an abstract interface'
def apply_everywhere(self, chart, grammar):
"""
Add all the edges licensed by this rule and the edges in the
chart to the chart.
@rtype: C{list} of L{EdgeI}
@return: A list of the edges that were added.
"""
raise AssertionError, 'ChartRuleI is an abstract interface'
def apply_everywhere_iter(self, chart, grammar):
"""
@return: A generator that will add all edges licensed by
this rule, given the edges that are currently in the
chart, one at a time. Each time the generator is resumed,
it will either add a new edge and yield that edge; or
return.
@rtype: C{iter} of L{EdgeI}
"""
raise AssertionError, 'ChartRuleI is an abstract interface'
class AbstractChartRule(ChartRuleI):
"""
An abstract base class for chart rules. C{AbstractChartRule}
provides:
- A default implementation for C{apply}, based on C{apply_iter}.
- A default implementation for C{apply_everywhere_iter},
based on C{apply_iter}.
- A default implementation for C{apply_everywhere}, based on
C{apply_everywhere_iter}. Currently, this implementation
assumes that C{NUM_EDGES}<=3.
- A default implementation for C{__str__}, which returns a
      name based on the rule's class name.
"""
# Subclasses must define apply_iter.
def apply_iter(self, chart, grammar, *edges):
raise AssertionError, 'AbstractChartRule is an abstract class'
# Default: loop through the given number of edges, and call
# self.apply() for each set of edges.
def apply_everywhere_iter(self, chart, grammar):
if self.NUM_EDGES == 0:
for new_edge in self.apply_iter(chart, grammar):
yield new_edge
elif self.NUM_EDGES == 1:
for e1 in chart:
for new_edge in self.apply_iter(chart, grammar, e1):
yield new_edge
elif self.NUM_EDGES == 2:
for e1 in chart:
for e2 in chart:
for new_edge in self.apply_iter(chart, grammar, e1, e2):
yield new_edge
elif self.NUM_EDGES == 3:
for e1 in chart:
for e2 in chart:
for e3 in chart:
for new_edge in self.apply_iter(chart,grammar,e1,e2,e3):
yield new_edge
else:
raise AssertionError, 'NUM_EDGES>3 is not currently supported'
# Default: delegate to apply_iter.
def apply(self, chart, grammar, *edges):
return list(self.apply_iter(chart, grammar, *edges))
# Default: delegate to apply_everywhere_iter.
def apply_everywhere(self, chart, grammar):
return list(self.apply_everywhere_iter(chart, grammar))
# Default: return a name based on the class name.
def __str__(self):
# Add spaces between InitialCapsWords.
return re.sub('([a-z])([A-Z])', r'\1 \2', self.__class__.__name__)
#////////////////////////////////////////////////////////////
# Fundamental Rule
#////////////////////////////////////////////////////////////
class FundamentalRule(AbstractChartRule):
"""
A rule that joins two adjacent edges to form a single combined
edge. In particular, this rule specifies that any pair of edges:
- [A S{->} S{alpha} * B S{beta}][i:j]
- [B S{->} S{gamma} *][j:k]
licenses the edge:
      - [A S{->} S{alpha} B * S{beta}][i:k]
"""
NUM_EDGES = 2
def apply_iter(self, chart, grammar, left_edge, right_edge):
# Make sure the rule is applicable.
if not (left_edge.is_incomplete() and
right_edge.is_complete() and
left_edge.end() == right_edge.start() and
left_edge.next() == right_edge.lhs()):
return
# Construct the new edge.
new_edge = left_edge.move_dot_forward(right_edge.end())
# Insert it into the chart.
if chart.insert_with_backpointer(new_edge, left_edge, right_edge):
yield new_edge
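# Editorial sketch (hedged): the fundamental rule on two hand-built edges.
# An incomplete NP edge over [0:1] absorbs a complete N edge over [1:2],
# yielding the combined edge [0:2] NP -> Det N *.
def _demo_fundamental_rule():
    from nltk.grammar import Nonterminal
    NP, Det, N = Nonterminal('NP'), Nonterminal('Det'), Nonterminal('N')
    chart = Chart(['the', 'dog'])
    left = TreeEdge(span=(0, 1), lhs=NP, rhs=[Det, N], dot=1)
    right = TreeEdge(span=(1, 2), lhs=N, rhs=['dog'], dot=1)
    chart.insert(left, ())
    chart.insert(right, ())
    for new_edge in FundamentalRule().apply_iter(chart, None, left, right):
        print new_edge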
class SingleEdgeFundamentalRule(FundamentalRule):
"""
A rule that joins a given edge with adjacent edges in the chart,
to form combined edges. In particular, this rule specifies that
either of the edges:
- [A S{->} S{alpha} * B S{beta}][i:j]
- [B S{->} S{gamma} *][j:k]
licenses the edge:
      - [A S{->} S{alpha} B * S{beta}][i:k]
if the other edge is already in the chart.
@note: This is basically L{FundamentalRule}, with one edge left
unspecified.
"""
NUM_EDGES = 1
def apply_iter(self, chart, grammar, edge):
if edge.is_incomplete():
for new_edge in self._apply_incomplete(chart, grammar, edge):
yield new_edge
else:
for new_edge in self._apply_complete(chart, grammar, edge):
yield new_edge
def _apply_complete(self, chart, grammar, right_edge):
for left_edge in chart.select(end=right_edge.start(),
is_complete=False,
next=right_edge.lhs()):
new_edge = left_edge.move_dot_forward(right_edge.end())
if chart.insert_with_backpointer(new_edge, left_edge, right_edge):
yield new_edge
def _apply_incomplete(self, chart, grammar, left_edge):
for right_edge in chart.select(start=left_edge.end(),
is_complete=True,
lhs=left_edge.next()):
new_edge = left_edge.move_dot_forward(right_edge.end())
if chart.insert_with_backpointer(new_edge, left_edge, right_edge):
yield new_edge
#////////////////////////////////////////////////////////////
# Inserting Terminal Leafs
#////////////////////////////////////////////////////////////
class LeafInitRule(AbstractChartRule):
NUM_EDGES=0
def apply_iter(self, chart, grammar):
for index in range(chart.num_leaves()):
new_edge = LeafEdge(chart.leaf(index), index)
if chart.insert(new_edge, ()):
yield new_edge
#////////////////////////////////////////////////////////////
# Top-Down Prediction
#////////////////////////////////////////////////////////////
class TopDownInitRule(AbstractChartRule):
"""
A rule licensing edges corresponding to the grammar productions for
the grammar's start symbol. In particular, this rule specifies that:
      - [S S{->} * S{alpha}][0:0]
is licensed for each grammar production C{S S{->} S{alpha}}, where
C{S} is the grammar's start symbol.
"""
NUM_EDGES = 0
def apply_iter(self, chart, grammar):
for prod in grammar.productions(lhs=grammar.start()):
new_edge = TreeEdge.from_production(prod, 0)
if chart.insert(new_edge, ()):
yield new_edge
class CachedTopDownInitRule(TopDownInitRule, Deprecated):
"""Use L{TopDownInitRule} instead."""
class TopDownPredictRule(AbstractChartRule):
"""
A rule licensing edges corresponding to the grammar productions
for the nonterminal following an incomplete edge's dot. In
particular, this rule specifies that:
- [A S{->} S{alpha} * B S{beta}][i:j]
licenses the edge:
- [B S{->} * S{gamma}][j:j]
for each grammar production C{B S{->} S{gamma}}.
@note: This rule corresponds to the Predictor Rule in Earley parsing.
"""
NUM_EDGES = 1
def apply_iter(self, chart, grammar, edge):
if edge.is_complete(): return
for prod in grammar.productions(lhs=edge.next()):
new_edge = TreeEdge.from_production(prod, edge.end())
if chart.insert(new_edge, ()):
yield new_edge
class TopDownExpandRule(TopDownPredictRule, Deprecated):
"""Use TopDownPredictRule instead"""
class CachedTopDownPredictRule(TopDownPredictRule):
"""
A cached version of L{TopDownPredictRule}. After the first time
this rule is applied to an edge with a given C{end} and C{next},
it will not generate any more edges for edges with that C{end} and
C{next}.
If C{chart} or C{grammar} are changed, then the cache is flushed.
"""
def __init__(self):
TopDownPredictRule.__init__(self)
self._done = {}
def apply_iter(self, chart, grammar, edge):
if edge.is_complete(): return
next, index = edge.next(), edge.end()
if not is_nonterminal(next): return
# If we've already applied this rule to an edge with the same
# next & end, and the chart & grammar have not changed, then
# just return (no new edges to add).
done = self._done.get((next, index), (None,None))
if done[0] is chart and done[1] is grammar: return
# Add all the edges indicated by the top down expand rule.
for prod in grammar.productions(lhs=next):
# If the left corner in the predicted production is
# leaf, it must match with the input.
if prod.rhs():
first = prod.rhs()[0]
if is_terminal(first):
if index >= chart.num_leaves() or first != chart.leaf(index): continue
new_edge = TreeEdge.from_production(prod, index)
if chart.insert(new_edge, ()):
yield new_edge
# Record the fact that we've applied this rule.
self._done[next, index] = (chart, grammar)
class CachedTopDownExpandRule(CachedTopDownPredictRule, Deprecated):
"""Use L{CachedTopDownPredictRule} instead."""
#////////////////////////////////////////////////////////////
# Bottom-Up Prediction
#////////////////////////////////////////////////////////////
class BottomUpPredictRule(AbstractChartRule):
"""
A rule licensing any edge corresponding to a production whose
right-hand side begins with a complete edge's left-hand side. In
particular, this rule specifies that:
- [A S{->} S{alpha} *]
licenses the edge:
- [B S{->} * A S{beta}]
for each grammar production C{B S{->} A S{beta}}.
"""
NUM_EDGES = 1
def apply_iter(self, chart, grammar, edge):
if edge.is_incomplete(): return
for prod in grammar.productions(rhs=edge.lhs()):
new_edge = TreeEdge.from_production(prod, edge.start())
if chart.insert(new_edge, ()):
yield new_edge
class BottomUpPredictCombineRule(BottomUpPredictRule):
"""
A rule licensing any edge corresponding to a production whose
right-hand side begins with a complete edge's left-hand side. In
particular, this rule specifies that:
- [A S{->} S{alpha} *]
licenses the edge:
- [B S{->} A * S{beta}]
for each grammar production C{B S{->} A S{beta}}.
@note: This is like L{BottomUpPredictRule}, but it also applies
the L{FundamentalRule} to the resulting edge.
"""
NUM_EDGES = 1
def apply_iter(self, chart, grammar, edge):
if edge.is_incomplete(): return
for prod in grammar.productions(rhs=edge.lhs()):
new_edge = TreeEdge(edge.span(), prod.lhs(), prod.rhs(), 1)
if chart.insert(new_edge, (edge,)):
yield new_edge
class EmptyPredictRule(AbstractChartRule):
"""
A rule that inserts all empty productions as passive edges,
in every position in the chart.
"""
NUM_EDGES = 0
def apply_iter(self, chart, grammar):
for prod in grammar.productions(empty=True):
for index in xrange(chart.num_leaves() + 1):
new_edge = TreeEdge.from_production(prod, index)
if chart.insert(new_edge, ()):
yield new_edge
########################################################################
## Filtered Bottom Up
########################################################################
class FilteredSingleEdgeFundamentalRule(SingleEdgeFundamentalRule):
def _apply_complete(self, chart, grammar, right_edge):
end = right_edge.end()
nexttoken = end < chart.num_leaves() and chart.leaf(end)
for left_edge in chart.select(end=right_edge.start(),
is_complete=False,
next=right_edge.lhs()):
if _bottomup_filter(grammar, nexttoken, left_edge.rhs(), left_edge.dot()):
new_edge = left_edge.move_dot_forward(right_edge.end())
if chart.insert_with_backpointer(new_edge, left_edge, right_edge):
yield new_edge
def _apply_incomplete(self, chart, grammar, left_edge):
for right_edge in chart.select(start=left_edge.end(),
is_complete=True,
lhs=left_edge.next()):
end = right_edge.end()
nexttoken = end < chart.num_leaves() and chart.leaf(end)
if _bottomup_filter(grammar, nexttoken, left_edge.rhs(), left_edge.dot()):
new_edge = left_edge.move_dot_forward(right_edge.end())
if chart.insert_with_backpointer(new_edge, left_edge, right_edge):
yield new_edge
class FilteredBottomUpPredictCombineRule(BottomUpPredictCombineRule):
def apply_iter(self, chart, grammar, edge):
if edge.is_incomplete(): return
leftcorners = grammar.leftcorners
end = edge.end()
nexttoken = end < chart.num_leaves() and chart.leaf(end)
for prod in grammar.productions(rhs=edge.lhs()):
if _bottomup_filter(grammar, nexttoken, prod.rhs()):
new_edge = TreeEdge(edge.span(), prod.lhs(), prod.rhs(), 1)
if chart.insert(new_edge, (edge,)):
yield new_edge
def _bottomup_filter(grammar, nexttoken, rhs, dot=0):
if len(rhs) <= dot + 1:
return True
next = rhs[dot + 1]
if is_terminal(next):
return nexttoken == next
else:
return grammar.is_leftcorner(next, nexttoken)
########################################################################
## Generic Chart Parser
########################################################################
TD_STRATEGY = [LeafInitRule(),
TopDownInitRule(),
CachedTopDownPredictRule(),
SingleEdgeFundamentalRule()]
BU_STRATEGY = [LeafInitRule(),
EmptyPredictRule(),
BottomUpPredictRule(),
SingleEdgeFundamentalRule()]
BU_LC_STRATEGY = [LeafInitRule(),
EmptyPredictRule(),
BottomUpPredictCombineRule(),
SingleEdgeFundamentalRule()]
LC_STRATEGY = [LeafInitRule(),
FilteredBottomUpPredictCombineRule(),
FilteredSingleEdgeFundamentalRule()]
class ChartParser(ParserI):
"""
A generic chart parser. A X{strategy}, or list of
L{ChartRules<ChartRuleI>}, is used to decide what edges to add to
the chart. In particular, C{ChartParser} uses the following
algorithm to parse texts:
- Until no new edges are added:
- For each I{rule} in I{strategy}:
- Apply I{rule} to any applicable edges in the chart.
- Return any complete parses in the chart
"""
def __init__(self, grammar, strategy=BU_LC_STRATEGY, trace=0,
trace_chart_width=50, use_agenda=True, chart_class=Chart):
"""
Create a new chart parser, that uses C{grammar} to parse
texts.
@type grammar: L{ContextFreeGrammar}
@param grammar: The grammar used to parse texts.
@type strategy: C{list} of L{ChartRuleI}
@param strategy: A list of rules that should be used to decide
what edges to add to the chart (top-down strategy by default).
@type trace: C{int}
@param trace: The level of tracing that should be used when
parsing a text. C{0} will generate no tracing output;
and higher numbers will produce more verbose tracing
output.
@type trace_chart_width: C{int}
@param trace_chart_width: The default total width reserved for
the chart in trace output. The remainder of each line will
be used to display edges.
@type use_agenda: C{bool}
@param use_agenda: Use an optimized agenda-based algorithm,
if possible.
@param chart_class: The class that should be used to create
the parse charts.
"""
self._grammar = grammar
self._strategy = strategy
self._trace = trace
self._trace_chart_width = trace_chart_width
# If the strategy only consists of axioms (NUM_EDGES==0) and
# inference rules (NUM_EDGES==1), we can use an agenda-based algorithm:
self._use_agenda = use_agenda
self._chart_class = chart_class
self._axioms = []
self._inference_rules = []
for rule in strategy:
if rule.NUM_EDGES == 0:
self._axioms.append(rule)
elif rule.NUM_EDGES == 1:
self._inference_rules.append(rule)
else:
self._use_agenda = False
def grammar(self):
return self._grammar
def _trace_new_edges(self, chart, rule, new_edges, trace, edge_width):
if not trace: return
should_print_rule_header = trace > 1
for edge in new_edges:
if should_print_rule_header:
print '%s:' % rule
should_print_rule_header = False
print chart.pp_edge(edge, edge_width)
def chart_parse(self, tokens, trace=None):
"""
@return: The final parse L{Chart},
from which all possible parse trees can be extracted.
@param tokens: The sentence to be parsed
@type tokens: L{list} of L{string}
@rtype: L{Chart}
"""
if trace is None: trace = self._trace
trace_new_edges = self._trace_new_edges
tokens = list(tokens)
self._grammar.check_coverage(tokens)
chart = self._chart_class(tokens)
grammar = self._grammar
# Width, for printing trace edges.
trace_edge_width = self._trace_chart_width / (chart.num_leaves() + 1)
if trace: print chart.pp_leaves(trace_edge_width)
if self._use_agenda:
# Use an agenda-based algorithm.
for axiom in self._axioms:
new_edges = axiom.apply(chart, grammar)
trace_new_edges(chart, axiom, new_edges, trace, trace_edge_width)
inference_rules = self._inference_rules
agenda = chart.edges()
# We reverse the initial agenda, since it is a stack
# but chart.edges() functions as a queue.
agenda.reverse()
while agenda:
edge = agenda.pop()
for rule in inference_rules:
new_edges = rule.apply_iter(chart, grammar, edge)
if trace:
new_edges = list(new_edges)
trace_new_edges(chart, rule, new_edges, trace, trace_edge_width)
agenda += new_edges
else:
# Do not use an agenda-based algorithm.
edges_added = True
while edges_added:
edges_added = False
for rule in self._strategy:
new_edges = rule.apply_everywhere(chart, grammar)
                    # Keep iterating if *any* rule added edges this pass,
                    # not just the last rule in the strategy.
                    edges_added = edges_added or len(new_edges)
trace_new_edges(chart, rule, new_edges, trace, trace_edge_width)
# Return the final chart.
return chart
def nbest_parse(self, tokens, n=None, tree_class=Tree):
chart = self.chart_parse(tokens)
# Return a list of complete parses.
return chart.parses(self._grammar.start(), tree_class=tree_class)[:n]
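# Illustrative usage sketch (not part of the original module): parse a short
# sentence with the bottom-up left-corner strategy and print every complete
# parse.  The toy grammar below is an assumption made for this demo only.
def _demo_chart_parser_usage():
    from nltk.grammar import parse_cfg
    grammar = parse_cfg("""
    S -> NP VP
    VP -> V NP
    NP -> 'I' | 'John'
    V -> 'saw'
    """)
    parser = ChartParser(grammar, BU_LC_STRATEGY, trace=0)
    chart = parser.chart_parse('I saw John'.split())
    for tree in chart.parses(grammar.start()):
        print tree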
class TopDownChartParser(ChartParser):
"""
A L{ChartParser} using a top-down parsing strategy.
See L{ChartParser} for more information.
"""
def __init__(self, grammar, **parser_args):
ChartParser.__init__(self, grammar, TD_STRATEGY, **parser_args)
class BottomUpChartParser(ChartParser):
"""
A L{ChartParser} using a bottom-up parsing strategy.
See L{ChartParser} for more information.
"""
def __init__(self, grammar, **parser_args):
if isinstance(grammar, WeightedGrammar):
warnings.warn("BottomUpChartParser only works for ContextFreeGrammar, "
"use BottomUpProbabilisticChartParser instead",
category=DeprecationWarning)
ChartParser.__init__(self, grammar, BU_STRATEGY, **parser_args)
class BottomUpLeftCornerChartParser(ChartParser):
"""
A L{ChartParser} using a bottom-up left-corner parsing strategy.
This strategy is often more efficient than standard bottom-up.
See L{ChartParser} for more information.
"""
def __init__(self, grammar, **parser_args):
ChartParser.__init__(self, grammar, BU_LC_STRATEGY, **parser_args)
class LeftCornerChartParser(ChartParser):
def __init__(self, grammar, **parser_args):
if not grammar.is_nonempty():
raise ValueError("LeftCornerParser only works for grammars "
"without empty productions.")
ChartParser.__init__(self, grammar, LC_STRATEGY, **parser_args)
########################################################################
## Stepping Chart Parser
########################################################################
class SteppingChartParser(ChartParser):
"""
A C{ChartParser} that allows you to step through the parsing
process, adding a single edge at a time. It also allows you to
change the parser's strategy or grammar midway through parsing a
text.
The C{initialize} method is used to start parsing a text. C{step}
adds a single edge to the chart. C{set_strategy} changes the
strategy used by the chart parser. C{parses} returns the set of
parses that has been found by the chart parser.
@ivar _restart: Records whether the parser's strategy, grammar,
or chart has been changed. If so, then L{step} must restart
the parsing algorithm.
"""
def __init__(self, grammar, strategy=[], trace=0):
self._chart = None
self._current_chartrule = None
self._restart = False
ChartParser.__init__(self, grammar, strategy, trace)
#////////////////////////////////////////////////////////////
# Initialization
#////////////////////////////////////////////////////////////
def initialize(self, tokens):
"Begin parsing the given tokens."
self._chart = Chart(list(tokens))
self._restart = True
#////////////////////////////////////////////////////////////
# Stepping
#////////////////////////////////////////////////////////////
def step(self):
"""
@return: A generator that adds edges to the chart, one at a
time. Each time the generator is resumed, it adds a single
edge and yields that edge. If no more edges can be added,
then it yields C{None}.
If the parser's strategy, grammar, or chart is changed, then
the generator will continue adding edges using the new
strategy, grammar, or chart.
Note that this generator never terminates, since the grammar
or strategy might be changed to values that would add new
edges. Instead, it yields C{None} when no more edges can be
added with the current strategy and grammar.
"""
if self._chart is None:
            raise ValueError('Parser must be initialized first')
while 1:
self._restart = False
w = 50/(self._chart.num_leaves()+1)
for e in self._parse():
if self._trace > 1: print self._current_chartrule
if self._trace > 0: print self._chart.pp_edge(e,w)
yield e
if self._restart: break
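            # for/else: the else branch runs only when _parse() finishes
            # without a restart request, i.e. no rule can add further edges.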
else:
yield None # No more edges.
def _parse(self):
"""
A generator that implements the actual parsing algorithm.
L{step} iterates through this generator, and restarts it
whenever the parser's strategy, grammar, or chart is modified.
"""
chart = self._chart
grammar = self._grammar
edges_added = 1
while edges_added > 0:
edges_added = 0
for rule in self._strategy:
self._current_chartrule = rule
for e in rule.apply_everywhere_iter(chart, grammar):
edges_added += 1
yield e
#////////////////////////////////////////////////////////////
# Accessors
#////////////////////////////////////////////////////////////
def strategy(self):
"@return: The strategy used by this parser."
return self._strategy
def grammar(self):
"@return: The grammar used by this parser."
return self._grammar
def chart(self):
"@return: The chart that is used by this parser."
return self._chart
def current_chartrule(self):
"@return: The chart rule used to generate the most recent edge."
return self._current_chartrule
def parses(self, tree_class=Tree):
"@return: The parse trees currently contained in the chart."
return self._chart.parses(self._grammar.start(), tree_class)
#////////////////////////////////////////////////////////////
# Parser modification
#////////////////////////////////////////////////////////////
def set_strategy(self, strategy):
"""
        Change the strategy that the parser uses to decide which edges
to add to the chart.
@type strategy: C{list} of L{ChartRuleI}
@param strategy: A list of rules that should be used to decide
what edges to add to the chart.
"""
if strategy == self._strategy: return
self._strategy = strategy[:] # Make a copy.
self._restart = True
def set_grammar(self, grammar):
"Change the grammar used by the parser."
if grammar is self._grammar: return
self._grammar = grammar
self._restart = True
def set_chart(self, chart):
"Load a given chart into the chart parser."
if chart is self._chart: return
self._chart = chart
self._restart = True
#////////////////////////////////////////////////////////////
# Standard parser methods
#////////////////////////////////////////////////////////////
def nbest_parse(self, tokens, n=None, tree_class=Tree):
tokens = list(tokens)
self._grammar.check_coverage(tokens)
# Initialize ourselves.
self.initialize(tokens)
# Step until no more edges are generated.
for e in self.step():
if e is None: break
# Return a list of complete parses.
return self.parses(tree_class=tree_class)[:n]
########################################################################
## Demo Code
########################################################################
def demo_grammar():
from nltk.grammar import parse_cfg
return parse_cfg("""
S -> NP VP
PP -> "with" NP
NP -> NP PP
VP -> VP PP
VP -> Verb NP
VP -> Verb
NP -> Det Noun
NP -> "John"
NP -> "I"
Det -> "the"
Det -> "my"
Det -> "a"
Noun -> "dog"
Noun -> "cookie"
Verb -> "ate"
Verb -> "saw"
Prep -> "with"
Prep -> "under"
""")
def demo(choice=None,
should_print_times=True, should_print_grammar=False,
should_print_trees=True, trace=2,
sent='I saw John with a dog with my cookie', numparses=5):
"""
A demonstration of the chart parsers.
"""
import sys, time
from nltk import nonterminals, Production, ContextFreeGrammar
# The grammar for ChartParser and SteppingChartParser:
grammar = demo_grammar()
if should_print_grammar:
print "* Grammar"
print grammar
# Tokenize the sample sentence.
print "* Sentence:"
print sent
tokens = sent.split()
print tokens
print
# Ask the user which parser to test,
# if the parser wasn't provided as an argument
if choice is None:
print ' 1: Top-down chart parser'
print ' 2: Bottom-up chart parser'
print ' 3: Bottom-up left-corner chart parser'
print ' 4: Left-corner chart parser with bottom-up filter'
print ' 5: Stepping chart parser (alternating top-down & bottom-up)'
print ' 6: All parsers'
print '\nWhich parser (1-6)? ',
choice = sys.stdin.readline().strip()
print
choice = str(choice)
if choice not in "123456":
print 'Bad parser number'
return
# Keep track of how long each parser takes.
times = {}
strategies = {'1': ('Top-down', TD_STRATEGY),
'2': ('Bottom-up', BU_STRATEGY),
'3': ('Bottom-up left-corner', BU_LC_STRATEGY),
'4': ('Filtered left-corner', LC_STRATEGY)}
choices = []
    if choice in strategies: choices = [choice]
if choice=='6': choices = "1234"
# Run the requested chart parser(s), except the stepping parser.
for strategy in choices:
print "* Strategy: " + strategies[strategy][0]
print
cp = ChartParser(grammar, strategies[strategy][1], trace=trace)
t = time.time()
chart = cp.chart_parse(tokens)
parses = chart.parses(grammar.start())
times[strategies[strategy][0]] = time.time()-t
print "Nr edges in chart:", len(chart.edges())
if numparses:
assert len(parses)==numparses, 'Not all parses found'
if should_print_trees:
for tree in parses: print tree
else:
print "Nr trees:", len(parses)
print
# Run the stepping parser, if requested.
if choice in "56":
print "* Strategy: Stepping (top-down vs bottom-up)"
print
t = time.time()
cp = SteppingChartParser(grammar, trace=trace)
cp.initialize(tokens)
for i in range(5):
print '*** SWITCH TO TOP DOWN'
cp.set_strategy(TD_STRATEGY)
for j, e in enumerate(cp.step()):
if j>20 or e is None: break
print '*** SWITCH TO BOTTOM UP'
cp.set_strategy(BU_STRATEGY)
for j, e in enumerate(cp.step()):
if j>20 or e is None: break
times['Stepping'] = time.time()-t
print "Nr edges in chart:", len(cp.chart().edges())
if numparses:
assert len(cp.parses())==numparses, 'Not all parses found'
if should_print_trees:
for tree in cp.parses(): print tree
else:
print "Nr trees:", len(cp.parses())
print
# Print the times of all parsers:
if not (should_print_times and times): return
print "* Parsing times"
print
maxlen = max(len(key) for key in times.keys())
    format = '%' + str(maxlen) + 's parser: %6.3fsec'
times_items = times.items()
times_items.sort(lambda a,b:cmp(a[1], b[1]))
for (parser, t) in times_items:
print format % (parser, t)
if __name__ == '__main__': demo()
| markgw/jazzparser | lib/nltk/parse/chart.py | Python | gpl-3.0 | 65,856 |
#!/usr/bin/python
#
# GemUO
#
# (c) 2005-2010 Max Kellermann <max@duempel.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
from twisted.internet import reactor
from uo.entity import *
from gemuo.entity import Item
from gemuo.simple import simple_run
from gemuo.error import *
from gemuo.defer import deferred_find_item_in, deferred_find_item_in_backpack
from gemuo.engine.messages import PrintMessages
from gemuo.engine.restock import drop_into
def run(client):
PrintMessages(client)
world = client.world
for e in world.iter_entities_at(world.player.position.x, world.player.position.y-1):
print e
simple_run(run)
| fungos/gemuo | src/entities.py | Python | gpl-2.0 | 1,058 |
# -*- coding: utf-8 -*-
##############################################################################
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# OeMedical, HMS Opensource Solution
##############################################################################
# Collaborators of this module:
# Special Credit and Thanks to Thymbra Latinoamericana S.A.
# Coded by: Parthiv Patel <parthiv@techreceptives.com>
# Coded by: Ruchir Shukla <ruchir@techreceptives.com>
# Planned by: Parthiv Patel <parthiv@techreceptives.com>
# Planned by: Nhomar Hernandéz <nhomar@vauxoo.com>
#
##############################################################################
# This project is maintained by the OeMedical Team:
# https://launchpad.net/oemedical
#
##############################################################################
# It is a collaborative effort between several companies that want to join
# efforts in have a proposal solid and strong in the Health Care environment
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'OeMedical : gynecology and obstetrics',
'version': '1.0.1',
'author': "OeMEdical Team",
'category': 'Generic Modules/Others',
'depends': ['oemedical', 'oemedical_emr','oemedical_his'],
'application': True,
'description': """
About OeMedical gynecology and obstetrics
-----------------------------------------
    - Gynecology
        - Menstrual History
- Detection
- Mammography History
- PAP / COLPO
- PAP History
- Colposcopy History
- Obstetrics
- Pregnancy history
""",
"website": "http://launchpad.net/oemedical",
"licence": "AGPL v3",
"data": [
'views/oemedical_gynecology_and_obstetrics_view.xml', # view has errors, please correct before enabling....
'views/oemedical_menu.xml',
'security/ir.model.access.csv',
],
"demo": [
],
'test':[
],
'css': [
],
'js': [
],
'qweb': [
],
"active": False,
"installable": True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| eneldoserrata/marcos_openerp | oemedical/oemedical_gynecology_and_obstetrics/__openerp__.py | Python | agpl-3.0 | 2,889 |
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
import os, sys
from opus_core.tests import opus_unittest
from opus_core.opus_package import OpusPackage
class TestTutorialCode(opus_unittest.OpusTestCase):
"""Fails if tutorial_code.py fails.
"""
def test_tutorial_code(self):
opus_docs_path = OpusPackage().get_path_for_package('opus_docs')
error_code = os.system('%s "%s"'
% (sys.executable, os.path.join(opus_docs_path, 'manual', 'part-command-line', 'tutorial_code.py')))
self.assert_(not error_code)
if __name__=="__main__":
opus_unittest.main()
| christianurich/VIBe2UrbanSim | 3rdparty/opus/src/opus_docs/tests/test_tutorial_code.py | Python | gpl-2.0 | 709 |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 30, transform = "Fisher", sigma = 0.0, exog_count = 100, ar_order = 0); | antoinecarme/pyaf | tests/artificial/transf_Fisher/trend_MovingMedian/cycle_30/ar_/test_artificial_1024_Fisher_MovingMedian_30__100.py | Python | bsd-3-clause | 267 |
__all__ = ["root"]
import os
import numpy as np
import scipy.optimize as opt
from ._memoize import memoize
from ._calc_energies import *
from ._determinants import *
from ._singular_points import *
@memoize(500)
def root(L, mpi, a0, r0, a2, r2, d=np.array([0., 0., 0.]), irrep="A1", n=1):
"""Returns roots of the determinant equation.
Args:
L: lattice size
mpi: lattice pion mass
a0: scattering length for l=0
r0: scattering radius for l=0
a2: scattering length for l=2
r2: scattering radius for l=2
d: total momentum of the system
irrep: the chosen irrep
n: number of roots to look for
Returns:
The values of the roots.
"""
#print("Entering root")
#print(irrep, L, d, n)
# These variables were global variables
r_prec = 1e-10
# setup of variables
nroot = 0
roots = np.zeros(n)
    # CJ: Used lambda functions to make code more compact
if (irrep == "A1"):
if (np.array_equal(d, np.array([0., 0., 0.]))):
calc_det = lambda q: det000(L, mpi, a0, r0, q)
singular_points = lambda i: float(i)
n_interval = 5
n_blocks = 10
elif (np.array_equal(d, np.array([0., 0., 1.]))):
calc_det = lambda q: det001(L, mpi, a0, r0, a2, r2, q)
singular_points = lambda i: SinglePointsP1(mpi, L, i)
n_interval = 6
n_blocks = 20
elif (np.array_equal(d, np.array([1., 1., 0.]))):
calc_det = lambda q: det110(L, mpi, a0, r0, a2, r2, q)
singular_points = lambda i: SinglePointsP2(mpi, L, i)
n_interval = 7
n_blocks = 20
elif (np.array_equal(d, np.array([1., 1., 1.]))):
calc_det = lambda q: det111(L, mpi, a0, r0, a2, r2, q)
singular_points = lambda i: SinglePointsP3(mpi, L, i)
n_interval = 7
n_blocks = 20
else:
print("wrong value of dVec")
os.sys.exit(-5)
elif (irrep == "E"):
if (np.array_equal(d, np.array([0., 0., 0.]))):
calc_det = lambda q: det000_E(L, mpi, a2, r2, q)
singular_points = lambda i: float(i)
n_interval = 5
n_blocks = 10
else:
print("wrong value of dVec")
os.sys.exit(-5)
elif (irrep == "T2"):
if (np.array_equal(d, np.array([0., 0., 0.]))):
calc_det = lambda q: det000_T2(L, mpi, a2, r2, q)
singular_points = lambda i: float(i)
n_interval = 5
n_blocks = 10
else:
print("wrong value of dVec")
os.sys.exit(-5)
else:
print("wrong irrep")
# set up grid
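    # Each interval between consecutive singular points is shrunk by 1e-3 on
    # both ends and split into n_blocks windows; the main loop below scans
    # these windows for a sign change of the determinant before handing the
    # bracket to the root finder.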
q2_range_min = np.zeros(n_interval*n_blocks)
q2_range_max = np.zeros(n_interval*n_blocks)
for i in range(n_interval):
q2_min = singular_points(i) + 10e-4
q2_max = singular_points(i+1) - 10e-4
q2_delta = (q2_max - q2_min) / float(n_blocks)
for j in range(n_blocks):
q2_range_min[i*n_blocks + j] = q2_min + j * q2_delta
q2_range_max[i*n_blocks + j] = q2_min + (j + 1) * q2_delta
# main loop
loop_i = 0
while(nroot < n):
det1 = calc_det(q2_range_min[loop_i])
det2 = calc_det(q2_range_max[loop_i])
if (det1 * det2 < 0):
try:
roots[nroot] = opt.brentq(calc_det, q2_range_min[loop_i], \
q2_range_max[loop_i], disp=True)
nroot += 1
except RuntimeError:
print("next loop")
loop_i += 1
if(loop_i == q2_range_min.shape[0]):
print("root out of range. d = (%lf, %lf, %lf)" % (d[0], d[1], d[2]))
os.sys.exit(-5)
return roots
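# Illustrative sketch (not part of the original module): the same
# bracket-then-bisect pattern used by root(), applied to a toy function.
# scipy.optimize.brentq needs a sign change over [lo, hi] to converge.
def _demo_bracketing():
    f = lambda x: np.cos(x) - x
    grid = np.linspace(0., 2., 21)
    for lo, hi in zip(grid[:-1], grid[1:]):
        if f(lo) * f(hi) < 0.:
            return opt.brentq(f, lo, hi)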
| chjost/analysis-code | analysis/findroot.py | Python | gpl-3.0 | 3,814 |
"""
Copyright 2016, 2017 UFPE - Universidade Federal de Pernambuco
This file is part of the Amadeus Learning Management System program, or simply Amadeus LMS.
Amadeus LMS is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation (FSF); version 2 of the License.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License, under the title "LICENSE", along with this program; if not, write to the Free Software Foundation (FSF) Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
"""
from django.contrib import admin
from .models import News
# Register your models here.
class NewsAdmin(admin.ModelAdmin):
list_display = ['title', 'slug', 'creator', 'create_date']
search_fields = ['title']
admin.site.register(News, NewsAdmin)
| amadeusproject/amadeuslms | news/admin.py | Python | gpl-2.0 | 1,126 |
from __future__ import (
absolute_import, print_function, division, unicode_literals
)
import inspect
import json as json_module
import re
import six
from collections import namedtuple, Sequence, Sized
from functools import update_wrapper
from cookies import Cookies
from requests.utils import cookiejar_from_dict
from requests.exceptions import ConnectionError
from requests.sessions import REDIRECT_STATI
try:
from requests.packages.urllib3.response import HTTPResponse
except ImportError:
from urllib3.response import HTTPResponse
if six.PY2:
from urlparse import urlparse, parse_qsl
else:
from urllib.parse import urlparse, parse_qsl
if six.PY2:
try:
from six import cStringIO as BufferIO
except ImportError:
from six import StringIO as BufferIO
else:
from io import BytesIO as BufferIO
Call = namedtuple('Call', ['request', 'response'])
_wrapper_template = """\
def wrapper%(signature)s:
with responses:
return func%(funcargs)s
"""
def _is_string(s):
return isinstance(s, (six.string_types, six.text_type))
def _is_redirect(response):
try:
# 2.0.0 <= requests <= 2.2
return response.is_redirect
except AttributeError:
# requests > 2.2
return (
# use request.sessions conditional
response.status_code in REDIRECT_STATI and
'location' in response.headers
)
def get_wrapped(func, wrapper_template, evaldict):
# Preserve the argspec for the wrapped function so that testing
# tools such as pytest can continue to use their fixture injection.
args, a, kw, defaults = inspect.getargspec(func)
signature = inspect.formatargspec(args, a, kw, defaults)
is_bound_method = hasattr(func, '__self__')
if is_bound_method:
args = args[1:] # Omit 'self'
callargs = inspect.formatargspec(args, a, kw, None)
ctx = {'signature': signature, 'funcargs': callargs}
six.exec_(wrapper_template % ctx, evaldict)
wrapper = evaldict['wrapper']
update_wrapper(wrapper, func)
if is_bound_method:
wrapper = wrapper.__get__(func.__self__, type(func.__self__))
return wrapper
class CallList(Sequence, Sized):
def __init__(self):
self._calls = []
def __iter__(self):
return iter(self._calls)
def __len__(self):
return len(self._calls)
def __getitem__(self, idx):
return self._calls[idx]
def add(self, request, response):
self._calls.append(Call(request, response))
def reset(self):
self._calls = []
def _ensure_url_default_path(url, match_querystring):
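    # A bare "scheme://host" URL contains exactly two slashes; append a
    # trailing "/" (inserted before "?" when the query string matters) so it
    # lines up with the URLs produced by requests.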
if _is_string(url) and url.count('/') == 2:
if match_querystring:
return url.replace('?', '/?', 1)
else:
return url + '/'
return url
class RequestsMock(object):
DELETE = 'DELETE'
GET = 'GET'
HEAD = 'HEAD'
OPTIONS = 'OPTIONS'
PATCH = 'PATCH'
POST = 'POST'
PUT = 'PUT'
def __init__(self, assert_all_requests_are_fired=True):
self._calls = CallList()
self.reset()
self.assert_all_requests_are_fired = assert_all_requests_are_fired
def reset(self):
self._urls = []
self._calls.reset()
def add(self, method, url, body='', match_querystring=False,
status=200, adding_headers=None, stream=False,
content_type='text/plain', json=None):
# if we were passed a `json` argument,
# override the body and content_type
if json is not None:
body = json_module.dumps(json)
content_type = 'application/json'
# ensure the url has a default path set if the url is a string
url = _ensure_url_default_path(url, match_querystring)
# body must be bytes
if isinstance(body, six.text_type):
body = body.encode('utf-8')
self._urls.append({
'url': url,
'method': method,
'body': body,
'content_type': content_type,
'match_querystring': match_querystring,
'status': status,
'adding_headers': adding_headers,
'stream': stream,
})
def add_callback(self, method, url, callback, match_querystring=False,
content_type='text/plain'):
# ensure the url has a default path set if the url is a string
# url = _ensure_url_default_path(url, match_querystring)
self._urls.append({
'url': url,
'method': method,
'callback': callback,
'content_type': content_type,
'match_querystring': match_querystring,
})
@property
def calls(self):
return self._calls
def __enter__(self):
self.start()
return self
def __exit__(self, type, value, traceback):
success = type is None
self.stop(allow_assert=success)
self.reset()
return success
def activate(self, func):
evaldict = {'responses': self, 'func': func}
return get_wrapped(func, _wrapper_template, evaldict)
def _find_match(self, request):
for match in self._urls:
if request.method != match['method']:
continue
if not self._has_url_match(match, request.url):
continue
break
else:
return None
if self.assert_all_requests_are_fired:
# for each found match remove the url from the stack
self._urls.remove(match)
return match
def _has_url_match(self, match, request_url):
url = match['url']
if not match['match_querystring']:
request_url = request_url.split('?', 1)[0]
if _is_string(url):
if match['match_querystring']:
return self._has_strict_url_match(url, request_url)
else:
return url == request_url
elif isinstance(url, re._pattern_type) and url.match(request_url):
return True
else:
return False
def _has_strict_url_match(self, url, other):
url_parsed = urlparse(url)
other_parsed = urlparse(other)
if url_parsed[:3] != other_parsed[:3]:
return False
url_qsl = sorted(parse_qsl(url_parsed.query))
other_qsl = sorted(parse_qsl(other_parsed.query))
return url_qsl == other_qsl
def _on_request(self, adapter, request, **kwargs):
match = self._find_match(request)
# TODO(dcramer): find the correct class for this
if match is None:
error_msg = 'Connection refused: {0} {1}'.format(request.method,
request.url)
response = ConnectionError(error_msg)
response.request = request
self._calls.add(request, response)
raise response
if 'body' in match and isinstance(match['body'], Exception):
self._calls.add(request, match['body'])
raise match['body']
headers = {}
if match['content_type'] is not None:
headers['Content-Type'] = match['content_type']
if 'callback' in match: # use callback
status, r_headers, body = match['callback'](request)
if isinstance(body, six.text_type):
body = body.encode('utf-8')
body = BufferIO(body)
headers.update(r_headers)
elif 'body' in match:
if match['adding_headers']:
headers.update(match['adding_headers'])
status = match['status']
body = BufferIO(match['body'])
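        # Wrap the canned body in a urllib3 HTTPResponse, then let the real
        # adapter convert it into a requests.Response (status, headers, ...).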
response = HTTPResponse(
status=status,
reason=six.moves.http_client.responses[status],
body=body,
headers=headers,
preload_content=False,
)
response = adapter.build_response(request, response)
if not match.get('stream'):
response.content # NOQA
try:
resp_cookies = Cookies.from_request(response.headers['set-cookie'])
response.cookies = cookiejar_from_dict(dict(
(v.name, v.value)
for _, v
in resp_cookies.items()
))
except (KeyError, TypeError):
pass
self._calls.add(request, response)
return response
def start(self):
try:
from unittest import mock
except ImportError:
import mock
def unbound_on_send(adapter, request, *a, **kwargs):
return self._on_request(adapter, request, *a, **kwargs)
self._patcher1 = mock.patch('botocore.vendored.requests.adapters.HTTPAdapter.send',
unbound_on_send)
self._patcher1.start()
self._patcher2 = mock.patch('requests.adapters.HTTPAdapter.send',
unbound_on_send)
self._patcher2.start()
def stop(self, allow_assert=True):
self._patcher1.stop()
self._patcher2.stop()
if allow_assert and self.assert_all_requests_are_fired and self._urls:
raise AssertionError(
'Not all requests have been executed {0!r}'.format(
[(url['method'], url['url']) for url in self._urls]))
# expose default mock namespace
mock = _default_mock = RequestsMock(assert_all_requests_are_fired=False)
__all__ = []
for __attr in (a for a in dir(_default_mock) if not a.startswith('_')):
__all__.append(__attr)
globals()[__attr] = getattr(_default_mock, __attr)
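# Illustrative usage sketch (not part of the original module; assumes the
# `requests` and `botocore` packages are importable, since start() patches
# both): register a fake endpoint, issue a real requests call against it,
# and check the canned JSON body.  The URL is a placeholder.
def _demo_requests_mock():
    import requests
    with RequestsMock() as rsps:
        rsps.add(rsps.GET, 'http://example.com/api', json={'ok': True})
        resp = requests.get('http://example.com/api')
        assert resp.json() == {'ok': True}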
| heddle317/moto | moto/packages/responses/responses.py | Python | apache-2.0 | 9,675 |
# Create your views here.
from django.http import HttpResponse, HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response, get_object_or_404
from django.contrib.auth.models import User
from django.template import RequestContext
from fclover.account.forms import SigninForm
from fclover.account.models import UserProfile
from django.contrib.auth import authenticate, login, logout
def signup(request):
return render_to_response('account/signup.html', context_instance=RequestContext(request))
def signin(request):
form = SigninForm()
if request.method == 'POST':
form = SigninForm(request.POST)
username = request.POST.get('name', '')
password = request.POST.get('password', '')
user = authenticate(username=username, password=password)
if user is not None and user.is_active:
# Correct password, and the user is marked "active"
login(request, user)
# Redirect to a success page.
return HttpResponse("welcome back")
else:
# Show an error page
return render_to_response('account/signin.html', {'form': form, 'invalid': True}, context_instance=RequestContext(request))
return render_to_response('account/signin.html', {'form': form},context_instance=RequestContext(request))
| weaponsjtu/love-stranger | fcloverV0/account/views.py | Python | apache-2.0 | 1,367 |
# Copyright 2019 Kolushov Alexandr <https://it-projects.info/team/KolushovAlexandr>
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html).
import odoo.tests
from odoo.api import Environment
@odoo.tests.common.at_install(True)
@odoo.tests.common.post_install(True)
class TestUi(odoo.tests.HttpCase):
def test_01_subtask_sort_button(self):
env = Environment(self.registry.test_cr, self.uid, {})
# needed because tests are run before the module is marked as
# installed. In js web will only load qweb coming from modules
# that are returned by the backend in module_boot. Without
# this you end up with js, css but no qweb.
env["ir.module.module"].search(
[("name", "=", "project_task_subtask")], limit=1
).state = "installed"
# find active tasks
task = env["project.task"].search([("active", "=", "true")], limit=1)
url = "/web?#id=%s&view_type=form&model=project.task&/" % str(task.id)
self.registry.cursor().release()
code = """
$(document).ready( function() {
setInterval(function(){
if ($('.o_x2m_control_panel .o_cp_buttons .btn.o_pager_sort').length > 0) {
console.log('ok');
}
}, 1000);
setTimeout(function(){
if ($('.o_x2m_control_panel .o_cp_buttons .btn.o_pager_sort').length > 0) {
console.log('ok');
} else {
console.log(document.documentElement.innerHTML);
console.log('error', 'Sort Button is not displayed');
}
}, 20000);
});
"""
self.phantom_js(url, code, login="admin")
| yelizariev/addons-yelizariev | project_task_subtask/tests/test_subtask_sort_button.py | Python | lgpl-3.0 | 1,815 |
"""
The :mod:`sklearn.model_selection._search` includes utilities to fine-tune the
parameters of an estimator.
"""
from __future__ import print_function
from __future__ import division
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized, defaultdict
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from ..base import BaseEstimator, is_classifier, clone
from ..base import MetaEstimatorMixin
from ._split import check_cv
from ._validation import _fit_and_score
from ..exceptions import NotFittedError
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..utils import check_random_state
from ..utils.fixes import sp_version
from ..utils.fixes import rankdata
from ..utils.random import sample_without_replacement
from ..utils.validation import indexable, check_is_fitted
from ..utils.metaestimators import if_delegate_has_method
from ..metrics.scorer import check_scoring
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
class ParameterGrid(object):
"""Grid of parameters with a discrete number of values for each.
Can be used to iterate over parameter value combinations with the
Python built-in function iter.
Read more in the :ref:`User Guide <search>`.
Parameters
----------
param_grid : dict of string to sequence, or sequence of such
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See the examples below.
Examples
--------
>>> from sklearn.model_selection import ParameterGrid
>>> param_grid = {'a': [1, 2], 'b': [True, False]}
>>> list(ParameterGrid(param_grid)) == (
... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
True
>>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
>>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
... {'kernel': 'rbf', 'gamma': 1},
... {'kernel': 'rbf', 'gamma': 10}]
True
>>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
True
See also
--------
:class:`GridSearchCV`:
Uses :class:`ParameterGrid` to perform a full parallelized parameter
search.
"""
def __init__(self, param_grid):
if isinstance(param_grid, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_grid = [param_grid]
self.param_grid = param_grid
def __iter__(self):
"""Iterate over the points in the grid.
Returns
-------
params : iterator over dict of string to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
"""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.product can't).
product = partial(reduce, operator.mul)
return sum(product(len(v) for v in p.values()) if p else 1
for p in self.param_grid)
def __getitem__(self, ind):
"""Get the parameters that would be ``ind``th in iteration
Parameters
----------
ind : int
The iteration index
Returns
-------
params : dict of string to any
Equal to list(self)[ind]
"""
# This is used to make discrete sampling without replacement memory
# efficient.
for sub_grid in self.param_grid:
# XXX: could memoize information used here
if not sub_grid:
if ind == 0:
return {}
else:
ind -= 1
continue
# Reverse so most frequent cycling parameter comes first
keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
sizes = [len(v_list) for v_list in values_lists]
total = np.product(sizes)
if ind >= total:
# Try the next grid
ind -= total
else:
out = {}
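                # Decode `ind` as a mixed-radix number: each divmod peels
                # off the offset of one parameter, least-significant first.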
for key, v_list, n in zip(keys, values_lists, sizes):
ind, offset = divmod(ind, n)
out[key] = v_list[offset]
return out
raise IndexError('ParameterGrid index out of range')
class ParameterSampler(object):
"""Generator on parameters sampled from given distributions.
Non-deterministic iterable over random candidate combinations for hyper-
parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Note that before SciPy 0.16, the ``scipy.stats.distributions`` do not
accept a custom RNG instance and always use the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used to
define the parameter search space. Deterministic behavior is however
guaranteed from SciPy 0.16 onwards.
Read more in the :ref:`User Guide <search>`.
Parameters
----------
param_distributions : dict
Dictionary where the keys are parameters and values
are distributions from which a parameter is to be sampled.
Distributions either have to provide a ``rvs`` function
to sample from them, or can be given as a list of values,
where a uniform distribution is assumed.
n_iter : integer
Number of parameter settings that are produced.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
Returns
-------
params : dict of string to any
**Yields** dictionaries mapping each estimator parameter to
as sampled value.
Examples
--------
>>> from sklearn.model_selection import ParameterSampler
>>> from scipy.stats.distributions import expon
>>> import numpy as np
>>> np.random.seed(0)
>>> param_grid = {'a':[1, 2], 'b': expon()}
>>> param_list = list(ParameterSampler(param_grid, n_iter=4))
>>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
... for d in param_list]
>>> rounded_list == [{'b': 0.89856, 'a': 1},
... {'b': 0.923223, 'a': 1},
... {'b': 1.878964, 'a': 2},
... {'b': 1.038159, 'a': 2}]
True
"""
def __init__(self, param_distributions, n_iter, random_state=None):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
def __iter__(self):
# check if all distributions are given as lists
# in this case we want to sample without replacement
all_lists = np.all([not hasattr(v, "rvs")
for v in self.param_distributions.values()])
rnd = check_random_state(self.random_state)
if all_lists:
# look up sampled parameter settings in parameter grid
param_grid = ParameterGrid(self.param_distributions)
grid_size = len(param_grid)
if grid_size < self.n_iter:
raise ValueError(
"The total space of parameters %d is smaller "
"than n_iter=%d. For exhaustive searches, use "
"GridSearchCV." % (grid_size, self.n_iter))
for i in sample_without_replacement(grid_size, self.n_iter,
random_state=rnd):
yield param_grid[i]
else:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(self.param_distributions.items())
for _ in six.moves.range(self.n_iter):
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
if sp_version < (0, 16):
params[k] = v.rvs()
else:
params[k] = v.rvs(random_state=rnd)
else:
params[k] = v[rnd.randint(len(v))]
yield params
def __len__(self):
"""Number of points that will be sampled."""
return self.n_iter
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
verbose, error_score='raise', **fit_params):
"""Run fit on one set of parameters.
Parameters
----------
X : array-like, sparse matrix or list
Input data.
y : array-like or None
Targets for input data.
estimator : estimator object
A object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
parameters : dict
Parameters to be set on estimator for this grid point.
train : ndarray, dtype int or bool
Boolean mask or indices for training set.
test : ndarray, dtype int or bool
Boolean mask or indices for test set.
scorer : callable or None.
If provided must be a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int
Verbosity level.
**fit_params : kwargs
Additional parameter passed to the fit function of the estimator.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
score : float
Score of this parameter setting on given training / test split.
parameters : dict
The parameters that have been evaluated.
n_samples_test : int
Number of test samples in this split.
"""
score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train,
test, verbose, parameters,
fit_params, error_score)
return score, parameters, n_samples_test
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for name, v in p.items():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
check = [isinstance(v, k) for k in (list, tuple, np.ndarray)]
if True not in check:
raise ValueError("Parameter values for parameter ({0}) need "
"to be a sequence.".format(name))
if len(v) == 0:
raise ValueError("Parameter values for parameter ({0}) need "
"to be a non-empty sequence.".format(name))
# XXX Remove in 0.20
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
    # A raw namedtuple is very memory efficient as it packs the attributes
    # in a struct to get rid of the __dict__ of attributes; in particular it
    # does not copy the string for the keys on each instance.
    # By deriving a namedtuple class just to introduce the __repr__ method we
    # would also reintroduce the __dict__ on the instance, so we tell the
    # Python interpreter that this subclass uses static __slots__ instead of
    # dynamic attributes. Furthermore we don't need any additional slot in the
    # subclass, so we set __slots__ to the empty tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
MetaEstimatorMixin)):
"""Base class for hyper parameter search with cross-validation."""
@abstractmethod
def __init__(self, estimator, scoring=None,
fit_params=None, n_jobs=1, iid=True,
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
error_score='raise'):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.fit_params = fit_params if fit_params is not None else {}
self.iid = iid
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
@property
def _estimator_type(self):
return self.estimator._estimator_type
def score(self, X, y=None):
"""Returns the score on the given data, if the estimator has been refit.
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
"""
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
return self.scorer_(self.best_estimator_, X, y)
def _check_is_fitted(self, method_name):
if not self.refit:
raise NotFittedError(('This GridSearchCV instance was initialized '
'with refit=False. %s is '
'available only after refitting on the best '
'parameters. ') % method_name)
else:
check_is_fitted(self, 'best_estimator_')
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('predict')
return self.best_estimator_.predict(X)
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('predict_proba')
return self.best_estimator_.predict_proba(X)
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('predict_log_proba')
return self.best_estimator_.predict_log_proba(X)
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('decision_function')
return self.best_estimator_.decision_function(X)
@if_delegate_has_method(delegate='estimator')
def transform(self, X):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('transform')
return self.best_estimator_.transform(X)
@if_delegate_has_method(delegate='estimator')
def inverse_transform(self, Xt):
"""Call inverse_transform on the estimator with the best found params.
Only available if the underlying estimator implements
``inverse_transform`` and ``refit=True``.
Parameters
-----------
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('inverse_transform')
        return self.best_estimator_.inverse_transform(Xt)
def _fit(self, X, y, labels, parameter_iterable):
"""Actual fitting, performing the search over parameters."""
estimator = self.estimator
cv = check_cv(self.cv, y, classifier=is_classifier(estimator))
self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
X, y, labels = indexable(X, y, labels)
n_splits = cv.get_n_splits(X, y, labels)
if self.verbose > 0 and isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(n_splits, n_candidates,
n_candidates * n_splits))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch
)(delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
train, test, self.verbose, parameters,
self.fit_params, return_parameters=True,
error_score=self.error_score)
for parameters in parameter_iterable
for train, test in cv.split(X, y, labels))
test_scores, test_sample_counts, _, parameters = zip(*out)
candidate_params = parameters[::n_splits]
n_candidates = len(candidate_params)
test_scores = np.array(test_scores,
dtype=np.float64).reshape(n_candidates,
n_splits)
        # NOTE: test_sample_counts (weights) are the same for all candidates
test_sample_counts = np.array(test_sample_counts[:n_splits],
dtype=np.int)
# Computed the (weighted) mean and std for all the candidates
weights = test_sample_counts if self.iid else None
means = np.average(test_scores, axis=1, weights=weights)
stds = np.sqrt(np.average((test_scores - means[:, np.newaxis]) ** 2,
axis=1, weights=weights))
results = dict()
for split_i in range(n_splits):
results["test_split%d_score" % split_i] = test_scores[:, split_i]
results["test_mean_score"] = means
results["test_std_score"] = stds
ranks = np.asarray(rankdata(-means, method='min'), dtype=np.int32)
best_index = np.flatnonzero(ranks == 1)[0]
best_parameters = candidate_params[best_index]
results["test_rank_score"] = ranks
# Use one np.MaskedArray and mask all the places where the param is not
# applicable for that candidate. Use defaultdict as each candidate may
# not contain all the params
param_results = defaultdict(partial(np.ma.masked_all, (n_candidates,),
dtype=object))
for cand_i, params in enumerate(candidate_params):
for name, value in params.items():
# An all masked empty array gets created for the key
# `"param_%s" % name` at the first occurence of `name`.
# Setting the value at an index also unmasks that index
param_results["param_%s" % name][cand_i] = value
results.update(param_results)
# Store a list of param dicts at the key 'params'
results['params'] = candidate_params
self.results_ = results
self.best_index_ = best_index
self.n_splits_ = n_splits
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best_parameters)
if y is not None:
best_estimator.fit(X, y, **self.fit_params)
else:
best_estimator.fit(X, **self.fit_params)
self.best_estimator_ = best_estimator
return self
@property
def best_params_(self):
check_is_fitted(self, 'results_')
return self.results_['params'][self.best_index_]
@property
def best_score_(self):
check_is_fitted(self, 'results_')
return self.results_['test_mean_score'][self.best_index_]
@property
def grid_scores_(self):
warnings.warn(
"The grid_scores_ attribute was deprecated in version 0.18"
" in favor of the more elaborate results_ attribute."
" The grid_scores_ attribute will not be available from 0.20",
DeprecationWarning)
check_is_fitted(self, 'results_')
grid_scores = list()
for i, (params, mean, std) in enumerate(zip(
self.results_['params'],
self.results_['test_mean_score'],
self.results_['test_std_score'])):
scores = np.array(list(self.results_['test_split%d_score' % s][i]
for s in range(self.n_splits_)),
dtype=np.float64)
grid_scores.append(_CVScoreTuple(params, mean, scores))
return grid_scores
class GridSearchCV(BaseSearchCV):
"""Exhaustive search over specified parameter values for an estimator.
Important members are fit, predict.
GridSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated grid-search over a parameter grid.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
estimator : estimator object.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_grid : dict or list of dictionaries
Dictionary with parameters names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
If ``None``, the ``score`` method of the estimator is used.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this GridSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Examples
--------
>>> from sklearn import svm, datasets
>>> from sklearn.model_selection import GridSearchCV
>>> iris = datasets.load_iris()
>>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
>>> svr = svm.SVC()
>>> clf = GridSearchCV(svr, parameters)
>>> clf.fit(iris.data, iris.target)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
GridSearchCV(cv=None, error_score=...,
estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
decision_function_shape=None, degree=..., gamma=...,
kernel='rbf', max_iter=-1, probability=False,
random_state=None, shrinking=True, tol=...,
verbose=False),
fit_params={}, iid=..., n_jobs=1,
param_grid=..., pre_dispatch=..., refit=...,
scoring=..., verbose=...)
>>> sorted(clf.results_.keys())
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
['param_C', 'param_kernel', 'params', 'test_mean_score',...
'test_rank_score', 'test_split0_score', 'test_split1_score',...
'test_split2_score', 'test_std_score']
Attributes
----------
results_ : dict of numpy (masked) ndarrays
A dict with keys as column headers and values as columns, that can be
imported into a pandas ``DataFrame``.
For instance the below given table
+------------+-----------+------------+-----------------+---+---------+
|param_kernel|param_gamma|param_degree|test_split0_score|...|...rank..|
+============+===========+============+=================+===+=========+
| 'poly' | -- | 2 | 0.8 |...| 2 |
+------------+-----------+------------+-----------------+---+---------+
| 'poly' | -- | 3 | 0.7 |...| 4 |
+------------+-----------+------------+-----------------+---+---------+
| 'rbf' | 0.1 | -- | 0.8 |...| 3 |
+------------+-----------+------------+-----------------+---+---------+
| 'rbf' | 0.2 | -- | 0.9 |...| 1 |
+------------+-----------+------------+-----------------+---+---------+
will be represented by a ``results_`` dict of::
{
'param_kernel': masked_array(data = ['poly', 'poly', 'rbf', 'rbf'],
mask = [False False False False]...)
'param_gamma': masked_array(data = [-- -- 0.1 0.2],
mask = [ True True False False]...),
'param_degree': masked_array(data = [2.0 3.0 -- --],
mask = [False False True True]...),
'test_split0_score' : [0.8, 0.7, 0.8, 0.9],
'test_split1_score' : [0.82, 0.5, 0.7, 0.78],
'test_mean_score' : [0.81, 0.60, 0.75, 0.82],
'test_std_score' : [0.02, 0.01, 0.03, 0.03],
'test_rank_score' : [2, 4, 3, 1],
'params' : [{'kernel': 'poly', 'degree': 2}, ...],
}
NOTE that the key ``'params'`` is used to store a list of parameter
settings dict for all the parameter candidates.
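    The ``results_`` dict can be loaded directly into a pandas ``DataFrame``
    with, e.g., ``pd.DataFrame(search.results_)`` (assuming pandas is
    installed).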
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
best_index_ : int
The index (of the ``results_`` arrays) which corresponds to the best
candidate parameter setting.
The dict at ``search.results_['params'][search.best_index_]`` gives
the parameter setting for the best model, that gives the highest
mean score (``search.best_score_``).
scorer_ : function
Scorer function used on the held out data to choose the best
parameters for the model.
n_splits_ : int
The number of cross-validation splits (folds/iterations).
Notes
    -----
The parameters selected are those that maximize the score of the left out
data, unless an explicit score is passed in which case it is used instead.
If `n_jobs` was set to a value higher than one, the data is copied for each
point in the grid (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
    --------
:class:`ParameterGrid`:
generates all the combinations of a hyperparameter grid.
:func:`sklearn.model_selection.train_test_split`:
utility function to split the data into a development set usable
for fitting a GridSearchCV instance and an evaluation set for
its final evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
pre_dispatch='2*n_jobs', error_score='raise'):
super(GridSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
self.param_grid = param_grid
_check_param_grid(param_grid)
def fit(self, X, y=None, labels=None):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
"""
return self._fit(X, y, labels, ParameterGrid(self.param_grid))
class RandomizedSearchCV(BaseSearchCV):
"""Randomized search on hyper parameters.
RandomizedSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated search over parameter settings.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Read more in the :ref:`User Guide <randomized_parameter_search>`.
Parameters
----------
estimator : estimator object.
        An object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_distributions : dict
Dictionary with parameters names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
If ``None``, the ``score`` method of the estimator is used.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this RandomizedSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Attributes
----------
results_ : dict of numpy (masked) ndarrays
A dict with keys as column headers and values as columns, that can be
imported into a pandas ``DataFrame``.
For instance the below given table
+--------------+-------------+-------------------+---+---------------+
| param_kernel | param_gamma | test_split0_score |...|test_rank_score|
+==============+=============+===================+===+===============+
| 'rbf' | 0.1 | 0.8 |...| 2 |
+--------------+-------------+-------------------+---+---------------+
| 'rbf' | 0.2 | 0.9 |...| 1 |
+--------------+-------------+-------------------+---+---------------+
| 'rbf' | 0.3 | 0.7 |...| 1 |
+--------------+-------------+-------------------+---+---------------+
will be represented by a ``results_`` dict of::
{
            'param_kernel' : masked_array(data = ['rbf', 'rbf', 'rbf'],
mask = False),
'param_gamma' : masked_array(data = [0.1 0.2 0.3], mask = False),
'test_split0_score' : [0.8, 0.9, 0.7],
'test_split1_score' : [0.82, 0.5, 0.7],
'test_mean_score' : [0.81, 0.7, 0.7],
'test_std_score' : [0.02, 0.2, 0.],
'test_rank_score' : [3, 1, 1],
'params' : [{'kernel' : 'rbf', 'gamma' : 0.1}, ...],
}
NOTE that the key ``'params'`` is used to store a list of parameter
settings dict for all the parameter candidates.
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
best_index_ : int
The index (of the ``results_`` arrays) which corresponds to the best
candidate parameter setting.
The dict at ``search.results_['params'][search.best_index_]`` gives
the parameter setting for the best model, that gives the highest
mean score (``search.best_score_``).
scorer_ : function
Scorer function used on the held out data to choose the best
parameters for the model.
n_splits_ : int
The number of cross-validation splits (folds/iterations).
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
    parameter setting (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`ParameterSampler`:
        A generator over parameter settings, constructed from
param_distributions.
"""
def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs', random_state=None,
error_score='raise'):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
super(RandomizedSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
def fit(self, X, y=None, labels=None):
"""Run fit on the estimator with randomly drawn parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
"""
sampled_params = ParameterSampler(self.param_distributions,
self.n_iter,
random_state=self.random_state)
return self._fit(X, y, labels, sampled_params)
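if __name__ == '__main__':
    # Illustrative usage sketch (not part of the original module): mirrors
    # the GridSearchCV doctest above but samples candidates instead of
    # enumerating a grid. The dataset and parameter ranges are arbitrary
    # demonstration choices.
    from scipy.stats import expon
    from sklearn import datasets, svm
    iris = datasets.load_iris()
    search = RandomizedSearchCV(svm.SVC(),
                                {'C': expon(scale=10),
                                 'kernel': ['linear', 'rbf']},
                                n_iter=5, random_state=0)
    search.fit(iris.data, iris.target)
    print(search.best_params_)
    print(search.best_score_)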
| altairpearl/scikit-learn | sklearn/model_selection/_search.py | Python | bsd-3-clause | 44,970 |
#!/usr/bin/python
# Check dashboard JSON files for common errors, like forgetting to templatize a
# datasource.
import json
import os
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
"boulderdash.json")) as f:
dashboard = json.load(f)
# When exporting, the current value of templated variables is saved. We don't
# want to save a specific value for datasource, since that's
# deployment-specific, so we ensure that the dashboard was exported with the
# datasource template variable set to "default".
for li in dashboard["templating"]["list"]:
if li["type"] == "datasource":
assert(li["current"]["value"] == "default")
# Additionally, ensure each panel's datasource is using the template variable
# rather than a hardcoded datasource. Grafana will choose a hardcoded
# datasource on new panels by default, so this is an easy mistake to make.
for ro in dashboard["rows"]:
for pa in ro["panels"]:
assert(pa["datasource"] == "$datasource")
# It seems that __inputs is non-empty when template variables at the top of the
# dashboard have been modified from the defaults; check for that.
assert(len(dashboard["__inputs"]) == 0)
| ibukanov/boulder | test/grafana/lint.py | Python | mpl-2.0 | 1,176 |
from tdlearner import TDLearner
from collections import defaultdict
import numpy as np
# finds the root-mean-square (length-normalized Euclidean) distance between two lists a, b
def find_dist(a,b):
cumsum = 0.0
for i in xrange(len(a)):
cumsum += (a[i]-b[i])**2
return np.sqrt(cumsum/float(len(a)))
class ModelBased(TDLearner):
'''
Implements model based learning algorithm with value iteration.
'''
def __init__(self, discount_fn = lambda i: 0,bucket_height = 1., bucket_width = 28, velocity_bucket = 1000):
super(ModelBased,self).__init__(learn_fn=lambda i:0, discount_fn=discount_fn,bucket_height=bucket_height, bucket_width=bucket_width,
velocity_bucket=velocity_bucket)
# keep track of current optimal policy, maps state s -> action a
self.optimal_policy = defaultdict(int)
# keep track of all states seen to iterate over in value_iter
self.seen_states = set()
def value_iter(self,discount):
if len(self.seen_states) != 0:
while True:
# store off old value function to test for convergence later
old_value_fn = []
for s in self.seen_states:
old_value_fn.append(self.V[s])
for s in self.seen_states:
# compute Q function for jump and no jump
for a in [0,1]:
# print "V states {}".format(self.V)
self.Q[s][a] = self.expected_reward(s,a) + discount * self.optimal_action_helper(s,a)
# find best action from state s
self.optimal_policy[s] = 1 if self.Q[s][1] > self.Q[s][0] else 0
# update value function for state s
self.V[s] = self.Q[s][self.optimal_policy[s]]
# update new value function
new_value_fn = []
for s in self.seen_states:
new_value_fn.append(self.V[s])
# test for convergence
# print "Old value {}".format(old_value_fn)
# print "V value {}".format(self.V)
# print find_dist(old_value_fn, new_value_fn)
if find_dist(old_value_fn, new_value_fn) < 0.1:
break
def action_callback(self, state):
'''
Simple Q-Learning algorithm
'''
# what state are we in?
new_state = self.get_state(state)
if self.last_state is not None:
self.seen_states.add(self.last_state)
# print "Last state {}".format(self.last_state)
        # increase iteration count and choose the action that maximizes expected reward
self.iter_num += 1
# we need update our Q for the last state and action
if (self.last_state is not None and
self.last_action is not None and
self.last_reward is not None):
s = self.last_state
a = self.last_action
r = self.last_reward
sp = new_state
self.NSA[(s, a)] += 1
self.NSAS[(s, a, sp)] += 1
self.RSA[(s, a)] += r
self.reachable[(s, a)].add(sp)
new_action = self.optimal_policy[sp]
else:
new_action = 1
# planning stage - updates optimal policy
discount = self.discount_fn(self.iter_num)
self.value_iter(discount)
self.last_action = new_action
self.last_state = new_state
return new_action
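# Illustrative sketch (not part of the original class): value_iter() above
# relies on expected_reward() and an optimal_action_helper() inherited from
# TDLearner. Given the NSA/NSAS/RSA counts maintained in action_callback,
# those quantities typically reduce to the maximum-likelihood model
# estimates R(s, a) = RSA[(s, a)] / NSA[(s, a)] and
# P(s' | s, a) = NSAS[(s, a, s')] / NSA[(s, a)]. The helpers below are
# hypothetical stand-ins showing that computation.
def _ml_expected_reward(learner, s, a):
    # Empirical mean reward observed for the state/action pair.
    n = learner.NSA[(s, a)]
    return learner.RSA[(s, a)] / float(n) if n else 0.0
def _ml_transition_prob(learner, s, a, sp):
    # Empirical probability of reaching sp from (s, a).
    n = learner.NSA[(s, a)]
    return learner.NSAS[(s, a, sp)] / float(n) if n else 0.0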
| kandluis/machine-learning | prac4/practical4-code/modelbased.py | Python | mit | 3,542 |
# Licensed to Hortonworks, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Hortonworks, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | hortonworks/hortonworks-sandbox | tutorials/tutorials_app/__init__.py | Python | apache-2.0 | 774 |
#!/usr/bin/env python
import os
from setuptools import setup, find_packages
version = __import__('philo').VERSION
setup(
name = 'philo',
version = '.'.join([str(v) for v in version]),
url = "http://philocms.org/",
description = "A foundation for developing web content management systems.",
long_description = open(os.path.join(os.path.dirname(__file__), 'README')).read(),
maintainer = "iThink Software",
maintainer_email = "contact@ithinksw.com",
packages = find_packages(),
include_package_data=True,
classifiers = [
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: ISC License (ISCL)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Application Frameworks',
],
platforms = ['OS Independent'],
license = 'ISC License (ISCL)',
install_requires = [
'django>=1.3',
'django-mptt>0.4.2,==dev',
],
extras_require = {
'docs': ["sphinx>=1.0"],
'grappelli': ['django-grappelli>=2.3'],
'migrations': ['south>=0.7.2'],
'waldo-recaptcha': ['recaptcha-django'],
'sobol-eventlet': ['eventlet'],
'sobol-scrape': ['BeautifulSoup'],
'penfield': ['django-taggit>=0.9'],
},
dependency_links = [
'https://github.com/django-mptt/django-mptt/tarball/master#egg=django-mptt-dev'
]
)
| ithinksw/philo | setup.py | Python | isc | 1,421 |
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tests import unittest
from twisted.internet import defer
from mock import Mock
from synapse.handlers.directory import DirectoryHandler
from synapse.types import RoomAlias
from tests.utils import setup_test_homeserver
class DirectoryHandlers(object):
def __init__(self, hs):
self.directory_handler = DirectoryHandler(hs)
class DirectoryTestCase(unittest.TestCase):
""" Tests the directory service. """
@defer.inlineCallbacks
def setUp(self):
self.mock_federation = Mock(spec=[
"make_query",
"register_edu_handler",
])
self.query_handlers = {}
def register_query_handler(query_type, handler):
self.query_handlers[query_type] = handler
self.mock_federation.register_query_handler = register_query_handler
hs = yield setup_test_homeserver(
http_client=None,
resource_for_federation=Mock(),
replication_layer=self.mock_federation,
)
hs.handlers = DirectoryHandlers(hs)
self.handler = hs.get_handlers().directory_handler
self.store = hs.get_datastore()
self.my_room = RoomAlias.from_string("#my-room:test")
self.your_room = RoomAlias.from_string("#your-room:test")
self.remote_room = RoomAlias.from_string("#another:remote")
@defer.inlineCallbacks
def test_get_local_association(self):
yield self.store.create_room_alias_association(
self.my_room, "!8765qwer:test", ["test"]
)
result = yield self.handler.get_association(self.my_room)
self.assertEquals({
"room_id": "!8765qwer:test",
"servers": ["test"],
}, result)
@defer.inlineCallbacks
def test_get_remote_association(self):
self.mock_federation.make_query.return_value = defer.succeed(
{"room_id": "!8765qwer:test", "servers": ["test", "remote"]}
)
result = yield self.handler.get_association(self.remote_room)
self.assertEquals({
"room_id": "!8765qwer:test",
"servers": ["test", "remote"],
}, result)
self.mock_federation.make_query.assert_called_with(
destination="remote",
query_type="directory",
args={
"room_alias": "#another:remote",
},
retry_on_dns_fail=False,
)
@defer.inlineCallbacks
def test_incoming_fed_query(self):
yield self.store.create_room_alias_association(
self.your_room, "!8765asdf:test", ["test"]
)
response = yield self.query_handlers["directory"](
{"room_alias": "#your-room:test"}
)
self.assertEquals({
"room_id": "!8765asdf:test",
"servers": ["test"],
}, response)
| TribeMedia/synapse | tests/handlers/test_directory.py | Python | apache-2.0 | 3,440 |
json_filepath="/var/tmp/applconn/static/applconn.json"
pathprefix='/var/tmp/applconn/static'
rsyncgitpath='/var/tmp/rsyncgit/'
##
list_import_def=[
#"import_ansible_facts",
#"import_haproxy",
#"import_tungsten_fabric_network_policy",
#"import_tungsten_fabric_prouterlinkentry",
"import_testlogic"
]
##
enable_ganglia=False
enable_elasticsearch=False
enable_prometheus=False
ganglia_url='http://127.0.0.1/ganglia/'
elasticsearchurl='127.0.0.1:9200'
kibana_url='http://127.0.0.1:5601/app/kibana#/doc/*/applconn/'
prometheus_url='http://127.0.0.1:9090/'
# override settings if local_settings.py is there
try:
from local_settings import *
except ImportError as e:
pass
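if __name__ == '__main__':
    # Illustrative sanity check (not part of the original module): prints
    # the effective configuration, reflecting any values shadowed by an
    # optional local_settings.py next to this file.
    print("json_filepath: {}".format(json_filepath))
    print("importers: {}".format(list_import_def))
    print("elasticsearch enabled: {}".format(enable_elasticsearch))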
| tnaganawa/applconn | settings.py | Python | apache-2.0 | 693 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import division
from telemetry.core import util
from telemetry.image_processing import histogram
from telemetry.image_processing import rgba_color
from telemetry.util import external_modules
util.AddDirToPythonPath(util.GetTelemetryDir(), 'third_party', 'png')
import png # pylint: disable=F0401
cv2 = external_modules.ImportOptionalModule('cv2')
np = external_modules.ImportRequiredModule('numpy')
def Channels(image):
return image.shape[2]
def Width(image):
return image.shape[1]
def Height(image):
return image.shape[0]
def Pixels(image):
return bytearray(np.uint8(image[:, :, ::-1]).flat) # Convert from bgr to rgb.
def GetPixelColor(image, x, y):
bgr = image[y][x]
return rgba_color.RgbaColor(bgr[2], bgr[1], bgr[0])
def WritePngFile(image, path):
assert(path.endswith('png'))
if cv2 is not None:
cv2.imwrite(path, image)
else:
with open(path, "wb") as f:
metadata = {}
metadata['size'] = (Width(image), Height(image))
metadata['alpha'] = False
metadata['bitdepth'] = 8
img = image[:, :, ::-1]
pixels = img.reshape(-1).tolist()
png.Writer(**metadata).write_array(f, pixels)
def FromRGBPixels(width, height, pixels, bpp):
img = np.array(pixels, order='F', dtype=np.uint8)
img.resize((height, width, bpp))
if bpp == 4:
img = img[:, :, :3] # Drop alpha.
return img[:, :, ::-1] # Convert from rgb to bgr.
def FromPngFile(path):
if cv2 is not None:
img = cv2.imread(path, cv2.CV_LOAD_IMAGE_COLOR)
if img is None:
raise ValueError('Image at path {0} could not be read'.format(path))
return img
else:
with open(path, "rb") as f:
return FromPng(f.read())
def FromPng(png_data):
if cv2 is not None:
file_bytes = np.asarray(bytearray(png_data), dtype=np.uint8)
return cv2.imdecode(file_bytes, cv2.CV_LOAD_IMAGE_COLOR)
else:
width, height, pixels, meta = png.Reader(bytes=png_data).read_flat()
return FromRGBPixels(width, height, pixels, 4 if meta['alpha'] else 3)
def _SimpleDiff(image1, image2):
if cv2 is not None:
return cv2.absdiff(image1, image2)
else:
amax = np.maximum(image1, image2)
amin = np.minimum(image1, image2)
return amax - amin
def AreEqual(image1, image2, tolerance, likely_equal):
if image1.shape != image2.shape:
return False
self_image = image1
other_image = image2
if tolerance:
if likely_equal:
return np.amax(_SimpleDiff(image1, image2)) <= tolerance
else:
for row in xrange(Height(image1)):
if np.amax(_SimpleDiff(image1[row], image2[row])) > tolerance:
return False
return True
else:
if likely_equal:
return (self_image == other_image).all()
else:
for row in xrange(Height(image1)):
if not (self_image[row] == other_image[row]).all():
return False
return True
def Diff(image1, image2):
self_image = image1
other_image = image2
if image1.shape[2] != image2.shape[2]:
raise ValueError('Cannot diff images of differing bit depth')
if image1.shape[:2] != image2.shape[:2]:
width = max(Width(image1), Width(image2))
height = max(Height(image1), Height(image2))
    self_image = np.zeros((height, width, image1.shape[2]), np.uint8)
    other_image = np.zeros((height, width, image1.shape[2]), np.uint8)
self_image[0:Height(image1), 0:Width(image1)] = image1
other_image[0:Height(image2), 0:Width(image2)] = image2
return _SimpleDiff(self_image, other_image)
def GetBoundingBox(image, color, tolerance):
if cv2 is not None:
color = np.array([color.b, color.g, color.r])
img = cv2.inRange(image, np.subtract(color[0:3], tolerance),
np.add(color[0:3], tolerance))
count = cv2.countNonZero(img)
if count == 0:
return None, 0
contours, _ = cv2.findContours(img, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
contour = np.concatenate(contours)
return cv2.boundingRect(contour), count
else:
if tolerance:
color = np.array([color.b, color.g, color.r])
colorm = color - tolerance
colorp = color + tolerance
b = image[:, :, 0]
g = image[:, :, 1]
r = image[:, :, 2]
w = np.where(((b >= colorm[0]) & (b <= colorp[0]) &
(g >= colorm[1]) & (g <= colorp[1]) &
(r >= colorm[2]) & (r <= colorp[2])))
else:
w = np.where((image[:, :, 0] == color.b) &
(image[:, :, 1] == color.g) &
(image[:, :, 2] == color.r))
if len(w[0]) == 0:
return None, 0
return (w[1][0], w[0][0], w[1][-1] - w[1][0] + 1, w[0][-1] - w[0][0] + 1), \
len(w[0])
def Crop(image, left, top, width, height):
img_height, img_width = image.shape[:2]
if (left < 0 or top < 0 or
(left + width) > img_width or
(top + height) > img_height):
raise ValueError('Invalid dimensions')
return image[top:top + height, left:left + width]
def GetColorHistogram(image, ignore_color, tolerance):
if cv2 is not None:
mask = None
if ignore_color is not None:
color = np.array([ignore_color.b, ignore_color.g, ignore_color.r])
mask = ~cv2.inRange(image, np.subtract(color, tolerance),
np.add(color, tolerance))
flatten = np.ndarray.flatten
hist_b = flatten(cv2.calcHist([image], [0], mask, [256], [0, 256]))
hist_g = flatten(cv2.calcHist([image], [1], mask, [256], [0, 256]))
hist_r = flatten(cv2.calcHist([image], [2], mask, [256], [0, 256]))
else:
filtered = image.reshape(-1, 3)
if ignore_color is not None:
color = np.array([ignore_color.b, ignore_color.g, ignore_color.r])
colorm = np.array(color) - tolerance
colorp = np.array(color) + tolerance
in_range = ((filtered[:, 0] < colorm[0]) | (filtered[:, 0] > colorp[0]) |
(filtered[:, 1] < colorm[1]) | (filtered[:, 1] > colorp[1]) |
(filtered[:, 2] < colorm[2]) | (filtered[:, 2] > colorp[2]))
filtered = np.compress(in_range, filtered, axis = 0)
if len(filtered[:, 0]) == 0:
return histogram.ColorHistogram(np.zeros((256)), np.zeros((256)),
np.zeros((256)), ignore_color)
hist_b = np.bincount(filtered[:, 0], minlength=256)
hist_g = np.bincount(filtered[:, 1], minlength=256)
hist_r = np.bincount(filtered[:, 2], minlength=256)
return histogram.ColorHistogram(hist_r, hist_g, hist_b, ignore_color)
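if __name__ == '__main__':
  # Small self-check sketch (not part of the original module): exercises
  # Diff and AreEqual on synthetic images. Shapes and pixel values are
  # arbitrary demonstration choices.
  a = np.zeros((4, 4, 3), np.uint8)
  b = np.zeros((4, 4, 3), np.uint8)
  b[1][2] = [255, 0, 0]              # one differing pixel (bgr order)
  print(AreEqual(a, a, 0, True))     # True: identical images
  print(AreEqual(a, b, 0, True))     # False: one pixel differs
  print(np.amax(Diff(a, b)))         # 255: max per-channel difference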
| mohamed--abdel-maksoud/chromium.src | tools/telemetry/telemetry/image_processing/image_util_numpy_impl.py | Python | bsd-3-clause | 6,616 |
import sys
import pytest
from numpy.testing import assert_equal
import torch
import torch.nn as nn
sys.path.append("../../../")
from pycroscopy.learn import nnblocks
@pytest.mark.parametrize("nlayers, bnorm, nbnorm",
[(1, True, 1), (1, False, 0),
(3, True, 3), (3, False, 0)])
def test_convblock_bnorm(nlayers, bnorm, nbnorm):
c = nnblocks.ConvBlock(2, nlayers, 1, 8, batchnorm=bnorm)
nbnorm_ = len([k for k in c.state_dict().keys() if 'running_mean' in k])
assert_equal(nbnorm, nbnorm_)
@pytest.mark.parametrize("activation, activation_expected",
[("relu", nn.modules.activation.ReLU),
("lrelu", nn.modules.activation.LeakyReLU),
("softplus", nn.modules.activation.Softplus),
("tanh", nn.modules.activation.Tanh)])
def test_convblock_activation(activation, activation_expected):
conv = nnblocks.ConvBlock(2, 2, 1, 8, activation=activation)
activations_cnt = 0
for c1 in conv.children():
for c2 in c1.children():
if isinstance(c2, activation_expected):
activations_cnt += 1
assert_equal(activations_cnt, 2)
def test_convblock_noactivation():
activations = (nn.modules.activation.LeakyReLU, nn.modules.activation.ReLU,
nn.modules.activation.Tanh, nn.modules.activation.Softplus)
conv = nnblocks.ConvBlock(2, 2, 1, 8, activation=None)
activations_cnt = 0
for c1 in conv.children():
for c2 in c1.children():
if isinstance(c2, activations):
activations_cnt += 1
assert_equal(activations_cnt, 0)
@pytest.mark.parametrize("dim, conv_expected",
[(1, nn.modules.Conv1d),
(2, nn.modules.Conv2d),
(3, nn.modules.Conv3d)])
def test_convblock_dim(dim, conv_expected):
conv = nnblocks.ConvBlock(dim, 2, 1, 8)
conv_cnt = 0
for c1 in conv.children():
for c2 in c1.children():
if isinstance(c2, conv_expected):
conv_cnt += 1
assert_equal(conv_cnt, 2)
@pytest.mark.parametrize("dim, size", [(1, [8]), (2, [8, 8]), (3, [8, 8, 8])])
def test_convblock_pool(dim, size):
data = torch.randn(2, 2, *size)
conv = nnblocks.ConvBlock(dim, 2, 2, 2, pool=True)
out = conv(data)
size_ = sum([out.size(i+2) for i in range(dim)])
assert_equal(size_, sum(size) / 2)
@pytest.mark.parametrize("dim, size", [(1, [8]), (2, [8, 8]), (3, [8, 8, 8])])
def test_upsample_block(dim, size):
data = torch.randn(2, 2, *size)
up = nnblocks.UpsampleBlock(dim, 2, 2, mode="nearest")
out = up(data)
size_ = sum([out.size(i+2) for i in range(dim)])
assert_equal(size_, sum(size) * 2)
@pytest.mark.parametrize("in_channels, out_channels",
[(8, 8), (8, 4), (4, 8)])
def test_upsampleblock_change_number_of_channels(in_channels, out_channels):
data = torch.randn(4, in_channels, 8, 8)
up = nnblocks.UpsampleBlock(2, in_channels, out_channels)
out = up(data)
assert_equal(out.size(1), out_channels)
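if __name__ == '__main__':
    # Convenience entry point (illustrative addition, not in the original
    # file): allows running this test module directly instead of through
    # the pytest command line.
    pytest.main([__file__, "-v"])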
| pycroscopy/pycroscopy | tests/learn/dl/test_nnblocks.py | Python | mit | 3,166 |
"""Reads the Reuters dataset for Multi-label Classification
Important: Train and test sets are _switched_, since the original split leaves
the sides unbalanced.
"""
import os
import tarfile
import anna.data.utils as utils
from anna.data.api import Doc
from collections import Counter
from bs4 import BeautifulSoup
NAME = "reuters"
REUTERS_FINAL_DIR = "rcv1"
REUTERS_FILE = "rcv1.tar.xz"
REUTERS_TEXT = """To download the Reuters corpus, follow the instructions at:
http://trec.nist.gov/data/reuters/reuters.html
Once you have the RC1 file, put it at:
{}"""
TEST_DATES = \
["1996{:02}{:02}".format(month, day)
for month in range(8, 9)
for day in range(20, 32)]
TRAIN_DATES = \
["1996{:02}{:02}".format(month, day)
for month in range(9, 13)
for day in range(1, 32)] + \
["1997{:02}{:02}".format(month, day)
for month in range(1, 13)
for day in range(1, 32)]
def fetch_and_parse(data_dir):
"""
Fetches and parses the Reuters dataset.
Args:
data_dir (str): absolute path to the dir where datasets are stored
Returns:
train_docs (tf.data.Dataset): annotated articles for training
test_docs (tf.data.Dataset): annotated articles for testing
unused_docs (tf.data.Dataset): unused docs
labels (list[str]): final list of labels, from most to least frequent
"""
reuters_dir = os.path.join(data_dir, NAME)
return utils.mlc_tfrecords(reuters_dir, lambda: parse(fetch(data_dir)))
def parse(reuters_dir):
"""
Parses the RCV1-v2 dataset.
Args:
reuters_dir (str): absolute path to the extracted RCV1-v2 dir
Returns:
train_docs (list[Doc]): annotated articles for training
test_docs (list[Doc]): annotated articles for testing
unused_docs (list[Doc]): unused docs
labels (list[str]): final list of labels, from most to least frequent
"""
train_docs = []
test_docs = []
unused_docs = []
label_counts = Counter()
for date in TEST_DATES:
date_dir = os.path.join(reuters_dir, date)
for path in os.listdir(date_dir):
path = os.path.join(date_dir, path)
test_docs.append(parse_file(path))
for date in TRAIN_DATES:
date_dir = os.path.join(reuters_dir, date)
if not os.path.isdir(date_dir):
continue
for path in os.listdir(date_dir):
path = os.path.join(date_dir, path)
doc = parse_file(path)
label_counts.update(doc.labels)
train_docs.append(doc)
# Get list of labels, from frequent to rare
labels = [l[0] for l in label_counts.most_common()]
return train_docs, test_docs, unused_docs, labels
def parse_file(path):
"""
Parses a single xml file from the RCV1-v2 dataset.
Args:
path (str): absolute path to the an article from RCV1-v2
Returns:
doc (Doc): annotated Reuters article
"""
with open(path, encoding="iso-8859-1") as fp:
article = BeautifulSoup(fp, "html5lib")
doc_id = article.find("newsitem")["itemid"]
title = article.find("title").get_text()
text = article.find("text").get_text()
headline = article.find("headline")
if headline:
headline = headline.get_text()
dateline = article.find("dateline")
if dateline:
dateline = dateline.get_text()
topics = article.find(class_="bip:topics:1.0")
labels = []
if topics:
for topic in topics.find_all("code"):
labels.append(str(topic["code"]))
return Doc(doc_id, title, headline, dateline, text, labels)
def fetch(data_dir):
"""
If the Reuters dataset is not available, prints instructions on how to
get it. Otherwise, returns the folder with the file.
Args:
data_dir (str): absolute path to the folder where datasets are stored
Returns:
reuters_dir (str): absolute path to the folder where datasets are stored
"""
# Create folder
reuters_dir = os.path.join(data_dir, NAME)
utils.create_folder(reuters_dir)
# Show instructions to fetch the dataset if it's not available
reuters_file = os.path.join(reuters_dir, REUTERS_FILE)
if not os.path.exists(reuters_file):
print(REUTERS_TEXT.format(reuters_file))
exit(0)
# Extract the file
final_dir = os.path.join(reuters_dir, REUTERS_FINAL_DIR)
if not os.path.exists(final_dir):
with tarfile.open(reuters_file) as reuters:
reuters.extractall(reuters_dir)
return final_dir
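if __name__ == '__main__':
    # Illustrative driver (not part of the original module): resolves the
    # corpus under a hypothetical ./data directory and parses it; fetch()
    # prints download instructions and exits if the archive is missing.
    train, test, unused, labels = parse(fetch("data"))
    print("train/test docs: {}/{}".format(len(train), len(test)))
    print("most frequent labels: {}".format(labels[:5]))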
| jpbottaro/anna | anna/data/dataset/reuters.py | Python | mit | 4,617 |
"""SCons.Tool.tar
Tool-specific initialization for tar.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/tar.py 2014/03/02 14:18:15 garyo"
import SCons.Action
import SCons.Builder
import SCons.Defaults
import SCons.Node.FS
import SCons.Util
tars = ['tar', 'gtar']
TarAction = SCons.Action.Action('$TARCOM', '$TARCOMSTR')
TarBuilder = SCons.Builder.Builder(action = TarAction,
source_factory = SCons.Node.FS.Entry,
source_scanner = SCons.Defaults.DirScanner,
suffix = '$TARSUFFIX',
multi = 1)
def generate(env):
"""Add Builders and construction variables for tar to an Environment."""
try:
bld = env['BUILDERS']['Tar']
except KeyError:
bld = TarBuilder
env['BUILDERS']['Tar'] = bld
env['TAR'] = env.Detect(tars) or 'gtar'
env['TARFLAGS'] = SCons.Util.CLVar('-c')
env['TARCOM'] = '$TAR $TARFLAGS -f $TARGET $SOURCES'
env['TARSUFFIX'] = '.tar'
def exists(env):
return env.Detect(tars)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| FireWRT/OpenWrt-Firefly-Libraries | staging_dir/host/lib/scons-2.3.1/SCons/Tool/tar.py | Python | gpl-2.0 | 2,544 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 25 22:34:05 2020
@author: mostafamousavi
"""
from EQTransformer.utils.downloader import downloadMseeds, makeStationList, downloadSacs
import pytest
import glob
import os
def test_downloader():
makeStationList(client_list=["SCEDC"],
min_lat=35.50,
max_lat=35.60,
min_lon=-117.80,
max_lon=-117.40,
start_time="2019-09-01 00:00:00.00",
end_time="2019-09-03 00:00:00.00",
channel_list=["HH[ZNE]", "HH[Z21]", "BH[ZNE]", "EH[ZNE]", "SH[ZNE]", "HN[ZNE]", "HN[Z21]", "DP[ZNE]"],
filter_network=["SY"],
filter_station=[])
downloadMseeds(client_list=["SCEDC", "IRIS"],
stations_json='station_list.json',
output_dir="downloads_mseeds",
start_time="2019-09-01 00:00:00.00",
end_time="2019-09-02 00:00:00.00",
min_lat=35.50,
max_lat=35.60,
min_lon=-117.80,
max_lon=-117.40,
chunck_size=1,
channel_list=[],
n_processor=2)
dir_list = [ev for ev in os.listdir('.')]
if ('downloads_mseeds' in dir_list) and ('station_list.json' in dir_list):
successful = True
else:
successful = False
assert successful == True
def test_mseeds():
mseeds = glob.glob("downloads_mseeds/CA06/*.mseed")
assert len(mseeds) > 0
| smousavi05/EQTransformer | tests/test_downloader.py | Python | mit | 1,620 |
import csv
import os
import re
import tempfile
import urllib2
from lib.boolops import xor
# Set maximum field size to 20MB
csv.field_size_limit(20000000)
class defaultcsv(csv.Dialect):
def __init__(self):
self.delimiter = ','
self.doublequote = True
self.quotechar = '"'
self.quoting = csv.QUOTE_MINIMAL
self.lineterminator = '\n'
class tsv(csv.Dialect):
def __init__(self):
self.delimiter = '\t'
self.doublequote = True
self.quotechar = '"'
self.quoting = csv.QUOTE_MINIMAL
self.lineterminator = '\n'
class line(csv.Dialect):
def __init__(self):
self.delimiter = '\n'
self.doublequote = False
self.quotechar = '"'
self.quoting = csv.QUOTE_NONE
self.lineterminator = '\n'
urllike = re.compile(
'^((?:http(?:s)?|ftp)://)(?:(?:[A-Z0-9]+(?:-*[A-Z0-9]+)*\.)+[A-Z]{2,6}|localhost|\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})(?::\d+)?(?:/?|/\S+)$',
re.IGNORECASE)
urlstart = re.compile('^(http(?:s)?|ftp)', re.IGNORECASE)
boolargs = ['skipinitialspace', 'doublequote']
nonstringargs = {
'quoting': {'QUOTE_ALL': csv.QUOTE_ALL, 'QUOTE_NONE': csv.QUOTE_NONE, 'QUOTE_MINIMAL': csv.QUOTE_MINIMAL,
'QUOTE_NONNUMERIC': csv.QUOTE_NONNUMERIC},
'dialect': {'line': line(), 'tsv': tsv(), 'csv': defaultcsv(), 'json': 'json'}}
needsescape = ['delimiter', 'quotechar', 'lineterminator']
class InputsError(Exception):
pass
def inoutargsparse(args, kargs):
returnvals = {'url': False, 'header': False, 'compression': False, 'compressiontype': None, 'filename': None}
where = None # This gets registered with the Connection
if not xor((len(args) > 0), ('file' in kargs), ('url' in kargs), ('http' in kargs), ('ftp' in kargs),
('https' in kargs)):
raise InputsError()
if 'http' in kargs:
kargs['url'] = 'http:' + kargs['http']
del kargs['http']
elif 'ftp' in kargs:
kargs['url'] = 'ftp:' + kargs['ftp']
del kargs['ftp']
elif 'https' in kargs:
kargs['url'] = 'https:' + kargs['https']
del kargs['https']
returnvals['url'] = False
if len(args) > 0:
where = args[0]
elif 'file' in kargs:
where = kargs['file']
del kargs['file']
else: # url
returnvals['url'] = True
where = kargs['url']
v = urllike.match(where)
if v:
if not v.groups()[0] or v.groups()[0] == '':
where = "http://" + where
del kargs['url']
if 'header' in kargs:
returnvals['header'] = kargs['header']
del kargs['header']
#########################################################
if where.endswith('.zip'):
returnvals['compression'] = True
returnvals['compressiontype'] = 'zip'
if where.endswith('.gz') or where.endswith('.gzip'):
returnvals['compression'] = True
returnvals['compressiontype'] = 'gz'
#########################################################
if 'compression' in kargs:
returnvals['compression'] = kargs['compression']
del kargs['compression']
elif 'compressiontype' in kargs:
returnvals['compression'] = True
if 'compressiontype' not in kargs:
returnvals['compressiontype'] = 'zip'
if where.endswith('.gz') or where.endswith('.gzip'):
returnvals['compressiontype'] = 'gz'
else:
returnvals['compressiontype'] = kargs['compressiontype']
del kargs['compressiontype']
returnvals['filename'] = where
return returnvals
def cacheurl(url, extraheaders):
fd, fname = tempfile.mkstemp(suffix="kill.urlfetch")
os.close(fd)
req = urllib2.Request(url, None, extraheaders)
# urliter=urllib2.urlopen(url)
urliter = urllib2.urlopen(req)
tmp = open(fname, "wb")
for line in urliter:
tmp.write(line)
tmp.close()
urliter.close()
return fname
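if __name__ == '__main__':
    # Illustrative sketch (not part of the original module): shows how
    # inoutargsparse() interprets its arguments. The file names and URL
    # are hypothetical.
    print(inoutargsparse(['data.csv'], {}))
    # Compression is detected from the extension, 'header' is popped:
    print(inoutargsparse([], {'file': 'dump.tsv.gz', 'header': True}))
    # URL input sets the 'url' flag in the returned dict:
    print(inoutargsparse([], {'url': 'http://example.com/t.csv'}))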
| madgik/exareme | Exareme-Docker/src/exareme/exareme-tools/madis/src/lib/inoutparsing.py | Python | mit | 3,972 |
#!/usr/bin/env python
# Copyright 2014-2021 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Peter Pinski <peter.pinski@quantumsimulations.de>
#
'''
Localized molecular orbitals via Cholesky factorization.
The orbitals are usually less well localized than with Boys, Pipek-Mezey, etc.
On the other hand, the procedure is non-iterative and the result unique,
except for degeneracies.
F. Aquilante, T.B. Pedersen, J. Chem. Phys. 125, 174101 (2006)
https://doi.org/10.1063/1.2360264
'''
import numpy as np
from pyscf.lib.scipy_helper import pivoted_cholesky
def cholesky_mos(mo_coeff):
'''
Calculates localized orbitals through a pivoted Cholesky factorization
of the density matrix.
Args:
mo_coeff: block of MO coefficients to be localized
Returns:
the localized MOs
'''
assert(mo_coeff.ndim == 2)
nao, nmo = mo_coeff.shape
# Factorization of a density matrix-like quantity.
D = np.dot(mo_coeff, mo_coeff.T)
L, piv, rank = pivoted_cholesky(D, lower=True)
if rank < nmo:
raise RuntimeError('rank of matrix lower than the number of orbitals')
# Permute L back to the original order of the AOs.
# Superfluous columns are cropped out.
P = np.zeros((nao, nao))
P[piv, np.arange(nao)] = 1
mo_loc = np.dot(P, L[:, :nmo])
return mo_loc
if __name__ == "__main__":
import numpy
from pyscf.gto import Mole
from pyscf.scf import RHF
from pyscf.tools.mo_mapping import mo_comps
mol = Mole()
mol.atom = '''
C 0.681068338 0.605116159 0.307300799
C -0.733665805 0.654940451 -0.299036438
C -1.523996730 -0.592207689 0.138683275
H 0.609941801 0.564304456 1.384183068
H 1.228991034 1.489024155 0.015946420
H -1.242251083 1.542928348 0.046243898
H -0.662968178 0.676527364 -1.376503770
H -0.838473936 -1.344174292 0.500629028
H -2.075136399 -0.983173387 -0.703807608
H -2.212637905 -0.323898759 0.926200671
O 1.368219958 -0.565620846 -0.173113101
H 2.250134219 -0.596689848 0.204857736
'''
mol.basis = 'STO-3G'
mol.build()
mf = RHF(mol)
mf.kernel()
nocc = numpy.count_nonzero(mf.mo_occ > 0)
mo_loc = cholesky_mos(mf.mo_coeff[:, :nocc])
print('LMO Largest coefficients')
numpy.set_printoptions(precision=3, suppress=True, sign=' ')
for i in range(nocc):
li = numpy.argsort(abs(mo_loc[:, i]))
print('{0:3d} {1}'. format(i, mo_loc[li[:-6:-1], i]))
| sunqm/pyscf | pyscf/lo/cholesky.py | Python | apache-2.0 | 3,209 |
from flask import Flask, Response, abort, render_template
import config
import traceback
import sys
import shutil
import helpers
import os
import datetime
app = Flask(__name__)
@app.route("/")
def home():
message = '{0}\n'.format(config.domain)
message += 'a bytestring safe terminal pastebin clone\n'
message += '\n'
message += '# send data\n'
message += '$ echo "simple" | nc {0} {1}\n'.format(config.domain, config.socket_port)
message += 'http://{0}/deadbeef\n'.format(config.domain)
message += '\n'
message += '# read over netcat\n'
message += '$ echo "/get deadbeef" | nc {0} {1}\n'.format(config.domain, config.socket_port)
message += 'simple\n'
message += '\n'
message += '# send encrypted data as binary\n'
message += '$ echo "sign me" | gpg --sign | nc {0} {1}\n'.format(config.domain, config.socket_port)
message += 'http://{0}/de4dbe3f\n'.format(config.domain)
message += '\n'
message += '# verify or decrypt with remote gpg key in a browser\n'
message += 'http://{0}/gpg:16CHARACTERKEYID[:<keyserver>]/d34db33f\n'.format(config.domain)
message += '\n'
message += "# default is sks, accepted keyservers are\n"
for key, value in config.keyservers.items():
message += " {0} - {1}\n".format(key, value)
message += " others - send a PR at github.com/hmngwy/pipebin"
return Response(message, mimetype='text/plain')
@app.route("/<slug>")
@app.route("/gpg:<keyid>/<slug>")
@app.route("/gpg:<keyid>:<keyserver>/<slug>")
def decrypt(slug, keyid=None, keyserver='sks', nosig=False, parse=None):
if os.path.isfile(helpers.path_gen(slug)):
with open(helpers.path_gen(slug), "rb") as out:
contents = out.read()
if keyid is None:
is_binary = helpers.is_binary_string(contents)
mimetype = 'octet-stream' if is_binary else 'text/plain'
return Response(contents, mimetype=mimetype)
elif keyid is not None:
try:
keyserver_full = config.keyservers[keyserver]
except KeyError:
abort(404)
gpg, gpg_homedir = helpers.create_gpg()
try:
import_result = gpg.recv_keys(keyserver_full, keyid)
except Exception as e:
print(str(e))
abort(404)
if len(import_result.fingerprints)>0:
with open(helpers.path_gen(slug), "rb") as out:
decrypted_data = gpg.decrypt(out.read(), always_trust=True)
delete_result = gpg.delete_keys(import_result.fingerprints[0])
shutil.rmtree(gpg_homedir)
if decrypted_data.ok or decrypted_data.status == 'signature valid':
sig_info = "{0} {1}\nFingerprint: {2}\nTrust Level: {3}\nTrust Text: {4}\nSignature ID: {5}\n".format(decrypted_data.username, decrypted_data.key_id, decrypted_data.fingerprint, decrypted_data.trust_level, decrypted_data.trust_text, decrypted_data.signature_id)
sig_info += "\n----- Verification Time: " + str(datetime.datetime.now()).split('.')[0] + " -----"
contents = str(decrypted_data)
is_binary = helpers.is_binary_string(contents)
mimetype = 'text/plain'
return Response(sig_info + '\n\n' + contents, mimetype=mimetype)
else:
abort(404)
else:
print('Key import failed')
abort(404)
else:
abort(404)
if __name__ == "__main__":
app.run(host=config.http_host)
| hmngwy/pipebin | http_server.py | Python | gpl-2.0 | 3,640 |
from settings import *
import logging
BAD_LOGGER_NAMES = ['south', 'factory.containers', 'factory.generate']
bad_loggers = []
for bln in BAD_LOGGER_NAMES:
logger = logging.getLogger(bln)
logger.setLevel(logging.INFO)
bad_loggers.append(bln)
# Nose Test Runner
INSTALLED_APPS += ('django_nose',)
NOSE_ARGS = ['--with-xunit', '--with-coverage',
'--cover-package', 'api',
'--cover-package', 'frontend',
]
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner' | VikParuchuri/movide | movide/test.py | Python | agpl-3.0 | 507 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
script.module.metadatautils
extrafanart.py
Get extrafanart location for kodi media
'''
import os
import xbmcvfs
def get_extrafanart(file_path):
'''get extrafanart path on disk based on media path'''
result = {}
efa_path = ""
if "plugin.video.emby" in file_path:
# workaround for emby addon
efa_path = u"plugin://plugin.video.emby/extrafanart?path=" + file_path
elif "plugin://" in file_path:
efa_path = ""
elif "videodb://" in file_path:
efa_path = ""
else:
count = 0
while not count == 3:
# lookup extrafanart folder by navigating up the tree
file_path = os.path.dirname(file_path)
try_path = file_path + u"/extrafanart/"
if xbmcvfs.exists(try_path):
efa_path = try_path
break
count += 1
if efa_path:
result["art"] = {"extrafanart": efa_path}
for count, file in enumerate(xbmcvfs.listdir(efa_path)[1]):
if file.lower().endswith(".jpg"):
result["art"]["ExtraFanArt.%s" % count] = efa_path + file.decode("utf-8")
return result
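if __name__ == '__main__':
    # Illustrative sketch (not part of the original file), intended to run
    # inside Kodi where the xbmcvfs module is available. The media path is
    # a hypothetical example.
    print(get_extrafanart("/movies/Some Movie (2001)/movie.mkv"))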
| mrquim/mrquimrepo | script.module.metadatautils/lib/helpers/extrafanart.py | Python | gpl-2.0 | 1,209 |
"""Version of the SFS Toolbox.
This is the only place where the version number is stored.
During installation, this file is read by ../setup.py and when importing
the 'sfs' module, this module is imported by __init__.py.
Whenever the version is incremented, a Git tag with the same name should
be created.
"""
__version__ = "0.0.0"
| AchimTuran/sfs-python | sfs/_version.py | Python | mit | 336 |
"""Wrapper around the jQuery UI library
Exposes a single object, jq, to manipulate the widgets designed in the library
This object supports :
- subscription : jq[elt_id] returns an object matching the element with the
specified id
- a method get(**kw). The only keyword currently supported is "selector". The
method returns a list of instances of the class Element, each instance wraps
the elements matching the CSS selector passed
jq(selector="button") : returns instances of Element for all button tags
The value can be a list or tuple of CSS selector strings :
    jq(selector=("input[type=submit]","a")) : instances of Element for all
"input" tags with attribute "type" set to "submit" + "a" tags (anchors)
Instances of Element have the same interface as the selections made by the
jQuery function $, with the additional methods provided by jQuery UI. For
instance, to turn an element into a dialog :
jq[elt_id].dialog()
When jQuery UI methods expect a Javascript object, they can be passed as
key/value pairs :
jq['tags'].autocomplete(source=availableTags)
"""
from browser import html, document, window
import javascript
_path = __file__[:__file__.rfind('/')]+'/'
document <= html.LINK(rel="stylesheet",
href=_path+'css/smoothness/jquery-ui.css')
# The scripts must be loaded in blocking mode, by using the function
# load(script_url[, names]) in module javascript
# If we just add them to the document with script tags, eg :
#
# document <= html.SCRIPT(script_url)
# _jqui = window.jQuery.noConflict(True)
#
# the name "jQuery" is not in the Javascript namespace until the script is
# fully loaded in the page, so "window.jQuery" raises an exception
# Load jQuery and put name 'jQuery' in the global Javascript namespace
javascript.load(_path+'jquery-1.11.2.min.js', ['jQuery'])
javascript.load(_path+'jquery-ui.min.js')
_jqui = window.jQuery.noConflict(True)
_events = ['abort',
'beforeinput',
'blur',
'click',
'compositionstart',
'compositionupdate',
'compositionend',
'dblclick',
'error',
'focus',
'focusin',
'focusout',
'input',
'keydown',
'keyup',
'load',
'mousedown',
'mouseenter',
'mouseleave',
'mousemove',
'mouseout',
'mouseover',
'mouseup',
'resize',
'scroll',
'select',
'unload']
class JQFunction:
def __init__(self, func):
self.func = func
def __call__(self, *args, **kw):
if kw:
# keyword arguments are passed as a single Javascript object
return self.func(*args, kw)
else:
return self.func(*args)
class Element:
"""Wrapper around the objects returned by jQuery selections"""
def __init__(self, item):
self.item = item
def bind(self, event, callback):
"""Binds an event on the element to function callback"""
getattr(self.item, event)(callback)
def __getattr__(self, attr):
res = getattr(self.item, attr)
if attr in _events:
# elt.click(f) is handled like elt.bind('click', f)
return lambda f:self.bind(attr, f)
if callable(res):
res = JQFunction(res)
return res
class jq:
@staticmethod
def get(**selectors):
items = []
for k,v in selectors.items():
if k=='selector':
                if isinstance(v, (list, tuple)):
values = v
else:
values = [v]
for value in values:
items.append(Element(_jqui(value)))
elif k=='element':
items = Element(_jqui(v))
return items
@staticmethod
def __getitem__(element_id):
return jq.get(selector='#'+element_id)[0]
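# ----------------------------------------------------------------------
# Usage sketch (illustrative only; the element ids "dialog" and "tags"
# are hypothetical and must exist in the page, as must a <button> tag):
#
#   jq['dialog'].dialog(title="Hello")   # keyword args become a JS object
#   jq['tags'].autocomplete(source=['python', 'brython'])
#   for elt in jq.get(selector='button'):
#       elt.click(lambda ev: window.console.log(ev))  # routed through bind()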
| code4futuredotorg/reeborg_tw | src/libraries/brython/Lib/jqueryui/__init__.py | Python | agpl-3.0 | 3,680 |
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/mssdk.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""engine.SCons.Tool.mssdk
Tool-specific initialization for Microsoft SDKs, both Platform
SDKs and Windows SDKs.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
from MSCommon import mssdk_exists, \
mssdk_setup_env
def generate(env):
"""Add construction variables for an MS SDK to an Environment."""
mssdk_setup_env(env)
def exists(env):
return mssdk_exists()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| EmanueleCannizzaro/scons | src/engine/SCons/Tool/mssdk.py | Python | mit | 1,834 |
# -*- coding: utf-8 -*-
"""
sphinx.util.inspect
~~~~~~~~~~~~~~~~~~~
Helpers for inspecting Python modules.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from six import PY3, binary_type
from six.moves import builtins
from sphinx.util import force_decode
# this imports the standard library inspect module without resorting to
# a relative import of this module
inspect = __import__('inspect')
memory_address_re = re.compile(r' at 0x[0-9a-f]{8,16}(?=>)', re.IGNORECASE)
if PY3:
from functools import partial
def getargspec(func):
"""Like inspect.getargspec but supports functools.partial as well."""
if inspect.ismethod(func):
func = func.__func__
if type(func) is partial:
orig_func = func.func
argspec = getargspec(orig_func)
args = list(argspec[0])
defaults = list(argspec[3] or ())
kwoargs = list(argspec[4])
kwodefs = dict(argspec[5] or {})
if func.args:
args = args[len(func.args):]
for arg in func.keywords or ():
try:
i = args.index(arg) - len(args)
del args[i]
try:
del defaults[i]
except IndexError:
pass
except ValueError: # must be a kwonly arg
i = kwoargs.index(arg)
del kwoargs[i]
del kwodefs[arg]
return inspect.FullArgSpec(args, argspec[1], argspec[2],
tuple(defaults), kwoargs,
kwodefs, argspec[6])
while hasattr(func, '__wrapped__'):
func = func.__wrapped__
if not inspect.isfunction(func):
raise TypeError('%r is not a Python function' % func)
return inspect.getfullargspec(func)
else: # 2.7
from functools import partial
def getargspec(func):
"""Like inspect.getargspec but supports functools.partial as well."""
if inspect.ismethod(func):
func = func.__func__
parts = 0, ()
if type(func) is partial:
keywords = func.keywords
if keywords is None:
keywords = {}
parts = len(func.args), keywords.keys()
func = func.func
if not inspect.isfunction(func):
raise TypeError('%r is not a Python function' % func)
args, varargs, varkw = inspect.getargs(func.__code__)
func_defaults = func.__defaults__
if func_defaults is None:
func_defaults = []
else:
func_defaults = list(func_defaults)
if parts[0]:
args = args[parts[0]:]
if parts[1]:
for arg in parts[1]:
i = args.index(arg) - len(args)
del args[i]
try:
del func_defaults[i]
except IndexError:
pass
return inspect.ArgSpec(args, varargs, varkw, func_defaults)
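# Example (illustrative, applies to both branches): for
# ``partial(lambda a, b, c=1: None, 2)`` the bound positional argument ``a``
# is dropped, so the returned argspec lists only ``b`` and ``c``, with the
# default for ``c`` preserved.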
try:
import enum
except ImportError:
enum = None
def isenumclass(x):
"""Check if the object is subclass of enum."""
if enum is None:
return False
return issubclass(x, enum.Enum)
def isenumattribute(x):
"""Check if the object is attribute of enum."""
if enum is None:
return False
return isinstance(x, enum.Enum)
def isdescriptor(x):
"""Check if the object is some kind of descriptor."""
for item in '__get__', '__set__', '__delete__':
if hasattr(safe_getattr(x, item, None), '__call__'):
return True
return False
def safe_getattr(obj, name, *defargs):
"""A getattr() that turns all exceptions into AttributeErrors."""
try:
return getattr(obj, name, *defargs)
except Exception:
# sometimes accessing a property raises an exception (e.g.
# NotImplementedError), so let's try to read the attribute directly
try:
# In case the object does weird things with attribute access
# such that accessing `obj.__dict__` may raise an exception
return obj.__dict__[name]
except Exception:
pass
# this is a catch-all for all the weird things that some modules do
# with attribute access
if defargs:
return defargs[0]
raise AttributeError(name)
def safe_getmembers(object, predicate=None, attr_getter=safe_getattr):
"""A version of inspect.getmembers() that uses safe_getattr()."""
results = []
for key in dir(object):
try:
value = attr_getter(object, key, None)
except AttributeError:
continue
if not predicate or predicate(value):
results.append((key, value))
results.sort()
return results
def object_description(object):
"""A repr() implementation that returns text safe to use in reST context."""
try:
s = repr(object)
except Exception:
raise ValueError
if isinstance(s, binary_type):
s = force_decode(s, None)
# Strip non-deterministic memory addresses such as
# ``<__main__.A at 0x7f68cb685710>``
s = memory_address_re.sub('', s)
return s.replace('\n', ' ')
def is_builtin_class_method(obj, attr_name):
"""If attr_name is implemented at builtin class, return True.
>>> is_builtin_class_method(int, '__init__')
True
    Why is this function needed? CPython implements int.__init__ as a
    descriptor, but PyPy implements it in pure Python code.
"""
classes = [c for c in inspect.getmro(obj) if attr_name in c.__dict__]
cls = classes[0] if classes else object
if not hasattr(builtins, safe_getattr(cls, '__name__', '')):
return False
return getattr(builtins, safe_getattr(cls, '__name__', '')) is cls
| axbaretto/beam | sdks/python/.tox/docs/lib/python2.7/site-packages/sphinx/util/inspect.py | Python | apache-2.0 | 5,997 |
from terane.settings import *
class TestSettings(object):
def test_settings_option(self):
settings = Settings("usage", "description", section="test", appname="test", confbase="./")
settings.addOption("o", "option", "test option")
ns = settings.parse(argv=['test', '-o', 'foo'])
assert ns.section('test').getString('test option') == 'foo'
ns = settings.parse(argv=['test', '--option', 'bar'])
assert ns.section('test').getString('test option') == 'bar'
def test_settings_switch(self):
settings = Settings("usage", "description", section="test", appname="test", confbase="./")
settings.addSwitch("s", "switch", "test switch")
ns = settings.parse(argv=['test', '-s'])
assert ns.section('test').getBoolean('test switch') == True
ns = settings.parse(argv=['test', '--switch'])
assert ns.section('test').getBoolean('test switch') == True
def test_settings_switch_reverse(self):
settings = Settings("usage", "description", section="test", appname="test", confbase="./")
settings.addSwitch("s", "switch", "test switch", reverse=True)
ns = settings.parse(argv=['test', '-s'])
assert ns.section('test').getBoolean('test switch') == False
ns = settings.parse(argv=['test', '--switch'])
assert ns.section('test').getBoolean('test switch') == False
def test_load_config_file(self):
settings = Settings("usage", "description", section="test", appname="test-config", confbase="./tests")
ns = settings.parse(argv=['test', '-c', './tests/test-config.conf'])
assert ns.section('test').getString('string') == 'hello world'
assert ns.section('test').getInt('int') == 42
assert ns.section('test').getFloat('float') == 3.14
assert ns.section('test').getBoolean('boolean') == False
assert ns.section('test').getPath('path') == '/bin/true'
assert ns.section('test').getList('list', int) == [1, 2, 3]
ns = settings.parse(argv=['test'])
assert ns.section('test').getString('string') == 'hello world'
assert ns.section('test').getInt('int') == 42
assert ns.section('test').getFloat('float') == 3.14
assert ns.section('test').getBoolean('boolean') == False
assert ns.section('test').getPath('path') == '/bin/true'
assert ns.section('test').getList('list', int) == [1, 2, 3]
def test_subcommand_one_level(self):
settings = Settings("usage", "description", section="test")
settings.addLongOption("option", "test option")
cmd1 = settings.addSubcommand("cmd1", "usage", "description")
cmd1.addLongOption("cmd1-option", "cmd1 option")
ns = settings.parse(argv=['test', '--option', 'foo', 'cmd1', '--cmd1-option', 'bar'])
assert ns.section('test').getString('test option') == 'foo'
assert ns.section('test:cmd1').getString('cmd1 option') == 'bar'
def test_subcommand_multi_level(self):
settings = Settings("usage", "description", section="test")
child = settings.addSubcommand("child", "usage", "description")
gchild = child.addSubcommand("grandchild", "usage", "description")
ggchild = gchild.addSubcommand("great-grandchild", "usage", "description")
ggchild.addLongOption("option", "option")
ns = settings.parse(argv=['test', 'child', 'grandchild', 'great-grandchild', '--option', 'foo'])
assert ns.section('test:child:grandchild:great-grandchild').getString('option') == 'foo'
| msfrank/terane-toolbox | tests/test_settings.py | Python | gpl-3.0 | 3,553 |
import pylab as pyl
from astLib import astStats
from sklearn.metrics import median_absolute_error, mean_squared_error
import h5py as hdf
from matplotlib.ticker import AutoMinorLocator
def calc_err(pred, true):
return (pred - true) / true
golden_mean = (pyl.sqrt(5.) - 1.0) / 2.0
f = pyl.figure(figsize=(10, 10 * golden_mean))
ax1 = pyl.subplot2grid((3, 4), (0, 0), rowspan=2)
ax2 = pyl.subplot2grid((3, 4), (0, 1), rowspan=2, sharex=ax1)
ax3 = pyl.subplot2grid((3, 4), (0, 2), rowspan=2, sharex=ax1, sharey=ax2)
ax4 = pyl.subplot2grid((3, 4), (0, 3), rowspan=2, sharex=ax1, sharey=ax2)
# now for the bottom bits
ax1s = pyl.subplot2grid((3, 4), (2, 0))
ax2s = pyl.subplot2grid((3, 4), (2, 1), sharex=ax1s)
ax3s = pyl.subplot2grid((3, 4), (2, 2), sharex=ax1s, sharey=ax2s)
ax4s = pyl.subplot2grid((3, 4), (2, 3), sharex=ax1s, sharey=ax2s)
ax2.set_yticklabels([])
ax1.set_xticklabels([])
ax2s.set_yticklabels([])
# add minor ticks to the bottom
ax1s.yaxis.set_minor_locator(AutoMinorLocator())
ax2s.yaxis.set_minor_locator(AutoMinorLocator())
### Targeted ###
################
with hdf.File('./buzzard_targetedRealistic_masses.hdf5', 'r') as f:
dset = f[f.keys()[0]]
target = dset['M200c', 'MASS', 'ML_pred_1d', 'ML_pred_2d', 'ML_pred_3d']
# filter bad values
mask = (target['ML_pred_1d'] != 0)
target = target[mask]
# plot one to one lines
ax1.plot([12, 15.5], [12, 15.5], c='k', zorder=0)
ax2.plot([12, 15.5], [12, 15.5], c='k', zorder=0)
ax3.plot([12, 15.5], [12, 15.5], c='k', zorder=0)
ax4.plot([12, 15.5], [12, 15.5], c='k', zorder=0)
ax1s.axhline(0)
ax2s.axhline(0)
ax3s.axhline(0)
ax4s.axhline(0)
# now for the plotting
###################
#### Power Law ####
###################
for d, c, style, zo in zip([target], ['#7A68A6'], ['-'], [1]):
print('power law')
y_ = astStats.runningStatistic(
pyl.log10(d['M200c']),
pyl.log10(d['MASS']),
pyl.percentile,
binNumber=20,
q=[16, 50, 84])
quants = pyl.array(y_[1])
ax1.plot(y_[0], quants[:, 1], style, c=c, zorder=zo)
if not c == '#e24a33':
ax1.fill_between(y_[0],
quants[:, 2],
quants[:, 0],
facecolor=c,
alpha=0.3,
edgecolor=c)
err = calc_err(d['MASS'], d['M200c'])
y_ = astStats.runningStatistic(
pyl.log10(d['M200c']),
err,
pyl.percentile,
binNumber=20,
q=[16, 50, 84])
quants = pyl.array(y_[1])
ax1s.plot(y_[0], quants[:, 1], style, c=c, zorder=zo)
if not c == '#e24a33':
ax1s.fill_between(y_[0],
quants[:, 2],
quants[:, 0],
facecolor=c,
alpha=0.3,
edgecolor=c)
print('MAE', median_absolute_error(
pyl.log10(d['M200c']), pyl.log10(d['MASS'])))
print('RMSE', pyl.sqrt(mean_squared_error(
pyl.log10(d['M200c']), pyl.log10(d['MASS']))))
############
#### 1d ####
############
print('1d')
y_ = astStats.runningStatistic(
pyl.log10(d['M200c']),
d['ML_pred_1d'],
pyl.percentile,
binNumber=20,
q=[16, 50, 84])
quants = pyl.array(y_[1])
ax2.plot(y_[0], quants[:, 1], style, c=c, zorder=zo)
if not c == '#e24a33':
ax2.fill_between(y_[0],
quants[:, 2],
quants[:, 0],
facecolor=c,
alpha=0.4,
edgecolor=c)
err = calc_err(10**d['ML_pred_1d'], d['M200c'])
y_ = astStats.runningStatistic(
pyl.log10(d['M200c']),
err,
pyl.percentile,
binNumber=20,
q=[16, 50, 84])
quants = pyl.array(y_[1])
ax2s.plot(y_[0], quants[:, 1], style, c=c, zorder=zo)
if not c == '#e24a33':
ax2s.fill_between(y_[0],
quants[:, 2],
quants[:, 0],
facecolor=c,
alpha=0.4,
edgecolor=c)
print('MAE', median_absolute_error(pyl.log10(d['M200c']), d['ML_pred_1d']))
print('RMSE', pyl.sqrt(mean_squared_error(
pyl.log10(d['M200c']), d['ML_pred_1d'])))
#############
#### 2d #####
#############
print('2d')
y_ = astStats.runningStatistic(
pyl.log10(d['M200c']),
d['ML_pred_2d'],
pyl.percentile,
binNumber=20,
q=[16, 50, 84])
quants = pyl.array(y_[1])
ax3.plot(y_[0], quants[:, 1], style, c=c, zorder=zo)
if not c == '#e24a33':
ax3.fill_between(y_[0],
quants[:, 2],
quants[:, 0],
facecolor=c,
alpha=0.4,
edgecolor=c)
err = calc_err(10**d['ML_pred_2d'], d['M200c'])
y_ = astStats.runningStatistic(
pyl.log10(d['M200c']),
err,
pyl.percentile,
binNumber=20,
q=[16, 50, 84])
quants = pyl.array(y_[1])
ax3s.plot(y_[0], quants[:, 1], style, c=c, zorder=zo)
if not c == '#e24a33':
ax3s.fill_between(y_[0],
quants[:, 2],
quants[:, 0],
facecolor=c,
alpha=0.4,
edgecolor=c)
print('MAE', median_absolute_error(pyl.log10(d['M200c']), d['ML_pred_2d']))
print('RMSE', pyl.sqrt(mean_squared_error(
pyl.log10(d['M200c']), d['ML_pred_2d'])))
##############
##### 3d #####
##############
print('3d')
y_ = astStats.runningStatistic(
pyl.log10(d['M200c']),
d['ML_pred_3d'],
pyl.percentile,
binNumber=20,
q=[16, 50, 84])
quants = pyl.array(y_[1])
ax4.plot(y_[0], quants[:, 1], style, c=c, zorder=zo)
if not c == '#e24a33':
ax4.fill_between(y_[0],
quants[:, 2],
quants[:, 0],
facecolor=c,
alpha=0.4,
edgecolor=c)
err = calc_err(10**d['ML_pred_3d'], d['M200c'])
y_ = astStats.runningStatistic(
pyl.log10(d['M200c']),
err,
pyl.percentile,
binNumber=20,
q=[16, 50, 84])
quants = pyl.array(y_[1])
ax4s.plot(y_[0], quants[:, 1], style, c=c, zorder=zo)
if not c == '#e24a33':
ax4s.fill_between(y_[0],
quants[:, 2],
quants[:, 0],
facecolor=c,
alpha=0.4,
edgecolor=c)
print('MAE', median_absolute_error(pyl.log10(d['M200c']), d['ML_pred_3d']))
print('RMSE', pyl.sqrt(mean_squared_error(
pyl.log10(d['M200c']), d['ML_pred_3d'])))
print('----')
### Add Legend ###
##################
line1 = pyl.Line2D([], [], ls='-', color='#7A68A6')
line2 = pyl.Line2D([], [], ls='--', color='#188487')
line3 = pyl.Line2D([], [], ls='-.', color='#e24a33')
#ax1.legend(line1, 'Targeted', loc=2)
#### tweak ####
ax1.set_xticks([12, 13, 14, 15])
ax2.set_xticks([12, 13, 14, 15])
ax2s.set_xticks([12, 13, 14, 15])
ax2s.set_ylim(-2, 4)
ax1s.set_ylim(-2, 4)
ax2s.set_yticks([-2, 0, 2])
ax1s.set_yticks([-2, 0, 2])
ax1.set_ylim(ax2.get_ylim())
ax1s.set_ylim(ax2s.get_ylim())
ax1.set_ylabel('Log $M_{pred}$')
ax1s.set_ylabel('$\epsilon$')
ax1s.set_xlabel('Log $M_{200c}$', fontsize=18)
ax2s.set_xlabel('Log $M_{200c}$', fontsize=18)
ax3s.set_xlabel('Log $M_{200c}$', fontsize=18)
ax4s.set_xlabel('Log $M_{200c}$', fontsize=18)
ax1.text(14, 12.25, 'Power Law', fontsize=18, horizontalalignment='center')
ax2.text(14, 12.25, '$ML_{\sigma}$', fontsize=18, horizontalalignment='center')
ax3.text(14,
12.25,
'$ML_{\sigma, z}$',
fontsize=18,
horizontalalignment='center')
ax4.text(14,
12.25,
'$ML_{\sigma, z, Ngal}$',
fontsize=18,
horizontalalignment='center')
| boada/vpCluster | data/boada/analysis_all/MLmethods/plot_massComparison_ML.py | Python | mit | 8,088 |
"""
byceps.services.shop.order.actions.create_ticket_bundles
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from .....typing import UserID
from ....ticketing.dbmodels.ticket_bundle import TicketBundle
from ....ticketing import (
category_service as ticket_category_service,
ticket_bundle_service,
)
from ...article.transfer.models import ArticleNumber
from .. import event_service
from ..transfer.models import ActionParameters, Order, OrderID
from ._ticketing import create_tickets_sold_event, send_tickets_sold_event
def create_ticket_bundles(
order: Order,
article_number: ArticleNumber,
bundle_quantity: int,
initiator_id: UserID,
parameters: ActionParameters,
) -> None:
"""Create ticket bundles."""
category_id = parameters['category_id']
ticket_quantity = parameters['ticket_quantity']
owned_by_id = order.placed_by_id
order_number = order.order_number
category = ticket_category_service.find_category(category_id)
if category is None:
raise ValueError(f'Unknown ticket category ID for order {order_number}')
for _ in range(bundle_quantity):
bundle = ticket_bundle_service.create_bundle(
category.party_id,
category.id,
ticket_quantity,
owned_by_id,
order_number=order_number,
used_by_id=owned_by_id,
)
_create_order_event(order.id, bundle)
tickets_sold_event = create_tickets_sold_event(
order.id, initiator_id, category_id, owned_by_id, ticket_quantity
)
send_tickets_sold_event(tickets_sold_event)
def _create_order_event(order_id: OrderID, ticket_bundle: TicketBundle) -> None:
event_type = 'ticket-bundle-created'
data = {
'ticket_bundle_id': str(ticket_bundle.id),
'ticket_bundle_category_id': str(ticket_bundle.ticket_category_id),
'ticket_bundle_ticket_quantity': ticket_bundle.ticket_quantity,
'ticket_bundle_owner_id': str(ticket_bundle.owned_by_id),
}
event_service.create_event(event_type, order_id, data)
| homeworkprod/byceps | byceps/services/shop/order/actions/create_ticket_bundles.py | Python | bsd-3-clause | 2,185 |
import os
from django.apps import AppConfig
def samevalues(names):
records = []
for name in names:
if type(name) == str:
records.append({key: name for key in ['label', 'model', 'title']})
return records
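# Illustrative result:
#   samevalues(['SwitchConnection']) ==
#       [{'label': 'SwitchConnection', 'model': 'SwitchConnection',
#         'title': 'SwitchConnection'}]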
pages = [
{'label': 'port', 'view': 'port_view', 'title': 'Port'},
{'label': 'counter', 'view': 'counter_view', 'title': 'Counter'},
{'label': 'select', 'view': 'select_view', 'title': 'Select'},
]
commands = [
{'label': 'test_connections', 'view': 'command', 'title': 'Test connections'},
{'label': 'collect_data', 'view': 'command', 'title': 'Collect data', 'enabled': False},
]
config_models = samevalues([
'SwitchConnection',
'CounterOid',
])
show_models = samevalues([
'SwitchConnection',
'XTimes',
'Deltas',
'CDicts',
'PDicts',
'SDicts',
'Integers',
'PortConfig',
])
class appAppConfig(AppConfig):
label = os.path.basename(os.path.dirname(os.path.realpath(__file__)))
name = 'apps.{}'.format(label)
verbose_name = 'BCounters'
pages = pages
commands = commands
config_models = config_models
show_models = show_models
| tulsluper/sanscript | apps/bc/apps.py | Python | gpl-3.0 | 1,152 |
# -*- encoding: utf-8 -*-
import copy
import inspect
from abjad.tools import indicatortools
from abjad.tools import markuptools
from abjad.tools import pitchtools
from abjad.tools import stringtools
from abjad.tools.abctools.AbjadObject import AbjadObject
class Instrument(AbjadObject):
'''A musical instrument.
'''
### CLASS VARIABLES ###
_format_slot = 'opening'
__slots__ = (
'_allowable_clefs',
'_default_scope',
'_do_not_format',
'_instrument_name',
'_instrument_name_markup',
'_is_primary_instrument',
'_performer_names',
'_pitch_range',
'_short_instrument_name',
'_short_instrument_name_markup',
'_sounding_pitch_of_written_middle_c',
'_starting_clefs',
)
### INITIALIZER ###
def __init__(
self,
instrument_name=None,
short_instrument_name=None,
instrument_name_markup=None,
short_instrument_name_markup=None,
allowable_clefs=None,
pitch_range=None,
sounding_pitch_of_written_middle_c=None,
):
from abjad.tools import scoretools
self._do_not_format = False
self._instrument_name = instrument_name
self._instrument_name_markup = instrument_name_markup
self._short_instrument_name = short_instrument_name
self._short_instrument_name_markup = short_instrument_name_markup
allowable_clefs = allowable_clefs or ['treble']
allowable_clefs = indicatortools.ClefInventory(allowable_clefs)
self._allowable_clefs = allowable_clefs
if isinstance(pitch_range, str):
pitch_range = pitchtools.PitchRange(pitch_range)
elif isinstance(pitch_range, pitchtools.PitchRange):
pitch_range = copy.copy(pitch_range)
elif pitch_range is None:
pitch_range = pitchtools.PitchRange()
else:
raise TypeError(pitch_range)
self._pitch_range = pitch_range
sounding_pitch_of_written_middle_c = \
sounding_pitch_of_written_middle_c or pitchtools.NamedPitch("c'")
sounding_pitch_of_written_middle_c = \
pitchtools.NamedPitch(sounding_pitch_of_written_middle_c)
self._sounding_pitch_of_written_middle_c = \
sounding_pitch_of_written_middle_c
self._default_scope = scoretools.Staff
self._is_primary_instrument = False
self._performer_names = ['instrumentalist']
self._starting_clefs = copy.copy(allowable_clefs)
### SPECIAL METHODS ###
def __copy__(self, *args):
r'''Copies instrument.
Returns new instrument.
'''
return type(self)(
instrument_name=self.instrument_name,
short_instrument_name=self.short_instrument_name,
instrument_name_markup=self.instrument_name_markup,
short_instrument_name_markup=self.short_instrument_name_markup,
allowable_clefs=self.allowable_clefs,
pitch_range=self.pitch_range,
sounding_pitch_of_written_middle_c=\
self.sounding_pitch_of_written_middle_c,
)
def __eq__(self, arg):
r'''Is true when `arg` is an instrument with instrument name and short
instrument name equal to those of this instrument. Otherwise false.
Returns boolean.
'''
if isinstance(arg, type(self)):
if self.instrument_name == arg.instrument_name and \
self.short_instrument_name == arg.short_instrument_name:
return True
return False
def __format__(self, format_specification=''):
r'''Formats instrument.
Set `format_specification` to `''` or `'storage'`.
Interprets `''` equal to `'storage'`.
Returns string.
'''
from abjad.tools import systemtools
if format_specification in ('', 'storage'):
return systemtools.StorageFormatManager.get_storage_format(self)
return str(self)
def __hash__(self):
        '''Gets hash value of instrument.
Computed on type, instrument name and short instrument name.
Returns integer.
'''
return hash((
type(self).__name__,
self.instrument_name,
self.short_instrument_name,
))
def __repr__(self):
r'''Gets interpreter representation of instrument.
Returns string.
'''
return '{}()'.format(type(self).__name__)
### PRIVATE PROPERTIES ###
@staticmethod
def _default_instrument_name_to_instrument_class(default_instrument_name):
for instrument_class in Instrument._list_instruments():
instrument = instrument_class()
if instrument.instrument_name == default_instrument_name:
return instrument_class
# TODO: _scope_name needs to be taken from IndicatorExpression!
# should not be stored on instrument.
@property
def _lilypond_format(self):
result = []
if self._do_not_format:
return result
line = r'\set {!s}.instrumentName = {!s}'
line = line.format(
self._scope_name,
self.instrument_name_markup,
)
result.append(line)
line = r'\set {!s}.shortInstrumentName = {!s}'
line = line.format(
self._scope_name,
self.short_instrument_name_markup,
)
result.append(line)
return result
@property
def _one_line_menu_summary(self):
return self.instrument_name
@property
def _scope_name(self):
if isinstance(self._default_scope, type):
return self._default_scope.__name__
elif isinstance(self._default_scope, str):
return self._default_scope
else:
return type(self._default_scope).__name__
### PRIVATE METHODS ###
def _get_default_performer_name(self):
if self._performer_names is None:
performer_name = '{} player'.format(self._default_instrument_name)
return performer_name
else:
return self._performer_names[-1]
def _get_performer_names(self):
if self._performer_names is None:
performer_name = '{} player'.format(self._default_instrument_name)
return [performer_name]
else:
return self._performer_names[:]
def _initialize_default_name_markups(self):
if self._instrument_name_markup is None:
if self.instrument_name:
string = self.instrument_name
string = stringtools.capitalize_start(string)
markup = markuptools.Markup(contents=string)
self._instrument_name_markup = markup
else:
self._instrument_name_markup = None
if self._short_instrument_name_markup is None:
if self.short_instrument_name:
string = self.short_instrument_name
string = stringtools.capitalize_start(string)
markup = markuptools.Markup(contents=string)
self._short_instrument_name_markup = markup
else:
self._short_instrument_name_markup = None
@classmethod
def _list_instrument_names(cls):
r'''Lists instrument names.
::
>>> function = instrumenttools.Instrument._list_instrument_names
>>> for instrument_name in function():
... instrument_name
...
'accordion'
'alto'
'alto flute'
...
Returns list.
'''
instrument_names = []
for instrument_class in cls._list_instruments():
instrument = instrument_class()
assert instrument.instrument_name is not None, repr(instrument)
instrument_names.append(instrument.instrument_name)
instrument_names.sort(key=lambda x: x.lower())
return instrument_names
@staticmethod
def _list_instruments(classes=None):
r'''Lists instruments.
::
>>> function = instrumenttools.Instrument._list_instruments
>>> for instrument in function():
... instrument.__name__
...
'Accordion'
'AltoFlute'
'AltoSaxophone'
'AltoTrombone'
'AltoVoice'
'BaritoneSaxophone'
...
Returns list.
'''
from abjad.tools import instrumenttools
if classes is None:
classes = (instrumenttools.Instrument,)
instruments = []
for value in instrumenttools.__dict__.values():
try:
if issubclass(value, classes):
if not value is instrumenttools.Instrument:
instruments.append(value)
except TypeError:
pass
instruments.sort(key=lambda x: x.__name__.lower())
return instruments
@classmethod
def _list_primary_instruments(cls):
primary_instruments = []
for instrument_class in cls._list_instruments():
instrument = instrument_class()
if instrument._is_primary_instrument:
primary_instruments.append(instrument_class)
return primary_instruments
@classmethod
def _list_secondary_instruments(cls):
secondary_instruments = []
for instrument_class in cls._list_instruments():
instrument = instrument_class()
if not instrument._is_primary_instrument:
secondary_instruments.append(instrument_class)
return secondary_instruments
### PUBLIC PROPERTIES ###
@property
def allowable_clefs(self):
r'''Gets allowable clefs of instrument.
Returns clef inventory.
'''
if self._allowable_clefs is None:
self._allowable_clefs = indicatortools.ClefInventory('treble')
return self._allowable_clefs
@property
def instrument_name(self):
r'''Gets instrument name.
Returns string.
'''
return self._instrument_name
@property
def instrument_name_markup(self):
r'''Gets instrument name markup.
Returns markup.
'''
if self._instrument_name_markup is None:
self._initialize_default_name_markups()
if not isinstance(self._instrument_name_markup, markuptools.Markup):
markup = markuptools.Markup(contents=self._instrument_name_markup)
self._instrument_name_markup = markup
if self._instrument_name_markup.contents != ('',):
return self._instrument_name_markup
@property
def pitch_range(self):
r'''Gets pitch range of instrument.
Returns pitch range.
'''
return self._pitch_range
@property
def short_instrument_name(self):
r'''Gets short instrument name.
Returns string.
'''
return self._short_instrument_name
@property
def short_instrument_name_markup(self):
r'''Gets short instrument name markup.
Returns markup.
'''
if self._short_instrument_name_markup is None:
self._initialize_default_name_markups()
if not isinstance(
self._short_instrument_name_markup, markuptools.Markup):
markup = markuptools.Markup(
contents=self._short_instrument_name_markup)
self._short_instrument_name_markup = markup
if self._short_instrument_name_markup.contents != ('',):
return self._short_instrument_name_markup
@property
def sounding_pitch_of_written_middle_c(self):
r'''Gets sounding pitch of written middle C.
Returns named pitch.
'''
return self._sounding_pitch_of_written_middle_c
### PUBLIC METHODS ###
def transpose_from_sounding_pitch_to_written_pitch(self, note_or_chord):
r'''Transposes `note_or_chord` from sounding pitch of instrument to
written pitch of instrument.
Returns `note_or_chord` with adjusted pitches.
'''
from abjad.tools import scoretools
sounding_pitch = self.sounding_pitch_of_written_middle_c
index = pitchtools.NamedPitch('C4') - sounding_pitch
index *= -1
if isinstance(note_or_chord, scoretools.Note):
note_or_chord.written_pitch = \
pitchtools.transpose_pitch_carrier_by_interval(
note_or_chord.written_pitch, index)
elif isinstance(note_or_chord, scoretools.Chord):
pitches = [
pitchtools.transpose_pitch_carrier_by_interval(pitch, index)
for pitch in note_or_chord.written_pitches
]
note_or_chord.written_pitches = pitches
else:
message = 'must be note or chord: {!r}.'
message = message.format(note_or_chord)
raise TypeError(message)
def transpose_from_written_pitch_to_sounding_pitch(self, note_or_chord):
        r'''Transposes `note_or_chord` from written pitch of instrument to sounding
pitch of instrument.
Returns `note_or_chord` with adjusted pitches.
'''
from abjad.tools import scoretools
sounding_pitch = self.sounding_pitch_of_written_middle_c
index = pitchtools.NamedPitch('C4') - sounding_pitch
if isinstance(note_or_chord, scoretools.Note):
note_or_chord.written_pitch = \
pitchtools.transpose_pitch_carrier_by_interval(
note_or_chord.written_pitch, index)
elif isinstance(note_or_chord, scoretools.Chord):
pitches = [
pitchtools.transpose_pitch_carrier_by_interval(pitch, index)
for pitch in note_or_chord.written_pitches
]
note_or_chord.written_pitches = pitches | mscuthbert/abjad | abjad/tools/instrumenttools/Instrument.py | Python | gpl-3.0 | 14,020 |
"""STAFF_ADMIN role stands alone - remove STAFF role from current STAFF_ADMINs
Revision ID: ed4283df2db5
Revises: 30f20e54eb5c
Create Date: 2020-08-31 11:12:30.249107
"""
from alembic import op
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import text
from portal.models.role import ROLE, Role
from portal.models.user import UserRoles
# revision identifiers, used by Alembic.
revision = 'ed4283df2db5'
down_revision = '30f20e54eb5c'
Session = sessionmaker()
def all_staff_admins(session):
"""returns list of all staff admins user ids"""
staff_admin_role_id = session.query(Role).filter(
Role.name == ROLE.STAFF_ADMIN.value).with_entities(Role.id).one()
return [ur.user_id for ur in session.query(UserRoles).filter(
UserRoles.role_id == staff_admin_role_id)]
def staff_role_id(session):
"""return staff role id"""
return session.query(Role).filter(
Role.name == ROLE.STAFF.value).with_entities(Role.id).one()[0]
def upgrade():
"""Remove the STAFF role from existing STAFF_ADMIN accounts"""
bind = op.get_bind()
session = Session(bind=bind)
conn = op.get_bind()
conn.execute(text(
"DELETE FROM user_roles WHERE role_id = :staff_id and"
" user_id in :staff_admin_ids"),
staff_id=staff_role_id(session),
staff_admin_ids=tuple(all_staff_admins(session)))
def downgrade():
"""Reapply the STAFF role to existing STAFF_ADMIN accounts"""
bind = op.get_bind()
session = Session(bind=bind)
conn = op.get_bind()
    staff_id = staff_role_id(session)
for user_id in all_staff_admins(session):
conn.execute(text(
"INSERT INTO user_roles (user_id, role_id) VALUES "
"(:user_id, :role_id)"),
user_id=user_id,
role_id=staff_id)
| uwcirg/true_nth_usa_portal | portal/migrations/versions/ed4283df2db5_.py | Python | bsd-3-clause | 1,805 |
import sys,time,requests,json
from netaddr import *
#NetBox API Token
headers = {'Authorization': 'Token YOUR_API_TOKEN'}
def ipv4_stage():
    # NetBox API call to retrieve the list of test prefixes under 169.254.0.0/20.
stage_supernet = '169.254.0.0/20'
stage_set = set()
api_link = 'https://netbox-url.net/api/ipam/prefixes/?parent=169.254.0.0/20&status=1'
api_init = requests.get(api_link, headers=headers)
s_list = json.loads(api_init.text)
for prefixes in s_list[u'results']:
#print(prefixes[u'prefix'])
stage_set.add(prefixes[u'prefix'])
if stage_supernet in stage_set: stage_set.remove(stage_supernet)
ipv4_addr_space = IPSet(['169.254.0.0/20'])
available = ipv4_addr_space ^ IPSet(stage_set)
AWS_peer_ip = list(available)[1]
S_peer_ip = list(available)[2]
bgp_peers = [str(AWS_peer_ip) + '/30', str(S_peer_ip) + '/30']
return bgp_peers
def ipv4_prod():
    # Initial API call to retrieve production 169.254.16.0/20 subnets.
prod_supernet = '169.254.16.0/20'
prod_set = set()
api_link = 'https://netbox-url.net/api/ipam/prefixes/?parent=169.254.16.0/20&status=1'
api_init = requests.get(api_link, headers=headers)
p_list = json.loads(api_init.text)
for prefixes in p_list[u'results']:
#print(prefixes[u'prefix'])
prod_set.add(prefixes[u'prefix'])
if prod_supernet in prod_set: prod_set.remove(prod_supernet)
ipv4_addr_space = IPSet(['169.254.16.0/20'])
available = ipv4_addr_space ^ IPSet(prod_set)
AWS_peer_ip = list(available)[1]
P_peer_ip = list(available)[2]
bgp_peers = [str(AWS_peer_ip) + '/30', str(P_peer_ip) + '/30']
return bgp_peers
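# Minimal usage sketch: each helper returns [aws_peer, customer_peer] as
# /30-suffixed addresses. Assumes the NetBox instance above is reachable
# and a real API token has been substituted for the placeholder; the
# printed values are only examples.
if __name__ == '__main__':
    print(ipv4_stage())  # e.g. ['169.254.0.1/30', '169.254.0.2/30']
    print(ipv4_prod())   # e.g. ['169.254.16.1/30', '169.254.16.2/30']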
| rkutsel/netbox-scripts | aws-direct-connect/api.py | Python | mit | 1,666 |
import mock
import unittest
from ice import steam_shortcut_synchronizer
from ice.rom import ICE_FLAG_TAG
from pysteam.shortcut import Shortcut
class SteamShortcutSynchronizerTests(unittest.TestCase):
def setUp(self):
self.mock_user = mock.MagicMock()
self.mock_archive = mock.MagicMock()
self.mock_logger = mock.MagicMock()
self.synchronizer = steam_shortcut_synchronizer.SteamShortcutSynchronizer(self.mock_archive, self.mock_logger)
def test_unmanaged_shortcuts_returns_shortcut_not_affiliated_with_ice(self):
random_shortcut = Shortcut("Plex", "/Some/Random/Path/plex", "/Some/Random/Path")
unmanaged = self.synchronizer.unmanaged_shortcuts([],[random_shortcut])
self.assertEquals(unmanaged, [random_shortcut])
def test_unmanaged_shortcuts_doesnt_return_shortcut_with_flag_tag(self):
tagged_shortcut = Shortcut("Game", "/Path/to/game", "/Path/to", "", ICE_FLAG_TAG)
unmanaged = self.synchronizer.unmanaged_shortcuts([],[tagged_shortcut])
self.assertEquals(unmanaged, [])
def test_unmanaged_shortcuts_doesnt_return_shortcut_with_appid_in_managed_ids(self):
managed_shortcut = Shortcut("Game", "/Path/to/game", "/Path/to", "")
random_shortcut = Shortcut("Plex", "/Some/Random/Path/plex", "/Some/Random/Path")
managed_ids = [managed_shortcut.appid()]
shortcuts = [managed_shortcut, random_shortcut]
unmanaged = self.synchronizer.unmanaged_shortcuts(managed_ids,shortcuts)
self.assertEquals(unmanaged, [random_shortcut])
def test_added_shortcuts_doesnt_return_shortcuts_that_still_exist(self):
shortcut1 = Shortcut("Game1", "/Path/to/game1", "/Path/to", "", ICE_FLAG_TAG)
shortcut2 = Shortcut("Game2", "/Path/to/game2", "/Path/to", "", ICE_FLAG_TAG)
old = [shortcut1, shortcut2]
new = [shortcut1, shortcut2]
self.assertEquals(self.synchronizer.added_shortcuts(old, new), [])
def test_added_shortcuts_returns_shortcuts_that_didnt_exist_previously(self):
shortcut1 = Shortcut("Game1", "/Path/to/game1", "/Path/to", "", ICE_FLAG_TAG)
shortcut2 = Shortcut("Game2", "/Path/to/game2", "/Path/to", "", ICE_FLAG_TAG)
new = [shortcut1, shortcut2]
self.assertEquals(self.synchronizer.added_shortcuts([], new), [shortcut1, shortcut2])
def test_added_shortcuts_only_returns_shortcuts_that_exist_now_but_not_before(self):
shortcut1 = Shortcut("Game1", "/Path/to/game1", "/Path/to", "", ICE_FLAG_TAG)
shortcut2 = Shortcut("Game2", "/Path/to/game2", "/Path/to", "", ICE_FLAG_TAG)
shortcut3 = Shortcut("Game3", "/Path/to/game3", "/Path/to", "", ICE_FLAG_TAG)
shortcut4 = Shortcut("Game4", "/Path/to/game4", "/Path/to", "", ICE_FLAG_TAG)
old = [shortcut1, shortcut2]
new = [shortcut1, shortcut2, shortcut3, shortcut4]
self.assertEquals(self.synchronizer.added_shortcuts(old, new), [shortcut3, shortcut4])
def test_removed_shortcuts_doesnt_return_shortcuts_that_still_exist(self):
shortcut1 = Shortcut("Game1", "/Path/to/game1", "/Path/to", "", ICE_FLAG_TAG)
shortcut2 = Shortcut("Game2", "/Path/to/game2", "/Path/to", "", ICE_FLAG_TAG)
old = [shortcut1, shortcut2]
new = [shortcut1, shortcut2]
self.assertEquals(self.synchronizer.removed_shortcuts(old, new), [])
def test_removed_shortcuts_returns_shortcuts_that_dont_exist_anymore(self):
shortcut1 = Shortcut("Game1", "/Path/to/game1", "/Path/to", "", ICE_FLAG_TAG)
shortcut2 = Shortcut("Game2", "/Path/to/game2", "/Path/to", "", ICE_FLAG_TAG)
old = [shortcut1, shortcut2]
new = []
self.assertEquals(self.synchronizer.removed_shortcuts(old, new), [shortcut1, shortcut2])
def test_removed_shortcuts_only_returns_shortcuts_that_dont_exist_now_but_did_before(self):
shortcut1 = Shortcut("Game1", "/Path/to/game1", "/Path/to", "", ICE_FLAG_TAG)
shortcut2 = Shortcut("Game2", "/Path/to/game2", "/Path/to", "", ICE_FLAG_TAG)
shortcut3 = Shortcut("Game3", "/Path/to/game3", "/Path/to", "", ICE_FLAG_TAG)
shortcut4 = Shortcut("Game4", "/Path/to/game4", "/Path/to", "", ICE_FLAG_TAG)
old = [shortcut1, shortcut2, shortcut3, shortcut4]
new = [shortcut1, shortcut2]
self.assertEquals(self.synchronizer.removed_shortcuts(old, new), [shortcut3, shortcut4])
def test_sync_roms_for_user_keeps_unmanaged_shortcuts(self):
random_shortcut = Shortcut("Plex", "/Some/Random/Path/plex", "/Some/Random/Path")
self.mock_user.shortcuts = [random_shortcut]
shortcut1 = Shortcut("Game1", "/Path/to/game1", "/Path/to", "", ICE_FLAG_TAG)
shortcut2 = Shortcut("Game2", "/Path/to/game2", "/Path/to", "", ICE_FLAG_TAG)
shortcut3 = Shortcut("Game3", "/Path/to/game3", "/Path/to", "", ICE_FLAG_TAG)
shortcut4 = Shortcut("Game4", "/Path/to/game4", "/Path/to", "", ICE_FLAG_TAG)
rom1 = mock.MagicMock()
rom1.to_shortcut.return_value = shortcut1
rom2 = mock.MagicMock()
rom2.to_shortcut.return_value = shortcut2
rom3 = mock.MagicMock()
rom3.to_shortcut.return_value = shortcut3
rom4 = mock.MagicMock()
rom4.to_shortcut.return_value = shortcut4
self.synchronizer.sync_roms_for_user(self.mock_user, [rom1, rom2, rom3, rom4])
new_shortcuts = self.mock_user.shortcuts
self.assertEquals(len(new_shortcuts), 5)
self.assertIn(random_shortcut, new_shortcuts)
self.assertIn(shortcut1, new_shortcuts)
self.assertIn(shortcut2, new_shortcuts)
self.assertIn(shortcut3, new_shortcuts)
self.assertIn(shortcut4, new_shortcuts)
def test_sync_roms_for_user_logs_when_a_rom_is_added(self):
shortcut = Shortcut("Game", "/Path/to/game", "/Path/to", "", ICE_FLAG_TAG)
rom = mock.MagicMock()
rom.to_shortcut.return_value = shortcut
self.synchronizer.sync_roms_for_user(self.mock_user, [rom])
self.assertTrue(self.mock_logger.info.called)
def test_sync_roms_for_user_logs_once_for_each_added_rom(self):
shortcut1 = Shortcut("Game1", "/Path/to/game1", "/Path/to", "", ICE_FLAG_TAG)
rom1 = mock.MagicMock()
rom1.to_shortcut.return_value = shortcut1
shortcut2 = Shortcut("Game2", "/Path/to/game2", "/Path/to", "", ICE_FLAG_TAG)
rom2 = mock.MagicMock()
rom2.to_shortcut.return_value = shortcut2
shortcut3 = Shortcut("Game3", "/Path/to/game3", "/Path/to", "", ICE_FLAG_TAG)
rom3 = mock.MagicMock()
rom3.to_shortcut.return_value = shortcut3
self.synchronizer.sync_roms_for_user(self.mock_user, [rom1, rom2, rom3])
self.assertEquals(self.mock_logger.info.call_count, 3)
def test_sync_roms_for_user_logs_when_a_rom_is_removed(self):
shortcut = Shortcut("Game", "/Path/to/game", "/Path/to", "", ICE_FLAG_TAG)
self.mock_user.shortcuts = [shortcut]
self.synchronizer.sync_roms_for_user(self.mock_user, [])
self.assertTrue(self.mock_logger.info.called)
def test_sync_roms_for_user_logs_once_for_each_removed_rom(self):
shortcut1 = Shortcut("Game1", "/Path/to/game1", "/Path/to", "", ICE_FLAG_TAG)
shortcut2 = Shortcut("Game2", "/Path/to/game2", "/Path/to", "", ICE_FLAG_TAG)
shortcut3 = Shortcut("Game3", "/Path/to/game3", "/Path/to", "", ICE_FLAG_TAG)
self.mock_user.shortcuts = [shortcut1, shortcut2, shortcut3]
self.synchronizer.sync_roms_for_user(self.mock_user, [])
self.assertEquals(self.mock_logger.info.call_count, 3)
def test_sync_roms_for_user_both_adds_and_removes_roms(self):
shortcut1 = Shortcut("Game1", "/Path/to/game1", "/Path/to", "", ICE_FLAG_TAG)
shortcut2 = Shortcut("Game2", "/Path/to/game2", "/Path/to", "", ICE_FLAG_TAG)
shortcut3 = Shortcut("Game3", "/Path/to/game3", "/Path/to", "", ICE_FLAG_TAG)
shortcut4 = Shortcut("Game4", "/Path/to/game4", "/Path/to", "", ICE_FLAG_TAG)
rom1 = mock.MagicMock()
rom1.to_shortcut.return_value = shortcut1
rom2 = mock.MagicMock()
rom2.to_shortcut.return_value = shortcut2
rom3 = mock.MagicMock()
rom3.to_shortcut.return_value = shortcut3
old_shortcuts = [shortcut1, shortcut2, shortcut4]
self.mock_user.shortcuts = old_shortcuts
self.synchronizer.sync_roms_for_user(self.mock_user, [rom1, rom2, rom3])
new_shortcuts = self.mock_user.shortcuts
self.assertEquals(self.mock_logger.info.call_count, 2)
self.assertEquals(len(new_shortcuts), 3)
self.assertIn(shortcut1, new_shortcuts)
self.assertIn(shortcut2, new_shortcuts)
self.assertIn(shortcut3, new_shortcuts)
def test_sync_roms_for_user_saves_shortcuts_after_running(self):
shortcut1 = Shortcut("Game1", "/Path/to/game1", "/Path/to", "", ICE_FLAG_TAG)
rom1 = mock.MagicMock()
rom1.to_shortcut.return_value = shortcut1
self.mock_user.shortcuts = []
self.synchronizer.sync_roms_for_user(self.mock_user, [rom1])
self.assertEquals(self.mock_user.shortcuts, [shortcut1])
self.assertTrue(self.mock_user.save_shortcuts.called)
def test_sync_roms_for_user_sets_managed_ids(self):
shortcut1 = Shortcut("Game1", "/Path/to/game1", "/Path/to", "")
shortcut2 = Shortcut("Game2", "/Path/to/game2", "/Path/to", "")
rom1 = mock.MagicMock()
rom1.to_shortcut.return_value = shortcut1
rom2 = mock.MagicMock()
rom2.to_shortcut.return_value = shortcut2
self.synchronizer.sync_roms_for_user(self.mock_user, [rom1, rom2])
new_managed_ids = [shortcut1.appid(), shortcut2.appid()]
self.mock_archive.set_managed_ids.assert_called_with(self.mock_user, new_managed_ids)
| rdoyle1978/Ice | src/ice/tests/steam_shortcut_synchronizer_tests.py | Python | mit | 9,315 |
import logging
import numpy as np
import numpy.ma as ma
import pandas as pd
from bson import Binary, SON
from .._compression import compress, decompress, compress_array
from ._serializer import Serializer
DATA = 'd'
MASK = 'm'
TYPE = 't'
DTYPE = 'dt'
COLUMNS = 'c'
INDEX = 'i'
METADATA = 'md'
LENGTHS = 'ln'
class FrameConverter(object):
"""
Converts a Pandas Dataframe to and from PyMongo SON representation:
{
METADATA: {
COLUMNS: [col1, col2, ...] list of str
MASKS: {col1: mask, col2: mask, ...} dict of str: Binary
INDEX: [idx1, idx2, ...] list of str
TYPE: 'series' or 'dataframe'
LENGTHS: {col1: len, col2: len, ...} dict of str: int
}
DATA: BINARY(....) Compressed columns concatenated together
}
"""
def _convert_types(self, a):
"""
Converts object arrays of strings to numpy string arrays
"""
# No conversion for scalar type
if a.dtype != 'object':
return a, None
# We can't infer the type of an empty array, so just
# assume strings
if len(a) == 0:
return a.astype('U1'), None
# Compute a mask of missing values. Replace NaNs and Nones with
# empty strings so that type inference has a chance.
mask = pd.isnull(a)
if mask.sum() > 0:
a = a.copy()
np.putmask(a, mask, '')
else:
mask = None
if pd.lib.infer_dtype(a) == 'mixed':
a = np.array([s.encode('ascii') for s in a])
a = a.astype('O')
type_ = pd.lib.infer_dtype(a)
if type_ in ['unicode', 'string']:
max_len = pd.lib.max_len_string_array(a)
return a.astype('U{:d}'.format(max_len)), mask
else:
raise ValueError('Cannot store arrays with {} dtype'.format(type_))
def docify(self, df):
"""
Convert a Pandas DataFrame to SON.
Parameters
----------
df: DataFrame
The Pandas DataFrame to encode
"""
dtypes = {}
masks = {}
lengths = {}
columns = []
data = Binary(b'')
start = 0
arrays = []
for c in df:
try:
columns.append(str(c))
arr, mask = self._convert_types(df[c].values)
dtypes[str(c)] = arr.dtype.str
if mask is not None:
masks[str(c)] = Binary(compress(mask.tostring()))
arrays.append(arr.tostring())
except Exception as e:
typ = pd.lib.infer_dtype(df[c])
msg = "Column '{}' type is {}".format(str(c), typ)
logging.info(msg)
raise e
arrays = compress_array(arrays)
for index, c in enumerate(df):
d = Binary(arrays[index])
lengths[str(c)] = (start, start + len(d) - 1)
start += len(d)
data += d
doc = SON({DATA: data, METADATA: {}})
doc[METADATA] = {COLUMNS: columns,
MASK: masks,
LENGTHS: lengths,
DTYPE: dtypes
}
return doc
def objify(self, doc, columns=None):
"""
Decode a Pymongo SON object into an Pandas DataFrame
"""
cols = columns or doc[METADATA][COLUMNS]
data = {}
for col in cols:
d = decompress(doc[DATA][doc[METADATA][LENGTHS][col][0]: doc[METADATA][LENGTHS][col][1] + 1])
d = np.fromstring(d, doc[METADATA][DTYPE][col])
if MASK in doc[METADATA] and col in doc[METADATA][MASK]:
mask_data = decompress(doc[METADATA][MASK][col])
mask = np.fromstring(mask_data, 'bool')
d = ma.masked_array(d, mask)
data[col] = d
return pd.DataFrame(data, columns=cols)[cols]
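# Round-trip sketch (illustrative; any DataFrame whose columns hold numeric
# or string data):
#
#   converter = FrameConverter()
#   frame = pd.DataFrame({'a': [1.0, 2.0], 'b': ['x', 'y']})
#   doc = converter.docify(frame)
#   restored = converter.objify(doc)  # same columns and values as `frame`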
class FrametoArraySerializer(Serializer):
TYPE = 'FrameToArray'
def __init__(self):
self.converter = FrameConverter()
def serialize(self, df):
if isinstance(df, pd.Series):
dtype = 'series'
df = df.to_frame()
else:
dtype = 'dataframe'
if (len(df.index.names) > 1 and None in df.index.names) or None in list(df.columns.values):
raise Exception("All columns and indexes must be named")
if df.index.names != [None]:
index = df.index.names
df = df.reset_index()
ret = self.converter.docify(df)
ret[METADATA][INDEX] = index
ret[METADATA][TYPE] = dtype
return ret
ret = self.converter.docify(df)
ret[METADATA][TYPE] = dtype
return ret
def deserialize(self, data, columns=None):
'''
Deserializes SON to a DataFrame
Parameters
----------
data: SON data
columns: None, or list of strings
optionally you can deserialize a subset of the data in the SON. Index
columns are ALWAYS deserialized, and should not be specified
Returns
-------
pandas dataframe or series
'''
if data == []:
return pd.DataFrame()
meta = data[0][METADATA] if isinstance(data, list) else data[METADATA]
index = INDEX in meta
if columns:
if index:
columns.extend(meta[INDEX])
if len(columns) > len(set(columns)):
raise Exception("Duplicate columns specified, cannot de-serialize")
if not isinstance(data, list):
df = self.converter.objify(data, columns)
else:
df = pd.concat([self.converter.objify(d, columns) for d in data], ignore_index=not index)
if index:
df = df.set_index(meta[INDEX])
if meta[TYPE] == 'series':
return df[df.columns[0]]
return df
def combine(self, a, b):
if a.index.names != [None]:
return pd.concat([a, b]).sort_index()
return pd.concat([a, b])
| Frankkkkk/arctic | arctic/serialization/numpy_arrays.py | Python | lgpl-2.1 | 6,258 |
from django.apps import AppConfig
class DjangoMultiimapConfig(AppConfig):
name = 'django_multiimap'
| OpenInfoporto/django_multiimap | django_multiimap/apps.py | Python | gpl-2.0 | 106 |
"""
Gregory Way 2016
NF1 Inactivation Classifier for Glioblastoma
scripts/process_rnaseq.py
Cleans up the downloaded RNAseq data and zero-one normalizes
Usage:
Run only once by run_pipeline.sh
Output:
Normalized PCL file with genes as rows and sample IDs as columns and MAD genes
if option is selected
"""
import os
import pandas as pd
from sklearn import preprocessing
from statsmodels.robust import scale
import argparse
def normalize_data(data, out_file, mad=False,
mad_file=os.path.join('tables', 'full_mad_genes.tsv'),
output=True,
method='minmax'):
"""
Filters unidentified genes and normalizes each input gene expression matrix
Arguments:
:param data: pandas DataFrame genes as rows and sample IDs as columns
:param out_file: the file name to write normalized matrix
:param mad: boolean indicating if MAD genes should be output to file
:param mad_file: the file name to write mad genes
:param method: the type of scaling to perform (defaults to minmax)
Output:
Writes normalized matrix (if output=True) and mad genes to file
(if mad=True); returns the normalized matrix if output=False
"""
# Drop all row names with unidentified gene
    data = data[~data.index.str.contains('?', regex=False)]
# Sort data by gene name
data = data.sort_index()
# Zero-one normalize
if method == 'minmax':
min_max_scaler = preprocessing.MinMaxScaler()
data_normalize = min_max_scaler.fit_transform(data.T)
elif method == 'zscore':
data_normalize = preprocessing.scale(data.T, axis=0)
data_normalize = pd.DataFrame(data_normalize, index=data.columns,
columns=data.index).T
    # Write out MAD genes first so they are saved even when output=False
    if mad:
        all_mad_genes = scale.mad(data_normalize, c=1, axis=1)
        all_mad_genes = pd.Series(all_mad_genes,
                                  index=data_normalize.index.values)
        all_mad_genes = all_mad_genes.sort_values(ascending=False)
        all_mad_genes.to_csv(mad_file, sep='\t', header=False)
    # Write to file
    if output:
        data_normalize.to_csv(out_file, sep='\t', header=True, index=True)
    else:
        return data_normalize
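# Illustrative call (the paths are hypothetical; the pipeline below builds
# them from the --tissue argument):
#   df = pd.read_csv('data/X/raw/GBM_RNAseq_X_matrix.tsv',
#                    delimiter='\t', index_col=0)
#   normalize_data(df, 'data/X/normalized/GBM_X_normalized.tsv', mad=True)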
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--tissue", dest="tissues",
help="tissues to normalize")
parser.add_argument("-m", "--madgenes", dest="mad",
help="boolean to output mad genes", default=True)
parser.add_argument("-c", "--concatenate", dest="concatenate",
help="boolean to normalize together", default=False)
parser.add_argument("-s", "--scale", dest="scale_x",
help="method to scale x matrix", default='minmax')
args = parser.parse_args()
tissues = args.tissues.split(',')
concatenate = args.concatenate
mad = args.mad
scale_x = args.scale_x
if concatenate == 'all':
pancan_file = args.tissues.replace(',', '-')
elif concatenate:
pancan_file = concatenate.replace(',', '-')
# Load Data
tissue_list = []
for tissue in tissues:
raw_file = os.path.join('data', 'X', 'raw',
'{}_RNAseq_X_matrix.tsv'.format(tissue))
out_file = os.path.join('data', 'X', 'normalized',
'{}_X_normalized.tsv'.format(tissue))
raw_df = pd.read_csv(raw_file, delimiter='\t', index_col=0)
if concatenate:
if concatenate == 'all':
tissue_list.append(raw_df)
elif tissue in concatenate.split(','):
tissue_list.append(raw_df)
else:
normalize_data(raw_df, out_file, mad, method=scale_x)
if concatenate:
pancan_file = os.path.join('data', 'X', 'normalized',
'{}_X_normalized.tsv'.format(pancan_file))
raw_df = pd.concat(tissue_list, axis=1)
normalize_data(raw_df, pancan_file)
| gwaygenomics/nf1_inactivation | scripts/process_rnaseq.py | Python | bsd-3-clause | 4,077 |
import unittest
from bolt.core.plugin import Plugin
from bolt import interval
from bolt import Bot
import yaml
class TestIntervalPlugin(Plugin):
@interval(60)
def intervaltest(self):
pass
class TestInterval(unittest.TestCase):
def setUp(self):
self.config_file = "/tmp/bolt-test-config.yaml"
fake_config = {
"api_key": "1234",
"log_dir": "/tmp/"
}
with open(self.config_file, "w") as tempconfig:
tempconfig.write(yaml.dump(fake_config))
def test_interval_decos(self):
bot = Bot(self.config_file)
plugin = TestIntervalPlugin(bot)
plugin.load()
self.assertTrue(len(plugin.intervals) > 0)
def test_interval_will_run(self):
bot = Bot(self.config_file)
plugin = TestIntervalPlugin(bot)
plugin.load()
self.assertTrue(plugin.intervals[0].ready())
| Arcbot-Org/Arcbot | tests/core/test_interval.py | Python | gpl-3.0 | 910 |
#!/usr/bin/env python
def coreference_cluster_match(gold, auto):
if len(gold) != len(auto):
return False
for gcluster in gold:
matched = False
for acluster in auto:
if acluster == gcluster:
matched = True
break
if not matched:
return False
return True
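# Example: clusters are matched by set equality, regardless of order:
#   coreference_cluster_match([{1, 2}, {3}], [{3}, {1, 2}])  -> True
#   coreference_cluster_match([{1, 2}], [{1, 2}, {3}])       -> False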
def calc_prf(match, gold, test):
'''Calculate Precision, Recall and F-Score, with:
True Positive = match
False Positive = test - match
False Negative = gold - match
>>> print calc_prf(0, 0, 0)
(1.0, 1.0, 1.0)
>>> print calc_prf(0, 0, 5)
(0.0, 1.0, 0.0)
>>> print calc_prf(0, 4, 5)
(0.0, 0.0, 0.0)
>>> print calc_prf(0, 4, 0)
(0.0, 0.0, 0.0)
>>> print calc_prf(2, 2, 8)
(0.25, 1.0, 0.4)
'''
if gold == 0:
if test == 0:
return 1.0, 1.0, 1.0
return 0.0, 1.0, 0.0
if test == 0 or match == 0:
return 0.0, 0.0, 0.0
p = match / float(test)
r = match / float(gold)
try:
f = 2 * match / (float(test + gold))
return p, r, f
except:
return 0.0, 0.0, 0.0
if __name__ == "__main__":
print "Running doctest"
import doctest
doctest.testmod()
| jkkummerfeld/1ec-graph-parser | evaluation/nlp_util/nlp_eval.py | Python | isc | 1,048 |
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import * # NOQA
from future import standard_library
standard_library.install_aliases() # NOQA
import copy
from logging import getLogger
import chainer
from chainer import cuda
import chainer.functions as F
from chainerrl import agent
from chainerrl.misc.batch_states import batch_states
from chainerrl.misc.copy_param import synchronize_parameters
from chainerrl.replay_buffer import batch_experiences
from chainerrl.replay_buffer import batch_recurrent_experiences
from chainerrl.replay_buffer import ReplayUpdater
def compute_value_loss(y, t, clip_delta=True, batch_accumulator='mean'):
"""Compute a loss for value prediction problem.
Args:
y (Variable or ndarray): Predicted values.
t (Variable or ndarray): Target values.
clip_delta (bool): Use the Huber loss function if set True.
batch_accumulator (str): 'mean' or 'sum'. 'mean' will use the mean of
the loss values in a batch. 'sum' will use the sum.
Returns:
(Variable) scalar loss
"""
assert batch_accumulator in ('mean', 'sum')
y = F.reshape(y, (-1, 1))
t = F.reshape(t, (-1, 1))
if clip_delta:
loss_sum = F.sum(F.huber_loss(y, t, delta=1.0))
if batch_accumulator == 'mean':
loss = loss_sum / y.shape[0]
elif batch_accumulator == 'sum':
loss = loss_sum
else:
loss_mean = F.mean_squared_error(y, t) / 2
if batch_accumulator == 'mean':
loss = loss_mean
elif batch_accumulator == 'sum':
loss = loss_mean * y.shape[0]
return loss
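# Worked example (illustrative): with y = [1., 2.], t = [1., 4.] and
# clip_delta=True, the per-element Huber losses (delta=1) are 0 and 1.5,
# so batch_accumulator='mean' yields 0.75 and 'sum' yields 1.5.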
def compute_weighted_value_loss(y, t, weights,
clip_delta=True, batch_accumulator='mean'):
"""Compute a loss for value prediction problem.
Args:
y (Variable or ndarray): Predicted values.
t (Variable or ndarray): Target values.
weights (ndarray): Weights for y, t.
clip_delta (bool): Use the Huber loss function if set True.
batch_accumulator (str): 'mean' will divide loss by batchsize
Returns:
(Variable) scalar loss
"""
assert batch_accumulator in ('mean', 'sum')
y = F.reshape(y, (-1, 1))
t = F.reshape(t, (-1, 1))
if clip_delta:
losses = F.huber_loss(y, t, delta=1.0)
else:
losses = F.square(y - t) / 2
losses = F.reshape(losses, (-1,))
loss_sum = F.sum(losses * weights)
if batch_accumulator == 'mean':
loss = loss_sum / y.shape[0]
elif batch_accumulator == 'sum':
loss = loss_sum
return loss
def _batch_reset_recurrent_states_when_episodes_end(
model, batch_done, batch_reset, recurrent_states):
"""Reset recurrent states when episodes end.
Args:
model (chainer.Link): Model that implements `StatelessRecurrent`.
batch_done (array-like of bool): True iff episodes are terminal.
batch_reset (array-like of bool): True iff episodes will be reset.
recurrent_states (object): Recurrent state.
Returns:
object: New recurrent states.
"""
indices_that_ended = [
i for i, (done, reset)
in enumerate(zip(batch_done, batch_reset)) if done or reset]
if indices_that_ended:
return model.mask_recurrent_state_at(
recurrent_states, indices_that_ended)
else:
return recurrent_states
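# E.g. with batch_done=[True, False] and batch_reset=[False, False], only the
# recurrent state at index 0 is masked; if no episode ended or was reset, the
# recurrent states are returned unchanged.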
class DQN(agent.AttributeSavingMixin, agent.BatchAgent):
"""Deep Q-Network algorithm.
Args:
q_function (StateQFunction): Q-function
optimizer (Optimizer): Optimizer that is already setup
replay_buffer (ReplayBuffer): Replay buffer
gamma (float): Discount factor
explorer (Explorer): Explorer that specifies an exploration strategy.
gpu (int): GPU device id if not None nor negative.
replay_start_size (int): if the replay buffer's size is less than
replay_start_size, skip update
minibatch_size (int): Minibatch size
update_interval (int): Model update interval in step
target_update_interval (int): Target model update interval in step
clip_delta (bool): Clip delta if set True
phi (callable): Feature extractor applied to observations
target_update_method (str): 'hard' or 'soft'.
soft_update_tau (float): Tau of soft target update.
n_times_update (int): Number of repetition of update
average_q_decay (float): Decay rate of average Q, only used for
recording statistics
average_loss_decay (float): Decay rate of average loss, only used for
recording statistics
batch_accumulator (str): 'mean' or 'sum'
episodic_update_len (int or None): Subsequences of this length are used
for update if set int and episodic_update=True
logger (Logger): Logger used
batch_states (callable): method which makes a batch of observations.
default is `chainerrl.misc.batch_states.batch_states`
recurrent (bool): If set to True, `model` is assumed to implement
`chainerrl.links.StatelessRecurrent` and is updated in a recurrent
manner.
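    Example (illustrative sketch; these chainerrl helper classes exist, but
    the exact arguments shown are assumptions, so the snippet is skipped
    under doctest):
        >>> import chainer  # doctest: +SKIP
        >>> import chainerrl  # doctest: +SKIP
        >>> q_func = chainerrl.q_functions.FCStateQFunctionWithDiscreteAction(
        ...     4, 2, n_hidden_channels=32, n_hidden_layers=2)  # doctest: +SKIP
        >>> opt = chainer.optimizers.Adam()  # doctest: +SKIP
        >>> opt.setup(q_func)  # doctest: +SKIP
        >>> agent = DQN(
        ...     q_func, opt, chainerrl.replay_buffer.ReplayBuffer(10 ** 4),
        ...     gamma=0.99,
        ...     explorer=chainerrl.explorers.ConstantEpsilonGreedy(
        ...         0.1, lambda: 0))  # doctest: +SKIP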
"""
saved_attributes = ('model', 'target_model', 'optimizer')
def __init__(self, q_function, optimizer, replay_buffer, gamma,
explorer, gpu=None, replay_start_size=50000,
minibatch_size=32, update_interval=1,
target_update_interval=10000, clip_delta=True,
phi=lambda x: x,
target_update_method='hard',
soft_update_tau=1e-2,
n_times_update=1, average_q_decay=0.999,
average_loss_decay=0.99,
batch_accumulator='mean',
episodic_update_len=None,
logger=getLogger(__name__),
batch_states=batch_states,
recurrent=False,
):
self.model = q_function
self.q_function = q_function # For backward compatibility
if gpu is not None and gpu >= 0:
cuda.get_device_from_id(gpu).use()
self.model.to_gpu(device=gpu)
self.xp = self.model.xp
self.replay_buffer = replay_buffer
self.optimizer = optimizer
self.gamma = gamma
self.explorer = explorer
self.gpu = gpu
self.target_update_interval = target_update_interval
self.clip_delta = clip_delta
self.phi = phi
self.target_update_method = target_update_method
self.soft_update_tau = soft_update_tau
self.batch_accumulator = batch_accumulator
assert batch_accumulator in ('mean', 'sum')
self.logger = logger
self.batch_states = batch_states
self.recurrent = recurrent
if self.recurrent:
update_func = self.update_from_episodes
else:
update_func = self.update
self.replay_updater = ReplayUpdater(
replay_buffer=replay_buffer,
update_func=update_func,
batchsize=minibatch_size,
episodic_update=recurrent,
episodic_update_len=episodic_update_len,
n_times_update=n_times_update,
replay_start_size=replay_start_size,
update_interval=update_interval,
)
self.t = 0
self.last_state = None
self.last_action = None
self.target_model = None
self.sync_target_network()
# For backward compatibility
self.target_q_function = self.target_model
self.average_q = 0
self.average_q_decay = average_q_decay
self.average_loss = 0
self.average_loss_decay = average_loss_decay
# Recurrent states of the model
self.train_recurrent_states = None
self.train_prev_recurrent_states = None
self.test_recurrent_states = None
# Error checking
if (self.replay_buffer.capacity is not None and
self.replay_buffer.capacity <
self.replay_updater.replay_start_size):
raise ValueError(
'Replay start size cannot exceed '
'replay buffer capacity.')
def sync_target_network(self):
"""Synchronize target network with current network."""
if self.target_model is None:
self.target_model = copy.deepcopy(self.model)
call_orig = self.target_model.__call__
def call_test(self_, x):
with chainer.using_config('train', False):
return call_orig(self_, x)
self.target_model.__call__ = call_test
else:
synchronize_parameters(
src=self.model,
dst=self.target_model,
method=self.target_update_method,
tau=self.soft_update_tau)
def update(self, experiences, errors_out=None):
"""Update the model from experiences
Args:
experiences (list): List of lists of dicts.
For DQN, each dict must contains:
- state (object): State
- action (object): Action
- reward (float): Reward
- is_state_terminal (bool): True iff next state is terminal
- next_state (object): Next state
- weight (float, optional): Weight coefficient. It can be
used for importance sampling.
errors_out (list or None): If set to a list, then TD-errors
computed from the given experiences are appended to the list.
Returns:
None
"""
has_weight = 'weight' in experiences[0][0]
exp_batch = batch_experiences(
experiences, xp=self.xp,
phi=self.phi, gamma=self.gamma,
batch_states=self.batch_states)
if has_weight:
exp_batch['weights'] = self.xp.asarray(
                [elem[0]['weight'] for elem in experiences],
dtype=self.xp.float32)
if errors_out is None:
errors_out = []
loss = self._compute_loss(exp_batch, errors_out=errors_out)
if has_weight:
self.replay_buffer.update_errors(errors_out)
# Update stats
self.average_loss *= self.average_loss_decay
self.average_loss += (1 - self.average_loss_decay) * float(loss.array)
self.model.cleargrads()
loss.backward()
self.optimizer.update()
def update_from_episodes(self, episodes, errors_out=None):
assert errors_out is None,\
"Recurrent DQN does not support PrioritizedBuffer"
exp_batch = batch_recurrent_experiences(
episodes,
model=self.model,
xp=self.xp,
phi=self.phi, gamma=self.gamma,
batch_states=self.batch_states,
)
loss = self._compute_loss(exp_batch, errors_out=None)
# Update stats
self.average_loss *= self.average_loss_decay
self.average_loss += (1 - self.average_loss_decay) * float(loss.array)
self.optimizer.update(lambda: loss)
def _compute_target_values(self, exp_batch):
batch_next_state = exp_batch['next_state']
if self.recurrent:
target_next_qout, _ = self.target_model.n_step_forward(
batch_next_state, exp_batch['next_recurrent_state'],
output_mode='concat')
else:
target_next_qout = self.target_model(batch_next_state)
next_q_max = target_next_qout.max
batch_rewards = exp_batch['reward']
batch_terminal = exp_batch['is_state_terminal']
discount = exp_batch['discount']
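        # Bellman target: r + discount * (1 - terminal) * max_a Q_target(s', a)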
return batch_rewards + discount * (1.0 - batch_terminal) * next_q_max
def _compute_y_and_t(self, exp_batch):
batch_size = exp_batch['reward'].shape[0]
# Compute Q-values for current states
batch_state = exp_batch['state']
if self.recurrent:
qout, _ = self.model.n_step_forward(
batch_state,
exp_batch['recurrent_state'],
output_mode='concat',
)
else:
qout = self.model(batch_state)
batch_actions = exp_batch['action']
batch_q = F.reshape(qout.evaluate_actions(
batch_actions), (batch_size, 1))
with chainer.no_backprop_mode():
batch_q_target = F.reshape(
self._compute_target_values(exp_batch),
(batch_size, 1))
return batch_q, batch_q_target
def _compute_loss(self, exp_batch, errors_out=None):
"""Compute the Q-learning loss for a batch of experiences
Args:
exp_batch (dict): A dict of batched arrays of transitions
Returns:
Computed loss from the minibatch of experiences
"""
y, t = self._compute_y_and_t(exp_batch)
if errors_out is not None:
del errors_out[:]
delta = F.absolute(y - t)
if delta.ndim == 2:
delta = F.sum(delta, axis=1)
delta = cuda.to_cpu(delta.array)
for e in delta:
errors_out.append(e)
if 'weights' in exp_batch:
return compute_weighted_value_loss(
y, t, exp_batch['weights'],
clip_delta=self.clip_delta,
batch_accumulator=self.batch_accumulator)
else:
return compute_value_loss(y, t, clip_delta=self.clip_delta,
batch_accumulator=self.batch_accumulator)
def act(self, obs):
with chainer.using_config('train', False), chainer.no_backprop_mode():
action_value =\
self._evaluate_model_and_update_recurrent_states(
[obs], test=True)
q = float(action_value.max.array)
action = cuda.to_cpu(action_value.greedy_actions.array)[0]
# Update stats
self.average_q *= self.average_q_decay
self.average_q += (1 - self.average_q_decay) * q
self.logger.debug('t:%s q:%s action_value:%s', self.t, q, action_value)
return action
def act_and_train(self, obs, reward):
# Observe the consequences
if self.last_state is not None:
assert self.last_action is not None
# Add a transition to the replay buffer
transition = {
'state': self.last_state,
'action': self.last_action,
'reward': reward,
'next_state': obs,
'is_state_terminal': False,
}
if self.recurrent:
transition['recurrent_state'] =\
self.model.get_recurrent_state_at(
self.train_prev_recurrent_states,
0, unwrap_variable=True)
self.train_prev_recurrent_states = None
transition['next_recurrent_state'] =\
self.model.get_recurrent_state_at(
self.train_recurrent_states, 0, unwrap_variable=True)
self.replay_buffer.append(**transition)
# Update the target network
if self.t % self.target_update_interval == 0:
self.sync_target_network()
# Update the model
self.replay_updater.update_if_necessary(self.t)
# Choose an action
with chainer.using_config('train', False), chainer.no_backprop_mode():
action_value =\
self._evaluate_model_and_update_recurrent_states(
[obs], test=False)
q = float(action_value.max.array)
greedy_action = cuda.to_cpu(action_value.greedy_actions.array)[0]
action = self.explorer.select_action(
self.t, lambda: greedy_action, action_value=action_value)
# Update stats
self.average_q *= self.average_q_decay
self.average_q += (1 - self.average_q_decay) * q
self.t += 1
self.last_state = obs
self.last_action = action
self.logger.debug('t:%s q:%s action_value:%s', self.t, q, action_value)
self.logger.debug('t:%s r:%s a:%s', self.t, reward, action)
return self.last_action
def _evaluate_model_and_update_recurrent_states(self, batch_obs, test):
batch_xs = self.batch_states(batch_obs, self.xp, self.phi)
if self.recurrent:
if test:
batch_av, self.test_recurrent_states = self.model(
batch_xs, self.test_recurrent_states)
else:
self.train_prev_recurrent_states = self.train_recurrent_states
batch_av, self.train_recurrent_states = self.model(
batch_xs, self.train_recurrent_states)
else:
batch_av = self.model(batch_xs)
return batch_av
def batch_act_and_train(self, batch_obs):
with chainer.using_config('train', False), chainer.no_backprop_mode():
batch_av = self._evaluate_model_and_update_recurrent_states(
batch_obs, test=False)
batch_maxq = batch_av.max.array
batch_argmax = cuda.to_cpu(batch_av.greedy_actions.array)
batch_action = [
self.explorer.select_action(
self.t, lambda: batch_argmax[i],
action_value=batch_av[i:i + 1],
)
for i in range(len(batch_obs))]
self.batch_last_obs = list(batch_obs)
self.batch_last_action = list(batch_action)
# Update stats
self.average_q *= self.average_q_decay
self.average_q += (1 - self.average_q_decay) * float(batch_maxq.mean())
return batch_action
def batch_act(self, batch_obs):
with chainer.using_config('train', False), chainer.no_backprop_mode():
batch_av = self._evaluate_model_and_update_recurrent_states(
batch_obs, test=True)
batch_argmax = cuda.to_cpu(batch_av.greedy_actions.array)
return batch_argmax
def batch_observe_and_train(self, batch_obs, batch_reward,
batch_done, batch_reset):
for i in range(len(batch_obs)):
self.t += 1
# Update the target network
if self.t % self.target_update_interval == 0:
self.sync_target_network()
if self.batch_last_obs[i] is not None:
assert self.batch_last_action[i] is not None
# Add a transition to the replay buffer
transition = {
'state': self.batch_last_obs[i],
'action': self.batch_last_action[i],
'reward': batch_reward[i],
'next_state': batch_obs[i],
'next_action': None,
'is_state_terminal': batch_done[i],
}
if self.recurrent:
transition['recurrent_state'] =\
self.model.get_recurrent_state_at(
self.train_prev_recurrent_states,
i, unwrap_variable=True)
transition['next_recurrent_state'] =\
self.model.get_recurrent_state_at(
self.train_recurrent_states,
i, unwrap_variable=True)
self.replay_buffer.append(env_id=i, **transition)
if batch_reset[i] or batch_done[i]:
self.batch_last_obs[i] = None
self.batch_last_action[i] = None
self.replay_buffer.stop_current_episode(env_id=i)
self.replay_updater.update_if_necessary(self.t)
if self.recurrent:
# Reset recurrent states when episodes end
self.train_prev_recurrent_states = None
self.train_recurrent_states =\
_batch_reset_recurrent_states_when_episodes_end(
model=self.model,
batch_done=batch_done,
batch_reset=batch_reset,
recurrent_states=self.train_recurrent_states,
)
def batch_observe(self, batch_obs, batch_reward,
batch_done, batch_reset):
if self.recurrent:
# Reset recurrent states when episodes end
self.test_recurrent_states =\
_batch_reset_recurrent_states_when_episodes_end(
model=self.model,
batch_done=batch_done,
batch_reset=batch_reset,
recurrent_states=self.test_recurrent_states,
)
def stop_episode_and_train(self, state, reward, done=False):
"""Observe a terminal state and a reward.
This function must be called once when an episode terminates.
"""
assert self.last_state is not None
assert self.last_action is not None
# Add a transition to the replay buffer
transition = {
'state': self.last_state,
'action': self.last_action,
'reward': reward,
'next_state': state,
'next_action': self.last_action,
'is_state_terminal': done,
}
if self.recurrent:
transition['recurrent_state'] =\
self.model.get_recurrent_state_at(
self.train_prev_recurrent_states, 0, unwrap_variable=True)
self.train_prev_recurrent_states = None
transition['next_recurrent_state'] =\
self.model.get_recurrent_state_at(
self.train_recurrent_states, 0, unwrap_variable=True)
self.replay_buffer.append(**transition)
self.last_state = None
self.last_action = None
if self.recurrent:
self.train_recurrent_states = None
self.replay_buffer.stop_current_episode()
def stop_episode(self):
if self.recurrent:
self.test_recurrent_states = None
def get_statistics(self):
return [
('average_q', self.average_q),
('average_loss', self.average_loss),
('n_updates', self.optimizer.t),
]
| toslunar/chainerrl | chainerrl/agents/dqn.py | Python | mit | 22,430 |
import os
import numpy as np
import librosa
import collections
def getNewTime(t):
"""
    Given a time in seconds, convert it to a min'sec" string.
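    Example (illustrative):
        >>> print getNewTime(125)
        2'5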
"""
tmin = int(t/60)
tsec = (t-int(t/60)*60) # * 100
return str(tmin) + "\'" + str(int(tsec))
def printSegInfo(segs,all_idx,timeLabel):
"""
Print the medley transition info
"""
for i in range(len(all_idx)):
currSeg = segs.segments[all_idx[i]]
newStart = getNewTime(timeLabel[i])
oldSt = getNewTime(currSeg.st * 1. / currSeg.sr)
oldEd = getNewTime(currSeg.ed * 1. / currSeg.sr)
print newStart, currSeg.songname, oldSt, oldEd
def writeMedley(segs,outPath,sig,all_idx,timeLabel):
if not os.path.exists(outPath):
os.makedirs(outPath)
outName = outPath + str(all_idx[0]) + '_' + str(all_idx[1])
# write txt file
text_file = open(outName+'.txt',"w")
for i in range(len(all_idx)):
curr_idx = all_idx[i]
currSeg = segs.segments[curr_idx]
newStart = getNewTime(timeLabel[i])
oldSt = getNewTime(currSeg.st * 1. / currSeg.sr)
oldEd = getNewTime(currSeg.ed * 1. / currSeg.sr)
text_file.write(newStart + '\t' + str(all_idx[i]) + '\t' + currSeg.songname + '\t' + oldSt + '\t' + oldEd + '\n')
    text_file.close()
    # write wave file
    librosa.output.write_wav(outName + '.wav', sig, currSeg.sr)
def createMedleyEasy(segs,st_idx, N):
"""
Given a segs object, a starting index
Specify the length of the medley, and the output directory
Create a medley directly from the class
"""
all_idx = [st_idx]
timeLabel = [0.0]
# get the value of the start segment
currSeg = segs.segments[st_idx].y
finalMedley = currSeg
for i in range(N):
next_idx = segs.mashPairs[st_idx]
all_idx.append(next_idx)
currSeg = segs.segments[next_idx] # get next best match
timeLabel.append(round(len(finalMedley) * 1. / currSeg.sr,2)) # when to start next segment
finalMedley = np.append(finalMedley,currSeg.y)
st_idx = next_idx
return all_idx, timeLabel,finalMedley
def createMedley(segs,st_idx,N):
"""
Given a segs object, a starting index
Specify the length of the medley, and the output directory
Create a medley
"""
all_idx = [st_idx]
timeLabel = [0.0]
# get the value of the start segment
currSeg = segs.segments[st_idx].y
finalMedley = currSeg
for i in range(N):
next_idx = segs.find_best_match(st_idx, diffSong = True)
all_idx.append(next_idx)
currSeg = segs.segments[next_idx] # get next best match
timeLabel.append(round(len(finalMedley) * 1. / currSeg.sr,2)) # when to start next segment
finalMedley = np.append(finalMedley,currSeg.y)
st_idx = next_idx
return all_idx, timeLabel,finalMedley
| kittyshi/medley | medleycode/medley_helper.py | Python | gpl-3.0 | 2,998 |
"""Gaussian Mixture Model."""
# Author: Wei Xue <xuewei4d@gmail.com>
# Modified by Thierry Guillemot <thierry.guillemot.work@gmail.com>
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from .base import BaseMixture, _check_shape
from ..externals.six.moves import zip
from ..utils import check_array
from ..utils.validation import check_is_fitted
from ..utils.extmath import row_norms
###############################################################################
# Gaussian mixture shape checkers used by the GaussianMixture class
def _check_weights(weights, n_components):
"""Check the user provided 'weights'.
Parameters
----------
weights : array-like, shape (n_components,)
The proportions of components of each mixture.
n_components : int
Number of components.
Returns
-------
weights : array, shape (n_components,)
"""
weights = check_array(weights, dtype=[np.float64, np.float32],
ensure_2d=False)
_check_shape(weights, (n_components,), 'weights')
# check range
if (any(np.less(weights, 0.)) or
any(np.greater(weights, 1.))):
raise ValueError("The parameter 'weights' should be in the range "
"[0, 1], but got max value %.5f, min value %.5f"
                         % (np.max(weights), np.min(weights)))
# check normalization
if not np.allclose(np.abs(1. - np.sum(weights)), 0.):
raise ValueError("The parameter 'weights' should be normalized, "
"but got sum(weights) = %.5f" % np.sum(weights))
return weights
def _check_means(means, n_components, n_features):
"""Validate the provided 'means'.
Parameters
----------
means : array-like, shape (n_components, n_features)
The centers of the current components.
n_components : int
Number of components.
n_features : int
Number of features.
Returns
-------
means : array, (n_components, n_features)
"""
means = check_array(means, dtype=[np.float64, np.float32], ensure_2d=False)
_check_shape(means, (n_components, n_features), 'means')
return means
def _check_precision_positivity(precision, covariance_type):
"""Check a precision vector is positive-definite."""
if np.any(np.less_equal(precision, 0.0)):
raise ValueError("'%s precision' should be "
"positive" % covariance_type)
def _check_precision_matrix(precision, covariance_type):
"""Check a precision matrix is symmetric and positive-definite."""
if not (np.allclose(precision, precision.T) and
np.all(linalg.eigvalsh(precision) > 0.)):
raise ValueError("'%s precision' should be symmetric, "
"positive-definite" % covariance_type)
def _check_precisions_full(precisions, covariance_type):
"""Check the precision matrices are symmetric and positive-definite."""
    for prec in precisions:
        _check_precision_matrix(prec, covariance_type)
def _check_precisions(precisions, covariance_type, n_components, n_features):
"""Validate user provided precisions.
Parameters
----------
precisions : array-like,
'full' : shape of (n_components, n_features, n_features)
'tied' : shape of (n_features, n_features)
'diag' : shape of (n_components, n_features)
'spherical' : shape of (n_components,)
covariance_type : string
n_components : int
Number of components.
n_features : int
Number of features.
Returns
-------
precisions : array
"""
precisions = check_array(precisions, dtype=[np.float64, np.float32],
ensure_2d=False,
allow_nd=covariance_type == 'full')
precisions_shape = {'full': (n_components, n_features, n_features),
'tied': (n_features, n_features),
'diag': (n_components, n_features),
'spherical': (n_components,)}
_check_shape(precisions, precisions_shape[covariance_type],
'%s precision' % covariance_type)
_check_precisions = {'full': _check_precisions_full,
'tied': _check_precision_matrix,
'diag': _check_precision_positivity,
'spherical': _check_precision_positivity}
_check_precisions[covariance_type](precisions, covariance_type)
return precisions
###############################################################################
# Gaussian mixture parameters estimators (used by the M-Step)
def _estimate_gaussian_covariances_full(resp, X, nk, means, reg_covar):
"""Estimate the full covariance matrices.
Parameters
----------
resp : array-like, shape (n_samples, n_components)
X : array-like, shape (n_samples, n_features)
nk : array-like, shape (n_components,)
means : array-like, shape (n_components, n_features)
reg_covar : float
Returns
-------
covariances : array, shape (n_components, n_features, n_features)
The covariance matrix of the current components.
"""
n_components, n_features = means.shape
covariances = np.empty((n_components, n_features, n_features))
for k in range(n_components):
diff = X - means[k]
covariances[k] = np.dot(resp[:, k] * diff.T, diff) / nk[k]
covariances[k].flat[::n_features + 1] += reg_covar
return covariances
def _estimate_gaussian_covariances_tied(resp, X, nk, means, reg_covar):
"""Estimate the tied covariance matrix.
Parameters
----------
resp : array-like, shape (n_samples, n_components)
X : array-like, shape (n_samples, n_features)
nk : array-like, shape (n_components,)
means : array-like, shape (n_components, n_features)
reg_covar : float
Returns
-------
covariance : array, shape (n_features, n_features)
The tied covariance matrix of the components.
"""
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(nk * means.T, means)
covariance = avg_X2 - avg_means2
covariance /= nk.sum()
covariance.flat[::len(covariance) + 1] += reg_covar
return covariance
def _estimate_gaussian_covariances_diag(resp, X, nk, means, reg_covar):
"""Estimate the diagonal covariance vectors.
Parameters
----------
    resp : array-like, shape (n_samples, n_components)
X : array-like, shape (n_samples, n_features)
nk : array-like, shape (n_components,)
means : array-like, shape (n_components, n_features)
reg_covar : float
Returns
-------
covariances : array, shape (n_components, n_features)
The covariance vector of the current components.
"""
avg_X2 = np.dot(resp.T, X * X) / nk[:, np.newaxis]
avg_means2 = means ** 2
avg_X_means = means * np.dot(resp.T, X) / nk[:, np.newaxis]
return avg_X2 - 2 * avg_X_means + avg_means2 + reg_covar
def _estimate_gaussian_covariances_spherical(resp, X, nk, means, reg_covar):
"""Estimate the spherical variance values.
Parameters
----------
    resp : array-like, shape (n_samples, n_components)
X : array-like, shape (n_samples, n_features)
nk : array-like, shape (n_components,)
means : array-like, shape (n_components, n_features)
reg_covar : float
Returns
-------
variances : array, shape (n_components,)
The variance values of each components.
"""
return _estimate_gaussian_covariances_diag(resp, X, nk,
means, reg_covar).mean(1)
def _estimate_gaussian_parameters(X, resp, reg_covar, covariance_type):
"""Estimate the Gaussian distribution parameters.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The input data array.
resp : array-like, shape (n_samples, n_features)
The responsibilities for each data sample in X.
reg_covar : float
The regularization added to the diagonal of the covariance matrices.
covariance_type : {'full', 'tied', 'diag', 'spherical'}
The type of precision matrices.
Returns
-------
nk : array-like, shape (n_components,)
The numbers of data samples in the current components.
means : array-like, shape (n_components, n_features)
The centers of the current components.
covariances : array-like
The covariance matrix of the current components.
        The shape depends on the covariance_type.
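    Examples
    --------
    Illustrative sketch: with a single component and uniform
    responsibilities, the estimated mean reduces to the sample mean.
    >>> X = np.arange(6, dtype=np.float64).reshape(3, 2)
    >>> resp = np.ones((3, 1))
    >>> nk, means, cov = _estimate_gaussian_parameters(X, resp, 0., 'diag')
    >>> np.allclose(means, X.mean(axis=0))
    True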
"""
nk = resp.sum(axis=0) + 10 * np.finfo(resp.dtype).eps
means = np.dot(resp.T, X) / nk[:, np.newaxis]
covariances = {"full": _estimate_gaussian_covariances_full,
"tied": _estimate_gaussian_covariances_tied,
"diag": _estimate_gaussian_covariances_diag,
"spherical": _estimate_gaussian_covariances_spherical
}[covariance_type](resp, X, nk, means, reg_covar)
return nk, means, covariances
def _compute_precision_cholesky(covariances, covariance_type):
"""Compute the Cholesky decomposition of the precisions.
Parameters
----------
covariances : array-like
The covariance matrix of the current components.
        The shape depends on the covariance_type.
covariance_type : {'full', 'tied', 'diag', 'spherical'}
The type of precision matrices.
Returns
-------
precisions_cholesky : array-like
The cholesky decomposition of sample precisions of the current
        components. The shape depends on the covariance_type.
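    Examples
    --------
    Illustrative sketch: identity covariances give identity Cholesky
    factors of the precisions.
    >>> chol = _compute_precision_cholesky(np.array([np.eye(2)]), 'full')
    >>> np.allclose(chol[0], np.eye(2))
    True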
"""
estimate_precision_error_message = (
"Fitting the mixture model failed because some components have "
"ill-defined empirical covariance (for instance caused by singleton "
"or collapsed samples). Try to decrease the number of components, "
"or increase reg_covar.")
    if covariance_type == 'full':
n_components, n_features, _ = covariances.shape
precisions_chol = np.empty((n_components, n_features, n_features))
for k, covariance in enumerate(covariances):
try:
cov_chol = linalg.cholesky(covariance, lower=True)
except linalg.LinAlgError:
raise ValueError(estimate_precision_error_message)
precisions_chol[k] = linalg.solve_triangular(cov_chol,
np.eye(n_features),
lower=True).T
elif covariance_type == 'tied':
_, n_features = covariances.shape
try:
cov_chol = linalg.cholesky(covariances, lower=True)
except linalg.LinAlgError:
raise ValueError(estimate_precision_error_message)
precisions_chol = linalg.solve_triangular(cov_chol, np.eye(n_features),
lower=True).T
else:
if np.any(np.less_equal(covariances, 0.0)):
raise ValueError(estimate_precision_error_message)
precisions_chol = 1. / np.sqrt(covariances)
return precisions_chol
###############################################################################
# Gaussian mixture probability estimators
def _compute_log_det_cholesky(matrix_chol, covariance_type, n_features):
"""Compute the log-det of the cholesky decomposition of matrices.
Parameters
----------
matrix_chol : array-like,
Cholesky decompositions of the matrices.
'full' : shape of (n_components, n_features, n_features)
'tied' : shape of (n_features, n_features)
'diag' : shape of (n_components, n_features)
'spherical' : shape of (n_components,)
covariance_type : {'full', 'tied', 'diag', 'spherical'}
n_features : int
Number of features.
Returns
-------
    log_det_precision_chol : array-like, shape (n_components,)
        The log of the determinant of the Cholesky factor of the precision
        matrix for each component.
"""
if covariance_type == 'full':
n_components, _, _ = matrix_chol.shape
log_det_chol = (np.sum(np.log(
matrix_chol.reshape(
n_components, -1)[:, ::n_features + 1]), 1))
elif covariance_type == 'tied':
log_det_chol = (np.sum(np.log(np.diag(matrix_chol))))
elif covariance_type == 'diag':
log_det_chol = (np.sum(np.log(matrix_chol), axis=1))
else:
log_det_chol = n_features * (np.log(matrix_chol))
return log_det_chol
def _estimate_log_gaussian_prob(X, means, precisions_chol, covariance_type):
"""Estimate the log Gaussian probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
means : array-like, shape (n_components, n_features)
precisions_chol : array-like,
Cholesky decompositions of the precision matrices.
'full' : shape of (n_components, n_features, n_features)
'tied' : shape of (n_features, n_features)
'diag' : shape of (n_components, n_features)
'spherical' : shape of (n_components,)
covariance_type : {'full', 'tied', 'diag', 'spherical'}
Returns
-------
log_prob : array, shape (n_samples, n_components)
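    Examples
    --------
    Illustrative sketch: a standard 2-d Gaussian evaluated at its mean has
    log density ``-log(2 * pi)``.
    >>> X = np.zeros((1, 2))
    >>> means = np.zeros((1, 2))
    >>> prec_chol = np.ones(1)  # spherical, unit variance
    >>> log_p = _estimate_log_gaussian_prob(X, means, prec_chol, 'spherical')
    >>> np.allclose(log_p, -np.log(2 * np.pi))
    True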
"""
n_samples, n_features = X.shape
n_components, _ = means.shape
    # log(det(precision_chol)) is half of log(det(precision))
log_det = _compute_log_det_cholesky(
precisions_chol, covariance_type, n_features)
if covariance_type == 'full':
log_prob = np.empty((n_samples, n_components))
for k, (mu, prec_chol) in enumerate(zip(means, precisions_chol)):
y = np.dot(X, prec_chol) - np.dot(mu, prec_chol)
log_prob[:, k] = np.sum(np.square(y), axis=1)
elif covariance_type == 'tied':
log_prob = np.empty((n_samples, n_components))
for k, mu in enumerate(means):
y = np.dot(X, precisions_chol) - np.dot(mu, precisions_chol)
log_prob[:, k] = np.sum(np.square(y), axis=1)
elif covariance_type == 'diag':
precisions = precisions_chol ** 2
log_prob = (np.sum((means ** 2 * precisions), 1) -
2. * np.dot(X, (means * precisions).T) +
np.dot(X ** 2, precisions.T))
elif covariance_type == 'spherical':
precisions = precisions_chol ** 2
log_prob = (np.sum(means ** 2, 1) * precisions -
2 * np.dot(X, means.T * precisions) +
np.outer(row_norms(X, squared=True), precisions))
return -.5 * (n_features * np.log(2 * np.pi) + log_prob) + log_det
class GaussianMixture(BaseMixture):
"""Gaussian Mixture.
Representation of a Gaussian mixture model probability distribution.
    This class allows estimation of the parameters of a Gaussian mixture
    distribution.
.. versionadded:: 0.18
*GaussianMixture*.
Read more in the :ref:`User Guide <gmm>`.
Parameters
----------
n_components : int, defaults to 1.
The number of mixture components.
covariance_type : {'full', 'tied', 'diag', 'spherical'},
defaults to 'full'.
String describing the type of covariance parameters to use.
Must be one of::
'full' (each component has its own general covariance matrix),
'tied' (all components share the same general covariance matrix),
'diag' (each component has its own diagonal covariance matrix),
'spherical' (each component has its own single variance).
tol : float, defaults to 1e-3.
The convergence threshold. EM iterations will stop when the
lower bound average gain is below this threshold.
reg_covar : float, defaults to 0.
        Non-negative regularization added to the diagonal of covariance.
        Ensures that the covariance matrices are all positive.
max_iter : int, defaults to 100.
The number of EM iterations to perform.
n_init : int, defaults to 1.
The number of initializations to perform. The best results are kept.
init_params : {'kmeans', 'random'}, defaults to 'kmeans'.
The method used to initialize the weights, the means and the
precisions.
Must be one of::
'kmeans' : responsibilities are initialized using kmeans.
'random' : responsibilities are initialized randomly.
    weights_init : array-like, shape (n_components, ), optional
        The user-provided initial weights, defaults to None.
        If it is None, weights are initialized using the `init_params` method.
    means_init : array-like, shape (n_components, n_features), optional
        The user-provided initial means, defaults to None.
        If it is None, means are initialized using the `init_params` method.
    precisions_init : array-like, optional.
        The user-provided initial precisions (inverse of the covariance
        matrices), defaults to None.
        If it is None, precisions are initialized using the 'init_params'
        method.
The shape depends on 'covariance_type'::
(n_components,) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
    random_state : RandomState or an int seed, defaults to None.
A random number generator instance.
warm_start : bool, default to False.
If 'warm_start' is True, the solution of the last fitting is used as
initialization for the next call of fit(). This can speed up
convergence when fit is called several time on similar problems.
verbose : int, default to 0.
Enable verbose output. If 1 then it prints the current
initialization and each iteration step. If greater than 1 then
it prints also the log probability and the time needed
for each step.
verbose_interval : int, default to 10.
Number of iteration done before the next print.
Attributes
----------
weights_ : array-like, shape (n_components,)
The weights of each mixture components.
means_ : array-like, shape (n_components, n_features)
The mean of each mixture component.
covariances_ : array-like
The covariance of each mixture component.
The shape depends on `covariance_type`::
(n_components,) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
precisions_ : array-like
The precision matrices for each component in the mixture. A precision
matrix is the inverse of a covariance matrix. A covariance matrix is
        symmetric positive definite so the mixture of Gaussians can be
equivalently parameterized by the precision matrices. Storing the
precision matrices instead of the covariance matrices makes it more
efficient to compute the log-likelihood of new samples at test time.
The shape depends on `covariance_type`::
(n_components,) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
precisions_cholesky_ : array-like
The cholesky decomposition of the precision matrices of each mixture
component. A precision matrix is the inverse of a covariance matrix.
A covariance matrix is symmetric positive definite so the mixture of
        Gaussians can be equivalently parameterized by the precision matrices.
Storing the precision matrices instead of the covariance matrices makes
it more efficient to compute the log-likelihood of new samples at test
time. The shape depends on `covariance_type`::
(n_components,) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
n_iter_ : int
Number of step used by the best fit of EM to reach the convergence.
lower_bound_ : float
Log-likelihood of the best fit of EM.
See Also
--------
BayesianGaussianMixture : Gaussian mixture model fit with a variational
inference.
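    Examples
    --------
    Illustrative sketch (fitted values depend on the data and
    ``random_state``, so only shapes are checked here):
    >>> import numpy as np
    >>> X = np.array([[1., 2.], [1., 4.], [1., 0.],
    ...               [10., 2.], [10., 4.], [10., 0.]])
    >>> gm = GaussianMixture(n_components=2, random_state=0).fit(X)
    >>> gm.means_.shape
    (2, 2)
    >>> gm.predict([[0., 0.], [12., 3.]]).shape
    (2,)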
"""
def __init__(self, n_components=1, covariance_type='full', tol=1e-3,
reg_covar=1e-6, max_iter=100, n_init=1, init_params='kmeans',
weights_init=None, means_init=None, precisions_init=None,
random_state=None, warm_start=False,
verbose=0, verbose_interval=10):
super(GaussianMixture, self).__init__(
n_components=n_components, tol=tol, reg_covar=reg_covar,
max_iter=max_iter, n_init=n_init, init_params=init_params,
random_state=random_state, warm_start=warm_start,
verbose=verbose, verbose_interval=verbose_interval)
self.covariance_type = covariance_type
self.weights_init = weights_init
self.means_init = means_init
self.precisions_init = precisions_init
def _check_parameters(self, X):
"""Check the Gaussian mixture parameters are well defined."""
_, n_features = X.shape
if self.covariance_type not in ['spherical', 'tied', 'diag', 'full']:
raise ValueError("Invalid value for 'covariance_type': %s "
"'covariance_type' should be in "
"['spherical', 'tied', 'diag', 'full']"
% self.covariance_type)
if self.weights_init is not None:
self.weights_init = _check_weights(self.weights_init,
self.n_components)
if self.means_init is not None:
self.means_init = _check_means(self.means_init,
self.n_components, n_features)
if self.precisions_init is not None:
self.precisions_init = _check_precisions(self.precisions_init,
self.covariance_type,
self.n_components,
n_features)
def _initialize(self, X, resp):
"""Initialization of the Gaussian mixture parameters.
Parameters
----------
X : array-like, shape (n_samples, n_features)
resp : array-like, shape (n_samples, n_components)
"""
n_samples, _ = X.shape
weights, means, covariances = _estimate_gaussian_parameters(
X, resp, self.reg_covar, self.covariance_type)
weights /= n_samples
self.weights_ = (weights if self.weights_init is None
else self.weights_init)
self.means_ = means if self.means_init is None else self.means_init
if self.precisions_init is None:
self.covariances_ = covariances
self.precisions_cholesky_ = _compute_precision_cholesky(
covariances, self.covariance_type)
elif self.covariance_type == 'full':
self.precisions_cholesky_ = np.array(
[linalg.cholesky(prec_init, lower=True)
for prec_init in self.precisions_init])
elif self.covariance_type == 'tied':
self.precisions_cholesky_ = linalg.cholesky(self.precisions_init,
lower=True)
else:
self.precisions_cholesky_ = self.precisions_init
def _m_step(self, X, log_resp):
"""M step.
Parameters
----------
X : array-like, shape (n_samples, n_features)
log_resp : array-like, shape (n_samples, n_components)
Logarithm of the posterior probabilities (or responsibilities) of
the point of each sample in X.
"""
n_samples, _ = X.shape
self.weights_, self.means_, self.covariances_ = (
_estimate_gaussian_parameters(X, np.exp(log_resp), self.reg_covar,
self.covariance_type))
self.weights_ /= n_samples
self.precisions_cholesky_ = _compute_precision_cholesky(
self.covariances_, self.covariance_type)
def _estimate_log_prob(self, X):
return _estimate_log_gaussian_prob(
X, self.means_, self.precisions_cholesky_, self.covariance_type)
def _estimate_log_weights(self):
return np.log(self.weights_)
def _compute_lower_bound(self, _, log_prob_norm):
return log_prob_norm
def _check_is_fitted(self):
check_is_fitted(self, ['weights_', 'means_', 'precisions_cholesky_'])
def _get_parameters(self):
return (self.weights_, self.means_, self.covariances_,
self.precisions_cholesky_)
def _set_parameters(self, params):
(self.weights_, self.means_, self.covariances_,
self.precisions_cholesky_) = params
# Attributes computation
_, n_features = self.means_.shape
if self.covariance_type == 'full':
self.precisions_ = np.empty(self.precisions_cholesky_.shape)
for k, prec_chol in enumerate(self.precisions_cholesky_):
self.precisions_[k] = np.dot(prec_chol, prec_chol.T)
elif self.covariance_type == 'tied':
self.precisions_ = np.dot(self.precisions_cholesky_,
self.precisions_cholesky_.T)
else:
self.precisions_ = self.precisions_cholesky_ ** 2
def _n_parameters(self):
"""Return the number of free parameters in the model."""
_, n_features = self.means_.shape
if self.covariance_type == 'full':
cov_params = self.n_components * n_features * (n_features + 1) / 2.
elif self.covariance_type == 'diag':
cov_params = self.n_components * n_features
elif self.covariance_type == 'tied':
cov_params = n_features * (n_features + 1) / 2.
elif self.covariance_type == 'spherical':
cov_params = self.n_components
mean_params = n_features * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
def bic(self, X):
"""Bayesian information criterion for the current model on the input X.
Parameters
----------
X : array of shape (n_samples, n_dimensions)
Returns
-------
        bic : float
            The lower the better.
"""
return (-2 * self.score(X) * X.shape[0] +
self._n_parameters() * np.log(X.shape[0]))
def aic(self, X):
"""Akaike information criterion for the current model on the input X.
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
        aic : float
            The lower the better.
"""
return -2 * self.score(X) * X.shape[0] + 2 * self._n_parameters()
| aabadie/scikit-learn | sklearn/mixture/gaussian_mixture.py | Python | bsd-3-clause | 27,463 |
# this script implements the Java interface IOSProvider
from osapi import DebugSessionException, ExecutionContext, ExecutionContextsProvider, \
Table, createField, DECIMAL, TEXT, ADDRESS, Model
def areOSSymbolsLoaded(debugger):
return debugger.symbolExists('context_switch_counter')
def isOSInitialised(debugger):
try:
# Once we've switched to the init dispatcher, we're initialised
# enough to respond to the debugger.
nswitches = debugger.evaluateExpression('context_switch_counter')
return nswitches >= 1
except DebugSessionException:
return False
def getOSContextProvider():
return ContextsProvider()
def getDataModel():
return Model('Barrelfish', [RunnableDispatchers()])
def runnable_dispatchers(debugger):
# Walk the current kernel's scheduling queue
dcb_ptr = debugger.evaluateExpression('kcb_current->queue_head')
while dcb_ptr.readAsNumber() != 0:
yield dcb_ptr
fields = dcb_ptr.dereferencePointer().getStructureMembers()
dcb_ptr = fields['next']
class RunnableDispatchers(Table):
def __init__(self):
id = "dispatchers"
fields = [createField(id, 'dcb', ADDRESS),
createField(id, 'name', TEXT),
createField(id, 'disabled', DECIMAL),
createField(id, 'domain', DECIMAL),
createField(id, 'udisp', ADDRESS),
createField(id, 'core', DECIMAL)]
Table.__init__(self, id, fields)
def getRecords(self, debugger):
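        # Stub: DS-5 table records are not built here; runnable_dispatchers()
        # above sketches how the scheduler queue would be walked to fill them.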
return []
def dispatcherContext(debugger, id, dcb_ptr):
# Get the handle to the shared dispatcher structure
dcb = dcb_ptr.dereferencePointer()
fields = dcb.getStructureMembers()
handle = fields['disp'].readAsNumber()
# Cast and dereference the handle to find the ARM shared structure
dispatcher_shared_arm = debugger.evaluateExpression('*((struct dispatcher_shared_arm *)0x%x)' % handle)
dsa_fields = dispatcher_shared_arm.getStructureMembers()
# Grab the generic shared fields
dispatcher_shared_generic = dsa_fields['d']
dsg_fields = dispatcher_shared_generic.getStructureMembers()
name = dsg_fields['name'].readAsNullTerminatedString()
disabled = dsg_fields['disabled'].readAsNumber()
state = ""
if disabled:
state += "D"
else:
state += "E"
ec = ExecutionContext(id, name, state)
ec.getAdditionalData()['kernel'] = False
return ec
def kernelContext(debugger, id, kcb_ptr):
ec = ExecutionContext(id, 'kernel', None)
ec.getAdditionalData()['kernel'] = True
return ec
class ContextsProvider(ExecutionContextsProvider):
def getCurrentOSContext(self, debugger):
pc = debugger.evaluateExpression('$pc').readAsNumber()
if pc >= 0x80000000:
# We're in the kernel - use the KCB address as context ID
kcb_current = debugger.evaluateExpression('kcb_current')
id = kcb_current.readAsNumber()
return kernelContext(debugger, id, kcb_current)
        else:
# We use the physical address of the DCB to identify dispatchers.
dcb_current = debugger.evaluateExpression('dcb_current')
id = dcb_current.readAsNumber()
if id == 0:
return None
else:
return dispatcherContext(debugger, id, dcb_current)
def getAllOSContexts(self, debugger):
contexts = []
for d in runnable_dispatchers(debugger):
contexts.append(dispatcherContext(debugger, d.readAsNumber(), d))
return contexts
def getOSContextSavedRegister(self, debugger, context, name):
if context.getAdditionalData()['kernel']:
return None
# Reconstruct a pointer to the DCB from the context ID
dcb = debugger.evaluateExpression('*((struct dcb *)0x%x)' % context.getId())
fields = dcb.getStructureMembers()
# Cast and dereference the handle to find the ARM shared structure
handle = fields['disp'].readAsNumber()
dispatcher_shared_arm = debugger.evaluateExpression('*((struct dispatcher_shared_arm *)0x%x)' % handle)
dsa_fields = dispatcher_shared_arm.getStructureMembers()
# Look in the enabled or disabled area, as appropriate
if fields['disabled'].readAsNumber() == 0:
save_area = dsa_fields['enabled_save_area']
else:
save_area = dsa_fields['disabled_save_area']
sa_fields = save_area.getStructureMembers()
regs = sa_fields['named'].getStructureMembers()
# Map DS-5 register names to fields
if name == "XPSR":
return regs['cpsr']
elif name == "R9":
return regs['rtls']
elif name == "SP":
return regs['stack']
elif name == "LR":
return regs['link']
elif name == "PC":
return regs['pc']
else:
            return regs[name.lower()]
| kishoredbn/barrelfish | tools/ds5/configdb/OS/Barrelfish/provider.py | Python | mit | 5,161 |
#!/usr/bin/python3
import json
import numpy as np
import random
class Payload():
'''
Instance contains altitude data for a balloon flight. Use alt() method
to get interpolated data.
'''
addr = None
name = "Payload"
time_index = 0.0 # index into the flight profile data, in seconds.
timestep = 1.0 # gets larger as weight decreases.
last_request_time = 0
last_alt = 0
def __init__(self, filename, mass, ballast, name):
self.parse_profile(filename)
self.initial_mass = mass # kg
self.mass = self.initial_mass # kg
# ballast is included in total mass.
self.initial_ballast = ballast # liters
self.name = name
def parse_profile(self, filename):
'''
Parse a JSON file containing an ascent profile.
'''
with open(filename, 'r') as data_file:
profile = json.load(data_file)
# Create an array of int32's. Alt is in decimeters.
self.alts = (np.array(profile['data'])*10).astype(np.int32)
self.ref_timestep = profile['timestep'] # s
self.ref_mass = profile['mass'] # s
self.times = np.arange(0, self.alts.size*self.ref_timestep, self.ref_timestep)
def alt(self):
'''
        Returns the altitude at the current time index (self.time_index,
        in seconds, with 0 being the beginning of the flight), linearly
        interpolated between profile samples.
'''
# alt = None if seconds is outside of the flight time.
if (self.time_index > self.alts.size):
alt = None
print("time index > alt size")
print(self.time_index)
elif (self.time_index < 0):
alt = None
print("time index < 0")
print(self.time_index)
# otherwise, linearly interpolate between the two closest values.
else:
            alt = np.interp(self.time_index, self.times, self.alts)
return alt
# Did curve-fitting on HabHub data to come up with timestep adjustment.
def adjust_time(self, time_elapsed):
time_delta = time_elapsed - self.last_request_time
self.last_request_time = time_elapsed
x = self.ref_mass / self.mass
self.timestep = 1.0/(-0.0815243*x*x*x + 0.1355*x*x - 0.391461*x + 1.33748611)
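        # Sanity check on the fitted cubic: at the reference mass x == 1 and
        # the denominator sums to ~1.0000008, so the timestep is ~1.0
        # (real-time playback); lighter payloads give larger timesteps.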
self.time_index += time_delta*self.timestep
def drop_mass(self, ballast_time_ms):
# experimental results show 4.925ml/s drain rate. with current setup.
# we give it random +-10% error, because the payload is getting
# blasted by high winds and the ballast is sloshing around.
noise = random.uniform(0.9, 1.1)
new_mass = self.mass - (noise*ballast_time_ms*0.004925/1000)*0.8
if (new_mass > self.initial_mass - self.initial_ballast):
self.mass = new_mass
else:
self.mass = self.initial_mass - self.initial_ballast
if __name__ == '__main__':
# initialize Flights. 'PAYLOAD_X_ID' is the digimesh ID of payload X.
    # Ballast volume (liters) and names here are illustrative placeholders.
    fp1 = Payload('profiles/umhab52.json', 1.2, 0.4, 'fp1')
    fp2 = Payload('profiles/umhab48.json', 1.4, 0.4, 'fp2')
'''
xbee = XBee.init('/dev/xbee', 1200) # initialize serial for XBee, 1200baud
ft = 0 # flight time starts at 0
cur_payload = None
while(True):
# Wait for payloads to request an altitude, send it, and update the
# payload’s mass. Add noise into the system for realistic simulation.
req = alt_request_wait();
if ((req.addr != fp1.addr) and (req.addr != fp2.addr)):
if (fp1.addr == None):
fp1.addr = req.addr
cur_payload = fp1
else if (fp2.addr == None):
fp2.addr = req.addr
cur_payload = fp2
else:
print('Got another XBee\'s frame. Maybe change the network id.')
elif (req.addr == fp1.addr):
print('got a fp1 alt request')
cur_payload = fp1
else:
print('got a fp2 alt request')
cur_payload = fp2
XBee.sendAlt(cur_payload.addr, cur_payload.alt())
fp1.mass -= XBee.getBallastDropped(fp1.id)*mass_noise()
ft += timestep
print(fp.timestep)
print(fp.alts)
print(fp.alts.size)
print(fp.times.size)
print(fp.alt(24))
'''
| liberza/bacon | simulator/payload.py | Python | mit | 4,318 |
# coding:utf-8
from __future__ import unicode_literals
import os
import shutil
from six.moves import http_client, urllib
from cactus.site import Site
from cactus.plugin.manager import PluginManager
from cactus.utils.helpers import CaseInsensitiveDict
from cactus.utils.parallel import PARALLEL_DISABLED
from cactus.tests import BaseTestCase
class DummyPluginManager(PluginManager):
"""
Doesn't do anything
"""
def call(self, method, *args, **kwargs):
"""
Trap the call
"""
pass
class IntegrationTestCase(BaseTestCase):
def setUp(self):
super(IntegrationTestCase, self).setUp()
self.site = Site(
self.path,
PluginManagerClass=DummyPluginManager,
DeploymentEngineClass=self.get_deployment_engine_class()
)
self.site._parallel = PARALLEL_DISABLED
self.site.config.set('site-url', 'http://example.com/')
# Clean up the site paths
for path in (self.site.page_path, self.site.static_path):
shutil.rmtree(path)
os.mkdir(path)
def get_deployment_engine_class(self):
"""
Should return a deployment engine in tests.
"""
pass
class BaseTestHTTPConnection(object):
last_request = None
def __init__(self, host, *args, **kwargs):
self.host = host
self.requests = []
def connect(self):
pass
def close(self):
pass
def request(self, method, url, body=b'', headers=None):
"""
Send a full request at once
"""
if headers is None:
headers = {}
self.last_request = TestHTTPRequest(self, method, url, body, headers)
def putrequest(self, method, url, *args, **kwargs):
"""
Create a new request, but add more things to it later
"""
self.current_request = TestHTTPRequest(self, method, url, b'', {})
self.current_request.state = "headers"
def putheader(self, header, value):
"""
Add an header to a request that's in progress
"""
self.current_request.headers[header] = value
def endheaders(self, data=None):
"""
End the headers of a request that's in progress
"""
self.current_request.state = "body"
self.last_request = self.current_request
if data is not None:
self.send(data)
def send(self, data):
"""
Add data to a request that's in progress
"""
self.current_request.body += data
def getresponse(self):
request = self.last_request
self.requests.append(request)
return self.handle_request(request)
def handle_request(self, request):
"""
:param request: The request to handle
"""
raise NotImplementedError("handle_request should be implemented by subclasses")
def set_debuglevel(self, level):
pass
class DebugHTTPSConnectionFactory(object):
def __init__(self, conn_cls):
self.conn_cls = conn_cls
self.connections = []
@property
def requests(self):
"""
:returns: A dictionary of the calls made through this connection factory (method -> list of calls)
"""
out = []
for connection in self.connections:
out.extend(connection.requests)
return out
def __call__(self, *args, **kwargs):
"""
Create a new connection from our connection class
"""
connection = self.conn_cls(*args, **kwargs)
self.connections.append(connection)
return connection
class TestHTTPRequest(object):
state = None
def __init__(self, connection, method, url, body, headers):
self.connection = connection
self.method = method
self.url = url
self.body = body
self.headers = CaseInsensitiveDict(headers)
u = urllib.parse.urlparse(url)
self.path = u.path
self.params = urllib.parse.parse_qs(u.query, keep_blank_values=True)
class TestHTTPResponse(object):
def __init__(self, status, reason=None, headers=None, body=''):
if reason is None:
reason = http_client.responses[status]
if headers is None:
headers = {}
self.status = status
self.reason = reason
self.headers = CaseInsensitiveDict(headers)
self.body = body
def getheader(self, header, default=None):
return self.headers.get(header, default)
def getheaders(self):
return self.headers
def read(self):
return self.body
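# Illustrative sketch: a minimal concrete connection that always answers
# 200 OK. Tests could pass it to DebugHTTPSConnectionFactory to capture
# outgoing requests, e.g.
#     factory = DebugHTTPSConnectionFactory(AlwaysOKHTTPConnection)
# 'AlwaysOKHTTPConnection' is a hypothetical name, not part of cactus.
class AlwaysOKHTTPConnection(BaseTestHTTPConnection):
    def handle_request(self, request):
        # Respond with an empty successful body regardless of the request.
        return TestHTTPResponse(200, body=b'')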
| dreadatour/Cactus | cactus/tests/integration/__init__.py | Python | bsd-3-clause | 4,636 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import getpass
pwd = getpass.getpass("请输入密码:")  # prompt: "Please enter password:"
# list add/delete/modify/query (CRUD on lists) | zhangyage/Python-oldboy | my_study/mm/getpass_mima.py | Python | apache-2.0 | 125 |
import random
# Binary search tree. Not balanced.
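# Average-case search/insert is O(log n); without balancing, a sorted
# insertion order degrades the tree to a linked list and O(n) operations.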
class Node(object):
def __init__(self, value, left=None, right=None):
self.v = value
self.l = left
self.r = right
def min(self):
if self.l:
return self.l.min()
else:
return self.v
def max(self):
if self.r:
return self.r.max()
else:
return self.v
def is_bst(self):
if not self.l and not self.r:
return True
is_left_bst = True
is_right_bst = True
if self.l:
is_left_bst = self.l.is_bst() and self.v > self.l.max()
if self.r:
is_right_bst = self.r.is_bst() and self.v < self.r.min()
return is_left_bst and is_right_bst
def contains(self, value):
if value == self.v:
return True
if self.l and value < self.v:
return self.l.contains(value)
if self.r and value > self.v:
return self.r.contains(value)
return False
def insert(self, value):
if value == self.v:
return
if value < self.v:
if self.l:
self.l.insert(value)
else:
self.l = Node(value, None, None)
elif value > self.v:
if self.r:
self.r.insert(value)
else:
self.r = Node(value, None, None)
if __name__ == "__main__":
one = Node(1, None, None)
three = Node(3, None, None)
two = Node(2, one, three)
print two.is_bst(), "should be True"
print two.contains(2), "should be True"
five = Node(5, None, None)
six = Node(6, five, None)
four = Node(4, three, six)
print four.is_bst(), "should be True"
print four.contains(3), "should be True"
four = Node(4, six, three)
print four.is_bst(), "should be False"
t = Node(random.randint(0, 1000000), None, None)
for _ in range(1000):
t.insert(random.randint(0, 1000000))
print t.is_bst(), "should be True"
| jlucangelio/cs-basics | data_structures/binary_search_tree.py | Python | bsd-3-clause | 2,059 |
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bisect
import logbook
import datetime
from functools import wraps
import pandas as pd
import numpy as np
from zipline.data.loader import load_market_data
from zipline.utils import tradingcalendar
from zipline.assets import AssetFinder
from zipline.errors import UpdateAssetFinderTypeError
log = logbook.Logger('Trading')
# The financial simulations in zipline depend on information
# about the benchmark index and the risk free rates of return.
# The benchmark index defines the benchmark returns used in
# the calculation of performance metrics such as alpha/beta. Many
# components, including risk, performance, transforms, and
# batch_transforms, need access to a calendar of trading days and
# market hours. The TradingEnvironment maintains two time keeping
# facilities:
# - a DatetimeIndex of trading days for calendar calculations
# - a timezone name, which should be local to the exchange
# hosting the benchmark index. All dates are normalized to UTC
# for serialization and storage, and the timezone is used to
# ensure proper rollover through daylight savings and so on.
#
# This module maintains a global variable, environment, which is
# subsequently referenced directly by zipline financial
# components. To set the environment, you can set the property on
# the module directly:
# from zipline.finance import trading
# trading.environment = TradingEnvironment()
#
# or if you want to switch the environment for a limited context
# you can use a TradingEnvironment in a with clause:
# lse = TradingEnvironment(bm_index="^FTSE", exchange_tz="Europe/London")
# with lse:
# the code here will have lse as the global trading.environment
# algo.run(start, end)
#
# User code will not normally need to use TradingEnvironment
# directly. If you are extending zipline's core financial
# components and need to use the environment, you must import the module
# NOT the variable. If you import the module, you will get a
# reference to the environment at import time, which will prevent
# your code from responding to user code that changes the global
# state.
environment = None
class NoFurtherDataError(Exception):
"""
    Thrown when the next trading day is requested at the end of available
    data.
"""
pass
class TradingEnvironment(object):
@classmethod
def instance(cls):
global environment
if not environment:
environment = TradingEnvironment()
return environment
def __init__(
self,
load=None,
bm_symbol='^GSPC',
exchange_tz="US/Eastern",
max_date=None,
env_trading_calendar=tradingcalendar
):
"""
@load is function that returns benchmark_returns and treasury_curves
The treasury_curves are expected to be a DataFrame with an index of
dates and columns of the curve names, e.g. '10year', '1month', etc.
"""
self.trading_day = env_trading_calendar.trading_day.copy()
# `tc_td` is short for "trading calendar trading days"
tc_td = env_trading_calendar.trading_days
if max_date:
self.trading_days = tc_td[tc_td <= max_date].copy()
else:
self.trading_days = tc_td.copy()
self.first_trading_day = self.trading_days[0]
self.last_trading_day = self.trading_days[-1]
self.early_closes = env_trading_calendar.get_early_closes(
self.first_trading_day, self.last_trading_day)
self.open_and_closes = env_trading_calendar.open_and_closes.loc[
self.trading_days]
self.prev_environment = self
self.bm_symbol = bm_symbol
if not load:
load = load_market_data
self.benchmark_returns, self.treasury_curves = \
load(self.trading_day, self.trading_days, self.bm_symbol)
if max_date:
tr_c = self.treasury_curves
# Mask the treasury curves down to the current date.
# In the case of live trading, the last date in the treasury
# curves would be the day before the date considered to be
# 'today'.
self.treasury_curves = tr_c[tr_c.index <= max_date]
self.exchange_tz = exchange_tz
self.asset_finder = AssetFinder()
def __enter__(self, *args, **kwargs):
global environment
self.prev_environment = environment
environment = self
# return value here is associated with "as such_and_such" on the
# with clause.
return self
def __exit__(self, exc_type, exc_val, exc_tb):
global environment
environment = self.prev_environment
# signal that any exceptions need to be propagated up the
# stack.
return False
def update_asset_finder(self,
clear_metadata=False,
asset_finder=None,
asset_metadata=None,
identifiers=None):
"""
Updates the AssetFinder using the provided asset metadata and
identifiers.
If clear_metadata is True, all metadata and assets held in the
asset_finder will be erased before new metadata is provided.
If asset_finder is provided, the existing asset_finder will be replaced
outright with the new asset_finder.
If asset_metadata is provided, the existing metadata will be cleared
and replaced with the provided metadata.
All identifiers will be inserted in the asset metadata if they are not
already present.
:param clear_metadata: A boolean
:param asset_finder: An AssetFinder object to replace the environment's
existing asset_finder
:param asset_metadata: A dict, DataFrame, or readable object
:param identifiers: A list of identifiers to be inserted
:return:
"""
if clear_metadata:
self.asset_finder.clear_metadata()
if asset_finder is not None:
if not isinstance(asset_finder, AssetFinder):
raise UpdateAssetFinderTypeError(cls=asset_finder.__class__)
self.asset_finder = asset_finder
if asset_metadata is not None:
self.asset_finder.clear_metadata()
self.asset_finder.consume_metadata(asset_metadata)
if identifiers is not None:
self.asset_finder.consume_identifiers(identifiers)
def normalize_date(self, test_date):
test_date = pd.Timestamp(test_date, tz='UTC')
return pd.tseries.tools.normalize_date(test_date)
def utc_dt_in_exchange(self, dt):
return pd.Timestamp(dt).tz_convert(self.exchange_tz)
def exchange_dt_in_utc(self, dt):
return pd.Timestamp(dt, tz=self.exchange_tz).tz_convert('UTC')
def is_market_hours(self, test_date):
if not self.is_trading_day(test_date):
return False
mkt_open, mkt_close = self.get_open_and_close(test_date)
        return mkt_open <= test_date <= mkt_close
def is_trading_day(self, test_date):
dt = self.normalize_date(test_date)
return (dt in self.trading_days)
def next_trading_day(self, test_date):
dt = self.normalize_date(test_date)
delta = datetime.timedelta(days=1)
while dt <= self.last_trading_day:
dt += delta
if dt in self.trading_days:
return dt
return None
def previous_trading_day(self, test_date):
dt = self.normalize_date(test_date)
delta = datetime.timedelta(days=-1)
while self.first_trading_day < dt:
dt += delta
if dt in self.trading_days:
return dt
return None
def add_trading_days(self, n, date):
"""
Adds n trading days to date. If this would fall outside of the
trading calendar, a NoFurtherDataError is raised.
:Arguments:
n : int
The number of days to add to date, this can be positive or
negative.
date : datetime
The date to add to.
:Returns:
new_date : datetime
n trading days added to date.
"""
if n == 1:
return self.next_trading_day(date)
if n == -1:
return self.previous_trading_day(date)
idx = self.get_index(date) + n
if idx < 0 or idx >= len(self.trading_days):
raise NoFurtherDataError('Cannot add %d days to %s' % (n, date))
return self.trading_days[idx]
def days_in_range(self, start, end):
mask = ((self.trading_days >= start) &
(self.trading_days <= end))
return self.trading_days[mask]
def opens_in_range(self, start, end):
return self.open_and_closes.market_open.loc[start:end]
def closes_in_range(self, start, end):
return self.open_and_closes.market_close.loc[start:end]
def minutes_for_days_in_range(self, start, end):
"""
Get all market minutes for the days between start and end, inclusive.
"""
start_date = self.normalize_date(start)
end_date = self.normalize_date(end)
all_minutes = []
for day in self.days_in_range(start_date, end_date):
day_minutes = self.market_minutes_for_day(day)
all_minutes.append(day_minutes)
# Concatenate all minutes and truncate minutes before start/after end.
return pd.DatetimeIndex(
np.concatenate(all_minutes), copy=False, tz='UTC',
)
def next_open_and_close(self, start_date):
"""
Given the start_date, returns the next open and close of
the market.
"""
next_open = self.next_trading_day(start_date)
if next_open is None:
raise NoFurtherDataError(
"Attempt to backtest beyond available history. \
Last successful date: %s" % self.last_trading_day)
return self.get_open_and_close(next_open)
def previous_open_and_close(self, start_date):
"""
Given the start_date, returns the previous open and close of the
market.
"""
previous = self.previous_trading_day(start_date)
if previous is None:
raise NoFurtherDataError(
"Attempt to backtest beyond available history. "
"First successful date: %s" % self.first_trading_day)
return self.get_open_and_close(previous)
def next_market_minute(self, start):
"""
Get the next market minute after @start. This is either the immediate
next minute, or the open of the next market day after start.
"""
next_minute = start + datetime.timedelta(minutes=1)
if self.is_market_hours(next_minute):
return next_minute
return self.next_open_and_close(start)[0]
def previous_market_minute(self, start):
"""
        Get the last market minute before @start. This is either the immediate
previous minute, or the close of the market day before start.
"""
prev_minute = start - datetime.timedelta(minutes=1)
if self.is_market_hours(prev_minute):
return prev_minute
return self.previous_open_and_close(start)[1]
def get_open_and_close(self, day):
index = self.open_and_closes.index.get_loc(day.date())
todays_minutes = self.open_and_closes.values[index]
return todays_minutes[0], todays_minutes[1]
def market_minutes_for_day(self, stamp):
market_open, market_close = self.get_open_and_close(stamp)
return pd.date_range(market_open, market_close, freq='T')
def open_close_window(self, start, count, offset=0, step=1):
"""
Return a DataFrame containing `count` market opens and closes,
beginning with `start` + `offset` days and continuing `step` minutes at
a time.
"""
# TODO: Correctly handle end of data.
start_idx = self.get_index(start) + offset
stop_idx = start_idx + (count * step)
index = np.arange(start_idx, stop_idx, step)
return self.open_and_closes.iloc[index]
def market_minute_window(self, start, count, step=1):
"""
Return a DatetimeIndex containing `count` market minutes, starting with
`start` and continuing `step` minutes at a time.
"""
if not self.is_market_hours(start):
raise ValueError("market_minute_window starting at "
"non-market time {minute}".format(minute=start))
all_minutes = []
current_day_minutes = self.market_minutes_for_day(start)
first_minute_idx = current_day_minutes.searchsorted(start)
minutes_in_range = current_day_minutes[first_minute_idx::step]
# Build up list of lists of days' market minutes until we have count
# minutes stored altogether.
while True:
if len(minutes_in_range) >= count:
# Truncate off extra minutes
minutes_in_range = minutes_in_range[:count]
all_minutes.append(minutes_in_range)
count -= len(minutes_in_range)
if count <= 0:
break
if step > 0:
start, _ = self.next_open_and_close(start)
current_day_minutes = self.market_minutes_for_day(start)
else:
_, start = self.previous_open_and_close(start)
current_day_minutes = self.market_minutes_for_day(start)
minutes_in_range = current_day_minutes[::step]
# Concatenate all the accumulated minutes.
return pd.DatetimeIndex(
np.concatenate(all_minutes), copy=False, tz='UTC',
)
def trading_day_distance(self, first_date, second_date):
first_date = self.normalize_date(first_date)
second_date = self.normalize_date(second_date)
# TODO: May be able to replace the following with searchsorted.
# Find leftmost item greater than or equal to day
i = bisect.bisect_left(self.trading_days, first_date)
if i == len(self.trading_days): # nothing found
return None
j = bisect.bisect_left(self.trading_days, second_date)
if j == len(self.trading_days):
return None
return j - i
def get_index(self, dt):
"""
Return the index of the given @dt, or the index of the preceding
trading day if the given dt is not in the trading calendar.
"""
ndt = self.normalize_date(dt)
if ndt in self.trading_days:
return self.trading_days.searchsorted(ndt)
else:
return self.trading_days.searchsorted(ndt) - 1
class SimulationParameters(object):
def __init__(self, period_start, period_end,
capital_base=10e3,
emission_rate='daily',
data_frequency='daily'):
self.period_start = period_start
self.period_end = period_end
self.capital_base = capital_base
self.emission_rate = emission_rate
self.data_frequency = data_frequency
# copied to algorithm's environment for runtime access
self.arena = 'backtest'
self._update_internal()
def _update_internal(self):
# This is the global environment for trading simulation.
environment = TradingEnvironment.instance()
assert self.period_start <= self.period_end, \
"Period start falls after period end."
assert self.period_start <= environment.last_trading_day, \
"Period start falls after the last known trading day."
assert self.period_end >= environment.first_trading_day, \
"Period end falls before the first known trading day."
self.first_open = self.calculate_first_open()
self.last_close = self.calculate_last_close()
start_index = \
environment.get_index(self.first_open)
end_index = environment.get_index(self.last_close)
# take an inclusive slice of the environment's
# trading_days.
self.trading_days = \
environment.trading_days[start_index:end_index + 1]
def calculate_first_open(self):
"""
Finds the first trading day on or after self.period_start.
"""
first_open = self.period_start
one_day = datetime.timedelta(days=1)
while not environment.is_trading_day(first_open):
first_open = first_open + one_day
mkt_open, _ = environment.get_open_and_close(first_open)
return mkt_open
def calculate_last_close(self):
"""
Finds the last trading day on or before self.period_end
"""
last_close = self.period_end
one_day = datetime.timedelta(days=1)
while not environment.is_trading_day(last_close):
last_close = last_close - one_day
_, mkt_close = environment.get_open_and_close(last_close)
return mkt_close
@property
def days_in_period(self):
"""return the number of trading days within the period [start, end)"""
return len(self.trading_days)
def __repr__(self):
return """
{class_name}(
period_start={period_start},
period_end={period_end},
capital_base={capital_base},
data_frequency={data_frequency},
emission_rate={emission_rate},
first_open={first_open},
last_close={last_close})\
""".format(class_name=self.__class__.__name__,
period_start=self.period_start,
period_end=self.period_end,
capital_base=self.capital_base,
data_frequency=self.data_frequency,
emission_rate=self.emission_rate,
first_open=self.first_open,
last_close=self.last_close)
def with_environment(asname='env'):
"""
Decorator to automagically pass TradingEnvironment to the function
under the name asname. If the environment is passed explicitly as a keyword
then the explicitly passed value will be used instead.
usage:
        @with_environment()
        def f(env=None):
            pass

        @with_environment(asname='my_env')
        def g(my_env=None):
            pass
"""
def with_environment_decorator(f):
@wraps(f)
def wrapper(*args, **kwargs):
# inject env into the namespace for the function.
# This doesn't use setdefault so that grabbing the trading env
# is lazy.
if asname not in kwargs:
kwargs[asname] = TradingEnvironment.instance()
return f(*args, **kwargs)
return wrapper
return with_environment_decorator
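# Minimal usage sketch (illustrative only, not part of the original module):
# exercises with_environment() above. Note that instantiating
# TradingEnvironment invokes load_market_data, which may fetch benchmark and
# treasury data, so this is guarded and intended purely as an example.
if __name__ == '__main__':
    @with_environment()
    def calendar_bounds(env=None):
        # `env` is injected lazily by the decorator when not supplied.
        return env.first_trading_day, env.last_trading_day
    print(calendar_bounds())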
| euri10/zipline | zipline/finance/trading.py | Python | apache-2.0 | 19,394 |
# Copyright 2015 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" mongodb """
import logging
import pymongo
LOG = logging.getLogger(__name__)
class MongoApi(object):
"""Class handling MongoDB specific functionality.
This class uses PyMongo APIs internally to create database connection,
also serves as handle to collection for get/set operations, etc.
"""
# class level attributes for re-use of db client connection and collection
_DB = {} # dict of db_name: db connection reference
_MONGO_COLLS = {} # dict of cache_collection : db collection reference
def __init__(self, connection_url, db_name, use_replica=False, replica_name='rs1',
read_preference=3, write_concern=-1, w_timeout=5000):
"""for parameters details, please see
http://api.mongodb.org/python/current/api/pymongo/mongo_client.html
:param connection_url: string like mongodb://localhost:27017
:param db_name: database name, string
        :param use_replica: whether to use a replica set, True or False
        :param replica_name: if use_replica is True, the replica set name
        :param read_preference: read preference, integer
        :param write_concern: write concern, integer
        :param w_timeout: write concern timeout in milliseconds, integer
"""
LOG.debug('Creating MongoDB API class')
        self.connection_url = connection_url
self.db_name = db_name
self.collection_name = None
# config about replica set
self.use_replica = use_replica
self.replica_name = replica_name
self.read_preference = read_preference
# Write Concern options:
self.w = write_concern
self.w_timeout = w_timeout
def _get_db(self):
"""
get database
:return: database
"""
try:
            if self.use_replica:  # if use replica set configuration
                connection = pymongo.MongoClient(
                    self.connection_url, replicaSet=self.replica_name)
            else:  # use for standalone node or mongos in sharded setup
                connection = pymongo.MongoClient(self.connection_url)
except pymongo.errors.ConnectionFailure as e:
            LOG.warning('Unable to connect to the database server, %s', e)
raise
database = getattr(connection, self.db_name)
return database
def _close_db(self):
if self.db_name in self._DB:
self._DB[self.db_name].client.close()
self._DB.pop(self.db_name)
def get_collection(self):
"""
get collection
:return: database collection
"""
if self.collection_name not in self._MONGO_COLLS:
if self.db_name not in self._DB:
self._DB[self.db_name] = self._get_db()
coll = getattr(self._DB[self.db_name], self.collection_name)
if self.use_replica:
# for read preference
# TODO(xiaoquwl) fix this as more elegant way in future
                if self.read_preference is not None:
                    read_preferences = {
                        0: pymongo.ReadPreference.PRIMARY,
                        1: pymongo.ReadPreference.PRIMARY_PREFERRED,
                        2: pymongo.ReadPreference.SECONDARY,
                        3: pymongo.ReadPreference.SECONDARY_PREFERRED,
                        4: pymongo.ReadPreference.NEAREST,
                    }
                    if self.read_preference in read_preferences:
                        # with_options() returns a new Collection; the
                        # original code discarded the return value.
                        coll = coll.with_options(
                            read_preference=read_preferences[
                                self.read_preference])
                    else:
                        LOG.error('unknown read preference setting')
# for write concern
if self.w > -1:
coll.write_concern['w'] = self.w
                coll.write_concern['wtimeout'] = self.w_timeout
self._MONGO_COLLS[self.collection_name] = coll
return self._MONGO_COLLS[self.collection_name]
def remove_collection(self):
self._DB[self.db_name].drop_collection(self.collection_name)
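if __name__ == '__main__':
    # Usage sketch (illustrative only): the URL, database, and collection
    # names below are hypothetical and assume a reachable MongoDB instance.
    api = MongoApi('mongodb://localhost:27017', 'yarib_demo')
    api.collection_name = 'example'  # callers choose the collection per use
    coll = api.get_collection()
    coll.insert_one({'msg_type': 'update'})
    print(coll.find_one())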
| smartbgp/yarib | yarib/db/mongodb.py | Python | apache-2.0 | 4,903 |
__author__ = 'robert'
from pypet import Environment, Trajectory
from pypet.tests.testutils.ioutils import make_temp_dir, get_log_config
import os
import matplotlib.pyplot as plt
import numpy as np
import time
def job(traj):
traj.f_ares('$set.$', 42, comment='A result')
def get_runtime(length):
filename = os.path.join('tmp', 'hdf5', 'many_runs.hdf5')
with Environment(filename = filename,
log_levels=50, report_progress=(0.0002, 'progress', 50),
overwrite_file=True, purge_duplicate_comments=False,
log_stdout=False,
multiproc=False, ncores=2, use_pool=True,
wrap_mode='PIPE', #freeze_input=True,
summary_tables=False, small_overview_tables=False) as env:
traj = env.v_traj
traj.par.f_apar('x', 0, 'parameter')
traj.f_explore({'x': range(length)})
# traj.v_full_copy = False
max_run = 1000
for idx in range(len(traj)):
if idx > max_run:
traj.f_get_run_information(idx, copy=False)['completed'] = 1
start = time.time()
env.f_run(job)
end = time.time()
# dicts = [traj.f_get_run_information(x) for x in range(min(len(traj), max_run))]
total = end - start
return total/float(min(len(traj), max_run)), total/float(min(len(traj), max_run)) * len(traj)
def main():
#lengths = [1000000, 500000, 100000, 50000, 10000, 5000, 1000, 500, 100, 50, 10, 5, 1]
lengths = [100000, 50000, 10000, 5000, 1000, 500, 100, 50, 10, 5, 1]
runtimes = [get_runtime(x) for x in lengths]
avg_runtimes = [x[0] for x in runtimes]
summed_runtime = [x[1] for x in runtimes]
plt.subplot(2, 1, 1)
plt.semilogx(list(reversed(lengths)), list(reversed(avg_runtimes)), linewidth=2)
plt.xlabel('Runs')
plt.ylabel('t[s]')
plt.title('Average Runtime per single run')
plt.grid()
plt.subplot(2, 1, 2)
plt.loglog(lengths, summed_runtime, linewidth=2)
plt.grid()
plt.xlabel('Runs')
plt.ylabel('t[s]')
plt.title('Total runtime of experiment')
    plt.savefig('avg_runtime_as_func_of_length_1000_single_core')
plt.show()
if __name__ == '__main__':
main() | nigroup/pypet | pypet/tests/profiling/speed_analysis/avg_runtima_as_function_of_length.py | Python | bsd-3-clause | 2,266 |
import urllib,urllib2,re,string,sys,os
import xbmc, xbmcgui, xbmcaddon, xbmcplugin
from resources.libs import main
#Mash Up - by Mash2k3 2012.
addon_id = 'plugin.video.movie25'
selfAddon = xbmcaddon.Addon(id=addon_id)
art = main.art
prettyName = 'iWatchOnline'
def AtoZiWATCHtv():
main.addDir('0-9','http://www.iwatchonline.to/tv-show?startwith=09&p=0',589,art+'/09.png')
for i in string.ascii_uppercase:
main.addDir(i,'http://www.iwatchonline.to/tv-show?startwith='+i.lower()+'&p=0',589,art+'/'+i.lower()+'.png')
main.GA("Tvshows","A-ZTV")
main.VIEWSB()
def AtoZiWATCHm():
main.addDir('0-9','http://www.iwatchonline.to/movies?startwith=09&p=0',587,art+'/09.png')
for i in string.ascii_uppercase:
main.addDir(i,'http://www.iwatchonline.to/movies?startwith='+i.lower()+'&p=0',587,art+'/'+i.lower()+'.png')
main.GA("Movies","A-ZM")
main.VIEWSB()
def iWatchMAIN():
main.addDir('Movies','http://www.iwatchonline.org/',586,art+'/iwatchm.png')
main.addDir('Tv Shows','http://www.iwatchonline.org/',585,art+'/iwatcht.png')
main.addDir('Todays Episodes','http://www.iwatchonline.to/tv-schedule',592,art+'/iwatcht.png')
main.GA("Plugin","iWatchonline")
main.VIEWSB2()
def iWatchMOVIES():
main.addDir('Search Movies','http://www.iwatchonline.to',644,art+'/search.png')
main.addDir('A-Z','http://www.iwatchonline.to',595,art+'/az.png')
main.addDir('Popular','http://www.iwatchonline.to/movies?sort=popular&p=0',587,art+'/iwatchm.png')
main.addDir('Latest Added','http://www.iwatchonline.to/movies?sort=latest&p=0',587,art+'/iwatchm.png')
main.addDir('Featured Movies','http://www.iwatchonline.to/movies?sort=featured&p=0',587,art+'/iwatchm.png')
main.addDir('Latest HD Movies','http://www.iwatchonline.to/movies?quality=hd&sort=latest&p=0',587,art+'/iwatchm.png')
main.addDir('Upcoming','http://www.iwatchonline.to/movies?sort=upcoming&p=0',587,art+'/iwatchm.png')
main.addDir('Genre','http://www.iwatchonline.to',596,art+'/genre.png')
main.GA("iWatchonline","Movies")
main.VIEWSB2()
def iWatchTV():
main.addDir('Search TV Shows','http://www.iwatchonline.to',642,art+'/search.png')
main.addDir('A-Z','http://www.iwatchonline.to',593,art+'/az.png')
main.addDir('Todays Episodes','http://www.iwatchonline.to/tv-schedule',592,art+'/iwatcht.png')
main.addDir('Featured Shows','http://www.iwatchonline.to/tv-show?sort=featured&p=0',589,art+'/iwatcht.png')
main.addDir('Popular Shows','http://www.iwatchonline.to/tv-show?sort=popular&p=0',589,art+'/iwatcht.png')
main.addDir('Latest Additions','http://www.iwatchonline.to/tv-show?sort=latest&p=0',589,art+'/iwatcht.png')
main.addDir('Genre','http://www.iwatchonline.to',594,art+'/genre.png')
main.GA("iWatchonline","Tvshows")
main.VIEWSB2()
def SearchhistoryTV():
seapath=os.path.join(main.datapath,'Search')
SeaFile=os.path.join(seapath,'SearchHistoryTv')
if not os.path.exists(SeaFile):
SEARCHTV()
else:
main.addDir('Search TV Shows','###',643,art+'/search.png')
main.addDir('Clear History',SeaFile,128,art+'/cleahis.png')
thumb=art+'/link.png'
searchis=re.compile('search="(.+?)",').findall(open(SeaFile,'r').read())
for seahis in reversed(searchis):
seahis=seahis.replace('%20',' ')
url=seahis
main.addDir(seahis,url,643,thumb)
def superSearch(encode,type):
try:
if type=='Movies': type='m'
else: type='t'
returnList=[]
search_url = 'http://www.iwatchonline.to/search'
from t0mm0.common.net import Net as net
search_content = net().http_POST(search_url, { 'searchquery' : encode, 'searchin' : type} ).content.encode('utf-8')
r = re.findall('(?s)<table(.+?)</table>',search_content)
r=main.unescapes(r[0])
match=re.compile('<img.+?src=\"(.+?)\".+?<a.+?href=\"(.+?)\">(.+?)</a>').findall(r)
for thumb,url,name in match:
if type=='m':
returnList.append((name,prettyName,url,thumb,588,True))
else:
returnList.append((name,prettyName,url,thumb,590,True))
return returnList
except: return []
def SEARCHTV(murl = ''):
encode = main.updateSearchFile(murl,'TV')
if not encode: return False
search_url = 'http://www.iwatchonline.to/search'
from t0mm0.common.net import Net as net
search_content = net().http_POST(search_url, { 'searchquery' : encode, 'searchin' : 't'} ).content.encode('utf-8')
r = re.findall('(?s)<table(.+?)</table>',search_content)
r=main.unescapes(r[0])
match=re.compile('<img[^>]+?src="([^"]+?)\".+?<a[^>]+?href="([^"]+?)">([^<]+?)</a>').findall(r)
for thumb,url,name in match:
main.addDirT(name,url,590,thumb,'','','','','')
main.GA("iWatchonline","Search")
def SearchhistoryM():
seapath=os.path.join(main.datapath,'Search')
SeaFile=os.path.join(seapath,'SearchHistory25')
if not os.path.exists(SeaFile):
SEARCHM('')
else:
main.addDir('Search Movies','###',645,art+'/search.png')
main.addDir('Clear History',SeaFile,128,art+'/cleahis.png')
thumb=art+'/link.png'
searchis=re.compile('search="(.+?)",').findall(open(SeaFile,'r').read())
for seahis in reversed(searchis):
seahis=seahis.replace('%20',' ')
url=seahis
main.addDir(seahis,url,645,thumb)
def SEARCHM(murl):
encode = main.updateSearchFile(murl,'Movies')
if not encode: return False
search_url = 'http://www.iwatchonline.to/search'
from t0mm0.common.net import Net as net
search_content = net().http_POST(search_url, { 'searchquery' : encode, 'searchin' : 'm'} ).content.encode('utf-8')
r = re.findall('(?s)<table(.+?)</table>',search_content)
r=main.unescapes(r[0])
match=re.compile('<img.+?src=\"(.+?)\".+?<a.+?href=\"(.+?)\">(.+?)</a>').findall(r)
dialogWait = xbmcgui.DialogProgress()
ret = dialogWait.create('Please wait until Movie list is cached.')
totalLinks = len(match)
loadedLinks = 0
remaining_display = 'Movies loaded :: [B]'+str(loadedLinks)+' / '+str(totalLinks)+'[/B].'
dialogWait.update(0,'[B]Will load instantly from now on[/B]',remaining_display)
xbmc.executebuiltin("XBMC.Dialog.Close(busydialog,true)")
for thumb,url,name in match:
main.addDirM(name,url,588,thumb,'','','','','')
loadedLinks = loadedLinks + 1
percent = (loadedLinks * 100)/totalLinks
remaining_display = 'Movies loaded :: [B]'+str(loadedLinks)+' / '+str(totalLinks)+'[/B].'
dialogWait.update(percent,'[B]Will load instantly from now on[/B]',remaining_display)
if dialogWait.iscanceled(): return False
dialogWait.close()
del dialogWait
main.GA("iWatchonline","Search")
def ENTYEAR():
dialog = xbmcgui.Dialog()
d = dialog.numeric(0, 'Enter Year')
if d:
encode=urllib.quote(d)
if encode < '2014' and encode > '1900':
surl='http://www.iwatchonline.to/main/content_more/movies/?year='+encode+'&start=0'
iWatchLISTMOVIES(surl)
else:
dialog = xbmcgui.Dialog()
        ret = dialog.ok('Wrong Entry', 'Must enter year in four-digit format like 1999', 'Entry must be between 1900 and 2014')
def GotoPage(url):
dialog = xbmcgui.Dialog()
r=re.findall('http://www.iwatchonline.to/movies(.+?)&p=.+?',url)
d = dialog.numeric(0, 'Please Enter Page number.')
if d:
temp=int(d)-1
page= int(temp)*25
encode=str(page)
url='http://www.iwatchonline.to/movies'+r[0]
surl=url+'&p='+encode
iWatchLISTMOVIES(surl)
else:
dialog = xbmcgui.Dialog()
xbmcplugin.endOfDirectory(int(sys.argv[1]), False, False)
return False
def iWatchGenreTV():
link=main.OPENURL('http://www.iwatchonline.to/tv-show')
link=link.replace('\r','').replace('\n','').replace('\t','').replace(' ','')
match=re.compile('<li.+?a href=".?gener=([^<]+)">(.+?)</a>.+?/li>').findall(link)
for url,genre in match:
genre=genre.replace(' ','')
        if 'Adult' not in genre:
main.addDir(genre,'http://www.iwatchonline.to/tv-show?sort=popular&gener='+url+'',589,art+'/folder.png')
main.GA("Tvshows","GenreT")
def iWatchGenreM():
link=main.OPENURL('http://www.iwatchonline.to/movies')
link=link.replace('\r','').replace('\n','').replace('\t','').replace(' ','')
match=re.compile('<li.+?a href=".?gener=([^<]+)">(.+?)</a>.+?/li>').findall(link)
for url,genre in match:
genre=genre.replace(' ','')
        if 'Adult' not in genre:
main.addDir(genre,'http://www.iwatchonline.to/movies?sort=popular&gener='+url+'&p=0',587,art+'/folder.png')
main.GA("Movies","GenreM")
def iWatchYearM():
main.addDir('2013','http://www.iwatchonline.to/main/content_more/movies/?year=2013&start=0',587,art+'/year.png')
main.addDir('2012','http://www.iwatchonline.to/main/content_more/movies/?year=2012&start=0',587,art+'/2012.png')
main.addDir('2011','http://www.iwatchonline.to/main/content_more/movies/?year=2011&start=0',587,art+'/2011.png')
main.addDir('2010','http://www.iwatchonline.to/main/content_more/movies/?year=2010&start=0',587,art+'/2010.png')
main.addDir('2009','http://www.iwatchonline.to/main/content_more/movies/?year=2009&start=0',587,art+'/2009.png')
main.addDir('2008','http://www.iwatchonline.to/main/content_more/movies/?year=2008&start=0',587,art+'/2008.png')
main.addDir('2007','http://www.iwatchonline.to/main/content_more/movies/?year=2007&start=0',587,art+'/2007.png')
main.addDir('2006','http://www.iwatchonline.to/main/content_more/movies/?year=2006&start=0',587,art+'/2006.png')
main.addDir('2005','http://www.iwatchonline.to/main/content_more/movies/?year=2005&start=0',587,art+'/2005.png')
main.addDir('2004','http://www.iwatchonline.to/main/content_more/movies/?year=2004&start=0',587,art+'/2004.png')
main.addDir('2003','http://www.iwatchonline.to/main/content_more/movies/?year=2003&start=0',587,art+'/2003.png')
main.addDir('Enter Year','iwatchonline',653,art+'/enteryear.png')
def iWatchLISTMOVIES(murl):
main.GA("Movies","List")
link=main.OPENURL(murl)
link=link.replace('\r','').replace('\n','').replace('\t','').replace(' ','')
videos = re.search('<ul class="thumbnails">(.+?)</ul>', link)
if videos:
videos = videos.group(1)
match=re.compile('<li.+?<a.+?href=\"(.+?)\".+?<img.+?src=\"(.+?)\".+?<div class=\"title.+?>(.+?)<div').findall(videos)
dialogWait = xbmcgui.DialogProgress()
ret = dialogWait.create('Please wait until Movie list is cached.')
totalLinks = len(match)
loadedLinks = 0
remaining_display = 'Movies loaded :: [B]'+str(loadedLinks)+' / '+str(totalLinks)+'[/B].'
dialogWait.update(0,'[B]Will load instantly from now on[/B]',remaining_display)
xbmc.executebuiltin("XBMC.Dialog.Close(busydialog,true)")
for url,thumb,name in match:
main.addDirIWO(name,url,588,thumb,'','','','','')
loadedLinks = loadedLinks + 1
percent = (loadedLinks * 100)/totalLinks
remaining_display = 'Movies loaded :: [B]'+str(loadedLinks)+' / '+str(totalLinks)+'[/B].'
dialogWait.update(percent,'[B]Will load instantly from now on[/B]',remaining_display)
if (dialogWait.iscanceled()):
return False
dialogWait.close()
del dialogWait
if len(match)==25:
paginate=re.compile('([^<]+)&p=([^<]+)').findall(murl)
for purl,page in paginate:
i=int(page)+25
pg=(int(page)/25)+2
# if pg >2:
# main.addDir('[COLOR red]Home[/COLOR]','',2000,art+'/home.png')
main.addDir('[COLOR red]Enter Page #[/COLOR]',murl,654,art+'/gotopage.png')
main.addDir('[COLOR blue]Page '+ str(pg)+'[/COLOR]',purl+'&p='+str(i),587,art+'/next2.png')
xbmcplugin.setContent(int(sys.argv[1]), 'Movies')
main.VIEWS()
def iWatchToday(murl):
main.GA("Tvshows","TodaysList")
link=main.OPENURL(murl)
daysback = 2
for x in range(0, daysback):
match = re.findall(r"</i></a> <a href='(.*?)'" , link)
if(match):
link = link + main.OPENURL("http://www.iwatchonline.to/tv-schedule" + match[x])
link=link.replace('\r','').replace('\n','').replace('\t','').replace(' ','')
link = re.sub('>\s*','>',link)
link = re.sub('\s*<','<',link)
match=re.compile('<img src="([^"]+?)"[^<]+?<br /><a href="([^"]+?)">(.+?)</a></td><td.+?>([^<]+?)</td><td.+?>([^<]+?)</td>.*?>(\d{,2}) Link\(s\)', re.M).findall(link)
dialogWait = xbmcgui.DialogProgress()
ret = dialogWait.create('Please wait until Show list is cached.')
totalLinks = len(match)
loadedLinks = 0
remaining_display = 'Episodes loaded :: [B]'+str(loadedLinks)+' / '+str(totalLinks)+'[/B].'
dialogWait.update(0,'[B]Will load instantly from now on[/B]',remaining_display)
xbmc.executebuiltin("XBMC.Dialog.Close(busydialog,true)")
for thumb,url,name,episea,epiname,active in match:
if(active == '0'):
totalLinks -= 1
continue
name=name.strip()
thumb=thumb.strip()
url=url.strip()
episea=episea.strip()
epiname=epiname.strip()
name=name.replace('(','').replace(')','')
name=name.replace('(\d{4})','')
main.addDirTE(name+' '+episea+' [COLOR blue]'+epiname+'[/COLOR]',url,588,thumb,'','','','','')
loadedLinks = loadedLinks + 1
percent = (loadedLinks * 100)/totalLinks
remaining_display = 'Episodes loaded :: [B]'+str(loadedLinks)+' / '+str(totalLinks)+'[/B].'
dialogWait.update(percent,'[B]Will load instantly from now on[/B]',remaining_display)
if (dialogWait.iscanceled()):
return False
dialogWait.close()
del dialogWait
def iWatchLISTSHOWS(murl):
main.GA("Tvshows","List")
link=main.OPENURL(murl)
link=link.replace('\r','').replace('\n','').replace('\t','').replace(' ','')
videos = re.search('<ul class="thumbnails">(.+?)</ul>', link)
if videos:
videos = videos.group(1)
match=re.compile('<li.+?<a[^>]+?href=\"([^"]+?)\".+?<img[^>]+?src=\"([^"]+?)\".+?<div class=\"title[^>]+?>([^>]+?)<div').findall(videos)
for url,thumb,name in match:
main.addDirT(name,url,590,thumb,'','','','','')
if len(match)==25:
paginate=re.compile('([^<]+)&p=([^<]+)').findall(murl)
for purl,page in paginate:
i=int(page)+25
main.addDir('[COLOR blue]Next[/COLOR]',purl+'&p='+str(i),589,art+'/next2.png')
xbmcplugin.setContent(int(sys.argv[1]), 'Movies')
main.VIEWS()
def iWatchSeason(name,murl,thumb):
link=main.OPENURL(murl)
link=link.replace('\r','').replace('\n','').replace('\t','').replace(' ','')
match=re.compile('<h5><i.+?</i>.*?(.+?)</h5>').findall(link)
for season in match:
main.addDir(name.strip()+' '+season.strip(),murl,591,thumb,'')
def GET_HTML(url):
req = urllib2.Request(url)
req.add_header('User-Agent', 'Mozilla/5.0 (iPhone; U; CPU like Mac OS X; en) AppleWebKit/420+ (KHTML, like Gecko) Version/3.0 Mobile/1A543 Safari/419.3')
response = urllib2.urlopen(req)
link = response.read()
response.close()
link = link.replace('\\','')
return link
def _decode_callback(matches):
id = matches.group(1)
try: return unichr(int(id))
except: return id
def decode(data):
return re.sub("&#(\d+)(;|(?=\s))", _decode_callback, data).strip()
def PANEL_REPLACER(content):
panel_exists = True
panel_id = 0
while panel_exists == True:
panel_name = "panel-id." + str(panel_id)
panel_search_pattern = "(?s)\"" + panel_name + "\"\:\[\{(.+?)\}\]"
panel_data = re.search(panel_search_pattern, content)
if panel_data:
panel_data = panel_data.group(1)
content = re.sub("begin " + panel_name, "-->" + panel_data + "<!--", content)
content = re.sub(panel_search_pattern, "panel used", content)
panel_id = panel_id + 1
else:
panel_exists = False
content = main.unescapes(content)
content = re.sub("\\\"", "\"", content)
from resources.universal import _common as univ_common
content = univ_common.str_conv(decode(content))
return content
def iWatchEpisode(mname,murl):
seanum = mname.split('Season ')[1]
tv_content=main.OPENURL(murl)
link = PANEL_REPLACER(tv_content)
descs=re.compile('<meta name="description" content="(.+?)">').findall(link)
if len(descs)>0: desc=descs[0]
else: desc=''
thumbs=re.compile('<div class="movie-cover span2"><img src="(.+?)" alt=".+?" class=".+?" />').findall(link)
if len(thumbs)>0: thumb=thumbs[0]
else: thumb=''
episodes = re.search('(?sim)season'+seanum+'(.+?)</table>', link)
if episodes:
episodes = episodes.group(1)
match=re.compile('<a[^>]+?href=\"([^"]+?)\".+?</i>([^<]+?)</a>.+?<td>([^<]+?)</td>').findall(episodes)
dialogWait = xbmcgui.DialogProgress()
ret = dialogWait.create('Please wait until Show list is cached.')
totalLinks = len(match)
loadedLinks = 0
remaining_display = 'Episodes loaded :: [B]'+str(loadedLinks)+' / '+str(totalLinks)+'[/B].'
dialogWait.update(0,'[B]Will load instantly from now on[/B]',remaining_display)
for url,epi,name in match:
mname=mname.replace('(','').replace(')','')
mname = re.sub(" \d{4}", "", mname)
sea=re.compile('s'+str(seanum)).findall(url)
if len(sea)>0:
main.addDirTE(mname.strip()+' '+epi.strip()+' [COLOR blue]'+name.strip()+'[/COLOR]',url,588,thumb,desc,'','','','')
loadedLinks = loadedLinks + 1
percent = (loadedLinks * 100)/totalLinks
remaining_display = 'Episodes loaded :: [B]'+str(loadedLinks)+' / '+str(totalLinks)+'[/B].'
dialogWait.update(percent,'[B]Will load instantly from now on[/B]',remaining_display)
if (dialogWait.iscanceled()):
return False
dialogWait.close()
del dialogWait
if selfAddon.getSetting('auto-view') == 'true':
xbmc.executebuiltin("Container.SetViewMode(%s)" % selfAddon.getSetting('episodes-view'))
def GetUrl(url):
link=main.OPENURL(url)
link=link.replace('\r','').replace('\n','').replace('\t','').replace(' ','')
match=re.compile('<iframe.+?src=\"(.+?)\"').findall(link)
link=match[0]
return link
def iWatchLINK(mname,url):
link=main.OPENURL(url)
movie_content = main.unescapes(link)
movie_content = re.sub("\\\"", "\"", movie_content)
movie_content=movie_content.replace('\'','')
from resources.universal import _common as univ_common
link2 = univ_common.str_conv(decode(movie_content))
if selfAddon.getSetting("hide-download-instructions") != "true":
main.addLink("[COLOR red]For Download Options, Bring up Context Menu Over Selected Link.[/COLOR]",'','')
links = re.search('<tbody>(.+?)</tbody>', link2)
if links:
links = links.group(1)
match=re.compile('<a href="([^"]+?)".+?<img.+?> ([^<]+?)</a>.+?<td>.+?<td>.+?<td>([^<]+?)</td>', re.DOTALL).findall(links)
import urlresolver
for url, name, qua in match:
name=name.replace(' ','')
if name[0:1]=='.':
name=name[1:]
name=name.split('.')[0]
hosted_media = urlresolver.HostedMediaFile(host=name.lower(), media_id=name.lower())
if hosted_media:
main.addDown2(mname+' [COLOR red]('+qua+')[/COLOR]'+' [COLOR blue]'+name.upper()+'[/COLOR]',url,649,art+'/hosts/'+name.lower()+'.png',art+'/hosts/'+name.lower()+'.png')
def iWatchLINKB(mname,url):
main.GA("iWatchonline","Watched")
ok=True
hname=mname
xbmc.executebuiltin("XBMC.Notification(Please Wait!,Opening Link,3000)")
mname=mname.split(' [COLOR red]')[0]
r = re.findall('Season(.+?)Episode([^<]+)',mname)
if r:
infoLabels =main.GETMETAEpiT(mname,'','')
video_type='episode'
season=infoLabels['season']
episode=infoLabels['episode']
else:
infoLabels =main.GETMETAT(mname,'','','')
video_type='movie'
season=''
episode=''
img=infoLabels['cover_url']
fanart =infoLabels['backdrop_url']
imdb_id=infoLabels['imdb_id']
infolabels = { 'supports_meta' : 'true', 'video_type':video_type, 'name':str(infoLabels['title']), 'imdb_id':str(infoLabels['imdb_id']), 'season':str(season), 'episode':str(episode), 'year':str(infoLabels['year']) }
link=main.OPENURL(url)
link=main.unescapes(link)
match=re.compile('<(?:iframe|pagespeed_iframe).+?src=\"(.+?)\"').findall(link)
try :
xbmc.executebuiltin("XBMC.Notification(Please Wait!,Resolving Link,3000)")
stream_url = main.resolve_url(match[0])
if stream_url == False: return
infoL={'Title': infoLabels['title'], 'Plot': infoLabels['plot'], 'Genre': infoLabels['genre'],'originalTitle': main.removeColoredText(infoLabels['title'])}
# play with bookmark
from resources.universal import playbackengine
player = playbackengine.PlayWithoutQueueSupport(resolved_url=stream_url, addon_id=addon_id, video_type=video_type, title=str(infoLabels['title']),season=str(season), episode=str(episode), year=str(infoLabels['year']),img=img,infolabels=infoL, watchedCallbackwithParams=main.WatchedCallbackwithParams,imdb_id=imdb_id)
#WatchHistory
if selfAddon.getSetting("whistory") == "true":
from resources.universal import watchhistory
wh = watchhistory.WatchHistory('plugin.video.movie25')
wh.add_item(hname+' '+'[COLOR green]'+prettyName+'[/COLOR]', sys.argv[0]+sys.argv[2], infolabels=infolabels, img=str(img), fanart=str(fanart), is_folder=False)
player.KeepAlive()
return ok
except Exception, e:
if stream_url != False:
main.ErrorReport(e)
return ok
| marduk191/plugin.video.movie25 | resources/libs/plugins/iwatchonline.py | Python | gpl-3.0 | 22,479 |
#!/usr/bin/env python3
"""
Calculate mean and standard deviation of column data using Welford's one-pass algorithm.
"""
import sys
import numpy as np
np.set_printoptions(formatter={"float": lambda x: "{0:10.3f}".format(x)})
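# Welford's one-pass recurrences, applied per column in average_columns():
#   delta  = x_n - mean_{n-1}
#   mean_n = mean_{n-1} + delta / n
#   M2_n   = M2_{n-1} + delta * (x_n - mean_n)
# `sdev` accumulates M2 during the pass; the sample standard deviation is
# sqrt(M2_N / (N - 1)) once all lines are consumed.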
def average_columns(filename):
with open(filename) as f:
mean, sdev = None, None
nlines = 0
for line in f:
if not line.strip():
continue
try:
cols = np.array(list(map(float, line.split())))
except ValueError:
continue
if mean is None:
mean = np.zeros(len(cols))
sdev = np.zeros(len(cols))
nlines += 1
delta = cols - mean
mean += delta / nlines
sdev += delta * (cols - mean)
sdev = np.sqrt(sdev / (nlines - 1))
print("File: {0}".format(filename))
print("Mean: {0}".format(mean))
print("Sdev: {0}".format(sdev))
print()
return (mean, sdev)
def compare_files(filename1, filename2):
means = (average_columns(filename1)[0], average_columns(filename2)[0])
print("Diff: {0}".format(means[1] - means[0]))
print("%Dif: {0}".format(100. * (means[1] - means[0]) / means[0]))
if __name__ == "__main__":
if len(sys.argv) == 2:
average_columns(sys.argv[1])
elif len(sys.argv) == 3:
compare_files(sys.argv[1], sys.argv[2])
else:
print("Give one filename to average columns, or two filenames to compare.")
| jag1g13/pycgtool | doc/tutorial_files/average_columns.py | Python | gpl-3.0 | 1,517 |
class Coordinate:
    """A simple mutable 2D coordinate."""
    def __init__(self, coordX=0, coordY=0):
        self.coordX = coordX
        self.coordY = coordY
    def set(self, coordX, coordY):
        """Update both components and return them as a tuple."""
        self.coordX = coordX
        self.coordY = coordY
        return coordX, coordY
if __name__ == '__main__':
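    # Demo (illustrative only): exercise the small Coordinate API.
    point = Coordinate()
    print(point.set(3, 4))  # -> (3, 4)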
| hemmerling/codingdojo | src/game_of_life/python_coderetreat_socramob/cr_socramob05/coordinate.py | Python | apache-2.0 | 340 |
# -*- python -*-
# Copyright (C) 2009-2014 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import gdb
import os
import os.path
pythondir = '/home/build/work/GCC-4-9-build/install-native/share/gcc-arm-none-eabi'
libdir = '/home/build/work/GCC-4-9-build/install-native/arm-none-eabi/lib/armv7-m'
# This file might be loaded when there is no current objfile. This
# can happen if the user loads it manually. In this case we don't
# update sys.path; instead we just hope the user managed to do that
# beforehand.
if gdb.current_objfile () is not None:
# Update module path. We want to find the relative path from libdir
# to pythondir, and then we want to apply that relative path to the
# directory holding the objfile with which this file is associated.
# This preserves relocatability of the gcc tree.
# Do a simple normalization that removes duplicate separators.
pythondir = os.path.normpath (pythondir)
libdir = os.path.normpath (libdir)
prefix = os.path.commonprefix ([libdir, pythondir])
# In some bizarre configuration we might have found a match in the
# middle of a directory name.
if prefix[-1] != '/':
prefix = os.path.dirname (prefix) + '/'
# Strip off the prefix.
pythondir = pythondir[len (prefix):]
libdir = libdir[len (prefix):]
# Compute the ".."s needed to get from libdir to the prefix.
dotdots = ('..' + os.sep) * len (libdir.split (os.sep))
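    # Worked example with the paths above: the common prefix is
    # ".../install-native/", so libdir reduces to "arm-none-eabi/lib/armv7-m"
    # (three path components) and dotdots becomes "../../../". Joining the
    # objfile's directory with dotdots + pythondir then rebuilds
    # ".../share/gcc-arm-none-eabi" no matter where the tree was relocated.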
objfile = gdb.current_objfile ().filename
dir_ = os.path.join (os.path.dirname (objfile), dotdots, pythondir)
if not dir_ in sys.path:
sys.path.insert(0, dir_)
# Load the pretty-printers.
from libstdcxx.v6.printers import register_libstdcxx_printers
register_libstdcxx_printers (gdb.current_objfile ())
| trfiladelfo/tdk | gcc-arm-none-eabi/arm-none-eabi/lib/armv7-m/libstdc++.a-gdb.py | Python | mit | 2,397 |
"""
Copyright 2015 Erik Perillo <erik.perillo@gmail.com>
This file is part of pig.
This is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this. If not, see <http://www.gnu.org/licenses/>.
"""
from Adafruit_I2C import Adafruit_I2C
import time
import sys
import math
import pig.core
class Compass(pig.core.VectorSensor):
#definitions
DEVICE_MODEL = "HMC5883L"
PLATFORMS = ["beagle", "beagleboneblack", "bbb", "beaglebone"]
#device registers
REGISTERS = {"a": 0x00, "b": 0x01, "mode": 0x02,
"x_msb": 0x03, "x_lsb": 0x04,
"z_msb": 0x05, "z_lsb": 0x06,
"y_msb": 0x07, "y_lsb": 0x08,
"status": 0x09, "id_a": 0x0a, "id_b": 0x0b, "id_c": 0x0c}
#available (measurement) modes
MODES = {"default": 0x00, "continuous": 0x00, "single": 0x01}
#gains (LBb/Gauss)
GAINS = {"default": 0x01, "low": 0x07, "mean": 0x03, "high": 0x00}
#measuring rates (Hz)
RATES = {"default": 0x04, "low": 0x00, "mean": 0x05, "high": 0x06}
#number of samples to get per measurement
SAMPLING = {"default": 0x00, "low": 0x00, "mean": 0x02, "high": 0x03}
#bias to apply to device
BIAS = {"default": 0x00, "positive": 0x01, "negative": 0x02}
#status to status registers
STATUS = {"ready": 0x01}
@classmethod
def get_model_name(cls):
return Compass.DEVICE_MODEL
@classmethod
def supports(cls, platform):
return platform.lower() in Compass.PLATFORMS
def __init__(self, port, cfg_file="", gain="default", rate="default",
mode="default", samples="default", bias="default"):
#initialization of device
self.device = Adafruit_I2C(port)
        #setting mode via registers; set_reg_a maps the option names to
        #register values itself, so the keys are passed straight through
        self.set_reg_a(samples, rate, bias)
self.set_reg_b(Compass.GAINS[gain])
self.set_reg_mode(Compass.MODES[mode])
#opening configuration file; must be .csv
if cfg_file:
with open(cfg_file, "r") as fd:
self.errors = tuple(int(val) for val in fd.read().split(","))
else:
self.errors = 2*(1, 0)
def write(self, register, mode):
self.device.write8(register,mode)
def read(self, register):
return self.device.readS8(register)
    def set_reg_a(self, samples="default", rate="default", bias="default"):
        mode = (Compass.SAMPLING[samples] << 5) | \
            (Compass.RATES[rate] << 2) | Compass.BIAS[bias]
        self.write(Compass.REGISTERS["a"], mode)
    def set_reg_b(self, gain=GAINS["default"]):
        mode = gain << 5
        self.write(Compass.REGISTERS["b"], mode)
    def set_reg_mode(self, mode=MODES["continuous"]):
        self.write(Compass.REGISTERS["mode"], mode)
def refresh(self):
#defining registers from which to read
registers = [Compass.REGISTERS[key] for key in ("x_msb", "x_lsb", "y_msb",
"y_lsb", "z_msb", "z_lsb")]
#reading data from sensor's registers
raw_data = [self.read(reg) for reg in registers]
#making the right shifts to get the value
self._x, self._y, self._z = tuple(raw_data[i] << 8 | raw_data[i+1] \
for i in range(0, len(raw_data), 2))
def value_x(self):
return self._x
def value_y(self):
return self._y
def value_z(self):
return self._z
    def angle(self):
self.refresh()
x, y = self.value_x(), self.value_y()
#compensating errors
xmax, xmin, ymax, ymin = self.errors
x = 2*(x-xmin)/float(xmax-xmin) - 1
y = 2*(y-ymin)/float(ymax-ymin) - 1
return math.atan2(y, x)
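if __name__ == "__main__":
    # Usage sketch (illustrative only): 0x1e is the HMC5883L's usual I2C
    # address; correct wiring and an optional calibration file are assumed.
    compass = Compass(0x1e)
    print("heading: %.3f rad" % compass.angle())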
| erikperillo/pig | pig/beagle/compass.py | Python | gpl-3.0 | 4,266 |
"""The tests for the Graphite component."""
import socket
import unittest
try:
from unittest import mock
except ImportError:
import mock
import homeassistant.core as ha
import homeassistant.components.graphite as graphite
from homeassistant.const import (
EVENT_STATE_CHANGED,
EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP,
STATE_ON, STATE_OFF)
from tests.common import get_test_home_assistant
class TestGraphite(unittest.TestCase):
"""Test the Graphite component."""
def setup_method(self, method):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.hass.config.latitude = 32.87336
self.hass.config.longitude = 117.22743
self.gf = graphite.GraphiteFeeder(self.hass, 'foo', 123, 'ha')
def teardown_method(self, method):
"""Stop everything that was started."""
self.hass.stop()
@mock.patch('homeassistant.components.graphite.GraphiteFeeder')
def test_minimal_config(self, mock_gf):
"""Test setup with minimal configuration."""
self.assertTrue(graphite.setup(self.hass, {}))
mock_gf.assert_called_once_with(self.hass, 'localhost', 2003, 'ha')
@mock.patch('homeassistant.components.graphite.GraphiteFeeder')
def test_full_config(self, mock_gf):
"""Test setup with full configuration."""
config = {
'graphite': {
'host': 'foo',
'port': 123,
'prefix': 'me',
}
}
self.assertTrue(graphite.setup(self.hass, config))
mock_gf.assert_called_once_with(self.hass, 'foo', 123, 'me')
@mock.patch('homeassistant.components.graphite.GraphiteFeeder')
def test_config_bad_port(self, mock_gf):
"""Test setup with invalid port."""
config = {
'graphite': {
'host': 'foo',
'port': 'wrong',
}
}
self.assertFalse(graphite.setup(self.hass, config))
self.assertFalse(mock_gf.called)
def test_subscribe(self):
"""Test the subscription."""
fake_hass = mock.MagicMock()
gf = graphite.GraphiteFeeder(fake_hass, 'foo', 123, 'ha')
fake_hass.bus.listen_once.has_calls([
mock.call(EVENT_HOMEASSISTANT_START, gf.start_listen),
mock.call(EVENT_HOMEASSISTANT_STOP, gf.shutdown),
])
fake_hass.bus.listen.assert_called_once_with(
EVENT_STATE_CHANGED, gf.event_listener)
def test_start(self):
"""Test the start."""
with mock.patch.object(self.gf, 'start') as mock_start:
self.gf.start_listen('event')
mock_start.assert_called_once_with()
def test_shutdown(self):
"""Test the shutdown."""
with mock.patch.object(self.gf, '_queue') as mock_queue:
self.gf.shutdown('event')
mock_queue.put.assert_called_once_with(self.gf._quit_object)
def test_event_listener(self):
"""Test the event listener."""
with mock.patch.object(self.gf, '_queue') as mock_queue:
self.gf.event_listener('foo')
mock_queue.put.assert_called_once_with('foo')
@mock.patch('time.time')
def test_report_attributes(self, mock_time):
"""Test the reporting with attributes."""
mock_time.return_value = 12345
attrs = {'foo': 1,
'bar': 2.0,
'baz': True,
'bat': 'NaN',
}
expected = [
'ha.entity.state 0.000000 12345',
'ha.entity.foo 1.000000 12345',
'ha.entity.bar 2.000000 12345',
'ha.entity.baz 1.000000 12345',
]
state = mock.MagicMock(state=0, attributes=attrs)
with mock.patch.object(self.gf, '_send_to_graphite') as mock_send:
self.gf._report_attributes('entity', state)
actual = mock_send.call_args_list[0][0][0].split('\n')
self.assertEqual(sorted(expected), sorted(actual))
@mock.patch('time.time')
def test_report_with_string_state(self, mock_time):
"""Test the reporting with strings."""
mock_time.return_value = 12345
expected = [
'ha.entity.foo 1.000000 12345',
'ha.entity.state 1.000000 12345',
]
state = mock.MagicMock(state='above_horizon', attributes={'foo': 1.0})
with mock.patch.object(self.gf, '_send_to_graphite') as mock_send:
self.gf._report_attributes('entity', state)
actual = mock_send.call_args_list[0][0][0].split('\n')
self.assertEqual(sorted(expected), sorted(actual))
@mock.patch('time.time')
def test_report_with_binary_state(self, mock_time):
"""Test the reporting with binary state."""
mock_time.return_value = 12345
state = ha.State('domain.entity', STATE_ON, {'foo': 1.0})
with mock.patch.object(self.gf, '_send_to_graphite') as mock_send:
self.gf._report_attributes('entity', state)
expected = ['ha.entity.foo 1.000000 12345',
'ha.entity.state 1.000000 12345']
actual = mock_send.call_args_list[0][0][0].split('\n')
self.assertEqual(sorted(expected), sorted(actual))
state.state = STATE_OFF
with mock.patch.object(self.gf, '_send_to_graphite') as mock_send:
self.gf._report_attributes('entity', state)
expected = ['ha.entity.foo 1.000000 12345',
'ha.entity.state 0.000000 12345']
actual = mock_send.call_args_list[0][0][0].split('\n')
self.assertEqual(sorted(expected), sorted(actual))
@mock.patch('time.time')
def test_send_to_graphite_errors(self, mock_time):
"""Test the sending with errors."""
mock_time.return_value = 12345
state = ha.State('domain.entity', STATE_ON, {'foo': 1.0})
with mock.patch.object(self.gf, '_send_to_graphite') as mock_send:
mock_send.side_effect = socket.error
self.gf._report_attributes('entity', state)
mock_send.side_effect = socket.gaierror
self.gf._report_attributes('entity', state)
@mock.patch('socket.socket')
def test_send_to_graphite(self, mock_socket):
"""Test the sending of data."""
self.gf._send_to_graphite('foo')
mock_socket.assert_called_once_with(socket.AF_INET,
socket.SOCK_STREAM)
sock = mock_socket.return_value
sock.connect.assert_called_once_with(('foo', 123))
sock.sendall.assert_called_once_with('foo'.encode('ascii'))
sock.send.assert_called_once_with('\n'.encode('ascii'))
sock.close.assert_called_once_with()
def test_run_stops(self):
"""Test the stops."""
with mock.patch.object(self.gf, '_queue') as mock_queue:
mock_queue.get.return_value = self.gf._quit_object
self.assertEqual(None, self.gf.run())
mock_queue.get.assert_called_once_with()
mock_queue.task_done.assert_called_once_with()
def test_run(self):
"""Test the running."""
runs = []
event = mock.MagicMock(event_type=EVENT_STATE_CHANGED,
data={'entity_id': 'entity',
'new_state': mock.MagicMock()})
def fake_get():
if len(runs) >= 2:
return self.gf._quit_object
elif runs:
runs.append(1)
return mock.MagicMock(event_type='somethingelse',
data={'new_event': None})
else:
runs.append(1)
return event
with mock.patch.object(self.gf, '_queue') as mock_queue:
with mock.patch.object(self.gf, '_report_attributes') as mock_r:
mock_queue.get.side_effect = fake_get
self.gf.run()
# Twice for two events, once for the stop
self.assertEqual(3, mock_queue.task_done.call_count)
mock_r.assert_called_once_with(
'entity',
event.data['new_state'])
| Julian/home-assistant | tests/components/test_graphite.py | Python | mit | 8,257 |
"""A cache for storing small matrices in multiple formats."""
from sympy import Matrix, I, Pow, Rational, exp, pi
from sympy.physics.quantum.matrixutils import (
to_sympy, to_numpy, to_scipy_sparse
)
class MatrixCache(object):
"""A cache for small matrices in different formats.
This class takes small matrices in the standard ``sympy.Matrix`` format,
and then converts these to both ``numpy.matrix`` and
``scipy.sparse.csr_matrix`` matrices. These matrices are then stored for
future recovery.
"""
def __init__(self, dtype='complex'):
self._cache = {}
self.dtype = dtype
def cache_matrix(self, name, m):
"""Cache a matrix by its name.
Parameters
----------
name : str
A descriptive name for the matrix, like "identity2".
m : list of lists
The raw matrix data as a sympy Matrix.
"""
try:
self._sympy_matrix(name, m)
except ImportError:
pass
try:
self._numpy_matrix(name, m)
except ImportError:
pass
try:
self._scipy_sparse_matrix(name, m)
except ImportError:
pass
def get_matrix(self, name, format):
"""Get a cached matrix by name and format.
Parameters
----------
name : str
A descriptive name for the matrix, like "identity2".
format : str
The format desired ('sympy', 'numpy', 'scipy.sparse')
"""
m = self._cache.get((name, format))
if m is not None:
return m
raise NotImplementedError(
            'Matrix with name %s and format %s is not available.' %
(name, format)
)
def _store_matrix(self, name, format, m):
self._cache[(name, format)] = m
def _sympy_matrix(self, name, m):
self._store_matrix(name, 'sympy', to_sympy(m))
def _numpy_matrix(self, name, m):
m = to_numpy(m, dtype=self.dtype)
self._store_matrix(name, 'numpy', m)
def _scipy_sparse_matrix(self, name, m):
# TODO: explore different sparse formats. But sparse.kron will use
# coo in most cases, so we use that here.
m = to_scipy_sparse(m, dtype=self.dtype)
self._store_matrix(name, 'scipy.sparse', m)
sqrt2_inv = Pow(2, Rational(-1,2), evaluate=False)
# Save the common matrices that we will need
matrix_cache = MatrixCache()
matrix_cache.cache_matrix('eye2', Matrix([[1,0],[0,1]]))
matrix_cache.cache_matrix('op11', Matrix([[0,0],[0,1]])) # |1><1|
matrix_cache.cache_matrix('op00', Matrix([[1,0],[0,0]])) # |0><0|
matrix_cache.cache_matrix('op10', Matrix([[0,0],[1,0]])) # |1><0|
matrix_cache.cache_matrix('op01', Matrix([[0,1],[0,0]])) # |0><1|
matrix_cache.cache_matrix('X', Matrix([[0, 1], [1, 0]]))
matrix_cache.cache_matrix('Y', Matrix([[0, -I], [I, 0]]))
matrix_cache.cache_matrix('Z', Matrix([[1, 0], [0, -1]]))
matrix_cache.cache_matrix('S', Matrix([[1, 0], [0, I]]))
matrix_cache.cache_matrix('T', Matrix([[1, 0], [0, exp(I*pi/4)]]))
matrix_cache.cache_matrix('H', sqrt2_inv*Matrix([[1, 1], [1, -1]]))
matrix_cache.cache_matrix('Hsqrt2', Matrix([[1, 1], [1, -1]]))
matrix_cache.cache_matrix('SWAP',Matrix([[1,0,0,0],[0,0,1,0],[0,1,0,0],[0,0,0,1]]))
matrix_cache.cache_matrix('ZX', sqrt2_inv*Matrix([[1,1],[1,-1]]))
matrix_cache.cache_matrix('ZY', Matrix([[I,0],[0,-I]]))
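if __name__ == '__main__':
    # Usage sketch: fetch the cached Pauli-X matrix in sympy format. The
    # numpy and scipy.sparse variants are present only if those packages
    # imported successfully when the cache was populated.
    print(matrix_cache.get_matrix('X', 'sympy'))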
| devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/sympy/physics/quantum/matrixcache.py | Python | agpl-3.0 | 3,427 |
# -*- coding: utf-8 -*-
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 David Farr <david.farr@sap.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2015 Ed Holland <eholland@alertlogic.com> #
# Copyright 2016 John Eskew <jeskew@edx.org> #
# Copyright 2016 Matthew Neal <meneal@matthews-mbp.raleigh.ibm.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2016 Sam Corbett <sam.corbett@cloudsoftcorp.com> #
# Copyright 2017 Aaron Levine <allevin@sandia.gov> #
# Copyright 2017 Nicolas Agustín Torres <nicolastrres@gmail.com> #
# Copyright 2018 Hayden Fuss <wifu1234@gmail.com> #
# Copyright 2018 Shinichi TAMURA <shnch.tmr@gmail.com> #
# Copyright 2018 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2018 Vinay Hegde <vinayhegde2010@gmail.com>
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from .AuthenticatedUser import AuthenticatedUser
from .Authentication import Authentication
from .Authorization import Authorization
from .BadAttributes import BadAttributes
from .Branch import Branch
from .BranchProtection import BranchProtection
from .Commit import Commit
from .CommitCombinedStatus import CommitCombinedStatus
from .CommitComment import CommitComment
from .CommitStatus import CommitStatus
from .ConditionalRequestUpdate import ConditionalRequestUpdate
from .Connection import Connection
from .ContentFile import ContentFile
from .Download import Download
from .Enterprise import Enterprise
from .Equality import Equality
from .Event import Event
from .Exceptions import Exceptions, SpecificExceptions
from .ExposeAllAttributes import ExposeAllAttributes
from .Gist import Gist
from .GistComment import GistComment
from .GitBlob import GitBlob
from .GitCommit import GitCommit
from .Github_ import Github
from .GithubIntegration import GithubIntegration
from .GitRef import GitRef
from .GitRelease import Release
from .GitReleaseAsset import ReleaseAsset
from .GitTag import GitTag
from .GitTree import GitTree
from .Hook import Hook
from .Issue import Issue
from .Issue33 import Issue33
from .Issue50 import Issue50
from .Issue54 import Issue54
from .Issue80 import Issue80
from .Issue87 import Issue87
from .Issue131 import Issue131
from .Issue133 import Issue133
from .Issue134 import Issue134
from .Issue139 import Issue139
from .Issue140 import Issue140
from .Issue142 import Issue142
from .Issue158 import Issue158
from .Issue174 import Issue174
from .Issue214 import Issue214
from .Issue216 import Issue216
from .Issue278 import Issue278
from .Issue494 import Issue494
from .Issue572 import Issue572
from .Issue823 import Issue823
from .Issue937 import Issue937
from .Issue945 import Issue945
from .IssueComment import IssueComment
from .IssueEvent import IssueEvent
from .Label import Label
from .License import License
from .Logging_ import Logging
from .Markdown import Markdown
from .Migration import Migration
from .Milestone import Milestone
from .NamedUser import NamedUser
from .Notification import Notification
from .Organization import Organization
from .OrganizationHasInMembers import OrganizationHasInMembers
from .PaginatedList import PaginatedList
from .Persistence import Persistence
from .Project import Project
from .PullRequest import PullRequest
from .PullRequest1168 import PullRequest1168
from .PullRequest1169 import PullRequest1169
from .PullRequestComment import PullRequestComment
from .PullRequestFile import PullRequestFile
from .PullRequestReview import PullRequestReview
from .RateLimiting import RateLimiting
from .RawData import RawData
from .Reaction import Reaction
from .Repository import LazyRepository, Repository
from .RepositoryKey import RepositoryKey
from .RequiredPullRequestReviews import RequiredPullRequestReviews
from .RequiredStatusChecks import RequiredStatusChecks
from .Retry import Retry
from .Search import Search
from .SourceImport import SourceImport
from .Tag import Tag
from .Team import Team
from .Topic import Topic
from .Traffic import Traffic
from .UserKey import UserKey
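
# Collect every imported test case class in one list; note these are the
# class objects themselves, not name strings, so the list is meant to be
# iterated over directly rather than consumed by star-imports.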
__all__ = [
AuthenticatedUser,
Authentication,
Authorization,
BadAttributes,
Branch,
BranchProtection,
Commit,
CommitCombinedStatus,
CommitComment,
CommitStatus,
ConditionalRequestUpdate,
Connection,
ContentFile,
Download,
Enterprise,
Equality,
Event,
Exceptions,
ExposeAllAttributes,
Gist,
GistComment,
GitBlob,
GitCommit,
Github,
GithubIntegration,
GitRef,
GitTag,
GitTree,
Hook,
Issue,
Issue33,
Issue50,
Issue54,
Issue80,
Issue87,
Issue131,
Issue133,
Issue134,
Issue139,
Issue140,
Issue142,
Issue158,
Issue174,
Issue214,
Issue216,
Issue278,
Issue494,
Issue572,
Issue823,
Issue937,
Issue945,
IssueComment,
IssueEvent,
LazyRepository,
Label,
License,
Logging,
Markdown,
Migration,
Milestone,
NamedUser,
Notification,
Organization,
OrganizationHasInMembers,
PaginatedList,
Persistence,
Project,
PullRequest,
PullRequest1168,
PullRequest1169,
PullRequestComment,
PullRequestReview,
PullRequestFile,
RateLimiting,
RawData,
Reaction,
Release,
ReleaseAsset,
Repository,
RepositoryKey,
RequiredPullRequestReviews,
RequiredStatusChecks,
Retry,
Search,
SpecificExceptions,
SourceImport,
Tag,
Team,
Topic,
Traffic,
UserKey,
]
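
# Illustrative usage (assumed, not part of PyGithub itself): feeding the
# collected test case classes to unittest's loader and running them.
#
#   import unittest
#   suite = unittest.TestSuite(
#       unittest.defaultTestLoader.loadTestsFromTestCase(case)
#       for case in __all__
#   )
#   unittest.TextTestRunner().run(suite)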
| allevin/PyGithub | tests/AllTests.py | Python | lgpl-3.0 | 7,803 |